diff --git a/404/index.html b/404/index.html new file mode 100644 index 0000000000..27cff86b14 --- /dev/null +++ b/404/index.html @@ -0,0 +1,8 @@ +404: This page could not be found

404

This page could not be found.

\ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code.json new file mode 100644 index 0000000000..2d255a1deb --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code.json @@ -0,0 +1 @@ +{"pageProps":{"listData":[{"linkUrl":"/code/abstract-map-simulator","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_simulation-55e32b58dd5e4ed9caf7a85baf98677c.png.webp","/_next/static/images/abstract_map_simulation-3a9dbfc04fa16e80a961cec841d316fc.png"],"primaryText":"2D Simulator for Zoo Experiments","secondaryText":"btalb/abstract_map_simulator","secondaryTransform":"lowercase"},{"linkUrl":"/code/abstract-map","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Abstract Map (Python)","secondaryText":"btalb/abstract_map","secondaryTransform":"lowercase"},{"linkUrl":"/code/abstract-map-app","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webm","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.mp4","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webp","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.jpg"],"primaryText":"Android App for Human Participants","secondaryText":"btalb/abstract_map_app","secondaryTransform":"lowercase"},{"linkUrl":"/code/armer","mediaPosition":"center","mediaUrls":["/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.webm","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.mp4","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.webp","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.jpg"],"primaryText":"Armer Driver","secondaryText":"qcr/armer","secondaryTransform":"lowercase"},{"linkUrl":"/code/ros-trees","mediaPosition":"center","mediaUrls":["/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.webm","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.mp4","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.webp","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.jpg"],"primaryText":"Behaviour trees for ROS","secondaryText":"qcr/ros_trees","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-addons","mediaPosition":"center","mediaUrls":["/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webm","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.mp4","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webp","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.jpg"],"primaryText":"BenchBot Add-ons Manager","secondaryText":"qcr/benchbot_addons","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-supervisor","mediaPosition":"center 0%","mediaUrls":["/_next/static/images/benchbot_supervisor-3e4092b6584962e3e4529101ae489a08.jpg.webp","/_next/static/images/benchbot_supervisor-fb509eb331f3380fbf5da2c3035116b6.jpg"],"primaryText":"BenchBot Backend Supervisor","secondaryText":"qcr/benchbot_supervisor","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-eval","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"BenchBot Evaluation 
Tools","secondaryText":"qcr/benchbot_eval","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-api","mediaPosition":"center 100%","mediaUrls":["/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webm","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.mp4","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webp","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.jpg"],"primaryText":"BenchBot Python API","secondaryText":"qcr/benchbot_api","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-simulator","mediaPosition":"center","mediaUrls":["/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webm","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.mp4","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webp","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.jpg"],"primaryText":"BenchBot Simulator (Isaac)","secondaryText":"qcr/benchbot_simulator","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot Software Stack","secondaryText":"qcr/benchbot","secondaryTransform":"lowercase"},{"linkUrl":"/code/delta_descriptors_code","mediaPosition":"center","mediaUrls":["/_next/static/images/ral-iros-2020-delta-descriptors-schematic-b5f57732c327f2f8546715b5dc3643af.png.webp","/_next/static/images/ral-iros-2020-delta-descriptors-schematic-95f5d1a50f3d92aa3344d9782ac13c32.png"],"primaryText":"Delta Descriptors","secondaryText":"oravus/DeltaDescriptors","secondaryTransform":"lowercase"},{"linkUrl":"/code/pgraph-python","mediaPosition":"center","mediaUrls":["/_next/static/images/roads-8b68dd7b635af6f867a02be9d399b4bd.png.webp","/_next/static/images/roads-18739c10c6cf2a6dccbffb581fb9a183.png"],"primaryText":"Graph classes (Python)","secondaryText":"petercorke/pgraph-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/gtsam-quadrics","mediaPosition":"center","mediaUrls":["/_next/static/images/gtsam_quadrics-9ce945399d611f449b8df8e1db6602ae.png.webp","/_next/static/images/gtsam_quadrics-cb27c37d5d64abed2e30e1523a8cec1a.png"],"primaryText":"GTSAM extension for 
quadrics","secondaryText":"qcr/gtsam-quadrics","secondaryTransform":"lowercase"},{"linkUrl":"/code/heaputil_code","mediaPosition":"center","mediaUrls":["/_next/static/images/overview-8c193585e23714439d55f0227d88f923.jpg.webp","/_next/static/images/overview-fc609d6102a3c08cb20b14382e57ee50.jpg"],"primaryText":"HEAPUtil","secondaryText":"Nik-V9/HEAPUtil","secondaryTransform":"lowercase"},{"linkUrl":"/code/lost_code","mediaPosition":"center","mediaUrls":["/_next/static/images/day-night-keypoint-correspondence-place-recognition-38203057bf036a1e9271b0a7647119fa.jpg.webp","/_next/static/images/day-night-keypoint-correspondence-place-recognition-bed6f778b7ec1ce4edaa346e24fb33bf.jpg"],"primaryText":"LoST-X","secondaryText":"oravus/lostX","secondaryTransform":"lowercase"},{"linkUrl":"/code/openseqslam2_code","mediaPosition":"center","mediaUrls":["/_next/static/images/openseqslam2-c5079d59d4cff5bd652acb1652d047f6.png.webp","/_next/static/images/openseqslam2-f3755fc8e61c0d81c8f0b0f42c5e08ae.png"],"primaryText":"OpenSeqSLAM2","secondaryText":"qcr/openseqslam2","secondaryTransform":"lowercase"},{"linkUrl":"/code/patchnetvlad_code","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Patch-NetVLAD","secondaryText":"QVPR/Patch-NetVLAD","secondaryTransform":"lowercase"},{"linkUrl":"/code/topometric_localization","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"Place-aware Topometric Localization","secondaryText":"mingu6/TopometricLoc","secondaryTransform":"lowercase"},{"linkUrl":"/code/pdq","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/qcr_web_img-c5a515adb03792ab295e52f405822b65.jpg.webp","/_next/static/images/qcr_web_img-8b73fea58e143ca4e51ab20579b08efa.jpg"],"primaryText":"Probability-based Detection Quality (PDQ)","secondaryText":"david2611/pdq_evaluation","secondaryTransform":"lowercase"},{"linkUrl":"/code/code-templates","mediaPosition":"center","mediaUrls":["/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.webm","/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.mp4","/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.webp","/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.jpg"],"primaryText":"QCR's Code Templates","secondaryText":"qcr/code_templates","secondaryTransform":"lowercase"},{"linkUrl":"/code/quadricslam","mediaPosition":"center","mediaUrls":["/_next/static/images/quadricslam_video-412d8ad8190b4f7eee1320faf254cd6f.png.webp","/_next/static/images/quadricslam_video-a4d673ea6414754e153004c137d2a2c1.png"],"primaryText":"QuadricSLAM","secondaryText":"qcr/quadricslam","secondaryTransform":"lowercase"},{"linkUrl":"/code/robotics-toolbox-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Robotics Toolbox Python","secondaryText":"petercorke/robotics-toolbox-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/ros-omron-driver","mediaPosition":"center","mediaUrls":["/_next/static/images/omron_robot-6882a84f2dec840b5cba11e9f8f19e65.jpg.webp","/_next/static/images/omron_robot-542517e40cecf88333a4f6e07f854cc1.jpg"],"primaryText":"ROS Omron 
Driver","secondaryText":"qcr/ros_omron_driver","secondaryTransform":"lowercase"},{"linkUrl":"/code/rt_bene_code","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/rt_bene_best_poster_award-5ac70111852de9eac6c94cd88ef726e0.png.webp","/_next/static/images/rt_bene_best_poster_award-d72f84610eb0050287dd856b52cc99c5.png"],"primaryText":"RT-BENE: Real-Time Blink Estimation in Natural Environments Codebase","secondaryText":"Tobias-Fischer/rt_gene","secondaryTransform":"lowercase"},{"linkUrl":"/code/rt_gene_code","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"primaryText":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Codebase","secondaryText":"Tobias-Fischer/rt_gene","secondaryTransform":"lowercase"},{"linkUrl":"/code/seq2single_code","mediaPosition":"center","mediaUrls":["/_next/static/images/illustration-73bec1a3cac56819cdbea1268b711fa4.png.webp","/_next/static/images/illustration-1e185173132d7d8138449660ac905c04.png"],"primaryText":"seq2single","secondaryText":"oravus/seq2single","secondaryTransform":"lowercase"},{"linkUrl":"/code/seqnet_code","mediaPosition":"center","mediaUrls":["/_next/static/images/seqnet-cfc1aecd3cd2b268af41400a4fb86e6a.jpg.webp","/_next/static/images/seqnet-69de71978f2b7f0ffbcefcbb976010d3.jpg"],"primaryText":"SeqNet","secondaryText":"oravus/seqNet","secondaryTransform":"lowercase"},{"linkUrl":"/code/spatialmath-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/CartesianSnakes_LogoW-7d2f987ca5432e1ce32ce72e90be7c64.png.webp","/_next/static/images/CartesianSnakes_LogoW-d72d60a588449aa6a08846bed694c0c9.png"],"primaryText":"Spatialmath Python","secondaryText":"petercorke/spatialmath-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/vpr_snn","mediaPosition":"center","mediaUrls":["/_next/static/images/Ens_of_modularSNNs-b59ff02969917c2eb544fd14a2014936.png.webp","/_next/static/images/Ens_of_modularSNNs-2e12118a078b9b819e6e9169d4994b74.png"],"primaryText":"Spiking Neural Networks for Visual Place Recognition","secondaryText":"QVPR/VPRSNN","secondaryTransform":"lowercase"},{"linkUrl":"/code/swift","mediaPosition":"center","mediaUrls":["/_next/static/images/panda-f1735ad2d702ae9c686b2f0e727e9941.png.webp","/_next/static/images/panda-c3722217e520e43c10f1bc26fffcd0fd.png"],"primaryText":"Swift","secondaryText":"jhavl/swift","secondaryTransform":"lowercase"},{"linkUrl":"/code/event_vpr_code","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"primaryText":"Visual Place Recognition using Event Cameras","secondaryText":"Tobias-Fischer/ensemble-event-vpr","secondaryTransform":"lowercase"},{"linkUrl":"/code/teach_repeat","mediaPosition":"center","mediaUrls":["/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webm","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.mp4","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webp","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.jpg"],"primaryText":"Visual Teach and 
Repeat","secondaryText":"QVPR/teach-repeat","secondaryTransform":"lowercase"},{"linkUrl":"/code/vprbench","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"MubarizZaffar/VPR-Bench","secondaryTransform":"lowercase"}],"title":"Codebases on GitHub"},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-app.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-app.json new file mode 100644 index 0000000000..7c10108788 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-app.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

~ Please see the abstract map site for further details about the research publication ~

\n

App for the Human vs Abstract Map Zoo Experiments

\n

\n

This repository contains the mobile application used by human participants in the zoo experiments described in our IEEE TCDS journal. The app, created with Android Studio, includes the following:

\n\n

Developing & producing the app

\n

The project should be directly openable using Android Studio.

\n

Please keep in mind that this app was last developed in 2019, and Android Studio often introduces minor breaking changes with new versions. You will often have to tweak things like Gradle versions and syntax to get the project working with newer versions. Android Studio is good, though, at pointing out where it sees errors and offering suggestions for how to resolve them.

\n

Once you have the project open, you should be able to compile the app and load it directly onto a device without issues.

\n

Acknowledgements & Citing our work

\n

This work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics.

\n

If you use this software in your research, or for comparisons, please kindly cite our work:

\n
@ARTICLE{9091567,  \n    author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}},  \n    journal={IEEE Transactions on Cognitive and Developmental Systems},   \n    title={Robot Navigation in Unseen Spaces using an Abstract Map},   \n    year={2020},  \n    volume={},  \n    number={},  \n    pages={1-1},\n    keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},\n    doi={10.1109/TCDS.2020.2993855},\n    ISSN={2379-8939},\n    month={},\n}\n
\n","name":"Android App for Human Participants","type":"code","url":"https://github.com/btalb/abstract_map_app","image":"./docs/abstract_map_app.gif","_images":["/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webm","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.mp4","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webp","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.jpg"],"src":"/content/human_cues/abstract-map-app.md","id":"abstract-map-app","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-simulator.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-simulator.json new file mode 100644 index 0000000000..7cb608807e --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-simulator.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

~ Please see the abstract map site for further details about the research publication ~

\n

Using the Abstract Map in a 2D Stage simulation

\n

\"2D

\n

This package contains everything needed to simulate the zoo experiments performed in our IEEE TCDS journal. The package includes:

\n\n

Installing the abstract map simulator

\n

Note: this is just the simulator; to use the abstract map with the simulator please make sure you use the abstract_map package

\n

Clone the repo & install all Python dependencies:

\n
git clone https://github.com/btalb/abstract_map_simulator\npip install -r abstract_map_simulator/requirements.txt\n
\n

Add the new package to your ROS workspace at <ROS_WS>/ by linking in the cloned repository:

\n
ln -s <LOCATION_REPO_WAS_CLONED_ABOVE> <ROS_WS>/src/\n
\n

Install all of the listed ROS dependencies, and build the package:

\n
cd <ROS_WS>/src/\nrosdep install abstract_map_simulator\ncd <ROS_WS>\ncatkin_make\n
\n

Acknowledgements & Citing our work

\n

This work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics.

\n

If you use this software in your research, or for comparisons, please kindly cite our work:

\n
@ARTICLE{9091567,  \n    author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}},  \n    journal={IEEE Transactions on Cognitive and Developmental Systems},   \n    title={Robot Navigation in Unseen Spaces using an Abstract Map},   \n    year={2020},  \n    volume={},  \n    number={},  \n    pages={1-1},\n    keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},\n    doi={10.1109/TCDS.2020.2993855},\n    ISSN={2379-8939},\n    month={},\n}\n
\n","name":"2D Simulator for Zoo Experiments","type":"code","url":"https://github.com/btalb/abstract_map_simulator","image":"./docs/abstract_map_simulation.png","_images":["/_next/static/images/abstract_map_simulation-55e32b58dd5e4ed9caf7a85baf98677c.png.webp","/_next/static/images/abstract_map_simulation-3a9dbfc04fa16e80a961cec841d316fc.png"],"src":"/content/human_cues/abstract-map-simulator.md","id":"abstract-map-simulator","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map.json new file mode 100644 index 0000000000..4b7b48c62d --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

~ Please see the abstract map site for further details about the research publication ~

\n

The Abstract Map - using symbols to navigate

\n

\"The

\n

This repository provides the implementation of the abstract map used in our IEEE TCDS journal. The implementation, done in Python, includes the following features:

\n\n

Please see our other related repositories for further resources, and related parts of the abstract map studies:

\n\n

Getting up & running with the abstract map

\n

Note: if you wish to run this in simulation (significantly easier than on a real robot platform), you will also need the abstract_map_simulator package

\n

Setting up your environment

\n

Clone the repo & install all Python dependencies:

\n
git clone https://github.com/btalb/abstract_map\npip install -r abstract_map/requirements.txt\n
\n

Add the new package to your ROS workspace at <ROS_WS>/ by linking in the cloned repository:

\n
ln -s <LOCATION_REPO_WAS_CLONED_ABOVE> <ROS_WS>/src/\n
\n

Install all of the listed ROS dependencies, and build the package:

\n
cd <ROS_WS>/src/\nrosdep install abstract_map\ncd <ROS_WS>\ncatkin_make\n
\n

Running the Zoo experiments

\n

Start the experiment (this will try & launch the 2D simulation back-end by default, so make sure you have that installed if you are using it):

\n
roslaunch abstract_map experiment.launch\n
\n

(please see this issue for details if you get spammed with TF-based errors... which probably shouldn't even be errors)

\n

In another terminal, start the hierarchy publisher to give the abstract map the contextual symbolic spatial information to begin with:

\n
rosrun abstract_map hierarchy_publisher\n
\n

This will use the hierarchy available in ./experiments/zoo_hierarchy.xml by default. Feel free to make your own if you would like to do different experiments.

\n

Start the visualiser in preparation of beginning the experiment (pick either light or dark mode with one of the two commands):

\n
rosrun abstract_map visualiser\n
\n
rosrun abstract_map visualiser --dark\n
\n

\"Visualise

\n

Finally, start the abstract map with a goal, and watch it attempt to complete the navigation task:

\n
roslaunch abstract_map abstract_map.launch goal:=Lion\n
\n

If you want to manually drive the robot around and observe how the abstract map evolves over time, you can run the above command without a goal to start in \"observe mode\".

\n

Acknowledgements & Citing our work

\n

This work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics.

\n

If you use this software in your research, or for comparisons, please kindly cite our work:

\n
@ARTICLE{9091567,  \n    author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}},  \n    journal={IEEE Transactions on Cognitive and Developmental Systems},   \n    title={Robot Navigation in Unseen Spaces using an Abstract Map},   \n    year={2020},  \n    volume={},  \n    number={},  \n    pages={1-1},\n    keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},\n    doi={10.1109/TCDS.2020.2993855},\n    ISSN={2379-8939},\n    month={},\n}\n
\n","name":"Abstract Map (Python)","type":"code","url":"https://github.com/btalb/abstract_map","image":"./docs/assets/images/abstract_map_in_action.png","_images":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"src":"/content/human_cues/abstract-map.md","id":"abstract-map","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/armer.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/armer.json new file mode 100644 index 0000000000..d246ac3289 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/armer.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Armer Driver

\n

\"QUT\n\"License\"\n\"Build\n\"Language\n\"codecov\"

\n

\n

\"Armer

\n

\"image\"

\n

Armer aims to provide an interface layer between the hardware drivers of a robotic arm and the user, giving the user control in several ways:

\n\n

In addition to offering multiple control methods, Armer is designed to be a compatibility layer that lets the user run the same code across different robotic platforms. Armer supports control of both physical and simulated arms, giving users the ability to develop even without access to a physical manipulator.

\n

Below is a gif of 3 different simulated arms moving with the same Cartesian velocity commands.

\n

\n

Requirements

\n

Several ROS action servers, topics and services are set up by Armer\nto enable this functionality. A summary of these can be found\nhere.

\n

Armer is built on the Python Robotics Toolbox (RTB) and requires a URDF-loaded RTB model to calculate the required movement kinematics. RTB comes with the browser-based simulator Swift, which Armer uses as an out-of-the-box simulator.

\n

Due to these supporting packages, using Armer with a manipulator has several requirements:

\n

Software requirements

\n\n

Robot specific requirements

\n\n

Installation

\n

Linux (Ubuntu 20.04)

\n

Copy and paste the following code snippet into a terminal to create a new catkin workspace and install Armer into it. Note that this script will also add the workspace so that it is sourced every time a bash terminal is opened. If RoboStack is preferred, please follow the steps in the next section.

\n
# Install pip \nsudo apt install python3-pip\n\n# Make the workspace and clone armer and armer_msgs packages\nmkdir -p ~/armer_ws/src && cd ~/armer_ws/src \ngit clone https://github.com/qcr/armer.git && git clone https://github.com/qcr/armer_msgs \n\n# Install all required packages\npip install -r ~/armer_ws/src/armer/requirements.txt\ncd .. && rosdep install --from-paths src --ignore-src -r -y \n\n# Make and source the workspace \ncatkin_make \necho \"source ~/armer_ws/devel/setup.bash\" >> ~/.bashrc \nsource ~/armer_ws/devel/setup.bash\necho \"Installation complete!\"\n
\n

macOS and Windows (10/11)

\n

To enable easy use of ROS on these operating systems, it is recommended to use RoboStack; note that ROS 1 (noetic) is recommended at this stage. Please ensure you have mamba installed before proceeding, and follow all required steps for the RoboStack install (as per their instructions) for the smoothest setup on your particular OS.

\n
# --- Mamba Environment Setup --- #\n# Create and activate a new robostack (ros-env) environment\nmamba create -n ros-env ros-noetic-desktop python=3.9 -c robostack-staging -c conda-forge --no-channel-priority --override-channels\nmamba activate ros-env\n\n# Install some compiler packages\nmamba install compilers cmake pkg-config make ninja\n\n# FOR WINDOWS: Install the Visual Studio command prompt - if you use Visual Studio 2022\nmamba install vs2022_win-64\n\n# --- ARMer Setup --- #\n# Make the armer workspace and clone in armer and armer_msgs packages\n# FOR LINUX/MACOS\nmkdir -p ~/armer_ws/src && cd ~/armer_ws/src \n# FOR WINDOWS: Assumes you are in the home folder\nmkdir armer_ws\\src && cd armer_ws\\src\n# Clone in armer and armer_msgs\ngit clone https://github.com/qcr/armer.git && git clone https://github.com/qcr/armer_msgs \n# Install all required packages (into ros-env) - from current directory\n# FOR LINUX/MACOS\npip install -r armer/requirements.txt\n# FOR WINDOWS\npip install -r armer\\requirements.txt\n# Enter armer_ws folder and run rosdep commands\ncd .. && rosdep init && rosdep update && rosdep install --from-paths src --ignore-src -r -y \n\n# Make and source the workspace (including environment)\ncatkin_make \n\n# --- Default Activation of Environment --- #\n# FOR LINUX \necho \"mamba activate ros-env\" >> ~/.bashrc\n\n# FOR MACOS\necho \"mamba activate ros-env\" >> ~/.bash_profile\n\n# --- Workspace Source --- #\nsource ~/armer_ws/devel/setup.bash\n
\n

Supported Arms

\n

Armer relies on the manipulator's ROS driver to communicate with the low-level hardware, so the ROS drivers must be started alongside Armer. NOTE: the below packages are required for control of a real robot - see below for simulation usage instructions

\n

Currently, Armer has hardware packages that bundle the launch of Armer together with the target manipulator's drivers. If your arm model has a hardware package, control should be a fairly plug-and-play experience (an experience we are still working on, so please let us know if it isn't). Below are the GitHub pages for arms with hardware packages. Install directions can be found on their respective pages.

\n\n

For more information on setting up manipulators not listed here see the Armer documentation, Supported Arms.

\n

Usage

\n

The Armer interface can be launched with the following command for simulation (note, please replace USER with your own username):

\n
\n
# Example is using the panda_sim.yaml. Note, please update the below path if the install directory is different\nroslaunch armer armer.launch config:=/home/$USER/armer_ws/src/armer/cfg/panda_sim.yaml\n
\n
\n

Alternatively, the Armer interface can be launched for a real robot using the following command (Note that this can also support simulation if you wish via the sim parameter):

\n
\n
# Note this example launches the panda model in simulation mode (assumes you have this package cloned, see above)\nroslaunch armer_panda robot_bringup.launch sim:=true\n
\n
\n

After launching, an arm can be controlled in several ways. Some quick tutorials can be referenced below:

\n\n
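As a flavour of what those tutorials cover, here is a minimal sketch of commanding an arm from Python via ROS once Armer is running. The topic name /arm/cartesian/velocity, the geometry_msgs/TwistStamped message type, and the base_link frame are assumptions about Armer's default configuration; check the Armer documentation for the exact names on your setup.

```python
# Minimal sketch: stream a Cartesian velocity to a running Armer instance.
# The topic name, message type and frame below are assumptions (check the Armer docs).
import rospy
from geometry_msgs.msg import TwistStamped

rospy.init_node('armer_velocity_example')
pub = rospy.Publisher('/arm/cartesian/velocity', TwistStamped, queue_size=1)

rate = rospy.Rate(100)  # velocity control expects a steady stream of messages
while not rospy.is_shutdown():
    msg = TwistStamped()
    msg.header.frame_id = 'base_link'  # assumed base frame of the arm
    msg.twist.linear.z = 0.05          # move the end-effector up at 5 cm/s
    pub.publish(msg)
    rate.sleep()
```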

For more information and examples see the Armer\ndocumentation

\n","name":"Armer Driver","type":"code","url":"https://github.com/qcr/armer","image":"https://github.com/qcr/armer/wiki/armer_example.gif","_images":["/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.webm","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.mp4","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.webp","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.jpg"],"src":"/content/armer.md","id":"armer","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-addons.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-addons.json new file mode 100644 index 0000000000..130afa3d4e --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-addons.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation (although it can be installed independently through pip if desired). For a working BenchBot system, please install the BenchBot software stack by following the instructions here.

\n

BenchBot Add-ons Manager

\n

\"BenchBot\n\"QUT\n\"Primary\n\"License\"

\n

\n

The BenchBot Add-ons Manager allows you to use BenchBot with a wide array of additional content, and customise your installation to suit your needs. Semantic Scene Understanding not your thing? Install the Semantic Question Answering add-ons instead. Want to create your own content? Write some basic YAML files to make your own add-ons. Need to re-use existing content? Simply include a dependency on that add-on. Add-ons are all about making BenchBot whatever you need it to be: build a BenchBot for your research problems, exactly as you need it.

\n

Add-ons come in a variety of types. Anything that you may need to customise for your own experiments or research should be customisable through an add-on. If not, let us know, and we'll add more add-on-enabled functionality to BenchBot!

\n

The list of currently supported types of add-ons are:

\n\n

See the sections below for details of how to interact with installed add-ons, how to create your own add-ons, and formalisation of what's required in an add-on.

\n

Installing and using the add-ons manager

\n

In general, you won't use the add-ons manager directly. Instead you interact with the BenchBot software stack, which uses the add-ons manager to manage and access add-ons.

\n

If you do find you want to use the manager directly, it is a Python package installable with pip. Run the following in the root directory where the repository was cloned:

\n
u@pc:~$ pip install .\n
\n

The manager can then be imported and used to manage installation, loading, accessing, processing, and updating of add-ons. Some samples of supported functionality are shown below:

\n
from benchbot_addons import manager as bam\n\n# Check if example with 'name' = 'hello_scd' exists\nbam.exists('examples', [('name', 'hello_scd')])\n\n# Find all installed environments\nbam.find_all('environments')\n\n# Get a list of the names for all installed tasks\nbam.get_field('tasks', 'name')\n\n# Get a list of (name, variant) pairs for all installed environments\nbam.get_fields('environments', ['name', 'variant'])\n\n# Find a robot with 'name' = 'carter'\nbam.get_match('robots', [('name', 'carter')])\n\n# Get the 'results_format' value for the task called 'scd:passive:ground_truth'\nbam.get_value_by_name('tasks', 'scd:passive:ground_truth', 'results_format')\n\n# Load YAML data for all installed ground truths\nbam.load_yaml_list(bam.find_all('ground_truths', extension='json'))\n\n# Install a list of comma-separated add-ons\nbam.install_addons('benchbot-addons/ssu,benchbot-addons/sqa')\n\n# Install a specific add-on (& it's dependencies)\nbam.install_addon('tasks_ssu')\n\n# Print the list of currently installed add-ons, & officially available add-ons\nbam.print_state()\n\n# Uninstall all add-ons\nbam.remove_addons()\n\n# Uninstall a string separated list of add-ons\nbam.remove_addon('benchbot-addons/ssu,benchbot-addons/sqa')\n
\n

Creating your own add-on content

\n

Add-ons are designed to make it easy to add your own local content to a BenchBot installation. You can add your own local content to the \"local add-ons\" folder provided with your install. The location on your machine can be printed via the following:

\n
from benchbot_addons import manager as bam\n\nprint(bam.local_addon_path())\n
\n

BenchBot expects add-on content to be in named folders denoting the type of content. For example, robots must be in a folder called 'robots', tasks in a folder called 'tasks', and so on. A list of valid content types is available via the SUPPORTED_TYPES field in the add-ons manager.

\n
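For example, you can check the folder names the manager recognises directly from Python (a quick sketch using the SUPPORTED_TYPES field and local_addon_path() helper mentioned above):

```python
from benchbot_addons import manager as bam

# Folder names the add-ons manager recognises as valid content types,
# and the local folder where your own content should live
print(bam.SUPPORTED_TYPES)
print(bam.local_addon_path())
```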

Below is an example of the process you would go through to create your own custom task locally:

\n
    \n
  1. Find the location for your custom local add-ons:
    u@pc:~$ python3 -c 'from benchbot_addons import manager as bam; print(bam.local_addon_path())'\n/home/ben/repos/benchbot/addons/benchbot_addons/.local/my_addons
  2. Create the following YAML file for your task: /home/ben/repos/benchbot/addons/benchbot_addons/.local/my_addons/tasks/my_task.yaml (a rough sketch of such a file is shown after this list)
  3. Use the fields described below in the task add-ons specification to define your task
  4. Save the file
\n
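As a rough illustration of step 2, the sketch below writes a hypothetical my_task.yaml using keys documented in the task add-ons specification further down. The task name and channel choices are invented for the example; substitute the connections your robot actually provides.

```python
# Sketch only: writes a hypothetical task definition using keys from the
# task add-ons specification below. All values here are illustrative.
import yaml

my_task = {
    'name': 'my_task:passive:ground_truth',        # must be unique
    'actions': ['move_next'],                      # named connections used as actions
    'observations': ['image_rgb', 'image_depth'],  # named connections used as observations
    'localisation': 'ground_truth',
    'description': 'A custom task created for demonstration purposes',
}

with open('my_task.yaml', 'w') as f:
    yaml.safe_dump(my_task, f, default_flow_style=False)
```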

Done. Your new custom task should now be available for use in your BenchBot system (e.g. benchbot_run --list-tasks).

\n

Sharing your custom add-ons

\n

Custom add-on content can be grouped together into an add-on package, of which there are two different types: 'official' and third-party.

\n

'Official' packages are those we've verified, and are stored in our benchbot-addons GitHub organisation. You can get a full list of official add-on packages through the manager.official_addons() helper function, or benchbot_install --list-addons script in the BenchBot software stack.

\n
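If you'd rather query this from Python, the manager exposes the same information (a small sketch using the helper named above):

```python
from benchbot_addons import manager as bam

# Names of the add-on packages available from the official
# benchbot-addons GitHub organisation
print(bam.official_addons())
```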

Third-party add-on packages differ only in that we haven't looked at them, and they can be hosted anywhere on GitHub you please.

\n

Creating an add-on package is exactly the same process for both types; the only difference is whether the repository is inside or outside of the benchbot-addons GitHub organisation:

\n
    \n
  1. Create a new GitHub repository
  2. Add folders corresponding to the type of content your add-ons provide (i.e. an environments add-on has an environments directory at the root).
  3. Add YAML / JSON files for your content, and make sure they match the corresponding format specification from the section below
  4. Add in any extra content your add-on may require: Python files, simulator binaries, images, etc. (if your add-on gets too big for a Git repository, you can zip the content up, host it somewhere, and use the .remote metadata file described in the next section)
  5. Decide if your package has any dependencies, and declare them using the appropriate .dependencies* files
  6. Push everything up to GitHub on your default branch
\n

Note: it's a good idea to only include one type of add-on per repository as it makes your add-on package more usable for others. It's not a hard rule though, so feel free to add multiple folders to your add-on if you require.

\n

Feel free to have a look at any of the official add-ons for help and examples of how to work with add-ons.

\n

Add-ons format specification

\n

Here are the technical details of what's expected in add-on content. The BenchBot system will assume these specifications are adhered to, and errors can be expected if you try to use add-ons that don't match the specifications.

\n

An add-on package has the following structure (technically none of the files are required, they just determine what functionality your add-on includes):

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Filename | Description |
|----------|-------------|
| .dependencies | A list of add-on packages that must be installed with this package. Packages are specified by their GitHub identifier (i.e. github_username/repository_name), with one per line |
| .dependencies-python | A list of Python dependencies for your add-on. Syntax for the file is exactly the same as requirements.txt files. |
| .remote | Specifies content that should be installed from a remote URL, rather than residing in this repository. A remote resource is specified as a URL and target directory separated by a space. One resource is specified per line. The add-ons manager will fetch the URL specified, and extract the contents to the target directory (e.g. http://myhost/my_content.zip environments) |
| <directory>/ | Each named directory corresponds to an add-on type described below. The directory will be ignored if its name doesn't exactly match any of those below. |
\n

Batch add-ons

\n

A YAML file, that must exist in a folder called batches in the root of the add-on package (e.g. batches/my_batch.yaml).

\n

The following keys are supported for batch add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'name' | Yes | A string used to refer to this batch (must be unique!). |
| 'environments' | Yes | A list of environment strings of the format 'name':'variant' (e.g. 'miniroom:1'). |
\n

Environment add-ons

\n

A YAML file, that must exist in a folder called environments in the root of the add-on package (e.g. environments/my_environment.yaml).

\n

The following keys are supported for environment add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'name' | Yes | A string used to refer to this environment's name (the ('name', 'variant') pair must be unique!). |
| 'variant' | Yes | A string used to refer to this environment's variant (the ('name', 'variant') pair must be unique!). |
| 'type' | Yes | A string describing the type of this environment ('sim_unreal' & 'real' are the only values currently used). |
| 'map_path' | Yes | A path to the map for this environment, which will be used by either the simulator or real world system to load the environment. |
| 'start_pose' | Yes | The start pose of the robot that will be provided to users through the BenchBot API. The pose is specified as a list of 7 numbers: quaternion_w, quaternion_x, quaternion_y, quaternion_z, position_x, position_y, position_z. This must be accurate! |
| 'trajectory_poses' | No | A list of poses for the robot to traverse through in order. Each pose is a list of 7 numbers: quaternion_w, quaternion_x, quaternion_y, quaternion_z, position_x, position_y, position_z. This environment won't be usable for tasks that use the 'move_next' action if this parameter isn't provided. |
| 'robots' | No | A list of names of robots that are supported in this environment. If this list isn't included, all robots with the same 'type' as this environment will be able to run. |
| 'object_labels' | No | A list of labels for the objects that exist in the scene. Can be used with simulated sensors like segmentation sensors. |
\n

Evaluation method add-ons

\n

A YAML file, that must exist in a folder called evaluation_methods in the root of the add-on package (e.g. evaluation_methods/my_evaluation_method.yaml).

\n

The following keys are supported for evaluation method add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'name' | Yes | A string used to refer to this evaluation method (must be unique!) |
| 'valid_result_formats' | Yes | List of strings denoting results formats supported by the evaluation method. Ideally these format definitions should also be installed. |
| 'valid_ground_truth_formats' | Yes | List of strings denoting ground truth formats supported by the evaluation method. Ideally these format definitions should also be installed. |
| 'functions' | Yes | Dictionary of named functions provided by the evaluation method. The named methods are key-value pairs where the key is the function name, and the value is a string describing how the function can be imported with Python. For example, evaluate: \"omq.evaluate_method\" declares a function called 'evaluate' that is imported via from omq import evaluate_method. Likewise \"omq.submodule.combine_method\" translates to from omq.submodule import combine_method. See below for the list of functions expected for evaluation methods. |
| 'description' | No | A string describing what the evaluation method is and how it works. Should be included if you want users to understand where your method can be used. |
\n

Evaluation methods expect the following named functions:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Name | Signature | Usage |
|------|-----------|-------|
| 'evaluate' | fn(dict: results, list: ground_truths) -> dict | Evaluates the performance using a results dictionary, and returns a dictionary containing the scores. It also takes a list of dictionaries containing each ground truth that will be used in evaluation. |
| 'combine' | fn(list: scores) -> dict | Takes a list of scores dictionaries, and returns an aggregate score. If this method isn't declared, benchbot_eval won't return a summary score. |
\n
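To make the function wiring concrete, here is a hypothetical skeleton of an evaluation-method module matching the signatures above; the module name my_eval and the score field are invented for illustration.

```python
# my_eval.py -- hypothetical module referenced by an evaluation method add-on,
# e.g. functions: {evaluate: "my_eval.evaluate", combine: "my_eval.combine"}

def evaluate(results, ground_truths):
    """fn(dict: results, list: ground_truths) -> dict of scores."""
    # A real method would compare the results against each ground truth here
    return {'my_score': 0.0}

def combine(scores):
    """fn(list: scores) -> dict aggregating a list of score dicts."""
    n = max(len(scores), 1)
    return {'my_score': sum(s.get('my_score', 0.0) for s in scores) / n}
```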

Example method add-ons

\n

A YAML file, that must in a folder called examples in the root of the add-on package (e.g. examples/my_example.yaml).

\n

The following keys are supported for example add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| name | Yes | A string used to refer to this example (must be unique!) |
| native_command | Yes | A string describing the command used to run your example natively, relative to the directory of this YAML file! For example, running a my_example.py file in the same directory as this YAML would be python3 ./my_example.py. |
| container_directory | No | Directory to be used for Docker's build context. The submission process will automatically look for a file called Dockerfile in that directory unless the 'container_filename' key is also provided. |
| container_filename | No | Custom filename for your example's Dockerfile. Dockerfile in container_directory will be used if this key is not included. This path is relative to this YAML file, not 'container_directory'. |
| description | No | A string describing what the example is and how it works. Should be included if you want users to understand how your example can be expanded. |
\n

Format definition add-ons

\n

A YAML file, that must exist in a folder called formats in the root of the add-on package (e.g. formats/my_format.yaml).

\n

The following keys are supported for format add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'name' | Yes | A string used to refer to this format (must be unique!) |
| 'functions' | Yes | Dictionary of named functions for use with this format. The named methods are key-value pairs where the key is the function name, and the value is a string describing how the function can be imported with Python. For example, create: \"object_map.create_empty\" declares a function called 'create' that is imported via from object_map import create_empty. Likewise \"object_map.submodule.validate\" translates to from object_map.submodule import validate. See below for the list of functions expected for format definitions. |
| 'description' | No | A string describing what the format is and how it works. Should be included if you want users to understand what your format is supposed to capture. |
\n

Format definitions expect the following named functions:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Name | Signature | Usage |
|------|-----------|-------|
| 'create' | fn() -> dict | Function that returns an empty instance of this format. As much as possible should be filled in to make it easy for users to create valid instances (especially when a format is used for results). |
| 'validate' | fn(dict: instance) -> None | Takes a proposed instance of this format and validates whether it meets the requirements. Will typically use a series of assert statements to confirm fields are valid. |
\n
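As a sketch, a format definition's module might look like the following; the module name my_format and the 'objects' field are invented for illustration.

```python
# my_format.py -- hypothetical module referenced by a format definition add-on,
# e.g. functions: {create: "my_format.create", validate: "my_format.validate"}

def create():
    """fn() -> dict: returns an empty instance of the format."""
    return {'objects': []}  # illustrative structure only

def validate(instance):
    """fn(dict: instance) -> None: asserts the instance meets the format."""
    assert isinstance(instance, dict), "instance must be a dict"
    assert 'objects' in instance, "instance must contain an 'objects' list"
    assert isinstance(instance['objects'], list), "'objects' must be a list"
```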

Ground truth add-ons

\n

A JSON file, that must exist in a folder called ground_truths in the root of the add-on package (e.g. ground_truths/my_ground_truth.json).

\n

The following keys are supported for ground truth add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'environment' | Yes | A dictionary containing the definition data for the ground truth's reference environment. The data in this field should be a direct copy of an environment add-on. |
| 'format' | Yes | A dictionary containing the definition data for the ground truth's format. The data in this field should be a direct copy of a format definition add-on. |
| 'ground_truth' | Yes | A valid instance of the format described by the 'format' field. This is where your actual ground truth data should be stored. |
\n

A lot of these keys should be copied from other valid definitions. Please see the GroundTruthCreator helper class in BenchBot Evaluation for assistance in creating valid ground truths.

\n

Robot add-ons

\n

A YAML file, that must exist in a folder called robots in the root of the add-on package (e.g. robots/my_robot.yaml).

\n

The following keys are supported for robot add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'name' | Yes | A string used to refer to this robot (must be unique!). |
| 'type' | Yes | A string describing the type of this robot ('sim_unreal' & 'real' are the only values currently used). |
| 'address' | Yes | A string for the address where a running BenchBot Robot Controller can be accessed (e.g. 'localhost:10000') |
| 'global_frame' | Yes | The name of the global TF frame. All poses reported by the BenchBot API will be with respect to this frame. |
| 'robot_frame' | Yes | The name of the robot's TF frame. |
| 'poses' | Yes | A list of named poses that this robot provides. This list of poses will be available in observations provided by the BenchBot API. |
| persistent_cmds | Yes | A list of commands that will be run and kept alive for the lifetime of the robot controller. The commands will be run in parallel, and executed via bash -c <your_command_string> |
| persistent_status | Yes | A command used to check the status of your persistent_cmds. This command should execute quickly, and terminate on completion, with the return code being used to evaluate the status. The command string is executed via bash -c <your_command_string> |
| run_cmd | Yes | A single command issued by the controller to run a simulation. This command must terminate on completion. The command string is executed via bash -c <your_command_string> |
| stop_cmd | Yes | A single command issued by the controller to stop a simulation. This command must terminate on completion. The command string is executed via bash -c <your_command_string> |
| 'connections' | Yes | A dictionary of connections that your robot makes available to the BenchBot ecosystem. The name of the key-value pair is important, and should follow the recommendations provided on standard channels in the BenchBot API documentation. A description of connection definitions is provided below. |
\n

Connections are the lifeblood of interaction between BenchBot and robot platforms. They are defined by named entries, with the following fields:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'connection' | Yes | Connection type string, used by the BenchBot Robot Controller. Supported values are 'api_to_ros' (used for actions), 'ros_to_api' (used for observations), and 'roscache_to_api' (special value used for caching observation values). |
| 'ros_topic' | Yes | Topic name for the ROS side of the connection. |
| 'ros_type' | Yes | Topic type for the ROS side of the connection. |
| 'callback_api' | No | A callback that is run on the HTTP encoded data received / sent on the API end of the connection. It takes in data, and returns transformed data based on the callback's action. Callbacks are specified by a string denoting how the callback can be accessed (e.g. 'api_callbacks.convert_to_rgb' = from api_callbacks import convert_to_rgb). No data transformation occurs if no callback is provided. |
| 'callback_ros' | No | A callback that is run on the ROS data received / sent on the robot controller end of the connection. It takes in data and a reference to the robot controller. 'api_to_ros' connections use this data to act on the robot, whereas 'ros_to_api' connections turn this data into a dictionary that can be serialised into HTTP traffic. Callbacks are specified by a string denoting how the callback can be accessed (e.g. 'api_callbacks.convert_to_rgb' = from api_callbacks import convert_to_rgb). No action occurs at the ROS level if no callback is provided. |
\n
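To illustrate how callbacks plug in, here is a hypothetical api_callbacks-style module; the function names and bodies are assumptions matching only the signatures described above, not the actual BenchBot implementations.

```python
# api_callbacks.py -- hypothetical callbacks referenced from a robot add-on,
# e.g. callback_api: "api_callbacks.scale_image" (names invented for illustration)

def scale_image(data):
    # 'callback_api': takes HTTP-encoded observation data and returns
    # transformed data for the API user
    data['scale'] = 1.0
    return data

def drive_robot(data, controller):
    # 'callback_ros': takes action data plus a reference to the robot
    # controller, and acts on the robot (details depend on the controller)
    pass
```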

Task add-ons

\n

A YAML file, that must exist in a folder called tasks in the root of the add-on package (e.g. tasks/my_task.yaml).

\n

The following keys are supported for task add-ons:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
| Key | Required | Description |
|-----|----------|-------------|
| 'name' | Yes | A string used to refer to this task (must be unique!). |
| 'actions' | Yes | A list of named connections to be provided as actions through the BenchBot API. Running this task will fail if the robot doesn't provide these named connections. |
| 'observations' | Yes | A list of named connections to be provided as observations through the BenchBot API. Running this task will fail if the robot doesn't provide these named connections. |
| 'localisation' | No | A string describing the level of localisation. Only supported values currently are 'ground_truth' and 'noisy'. The default value is 'ground_truth'. |
| 'results_format' | No | A string naming the format for results. The format must be installed, as BenchBot API will use the format's functions to provide the user with empty results. |
| 'description' | No | A string describing what the task is, and how it works. Should be included if you want users to understand what challenges your task is trying to capture. |
| 'type' | No | A string describing what robot / environment types are valid for this task. For example, a task that provides a magic image segmentation sensor would only be made available for 'sim_unreal' type robots / environments. |
| 'scene_count' | No | Integer representing the number of scenes (i.e. environment variations required for a task). If omitted, a default value of 1 will be used for the task. |
\n","name":"BenchBot Add-ons Manager","type":"code","url":"https://github.com/qcr/benchbot_addons","image":"./docs/benchbot_addons.gif","_images":["/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webm","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.mp4","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webp","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.jpg"],"src":"/content/benchbot/benchbot-addons.md","id":"benchbot-addons","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-api.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-api.json new file mode 100644 index 0000000000..b146e0728f --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-api.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

NOTE: this software needs to interface with a running instance of the BenchBot software stack. Unless you are running against a remote stack / robot, please install this software with the BenchBot software stack as described here.

\n

BenchBot API

\n

\n

The BenchBot API provides a simple interface for controlling a robot or simulator through actions, and receiving data through observations. As shown above, the entire code required for running an agent in a realistic 3D simulator is only a handful of simple Python commands.

\n

OpenAI Gym users will find the breakdown into actions, observations, and steps extremely familiar. The BenchBot API allows researchers to develop and test novel algorithms with real robot systems and realistic 3D simulators, without the typical hassles that arise when interfacing with complicated multi-component robot systems.

\n

Running a robot through an entire environment, with your own custom agent, is as simple as one line of code with the BenchBot API:

\n
from benchbot_api import BenchBot\nfrom my_agent import MyAgent\n\nBenchBot(agent=MyAgent()).run()\n
\n

The above assumes you have created your own agent by overloading the abstract Agent class provided with the API. Overloading the abstract class requires implementing 3 basic methods. Below is a basic example to spin on the spot:

\n
from benchbot_api import Agent\nimport json\n\nclass MyAgent(Agent):\n\n    def is_done(self, action_result):\n        # Go forever\n        return False\n\n    def pick_action(self, observations, action_list):\n        # Rotates on the spot indefinitely, 5 degrees at a time\n        # (assumes we are running in passive mode)\n        return 'move_angle', {'angle': 5}\n\n    def save_result(self, filename, empty_results, results_format_fns):\n        # Save some blank results\n        with open(filename, 'w') as f:\n            json.dump(empty_results, f)\n
\n

If you prefer to do things manually, a more exhaustive suite of functions is also available as part of the BenchBot API as an alternative to the BenchBot.run() method. Below highlights a handful of the capabilities of the BenchBot API:

\n
from benchbot_api import BenchBot, RESULT_LOCATION\nimport json\nimport matplotlib.pyplot as plt\n\n# Create a BenchBot instance & reset the simulator / robot to starting state\nb = BenchBot()\nobservations, action_result = b.reset()\n\n# Print details of selected task & environment\nprint(b.task_details)\nprint(b.environment_details)\n\n# Visualise the current RGB image from the robot\nplt.imshow(observations['image_rgb'])\n\n# Move to the next pose if we have a 'move_next' action available\nif 'move_next' in b.actions:\n    observations, action_result = b.step('move_next')\n\n# Save some empty results\nwith open(RESULT_LOCATION, 'w') as f:\n    json.dump(b.empty_results(), f)\n
\n

For sample solutions that use the BenchBot API, see the examples add-ons available (e.g. benchbot-addons/examples_base and benchbot-addons/examples_ssu).

\n

Installing BenchBot API

\n

BenchBot API is a Python package, installable with pip. Run the following in the root directory of where this repository was cloned:

\n
u@pc:~$ pip install .\n
\n

Using the API to communicate with a robot

\n

Communication with the robot comes through a series of \"channels\" which are defined by the robot's definition file (e.g. carter). A task definition file (e.g. semantic_slam:passive:ground_truth) then declares which of these connections are provided to the API as either sensor observations or actions to be executed by a robot actuator.

\n

The API talks to the BenchBot Supervisor, which handles loading and managing the different kinds of back-end configuration files. This abstracts all of the underlying communication complexities away from the user, allowing the BenchBot API to remain a simple interface that focuses on getting observations and sending actions.

\n

An action is sent to the robot by calling the BenchBot.step() method with a valid action (found by checking the BenchBot.actions property):

\n
from benchbot_api import BenchBot\n\nb = BenchBot()\navailable_actions = b.actions\nb.step(b.actions[0], {'action_arg': arg_value})  # Perform the first available action\n
\n

The second parameter is a dictionary of named arguments for the selected action. For example, moving 5m forward with the 'move_distance' action is represented by the dictionary {'distance': 5}.

\n

Observations are received as return values from a BenchBot.step() call (BenchBot.reset() internally calls BenchBot.step(None), which means no action is performed):

\n
from benchbot_api import BenchBot\n\nb = BenchBot()\nobservations, action_result = b.reset()\nobservations, action_result = b.step('move_distance', {'distance': 5})\n
\n

The returned observations variable holds a dictionary with key-value pairs corresponding to the name-data defined by each observation channel.

\n
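
The exact channel names available depend on the robot definition in use, so a quick way to see what you've received is to iterate over the returned dictionary (a minimal sketch using only standard Python, assuming observations comes from a previous reset() or step() call):

\n
# List the observation channels returned by the last step\nfor name, data in observations.items():\n    print(name, type(data))  # e.g. 'image_rgb' <class 'numpy.ndarray'>\n
\n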

The action_result is an enumerated value denoting the result of the action (use from benchbot_api import ActionResult to access the Enum class). You should use this result to guide the progression of your algorithm either manually or in the is_done() method of your Agent. Possible values for the returned action_result are:

\n\n
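
As a minimal sketch of acting on this value inside an Agent (only the SUCCESS member is assumed here, based on its use elsewhere in these docs; check the Enum class for the full set of members):

\n
from benchbot_api import Agent, ActionResult\n\nclass CautiousAgent(Agent):\n\n    def is_done(self, action_result):\n        # Stop as soon as the last action did not succeed (e.g. the scene\n        # finished or the robot collided); other methods omitted as in the\n        # earlier MyAgent example\n        return action_result != ActionResult.SUCCESS\n
\n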

Standard Communication Channels

\n

Task and robot definition files declare actions and observations, and these files are included through BenchBot add-ons. Add-on creators are free to add and declare channels as they please, but it is a better experience for everyone if channel definitions are as consistent as possible across the BenchBot ecosystem.

\n

So if you're adding a robot that moves between a set of poses, declare a channel called 'move_next' with no arguments. Likewise, a robot that provides image observations should use a channel named 'image_rgb' with the same format as described below. Feel free to implement the channels however you please for your robot, but consistent interfaces should always be preferred.

\n

If you encounter a task using non-standard channel configurations, the API has all the functionality you need as a user to handle them (actions, config, & observations properties). On the other hand, maybe the non-standard channel should be a new standard. New standard communication channels are always welcome; please open a pull request with the details!

\n

Standard action channels:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
NameRequired ArgumentsDescription
'move_next'NoneMoves the robot to the next pose in its list of pre-defined poses (only available in environments that declare a 'trajectory_poses' field).
'move_distance'
{'distance': float}
Moves the robot 'distance' metres directly ahead.
'move_angle'
{'angle': float}
Rotates the robot on the spot by 'angle' degrees.
\n

Standard observation channels:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
NameData formatDescription
'image_depth'
numpy.ndarray(shape=(H,W),
dtype='float32')
Depth image from the default image sensor with depths in meters.
'image_depth_info'
{
'frame_id': string
'height': int
'width': int
'matrix_instrinsics':
numpy.ndarray(shape=(3,3),
dtype='float64')
'matrix_projection':
numpy.ndarray(shape=(3,4)
dtype='float64')
}
Sensor information for the depth image. 'matrix_instrinsics' is of the format:
[fx 0 cx]
[0 fy cy]
[0 0 1]
for a camera with focal lengths (fx,fy), & principal point (cx,cy). Likewise, 'matrix_projection' is:
[fx 0 cx Tx]
[0 fy cy Ty]
[0 0 1 0]
where (Tx,Ty) is the translation between stereo sensors. See here for further information on fields.
'image_rgb'
numpy.ndarray(shape=(H,W,3),
dtype='uint8')
RGB image from the default image sensor with colour values mapped to the 3 channels, in the 0-255 range.
'image_rgb_info'
{
'frame_id': string
'height': int
'width': int
'matrix_instrinsics':
numpy.ndarray(shape=(3,3),
dtype='float64')
'matrix_projection':
numpy.ndarray(shape=(3,4)
dtype='float64')
}
Sensor information for the RGB image. 'matrix_instrinsics' is of the format:
[fx 0 cx]
[0 fy cy]
[0 0 1]
for a camera with focal lengths (fx,fy), & principal point (cx,cy). Likewise, 'matrix_projection' is:
[fx 0 cx Tx]
[0 fy cy Ty]
[0 0 1 0]
where (Tx,Ty) is the translation between stereo sensors. See here for further information on fields.
'laser'
{
'range_max': float64,
'range_min': float64,
'scans':
numpy.ndarray(shape=(N,2),
dtype='float64')
}
Set of scan values from a laser sensor, between 'range_min' & 'range_max' (in meters). The 'scans' array consists of N scans of format [scan_value, scan_angle]. For example, scans[100, 0] would get the distance value & scans[100, 1] would get the angle of the 100th scan.
'poses'
{
...
'frame_name': {
'parent_frame': string
'rotation_rpy':
numpy.ndarray(shape=(3,),
dtype='float64')
'rotation_xyzw':
numpy.ndarray(shape=(4,),
dtype='float64')
'translation_xyz':
numpy.ndarray(shape=(3,),
dtype='float64')
}
...
}
Dictionary of relative poses for the current system state. The pose of each system component is available at key 'frame_name'. Each pose has a 'parent_frame' which the pose is relative to (all poses are typically with respect to the global 'map' frame), & the pose values. 'rotation_rpy' is [roll,pitch,yaw] in ZYX order, 'rotation_xyzw' is the equivalent quaternion [x,y,z,w], & 'translation_xyz' is the Cartesian [x,y,z] coordinates.
\n
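
The sketch below shows one way to consume two of the standard observation channels from the table above. It assumes both channels are present on the current robot, and uses 'robot' as a hypothetical frame name for the 'poses' channel:

\n
import numpy as np\n\n# 'observations' comes from an earlier BenchBot.reset() / BenchBot.step() call\n\n# Convert the standard 'laser' channel into Cartesian (x, y) points\nscans = observations['laser']['scans']  # shape (N, 2): [scan_value, scan_angle]\nxy = np.stack([scans[:, 0] * np.cos(scans[:, 1]),\n               scans[:, 0] * np.sin(scans[:, 1])], axis=1)\n\n# Look up a pose from the 'poses' channel ('robot' is a hypothetical frame name)\npose = observations['poses']['robot']\nprint(pose['parent_frame'], pose['translation_xyz'])\n
\n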

Using the API to communicate with the BenchBot system

\n

A running BenchBot system manages many other elements besides simply getting data to and from a real / simulated robot. BenchBot encapsulates not just the robot, but also the environment it is operating in (whether that be simulator or real) and task that is currently being attempted.

\n

The API handles communication for all parts of the BenchBot system, including controlling the currently running environment and obtaining configuration information. Below are details for some of the more useful features of the API (all features are also documented in the benchbot.py source code).

\n

Gathering configuration information

\n\n\n\n\n\n\n\n\n\n\n\n\n\n
API method or propertyDescription
configReturns a dict exhaustively describing the current BenchBot configuration. Most of the information returned will not be useful for general BenchBot use.
\n
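
For example, a quick way to inspect what configuration information is available (a minimal sketch; the exact keys depend on your installation and selected task):

\n
from benchbot_api import BenchBot\n\nb = BenchBot()\nconfig = b.config  # full configuration dict\nprint(sorted(config.keys()))\n
\n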

Interacting with the environment

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
API method or propertyDescription
reset()Resets the current environment scene. For the simulator, this means restarting the running simulator instance with the robot back at its initial position. The method returns initial observations, & the action_result (should always be BenchBot.ActionResult.SUCCESS).
next_scene()Starts the next scene in the current environment (only relevant for tasks with multiple scenes). Note there is no going back once you have moved to the next scene. Returns the same as reset().
\n
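
For tasks with multiple scenes, progression could look like the sketch below (NUM_SCENES is a hypothetical value that depends on the chosen task; fill in your own per-scene logic):

\n
from benchbot_api import BenchBot\n\nNUM_SCENES = 2  # hypothetical; depends on the chosen task\n\nb = BenchBot()\nobservations, action_result = b.reset()\nfor i in range(NUM_SCENES):\n    # ... complete the current scene here ...\n    if i < NUM_SCENES - 1:\n        # Remember: there is no going back once you move to the next scene\n        observations, action_result = b.next_scene()\n
\n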

Interacting with an agent

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
API method or propertyDescription
actionsReturns the list of actions currently available to the agent. This will update as actions are performed in the environment (for example if the agent has collided with an obstacle this list will be empty).
observationsReturns the list of observations available to the agent.
step(action, **action_args)Performs the requested action with the provided named action arguments. See Using the API to communicate with a robot above for further details.
\n

Creating results

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
API method or propertyDescription
empty_results()Generates a dict with the required result metadata & empty results. Metadata ('task_details' & 'environment_details') is pre-filled. To create results, all a user needs to do is fill in the empty 'results' field using the format's results functions. These functions are available through the results_functions() method.
results_functions()Returns a dict of functions defined by the task's 'results_format'. Example use for calling a create() function is results_functions()['create']().
RESULT_LOCATION (outside of BenchBot class)A static string denoting where results should be saved (/tmp/results). Using this location ensures tools in the BenchBot software stack work as expected.
\n","name":"BenchBot Python API","type":"code","url":"https://github.com/qcr/benchbot_api","image_position":"center 100%","image":"./docs/benchbot_api_web.gif","_images":["/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webm","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.mp4","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webp","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.jpg"],"src":"/content/benchbot/benchbot-api.md","id":"benchbot-api"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-eval.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-eval.json new file mode 100644 index 0000000000..ba1ed3c2aa --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-eval.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation (although it can be installed independently through pip and run on results files if desired). For a working BenchBot system, please install the BenchBot software stack by following the instructions here.

\n

BenchBot Evaluation

\n

\"BenchBot\n\"QUT\n\"Primary\n\"License\"

\n

BenchBot Evaluation is a library of functions used to call evaluation methods. These methods are installed through the BenchBot Add-ons Manager, and evaluate the performance of a BenchBot system against the method's metric. The easiest way to use this module is through the helper scripts provided with the BenchBot software stack.

\n

Installing and performing evaluation with BenchBot Evaluation

\n

BenchBot Evaluation is a Python package, installable with pip. Run the following in the root directory of where this repository was cloned:

\n
u@pc:~$ pip install .\n
\n

Although evaluation is best run from within the BenchBot software stack, it can be run in isolation if desired. The following code snippet shows how to perform evaluation with the 'omq' method from Python:

\n
from benchbot_eval.evaluator import Evaluator, Validator\n\nValidator(results_file).validate_results_data()\nEvaluator('omq', scores_file).evaluate()\n
\n

This prints the final scores to the screen and saves them to a file using the following inputs:

\n\n

How add-ons interact with BenchBot Evaluation

\n

Two types of add-ons are used in the BenchBot Evaluation process: format definitions, and evaluation methods. An evaluation method's YAML file defines what results formats and ground truth formats the method supports. This means:

\n\n

Please see the BenchBot Add-ons Manager's documentation for further details on the different types of add-ons.

\n

Creating valid results and ground truth files

\n

The BenchBot software stack includes tools to assist in creating results and ground truth files:

\n\n","name":"BenchBot Evaluation Tools","type":"code","url":"https://github.com/qcr/benchbot_eval","image":"/qcr_logo_light_filled.svg","_images":["/qcr_logo_light_filled.svg"],"src":"/content/benchbot/benchbot-eval.md","id":"benchbot-eval","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-simulator.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-simulator.json new file mode 100644 index 0000000000..dd519edc7f --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-simulator.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

NOTE: this simulator is retired. It was based on Isaac Sim 2019.2, which used Unreal Engine. We have migrated to the new Omniverse-powered Isaac Sim. See benchbot_sim_omni for details.

\n

BenchBot Simulator for Unreal-powered Isaac Sim (2019.2) - retired!

\n

\"BenchBot\n\"QUT\n\"Primary\n\"License\"

\n

\n

This simulator is an extension of the NVIDIA Isaac SDK that establishes ROS communications to a running instance of an Unreal Engine-based NVIDIA Isaac SIM. This simulator is explicitly linked to version 2019.2 of Isaac, the last version with direct support for the Unreal Engine-based simulator. This simulator is retired, as we have moved to the latest Isaac Sim which uses Omniverse. See benchbot_sim_omni for details.

\n

This simulator provides direct access to the following data on a robot, simulated in Unreal Engine. Access is via ROS on the topic provided in brackets:

\n\n

Direct control of the robot is also facilitated via:

\n\n

Installing the simulator

\n

Please see the note at the top of the page; installation of this Simulator in isolation is generally not what you want!

\n

If you are sure you need to install the simulator in isolation, the following steps should be sufficient. Note there are a significant number of driver & software requirements for your system:

\n
    \n
  1. \n

    Download version 2019.2 of the Isaac SDK from the NVIDIA site. If creating your own environments, also download version 2019.2 of Isaac SIM (not NavSim). You will have to create / sign in to an NVIDIA developer account, and look in the \"Archive\" drop down for version 2019.2.

    \n
  2. \n
  3. \n

Either set up your system with Isaac SIM, or download our environments:

    \n

    a) Follow the install instructions for Isaac SIM 2019.2 to get Unreal Engine (through IsaacSimProject) running on your system. You will have to link Epic Games to your Github account to get access.

    \n

    b) Download our latest environments: Isaac Development Environments, and Isaac Challenge Environments

    \n
  4. \n
  5. \n

    Install the Isaac SDK by following the instructions here.

    \n
  6. \n
  7. \n

Clone this simulator wrapper, apply our patches to the installed Isaac SDK, & build the simulator using the Bazel wrapper script (ensure the environment variable ISAAC_SDK_PATH is set to where you installed Isaac SDK):

    \n
    u@pc:~$ git clone https://github.com/qcr/benchbot_sim_unreal && cd benchbot_sim_unreal\nu@pc:~$ .isaac_patches/apply_patches\nu@pc:~$ ./bazelros build //apps/benchbot_simulator\n
    \n
  8. \n
\n

Running the simulator

\n

This simulator interface is run alongside a running Isaac Unreal Engine Simulator. To get both components running:

\n
    \n
  1. \n

    Start the Unreal Engine Simulator, either via our precompiled environments or the IsaacSimProject Unreal Editor:

    \n
    u@pc:~$ ./IsaacSimProject <map_name> \\\n    -isaac_sim_config_json='<path_to_isaac>/apps/carter/carter_sim/bridge_config/carter_full.json' \\\n    -windowed -ResX=960 -ResY=540 -vulkan -game\n
    \n
  2. \n
  3. \n

Launch this simulator's Isaac application (unfortunately, you first need to hardcode the start pose):

    \n
    u@pc:~$ START_POSE=<robot_start_pose> \\\n    sed -i \"0,/\\\"pose\\\":/{s/\\(\\\"pose\\\": \\)\\(.*\\)/\\1$START_POSE}\" \\\n    <path_to_isaac>/apps/carter/carter_sim/bridge_config/carter_full_config.json\nu@pc:~$ ./bazelros run //apps/benchbot_simulator\n
    \n
  4. \n
\n

At this point you will have a running Isaac Unreal Engine Simulator, with sensorimotor data available from the robot in ROS!

\n

Using this simulator with the BenchBot Robot Controller

\n

The BenchBot Robot Controller is a wrapping ROS / HTTP hybrid script that manages running robots and their required subprocesses. See the carter_sim.yaml configuration in the BenchBot Supervisor for an example configuration of how to run BenchBot Simulator through the Robot Controller.

\n","name":"BenchBot Simulator (Isaac)","type":"code","url":"https://github.com/qcr/benchbot_simulator","image":"./docs/benchbot_simulator.gif","_images":["/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webm","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.mp4","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webp","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.jpg"],"src":"/content/benchbot/benchbot-simulator.md","id":"benchbot-simulator","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-supervisor.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-supervisor.json new file mode 100644 index 0000000000..c8e596c5dd --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-supervisor.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation. For a working BenchBot system, please install the BenchBot software stack by following the instructions here.

\n

BenchBot Supervisor

\n

\"BenchBot\n\"QUT\n\"Primary\n\"License\"

\n

\"benchbot_supervisor\"

\n

The BenchBot Supervisor is an HTTP server facilitating communication between user-facing interfaces like the BenchBot API, and low-level robot components like the BenchBot Simulator or real robots. Communication is typically routed through a BenchBot Robot Controller, which provides automated process management for low-level components and wraps all ROS communications.

\n

Installing and running the BenchBot Supervisor

\n

BenchBot Supervisor is a Python package containing a Supervisor class that wraps an HTTP server for both upstream and downstream communication. Install by running the following in the root directory of where this repository was cloned:

\n
u@pc:~$ pip install .\n
\n

Once installed, the Python class can be used as follows:

\n
from benchbot_supervisor import Supervisor\n\ns = Supervisor(...args...)\ns.run()\n
\n

The following parameters are typically required for a useful instantiation of the supervisor:

\n\n
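
As a sketch only, an instantiation mirroring the command-line example further below might look like the following; the keyword argument names are assumptions based on the equivalent command-line flags and the 'addons_path' argument mentioned later, so check python -m benchbot_supervisor --help for the authoritative list:

\n
from benchbot_supervisor import Supervisor\n\n# Keyword argument names below are assumptions based on the CLI flags\ns = Supervisor(task_name='scd:active:ground_truth',\n               robot_name='carter',\n               environment_names='miniroom:1,miniroom:5',\n               addons_path='/path/to/addons')\ns.run()\n
\n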

The module can also be executed directly, which makes the passing of arguments from the command line simple (see python -m benchbot_supervisor --help for argument details):

\n
u@pc:~$ python -m benchbot_supervisor ...args...\n
\n

As an example, the below command runs the supervisor for a scene change detection task, where active control is employed with ground truth localisation on a simulated Carter robot, and environments miniroom:1 and miniroom:5 are used:

\n
u@pc:~$ python -m benchbot_supervisor \\\n    --task-name scd:active:ground_truth \\\n    --robot-name carter \\\n    --environment-names miniroom:1,miniroom:5\n
\n

Employing task, robot, and environment configurations

\n

The BenchBot Supervisor requires configuration details for the selected tasks, robots, and environments. It uses these details to manage each of the system components, like API interaction and control of the simulator / real robot. Configuration details are provided by YAML files, which are referenced via their 'name' field as shown above.

\n

The BenchBot Add-ons Manager manages the installation of, and access to, these files. See the documentation there for further details on configuration files. All you need to do to use add-ons with the supervisor is provide the location via the 'addons_path' argument.

\n

Interacting with the BenchBot Supervisor

\n

The supervisor includes a RESTful HTTP API for all interaction with a user-facing API. The RESTful API includes the following commands:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Request RouteResponse FormatDescription
/
Hello, I am the BenchBot supervisor
Arbitrary response to confirm connection.
/config/
{
...
'param_name': param_value,
...
}
Dictionary containing parameter values for all of the supervisor's configuration settings. Keys correspond to parameter names, & values to parameter values.
/config/<config>config_valueDirectly retrieve the value of a supervisor configuration parameter with name 'config'. Returns param_value of 'config'.
/connections/<connection>dictReturns the response of the connection (e.g. an image_rgb connection would return the image) as a dict. Format & style of the dict is defined by the methods described above in \"Employing task, robot, and environment configurations\".
/results_functions/listReturns a list of the results function names that can be remotely executed via the route below.
/results_functions/<function>dictCalls results function with name 'function', and returns the result of the function call in the response's JSON body.
/robot/
Hello, I am the BenchBot robot controller
Arbitrary response confirming a robot controller is available.
/robot/<command>dictPasses the given command down to a running robot controller. See BenchBot Robot Controller for documentation of supported commands & expected responses.
\n","name":"BenchBot Backend Supervisor","type":"code","url":"https://github.com/qcr/benchbot_supervisor","image_position":"center 0%","image":"./docs/benchbot_supervisor.jpg","_images":["/_next/static/images/benchbot_supervisor-3e4092b6584962e3e4529101ae489a08.jpg.webp","/_next/static/images/benchbot_supervisor-fb509eb331f3380fbf5da2c3035116b6.jpg"],"src":"/content/benchbot/benchbot-supervisor.md","id":"benchbot-supervisor"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot.json new file mode 100644 index 0000000000..60b07f37fd --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

~ Our Robotic Vision Scene Understanding (RVSU) Challenge is live on EvalAI ~

\n

~ BenchBot is now powered by NVIDIA Omniverse and Isaac Sim. We are aware of some issues, please report any you do encounter. ~

\n

~ Our BenchBot tutorial is the best place to get started developing with BenchBot ~

\n

BenchBot Software Stack

\n

\"BenchBot\n\"QUT\n\"Primary\n\"License\"

\n

\n

The BenchBot software stack is a collection of software packages that allow end users to control robots in real or simulated environments with a simple Python API. It leverages the simple \"observe, act, repeat\" approach to robot problems prevalent in reinforcement learning communities (OpenAI Gym users will find the BenchBot API interface very similar).

\n

BenchBot was created as a tool to assist with the research challenges faced by the semantic scene understanding community: challenges like understanding a scene in simulation, transferring algorithms to real-world systems, and meaningfully evaluating algorithm performance. We've since realised that these challenges don't just exist for semantic scene understanding; they're prevalent in a wide range of robotic problems.

\n

This led us to create version 2 of BenchBot with a focus on allowing users to define their own functionality for BenchBot through add-ons. Want to integrate your own environments? Plug in new robot platforms? Define new tasks? Share examples with others? Add evaluation measures? This is all now possible with add-ons, and you don't have to do anything more than add some YAML and Python files defining your new content!

\n

The \"bench\" in \"BenchBot\" refers to benchmarking, with our goal to provide a system that greatly simplifies the benchmarking of novel algorithms in both realistic 3D simulation and on real robot platforms. If there is something else you would like to use BenchBot for (like integrating different simulators), please let us know. We're very interested in BenchBot being the glue between your novel robotics research and whatever your robot platform may be.

\n

This repository contains the software stack needed to develop solutions for BenchBot tasks on your local machine. It installs and configures a significant amount of software for you, wraps software in stable Docker images (~50GB), and provides simple interaction with the stack through 4 basic scripts: benchbot_install, benchbot_run, benchbot_submit, and benchbot_eval.

\n

System recommendations and requirements

\n

The BenchBot software stack is designed to run seamlessly on a wide number of system configurations (currently limited to Ubuntu 18.04+). System hardware requirements are relatively high due to the software run for 3D simulation (e.g. NVIDIA Omniverse-powered Isaac Sim):

\n\n

Having a system that meets the above hardware requirements is all that is required to begin installing the BenchBot software stack. The install script analyses your system configuration and offers to install any missing software components interactively. The list of 3rd party software components involved includes:

\n\n

Managing your installation

\n

Installation is simple:

\n
u@pc:~$ git clone https://github.com/qcr/benchbot && cd benchbot\nu@pc:~$ ./install\n
\n

Any missing software components, or configuration issues with your system, should be detected by the install script and resolved interactively (you may be prompted to manually reboot and restart the install script). The installation asks if you want to add BenchBot helper scripts to your PATH. Choosing yes will make the following commands available from any directory: benchbot_install (same as ./install above), benchbot_run, benchbot_submit, benchbot_eval, and benchbot_batch.

\n

The BenchBot software stack will frequently check for updates and can update itself automatically. To update simply run the install script again (add the --force-clean flag if you would like to install from scratch):

\n
u@pc:~$ benchbot_install\n
\n

If you decide to uninstall the BenchBot software stack, run:

\n
u@pc:~$ benchbot_install --uninstall\n
\n

There are a number of other options to customise your BenchBot installation, which are all described by running:

\n
u@pc:~$ benchbot_install --help\n
\n

Managing installed BenchBot add-ons

\n

BenchBot installs a default set of add-ons, which is currently 'benchbot-addons/ssu' (and all of its dependencies declared here). But you can also choose to install a different set of add-ons instead. For example, the following will also install the 'benchbot-addons/data_collect' add-ons:

\n
u@pc:~$ benchbot_install --addons benchbot-addons/ssu,benchbot-addons/data_collect\n
\n

See the BenchBot Add-ons Manager's documentation for more information on using add-ons. All of our official add-ons can be found in our benchbot-addons GitHub organisation. We're open to adding add-ons contributed by our users to the official list as well.

\n

Getting started

\n

Getting a solution up and running with BenchBot is as simple as 1,2,3. Here's how to use BenchBot with content from the semantic scene understanding add-on:

\n
    \n
  1. \n

    Run a simulator with the BenchBot software stack by selecting an available robot, environment, and task definition:

    \n
    u@pc:~$ benchbot_run --robot carter_omni --env miniroom:1 --task semantic_slam:active:ground_truth\n
    \n

    A number of useful flags exist to help you explore what content is available in your installation (see --help for full details). For example, you can list what tasks are available via --list-tasks and view the task specification via --show-task TASK_NAME.

    \n
  2. \n
  3. \n

    Create a solution to a BenchBot task, and run it against the software stack. To run a solution you must select a mode. For example, if you've created a solution in my_solution.py that you would like to run natively:

    \n
    u@pc:~$ benchbot_submit --native python my_solution.py\n
    \n

    See --help for other options. You also have access to all of the examples available in your installation. For instance, you can run the hello_active example in containerised mode via:

    \n
    u@pc:~$ benchbot_submit --containerised --example hello_active\n
    \n

    See --list-examples and --show-example EXAMPLE_NAME for full details on what's available out of the box.

    \n
  4. \n
  5. \n

    Evaluate the performance of your system using a supported evaluation method (see --list-methods). To use the omq evaluation method on my_results.json:

    \n
    u@pc:~$ benchbot_eval --method omq my_results.json\n
    \n

    You can also simply run evaluation automatically after your submission completes:

    \n
    u@pc:~$ benchbot_submit --evaluate-with omq --native --example hello_eval_semantic_slam\n
    \n
  6. \n
\n

The BenchBot Tutorial is a great place to start working with BenchBot; the tutorial takes you from a blank system to a working Semantic SLAM solution, with many educational steps along the way. Also remember the examples in your installation (benchbot-addons/examples_base is a good starting point) which show how to get up and running with the BenchBot software stack.

\n

Power tools for autonomous algorithm evaluation

\n

Once you are confident your algorithm is a solution to the chosen task, the BenchBot software stack's power tools allow you to comprehensively explore your algorithm's performance. You can autonomously run your algorithm over multiple environments, and evaluate it holistically to produce a single summary statistic of your algorithm's performance. Here are some examples again with content from the semantic scene understanding add-on:

\n\n

Using BenchBot in your research

\n

BenchBot was made to enable and assist the development of high quality, repeatable research results. We welcome any and all use of the BenchBot software stack in your research.

\n

To use our system, we just ask that you cite our paper on the BenchBot system. This will help us follow uses of BenchBot in the research community, and understand how we can improve the system to help support future research results. Citation details are as follows:

\n
@misc{talbot2020benchbot,\n    title={BenchBot: Evaluating Robotics Research in Photorealistic 3D Simulation and on Real Robots},\n    author={Ben Talbot and David Hall and Haoyang Zhang and Suman Raj Bista and Rohan Smith and Feras Dayoub and Niko Sünderhauf},\n    year={2020},\n    eprint={2008.00635},\n    archivePrefix={arXiv},\n    primaryClass={cs.RO}\n}\n
\n

If you use our benchbot environments for active robotics (BEAR) which are installed by default, we ask you please cite our data paper on BEAR. Citation details are as follows:

\n
@article{hall2022bear,\nauthor = {David Hall and Ben Talbot and Suman Raj Bista and Haoyang Zhang and Rohan Smith and Feras Dayoub and Niko Sünderhauf},\ntitle ={BenchBot environments for active robotics (BEAR): Simulated data for active scene understanding research},\njournal = {The International Journal of Robotics Research},\nvolume = {41},\nnumber = {3},\npages = {259-269},\nyear = {2022},\ndoi = {10.1177/02783649211069404},\n}\n
\n

Components of the BenchBot software stack

\n

The BenchBot software stack is split into a number of standalone components, each with their own GitHub repository and documentation. This repository glues them all together for you into a working system. The components of the stack are:

\n\n

Further information

\n\n

Supporters

\n

Development of the BenchBot software stack was directly supported by:

\n

\"QUT    \"CSIRO    

\n
\n
\n

\"NVIDIA    \"Australian

\n","name":"BenchBot Software Stack","type":"code","url":"https://github.com/qcr/benchbot","image_position":"100% center","image":"./docs/benchbot_web.gif","_images":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"src":"/content/benchbot/benchbot.md","id":"benchbot"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/code-templates.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/code-templates.json new file mode 100644 index 0000000000..93d254ce25 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/code-templates.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

QCR Code Templates

\n

\"QUT\n\"Primary\n\"License\"

\n

\n

This repository defines shared templates for commonly performed actions within the QUT Centre for Robotics (QCR). We've made this project public as most of the templates have a general use case, and aren't directly tied to QCR.

\n

Templates can be used through a single script, and new templates are created by writing some basic template script in a new folder. The template 'engine' is ~250 lines of (admittedly terse) Bash.

\n

How to use a template

\n

Note: QCR members can access templates directly using the qcr script from our tools

\n

Clone this Git repository:

\n
git clone https://github.com/qcr/code_templates\n
\n

Then add the qcr_templates script to your path if you'd like to use it from any directory. We recommend adding it to the ~/bin directory as follows:

\n
mkdir ~/bin\nln -s /path/to/code_templates/qcr_templates ~/bin/\n
\n

Your new projects can be created from a template simply by making a new folder and running the script with your chosen template inside that folder. For example:

\n
qcr_templates ros_package\n
\n

This will retrieve the template, and start a prompt asking you for values for your project. In general, it's best to use snake_case for programming variable values (i.e. my_variable_value not myVariableValue as our modification function assumes snake_case).

\n

How templates work

\n

We use a very basic custom templating method in this project, with templates being declared by creating a new folder in this repository. Templates are defined using named variables, the user is prompted at runtime for values for these variables, and then a project is created from the template with the runtime values applied. Variable values can be used to:

\n\n

Template variable names are typically upper snake case (i.e. MY_VARIABLE), can have default values which will be shown in the prompt, and are evaluated using Bash. This means that any variable with no value is considered false, and all other values considered true. A current limitation is that variables with default values cannot be changed to have no value by the user at runtime.

\n

Variables are declared in a special file called .variables.yaml at the root of each template, with their syntax described below.

\n

In-file text replacement

\n

Variables are replaced in text using their runtime value, with the __CAMEL and __PASCAL modifiers supported. For example, the following Python template:

\n
\nclass MY_VARIABLE__PASCAL:\n\n  def __init__(self):\n    self._MY_VARIABLE = None\n\ndef MY_VARIABLE__CAMEL():\n  print(\"Hi\")\n
\n

when given MY_VARIABLE='obstacle_detector', would produce:

\n
\nclass ObstacleDetector:\n\n  def __init__(self):\n    self._obstacle_detector = None\n\ndef obstacleDetector():\n  print(\"Hi\")\n
\n

Conditional in-file blocks

\n

Variables can also be used to declare whether blocks of code should be included in the output. Blocks begin with a TEMPLATE_START variable_1 variable_2 ... line, and end with a TEMPLATE_END line. The block is included if any of variable_1 variable_2 ... have a value, and will only be excluded if all are empty. For example, the following CMake template:

\n
\ncatkin_package(\n  TEMPLATE_START ADD_MSGS ADD_SERVICES ADD_ACTIONS\n  CATKIN_DEPENDS message_runtime\n  TEMPLATE_END\n  )\n
\n

includes a dependency on message_runtime if any of ADD_MSGS, ADD_SERVICES, ADD_ACTIONS have a value. The TEMPLATE_* lines are removed from the result, with the output being:

\n
catkin_package(\n  CATKIN_DEPENDS message_runtime\n  )\n
\n

The opposite relationship (include if all have a value) isn't yet supported, but should be supported in the future.

\n

Variable file names

\n

File names can be given variable values simply by using the variable name in the filename. For example, a file called MY_VARIABLE.cpp with a runtime value of MY_VARIABLE='object_detector' would be renamed to object_detector.cpp.

\n

Conditional file existence

\n

Another special file called .files.yaml marks files which should only exist under certain conditions. Its syntax is based on very basic key-value pairs (filename: variable_1 variable_2 ...), with the file included if any of variable_1 variable_2 ... have a value. See existing templates for examples.

\n

Creating your own templates

\n

Creating your own templates is almost as simple as using templates. To create your own template:

\n
    \n
  1. \n

    Clone this repository locally:

    \n
    git clone https://github.com/qcr/code_templates\n
    \n
  2. \n
  3. \n

    Make a new folder with the name of your template. For example, a template called my_new_template is denoted by a folder called my_new_template.

    \n
  4. \n
  5. \n

    Create a .variables.yaml file in your new folder. The format is the following:

    \n
    VARIABLE_NAME:\n  text: \"Text to be displayed to user in prompt\"\n  default: \"Default static value\"\nVARIABLE_WITH_DYNAMIC_DEFAULT:\n  text: \"Variable with default value determined at runtime\"\n  default: $(echo \"This Bash code will be executed\")\nOPTIONAL_VARIABLE:\n  text: \"Variable will be left blank if the user provides no input\"\n  default: \"\"\n
    \n
  6. \n
  7. \n

    Create the files for your template, taking advantage of whichever variable features your template requires.

    \n
  8. \n
  9. \n

    Test your template locally before pushing to master (as soon as it's pushed everyone can use it). Test locally by directly running the use_template script with local files instead of the remote:

    \n
    LOCAL_LOCATION=/path/to/local/clone/of/this/repo ./use_template my_new_template\n
    \n
  10. \n
  11. \n

    Once it works, push to the master branch. Done!

    \n
  12. \n
\n

Please note: a very crude YAML parser is written in use_template to keep the dependencies of this software as low as possible. I emphasise, crude. You should not expect full YAML functionality (keep values on same line as key, don't use line breaks, no escape characters, etc.).

\n","name":"QCR's Code Templates","type":"code","url":"https://github.com/qcr/code_templates","image":"https://github.com/qcr/code_templates/wiki/demo.gif","_images":["/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.webm","/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.mp4","/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.webp","/_next/static/images/demo-70a6816faa1e78bf2b6f4c8115a1a047.jpg"],"src":"/content/qcr/code_templates.md","id":"code-templates","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/delta_descriptors_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/delta_descriptors_code.json new file mode 100644 index 0000000000..1c80dc45dc --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/delta_descriptors_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Update 2021-Jun-02: A pytorch-based (GPU/CPU) implementation of Delta Descriptors is now available with our latest work SeqNet.

\n

Delta Descriptors

\n

Source code for the paper - \"Delta Descriptors: Change-Based Place Representation for Robust Visual Localization\", published in IEEE Robotics and Automation Letters (RA-L) 2020 and to be presented at IROS 2020. [arXiv] [IEEE Xplore][YouTube]

\n

We propose Delta Descriptor, defined as a high-dimensional signed vector of change measured across the places observed along a route. Using a difference-based description, places can be effectively recognized despite significant appearance variations.\n\"Schematic\nImages on the left are from the Oxford Robotcar dataset.

\n

Requirements

\n
matplotlib==2.0.2\nnumpy==1.15.2\ntqdm==4.29.1\nscipy==1.1.0\nscikit_learn==0.23.1\n
\n

See requirements.txt, generated using pipreqs==0.4.10 and python3.5.6

\n

Usage

\n

Download this Repository and the Nordland dataset (part)

\n

The dataset used in our paper is available here (or use commands as below). Note that the download only comprises a small part (~1 GB) of the original Nordland videos released here. These videos were first used for visual place recognition in this paper.

\n
git clone https://github.com/oravus/DeltaDescriptors.git\ncd DeltaDescriptors/\nmkdir data/\ncd data/\nwget https://zenodo.org/record/4016653/files/nordland-part-2020.zip\nunzip nordland-part-2020.zip\n
\n

The zip contains two folders: summer and winter, where each one of them comprises 1750 images which were used for experiments conducted in our paper.

\n

Describe and Match

\n

Delta Descriptors are defined on top of global image descriptors, for example, NetVLAD (Update 05 Sep 2020: see our python wrapper). Given such descriptors, compute Delta Descriptors and match across two traverses as below:

\n
python src/main.py --genDesc --genMatch -l 16 -d delta -ip1 <full_path_of_desc.npy> -ip2 <full_path_of_query_desc.npy>\n
\n

The input descriptor data is assumed to be a 2D tensor of shape [numImages,numDescDims]. The computed descriptors are stored in .npy format and the match results are stored in .npz format comprising a dict of two arrays: matchInds (matched reference index per query image) and matchDists (corresponding distance value). By default, output is stored in the ./out folder, but a different location can be specified via the --outPath argument. To see all the options, use:

\n
python src/main.py --help\n
\n
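
As a sketch of consuming the saved match output described above (the exact .npz filename under ./out is an assumption; check the script's console output or your --outPath setting):

\n
import numpy as np\n\n# Load the match results (.npz filename below is an assumption)\nmatches = np.load('./out/matches.npz')\nmatch_inds, match_dists = matches['matchInds'], matches['matchDists']\nprint(match_inds.shape, match_dists.shape)  # one entry per query image\n
\n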

The options --genDesc and --genMatch can be used in isolation or together, see example usage below.

\n

Describe only

\n

In order to compute only the descriptors for a single traverse, use:

\n
python src/main.py --genDesc -l 16 -d delta -ip1 <full_path_of_desc.npy>\n
\n

Match only

\n

For only computing matches, given the descriptors (Delta or some other), use:

\n
python src/main.py --genMatch -ip1 <full_path_of_desc.npy> -ip2 <full_path_of_query_desc.npy>\n
\n

Evaluate only

\n
python src/main.py --eval -mop <full_path_of_match_output.npz>\n
\n

or evaluate directly with --genMatch (and possibly --genDesc) flag:

\n
python src/main.py --eval --genMatch -ip1 <full_path_of_desc.npy> -ip2 <full_path_of_query_desc.npy>\n
\n

Currently, only Nordland dataset-style (1-to-1 frame correspondence) evaluation is supported; GPS/INS coordinate-based evaluation (for example, for the Oxford Robotcar dataset) will be added soon. The evaluation code can be used to generate PR curves; in its current form it prints Precision @ 100% Recall for localization radii of 1, 5, 10 and 20 (frames).

\n

Citation

\n

If you find this code or our work useful, cite it as below:

\n
@article{garg2020delta,\n  title={Delta Descriptors: Change-Based Place Representation for Robust Visual Localization},\n  author={Garg, Sourav and Harwood, Ben and Anand, Gaurangi and Milford, Michael},\n  journal={IEEE Robotics and Automation Letters},\n  year={2020},\n  publisher={IEEE},\n  volume={5},\n  number={4},\n  pages={5120-5127},  \n}\n
\n

License

\n

The code is released under MIT License.

\n

Related Projects

\n

SeqNet (2021).

\n

CoarseHash (2020)

\n

seq2single (2019)

\n

LoST (2018)

\n","name":"Delta Descriptors","type":"code","url":"https://github.com/oravus/DeltaDescriptors","id":"delta_descriptors_code","image":"ral-iros-2020-delta-descriptors-schematic.png","_images":["/_next/static/images/ral-iros-2020-delta-descriptors-schematic-b5f57732c327f2f8546715b5dc3643af.png.webp","/_next/static/images/ral-iros-2020-delta-descriptors-schematic-95f5d1a50f3d92aa3344d9782ac13c32.png"],"src":"/content/visual_place_recognition/delta-descriptors.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/event_vpr_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/event_vpr_code.json new file mode 100644 index 0000000000..8fa1719e91 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/event_vpr_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Event-Based Visual Place Recognition With Ensembles of Temporal Windows

\n

\"License:\n\"stars\"\n\"GitHub\n\"GitHub\n\"\"

\n

License + Attribution

\n

This code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use this dataset or the code in a scientific publication, please cite the following paper (preprint and additional material):

\n
@article{fischer2020event,\n  title={Event-Based Visual Place Recognition With Ensembles of Temporal Windows},\n  author={Fischer, Tobias and Milford, Michael},\n  journal={IEEE Robotics and Automation Letters},\n  volume={5},\n  number={4},\n  pages={6924--6931},\n  year={2020}\n}\n
\n

The Brisbane-Event-VPR dataset accompanies this code repository: https://zenodo.org/record/4302805

\n

\"Dataset

\n

Code overview

\n

The following code is available:

\n\n

Please note that in our paper we used manually annotated and then interpolated correspondences; instead here we provide matches based on the GPS data. Therefore, the results between what is reported in the paper and what is obtained using the methods here will be slightly different.

\n

Reconstruct videos from events

\n
    \n
  1. \n

    Clone this repository: git clone https://github.com/Tobias-Fischer/ensemble-event-vpr.git

    \n
  2. \n
  3. \n

    Clone https://github.com/cedric-scheerlinck/rpg_e2vid and follow the instructions to create a conda environment and download the pretrained models.

    \n
  4. \n
  5. \n

    Download the Brisbane-Event-VPR dataset.

    \n
  6. \n
  7. \n

    Now convert the bag files to txt/zip files that can be used by the event2video code: python convert_rosbags.py. Make sure to adjust the path to the extract_events_from_rosbag.py file from the rpg_e2vid repository.

    \n
  8. \n
  9. \n

    Now do the event to video conversion: python reconstruct_videos.py. Make sure to adjust the path to the run_reconstruction.py file from the rpg_e2vid repository.

    \n
  10. \n
\n

Create suitable conda environment

\n
    \n
  1. Create a new conda environment with the dependencies: conda create --name brisbaneeventvpr tensorflow-gpu pynmea2 scipy matplotlib numpy tqdm jupyterlab opencv pip ros-noetic-rosbag ros-noetic-cv-bridge python=3.8 -c conda-forge -c robostack
  2. \n
\n

Export RGB frames from rosbags

\n
    \n
  1. \n

    conda activate brisbaneeventvpr

    \n
  2. \n
  3. \n

    python export_frames_from_rosbag.py

    \n
  4. \n
\n

Event-based VPR with ensembles

\n
    \n
  1. \n

    Create a new conda environment with the dependencies: conda create --name brisbaneeventvpr tensorflow-gpu pynmea2 scipy matplotlib numpy tqdm jupyterlab opencv pip

    \n
  2. \n
  3. \n

    conda activate brisbaneeventvpr

    \n
  4. \n
  5. \n

    git clone https://github.com/QVPR/netvlad_tf_open.git

    \n
  6. \n
  7. \n

    cd netvlad_tf_open && pip install -e .

    \n
  8. \n
  9. \n

    Download the NetVLAD checkpoint here (1.1 GB). Extract the zip and move its contents to the checkpoints folder of the netvlad_tf_open repository.

    \n
  10. \n
  11. \n

    Open the Brisbane Event VPR.ipynb and adjust the path to the dataset_folder.

    \n
  12. \n
  13. \n

    You can now run the code in Brisbane Event VPR.ipynb.

    \n
  14. \n
\n

Related works

\n

Please check out this collection of related works on place recognition.

\n","name":"Visual Place Recognition using Event Cameras","type":"code","url":"https://github.com/Tobias-Fischer/ensemble-event-vpr","id":"event_vpr_code","image":"./dataset.png","_images":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"src":"/content/visual_place_recognition/event-vpr-code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/gtsam-quadrics.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/gtsam-quadrics.json new file mode 100644 index 0000000000..a7b2e9fb17 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/gtsam-quadrics.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

~ Please see our QuadricSLAM repository for examples of full SLAM systems~

\n

GTSAM Quadrics: quadric landmarks for GTSAM

\n

\"QUT\n\"Best\n\"Primary\n\"PyPI\n\"License\"

\n

This repository contains an extension to the popular Georgia Tech Smoothing and Mapping (GTSAM) factor graph optimisation library. We introduce constrained dual quadrics as GTSAM variables, and support the estimation of the quadric parameters using 2-D bounding box measurements. These tools are available in both C++ and Python, and are designed to be used in conjunction with GTSAM. The extensions power our QuadricSLAM library, where we use quadrics for simultaneous localisation and mapping (SLAM) problems.

\n

\"Demonstrations

\n

We expect this repository to be active and continually improved upon. If you have any feature requests or experience any bugs, don't hesitate to let us know. Our code is free to use, and licensed under BSD-3. We simply ask that you cite our work if you use QuadricSLAM in your own research.

\n

Installation

\n

Note: we are aware of some issues with the wheels. If you encounter issues, we recommend the \"Install from source via Pip\" steps below

\n

Pre-built wheels of this library are available on PyPI for most Linux systems, as well as source distributions. Install the library with:

\n
pip install gtsam_quadrics\n
\n

The Python library is built from a custom setup.py, which uses CMake to build a custom C++ extension bound using both PyBind11 and Georgia Tech's wrap meta-library.

\n

Building from source

\n

You can build from source if you want closer access to the C++ libraries, or are having trouble finding a pre-compiled wheel for your system. There are two levels at which you can build the package from source: the Python level using pip, and the C++ level using CMake.

\n

All building from source methods expect the following system dependencies to be available:

\n\n

Instructions for installing these dependencies vary across Linux systems, but the following should be sufficient on a relatively recent Ubuntu version:

\n
sudo apt install build-essential cmake libboost-all-dev libmetis-dev\n
\n

If your distribution's CMake version is too old, it can easily be upgraded by following Kitware's instructions here.

\n

Install from source via Pip

\n

Simply request the sdist instead of binary wheel:

\n
pip install gtsam_quadrics --no-binary :all:\n
\n

Building the Python package from source

\n

Installing from source is very similar to the pip method above, except installation is from a local copy:

\n
    \n
  1. \n

    Clone the repository, and initialise the gtsam submodule:

    \n
    git clone --recurse-submodules https://github.com/best-of-acrv/gtsam-quadrics\n
    \n
  2. \n
  3. \n

    Enter the gtsam_quadrics directory, and simply install via pip (the build process will take a while):

    \n
    pip install .\n
    \n
  4. \n
\n

Building the C++ package with CMake

\n
    \n
  1. \n

    Clone the repository, and initialise the gtsam submodule:

    \n
    git clone --recurse-submodules https://github.com/best-of-acrv/gtsam-quadrics\n
    \n
  2. \n
  3. \n

    Create an out-of-source build directory:

    \n
    cd gtsam_quadrics\nmkdir build\ncd build\n
    \n
  4. \n
  5. \n

    Run the configuration and generation CMake steps, optionally building the Python wrapper using the BUILD_PYTHON_WRAP variable:

    \n
    cmake -DBUILD_PYTHON_WRAP=ON ..\n
    \n
  6. \n
  7. \n

    Run the build step:

    \n
    cmake --build . -j$(nproc)\n
    \n
  8. \n
\n

Then optionally run any of the other supported targets as described below:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Target nameDescription
checkcompile and run optional unit tests
examplescompiles the c++ examples
docgenerates the doxygen documentation
doc_cleanremoves the doxygen documentation
installinstalls the gtsam_quadrics c++/python library
\n

Note: documentation requires Doxygen (sudo apt install doxygen) and epstopdf (sudo apt install texlive-font-utils)

\n

Using the GTSAM Quadrics and GTSAM Python APIs

\n

GTSAM Quadrics and GTSAM can be used like native Python packages. Below are some examples to help get you started with using GTSAM Quadrics:

\n
import gtsam\nimport gtsam_quadrics\nimport numpy as np\n\n# setup constants\npose_key = int(gtsam.symbol(ord('x'), 0))\nquadric_key = int(gtsam.symbol(ord('q'), 5))\n\n# create calibration\ncalibration = gtsam.Cal3_S2(525.0, 525.0, 0.0, 160.0, 120.0)\n\n# create graph/values\ngraph = gtsam.NonlinearFactorGraph()\nvalues = gtsam.Values()\n\n# create noise model (SD=10)\nbbox_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([10]*4, dtype=np.float))\n\n# create quadric landmark (pose=eye(4), radii=[1,2,3])\ninitial_quadric = gtsam_quadrics.ConstrainedDualQuadric(gtsam.Pose3(), np.array([1.,2.,3.]))\n\n# create bounding-box measurement (xmin,ymin,xmax,ymax)\nbounds = gtsam_quadrics.AlignedBox2(15,12,25,50)\n\n# create bounding-box factor\nbbf = gtsam_quadrics.BoundingBoxFactor(bounds, calibration, pose_key, quadric_key, bbox_noise)\n\n# add landmark to values\ninitial_quadric.addToValues(values, quadric_key)\n\n# add bbf to graph\ngraph.add(bbf)\n\n\n# get quadric estimate from values (assuming the values have changed)\nquadric_estimate = gtsam_quadrics.ConstrainedDualQuadric.getFromValues(values, quadric_key)\n
\n
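
The snippet above only builds the problem; the sketch below shows how an optimised estimate could then be read back using standard GTSAM tooling. It is illustrative only: a graph containing a single bounding-box factor is under-constrained, and a real system also adds pose variables, priors, and odometry factors (see the QuadricSLAM repository for complete examples).

\n
# Sketch only: optimise the graph and read back the updated quadric.\n# Assumes the graph/values have been populated with enough constraints\n# (poses, priors, odometry) to make the problem well-posed.\noptimizer = gtsam.LevenbergMarquardtOptimizer(graph, values)\nresult = optimizer.optimize()\nquadric_estimate = gtsam_quadrics.ConstrainedDualQuadric.getFromValues(result, quadric_key)\n
\n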

Citing our work

\n

If you are using this library in academic work, please cite the publication:

\n

L. Nicholson, M. Milford and N. Sünderhauf, \"QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM,\" in IEEE Robotics and Automation Letters, vol. 4, no. 1, pp. 1-8, Jan. 2019, doi: 10.1109/LRA.2018.2866205. PDF.

\n
@article{nicholson2019,\n  title={QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM},\n  author={Nicholson, Lachlan and Milford, Michael and Sünderhauf, Niko},\n  journal={IEEE Robotics and Automation Letters},\n  year={2019},\n}\n
\n","name":"GTSAM extension for quadrics","type":"code","url":"https://github.com/qcr/gtsam-quadrics","image":"https://github.com/qcr/gtsam-quadrics/raw/master/doc/gtsam_quadrics.png","_images":["/_next/static/images/gtsam_quadrics-9ce945399d611f449b8df8e1db6602ae.png.webp","/_next/static/images/gtsam_quadrics-cb27c37d5d64abed2e30e1523a8cec1a.png"],"src":"/content/quadricslam/gtsam_quadrics.md","id":"gtsam-quadrics","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/heaputil_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/heaputil_code.json new file mode 100644 index 0000000000..5c9a7fb2ce --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/heaputil_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

A Hierarchical Dual Model of Environment- and Place-Specific Utility for Visual Place Recognition

\n

\"License:\n\"stars\"\n\"QUT\n\"arXiv\"\n\"IEEE\n\"Open\n\"YouTube\"

\n

\"PWC\"

\n

Introduction

\n

HEAPUtil is an IEEE RA-L & IROS 2021 research paper. In this work, we present a method for unsupervised estimation of the Environment-Specific (ES) and Place-Specific (PS) Utility of unique visual cues in a reference map represented as VLAD clusters. Furthermore, we employ this Utility in a unified hierarchical global-to-local VPR pipeline to enable better place recognition and localization capability for robots, with reduced storage and compute time requirements. This repo contains the official code for estimating the Utility of visual cues and the hierarchical global-to-local VPR pipeline.

\n

\n \"\"\n
Utility-guided Hierarchical Visual Place Recognition.\n

\n

For more details, please see:

\n\n

Dependencies

\n

Simply run the following command: pip install -r requirements.txt

\n

Conda

\n
conda create -n heaputil python=3.8 mamba -c conda-forge -y\nconda activate heaputil\nmamba install numpy opencv pytorch matplotlib faiss-gpu scipy scikit-image=0.18.2 torchvision scikit-learn h5py -c conda-forge\n
\n

Data

\n

For Data Loading, we use .mat files which contain information regarding Reference Image Paths, Query Image Paths, Ground-truth Co-ordinates for Reference and Query Images, and the Positive Localization Distance Threshold. These .mat files for the Berlin Kudamm, Nordland Summer Vs Winter and Oxford Day Vs Night datasets are present in the ./dataset-mat-files folder.

\n
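
A minimal sketch for inspecting one of these files with SciPy (the filename used below is an assumption; substitute any file from ./dataset-mat-files):

\n
from scipy.io import loadmat\n\n# Filename below is an assumption; use any file from ./dataset-mat-files\nmeta = loadmat('./dataset-mat-files/berlin.mat')\nprint(meta.keys())  # reference/query paths, ground-truth coordinates, threshold\n
\n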

We provide the Berlin Kudamm Dataset for Inference:

\n\n

For more details regarding the Berlin Kudamm dataset please refer to this paper.

\n

For all the scripts, apart from SuperPoint Extraction, you may use the --dataset flag to mention the dataset to use. By default, it is set to 'berlin' and the default choices are ['oxford', 'nordland', 'berlin'].

\n

Quick Start

\n

Here's a Colab Notebook to effortlessly run tests on the Berlin Dataset.

\n

Scripts

\n

Please use the --help flag to see all available arguments for the scripts.

\n

NetVLAD (Global Descriptor)

\n

Extract NetVLAD Descriptors, Predictions and Cluster Masks:

\n
python NetVLAD/main.py --resume './data/NetVLAD/netvlad-checkpoint-cc16' --root_dir './data' --save --save_path './data/NetVLAD'\n
\n

Environment- and Place-Specific Utility Estimation

\n

Estimate the Environment- and Place-Specific Utility of VLAD Clusters for the Reference Map:

\n
python utility.py --root_dir './data' --netvlad_extracts_path './data/NetVLAD' --save_path './data/Utility' --save_viz\n
\n

You may use the --save_viz flag to visualize the Environment-Specific and Place-Specific Utility as shown below:

\n

\n     \n
Visualizing ES (left) & PS (right) Utility (Red indicates low utility and blue/gray indicates high utility)\n

\n

SuperPoint Feature Extraction

\n

Generate path lists which are required for SuperPoint Extraction & SuperGlue:

\n
python generate_path_lists.py --root_dir './data' --netvlad_predictions './data/NetVLAD' --save_path './data'\n
\n

Extract SuperPoint features for the Reference Map:

\n
python SuperGlue/superpoint_extraction.py --input_images './data/db_list.txt' --split 'db' --input_dir './data' --output_dir './data/SuperPoint'\n
\n

Extract SuperPoint features for the Queries:

\n
python SuperGlue/superpoint_extraction.py --input_images './data/q_list.txt' --split 'query' --input_dir './data' --output_dir './data/SuperPoint'\n
\n

Utility-guided Local Feature Matching

\n

You may use the --viz flag to visualize the best matches as a gif.

\n

Vanilla

\n

Run Vanilla SuperPoint based Local Feature Matching:

\n
python local_feature_matching.py --input_dir './data' --output_dir './data/LFM/Vanilla' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility'\n
\n

Environment-Specific (ES) Utility

\n

Run ES-Utility guided Local Feature Matching:

\n
python local_feature_matching.py --input_dir './data' --output_dir './data/LFM/ES_Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility' \\\n--es_utility\n
\n

Place-Specific (PS) Utility

\n

Run PS-Utility guided Local Feature Matching:

\n
python local_feature_matching.py --input_dir './data' --output_dir './data/LFM/PS_Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility' \\\n--ps_utility\n
\n

The default number of top utility clusters used for local feature matching is 10. Please use the --k flag to use a different number of top utility clusters.

\n

Combined ES & PS Utility

\n

Run ES & PS-Utility guided Local Feature Matching:

\n
python local_feature_matching.py --input_dir './data' --output_dir './data/LFM/Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility' \\\n--es_utility --ps_utility --viz\n
\n

The default number of top utility clusters used for local feature matching is X-1, where X is the number of useful clusters determined by the Environment-Specific system. To use a different number of top utility clusters, please use the --non_default_k and --k flags.

\n

We use the --viz flag to visualize the best matches along with utility reference masks as a gif as shown below:

\n

\n \n
ES & PS Utility-guided Local Feature Matching (Cyan mask represents regions with high utility)\n

\n

Utility-guided SuperGlue

\n

Similar to Local Feature Matching, you may run the superglue_match_pairs.py file for Vanilla SuperGlue & Utility-guided SuperGlue. You may use the --viz flag to visualize all the matches and dump the SuperGlue-style plots.

\n

Run ES & PS-Utility guided SuperGlue:

\n
python superglue_match_pairs.py --input_pairs './data/berlin_netvlad_candidate_list.txt' --input_dir './data' --output_dir './data/SuperGlue/Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --utility_path './data/Utility' \\\n--es_utility --ps_utility\n
\n

BibTeX Citation

\n

If any ideas from the paper or code from this repo are used, please consider citing:

\n
@article{keetha2021hierarchical,\n  author={Keetha, Nikhil Varma and Milford, Michael and Garg, Sourav},\n  journal={IEEE Robotics and Automation Letters}, \n  title={A Hierarchical Dual Model of Environment- and Place-Specific Utility for Visual Place Recognition}, \n  year={2021},\n  volume={6},\n  number={4},\n  pages={6969-6976},\n  doi={10.1109/LRA.2021.3096751}}\n
\n

The code is licensed under the MIT License.

\n

Acknowledgements

\n

The authors acknowledge the support from the Queensland University of Technology (QUT) through the Centre for Robotics.

\n

Furthermore, we would like to acknowledge the PyTorch implementation of NetVLAD from Nanne and the original implementation of SuperGlue.

\n

Related works

\n

Please check out this collection of related works on place recognition.

\n","name":"HEAPUtil","type":"code","url":"https://github.com/Nik-V9/HEAPUtil","id":"heaputil_code","image":"assets/overview.jpg","_images":["/_next/static/images/overview-8c193585e23714439d55f0227d88f923.jpg.webp","/_next/static/images/overview-fc609d6102a3c08cb20b14382e57ee50.jpg"],"src":"/content/visual_place_recognition/heaputil.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/lost_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/lost_code.json new file mode 100644 index 0000000000..60dc63b4ee --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/lost_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics

\n

This is the source code for the paper titled - \"LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics\", [arXiv][RSS 2018 Proceedings]

\n

An example output image showing Keypoint Correspondences:

\n

\"An

\n

Flowchart of the proposed approach:

\n

\"Flowchart

\n

If you find this work useful, please cite it as:
\nSourav Garg, Niko Sunderhauf, and Michael Milford. LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics. Proceedings of Robotics: Science and Systems XIV, 2018.
\nbibtex:

\n
@article{garg2018lost,\ntitle={LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics},\nauthor={Garg, Sourav and Suenderhauf, Niko and Milford, Michael},\njournal={Proceedings of Robotics: Science and Systems XIV},\nyear={2018}\n}\n
\n

Please also include RefineNet's citation as mentioned on their GitHub page.

\n

Setup and Run

\n

Dependencies

\n\n

Download

\n
    \n
  1. In your workspace, clone the repositories:
    git clone https://github.com/oravus/lostX.git\ncd lostX\ngit clone https://github.com/oravus/refinenet.git\n
    \nNOTE: If you download this repository as a zip, the refinenet fork will not be downloaded automatically because it is a git submodule.
  2. \n
  3. Download the Resnet-101 model pre-trained on Cityscapes dataset from here or here. More details on RefineNet's Github page.\n\n
  4. \n
  5. If you are using docker, download the docker image:
    docker pull souravgarg/vpr-lost-kc:v1\n
    \n
  6. \n
\n

Run

\n
    \n
  1. \n

    Generate and store semantic labels and dense convolutional descriptors from RefineNet's conv5 layer\nIn the MATLAB workspace, from the refinenet/main/ directory, run:

    \n
    demo_predict_mscale_cityscapes\n
    \n

    The above will use the sample dataset from refinenet/datasets/ directory. You can set path to your data in demo_predict_mscale_cityscapes.m through variable datasetName and img_data_dir.
    \nYou might have to run vl_compilenn before running the demo, please refer to the instructions for running refinenet in their official Readme.md

    \n
  2. \n
  3. \n

    [For Docker users]
    \nIf you have an environment with python and other dependencies installed, skip this step, otherwise run a docker container:

    \n
    docker run -it -v PATH_TO_YOUR_HOME_DIRECTORY/:/workspace/ souravgarg/vpr-lost-kc:v1 /bin/bash\n
    \n

    From within the docker container, navigate to lostX/lost_kc/ repository.
    \nThe -v option mounts PATH_TO_YOUR_HOME_DIRECTORY to the /workspace directory within the docker container.

    \n
  4. \n
  5. \n

    Reformat and pre-process RefineNet's output from lostX/lost_kc/ directory:

    \n
    python reformat_data.py -p $PATH_TO_REFINENET_OUTPUT\n
    \n

    $PATH_TO_REFINENET_OUTPUT is set to be the parent directory of predict_result_full, for example, ../refinenet/cache_data/test_examples_cityscapes/1-s_result_20180427152622_predict_custom_data/predict_result_1/

    \n
  6. \n
  7. \n

    Compute LoST descriptor:

    \n
    python LoST.py -p $PATH_TO_REFINENET_OUTPUT \n
    \n
  8. \n
  9. \n

    Repeat steps 1, 3, and 4 to generate output for the other dataset by setting the variable datasetName to 2-s.

    \n
  10. \n
  11. \n

    Perform place matching using LoST descriptors based difference matrix and Keypoint Correspondences:

    \n
    python match_lost_kc.py -n 10 -f 0 -p1 $PATH_TO_REFINENET_OUTPUT_1  -p2 $PATH_TO_REFINENET_OUTPUT_2\n
    \n
  12. \n
\n

Note: Run python FILENAME -h for any of the python source files in Step 3, 4, and 6 for description of arguments passed to those files.

\n

License

\n

The code is released under MIT License.

\n

Related Projects

\n

Delta Descriptors (2020)

\n

CoarseHash (2020)

\n

seq2single (2019)

\n","name":"LoST-X","type":"code","url":"https://github.com/oravus/lostX","id":"lost_code","image":"lost_kc/bin/day-night-keypoint-correspondence-place-recognition.jpg","_images":["/_next/static/images/day-night-keypoint-correspondence-place-recognition-38203057bf036a1e9271b0a7647119fa.jpg.webp","/_next/static/images/day-night-keypoint-correspondence-place-recognition-bed6f778b7ec1ce4edaa346e24fb33bf.jpg"],"src":"/content/visual_place_recognition/lost.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/openseqslam2_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/openseqslam2_code.json new file mode 100644 index 0000000000..7d62d0221b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/openseqslam2_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

OpenSeqSLAM2.0 Toolbox

\n

\"The

\n

OpenSeqSLAM2.0 is a MATLAB toolbox that allows users to thoroughly explore the SeqSLAM method in addressing the visual place recognition problem. The visual place recognition problem is centred around recognising a previously traversed route, regardless of whether it is seen during the day or night, in clear or inclement conditions, or in summer or winter. Recognising previously traversed routes is a crucial capability of navigating robots. Through the graphical interfaces packaged in OpenSeqSLAM2 users are able to:

\n\n

The toolbox is open-source and downloadable from the releases tab. All we ask is that if you use OpenSeqSLAM2 in any academic work, you include a reference to the corresponding publication (BibTeX is available at the bottom of the page).

\n

How to use the toolbox

\n

The toolbox is designed to be simple to use (it runs out of the box without any initial configuration required). To run the toolbox, simply run the command below (with the toolbox root directory on your MATLAB path):

\n
OpenSeqSLAM2();\n
\n

There are a number of default configuration files included in the .config directory which showcase the capabilities of the toolbox. To use a configuration file, open the toolbox as described above, then use the Import config button. A summary of the features showcased in each of the configuration files is included below:

\n\n

Note: the programs in the ./bin directory can be run standalone by providing the appropriate results / config structs as arguments, if you would like to use only a specific part of the pipeline (e.g. only configuration, progress-wrapped execution, or viewing results).

\n

Citation details

\n

If using the toolbox in any academic work, please include the following citation:

\n
@ARTICLE{2018openseqslam2,\n   author = {{Talbot}, B. and {Garg}, S. and {Milford}, M.},\n    title = \"{OpenSeqSLAM2.0: An Open Source Toolbox for Visual Place Recognition Under Changing Conditions}\",\n  journal = {ArXiv e-prints},\narchivePrefix = \"arXiv\",\n   eprint = {1804.02156},\n primaryClass = \"cs.RO\",\n keywords = {Computer Science - Robotics, Computer Science - Computer Vision and Pattern Recognition},\n     year = 2018,\n    month = apr,\n   adsurl = {http://adsabs.harvard.edu/abs/2018arXiv180402156T},\n  adsnote = {Provided by the SAO/NASA Astrophysics Data System}\n}\n
\n","name":"OpenSeqSLAM2","type":"code","url":"https://github.com/qcr/openseqslam2","id":"openseqslam2_code","image":"./docs/openseqslam2.png","_images":["/_next/static/images/openseqslam2-c5079d59d4cff5bd652acb1652d047f6.png.webp","/_next/static/images/openseqslam2-f3755fc8e61c0d81c8f0b0f42c5e08ae.png"],"src":"/content/visual_place_recognition/openseqslam2.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/patchnetvlad_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/patchnetvlad_code.json new file mode 100644 index 0000000000..f18f44843a --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/patchnetvlad_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Patch-NetVLAD: Multi-Scale Fusion of Locally-Global Descriptors for Place Recognition

\n

\"License:\n\"stars\"\n\"GitHub\n\"GitHub\n\"GitHub\n\"QUT

\n

\"PWC\"\n\"PWC\"\n\"PWC\"\n\"PWC\"\n\"PWC\"\n\"PWC\"

\n

This repository contains code for the CVPR2021 paper \"Patch-NetVLAD: Multi-Scale Fusion of Locally-Global Descriptors for Place Recognition\"

\n

The article can be found on arXiv and the official proceedings.

\n

\n \"Patch-NetVLAD\n

\n

License + attribution/citation

\n

When using code within this repository, please refer the following paper in your publications:

\n
@inproceedings{hausler2021patchnetvlad,\n  title={Patch-NetVLAD: Multi-Scale Fusion of Locally-Global Descriptors for Place Recognition},\n  author={Hausler, Stephen and Garg, Sourav and Xu, Ming and Milford, Michael and Fischer, Tobias},\n  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n  pages={14141--14152},\n  year={2021}\n}\n
\n

The code is licensed under the MIT License.

\n

Installation

\n

We recommend using conda (or better: mamba) to install all dependencies. If you have not yet installed conda/mamba, please download and install mambaforge.

\n
# On Linux:\nconda create -n patchnetvlad python numpy pytorch-gpu torchvision natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge\n# On MacOS (x86 Intel processor):\nconda create -n patchnetvlad python numpy pytorch torchvision natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge\n# On MacOS (ARM M1/M2 processor):\nconda create -n patchnetvlad python numpy pytorch torchvision natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge -c tobiasrobotics\n# On Windows:\nconda create -n patchnetvlad python numpy natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge\nconda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia\n\nconda activate patchnetvlad\n
\n

We provide several pre-trained models and configuration files. The pre-trained models will be downloaded automatically into the pretrained_models the first time feature extraction is performed.

\n
\n Alternatively, you can manually download the pre-trained models into a folder of your choice; click to expand if you want to do so.\n

We recommend downloading the models into the pretrained_models folder (which is setup in the config files within the configs directory):

\n
# Note: the pre-trained models will be downloaded automatically the first time feature extraction is performed\n# the steps below are optional!\n\n# You can use the download script which automatically downloads the models:\npython ./download_models.py\n\n# Manual download:\ncd pretrained_models\nwget -O mapillary_WPCA128.pth.tar https://cloudstor.aarnet.edu.au/plus/s/vvr0jizjti0z2LR/download\nwget -O mapillary_WPCA512.pth.tar https://cloudstor.aarnet.edu.au/plus/s/DFxbGgFwh1y1wAz/download\nwget -O mapillary_WPCA4096.pth.tar https://cloudstor.aarnet.edu.au/plus/s/ZgW7DMEpeS47ELI/download\nwget -O pittsburgh_WPCA128.pth.tar https://cloudstor.aarnet.edu.au/plus/s/2ORvaCckitjz4Sd/download\nwget -O pittsburgh_WPCA512.pth.tar https://cloudstor.aarnet.edu.au/plus/s/WKl45MoboSyB4SH/download\nwget -O pittsburgh_WPCA4096.pth.tar https://cloudstor.aarnet.edu.au/plus/s/1aoTGbFjsekeKlB/download\n
\n
\n

If you want to use the shortcuts patchnetvlad-match-two, patchnetvlad-feature-match and patchnetvlad-feature-extract, you also need to run (which also lets you use Patch-NetVLAD in a modular way):

\n
pip3 install --no-deps -e .\n
\n

Quick start

\n

Feature extraction

\n

Replace performance.ini with speed.ini or storage.ini if you want, and adapt the dataset paths - examples are given for the Pittsburgh30k dataset (simply replace pitts30k with tokyo247 or nordland for these datasets).

\n
python feature_extract.py \\\n  --config_path patchnetvlad/configs/performance.ini \\\n  --dataset_file_path=pitts30k_imageNames_index.txt \\\n  --dataset_root_dir=/path/to/your/pitts/dataset \\\n  --output_features_dir patchnetvlad/output_features/pitts30k_index\n
\n

Repeat for the query images by replacing _index with _query. Note that you have to adapt dataset_root_dir.

\n

Feature matching (dataset)

\n
python feature_match.py \\\n  --config_path patchnetvlad/configs/performance.ini \\\n  --dataset_root_dir=/path/to/your/pitts/dataset \\\n  --query_file_path=pitts30k_imageNames_query.txt \\\n  --index_file_path=pitts30k_imageNames_index.txt \\\n  --query_input_features_dir patchnetvlad/output_features/pitts30k_query \\\n  --index_input_features_dir patchnetvlad/output_features/pitts30k_index \\\n  --ground_truth_path patchnetvlad/dataset_gt_files/pitts30k_test.npz \\\n  --result_save_folder patchnetvlad/results/pitts30k\n
\n

Note that providing ground_truth_path is optional.

\n

This will create three output files in the folder specified by result_save_folder:

\n\n

Feature matching (two files)

\n
python match_two.py \\\n--config_path patchnetvlad/configs/performance.ini \\\n--first_im_path=patchnetvlad/example_images/tokyo_query.jpg \\\n--second_im_path=patchnetvlad/example_images/tokyo_db.png\n
\n

We provide the match_two.py script which computes the Patch-NetVLAD features for two given images and then determines the local feature matching between these images. While we provide example images, any image pair can be used.

\n

The script will print a score value as an output, where a larger score indicates more similar images and a lower score means dissimilar images. The function also outputs a matching figure, showing the patch correspondences (after RANSAC) between the two images. The figure is saved as results/patchMatchings.png.

\n

Training

\n
python train.py \\\n--config_path patchnetvlad/configs/train.ini \\\n--cache_path=/path/to/your/desired/cache/folder \\\n--save_path=/path/to/your/desired/checkpoint/save/folder \\\n--dataset_root_dir=/path/to/your/mapillary/dataset\n
\n

To begin, request, download and unzip the Mapillary Street-level Sequences dataset (https://github.com/mapillary/mapillary_sls).\nThe provided script will train a new network from scratch; to resume training, add --resume_path and set it to the full path (including filename and extension) of an existing checkpoint file. Note that to resume from our provided models, you must first remove the WPCA layers.

\n

After training a model, PCA can be added using add_pca.py.

\n
python add_pca.py \\\n--config_path patchnetvlad/configs/train.ini \\\n--resume_path=full/path/with/extension/to/your/saved/checkpoint \\\n--dataset_root_dir=/path/to/your/mapillary/dataset\n
\n

This will add an additional checkpoint file to the same folder as resume_path, except including a WPCA layer.

\n

FAQ

\n

\"Patch-NetVLAD

\n

How to Create New Ground Truth Files

\n

We provide three ready-to-go ground truth files in the dataset_gt_files folder; however, for evaluation on other datasets you will need to create your own .npz ground truth data files.\nEach .npz stores three variables: utmQ (a numpy array of floats), utmDb (a numpy array of floats) and posDistThr (a scalar numpy float).

\n

Each successive element within utmQ and utmDb needs to correspond to the corresponding row of the image list file. posDistThr is the ground truth tolerance value (typically in meters).

\n

The following mock example details the steps required to create a new ground truth file (a code sketch combining these steps follows the list):

\n
    \n
  1. Collect GPS data for your query and database traverses and convert to utm format. Ensure the data is sampled at the same rate as your images.
  2. \n
  3. Select your own choice of posDistThr value.
  4. \n
  5. Save these variables using Numpy, such as this line of code:\nnp.savez('dataset_gt_files/my_dataset.npz', utmQ=my_utmQ, utmDb=my_utmDb, posDistThr=my_posDistThr)
  6. \n
\n
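
Putting the steps above together, a minimal sketch is shown below (the utm package and the GPS values are assumptions used purely for illustration):

\n
import numpy as np\nimport utm  # assumed helper for GPS -> UTM conversion; any equivalent works\n\n# Hypothetical (lat, lon) pairs, one per row of the corresponding image list file\nquery_gps = [(51.7520, -1.2577), (51.7525, -1.2580)]\ndb_gps = [(51.7519, -1.2576), (51.7530, -1.2590)]\n\nmy_utmQ = np.array([utm.from_latlon(lat, lon)[:2] for lat, lon in query_gps], dtype=float)\nmy_utmDb = np.array([utm.from_latlon(lat, lon)[:2] for lat, lon in db_gps], dtype=float)\nmy_posDistThr = np.float64(25.0)  # ground truth tolerance in meters\n\nnp.savez('dataset_gt_files/my_dataset.npz', utmQ=my_utmQ, utmDb=my_utmDb, posDistThr=my_posDistThr)\n
\n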

Acknowledgements

\n

We would like to thank Gustavo Carneiro, Niko Suenderhauf and Mark Zolotas for their valuable comments in preparing this paper. This work received funding from the Australian Government, via grant AUSMURIB000001 associated with ONR MURI grant N00014-19-1-2571. The authors acknowledge continued support from the Queensland University of Technology (QUT) through the Centre for Robotics.

\n

Related works

\n

Please check out this collection of related works on place recognition.

\n","name":"Patch-NetVLAD","type":"code","url":"https://github.com/QVPR/Patch-NetVLAD","id":"patchnetvlad_code","image":"./assets/patch_netvlad_method_diagram.png","_images":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"src":"/content/visual_place_recognition/patchnetvlad.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/pdq.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/pdq.json new file mode 100644 index 0000000000..777733cd52 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/pdq.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

\"QUT

\n

Probability-based Detection Quality (PDQ)

\n

This repository contains the implementation of the probability-based detection quality (PDQ) evaluation measure.\nThis enables quantitative analysis of the spatial and semantic uncertainties output by a probabilistic object detection (PrOD) system.\nThis repository provides tools for analysing PrOD detections and classical detections using mAP, moLRP, and PDQ (note that PDQ results will be low for a classical detector, and mAP and moLRP scores will likely be low for PrOD detections).\nEvaluation can be performed both on COCO formatted data and on RVC1 (PrOD challenge) formatted data.\nThe repository also provides visualization tools to enable fine-grained analysis of PDQ results as shown below.

\n

\"PrOD

\n

The code here, particularly for evaluating RVC1 data is based heavily on the PrOD challenge code which can be found\nhere: https://github.com/jskinn/rvchallenge-evaluation

\n

Note that some extra functionality for PDQ beyond what is reported in the original paper and challenge is also provided, such as evaluating results using the bounding boxes of the ground-truth segmentation masks, probabilistic segmentation evaluation, and a greedy alternative to PDQ.

\n

For further details on the robotic vision challenges please see the following links for more details:

\n\n

Citing PDQ

\n

If you are using PDQ in your research, please cite the paper below:

\n
@inproceedings{hall2020probabilistic,\n  title={Probabilistic object detection: Definition and evaluation},\n  author={Hall, David and Dayoub, Feras and Skinner, John and Zhang, Haoyang and Miller, Dimity and Corke, Peter and Carneiro, Gustavo and Angelova, Anelia and S{\\\"u}nderhauf, Niko},\n  booktitle={The IEEE Winter Conference on Applications of Computer Vision},\n  pages={1031--1040},\n  year={2020}\n}\n
\n

Setup

\n

Install all python requirements

\n

This code comes with a requirements.txt file.\nMake sure you have installed all libraries as part of your working environment.

\n

Install COCO mAP API

\n

After installing all requirements, you will need to have a fully installed implementation of the COCO API located\nsomewhere on your machine.\nYou can download this API here https://github.com/cocodataset/cocoapi.

\n

Once this is downloaded and installed, you need to adjust the system path on line 11 of coco_mAP.py and line 16 of\nread_files.py to match the PythonAPI folder of your COCO API installation.

\n
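
The adjustment is just a standard sys.path entry; a hedged sketch of what such a line might look like is shown below (the path is a placeholder for your own installation):

\n
import sys\n\n# Placeholder path; point this at the PythonAPI folder of your COCO API installation\nsys.path.append('/path/to/cocoapi/PythonAPI')\n
\n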

Add LRP Evaluation Code

\n

You will also require code for using LRP evaluation measures.\nTo do this you need to simply copy the cocoevalLRP.py file from the LRP github repository to the pycocotools folder within the PythonAPI.\nYou can download the specific file here https://github.com/cancam/LRP/blob/master/cocoLRPapi-master/PythonAPI/pycocotools/cocoevalLRP.py\nYou can clone the original repository here https://github.com/cancam/LRP.

\n

After cocoevalLRP.py is located in your pycocotools folder, simply adjust the system path on line 11 of coco_LRP.py to match your PythonAPI folder.

\n

Usage

\n

All evaluation code is run on detections saved in .json files formatted as required by RVC1 (outlined later on).\nA variation is also available for the probabilistic segmentation format, also described later.\nIf you are evaluating on COCO data and have saved detections in COCO format, you can convert them to RVC1 format using\nfile_convert_coco_to_rvc1.py.\nWhen you have the appropriate files, you can evaluate on mAP, moLRP, and PDQ with evaluate.py.\nAfter evaluation is complete, you can visualise your detections for a sequence of images w.r.t. PDQ using\nvisualise_pdq_analysis.py

\n

Evaluation is currently organised so that you can evaluate either on COCO data, or on RVC1 data. Note that RVC1 data\nexpects multiple sequences rather than a single folder of data.

\n

RVC1 Detection Format

\n

RVC1 detections are saved in a single .json file per sequence being evaluated. Each .json file is formatted as follows:

\n
{\n  \"classes\": [<an ordered list of class names>],\n  \"detections\": [\n    [\n      {\n        \"bbox\": [x1, y1, x2, y2],\n        \"covars\": [\n          [[xx1, xy1],[xy1, yy1]],\n          [[xx2, xy2],[xy2, yy2]]\n        ],\n        \"label_probs\": [<an ordered list of probabilities for each class>]\n      },\n      {\n      }\n    ],\n    [],\n    []\n    ...\n  ]\n}\n
\n
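
As an illustration of this structure, a minimal sketch that writes a single-sequence detections file from Python is shown below (class names, box values and probabilities are placeholders):

\n
import json\n\nsequence_detections = {\n    'classes': ['person', 'car'],                     # ordered list of class names\n    'detections': [\n        [  # image 0: one detection\n            {\n                'bbox': [10.0, 20.0, 50.0, 80.0],     # x1, y1, x2, y2\n                'covars': [\n                    [[4.0, 0.0], [0.0, 4.0]],         # covariance of (x1, y1)\n                    [[4.0, 0.0], [0.0, 4.0]],         # covariance of (x2, y2)\n                ],\n                'label_probs': [0.9, 0.1],            # ordered per-class probabilities\n            }\n        ],\n        [],  # image 1: no detections\n    ],\n}\n\nwith open('sequence_0.json', 'w') as f:\n    json.dump(sequence_detections, f)\n
\n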

Important Notes

\n

The two covariance matrices in covars need to be positive semi-definite in order for the code to work. A covariance matrix C is positive semi-definite when its eigenvalues are not negative. You can easily check this condition in python with the following function:

\n
def is_pos_semidefinite(C):\n    return np.all(np.linalg.eigvals(C) >= 0)\n
\n
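
For example, a diagonal covariance with non-negative variances satisfies this check, while any negative eigenvalue fails it:

\n
import numpy as np\n\nC = np.array([[4.0, 0.0],\n              [0.0, 1.0]])      # eigenvalues 4 and 1, both non-negative\nassert is_pos_semidefinite(C)   # passes\n\nC_bad = np.array([[1.0, 2.0],\n                  [2.0, 1.0]])  # eigenvalues 3 and -1\nassert not is_pos_semidefinite(C_bad)\n
\n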

Probabilistic Segmentation Detections

\n

We now accommodate a way to submit probabilistic segmentation detections.\nFor this format, a .npy file for each image stores all detection probabilistic segmentation heatmaps for that image.\nThis 3D array's shape is m x h x w, where m is the number of segmentation masks, h is the image height, and w is the\nimage width.\nEach detection dictionary now contains the location of the .npy file associated with the detection and the mask id for the\nspecific detection.\nYou may also define a bounding box to replace the probabilistic segmentation for bounding-box detections, and define a\nchosen class to use for mAP and moLRP evaluation (rather than always using the max class of label_probs).

\n

Expected format for probabilistic segmentation detection files is as follows:

\n
{\n  \"classes\": [<an ordered list of class names>],\n  \"detections\": [\n    [\n      {\n        \"label_probs\": [<an ordered list of probabilities for each class>],\n        \"masks_file\": \"<location of .npy file holding probabilistic segmentation mask>\",\n        \"mask_id\": <index of this detection's mask in mask_file's numpy array>,\n        \"label\": <chosen label within label_probs> (optional),\n        \"bbox\": [x1, y1, x2, y2] (optional for use in mAP and moLRP),\n      },\n      {\n      }\n    ],\n    [],\n    []\n    ...\n  ]\n}\n
\n
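
A minimal sketch of producing the per-image heatmap array referenced by masks_file is shown below (image size, mask regions and filename are placeholders):

\n
import numpy as np\n\nh, w = 480, 640                      # image height and width\nmasks = np.zeros((2, h, w))          # m x h x w: two detections for this image\nmasks[0, 100:200, 150:300] = 0.9     # heatmap for the detection with mask_id 0\nmasks[1, 250:320, 50:120] = 0.8      # heatmap for the detection with mask_id 1\n\nnp.save('image_0000_masks.npy', masks)  # path to reference via 'masks_file'\n
\n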

file_convert_coco_to_rvc1.py

\n

To convert COCO detections to RVC1 format, simply run:

\n

python file_convert_coco_to_rvc1.py --coco_gt <gt_json_file> --coco_det <det_json_file> --rvc1_det <output_json_file>

\n

where <gt_json_file> is the COCO-format ground-truth json filename, <det_json_file> is the COCO-format detection\njson filename, and <output_json_file> is the json filename under which your RVC1-formatted detections will be saved.

\n

Important Notes

\n

By default, the COCO json format does not come with the predicted scores for all the classes available, in which case the conversion script will just\nextract the score of the chosen class and distribute the remaining probability across all other classes. However, this will produce\nincorrect measures of label quality, because label quality is the probability estimated by the detector for the object's ground-truth class, which might not\ncorrespond to the chosen class. To facilitate correct measurements, if a detection element in the coco json file (det_json_file) comes with a\nkey all_scores, the conversion script will consider it as an array of all the scores and use it instead of the default behaviour.

\n

Also, by default, the COCO json format does not consider the existence of a covariance matrix, which is needed for PDQ calculations. The conversion\nscript assigns a zeroed covariance matrix by default, but if a detection element in the coco json file (det_json_file) comes with a\nkey covars, the conversion script will use that covariance matrix instead of the default one with zeros. Please refer to the previous section RVC1 Detection Format for further information on how covars should be formatted in the json file.

\n
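
A minimal sketch of adding these optional keys to an existing COCO-format detections file before conversion (the filenames and the probability/covariance values are placeholders):

\n
import json\n\nwith open('coco_detections.json') as f:\n    dets = json.load(f)  # COCO detection results: a list of detection dicts\n\nfor det in dets:\n    # Full per-class score distribution (ordered to match the class list)\n    det['all_scores'] = [0.05, 0.85, 0.10]\n    # 2x2 covariances for the top-left and bottom-right box corners\n    det['covars'] = [[[4.0, 0.0], [0.0, 4.0]],\n                     [[4.0, 0.0], [0.0, 4.0]]]\n\nwith open('coco_detections_with_scores.json', 'w') as f:\n    json.dump(dets, f)\n
\n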

evaluate.py

\n

To perform full evaluation simply run:

\n

python evaluate.py --test_set <test_type> --gt_loc <gt_location> --det_loc <det_location> --save_folder <save_folder> --set_cov <cov> --num_workers <num_workers>

\n

Optional flags for new functionality include --bbox_gt, --segment_mode, --greedy_mode, and --prob_seg.\nThere is also an --mAP_heatmap flag but that should not generally be used.

\n\n

For further details, please consult the code.

\n

Important Notes

\n

For consistency reasons, unlike the original rvc1 evaluation code, we do not multiply PDQ by 100 to provide it as a percentage.\nPDQ is also labelled as \"PDQ\" in scores.txt rather than simply \"score\".

\n

For anyone unfamiliar with moLRP based measures, these values are losses and not qualities like all other provided measures.\nTo transform these results from losses to qualities simply take 1 - moLRP.

\n

The newly implemented modes --segment_mode, --bbox_gt, and --greedy_mode are not used for the RVC1 challenge but can be\nuseful for developing research in probabilistic segmentation, when your dataset does not have a segmentation mask, or\nwhen time is critical, respectively.

\n

visualise_pdq_analysis.py

\n

To create visualisations for probabilistic detections and PDQ analysis on a single sequence of images run:

\n

python visualise_pdq_analysis.py --data_type <test_type> --ground_truth <gt_location> --gt_img_folder <gt_imgs_location> --det_json <det_json_file> --gt_analysis <gt_analysis_file> --det_analysis <det_analysis_file> --save_folder <save_folder_location> --set_cov <cov> --img_type <ext> --colour_mode <colour_mode> --corner_mode <corner_mode> --img_set <list_of_img_names> --full_info

\n

where:

\n\n

For further details, please consult the code.

\n

Important Notes

\n

Consistency must be kept between ground-truth analysis, detection analysis, and detection .json files in order to provide meaningful visualisation.

\n

If the evaluation which produced the ground-truth analysis and detection analysis used a set covariance input, you must\nprovide that same set covariance when generating visualisations.

\n

New modes such as using probabilistic segmentation detections (--prob_seg) in segment mode (--segment_mode)\nor using bounding_box ground-truth (--bbox_gt) in the evaluation code are NOT yet supported.

\n

visualise_prob_detections.py

\n

To create visualisations for probabilistic detections on a single sequence of images run:

\n

python visualise_prob_detections.py --gt_img_folder <gt_imgs_location> --det_json <det_json_file> --save_folder <save_folder_location> --set_cov <cov> --img_type <ext> --corner_mode <corner_mode> --img_set <list_of_img_names>

\n

where:

\n\n

For further details, please consult the code.

\n

Important Notes

\n

The order of detections in the detections.json file must match the order of the images as stored in the ground-truth images\nfolder.

\n

New modes such as using probabilistic segmentation detections (--prob_seg) in the evaluation code are\nNOT yet supported.

\n

Acknowledgements

\n

Development of the probability-based detection quality evaluation measure was directly supported by:

\n

\"Australian

\n","name":"Probability-based Detection Quality (PDQ)","type":"code","url":"https://github.com/david2611/pdq_evaluation","image":"repo:/docs/qcr_web_img.jpg","image_fit":"contain","_images":["/_next/static/images/qcr_web_img-c5a515adb03792ab295e52f405822b65.jpg.webp","/_next/static/images/qcr_web_img-8b73fea58e143ca4e51ab20579b08efa.jpg"],"src":"/content/pdq.md","id":"pdq","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/pgraph-python.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/pgraph-python.json new file mode 100644 index 0000000000..cd4f0dd86c --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/pgraph-python.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

PGraph: graphs for Python

\n

\"A\n\"QUT

\n

\"PyPI\n\"PyPI\n\"GitHub

\n

\"Build\n\"Coverage\"\n\"Language\n\"pypi

\n\n

This Python package allows the manipulation of directed and non-directed graphs. It also supports embedded graphs. It is suitable for graphs with thousands of nodes.

\n

\"road

\n
from pgraph import *\nimport json\n\n# load places and routes\nwith open('places.json', 'r') as f:\n    places = json.loads(f.read())\nwith open('routes.json', 'r') as f:\n    routes = json.loads(f.read())\n\n# build the graph\ng = UGraph()\n\nfor name, info in places.items():\n    g.add_vertex(name=name, coord=info[\"utm\"])\n\nfor route in routes:\n    g.add_edge(route[0], route[1], cost=route[2])\n\n# plan a path from Hughenden to Brisbane\np = g.path_Astar('Hughenden', 'Brisbane')\ng.plot(block=False) # plot it\ng.highlight_path(p)  # overlay the path\n
\n

Properties and methods of the graph

\n

Graphs belong to the class UGraph or DGraph for undirected or directed graphs respectively. The graph is essentially a container for the vertices.

\n\n

Properties and methods of a vertex

\n

Vertices belong to the class UVertex (for undirected graphs) or DVertex (for directed graphs), which are each subclasses of Vertex.

\n\n

Vertices can be named and referenced by name.

\n
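
For example, building on the road-network example above, named vertices can be connected and planned over by name (the coordinates and cost here are placeholders):

\n
from pgraph import UGraph\n\ng = UGraph()\ng.add_vertex(name='Brisbane', coord=[153.03, -27.47])\ng.add_vertex(name='Hughenden', coord=[144.20, -20.84])\ng.add_edge('Brisbane', 'Hughenden', cost=1100)  # refer to vertices by name\n\np = g.path_Astar('Hughenden', 'Brisbane')       # plan a path by name\n
\n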

Properties and methods of an edge

\n

Edges are instances of the class Edge.\nEdges are not referenced by the graph object; each edge references a pair of vertices, and the vertices reference the edges. For a directed graph only the start vertex of an edge references the edge object, whereas for an undirected graph both vertices reference the edge object.

\n\n

Modifying a graph

\n\n

Subclassing pgraph classes

\n

Consider a user class Foo that we would like to connect using a graph overlay, i.e.\ninstances of Foo become vertices in a graph.

\n\n
class Foo(UVertex):\n  # foo stuff goes here\n  pass\n\nf1 = Foo(...)\nf2 = Foo(...)\n\ng = UGraph() # create a new undirected graph\ng.add_vertex(f1)\ng.add_vertex(f2)\n\nf1.connect(f2, cost=3)\nfor f in f1.neighbours():\n    print(f)  # say hi to the neighbours\n
\n

Under the hood

\n

The key objects and their interactions are shown below.

\n

\"data

\n

MATLAB version

\n

This is a re-engineered version of PGraph.m which ships as part of the Spatial Math Toolbox for MATLAB. This class is used to support bundle adjustment, pose-graph SLAM and various planners such as PRM, RRT and Lattice.

\n

The Python version was designed from the start to work with directed and undirected graphs, whereas directed graphs were a late addition to the MATLAB version. Semantics are similar but not identical. In particular the use of subclassing rather than references to\nuser data is encouraged.

\n","name":"Graph classes (Python)","type":"code","url":"https://github.com/petercorke/pgraph-python","image":"https://github.com/petercorke/pgraph-python/raw/master/examples/roads.png","_images":["/_next/static/images/roads-8b68dd7b635af6f867a02be9d399b4bd.png.webp","/_next/static/images/roads-18739c10c6cf2a6dccbffb581fb9a183.png"],"src":"/content/pgraph-python.md","id":"pgraph-python","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/quadricslam.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/quadricslam.json new file mode 100644 index 0000000000..2ad6f59217 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/quadricslam.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

QuadricSLAM

\n

\"QUT\n\"Primary\n\"PyPI\n\"License\"

\n

QuadricSLAM is a system for using quadrics to represent objects in a scene, leveraging common optimisation tools for simultaneous localisation and mapping (SLAM) problems to converge on stable object maps and camera trajectories. This library uses Georgia Tech's Smoothing and Mapping (GTSAM) library for factor graph optimisation, and adds support through our custom GTSAM quadrics extension.

\n

TODO update with a more holistic reflection of the repository in its current state\n\"@youtube

\n

The key features of this repository are:

\n\n

We expect this repository to be active and continually improved upon. If you have any feature requests or experience any bugs, don't hesitate to let us know. Our code is free to use, and licensed under BSD-3. We simply ask that you cite our work if you use QuadricSLAM in your own research.

\n

Installation and using the library

\n

Pre-built wheels of this library are available on PyPI for most Linux systems, as well as source distributions. Install the library with:

\n
pip install quadricslam\n
\n

From here, basic custom QuadricSLAM systems can be set up by implementing and integrating the following abstract classes:

\n
from quadricslam import DataSource, Detector, Associator, QuadricSlam, visualise\n\nclass MyDataSource(DataSource):\n  ...\n\nclass MyDetector(Detector):\n  ...\n\nclass MyAssociator(Associator):\n  ...\n\nq = QuadricSlam(data_source=MyDataSource(),\n                detector=MyDetector(),\n                associator=MyAssociator(),\n                on_new_estimate=lambda vals, labels, done: visualise(vals, labels, done))\nq.spin()\n
\n

The examples described below also provide code showing how to create customisations for a range of different scenarios.

\n

Running the examples from this repository

\n

Note: in the spirit of keeping this package light, some dependencies may not be installed; please install those manually

\n

This repository contains a number of examples to demonstrate how QuadricSLAM systems can be set up in different contexts.

\n

Each example is a file in the quadricslam_examples module, with a standalone run() function. There are two possible ways to run each example:

\n
    \n
  1. \n

    Directly through the command line:

    \n
    python -m quadricslam_examples.EXAMPLE_NAME ARGS ...\n
    \n

    e.g for the hello_quadricslam examples:

    \n
    python -m quadricslam_examples.hello_quadricslam\n
    \n
  2. \n
  3. \n

    Or from within Python:

    \n
    from quadricslam_examples.EXAMPLE_NAME import run\nrun()\n
    \n
  4. \n
\n

hello_manual_quadricslam

\n

Shows how to create a QuadricSLAM system from scratch using the primitives exposed by our GTSAM Quadrics library. The scenario is 4 viewpoints in a square around 2 quadrics in the middle of the square:

\n

\"hello_manual_quadricslam

\n

hello_quadricslam

\n

Same scenario as the hello_manual_quadricslam example, but uses the abstractions provided by this library. Shows how an entire QuadricSLAM system can be created with only a few lines of code when the appropriate components are available:

\n

\"hello_quadricslam

\n

tum_rgbd_dataset

\n

Re-creation of the TUM RGBD dataset experiments used in our initial publication. There is a script included for downloading the dataset.

\n

\"tum_rgbd_dataset

\n

Note: the paper used hand-annotated data to avoid the data association problem; as a result the example here requires a custom data associator to be created before it will run

\n

realsense_python

\n

Demonstrates how a system can be run using an RGBD RealSense, the pyrealsense2 library, and a barebones OpenCV visual odometry algorithm.

\n

The example is a simple plug-n-play system, with weak localisation and data association:

\n

\"realsense_python

\n

realsense_ros

\n

Demonstrates how a ROS QuadricSLAM system can be put together with an RGBD RealSense, the ROS RealSense library, and Kimera VIO's visual odometry system.

\n

This example includes a script for creating an entire ROS workspace containing all the required packages built from source. Once installed, it runs the same as the realsense_python example but with significantly better localisation:

\n

\"realsense_ros

\n

Citing our work

\n

If you are using this library in academic work, please cite the publication:

\n

L. Nicholson, M. Milford and N. Sünderhauf, \"QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM,\" in IEEE Robotics and Automation Letters, vol. 4, no. 1, pp. 1-8, Jan. 2019, doi: 10.1109/LRA.2018.2866205. PDF.

\n
@article{nicholson2019,\n  title={QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM},\n  author={Nicholson, Lachlan and Milford, Michael and Sünderhauf, Niko},\n  journal={IEEE Robotics and Automation Letters},\n  year={2019},\n}\n
\n","name":"QuadricSLAM","type":"code","url":"https://github.com/qcr/quadricslam","image":"https://github.com/qcr/gtsam-quadrics/raw/master/doc/quadricslam_video.png","_images":["/_next/static/images/quadricslam_video-412d8ad8190b4f7eee1320faf254cd6f.png.webp","/_next/static/images/quadricslam_video-a4d673ea6414754e153004c137d2a2c1.png"],"src":"/content/quadricslam/quadricslam.md","id":"quadricslam","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/robotics-toolbox-python.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/robotics-toolbox-python.json new file mode 100644 index 0000000000..def235822c --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/robotics-toolbox-python.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Robotics Toolbox for Python

\n

\"A\n\"Powered\n\"QUT

\n

\"PyPI\n\"Anaconda\n\"PyPI

\n

\"Build\n\"Coverage\"\n\"PyPI\n\"License:

\n\n\n\n\n\n
\n\"\"\nA Python implementation of the Robotics Toolbox for MATLAB®\n\n
\n\n

Contents

\n\n
\n

\n

Synopsis

\n

This toolbox brings robotics-specific functionality to Python, and leverages\nPython's advantages of portability, ubiquity and support, and the capability of\nthe open-source ecosystem for linear algebra (numpy, scipy), graphics\n(matplotlib, three.js, WebGL), interactive development (jupyter, jupyterlab,\nmybinder.org), and documentation (sphinx).

\n

The Toolbox provides tools for representing the kinematics and dynamics of\nserial-link manipulators - you can easily create your own in Denavit-Hartenberg\nform, import a URDF file, or use over 30 supplied models for well-known\ncontemporary robots from Franka-Emika, Kinova, Universal Robotics, Rethink as\nwell as classical robots such as the Puma 560 and the Stanford arm.

\n

The Toolbox contains fast implementations of kinematic operations. The forward\nkinematics and the manipulator Jacobian can be computed in less than 1 microsecond\nwhile numerical inverse kinematics can be solved in as little as 4 microseconds.

\n

The toolbox also supports mobile robots with functions for robot motion models\n(unicycle, bicycle), path planning algorithms (bug, distance transform, D*,\nPRM), kinodynamic planning (lattice, RRT), localization (EKF, particle filter),\nmap building (EKF) and simultaneous localization and mapping (EKF).

\n

The Toolbox provides:

\n\n

The Toolbox leverages the Spatial Maths Toolbox for Python to\nprovide support for data types such as SO(n) and SE(n) matrices, quaternions, twists and spatial vectors.

\n
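
For instance, the SE3 type from the Spatial Maths Toolbox (also used in the code examples further below) composes and inverts poses directly; a small sketch:

\n
from spatialmath import SE3\n\n# A pose from a translation and an orientation (end-effector z-axis down)\nT = SE3.Trans(0.6, -0.3, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nprint(T)\nprint(T.inv() * T)   # composes to the identity transform\n
\n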
\n

\n

Getting going

\n

You will need Python >= 3.6

\n

Using pip

\n

Install a snapshot from PyPI

\n
pip3 install roboticstoolbox-python\n
\n

Available options are:

\n\n

Put the options in a comma separated list like

\n
pip3 install roboticstoolbox-python[optionlist]\n
\n

Swift, a web-based visualizer, is\ninstalled as part of Robotics Toolbox.

\n

From GitHub

\n

To install the bleeding-edge version from GitHub

\n
git clone https://github.com/petercorke/robotics-toolbox-python.git\ncd robotics-toolbox-python\npip3 install -e .\n
\n
\n

\n

Tutorials

\n\n\n\n\n\n\n
\"\"\"\"\nDo you want to learn about manipulator kinematics, differential kinematics, inverse-kinematics and motion control? Have a look at our\ntutorial.\nThis tutorial comes with two articles to cover the theory and 12 Jupyter Notebooks providing full code implementations and examples. Most of the Notebooks are also Google Colab compatible allowing them to run online.\n
\n
\n

\n

Code Examples

\n

We will load a model of the Franka-Emika Panda robot defined by a URDF file

\n
import roboticstoolbox as rtb\nrobot = rtb.models.Panda()\nprint(robot)\n\n\tERobot: panda (by Franka Emika), 7 joints (RRRRRRR), 1 gripper, geometry, collision\n\t┌─────┬──────────────┬───────┬─────────────┬────────────────────────────────────────────────┐\n\t│link │     link     │ joint │   parent    │              ETS: parent to link               │\n\t├─────┼──────────────┼───────┼─────────────┼────────────────────────────────────────────────┤\n\t│   0 │ panda_link0  │       │ BASE        │                                                │\n\t│   1 │ panda_link1  │     0 │ panda_link0 │ SE3(0, 0, 0.333) ⊕ Rz(q0)                      │\n\t│   2 │ panda_link2  │     1 │ panda_link1 │ SE3(-90°, -0°, 0°) ⊕ Rz(q1)                    │\n\t│   3 │ panda_link3  │     2 │ panda_link2 │ SE3(0, -0.316, 0; 90°, -0°, 0°) ⊕ Rz(q2)       │\n\t│   4 │ panda_link4  │     3 │ panda_link3 │ SE3(0.0825, 0, 0; 90°, -0°, 0°) ⊕ Rz(q3)       │\n\t│   5 │ panda_link5  │     4 │ panda_link4 │ SE3(-0.0825, 0.384, 0; -90°, -0°, 0°) ⊕ Rz(q4) │\n\t│   6 │ panda_link6  │     5 │ panda_link5 │ SE3(90°, -0°, 0°) ⊕ Rz(q5)                     │\n\t│   7 │ panda_link7  │     6 │ panda_link6 │ SE3(0.088, 0, 0; 90°, -0°, 0°) ⊕ Rz(q6)        │\n\t│   8 │ @panda_link8 │       │ panda_link7 │ SE3(0, 0, 0.107)                               │\n\t└─────┴──────────────┴───────┴─────────────┴────────────────────────────────────────────────┘\n\n\t┌─────┬─────┬────────┬─────┬───────┬─────┬───────┬──────┐\n\t│name │ q0  │ q1     │ q2  │ q3    │ q4  │ q5    │ q6   │\n\t├─────┼─────┼────────┼─────┼───────┼─────┼───────┼──────┤\n\t│  qr │  0° │ -17.2° │  0° │ -126° │  0° │  115° │  45° │\n\t│  qz │  0° │  0°    │  0° │  0°   │  0° │  0°   │  0°  │\n\t└─────┴─────┴────────┴─────┴───────┴─────┴───────┴──────┘\n
\n

The symbol @ indicates that the link is an end-effector, a leaf node in the rigid-body\ntree (Python prompts are not shown to make it easy to copy+paste the code; console output is indented).\nWe will compute the forward kinematics next

\n
Te = robot.fkine(robot.qr)  # forward kinematics\nprint(Te)\n\n\t0.995     0         0.09983   0.484\n\t0        -1         0         0\n\t0.09983   0        -0.995     0.4126\n\t0         0         0         1\n
\n

We can solve inverse kinematics very easily. We first choose an SE(3) pose\ndefined in terms of position and orientation (end-effector z-axis down (A=-Z) and finger\norientation parallel to y-axis (O=+Y)).

\n
from spatialmath import SE3\n\nTep = SE3.Trans(0.6, -0.3, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ik_LM(Tep)         # solve IK\nprint(sol)\n\n\t(array([ 0.20592815,  0.86609481, -0.79473206, -1.68254794,  0.74872915,\n\t\t\t2.21764746, -0.10255606]), 1, 114, 7, 2.890164057230228e-07)\n\nq_pickup = sol[0]\nprint(robot.fkine(q_pickup))    # FK shows that desired end-effector pose was achieved\n\n\t 1         -8.913e-05  -0.0003334  0.5996\n\t-8.929e-05 -1          -0.0004912 -0.2998\n\t-0.0003334  0.0004912  -1          0.1001\n\t 0          0           0          1\n
\n

We can animate a path from the ready pose qr configuration to this pickup configuration

\n
qt = rtb.jtraj(robot.qr, q_pickup, 50)\nrobot.plot(qt.q, backend='pyplot', movie='panda1.gif')\n
\n

\n\t\n

\n

where we have specified the matplotlib pyplot backend. Blue arrows show the joint axes and the coloured frame shows the end-effector pose.

\n

We can also plot the trajectory in the Swift simulator (a browser-based 3d-simulation environment built to work with the Toolbox)

\n
robot.plot(qt.q)\n
\n

\n\t\n

\n

We can also experiment with velocity controllers in Swift. Here is a resolved-rate motion control example

\n
import swift\nimport roboticstoolbox as rtb\nimport spatialmath as sm\nimport numpy as np\n\nenv = swift.Swift()\nenv.launch(realtime=True)\n\npanda = rtb.models.Panda()\npanda.q = panda.qr\n\nTep = panda.fkine(panda.q) * sm.SE3.Trans(0.2, 0.2, 0.45)\n\narrived = False\nenv.add(panda)\n\ndt = 0.05\n\nwhile not arrived:\n\n    v, arrived = rtb.p_servo(panda.fkine(panda.q), Tep, 1)\n    panda.qd = np.linalg.pinv(panda.jacobe(panda.q)) @ v\n    env.step(dt)\n\n# Uncomment to stop the browser tab from closing\n# env.hold()\n
\n

\n\t\n

\n

Run some examples

\n

The notebooks folder contains some tutorial Jupyter notebooks which you can browse on GitHub. Additionally, have a look in the examples folder for many ready to run examples.

\n
\n

\n

Toolbox Research Applications

\n

The toolbox is incredibly useful for developing and prototyping algorithms for research, thanks to the exhaustive set of well-documented and mature robotic functions exposed through clean and painless APIs. Additionally, the ease with which a user can visualize their algorithm supports a rapid prototyping paradigm.

\n

Publication List

\n

J. Haviland, N. Sünderhauf and P. Corke, \"A Holistic Approach to Reactive Mobile Manipulation,\" in IEEE Robotics and Automation Letters, doi: 10.1109/LRA.2022.3146554. In the video, the robot is controlled using the Robotics toolbox for Python and features a recording from the Swift Simulator.

\n

[Arxiv Paper] [IEEE Xplore] [Project Website] [Video] [Code Example]

\n

\n \n \"\"\n \n

\n

J. Haviland and P. Corke, \"NEO: A Novel Expeditious Optimisation Algorithm for Reactive Motion Control of Manipulators,\" in IEEE Robotics and Automation Letters, doi: 10.1109/LRA.2021.3056060. In the video, the robot is controlled using the Robotics toolbox for Python and features a recording from the Swift Simulator.

\n

[Arxiv Paper] [IEEE Xplore] [Project Website] [Video] [Code Example]

\n

\n \n \"\"\n \n

\n

A Purely-Reactive Manipulability-Maximising Motion Controller, J. Haviland and P. Corke. In the video, the robot is controlled using the Robotics toolbox for Python.

\n

[Paper] [Project Website] [Video] [Code Example]

\n

\n \n \"\"\n \n

\n
\n
\n

\n

Toolbox ICRA Paper and Citation Info

\n

Check out our ICRA 2021 paper on IEEE Xplore or get the PDF from Peter's website.

\n

If the toolbox helped you in your research, please cite

\n
@inproceedings{rtb,\n  title={Not your grandmother’s toolbox--the Robotics Toolbox reinvented for Python},\n  author={Corke, Peter and Haviland, Jesse},\n  booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},\n  pages={11357--11363},\n  year={2021},\n  organization={IEEE}\n}\n
\n
\n

\n

Using the Toolbox in your Open Source Code?

\n

If you are using the Toolbox in your open source code, feel free to add our badge to your readme!

\n

For the powered by robotics toolbox badge

\n

\"Powered

\n

copy the following

\n
[![Powered by the Robotics Toolbox](https://raw.githubusercontent.com/petercorke/robotics-toolbox-python/master/.github/svg/rtb_powered.min.svg)](https://github.com/petercorke/robotics-toolbox-python)\n
\n

For the powered by python robotics badge

\n

\"Powered

\n

copy the following

\n
[![Powered by Python Robotics](https://raw.githubusercontent.com/petercorke/robotics-toolbox-python/master/.github/svg/pr_powered.min.svg)](https://github.com/petercorke/robotics-toolbox-python)\n
\n
\n

\n

Common Issues and Solutions

\n

See the common issues with fixes here.

\n","name":"Robotics Toolbox Python","type":"code","url":"https://github.com/petercorke/robotics-toolbox-python","image":"repo:/docs/figs/RobToolBox_RoundLogoB.png","image_fit":"contain","_images":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"src":"/content/robotics_toolbox/robotics-toolbox-python.md","id":"robotics-toolbox-python","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-omron-driver.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-omron-driver.json new file mode 100644 index 0000000000..d6105be208 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-omron-driver.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

OMRON ROS Robot driver

\n

\"QUT

\n

The OMRON LD-60 is a capable platform out of the box but has no ROS support. Fortunately, the LD-60 is still really a Pioneer at heart, and there are significant resources in the public domain which can interface with the platform.

\n

This does not replace Mobile Planner. Mobile Planner is still used for map creation and robot configuration. Note: Mobile Planner will run inside Wine on Ubuntu 18.04.

\n

This driver currently assumes you have a user (which can be set via Mobile Planner) with no password.

\n\"LD-60\n

Required Parameters

\n

Host IP: String e.g. 172.168.1.1

\n

Host Port: String e.g. 7272

\n

User: String e.g. omron

\n

Topics

\n

Published

\n\n

Actions

\n\n

Subscribed

\n\n

Getting Started

\n

Coming Soon

\n","name":"ROS Omron Driver","type":"code","url":"https://github.com/qcr/ros_omron_driver","image":"./docs/omron_robot.jpg","_images":["/_next/static/images/omron_robot-6882a84f2dec840b5cba11e9f8f19e65.jpg.webp","/_next/static/images/omron_robot-542517e40cecf88333a4f6e07f854cc1.jpg"],"src":"/content/ros-omron-driver.md","id":"ros-omron-driver","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-trees.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-trees.json new file mode 100644 index 0000000000..880e5a5232 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-trees.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

\n\n~ New to ROS Trees? We have guides for getting started, and solving problems using trees ~\n\n

\n

ROS Trees: Behaviour Trees for robotic systems

\n

\"QUT\n\"Primary\n\"License\"

\n

\n \n

\n

ROS Trees makes behaviour trees accessible in ROS ecosystems, bringing all the compositional benefits of behaviour trees while facilitating interactions with the underlying state of robotic systems. We build on top of the capabilities provided by py_trees, with these extra features:

\n\n

This package has been used on a number of real world robots, allowing us to combine a range of useful robot capabilities into significant robot behaviours. Featured works include:

\n\n

We are always interested in hearing about where our software is used. If you've used ROS Trees in your work, we'd love to hear more about how it was used.

\n

Using Leaves with ROS

\n

All the work for interfacing with ROS topics, Services, and Action Servers is already done for you in ros_trees.leaves_ros.*. It provides the following leaves, which extend the functionality of the base Leaf class described below:

\n\n

ROS can be tedious when it comes to passing data between processes. For example, a service response with a PoseStamped field (e.g. MyServiceResponse) and an Action Server with a PoseStamped goal field (e.g. MyActionGoal) cannot be used interchangeably. A manual conversion of output to input is needed even though they have exactly the same fields. This becomes extremely tiresome in the scope of a tree, where linking outputs to inputs is widespread. It makes it impossible to chain standalone leaves together, as they are inherently dependent on each other's input and output formats.

\n

ros_trees handles this pain \"automagically\" through the method ros_trees.data_management.auto_generate(). This function tries as hard as possible to generate an instance of the desired object class from data (don't ask...). While this saves you from writing endless arbitrary linking code, it also improves the quality of your leaves. Using this method makes leaves work with as many different types of input as possible, rather than requiring manual conversion code for every use case.

\n

Basic Examples

\n

Leaves can be written as an instance or class, with the class approach generally preferred. Below are some basic examples of how to write your own leaves:

\n\n
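
The exact examples live in the repository; as a rough sketch only, a class-based leaf typically looks something like the following. The constructor signature (a display name followed by the keyword arguments described later in this README, such as result_fn and save) is an assumption here, so check it against ros_trees.leaves.Leaf before relying on it.

\n
from ros_trees.leaves import Leaf\n\n\nclass PickFirstItem(Leaf):\n    # Hypothetical leaf: take the list produced by the previous leaf and\n    # pass its first element on to the next leaf in the tree\n    def __init__(self, *args, **kwargs):\n        super(PickFirstItem, self).__init__('Pick first item',\n                                            result_fn=self._result_fn,\n                                            save=True,\n                                            *args, **kwargs)\n\n    def _result_fn(self):\n        # self.loaded_data is filled by the default load_fn (see the lifecycle section below)\n        return self.loaded_data[0] if self.loaded_data else None\n
\n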

Accessing Common Leaves

\n

General, non-robot-specific leaves that we create are provided through the ros_trees.leaves_common submodule. Importing this provides you with a library of common leaves including:

\n\n

Writing Good Leaves

\n

A good leaf is one that is as general-purpose as possible given what it does. It may be impossible to write a leaf that performs object detection without an input image, but your leaf should be written to work with any type of input that contains an image. To achieve this, the following are some good guidelines to stick by:

\n\n

The Anatomy of a Leaf

\n

\"Anatomy

\n

The above figure shows the leaf's lifecycle, from the start to end of execution by the behaviour tree. Details for each of the parts in the image above are provided below:

\n
    \n
  1. pre: other leaves have run before this leaf and stored data in the py_trees blackboard (a centralised key-value dictionary). There is also the special method get_last_value() from ros_trees.data_management that will \"magically\" get the last saved result from a previous leaf.
  2. \n
  3. load data: the leaf attempts to load any data it needs by calling load_fn. If the load_fn argument is not provided, there is a default behaviour that should work for most cases. The default load_fn (see Leaf._default_load_fn()) will load from the blackboard key load_key if provided, otherwise it uses the last saved value through get_last_value().
  4. \n
  5. loaded: the result of load_fn can be assumed to be available in the self.loaded_data member from this point forward.
  6. \n
  7. ticking: happens when a leaf doesn't complete immediately. Override is_leaf_done() to control how the leaf decides its action is done, and override _extra_update() to start your long-running behaviour (it must not block!).
  8. \n
  9. attain a result: the result of the action is created by calling result_fn. Short running actions can simply create their result from scratch in this function.
  10. \n
  11. finished: the leaf has finished its action, and it can be assumed that the result is available in self.result.
  12. \n
  13. saving the result: if the save flag is set, the leaf will attempt to save self.result (or save_value) according to save_fn. The default save_fn should be fine for most cases; it saves the result to key save_key if it is set, otherwise the result is saved such that it is available to the next leaf in the tree with get_last_value().
  14. \n
  15. leaving: the leaf is done; no more actions should be performed by the leaf from this point on.
  16. \n
  17. evaluate the result: eval_fn is called to determine whether the leaf's process was a success or failure. The function is provided with save_value if set, otherwise self.result. If no eval_fn is provided, the default will return the first bool if the data provided is a list, otherwise the Boolean evaluation of the data.
  18. \n
  19. post: the tree has decided the leaf is done; any steps your leaf needs to perform to stop its action should be done in an overload of _extra_terminate(). For example, an ActionLeaf sends the pre-empt signal by overloading _extra_terminate().
  20. \n
\n

That's a general overview of the leaf lifecycle in ros_trees. We have covered what you will need to know about leaves for 99% of cases, but there are also other methods you can override to control other parts of the process. See the class implementation ros_trees.leaves.Leaf for full details.

\n
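
As a small illustration of these hooks (a sketch only; the constructor signature is assumed as above and should be checked against the actual class), a long-running leaf might override _extra_update() to start its work and is_leaf_done() to report completion:

\n
import time\n\nfrom ros_trees.leaves import Leaf\n\n\nclass WaitFor(Leaf):\n    # Hypothetical long-running leaf: _extra_update() kicks off the (non-blocking)\n    # work on the first tick; is_leaf_done() tells the tree when to stop ticking\n    def __init__(self, duration=2.0, *args, **kwargs):\n        super(WaitFor, self).__init__('Wait %.1fs' % duration,\n                                      result_fn=self._result_fn,\n                                      *args, **kwargs)\n        self.duration = duration\n        self._start = None\n\n    def _extra_update(self):\n        if self._start is None:\n            self._start = time.time()\n\n    def is_leaf_done(self):\n        return (self._start is not None and\n                time.time() - self._start >= self.duration)\n\n    def _result_fn(self):\n        return True\n
\n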

Leaf Parameterisation Documentation

\n

Here we describe each of the input arguments to the leaf classes. Note that all classes have the Leaf class arguments as they extend from it. If you need any more details, feel free to dig into the class implementations.

\n

Leaf class

\n

A barebones Leaf every other leaf is built upon. The constructor has the following customisation parameters:

\n\n

ActionLeaf class

\n

An ActionLeaf creates an actionlib.SimpleActionClient for interfacing with an existing ROS Action Server. It extends the base Leaf class, making the following changes:

\n\n

The ActionLeaf constructor defines one extra parameter:

\n\n

PublisherLeaf class

\n

A PublisherLeaf publishes a message to a topic each time a leaf is called by the tree. It makes the following extensions to the base Leaf class:

\n\n

The PublisherLeaf class defines two extra parameters:

\n\n

ServiceLeaf class

\n

A ServiceLeaf calls a service and returns the response. It makes the following extensions to the base Leaf class:

\n\n

The ServiceLeaf class defines one extra parameter:

\n\n

SubscriberLeaf class

\n

A SubscriberLeaf attempts to get a message on a topic, with configurable timeout parameters. It makes the following extensions to the base Leaf class:

\n\n

The SubscriberLeaf class defines four extra parameters:

\n\n","name":"Behaviour trees for ROS","type":"code","url":"https://github.com/qcr/ros_trees","image":"https://github.com/qcr/ros_trees/wiki/assets/frankie.gif","_images":["/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.webm","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.mp4","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.webp","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.jpg"],"src":"/content/ros_trees.md","id":"ros-trees","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_bene_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_bene_code.json new file mode 100644 index 0000000000..963626480f --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_bene_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments

\n

\"License:\n\"stars\"\n\"GitHub\n\"GitHub

\n

\"PWC\"\n\"PWC\"\n\"PWC\"

\n

\"PWC\"\n\"PWC\"\n\"PWC\"

\n

This repository contains code and dataset references for two papers: RT-GENE (Gaze Estimation; ECCV2018) and RT-BENE (Blink Estimation; ICCV2019 Workshops).

\n

RT-GENE (Gaze Estimation)

\n

License + Attribution

\n

The RT-GENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use this dataset or the code in a scientific publication, please cite the following paper:

\n

\"Paper

\n
@inproceedings{FischerECCV2018,\nauthor = {Tobias Fischer and Hyung Jin Chang and Yiannis Demiris},\ntitle = {{RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments}},\nbooktitle = {European Conference on Computer Vision},\nyear = {2018},\nmonth = {September},\npages = {339--357}\n}\n
\n

This work was supported in part by the Samsung Global Research Outreach program, and in part by the EU Horizon 2020 Project PAL (643783-RIA).

\n

Overview + Accompanying Dataset

\n

The code is split into four parts, each with its own README. There is also an accompanying dataset (alternative link) for the code. For more information, other datasets and more open-source software, please visit the Personal Robotics Lab's website: https://www.imperial.ac.uk/personal-robotics/software/.

\n

RT-GENE ROS package

\n

The rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time.

\n

\n \n

\n

RT-GENE Standalone Version

\n

The rt_gene_standalone directory contains instructions for eye gaze estimation given a set of images. It shares code with the rt_gene package (above), in particular the code in rt_gene/src/rt_gene.

\n

RT-GENE Inpainting

\n

The rt_gene_inpainting directory contains code to inpaint the region covered by the eyetracking glasses.

\n

\"Inpaining

\n

RT-GENE Model Training

\n

The rt_gene_model_training directory allows using the inpainted images to train a deep neural network for eye gaze estimation.

\n

\n \"Accuracy\n

\n

RT-BENE (Blink Estimation)

\n

License + Attribution

\n

The RT-BENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use our blink estimation code or dataset, please cite the relevant paper:

\n
@inproceedings{CortaceroICCV2019W,\nauthor={Kevin Cortacero and Tobias Fischer and Yiannis Demiris},\nbooktitle = {Proceedings of the IEEE International Conference on Computer Vision Workshops},\ntitle = {RT-BENE: A Dataset and Baselines for Real-Time Blink Estimation in Natural Environments},\nyear = {2019},\n}\n
\n

RT-BENE was supported by the EU Horizon 2020 Project PAL (643783-RIA) and a Royal Academy of Engineering Chair in Emerging Technologies to Yiannis Demiris.

\n

Overview + Accompanying Dataset

\n

The code is split into several parts, each with its own README. There is also an associated RT-BENE dataset. For more information, other datasets and more open-source software, please visit the Personal Robotics Lab's website: https://www.imperial.ac.uk/personal-robotics/software/. Please note that a lot of the code is shared with RT-GENE (see above), hence there are many references to RT-GENE below.

\n

\"Paper

\n

RT-BENE ROS package

\n

The rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time. For blink estimation, please refer to the estimate_blink.py file.

\n

\n \n

\n

RT-BENE Standalone Version

\n

The rt_bene_standalone directory contains instructions for blink estimation given a set of images. It makes use of the code in rt_gene/src/rt_bene.

\n

RT-BENE Model Training

\n

The rt_bene_model_training directory contains the code required to train models with the labels contained in the RT-BENE dataset (see below). We will soon add evaluation code to this directory, too.

\n

RT-BENE Dataset

\n

\"RT-BENE

\n

We manually annotated images contained in the \"noglasses\" part of the RT-GENE dataset. The RT-BENE dataset on Zenodo contains the eye image patches and associated annotations to train the blink models.

\n","name":"RT-BENE: Real-Time Blink Estimation in Natural Environments Codebase","type":"code","url":"https://github.com/Tobias-Fischer/rt_gene","image":"repo:/assets/rt_bene_best_poster_award.png","image_fit":"contain","id":"rt_bene_code","_images":["/_next/static/images/rt_bene_best_poster_award-5ac70111852de9eac6c94cd88ef726e0.png.webp","/_next/static/images/rt_bene_best_poster_award-d72f84610eb0050287dd856b52cc99c5.png"],"src":"/content/rt-gene/rt-bene-code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_gene_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_gene_code.json new file mode 100644 index 0000000000..3bb0d75bfb --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_gene_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments

\n

\"License:\n\"stars\"\n\"GitHub\n\"GitHub

\n

\"PWC\"\n\"PWC\"\n\"PWC\"

\n

\"PWC\"\n\"PWC\"\n\"PWC\"

\n

This repository contains code and dataset references for two papers: RT-GENE (Gaze Estimation; ECCV2018) and RT-BENE (Blink Estimation; ICCV2019 Workshops).

\n

RT-GENE (Gaze Estimation)

\n

License + Attribution

\n

The RT-GENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use this dataset or the code in a scientific publication, please cite the following paper:

\n

\"Paper

\n
@inproceedings{FischerECCV2018,\nauthor = {Tobias Fischer and Hyung Jin Chang and Yiannis Demiris},\ntitle = {{RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments}},\nbooktitle = {European Conference on Computer Vision},\nyear = {2018},\nmonth = {September},\npages = {339--357}\n}\n
\n

This work was supported in part by the Samsung Global Research Outreach program, and in part by the EU Horizon 2020 Project PAL (643783-RIA).

\n

Overview + Accompanying Dataset

\n

The code is split into four parts, each with its own README. There is also an accompanying dataset (alternative link) for the code. For more information, other datasets and more open-source software, please visit the Personal Robotics Lab's website: https://www.imperial.ac.uk/personal-robotics/software/.

\n

RT-GENE ROS package

\n

The rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time.

\n

\n \n

\n

RT-GENE Standalone Version

\n

The rt_gene_standalone directory contains instructions for eye gaze estimation given a set of images. It shares code with the rt_gene package (above), in particular the code in rt_gene/src/rt_gene.

\n

RT-GENE Inpainting

\n

The rt_gene_inpainting directory contains code to inpaint the region covered by the eyetracking glasses.

\n

\"Inpaining

\n

RT-GENE Model Training

\n

The rt_gene_model_training directory allows using the inpainted images to train a deep neural network for eye gaze estimation.

\n

\n \"Accuracy\n

\n

RT-BENE (Blink Estimation)

\n

License + Attribution

\n

The RT-BENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use our blink estimation code or dataset, please cite the relevant paper:

\n
@inproceedings{CortaceroICCV2019W,\nauthor={Kevin Cortacero and Tobias Fischer and Yiannis Demiris},\nbooktitle = {Proceedings of the IEEE International Conference on Computer Vision Workshops},\ntitle = {RT-BENE: A Dataset and Baselines for Real-Time Blink Estimation in Natural Environments},\nyear = {2019},\n}\n
\n

RT-BENE was supported by the EU Horizon 2020 Project PAL (643783-RIA) and a Royal Academy of Engineering Chair in Emerging Technologies to Yiannis Demiris.

\n

Overview + Accompanying Dataset

\n

The code is split into several parts, each with its own README. There is also an associated RT-BENE dataset. For more information, other datasets and more open-source software, please visit the Personal Robotics Lab's website: https://www.imperial.ac.uk/personal-robotics/software/. Please note that a lot of the code is shared with RT-GENE (see above), hence there are many references to RT-GENE below.

\n

\"Paper

\n

RT-BENE ROS package

\n

The rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time. For blink estimation, please refer to the estimate_blink.py file.

\n

\n \n

\n

RT-BENE Standalone Version

\n

The rt_bene_standalone directory contains instructions for blink estimation given a set of images. It makes use of the code in rt_gene/src/rt_bene.

\n

RT-BENE Model Training

\n

The rt_bene_model_training directory contains the code required to train models with the labels contained in the RT-BENE dataset (see below). We will soon add evaluation code to this directory, too.

\n

RT-BENE Dataset

\n

\"RT-BENE

\n

We manually annotated images contained in the \"noglasses\" part of the RT-GENE dataset. The RT-BENE dataset on Zenodo contains the eye image patches and associated annotations to train the blink models.

\n","name":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Codebase","type":"code","url":"https://github.com/Tobias-Fischer/rt_gene","image":"repo:/assets/system_overview.jpg","image_fit":"contain","id":"rt_gene_code","_images":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"src":"/content/rt-gene/rt-gene-code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/seq2single_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/seq2single_code.json new file mode 100644 index 0000000000..5c5ff9f77b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/seq2single_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Look No Deeper: Recognizing Places from Opposing Viewpoints under Varying Scene Appearance using Single-View Depth Estimation

\n

This is the source code for the paper titled: \"Look No Deeper: Recognizing Places from Opposing Viewpoints under Varying Scene Appearance using Single-View Depth Estimation\", [arXiv][IEEE Xplore].

\n

If you find this work useful, please cite it as:\nGarg, S., Babu V, M., Dharmasiri, T., Hausler, S., Suenderhauf, N., Kumar, S., Drummond, T., & Milford, M. (2019). Look no deeper: Recognizing places from opposing viewpoints under varying scene appearance using single-view depth estimation. In IEEE International Conference on Robotics and Automation (ICRA), 2019. IEEE.

\n

bibtex:

\n
@inproceedings{garg2019look,\ntitle={Look No Deeper: Recognizing Places from Opposing Viewpoints under Varying Scene Appearance using Single-View Depth Estimation},\nauthor={Garg, Sourav and Babu V, Madhu and Dharmasiri, Thanuja and Hausler, Stephen and Suenderhauf, Niko and Kumar, Swagat and Drummond, Tom and Milford, Michael},\nbooktitle={IEEE International Conference on Robotics and Automation (ICRA)},\nyear={2019}\n}\n
\n

\"Illustration

\n

\"An

\n

Requirements

\n\n

Optionally, for vis_results.ipynb:

\n\n

Download an example dataset and its pre-computed representations

\n
    \n
  1. \n

    In seq2single/precomputed/, download pre-computed representations (~10 GB). Please refer to the seq2single/precomputed/readme.md for instructions on how to compute these representations.

    \n
  2. \n
  3. \n

    [Optional] In seq2single/images/, download images (~1 GB). These images are a subset of two different traverses from the Oxford Robotcar dataset.

    \n
  4. \n
\n

(Note: These download links from Mega.nz require you to first create an account (free))

\n

Run

\n
    \n
  1. The Jupyter notebook seq2single.ipynb first loads the pre-computed global image descriptors to find top matches. These matches are then re-ranked with the proposed method using the pre-computed depth masks and dense conv5 features (a generic sketch of the descriptor-matching step is shown after this list).
  2. \n
\n
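
The sketch below illustrates only that first descriptor-matching step in a generic way (it is not the notebook's code): with L2-normalised global descriptors, the top candidates are simply the reference images with the highest cosine similarity, which the proposed method then re-ranks using the depth masks and dense conv5 features.

\n
import numpy as np\n\n# Illustrative only: top-k retrieval from L2-normalised global descriptors\ndef top_k_matches(query_desc, ref_descs, k=5):\n    sims = ref_descs @ query_desc        # cosine similarity for unit-norm vectors\n    return np.argsort(-sims)[:k]         # indices of the k best reference images\n\nref = np.random.rand(100, 4096)\nref /= np.linalg.norm(ref, axis=1, keepdims=True)\nquery = np.random.rand(4096)\nquery /= np.linalg.norm(query)\nprint(top_k_matches(query, ref))         # candidate indices to be re-ranked\n
\n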

License

\n

The code is released under MIT License.

\n

Related Projects

\n

Delta Descriptors (2020)

\n

CoarseHash (2020)

\n

LoST (2018)

\n","name":"seq2single","type":"code","url":"https://github.com/oravus/seq2single","id":"seq2single_code","image":"gitPics/illustration.png","_images":["/_next/static/images/illustration-73bec1a3cac56819cdbea1268b711fa4.png.webp","/_next/static/images/illustration-1e185173132d7d8138449660ac905c04.png"],"src":"/content/visual_place_recognition/seq2single.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/seqnet_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/seqnet_code.json new file mode 100644 index 0000000000..a96bd22643 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/seqnet_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

SeqNet: Learning Descriptors for Sequence-Based Hierarchical Place Recognition

\n

[ArXiv+Supplementary] [IEEE Xplore RA-L 2021] [ICRA 2021 YouTube Video]

\n

and

\n

SeqNetVLAD vs PointNetVLAD: Image Sequence vs 3D Point Clouds for Day-Night Place Recognition

\n

[ArXiv] [CVPR 2021 Workshop 3DVR]

\n

\n \"\"\n
Sequence-Based Hierarchical Visual Place Recognition.\n

\n

News:

\n

Jan 18, 2022 : MSLS training setup included.

\n

Jan 07, 2022 : Single Image Vanilla NetVLAD feature extraction enabled.

\n

Oct 13, 2021 : Oxford & Brisbane Day-Night pretrained models download link.

\n

Aug 03, 2021 : Added Oxford dataset files and a direct link to download the Nordland dataset.

\n

Jun 23, 2021: CVPR 2021 Workshop 3DVR paper, \"SeqNetVLAD vs PointNetVLAD\", now available on arXiv.

\n

Setup

\n

Conda

\n
conda create -n seqnet numpy pytorch=1.8.0 torchvision tqdm scikit-learn faiss tensorboardx h5py -c pytorch -c conda-forge\n
\n

Download

\n

Run bash download.sh to download single image NetVLAD descriptors (3.4 GB) for the Nordland-clean dataset [a] and the Oxford dataset (0.3 GB), and Nordland-trained model files (1.5 GB) [b]. Other pre-trained models for Oxford and Brisbane Day-Night can be downloaded from here.

\n

Run

\n

Train

\n

To train sequential descriptors through SeqNet on the Nordland dataset:

\n
python main.py --mode train --pooling seqnet --dataset nordland-sw --seqL 10 --w 5 --outDims 4096 --expName \"w5\"\n
\n

or the Oxford dataset (set --dataset oxford-pnv for pointnetvlad-like data split as described in the CVPR 2021 Workshop paper):

\n
python main.py --mode train --pooling seqnet --dataset oxford-v1.0 --seqL 5 --w 3 --outDims 4096 --expName \"w3\"\n
\n

or the MSLS dataset (specifying --msls_trainCity and --msls_valCity as default values):

\n
python main.py --mode train --pooling seqnet --dataset msls --msls_trainCity melbourne --msls_valCity austin --seqL 5 --w 3 --outDims 4096 --expName \"msls_w3\"\n
\n

To train transformed single descriptors through SeqNet:

\n
python main.py --mode train --pooling seqnet --dataset nordland-sw --seqL 1 --w 1 --outDims 4096 --expName \"w1\"\n
\n

Test

\n

On the Nordland dataset:

\n
python main.py --mode test --pooling seqnet --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-22-44_l10_w5/ \n
\n

On the MSLS dataset (can change --msls_valCity to melbourne or austin too):

\n
python main.py --mode test --pooling seqnet --dataset msls --msls_valCity amman --seqL 5 --split test --resume ./data/runs/<modelName>/\n
\n

The above will reproduce results for SeqNet (S5) as per Supp. Table III on Page 10.

\n
\n [Expand this] To obtain other results from the same table in the paper, expand this. \n
# Raw Single (NetVLAD) Descriptor\npython main.py --mode test --pooling single --dataset nordland-sf --seqL 1 --split test\n\n# SeqNet (S1)\npython main.py --mode test --pooling seqnet --dataset nordland-sf --seqL 1 --split test --resume ./data/runs/Jun03_15-07-46_l1_w1/\n\n# Raw + Smoothing\npython main.py --mode test --pooling smooth --dataset nordland-sf --seqL 5 --split test\n\n# Raw + Delta\npython main.py --mode test --pooling delta --dataset nordland-sf --seqL 5 --split test\n\n# Raw + SeqMatch\npython main.py --mode test --pooling single+seqmatch --dataset nordland-sf --seqL 5 --split test\n\n# SeqNet (S1) + SeqMatch\npython main.py --mode test --pooling s1+seqmatch --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-07-46_l1_w1/\n\n# HVPR (S5 to S1)\n# Run S5 first and save its predictions by specifying `resultsPath`\npython main.py --mode test --pooling seqnet --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-22-44_l10_w5/ --resultsPath ./data/results/\n# Now run S1 + SeqMatch using results from above (the timestamp of `predictionsFile` would be different in your case)\npython main.py --mode test --pooling s1+seqmatch --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-07-46_l1_w1/ --predictionsFile ./data/results/Jun03_16-07-36_l5_0.npz\n\n
\n
\n

Single Image Vanilla NetVLAD Extraction

\n
\n [Expand this] To obtain the single image vanilla NetVLAD descriptors (i.e. the provided precomputed .npy descriptors) \n
# Setup Patch-NetVLAD submodule from the seqNet repo:\ncd seqNet \ngit submodule update --init\n\n# Download NetVLAD+PCA model\ncd thirdparty/Patch-NetVLAD/patchnetvlad/pretrained_models\nwget -O pitts_orig_WPCA4096.pth.tar https://cloudstor.aarnet.edu.au/plus/s/gJZvogRj4FUUQMy/download\n\n# Compute global descriptors\ncd ../../../Patch-NetVLAD/\npython feature_extract.py --config_path patchnetvlad/configs/seqnet.ini --dataset_file_path ../../structFiles/imageNamesFiles/oxford_2014-12-16-18-44-24_imagenames_subsampled-2m.txt --dataset_root_dir <PATH_TO_OXFORD_IMAGE_DIR> --output_features_fullpath ../../data/descData/netvlad-pytorch/oxford_2014-12-16-18-44-24_stereo_left.npy\n\n# example for MSLS (replace 'database' with 'query' and use different city names to compute all)\npython feature_extract.py --config_path patchnetvlad/configs/seqnet.ini --dataset_file_path ../../structFiles/imageNamesFiles/msls_melbourne_database_imageNames.txt --dataset_root_dir <PATH_TO_Mapillary_Street_Level_Sequences> --output_features_fullpath ../../data/descData/netvlad-pytorch/msls_melbourne_database.npy\n
\n
\n

Acknowledgement

\n

The code in this repository is based on Nanne/pytorch-NetVlad. Thanks to Tobias Fischer for his contributions to this code during the development of our project QVPR/Patch-NetVLAD.

\n

Citation

\n
@article{garg2021seqnet,\n  title={SeqNet: Learning Descriptors for Sequence-based Hierarchical Place Recognition},\n  author={Garg, Sourav and Milford, Michael},\n  journal={IEEE Robotics and Automation Letters},\n  volume={6},\n  number={3},\n  pages={4305-4312},\n  year={2021},\n  publisher={IEEE},\n  doi={10.1109/LRA.2021.3067633}\n}\n\n@misc{garg2021seqnetvlad,\n  title={SeqNetVLAD vs PointNetVLAD: Image Sequence vs 3D Point Clouds for Day-Night Place Recognition},\n  author={Garg, Sourav and Milford, Michael},\n  howpublished={CVPR 2021 Workshop on 3D Vision and Robotics (3DVR)},\n  month={Jun},\n  year={2021},\n}\n
\n

Other Related Projects

\n

SeqMatchNet (2021);\nPatch-NetVLAD (2021);\nDelta Descriptors (2020);\nCoarseHash (2020);\nseq2single (2019);\nLoST (2018)

\n

[a] This is the clean version of the dataset that excludes images from the tunnels and red lights and can be downloaded from here.

\n

[b] These will automatically save to ./data/, you can modify this path in download.sh and get_datasets.py to specify your workdir.

\n","name":"SeqNet","type":"code","url":"https://github.com/oravus/seqNet","id":"seqnet_code","image":"./assets/seqnet.jpg","_images":["/_next/static/images/seqnet-cfc1aecd3cd2b268af41400a4fb86e6a.jpg.webp","/_next/static/images/seqnet-69de71978f2b7f0ffbcefcbb976010d3.jpg"],"src":"/content/visual_place_recognition/seqnet.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/spatialmath-python.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/spatialmath-python.json new file mode 100644 index 0000000000..48c9a20cdc --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/spatialmath-python.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Spatial Maths for Python

\n

\"A\n\"QUT

\n

\"PyPI\n\"Anaconda\n\"Python\n\"License:

\n

\"Build\n\"Coverage\"\n\"PyPI\n\"GitHub

\n\n\n\n\n\n
\n\"\"\nA Python implementation of the Spatial Math Toolbox for MATLAB®\n\n
\n

Spatial mathematics capability underpins all of robotics and robotic vision where we need to describe the position, orientation or pose of objects in 2D or 3D spaces.

\n

What it does

\n

The package provides classes to represent pose and orientation in 3D and 2D\nspace:

Represents | in 3D | in 2D
pose | SE3 Twist3 UnitDualQuaternion | SE2 Twist2
orientation | SO3 UnitQuaternion | SO2
\n

More specifically:

\n\n

These classes provide convenience and type safety, as well as methods and overloaded operators to support:

\n\n

These are layered over a set of base functions that perform many of the same operations but represent data explicitly in terms of numpy arrays.

\n

The class, method and functions names largely mirror those of the MATLAB toolboxes, and the semantics are quite similar.

\n

\"trplot\"

\n

\n

Citing

\n

Check out our ICRA 2021 paper on IEEE Xplore or get the PDF from Peter's website. This describes the Robotics Toolbox for Python as well as Spatial Maths.

\n

If the toolbox helped you in your research, please cite

\n
@inproceedings{rtb,\n  title={Not your grandmother’s toolbox--the Robotics Toolbox reinvented for Python},\n  author={Corke, Peter and Haviland, Jesse},\n  booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},\n  pages={11357--11363},\n  year={2021},\n  organization={IEEE}\n}\n
\n
\n

\n

Using the Toolbox in your Open Source Code?

\n

If you are using the Toolbox in your open source code, feel free to add our badge to your readme!

\n

\"Powered

\n

Simply copy the following

\n
[![Powered by the Spatial Math Toolbox](https://github.com/bdaiinstitute/spatialmath-python/raw/master/.github/svg/sm_powered.min.svg)](https://github.com/bdaiinstitute/spatialmath-python)\n
\n

Installation

\n

Using pip

\n

Install a snapshot from PyPI

\n
pip install spatialmath-python\n
\n

From GitHub

\n

Install the current code base from GitHub and pip install a link to that cloned copy

\n
git clone https://github.com/bdaiinstitute/spatialmath-python.git\ncd spatialmath-python\npip install -e .\n# Optional: if you would like to contribute and commit code changes to the repository,\n# pre-commit install\n
\n

Dependencies

\n

numpy, scipy, matplotlib, ffmpeg (if rendering animations as a movie)

\n

Examples

\n

High-level classes

\n

These classes abstract the low-level numpy arrays into objects that obey the rules associated with the mathematical groups SO(2), SE(2), SO(3), SE(3) as well as twists and quaternions.

\n

Using classes ensures type safety, for example it stops us mixing a 2D homogeneous transformation with a 3D rotation matrix -- both of which are 3x3 matrices. It also ensures that the internal matrix representation is always a valid member of the relevant group.

\n

For example, creating an object representing a rotation of 0.3 radians about the x-axis is simply

\n
>>> from spatialmath import SO3, SE3\n>>> R1 = SO3.Rx(0.3)\n>>> R1\n   1         0         0          \n   0         0.955336 -0.29552    \n   0         0.29552   0.955336         \n
\n

while a rotation of 30 deg about the z-axis is

\n
>>> R2 = SO3.Rz(30, 'deg')\n>>> R2\n   0.866025 -0.5       0          \n   0.5       0.866025  0          \n   0         0         1    \n
\n

and the composition of these two rotations is

\n
>>> R = R1 * R2\n   0.866025 -0.5       0          \n   0.433013  0.75     -0.5        \n   0.25      0.433013  0.866025 \n
\n

We can find the corresponding Euler angles (in radians)

\n
>> R.eul()\narray([-1.57079633,  0.52359878,  2.0943951 ])\n
\n

Frequently in robotics we want a sequence, a trajectory, of rotation matrices or poses. These pose classes inherit capability from the list class

\n
>>> R = SO3()   # the null rotation or identity matrix\n>>> R.append(R1)\n>>> R.append(R2)\n>>> len(R)\n 3\n>>> R[1]\n   1         0         0          \n   0         0.955336 -0.29552    \n   0         0.29552   0.955336             \n
\n

and this can be used in for loops and list comprehensions.

\n
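
For instance, continuing with the three-element R built above:

\n
>>> angles = [x.eul() for x in R]   # one set of Euler angles per stored rotation\n>>> len(angles)\n 3\n
\n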

An alternative way of constructing this would be (R1, R2 defined above)

\n
>>> R = SO3( [ SO3(), R1, R2 ] )       \n>>> len(R)\n 3\n
\n

Many of the constructors such as .Rx, .Ry and .Rz support vectorization

\n
>>> R = SO3.Rx( np.arange(0, 2*np.pi, 0.2))\n>>> len(R)\n 32\n
\n

which has created, in a single line, a list of rotation matrices.

\n

Vectorization also applies to the operators, for instance

\n
>>> A = R * SO3.Ry(0.5)\n>>> len(R)\n 32\n
\n

will produce a result where each element is the product of each element of the left-hand side with the right-hand side, i.e. R[i] * SO3.Ry(0.5).

\n

Similarly

\n
>>> A = SO3.Ry(0.5) * R \n>>> len(R)\n 32\n
\n

will produce a result where each element is the product of the left-hand side with each element of the right-hand side, i.e. SO3.Ry(0.5) * R[i].

\n

Finally

\n
>>> A = R * R \n>>> len(R)\n 32\n
\n

will produce a result where each element is the product of the corresponding elements of the left-hand and right-hand sides, i.e. R[i] * R[i].

\n

The underlying representation of these classes is a numpy matrix, but the class ensures that the structure of that matrix is valid for the particular group represented: SO(2), SE(2), SO(3), SE(3). Any operation that is not valid for the group will return a matrix rather than a pose class, for example

\n
>>> SO3.Rx(0.3) * 2\narray([[ 2.        ,  0.        ,  0.        ],\n       [ 0.        ,  1.91067298, -0.59104041],\n       [ 0.        ,  0.59104041,  1.91067298]])\n\n>>> SO3.Rx(0.3) - 1\narray([[ 0.        , -1.        , -1.        ],\n       [-1.        , -0.04466351, -1.29552021],\n       [-1.        , -0.70447979, -0.04466351]])\n
\n

We can print and plot these objects as well

\n
>>> T = SE3(1,2,3) * SE3.Rx(30, 'deg')\n>>> T.print()\n   1         0         0         1          \n   0         0.866025 -0.5       2          \n   0         0.5       0.866025  3          \n   0         0         0         1          \n\n>>> T.printline()\nt =        1,        2,        3; rpy/zyx =       30,        0,        0 deg\n\n>>> T.plot()\n
\n

\"trplot\"

\n

printline is a compact single-line format for tabular listing, whereas print shows the underlying matrix and, for consoles that support it, colorises the output, with rotational elements in red and translational elements in blue.

\n

For more detail checkout the shipped Python notebooks:

\n\n

You can browse it statically through the links above, or clone the toolbox and run them interactively using Jupyter or JupyterLab.

\n

Low-level spatial math

\n

Import the low-level transform functions

\n
>>> from spatialmath.base import *\n
\n

We can create a 3D rotation matrix

\n
>>> rotx(0.3)\narray([[ 1.        ,  0.        ,  0.        ],\n       [ 0.        ,  0.95533649, -0.29552021],\n       [ 0.        ,  0.29552021,  0.95533649]])\n\n>>> rotx(30, unit='deg')\narray([[ 1.       ,  0.       ,  0.       ],\n       [ 0.       ,  0.8660254, -0.5      ],\n       [ 0.       ,  0.5      ,  0.8660254]])\n
\n

The results are numpy arrays so to perform matrix multiplication you need to use the @ operator, for example

\n
rotx(0.3) @ roty(0.2)\n
\n

We also support multiple ways of passing vector information to functions that require it:

\n\n
transl2(1, 2)\narray([[1., 0., 1.],\n       [0., 1., 2.],\n       [0., 0., 1.]])\n
\n\n
transl2( [1,2] )\narray([[1., 0., 1.],\n       [0., 1., 2.],\n       [0., 0., 1.]])\n\ntransl2( (1,2) )\nOut[444]: \narray([[1., 0., 1.],\n       [0., 1., 2.],\n       [0., 0., 1.]])\n
\n\n
transl2( np.array([1,2]) )\nOut[445]: \narray([[1., 0., 1.],\n       [0., 1., 2.],\n       [0., 0., 1.]])\n
\n

There is a single module that deals with quaternions, unit or not, and the representation is a numpy array of four elements. As above, functions can accept the numpy array, a list, dict or numpy row or column vectors.

\n
>>> from spatialmath.base.quaternion import *\n>>> q = qqmul([1,2,3,4], [5,6,7,8])\n>>> q\narray([-60,  12,  30,  24])\n>>> qprint(q)\n-60.000000 < 12.000000, 30.000000, 24.000000 >\n>>> qnorm(q)\n72.24956747275377\n
\n

Graphics

\n

\"trplot\"

\n

The functions support various plotting styles

\n
trplot( transl(1,2,3), frame='A', rviz=True, width=1, dims=[0, 10, 0, 10, 0, 10])\ntrplot( transl(3,1, 2), color='red', width=3, frame='B')\ntrplot( transl(4, 3, 1)@trotx(math.pi/3), color='green', frame='c', dims=[0,4,0,4,0,4])\n
\n

Animation is straightforward

\n
tranimate(transl(4, 3, 4)@trotx(2)@troty(-2), frame='A', arrow=False, dims=[0, 5], nframes=200)\n
\n

and it can be saved to a file by

\n
tranimate(transl(4, 3, 4)@trotx(2)@troty(-2), frame='A', arrow=False, dims=[0, 5], nframes=200, movie='out.mp4')\n
\n

\n

At the moment we can only save as an MP4, but the following incantation will convert that to an animated GIF for embedding in web pages

\n
ffmpeg -i out -r 20 -vf \"fps=10,scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse\" out.gif\n
\n

For use in a Jupyter notebook, or on Colab, you can display an animation by

\n
from IPython.core.display import HTML\nHTML(tranimate(transl(4, 3, 4)@trotx(2)@troty(-2), frame='A', arrow=False, dims=[0, 5], nframes=200, movie=True))\n
\n

The movie=True option causes tranimate to output an HTML5 fragment which\nis displayed inline by the HTML function.

\n

Symbolic support

\n

Some functions have support for symbolic variables, for example

\n
import sympy as sym\n\ntheta = sym.symbols('theta')\nT = rotx(theta)\nprint(T)\n[[1 0 0]\n [0 cos(theta) -sin(theta)]\n [0 sin(theta) cos(theta)]]\n
\n

The resulting numpy array is an array of symbolic objects not numbers – the constants are also symbolic objects. You can read the elements of the matrix

\n
a = T[0,0]\n\na\nOut[258]: 1\n\ntype(a)\nOut[259]: int\n\na = T[1,1]\na\nOut[256]: \ncos(theta)\ntype(a)\nOut[255]: cos\n
\n

We see that the symbolic constants are converted back to Python numeric types on read.

\n

Similarly when we assign an element or slice of the symbolic matrix to a numeric value, they are converted to symbolic constants on the way in.

\n

History & Contributors

\n

This package was originally created by Peter Corke and Jesse Haviland and was inspired by the Spatial Math Toolbox for MATLAB. It supports the textbook Robotics, Vision & Control in Python 3e.

\n

The package is now a collaboration with Boston Dynamics AI Institute.

\n","name":"Spatialmath Python","type":"code","url":"https://github.com/petercorke/spatialmath-python","image":"repo:/docs/figs/CartesianSnakes_LogoW.png","image_fit":"contain","_images":["/_next/static/images/CartesianSnakes_LogoW-7d2f987ca5432e1ce32ce72e90be7c64.png.webp","/_next/static/images/CartesianSnakes_LogoW-d72d60a588449aa6a08846bed694c0c9.png"],"src":"/content/robotics_toolbox/spatialmath-python.md","id":"spatialmath-python","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/swift.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/swift.json new file mode 100644 index 0000000000..361f3ac22a --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/swift.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Swift

\n

\"A\n\"QUT

\n

\"PyPI\n\"PyPI\n\"License:

\n

Swift is a light-weight browser-based simulator built on top of the Robotics Toolbox for Python. This simulator provides robotics-specific functionality for rapid prototyping of algorithms, research, and education. Built using Python and Javascript, Swift is cross-platform (Linux, MacOS, and Windows) while also leveraging the ubiquity and support of these languages.

\n

Through the Robotics Toolbox for Python, Swift can visualise over 30 supplied robot models: well-known contemporary robots from Franka-Emika, Kinova, Universal Robotics, Rethink as well as classical robots such as the Puma 560 and the Stanford arm. Swift is under development and will support mobile robots in the future.

\n

Swift provides:

\n\n

Installing

\n

Using pip

\n

Swift is designed to be controlled through the Robotics Toolbox for Python. By installing the toolbox through PyPI, Swift is installed as a dependency

\n
pip3 install roboticstoolbox-python\n
\n

Otherwise, Swift can be installed by

\n
pip3 install swift-sim\n
\n

Available options are:

\n\n

Put the options in a comma-separated list like

\n
pip3 install swift-sim[optionlist]\n
\n

From GitHub

\n

To install the latest version from GitHub

\n
git clone https://github.com/jhavl/swift.git\ncd swift\npip3 install -e .\n
\n

Code Examples

\n

Robot Plot

\n

We will load a model of the Franka-Emika Panda robot and plot it. We set the robot's joint angles to the ready joint configuration qr.

\n
import roboticstoolbox as rp\n\npanda = rp.models.Panda()\npanda.plot(q=panda.qr)\n
\n

\n \"\"\n

\n

Resolved-Rate Motion Control

\n

We will load a model of the Franka-Emika Panda robot and make it travel towards a goal pose defined by the variable Tep.

\n
import roboticstoolbox as rtb\nimport spatialmath as sm\nimport numpy as np\nfrom swift import Swift\n\n\n# Make an instance of the Swift simulator and open it\nenv = Swift()\nenv.launch(realtime=True)\n\n# Make a panda model and set its joint angles to the ready joint configuration\npanda = rtb.models.Panda()\npanda.q = panda.qr\n\n# Set a desired end-effector pose as an offset from the current end-effector pose\nTep = panda.fkine(panda.q) * sm.SE3.Tx(0.2) * sm.SE3.Ty(0.2) * sm.SE3.Tz(0.45)\n\n# Add the robot to the simulator\nenv.add(panda)\n\n# Simulate the robot while it has not arrived at the goal\narrived = False\nwhile not arrived:\n\n    # Work out the required end-effector velocity to go towards the goal\n    v, arrived = rtb.p_servo(panda.fkine(panda.q), Tep, 1)\n    \n    # Set the Panda's joint velocities\n    panda.qd = np.linalg.pinv(panda.jacobe(panda.q)) @ v\n    \n    # Step the simulator by 50 milliseconds\n    env.step(0.05)\n
\n

\n \n

\n

Embed within a Jupyter Notebook

\n

To embed within a Jupyter Notebook Cell, use the browser=\"notebook\" option when launching the simulator.

\n
# Try this example within a Jupyter Notebook Cell!\nimport roboticstoolbox as rtb\nimport spatialmath as sm\nimport numpy as np\nfrom swift import Swift\n\n# Make an instance of the Swift simulator and open it\nenv = Swift()\nenv.launch(realtime=True, browser=\"notebook\")\n\n# Make a panda model and set its joint angles to the ready joint configuration\npanda = rtb.models.Panda()\npanda.q = panda.qr\n\n# Set a desired end-effector pose as an offset from the current end-effector pose\nTep = panda.fkine(panda.q) * sm.SE3.Tx(0.2) * sm.SE3.Ty(0.2) * sm.SE3.Tz(0.45)\n\n# Add the robot to the simulator\nenv.add(panda)\n\n# Simulate the robot while it has not arrived at the goal\narrived = False\nwhile not arrived:\n\n    # Work out the required end-effector velocity to go towards the goal\n    v, arrived = rtb.p_servo(panda.fkine(panda.q), Tep, 1)\n    \n    # Set the Panda's joint velocities\n    panda.qd = np.linalg.pinv(panda.jacobe(panda.q)) @ v\n    \n    # Step the simulator by 50 milliseconds\n    env.step(0.05)\n
\n","name":"Swift","type":"code","url":"https://github.com/jhavl/swift","image":"repo:/.github/figures/panda.png","_images":["/_next/static/images/panda-f1735ad2d702ae9c686b2f0e727e9941.png.webp","/_next/static/images/panda-c3722217e520e43c10f1bc26fffcd0fd.png"],"src":"/content/robotics_toolbox/swift.md","id":"swift","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/teach_repeat.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/teach_repeat.json new file mode 100644 index 0000000000..a7a343587e --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/teach_repeat.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Fast and Robust Bio-inspired Teach and Repeat Navigation

\n

\"arXiv\"\n\"arXiv\"\n\"License:\n\"stars\"\n\"GitHub\n\"GitHub\n\"QUT

\n

This repository contains code for a low compute teach and repeat navigation approach which only requires monocular vision and wheel odometry. Teach the robot a route by teleoperation, then the robot will be able to repeat it - robust to lighting variation and moderate environmental changes. For full details see our IROS2021 paper, available on arXiv. You can view the conference presentation here as well as other multimedia material and a full 550 metre outdoor run.

\n

\n

License and attribution

\n

If you use the code in this repository, please cite our paper. The code is available under the BSD-2-Clause License.

\n
@inproceedings{dallostoFastRobustBioinspired2021,\n      title = {Fast and {{Robust Bio-inspired Teach}} and {{Repeat Navigation}}},\n      booktitle = {2021 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}})},\n      author = {Dall'Osto, Dominic and Fischer, Tobias and Milford, Michael},\n      year = {2021},\n      month = sep,\n      pages = {500--507},\n      publisher = {{IEEE}},\n      address = {{Prague, Czech Republic}},\n      doi = {10.1109/IROS51168.2021.9636334},\n}\n
\n

Setup and use

\n

This approach can be used with any mobile robot with a monocular camera and odometry source.

\n

For the teach run, run both the data_collect.py and data_save.py nodes. Teleoperate the robot along the desired route and the teach run (odometry poses and images) will be recorded to a specified folder.

\n

For the repeat run, use image_matcher.py and localiser.py. The localiser will publish Goal messages on the topic goal, containing a goal to navigate to in the robot's odometry frame. An example drive_to_pose_controller is used here, but can be replaced with another controller as required.

\n

In both cases, remap the odom and image topics to those provided by the robot. Note that the published odometry must also contain an integrated pose estimate.

\n

Essential parameters for these nodes are shown below. Other parameters exist to save additional diagnostic data, or to wait for a ready signal before starting (for example, if the robot needs to run a setup procedure). These are documented in the nodes, and example usage is shown in the provided launch files.

\n

Global parameters

Parameter | Description | Default Value
/data_load_dir | directory in which the teach runs are saved | ~/miro/data
/data_save_dir | directory in which to save the results of a repeat run | ~/miro/data/follow-straight_tests/5
/image_resize_width | width to resize images to before comparison | 115
/image_resize_height | height to resize images to before comparison | 44
/patch_size | patch size to use for patch normalisation | (9,9)
/goal_pose_separation | distance between goals, should match ~distance_threshold in data_collect.py | 0.2
/image_field_of_view_width_deg | horizontal field of view of images (degrees) | 175.2
/wait_for_ready | whether the localiser waits for a service signal 'ready_localiser' before starting, allowing robot initialisation | false
\n

Parameters for data_collect.py

Parameter | Description | Example Value
~distance_threshold | distance (metres) travelled from the previous pose after which a new pose is stored in the teach map | 0.2
~angle_threshold_deg | angular distance (degrees) travelled from the previous pose after which a new pose is stored in the teach map | 15.0
\n

Parameters for data_save.py

Parameter | Description | Example Value
~save_dir | directory in which to save the teach run | ~/miro/data
~timestamp_folder | whether to timestamp the folder name of the teach run, so multiple runs can be performed without overwriting | true
\n

Parameters for localiser.py

Parameter | Description | Default Value
~rotation_correction_gain | proportional gain term to use for rotation corrections, $K_\\theta$, shouldn't need to be tuned | 0.01
~path_correction_gain | proportional gain term to use for along-path corrections, $K_p$, shouldn't need to be tuned | 0.01
~stop_at_end | whether the robot should stop at the end of the route, otherwise it assumes the route is circular and restarts from the beginning | true
~discrete-correction | reduce compute by only performing a correction at each goal pose, not continually | false
~search-range | how many teach images to search either side of the current one to perform along-path correction | 1
~global_localisation_init | when initialising, find the closest matching teach image to the current one and start the route from there, otherwise start at the first goal | false
~min_init_correlation | minimum correlation with a teach image at initialisation, otherwise the robot thinks it's not on the path and doesn't start repeating | 0.0
\n

Overview of approach

\n

Teach run

\n

First, the robot needs to be taught a route via teleoperation. At regular distance intervals along the path, the dead-reckoning position and an image are saved, resulting in a topometric map of the route. Images are patch normalised to increase robustness to lighting variation.

\n
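
As a rough sketch of what patch normalisation does (one common block-based variant, not necessarily the exact implementation used in this package), each patch of the resized image is shifted and scaled to zero mean and unit variance:

\n
import numpy as np\n\n# Illustrative block-based patch normalisation; the sizes mirror the\n# /image_resize_* and /patch_size parameters listed above\ndef patch_normalise(img, patch=(9, 9)):\n    out = np.zeros(img.shape, dtype=np.float32)\n    ph, pw = patch\n    for y in range(0, img.shape[0], ph):\n        for x in range(0, img.shape[1], pw):\n            block = img[y:y + ph, x:x + pw].astype(np.float32)\n            out[y:y + ph, x:x + pw] = (block - block.mean()) / (block.std() + 1e-6)\n    return out\n\nresized = np.random.randint(0, 255, (44, 115))   # height x width after resizing\nprint(patch_normalise(resized).shape)\n
\n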

\n

Repeat run

\n

Having learnt a route, the robot can robustly repeat it. The robot initially follows the sequence of odometry poses stored during the teach run, but errors accumulate in this approach over time. Images are compared between the teach and repeat routes to make corrections to the route.

\n

Correction overview

\n

Both rotational and lateral path errors result in horizontal image offsets that can't be distinguished, but this is not a problem because both require the same correction response. However, moving along the path can also cause horizontal image offsets. These must be accounted for by interpolating between the previous and next goal images.

\n

\n

Orientation correction

\n

If an orientation error is detected by comparing teach and repeat images, an associated path correction is performed, modulated by a constant gain factor. This correction causes the robot to steer back onto the path.

\n

\n

Along-path correction

\n

Repeat images are compared to teach images within a certain search range of the current goal. If correlation values are stronger for images ahead of or behind the robot's current estimated position, an along-path correction is performed. In this case, the goal is pulled towards the robot so it will be reached faster, allowing the estimated position to \"catch up\" to the real position.

\n

\n

Examples of running teach and repeat for the robots we used

\n

Running teach on Miro

\n\n

Running repeat on Miro

\n\n

Running teach on Jackal

\n\n

Running repeat on Jackal

\n\n

Running Bearnav repeat on Jackal

\n\n","name":"Visual Teach and Repeat","type":"code","url":"https://github.com/QVPR/teach-repeat","id":"teach_repeat","image":"assets/outdoor-run.gif","_images":["/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webm","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.mp4","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webp","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.jpg"],"src":"/content/visual_place_recognition/teach-repeat.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/topometric_localization.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/topometric_localization.json new file mode 100644 index 0000000000..986aa745a6 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/topometric_localization.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

CODE IS NOT READY FOR RELEASE, WILL BE CLEANING UP IN COMING WEEKS

\n

This repository contains code related to the following paper - please cite it if you use this code:

\n
@article{xu2021probabilistic,\n  title={Probabilistic Appearance-Invariant Topometric Localization with New Place Awareness},\n  author={Xu, Ming and Fischer, Tobias and S{\\\"u}nderhauf, Niko and Milford, Michael},\n  journal={IEEE Robotics and Automation Letters},\n  volume={6},\n  number={4},\n  pages={6985--6992},\n  year={2021}\n}\n
\n

Probabilistic Topometric Localization

\n

Run the steps in the following order to get things working.

\n

1. Setup

\n

Initial setup

\n

Clone the repo and set up a virtual environment. OpenVINO only works with Python 3.7, so run

\n
conda create -n topometricloc python=3.7\nconda activate topometricloc\n
\n

Then run

\n
cd TopometricLoc\nsh setup.sh\n
\n

This script installs this repository's Python package along with its dependencies. It will also download model weights for feature extraction (HF-Net with OpenVINO) and ask you to enter the directories where data is stored (DATA_DIR) and results are written (RESULTS_DIR). The directories entered are stored in the topometricloc/settings.py file and used as global variables by scripts in this repo.

\n
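
Downstream scripts can then read those directories from the generated settings module, along the lines of this trivial sketch (assuming setup.sh has been run):

\n
# DATA_DIR and RESULTS_DIR are the paths entered during setup.sh\nfrom topometricloc.settings import DATA_DIR, RESULTS_DIR\n\nprint('data in:', DATA_DIR)\nprint('results in:', RESULTS_DIR)\n
\n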

2. Data Format

\n

To use this code for any dataset you like, simply adhere to the following data format.

\n

You will require a set of images with timestamps or frame order for filenames (sans file extension), corresponding global image descriptors for each image (e.g. NetVLAD, HF-Net), ground truth poses for each image (e.g. GPS) and finally odometry estimates between adjacent images.

\n

The base directory for all data is the DATA_DIR directory. We assume the data is presented as a set of traverses, with each traverse occupying its own folder in DATA_DIR. An example valid directory structure is given as follows:

\n
----\n|-- DATA_DIR\n|   |-- <traverse_1>\n|   |   |-- images\n|   |   |   |-- 0001.png\n|   |   |   |-- ...\n|   |   |   |-- 1000.png\n|   |   |-- features\n|   |   |   |-- 0001.npy\n|   |   |   |-- ...\n|   |   |   |-- 1000.npy\n|   |   |-- camera_poses.csv\n|   |   |-- odometry.csv\n|   |-- ...\n|   |-- <traverse_5>\n|   |   |-- images\n|   |   |   |-- 0001.png\n|   |   |   |-- ...\n|   |   |   |-- 0500.png\n|   |   |-- features\n|   |   |   |-- 0001.npy\n|   |   |   |-- ...\n|   |   |   |-- 0500.npy\n|   |   |-- camera_poses.csv\n|   |   |-- odometry.csv\n
\n

Raw Images

\n

For a given traverse, raw images are stored in DATA_DIR/<traverse_name>/images/ with arbitrary filename extensions. We also assume each image name is a numeric identifier (at least castable to an int!) which describes the order in which the images were captured (e.g. a timestamp). An example of a valid filename is 00001.png.

\n
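
For example, frame order is recovered by casting the filename stem to an int (a minimal illustration):

\n
import os\n\n# Order by the numeric identifier (e.g. a timestamp), not by string comparison\nnames = ['10.png', '2.png', '1.png']\nordered = sorted(names, key=lambda f: int(os.path.splitext(f)[0]))\nprint(ordered)   # ['1.png', '2.png', '10.png']\n
\n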

Image features/descriptors

\n

Global features/descriptors are stored in DATA_DIR/<traverse_name>/features/ as .npy files. Note, for a given traverse, each image in the images/ folder MUST have a corresponding feature. For example, 00001.png must have a corresponding feature 00001.npy in the features/ directory. Features are assumed to be stored as a 1D numpy array with shape (D,), e.g. (4096,) for vanilla NetVLAD and HF-Net.

\n
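
For example, writing and checking a descriptor file could look like this (illustrative only; the descriptor here is random):

\n
import numpy as np\n\n# A global descriptor for image 00001.png is a 1D array of shape (D,),\n# e.g. D = 4096 for vanilla NetVLAD / HF-Net, saved as features/00001.npy\ndesc = np.random.rand(4096).astype(np.float32)\nnp.save('00001.npy', desc)                 # place inside DATA_DIR/<traverse>/features/\nassert np.load('00001.npy').shape == (4096,)\n
\n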

Ground truth pose information

\n

Ground truth poses for a trajectory must be stored in a single .csv file located at DATA_DIR/<traverse_name>/camera_poses.csv. Poses are stored as 6D poses, with orientation given by an r, p, y Euler angle representation (please have mercy on my soul :p). All ground truth poses are given in the world coordinate frame as a world-to-body transform.

\n

We store 6D poses for the purposes of applying one of our comparison methods (MCL) which requires 6DoF poses. If you have an alternative (lower) number of DoFs, e.g. 3, 4, then simply save a 6DoF pose with zeros in dimensions that are not used.

\n
ts, x, y, z, r, p, y\n0001, 1.0, 100.0, 0.5, 0.003, -0.06, 0.07\n0002, 3.2, 105.0, 0.7, -0.01, -0.05, 0.075\n...\n
\n

Odometry information

\n

Odometry is defined as a relative pose between adjacent pairs (source_frame, destination_frame) of images and is given as a 6D relative pose. We assume the origin of the transformation is at the position of the source frame. As a simple check, composing the global pose of the source frame with the relative pose estimate between source and dest should yield the pose of the dest frame. Example:

\n
source_ts, dest_ts, x, y, z, r, p, y\n0001, 0002, 1.0, 100.0, 0.5, 0.003, -0.06, 0.07\n0002, 0003, 3.2, 105.0, 0.7, -0.01, -0.05, 0.075\n...\n
\n

Again, similar to ground truth poses, if odometry in a lower number of DoFs is provided, then fill in unused dimensions with zeros.

\n
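
That consistency check is easy to express with the spatialmath-python package (a sketch with made-up numbers, assuming the r, p, y columns are roll-pitch-yaw angles in radians):

\n
from spatialmath import SE3\n\nT_source = SE3(1.0, 100.0, 0.5) * SE3.RPY([0.003, -0.06, 0.07])   # world -> source\nT_rel = SE3(2.2, 5.0, 0.2) * SE3.RPY([-0.013, 0.01, 0.005])       # source -> dest (one odometry row)\nT_dest = T_source * T_rel                                         # should match the stored pose of the dest frame\nprint(T_dest)\n
\n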

3. Feature extraction (CPU version of HF-Net)

\n

We provide a helpful utility to easily extract features from images assuming the data structure in section 2 has been adhered to. The feature extraction method provided is an OpenVINO version of HF-Net for GPU-free feature extraction. Our code has minor changes to original code found in this repo.

\n

To extract features from images, use the topometricloc/feature_extraction/extract_features.py script. You simply provide the folder name of the traverse you wish to extract features from (located inside DATA_DIR) and it'll do its thing!

\n

Processing traverses

\n

Subsample traverses

\n

After raw feature extraction and data processing of the entire traverse, we subsample the traverses based on odometry (given by VO) for mapping and localization. To do this, use the src/data/subsample_traverse.py script (use --help for information).

\n

Reference map building

\n

Reference maps can be built from subsampled traverse data. Maps store the nodes, with odometry constraints (segments) between them, preprocessed before localization. Maps also store the global descriptors (NetVLAD from HF-Net) and timestamps (used to load local descriptors from disk when required). This map object is used frequently when localizing. To build a map, use the src/mapping.py script (see --help for information).

\n

Results

\n

Localization

\n

Baselines are stored in the src/baselines/ folder, and the scripts include Localization objects which store state estimates and model parameters and can be iterated to update state estimates given odometry and appearance observations. Our method is stored in src/localization.py. Both the comparison methods and our method have the same class structure for Localization objects and are called in the src/evaluate.py script.
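Schematically, that shared class structure looks something like the sketch below (hypothetical names; consult src/localization.py and src/baselines/ for the actual interfaces):

```python
# Illustrative interface only -- not the repository's actual class.
class Localization:
    def __init__(self, params, reference_map):
        self.params = params
        self.map = reference_map
        self.belief = None  # state estimate over map locations

    def update(self, odometry, query_descriptor):
        # One iteration: motion update from odometry, then measurement update
        # from the appearance observation; returns the updated state estimate.
        raise NotImplementedError
```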

\n

Evaluation

\n

Run src/evaluate.py to generate results. The script uniformly (spatially) samples starting points for global localization from the full query traverse and runs each method (ours and the comparisons) until convergence. It stores results in RESULTS_DIR with a description of the experiment, which is automatically generated if none is provided (see --help for more information).

\n

Model parameters for each method are stored in the src/params/ folder as yaml files.

\n

src/results.py aggregates results into tables and outputs them as .tex files using pandas. The input to this script is a csv file storing the traverse/method/experiment description information about the experiments to be aggregated.
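The aggregation boils down to a pandas pattern along these lines (a sketch only; the column names are placeholders and the real script is driven by the traverse/method/experiment csv described above):

```python
import pandas as pd

# csv describing the experiments to aggregate (traverse, method, description, ...).
experiments = pd.read_csv('experiments.csv')

# Aggregate whatever numeric metric columns are present and emit a LaTeX table.
table = experiments.groupby(['traverse', 'method']).mean(numeric_only=True)
table.to_latex('results_table.tex')
```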

\n

Other

\n

There is a tests folder with notebooks containing exploratory experiments. tests/off_map_classifier-geom.ipynb is a notebook for tuning the off-map detector parameters; it allows you to change parameter values and evaluate detector performance on an on-map and an off-map segment.

\n

src/visualization.py allows you to visualize localization with our method for any traverse. It outputs a multitude of useful diagnostic plots for understanding how the state estimate (belief) is updated, where the state proposals are (with confidence scores), and the sensor data (measurement likelihoods, motion, off-map detector, retrieved images). Very handy for tuning parameters on the training set!

\n","name":"Place-aware Topometric Localization","type":"code","url":"https://github.com/mingu6/TopometricLoc","id":"topometric_localization","image":"/qcr_logo_light_filled.svg","_images":["/qcr_logo_light_filled.svg"],"src":"/content/visual_place_recognition/topometric-localization.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/vpr_snn.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/vpr_snn.json new file mode 100644 index 0000000000..29492a83e4 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/vpr_snn.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

Spiking Neural Networks for Visual Place Recognition via Weighted Neuronal Assignments

\n

\"License:\n\"stars\"\n\"GitHub\n\"QUT

\n

This repository contains code for three of our papers:

\n\n

Updates

\n

Dec 2023:

\n\n

Oct 2023:

\n\n

May 2023:

\n\n

License and Citations

\n

This code is licensed under the MIT License.

\n

If you use our Ensemble of Modular SNNs with/without sequence matching code, please cite the following paper:

\n
@article{hussaini2023applications,\n  title={Applications of Spiking Neural Networks in Visual Place Recognition},\n  author={Hussaini, Somayeh and Milford, Michael and Fischer, Tobias},\n  journal={arXiv preprint arXiv:2311.13186},\n  year={2023}\n}\n
\n

If you use our Modular SNN code, please cite the following paper:

\n
@inproceedings{hussaini2023ensembles,\n  title={Ensembles of compact, region-specific \\& regularized spiking neural networks for scalable place recognition},\n  author={Hussaini, Somayeh and Milford, Michael and Fischer, Tobias},\n  booktitle={2023 IEEE International Conference on Robotics and Automation (ICRA)},\n  pages={4200--4207},\n  year={2023},\n  organization={IEEE}\n}\n
\n

If you use our Non-modular SNN code, please cite the following paper:

\n
@article{hussaini2022spiking,\n  title={Spiking Neural Networks for Visual Place Recognition via Weighted Neuronal Assignments},\n  author={Hussaini, Somayeh and Milford, Michael J and Fischer, Tobias},\n  journal={IEEE Robotics and Automation Letters},\n  year={2022},\n  publisher={IEEE}\n}\n
\n

Overview

\n

Please refer to the readme files of the Ensemble of Modular SNNs & sequence matching, Modular SNN and Non-modular SNN folders for instructions on running the code for each work.

\n

Applications of Spiking Neural Networks in Visual Place Recognition (Ensemble of Modular SNNs with/without sequence matching)

\n

\n \"Ensemble\n

\n

Modular SNNs for scalable place recognition (Modular SNN)

\n

Video: https://www.youtube.com/watch?v=TNDdfmPSe1U&t=137s

\n

\n \"ModularSNN\n

\n

SNNs for VPR (Non-modular SNN)

\n

Video: https://www.youtube.com/watch?v=VGfv4ZVOMkw

\n

\n \"VPRSNN\n

\n

This work is an adaptation of the spiking neural network model from \"Unsupervised Learning of Digit Recognition Using Spike-Timing-Dependent Plasticity\", Diehl and Cook (2015), for Visual Place Recognition (VPR). DOI: 10.3389/fncom.2015.00099.\nVisual Place Recognition is the problem of how a robot can identify whether it has previously visited a place, given an image of that place, despite challenges including changes in appearance and perceptual aliasing (where two different places look similar).

\n

The code is based on the following repositories, which include the original code and modified versions of it.

\n

Original code (Peter U. Diehl): https://github.com/peter-u-diehl/stdp-mnist

\n

Updated for Brian2: zxzhijia: https://github.com/zxzhijia/Brian2STDPMNIST

\n

Updated for Python3: sdpenguin: https://github.com/sdpenguin/Brian2STDPMNIST

\n

Please refer to the wiki tab for additional ablation studies.

\n

Acknowledgements

\n

These works were supported by the Australian Government, Intel Labs, and the Queensland University of Technology (QUT) through the Centre for Robotics.

\n","name":"Spiking Neural Networks for Visual Place Recognition","type":"code","url":"https://github.com/QVPR/VPRSNN","id":"vpr_snn","image":"./resources/Ens_of_modularSNNs.png","_images":["/_next/static/images/Ens_of_modularSNNs-b59ff02969917c2eb544fd14a2014936.png.webp","/_next/static/images/Ens_of_modularSNNs-2e12118a078b9b819e6e9169d4994b74.png"],"src":"/content/visual_place_recognition/vpr_snn.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/vprbench.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/vprbench.json new file mode 100644 index 0000000000..7fc230e986 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/vprbench.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"

VPR-Bench

\n

What is VPR-Bench

\n

VPR-Bench is an open-source Visual Place Recognition evaluation framework with quantifiable viewpoint and illumination invariance. This repository represents the open-source release relating to our VPR-Bench paper published in the International Journal of Computer Vision, which you can access here.\n\"VPR-Bench

\n

This repository allows you to do the following two things:

\n
    \n
  1. Compute the performance of 8 VPR techniques on 12 VPR datasets using multiple evaluation metrics, such as PR curves, ROC curves, RecallRate@N and True-Positive Distribution over a Trajectory.

  2. Compute the quantified limits of viewpoint and illumination invariance of VPR techniques on the Point Features dataset, QUT Multi-lane dataset and MIT Multi-illumination dataset.
\n

List of Techniques

\n
    \n
  1. NetVLAD [R. Arandjelović et al; https://arxiv.org/abs/1511.07247]
  2. RegionVLAD [Khaliq et al; https://ieeexplore.ieee.org/document/8944012]
  3. CoHOG [Zaffar et al; https://ieeexplore.ieee.org/document/8972582]
  4. HOG [Dalal et al; OpenCV Implementation]
  5. AlexNet [Krizhevsky et al; https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf]
  6. AMOSNet [Chen et al; https://ieeexplore.ieee.org/document/7989366]
  7. HybridNet [Chen et al; https://ieeexplore.ieee.org/document/7989366]
  8. CALC [Merrill et al; http://www.roboticsproceedings.org/rss14/p32.pdf]
  9. DenseVLAD (Results-only) [Torii et al; https://ieeexplore.ieee.org/document/7298790]
  10. AP-GeM (Results-only) [Revaud et al; https://ieeexplore.ieee.org/document/9010047]
\n

List of Datasets

\n
    \n
  1. ESSEX3IN1 [Zaffar et al; https://ieeexplore.ieee.org/document/9126220]
  2. Tokyo24/7 [R. Arandjelović et al; https://arxiv.org/abs/1511.07247]
  3. SPEDTest [Chen et al; https://ieeexplore.ieee.org/document/8421024]
  4. Synthia [Ros et al; https://ieeexplore.ieee.org/document/7780721]
  5. Nordland [Skrede et al; https://bit.ly/2QVBOym]
  6. Gardens Point [Glover et al; https://doi.org/10.5281/zenodo.4590133]
  7. INRIA Holidays [Jegou et al; https://lear.inrialpes.fr/pubs/2008/JDS08/jegou_hewgc08.pdf]
  8. Pittsburgh Query [R. Arandjelović et al; https://arxiv.org/abs/1511.07247]
  9. Cross-Seasons [Larsson et al; https://ieeexplore.ieee.org/document/8953253]
  10. Corridor [Milford et al; https://journals.sagepub.com/doi/abs/10.1177/0278364913490323]
  11. Living Room [Milford et al; https://ieeexplore.ieee.org/document/7487686]
  12. 17 Places [Sahdev et al; https://ieeexplore.ieee.org/document/7801503]
\n

Side Note: You can extend our codebase to include more datasets (or use full versions of some datasets) and techniques by following the templates described in the appendix of our paper. To understand these templates further, dig into the 'VPR_techniques' and 'helper_functions' folders of this repository.

\n

Dependencies

\n

Our code was written in Python 2 and tested on Ubuntu 18.04 LTS and Ubuntu 20.04 LTS, both using Anaconda Python. Please follow the steps below to install the dependencies:

\n
    \n
  1. Install Anaconda Python on your system (https://docs.anaconda.com/anaconda/install/). We are running conda 4.9.2 but other versions should also work.

  2. Clone this VPR-Bench Github repository (using git clone).
\n
git clone https://github.com/MubarizZaffar/VPR-Bench\n\n
\n
    \n
  1. Using 'cd', change your working directory to the downloaded VPR-Bench repository and execute the shell script 'must_downloads.sh'. This will download, extract and copy all the required model files and variation-quantified datasets into their respective folders.
\n
cd YOURDIR/VPR-Bench/\nsh must_downloads.sh\n
\n
    \n
  1. This VPR-Bench repository also contains a YAML file named 'environment.yml'. Using this file, you can create a new conda environment (named 'myvprbenchenv') containing all the dependencies by running the following in your terminal.
\n
conda env create -f environment.yml\n
\n
    \n
  1. There is a known Caffe bug regarding 'mean shape incompatible with input shape', so follow the solution in https://stackoverflow.com/questions/30808735/error-when-using-classify-in-caffe. That is, modify lines 253-254 in {USER}/anaconda3/envs/myvprbenchenv/lib/python2.7/site-packages/caffe.

  2. Finally, activate your environment using the following and you should be good to go.
\n
\nconda activate myvprbenchenv\n\n
\n
    \n
  1. (Backup) If for some reason you are unable to create a conda environment from environment.yml, please look into the 'VPR_Bench_dependencies_installationcommands.txt' file in this repo, which specifies the individual commands needed to install the dependencies for VPR-Bench in a fresh Python 2 conda environment. A similar backup file, namely 'must_downloads.txt', has also been provided for the 'must_downloads.sh' shell script.
\n

Using VPR-Bench

\n\n
python main.py -em 0 -sm 1 -dn Corridor -ddir datasets/corridor/ -mdir precomputed_matches/corridor/ -techs CoHOG CALC\n
\n\n
python main.py -em 0 -sm 1 -dn Corridor -ddir datasets/corridor/ -mdir precomputed_matches/corridor/ -techs CoHOG CALC NetVLAD RegionVLAD AMOSNet HybridNet HOG AlexNet_VPR\n
\n\n
python main.py -em 0 -sm 0 -dn SPEDTEST -ddir datasets/SPEDTEST/ -mdir precomputed_matches/SPEDTEST/ -techs CoHOG_Precomputed CALC_Precomputed NetVLAD_Precomputed RegionVLAD_Precomputed\n
\n\n
python main.py -em 2 -techs NetVLAD RegionVLAD AMOSNet HybridNet CALC HOG CoHOG AlexNet_VPR\n\n
\n

Related External Resources

\n

Datasets

\n\n

Techniques

\n\n

Contacts

\n

You can send an email to mubarizzaffar at gmail dot com, m dot zaffar at tudelft dot nl, or s dot garg at qut dot edu dot au for further guidance and/or questions.

\n

Important Note: For all the datasets and techniques, we have made our maximum effort to provide original citations and/or licenses within the respective folders, where possible and applicable. We request all users of VPR-Bench to be aware of (and use) the original citations and licenses in any of their works. If you have any concerns about this, please do send us an email.

\n

Cite as

\n

If you find this work useful, please cite as:

\n
@article{zaffar2021vpr,\n  title={Vpr-bench: An open-source visual place recognition evaluation framework with quantifiable viewpoint and appearance change},\n  author={Zaffar, Mubariz and Garg, Sourav and Milford, Michael and Kooij, Julian and Flynn, David and McDonald-Maier, Klaus and Ehsan, Shoaib},\n  journal={International Journal of Computer Vision},\n  pages={1--39},\n  year={2021},\n  publisher={Springer}\n}\n
\n","name":"VPR-Bench","type":"code","url":"https://github.com/MubarizZaffar/VPR-Bench","id":"vprbench","image":"VPRBench.jpg","_images":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"src":"/content/visual_place_recognition/vprbench_code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection.json new file mode 100644 index 0000000000..939e1a12fc --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection.json @@ -0,0 +1 @@ +{"pageProps":{"listData":[{"linkUrl":"/collection/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/human-cues","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Human Cues for Robot Navigation","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/python_robotics","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Python Robotics","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/rt_gene_overview","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"primaryText":"RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/vpr_overview","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Visual Place Recognition","secondaryText":"Collection","secondaryTransform":"capitalize"}],"title":"Open source collections"},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/benchbot.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/benchbot.json new file mode 100644 index 0000000000..c185492733 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/benchbot.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot Software 
Stack","secondaryText":"qcr/benchbot","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-api","mediaPosition":"center 100%","mediaUrls":["/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webm","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.mp4","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webp","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.jpg"],"primaryText":"BenchBot Python API","secondaryText":"qcr/benchbot_api","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-addons","mediaPosition":"center","mediaUrls":["/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webm","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.mp4","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webp","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.jpg"],"primaryText":"BenchBot Add-ons Manager","secondaryText":"qcr/benchbot_addons","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-eval","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"BenchBot Evaluation Tools","secondaryText":"qcr/benchbot_eval","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-supervisor","mediaPosition":"center 0%","mediaUrls":["/_next/static/images/benchbot_supervisor-3e4092b6584962e3e4529101ae489a08.jpg.webp","/_next/static/images/benchbot_supervisor-fb509eb331f3380fbf5da2c3035116b6.jpg"],"primaryText":"BenchBot Backend Supervisor","secondaryText":"qcr/benchbot_supervisor","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-simulator","mediaPosition":"center","mediaUrls":["/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webm","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.mp4","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webp","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.jpg"],"primaryText":"BenchBot Simulator (Isaac)","secondaryText":"qcr/benchbot_simulator","secondaryTransform":"lowercase"}],"collectionData":{"content":"

The BenchBot software stack is a collection of software packages that allow end users to control robots in real or simulated environments with a simple python API. It leverages the simple \"observe, act, repeat\" approach to robot problems prevalent in reinforcement learning communities (OpenAI Gym users will find the BenchBot API interface very similar).
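For readers unfamiliar with that pattern, the generic Gym-style \"observe, act, repeat\" loop looks like the sketch below (classic pre-0.26 Gym API with a placeholder environment; this is not the actual BenchBot API):

```python
import gym

env = gym.make('CartPole-v1')  # placeholder environment
observation = env.reset()

done = False
while not done:
    action = env.action_space.sample()                  # act
    observation, reward, done, info = env.step(action)  # observe, repeat
```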

\n","name":"BenchBot","type":"collection","url":"http://benchbot.org","id":"benchbot","code":["benchbot","benchbot-api","benchbot-addons","benchbot-eval","benchbot-supervisor","benchbot-simulator"],"datasets":["benchbot-bear-data"],"feature":1,"src":"/content/benchbot/collection.md","image_position":"100% center","_code":[],"_datasets":[],"image":"./docs/benchbot_web.gif","_images":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"]},"datasets":[{"linkUrl":"/dataset/benchbot-bear-data","mediaPosition":"center","mediaUrls":["/_next/static/images/all_envs-55ef0a35e02b68a820d9940edf6a1521.png.webp","/_next/static/images/all_envs-7573d0362a6d5ba5fc5e45e2542e99b9.png"],"primaryText":"BenchBot Environments for Active Robotics (BEAR)","secondaryText":"15.9GB","secondaryTransform":"capitalize"}]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/human-cues.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/human-cues.json new file mode 100644 index 0000000000..bf1ad1225b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/human-cues.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/abstract-map","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Abstract Map (Python)","secondaryText":"btalb/abstract_map","secondaryTransform":"lowercase"},{"linkUrl":"/code/abstract-map-simulator","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_simulation-55e32b58dd5e4ed9caf7a85baf98677c.png.webp","/_next/static/images/abstract_map_simulation-3a9dbfc04fa16e80a961cec841d316fc.png"],"primaryText":"2D Simulator for Zoo Experiments","secondaryText":"btalb/abstract_map_simulator","secondaryTransform":"lowercase"},{"linkUrl":"/code/abstract-map-app","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webm","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.mp4","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webp","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.jpg"],"primaryText":"Android App for Human Participants","secondaryText":"btalb/abstract_map_app","secondaryTransform":"lowercase"}],"collectionData":{"content":"

The Human Cues for Robot Navigation ARC Discovery Project (DP140103216) investigated how a robot can navigate using the same navigation cues humans use when navigating built environments. Types of navigation cues targeted include labels, directional signs, signboards, maps & floor plans, navigational gestures, and spoken directions & descriptions. The main contribution from this work is the abstract map, a navigational tool that allows a robot to employ symbolic spatial information in its navigation of unseen spaces.

\n","name":"Human Cues for Robot Navigation","type":"collection","url":"https://btalb.github.io/abstract_map","code":["abstract-map","abstract-map-simulator","abstract-map-app"],"feature":0,"src":"/content/human_cues/human-cues.md","id":"human-cues","image_position":"center","_code":[],"image":"./docs/assets/images/abstract_map_in_action.png","_images":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"_datasets":[]},"datasets":[]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/python_robotics.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/python_robotics.json new file mode 100644 index 0000000000..99e12aec99 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/python_robotics.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/spatialmath-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/CartesianSnakes_LogoW-7d2f987ca5432e1ce32ce72e90be7c64.png.webp","/_next/static/images/CartesianSnakes_LogoW-d72d60a588449aa6a08846bed694c0c9.png"],"primaryText":"Spatialmath Python","secondaryText":"petercorke/spatialmath-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/robotics-toolbox-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Robotics Toolbox Python","secondaryText":"petercorke/robotics-toolbox-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/swift","mediaPosition":"center","mediaUrls":["/_next/static/images/panda-f1735ad2d702ae9c686b2f0e727e9941.png.webp","/_next/static/images/panda-c3722217e520e43c10f1bc26fffcd0fd.png"],"primaryText":"Swift","secondaryText":"jhavl/swift","secondaryTransform":"lowercase"}],"collectionData":{"content":"

Python Robotics is a collection of software packages providing robotics-specific functionality to Python, while leveraging Python's advantages of portability, ubiquity and support, and the capability of the open-source ecosystem for linear algebra (numpy, scipy), graphics (matplotlib, three.js, WebGL), interactive development (jupyter, jupyterlab, mybinder.org), and documentation (sphinx).

\n

The collection is built on top of Spatialmath, which underpins all of robotics and robotic vision where we need to describe the position, orientation or pose of objects in 2D or 3D spaces. The core of the collection is the Robotics Toolbox for Python, while Swift provides a light-weight browser-based simulation environment.

\n","name":"Python Robotics","type":"collection","url":"https://petercorke.github.io/robotics-toolbox-python/","image":"repo:petercorke/robotics-toolbox-python/docs/figs/RobToolBox_RoundLogoB.png","image_fit":"contain","id":"python_robotics","code":["spatialmath-python","robotics-toolbox-python","swift"],"feature":99999,"_images":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"src":"/content/robotics_toolbox/collection.md","image_position":"center","_code":[],"_datasets":[]},"datasets":[]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/rt_gene_overview.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/rt_gene_overview.json new file mode 100644 index 0000000000..3993343a0f --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/rt_gene_overview.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/rt_gene_code","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"primaryText":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Codebase","secondaryText":"Tobias-Fischer/rt_gene","secondaryTransform":"lowercase"},{"linkUrl":"/code/rt_bene_code","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/rt_bene_best_poster_award-5ac70111852de9eac6c94cd88ef726e0.png.webp","/_next/static/images/rt_bene_best_poster_award-d72f84610eb0050287dd856b52cc99c5.png"],"primaryText":"RT-BENE: Real-Time Blink Estimation in Natural Environments Codebase","secondaryText":"Tobias-Fischer/rt_gene","secondaryTransform":"lowercase"}],"collectionData":{"content":"

This project contains code + datasets for real-time eye gaze and blink estimation.

\n

The work in this project was carried out within the Personal Robotics Lab at Imperial College London.

\n
\n
\n","name":"RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments","type":"collection","url":"https://github.com/Tobias-Fischer/rt_gene","id":"rt_gene_overview","code":["rt_gene_code","rt_bene_code"],"datasets":["rt_gene_dataset","rt_bene_dataset"],"feature":3,"src":"/content/rt-gene/project.md","image_position":"center","_code":[],"_datasets":[],"image":"repo:/assets/system_overview.jpg","_images":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"image_fit":"contain"},"datasets":[{"linkUrl":"/dataset/rt_gene_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset_figure-5572954dcf83fca94ae80fa38a0f36ab.jpg.webp","/_next/static/images/dataset_figure-024bff6ee75c09b3b9afd020a4e1467b.jpg"],"primaryText":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Dataset","secondaryText":"45GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/rt_bene_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/rt_bene_labels-f79290be354a9a6ea6dfa387d60da1c1.png.webp","/_next/static/images/rt_bene_labels-4ac642446c5fd65a3d20b2b46f856cdc.png"],"primaryText":"RT-BENE: Real-Time Blink Estimation in Natural Environments Dataset","secondaryText":"600MB","secondaryTransform":"capitalize"}]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/vpr_overview.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/vpr_overview.json new file mode 100644 index 0000000000..5e03480269 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/vpr_overview.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/patchnetvlad_code","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Patch-NetVLAD","secondaryText":"QVPR/Patch-NetVLAD","secondaryTransform":"lowercase"},{"linkUrl":"/code/seqnet_code","mediaPosition":"center","mediaUrls":["/_next/static/images/seqnet-cfc1aecd3cd2b268af41400a4fb86e6a.jpg.webp","/_next/static/images/seqnet-69de71978f2b7f0ffbcefcbb976010d3.jpg"],"primaryText":"SeqNet","secondaryText":"oravus/seqNet","secondaryTransform":"lowercase"},{"linkUrl":"/code/vprbench","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"MubarizZaffar/VPR-Bench","secondaryTransform":"lowercase"},{"linkUrl":"/code/delta_descriptors_code","mediaPosition":"center","mediaUrls":["/_next/static/images/ral-iros-2020-delta-descriptors-schematic-b5f57732c327f2f8546715b5dc3643af.png.webp","/_next/static/images/ral-iros-2020-delta-descriptors-schematic-95f5d1a50f3d92aa3344d9782ac13c32.png"],"primaryText":"Delta Descriptors","secondaryText":"oravus/DeltaDescriptors","secondaryTransform":"lowercase"},{"linkUrl":"/code/event_vpr_code","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"primaryText":"Visual Place Recognition using Event 
Cameras","secondaryText":"Tobias-Fischer/ensemble-event-vpr","secondaryTransform":"lowercase"},{"linkUrl":"/code/lost_code","mediaPosition":"center","mediaUrls":["/_next/static/images/day-night-keypoint-correspondence-place-recognition-38203057bf036a1e9271b0a7647119fa.jpg.webp","/_next/static/images/day-night-keypoint-correspondence-place-recognition-bed6f778b7ec1ce4edaa346e24fb33bf.jpg"],"primaryText":"LoST-X","secondaryText":"oravus/lostX","secondaryTransform":"lowercase"},{"linkUrl":"/code/openseqslam2_code","mediaPosition":"center","mediaUrls":["/_next/static/images/openseqslam2-c5079d59d4cff5bd652acb1652d047f6.png.webp","/_next/static/images/openseqslam2-f3755fc8e61c0d81c8f0b0f42c5e08ae.png"],"primaryText":"OpenSeqSLAM2","secondaryText":"qcr/openseqslam2","secondaryTransform":"lowercase"},{"linkUrl":"/code/seq2single_code","mediaPosition":"center","mediaUrls":["/_next/static/images/illustration-73bec1a3cac56819cdbea1268b711fa4.png.webp","/_next/static/images/illustration-1e185173132d7d8138449660ac905c04.png"],"primaryText":"seq2single","secondaryText":"oravus/seq2single","secondaryTransform":"lowercase"},{"linkUrl":"/code/teach_repeat","mediaPosition":"center","mediaUrls":["/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webm","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.mp4","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webp","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.jpg"],"primaryText":"Visual Teach and Repeat","secondaryText":"QVPR/teach-repeat","secondaryTransform":"lowercase"},{"linkUrl":"/code/heaputil_code","mediaPosition":"center","mediaUrls":["/_next/static/images/overview-8c193585e23714439d55f0227d88f923.jpg.webp","/_next/static/images/overview-fc609d6102a3c08cb20b14382e57ee50.jpg"],"primaryText":"HEAPUtil","secondaryText":"Nik-V9/HEAPUtil","secondaryTransform":"lowercase"},{"linkUrl":"/code/topometric_localization","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"Place-aware Topometric Localization","secondaryText":"mingu6/TopometricLoc","secondaryTransform":"lowercase"},{"linkUrl":"/code/vpr_snn","mediaPosition":"center","mediaUrls":["/_next/static/images/Ens_of_modularSNNs-b59ff02969917c2eb544fd14a2014936.png.webp","/_next/static/images/Ens_of_modularSNNs-2e12118a078b9b819e6e9169d4994b74.png"],"primaryText":"Spiking Neural Networks for Visual Place Recognition","secondaryText":"QVPR/VPRSNN","secondaryTransform":"lowercase"}],"collectionData":{"content":"

This collection features code related to Visual Place Recognition (VPR) research, which is concerned with the fundamental problem of how a robot or autonomous vehicle uses perception to create maps and to calculate and track its location in the world. Research questions include addressing how:

\n\n","name":"Visual Place Recognition","type":"collection","url":"https://github.com/qvpr","id":"vpr_overview","code":["patchnetvlad_code","seqnet_code","vprbench","delta_descriptors_code","event_vpr_code","lost_code","openseqslam2_code","seq2single_code","teach_repeat","heaputil_code","topometric_localization","vpr_snn"],"datasets":["brisbane_event_vpr_dataset","vprbench"],"feature":4,"src":"/content/visual_place_recognition/project.md","image_position":"center","_code":[],"_datasets":[],"image":"./assets/patch_netvlad_method_diagram.png","_images":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"]},"datasets":[{"linkUrl":"/dataset/brisbane_event_vpr_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"primaryText":"Brisbane-Event-VPR","secondaryText":"80GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/vprbench","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"20GB","secondaryTransform":"capitalize"}]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset.json new file mode 100644 index 0000000000..ae177e4be1 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset.json @@ -0,0 +1 @@ +{"pageProps":{"listData":[{"linkUrl":"/dataset/air-traffic-occupancy","mediaPosition":"center","mediaUrls":["/_next/static/images/airspace_traffic_occupation-d8931212d7eaeaba10c2acf785a0ecd9.jpg.webp","/_next/static/images/airspace_traffic_occupation-01dcbf21de2f822245f4bd51c364dc3a.jpg"],"primaryText":"Air Traffic Occupancy Data","secondaryText":"19.5MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/aircraft-collision-course","mediaPosition":"center","mediaUrls":["/_next/static/images/aircraft_collision_course-5837e09e1d9c74d5172247fc1e45d485.png.webp","/_next/static/images/aircraft_collision_course-39c4d558d7857ed871ee5625ead09fe7.png"],"primaryText":"Aircraft Collision Course Dataset","secondaryText":"43.1GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/alderley","mediaPosition":"center","mediaUrls":["/_next/static/images/alderley-b50cf86288ad8d46bfd22a4d44388409.jpg.webp","/_next/static/images/alderley-1ca139fb61a2f7e5f85056bd73b7be49.jpg"],"primaryText":"Alderley Day and Night","secondaryText":"2.07GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/benchbot-bear-data","mediaPosition":"center","mediaUrls":["/_next/static/images/all_envs-55ef0a35e02b68a820d9940edf6a1521.png.webp","/_next/static/images/all_envs-7573d0362a6d5ba5fc5e45e2542e99b9.png"],"primaryText":"BenchBot Environments for Active Robotics 
(BEAR)","secondaryText":"15.9GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/brisbane_event_vpr_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"primaryText":"Brisbane-Event-VPR","secondaryText":"80GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/cbd-and-highway","mediaPosition":"center","mediaUrls":["/_next/static/images/cbd_and_highway-68b114bb9789803999a0af2fe0a97d91.png.webp","/_next/static/images/cbd_and_highway-6e93fb1dae7cf6034cc07b783b8ca033.png"],"primaryText":"CBD and Highway Datasets","secondaryText":"4.66GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/city-sunset","mediaPosition":"center","mediaUrls":["/_next/static/images/city_sunset_sample-7d8be3853972a74f7ab4885baa118dba.jpg.webp","/_next/static/images/city_sunset_sample-970f5a9952e2126c37c4a7235d8b48b1.jpg"],"primaryText":"City sunset drive","secondaryText":"9.8GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/day-night-lateral","mediaPosition":"center","mediaUrls":["/_next/static/images/day_night_lateral-56ae9615a767dd2fc33a0b2c257f9f28.jpg.webp","/_next/static/images/day_night_lateral-ece5db3367c8eae1fb94458fafcb8e99.jpg"],"primaryText":"Day and Night with Lateral Pose Change","secondaryText":"67.9MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/floor-and-lawn","mediaPosition":"center","mediaUrls":["/_next/static/images/floor_and_lawn-17b4af12dd8360d83e5f91a61c0f043e.jpg.webp","/_next/static/images/floor_and_lawn-93eb7282e76ae159b32f1cdb983f4317.jpg"],"primaryText":"Day-night vacuum-cleaner robot and lawn datasets","secondaryText":"400MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/fish-image","mediaPosition":"center","mediaUrls":["/_next/static/images/fish_image-a252b1d6bfcba0ada070ce6af73fc92b.png.webp","/_next/static/images/fish_image-d74fa618dc09390794a0785ebede2291.png"],"primaryText":"Fish images","secondaryText":"512MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/gold-coast-drive","mediaPosition":"center","mediaUrls":["/_next/static/images/gold_coast_sample-5c2190ed72cc8f3cb58d4a4d8c6f1e66.jpg.webp","/_next/static/images/gold_coast_sample-403d54f648160fcbabca61f64c22ba22.jpg"],"primaryText":"Gold Coast drive","secondaryText":"15.8GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/indoor-level-7","mediaPosition":"center","mediaUrls":["/_next/static/images/indoor_level_7-5206991317d188dcc5c74a9fcb49af3a.png.webp","/_next/static/images/indoor_level_7-0af1bc519fe9a2884ef2d8b0423c7c55.png"],"primaryText":"Indoor Level 7 S-Block","secondaryText":"42.2GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/kagaru","mediaPosition":"center","mediaUrls":["/_next/static/images/kagaru-f2c2fb6210e37c5a307667580f126176.png.webp","/_next/static/images/kagaru-a9f63225cd629de63260ecebd1aadf5c.png"],"primaryText":"Kagaru Airborne Vision","secondaryText":"22.9GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/kitti-semantics","mediaPosition":"center","mediaUrls":["/_next/static/images/kitti_semantics-f17afe36ed6d7a3faa570f932450244d.png.webp","/_next/static/images/kitti_semantics-b99130453265bfc7fe08428ef322d4ac.png"],"primaryText":"KITTI images with semantic 
labels","secondaryText":"31.8MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/mt-cootha-day-night","mediaPosition":"center","mediaUrls":["/_next/static/images/mt_cootha-bd14dc6ef8b1122300e0b93391b69052.jpg.webp","/_next/static/images/mt_cootha-237af78c49ccc8dd88c237a761fbbfe0.jpg"],"primaryText":"Mt Cootha Day and Night Drives","secondaryText":"2.8GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/multilane-sideways","mediaPosition":"center","mediaUrls":["/_next/static/images/multilane_sideways-4450e7f81fc4a229197607668cd4b881.png.webp","/_next/static/images/multilane_sideways-640c09802edf13b2e1a9c6b032945c13.png"],"primaryText":"Multi-Lane Road Sideways-Camera Datasets","secondaryText":"761MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/multimodal-rock-surface","mediaPosition":"center","mediaUrls":["/_next/static/images/multimodal_rock_surface-f19b647fbcffec77c0dc67dd2eef1984.png.webp","/_next/static/images/multimodal_rock_surface-75e156a88b63a6a1e4a9cb862b891742.png"],"primaryText":"Multimodal Rock Surface Images","secondaryText":"2.0GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/night-time-drive","mediaPosition":"center","mediaUrls":["/_next/static/images/night_drive_sample-ec428d6b27967b4c1bd2738263c5c4ce.jpg.webp","/_next/static/images/night_drive_sample-0be92f8f70cbdd291f029a387cac0565.jpg"],"primaryText":"Night time drive","secondaryText":"2.57GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/openratslam","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"OpenRATSLAM","secondaryText":"3.6GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/low-light","mediaPosition":"center","mediaUrls":["/_next/static/images/low_light-e29b3b1c358fbb7bb15cf1075b2f1066.jpg.webp","/_next/static/images/low_light-5d50fd52e0698697f62132897dbf6bfa.jpg"],"primaryText":"Raw image low-light object dataset","secondaryText":"23.3GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/rt_bene_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/rt_bene_labels-f79290be354a9a6ea6dfa387d60da1c1.png.webp","/_next/static/images/rt_bene_labels-4ac642446c5fd65a3d20b2b46f856cdc.png"],"primaryText":"RT-BENE: Real-Time Blink Estimation in Natural Environments Dataset","secondaryText":"600MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/rt_gene_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset_figure-5572954dcf83fca94ae80fa38a0f36ab.jpg.webp","/_next/static/images/dataset_figure-024bff6ee75c09b3b9afd020a4e1467b.jpg"],"primaryText":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Dataset","secondaryText":"45GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/st-lucia-multiple","mediaPosition":"center","mediaUrls":["/_next/static/images/st_lucia_multiple-2cbaafdfc8e48c3435bf99f7dd1664c7.jpg.webp","/_next/static/images/st_lucia_multiple-a633d97afbaa94da3e29001a6583b448.jpg"],"primaryText":"St Lucia multiple times of day","secondaryText":"1.46GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/trip-hazards","mediaPosition":"center","mediaUrls":["/_next/static/images/trip_hazards-c2e22a1e003c0abefb7e1182008b7d5d.png.webp","/_next/static/images/trip_hazards-0fb1c5950e6df6a57f0abb9f71e7b113.png"],"primaryText":"Trip Hazards on a Construction 
Site","secondaryText":"339MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/uq-st-lucia","mediaPosition":"center","mediaUrls":["/_next/static/images/uq_st_lucia-1d565cc939abfe002a470df1354ac6e8.jpg.webp","/_next/static/images/uq_st_lucia-cd1a4c432374b14373c4c98eb02e8d7c.jpg"],"primaryText":"UQ St Lucia Vision","secondaryText":"38.1GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/vprbench","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"20GB","secondaryTransform":"capitalize"}],"title":"Downloadable datasets"},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/air-traffic-occupancy.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/air-traffic-occupancy.json new file mode 100644 index 0000000000..8c9643a92b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/air-traffic-occupancy.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Airspace

\n

A dataset of aircraft operating around an airport, and an efficient implementation of an optimal quickest change detection rule to detect changes in airspace traffic.

\n

The raw data is supplied by Airservices Australia and consists of merged track surveillance data acquired from ADS-B and Radar for aircraft operating around an airport. The data was processed to extract the aircraft patterns on a grid at certain sampling points, and aircraft count information was used to identify busy and quiet traffic environments. The aircraft pattern and traffic environment class are made available in this dataset.

\n","name":"Air Traffic Occupancy Data","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/s/wPCSZ6Znipez1vC","image":"./airspace_traffic_occupation.jpg","size":"19.5MB","id":"air-traffic-occupancy","_images":["/_next/static/images/airspace_traffic_occupation-d8931212d7eaeaba10c2acf785a0ecd9.jpg.webp","/_next/static/images/airspace_traffic_occupation-01dcbf21de2f822245f4bd51c364dc3a.jpg"],"src":"/content/aircraft_detection/airspace_traffic_occupancy.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/aircraft-collision-course.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/aircraft-collision-course.json new file mode 100644 index 0000000000..f29ee38e56 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/aircraft-collision-course.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Example

\n

A dataset of stationary, fixed-wing aircraft on a collision course for vision-based sense and avoid.

\n

The dataset consists of 15 uncompressed, high-resolution image sequences containing 55,521 images of a fixed-wing aircraft approaching a stationary, grounded camera.

\n

Ground truth labels and videos of the image sequences are also provided.

\n

This dataset is licensed under the BSD-3 license. If you use this dataset please cite the following paper:

\n

Martin, Jasmin, Jenna Riseley, and Jason J. Ford. \"A Dataset of Stationary, Fixed-wing Aircraft on a Collision Course for Vision-Based Sense and Avoid.\" arXiv preprint arXiv:2112.02735 (2021).

\n","name":"Aircraft Collision Course Dataset","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/s/qHjCKYrFagWBHL5","image":"./aircraft_collision_course.png","size":"43.1GB","_images":["/_next/static/images/aircraft_collision_course-5837e09e1d9c74d5172247fc1e45d485.png.webp","/_next/static/images/aircraft_collision_course-39c4d558d7857ed871ee5625ead09fe7.png"],"src":"/content/aircraft_detection/aircraft_collision_course.md","id":"aircraft-collision-course","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/alderley.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/alderley.json new file mode 100644 index 0000000000..1359e1fbc5 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/alderley.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Alderley

\n

A vision dataset gathered from a car driven around Alderley, Queensland in two different conditions for the same route: one on a sunny day and one during a rainy night. The dataset includes extracted frames from the original .avi video files, as well as manually ground-truthed frame correspondences. The dataset was first used in the ICRA2012 Best Robot Vision Paper:

\n

M. Milford, G. Wyeth, \"SeqSLAM: Visual route-based navigation for sunny summer days and stormy winter nights\", in IEEE International Conference on Robotics and Automation, St Paul, United States, 2012.

\n

If you use this dataset please cite the above paper. BibTeX, Endnote, RefMan and CSV citation options available by clicking here.

\n","name":"Alderley Day and Night","type":"dataset","url":"https://wiki.qut.edu.au/pages/viewpage.action?pageId=181178395","url_type":"external","size":"2.07GB","image":"./alderley.jpg","_images":["/_next/static/images/alderley-b50cf86288ad8d46bfd22a4d44388409.jpg.webp","/_next/static/images/alderley-1ca139fb61a2f7e5f85056bd73b7be49.jpg"],"src":"/content/legacy_datasets/alderley.md","id":"alderley","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/benchbot-bear-data.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/benchbot-bear-data.json new file mode 100644 index 0000000000..bbe50de40d --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/benchbot-bear-data.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

The BenchBot Environments for Active Robotics (BEAR) are a set of Unreal Engine environments for use with the BenchBot software stack in the ACRV Semantic Scene Understanding Challenge. A collage of the robot starting position for each of the environments is shown below:

\n

\"Robot

\n

Features of the dataset include:

\n\n

The primary and easiest way to utilise the dataset is through the BenchBot software stack. For full instructions on using an active agent within the environments with BenchBot, we refer users to the BenchBot documentation. The link above gives access to the packaged Unreal \"games\" (not raw assets) for all environments, split into a development and a challenge set, in line with the original scene understanding challenge. Develop contains house and miniroom. Challenge contains apartment, company, and office. Note that these are just the environments. Ground truth object cuboid maps are located in the BenchBot add-ons ground_truths_isaac_develop and ground_truths_isaac_challenge respectively.

\n

For more details of the dataset, challenge, BenchBot, and how it all fits together, please see our summary video below:

\n
\n","name":"BenchBot Environments for Active Robotics (BEAR)","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/s/pr8Bthtj2OFbg4R/download","size":"15.9GB","image":"./all_envs.png","_images":["/_next/static/images/all_envs-55ef0a35e02b68a820d9940edf6a1521.png.webp","/_next/static/images/all_envs-7573d0362a6d5ba5fc5e45e2542e99b9.png"],"src":"/content/benchbot/benchbot-bear-data.md","id":"benchbot-bear-data","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/brisbane_event_vpr_dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/brisbane_event_vpr_dataset.json new file mode 100644 index 0000000000..4bc323e4cf --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/brisbane_event_vpr_dataset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

Brisbane-Event-VPR was captured in the Brookfield and Kenmore Hills outer suburbs of Brisbane. The route is approx. 8km long and contains a variety of different roads, ranging from single-lane roads without much build-up, through dual carriageways, to built-up areas. Some areas contain many trees that cast shadows on the street and lead to challenging lighting conditions. The dataset includes 6 traverses recorded at different times of the day and under varying weather conditions. A DAVIS346 event camera was used to record the dataset; it was mounted forward-facing on the inside of the windshield of a Honda Civic. The DAVIS346 allows recording of events and aligned RGB frames at 346x260 pixel resolution.

\n

\"Brisbane-Event-VPR

\n","name":"Brisbane-Event-VPR","type":"dataset","url":"https://zenodo.org/record/4302805","size":"80GB","id":"brisbane_event_vpr_dataset","image":"repo:Tobias-Fischer/ensemble-event-vpr/dataset.png","image_fit":"contain","_images":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"src":"/content/visual_place_recognition/event-vpr-dataset.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/cbd-and-highway.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/cbd-and-highway.json new file mode 100644 index 0000000000..efb626383c --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/cbd-and-highway.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Overview

\n

The paper is currently in press and is expected to be published in 2016. Please cite the following paper if you use these datasets (use the correct year after it has been published):

\n

Pepperell, E., Corke, P. & Milford, M. (in press). Routed Roads: Probabilistic Vision-Based Place Recognition for Changing Conditions, Split Streets and Varied Viewpoints. The International Journal of Robotics Research (IJRR).

\n

Links to datasets can be found through the download button above.

\n","name":"CBD and Highway Datasets","type":"dataset","url":[{"name":"CBD","url":"https://mega.co.nz/#F!FEM2zBzb!D72oxkUG2jDhaIDxsig1iQ","size":"2.20GB"},{"name":"Highway","url":"https://mega.co.nz/#F!xRsxCZ4Y!s1Lq4KmtmZfR5MLBLw4a2g","size":"2.46GB"}],"url_type":"list","image":"./cbd_and_highway.png","size":"4.66GB","_images":["/_next/static/images/cbd_and_highway-68b114bb9789803999a0af2fe0a97d91.png.webp","/_next/static/images/cbd_and_highway-6e93fb1dae7cf6034cc07b783b8ca033.png"],"src":"/content/legacy_datasets/cbd_and_highway.md","id":"cbd-and-highway","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/city-sunset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/city-sunset.json new file mode 100644 index 0000000000..99f9c2cf28 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/city-sunset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"City

\n

Description:

\n

A GoPro vision-only dataset gathered on a late afternoon / evening drive of approximately 10 km (one way) into and out of the Brisbane metropolitan area. Lots of varied traffic and interesting pedestrian situations. The map shows the inbound route (part 1); the return route (part 2) is approximately the reverse but has some extra suburban streets at the end.

\n

Settings: 1080p 30 fps wide FOV setting on a GoPro 4 Silver.

\n

Download links for both parts can be accessed via the button above.

\n

\"City

\n

Paper reference:

\n

If you use this dataset, please cite the below paper:

\n

Michael Milford, Chunhua Shen, Stephanie Lowry, Niko Suenderhauf, Sareh Shirazi, Guosheng Lin, Fayao Liu, Edward Pepperell, Cesar Lerma, Ben Upcroft, Ian Reid, \"Sequence Searching With Deep-Learnt Depth for Condition- and Viewpoint-Invariant Route-Based Place Recognition\", in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2015, pp. 18-25.

\n

Paper web link:

\n

http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W11/html/Milford_Sequence_Searching_With_2015_CVPR_paper.html

\n","name":"City sunset drive","type":"dataset","url":[{"name":"Inbound route (part 1)","url":"https://mega.nz/#!UBkhiL7L!xppCjeRaadUqK1ESk36O_ZdObpC0C3ETXmXaonweIF0","size":"4.07GB"},{"name":"Outbound route (part 2)","url":"https://mega.nz/#!8ZFQEZDC!mDcOPs5g6V1Ad4SSJ5_6SCUcxIveI8JnK7LEZe696Mg","size":"5.72GB"}],"url_type":"list","size":"9.8GB","image":"./city_sunset_sample.jpg","_images":["/_next/static/images/city_sunset_sample-7d8be3853972a74f7ab4885baa118dba.jpg.webp","/_next/static/images/city_sunset_sample-970f5a9952e2126c37c4a7235d8b48b1.jpg"],"src":"/content/legacy_datasets/city_sunset.md","id":"city-sunset","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/day-night-lateral.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/day-night-lateral.json new file mode 100644 index 0000000000..00e861c556 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/day-night-lateral.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Overview

\n

Two vision datasets of a single route through the Gardens Point Campus, Queensland University of Technology, and along the Brisbane River, Brisbane, Australia. One route is traversed on the left-hand side of the path during the day, and the other is traversed on the right-hand side of the path during the night, to capture both pose and condition change.

\n

Full details of how to use the dataset and individual download links are available from:

\n

https://wiki.qut.edu.au/pages/viewpage.action?pageId=175739622

\n","name":"Day and Night with Lateral Pose Change","type":"dataset","url":"https://wiki.qut.edu.au/pages/viewpage.action?pageId=175739622","url_type":"external","image":"./day_night_lateral.jpg","size":"67.9MB","_images":["/_next/static/images/day_night_lateral-56ae9615a767dd2fc33a0b2c257f9f28.jpg.webp","/_next/static/images/day_night_lateral-ece5db3367c8eae1fb94458fafcb8e99.jpg"],"src":"/content/legacy_datasets/day_night_lateral.md","id":"day-night-lateral","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/fish-image.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/fish-image.json new file mode 100644 index 0000000000..acc745f039 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/fish-image.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Sample

\n

This fish dataset currently consists of 3,960 images collected from 468 species. The data consists of real-world images of fish captured in conditions defined as \"controlled\", \"out-of-the-water\" and \"in-situ\". The \"controlled\" images consist of fish specimens, with their fins spread, taken against a constant background with controlled illumination. The \"in-situ\" images are underwater images of fish in their natural habitat, so there is no control over background or illumination. The \"out-of-the-water\" images consist of fish specimens taken out of the water, with a varying background and limited control over the illumination conditions. A tight red bounding box is annotated around each fish.
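As a rough illustration of how the red box annotation might be recovered programmatically, the sketch below thresholds red pixels in HSV space with OpenCV; the filename and threshold values are assumptions and may need tuning for the actual images.

```python
# Hedged sketch: locate the annotated red bounding box by colour thresholding.
# "fish_sample.png" and the HSV ranges are assumptions, not dataset specifics.
import cv2
import numpy as np

img = cv2.imread("fish_sample.png")
if img is None:
    raise FileNotFoundError("point this at one of the downloaded images")

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Red wraps around the hue axis, so combine two hue ranges.
mask = cv2.inRange(hsv, (0, 120, 70), (10, 255, 255)) | \
       cv2.inRange(hsv, (170, 120, 70), (180, 255, 255))

ys, xs = np.nonzero(mask)
if xs.size:
    print(f"bounding box: x={xs.min()}..{xs.max()}, y={ys.min()}..{ys.max()}")
```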

\n

Full details of how to use the dataset and individual download links are available from:

\n

https://wiki.qut.edu.au/display/cyphy/Fish+Dataset

\n","name":"Fish images","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/Fish+Dataset","url_type":"external","image":"./fish_image.png","size":"512MB","_images":["/_next/static/images/fish_image-a252b1d6bfcba0ada070ce6af73fc92b.png.webp","/_next/static/images/fish_image-d74fa618dc09390794a0785ebede2291.png"],"src":"/content/legacy_datasets/fish_image.md","id":"fish-image","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/floor-and-lawn.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/floor-and-lawn.json new file mode 100644 index 0000000000..afcf7014c9 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/floor-and-lawn.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Floor

\n

Vision datasets gathered within a townhouse (Indooroopilly, Brisbane) and a suburban backyard (Gaythorne, Brisbane) in varying conditions over the same area: one set during the day and one at night. The dataset includes all the extracted frames, as well as a text document containing their ground-truthed locations. The dataset was used in a paper accepted to ICRA 2016:

\n

J. Mount, M. Milford, \"2D Vision Place Recognition for Domestic Service Robots at Night\", in IEEE International Conference on Robotics and Automation, Stockholm, Sweden, 2016.

\n

The code used to compare images and perform place recognition is also contained within the files.

\n

If you use this dataset, or the provided code, please cite the above paper.

\n","name":"Day-night vacuum-cleaner robot and lawn datasets","type":"dataset","url":"https://mega.nz/#!ZUVhjCjK!E5vxmbVDwo18_bVkuz5vVMV_5Fiu3GJo9M0Z8YUufNs","image":"./floor_and_lawn.jpg","size":"400MB","_images":["/_next/static/images/floor_and_lawn-17b4af12dd8360d83e5f91a61c0f043e.jpg.webp","/_next/static/images/floor_and_lawn-93eb7282e76ae159b32f1cdb983f4317.jpg"],"src":"/content/legacy_datasets/floor_and_lawn.md","id":"floor-and-lawn","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/gold-coast-drive.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/gold-coast-drive.json new file mode 100644 index 0000000000..1d33a45e39 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/gold-coast-drive.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Gold

\n

Description:

\n

GoPro vision-only dataset gathered along an approximately 87 km drive from Brisbane to the Gold Coast in sunny weather (no ground truth, but a reference trajectory is provided in the image on the left). It captures lots of varied traffic conditions, along with some interesting pedestrian and dangerous driving situations.

\n

Settings: 1080p 30 fps wide FOV setting on a GoPro 4 Silver.

\n

Download links for the full video, a low-resolution highly compressed version, and a short sample segment are available through the button above.

\n\n

\"Gold

\n

Paper reference:

\n

If you use this dataset, please cite the below paper:

\n

Michael Milford, Chunhua Shen, Stephanie Lowry, Niko Suenderhauf, Sareh Shirazi, Guosheng Lin, Fayao Liu, Edward Pepperell, Cesar Lerma, Ben Upcroft, Ian Reid, \"Sequence Searching With Deep-Learnt Depth for Condition- and Viewpoint-Invariant Route-Based Place Recognition\", in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2015, pp. 18-25.

\n

Paper web link:

\n

http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W11/html/Milford_Sequence_Searching_With_2015_CVPR_paper.html

\n","name":"Gold Coast drive","type":"dataset","url":[{"name":"Full video","url":"https://mega.nz/#!4NFzmJyI!fkgjjEN-OJ9ceYRtUMT5VXYVHo8GhakpbuIs-Ih5FjE","size":"15.24GB"},{"name":"Low resolution, highly compressed video","url":"https://mega.nz/#!EUlmQRqD!pOD6Ob7i2G5SDdmC7cvcjBK0K4cxx-drjHlFqWhBMgo","size":"257.2MB"},{"name":"Short sample segment (~314MB)","url":"https://mega.nz/#!1YUHzTIR!46f0xwKy57_9Zdbay466u-vWkMUgIJjgPbJW5lqLjyQ","size":"314.8MB"}],"url_type":"list","size":"15.8GB","image":"./gold_coast_sample.jpg","_images":["/_next/static/images/gold_coast_sample-5c2190ed72cc8f3cb58d4a4d8c6f1e66.jpg.webp","/_next/static/images/gold_coast_sample-403d54f648160fcbabca61f64c22ba22.jpg"],"src":"/content/legacy_datasets/gold_coast_drive.md","id":"gold-coast-drive","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/indoor-level-7.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/indoor-level-7.json new file mode 100644 index 0000000000..5d3fada407 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/indoor-level-7.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Overview

\n

A vision dataset taken on Level 7 of S-Block at the QUT Gardens Point campus. The data contains stereo images, laser data and wheel odometry, in addition to secondary data such as camera calibrations and transforms between sensors. The data was collected in a single continuous run over the level with the Guiabot platform under manual control.

\n

Full details of how to use the dataset and individual download links are available from:

\n

https://wiki.qut.edu.au/display/cyphy/Indoor+Level+7+S-Block+Dataset

\n","name":"Indoor Level 7 S-Block","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/Indoor+Level+7+S-Block+Dataset","url_type":"external","image":"./indoor_level_7.png","size":"42.2GB","_images":["/_next/static/images/indoor_level_7-5206991317d188dcc5c74a9fcb49af3a.png.webp","/_next/static/images/indoor_level_7-0af1bc519fe9a2884ef2d8b0423c7c55.png"],"src":"/content/legacy_datasets/indoor_level_7.md","id":"indoor-level-7","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kagaru.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kagaru.json new file mode 100644 index 0000000000..09e366294b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kagaru.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Overview

\n

A vision dataset gathered from a radio-controlled aircraft flown at Kagaru, Queensland, Australia on 31/08/10. The data consists of visual data from a pair of downward-facing cameras, translation and orientation information as ground truth from an XSens Mti-g INS/GPS, and additional information from a USB NMEA GPS. The dataset traverses farmland and includes views of grass, an air-strip, roads, trees, ponds, parked aircraft and buildings.

\n

Please see the author's page for up-to-date details and documentation on the dataset:

\n

https://michaelwarren.info/docs/datasets/kagaru-airborne-stereo/

\n","name":"Kagaru Airborne Vision","type":"dataset","url":"https://michaelwarren.info/docs/datasets/kagaru-airborne-stereo/","url_type":"external","image":"./kagaru.png","size":"22.9GB","_images":["/_next/static/images/kagaru-f2c2fb6210e37c5a307667580f126176.png.webp","/_next/static/images/kagaru-a9f63225cd629de63260ecebd1aadf5c.png"],"src":"/content/legacy_datasets/kagaru.md","id":"kagaru","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kitti-semantics.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kitti-semantics.json new file mode 100644 index 0000000000..28eaae70f5 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kitti-semantics.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Example

\n

The dataset contains 41 original KITTI images and the corresponding manually annotated semantic labels. The labelled data is provided as a Matlab .mat file, with each entry in the array corresponding to a class label.
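A minimal sketch of reading one of the label files in Python with SciPy is shown below; the filename and the variable name inside the .mat file are assumptions, so inspect the file's keys to find the actual one.

```python
# Hedged sketch: load a per-pixel label array from one of the .mat files.
# "000000_labels.mat" is a placeholder filename.
from scipy.io import loadmat
import numpy as np

data = loadmat("000000_labels.mat")
keys = [k for k in data.keys() if not k.startswith("__")]  # skip MATLAB metadata
print("variables in file:", keys)

labels = np.asarray(data[keys[0]])   # assumed to be the per-pixel class indices
print("label array shape:", labels.shape)
print("classes present:", np.unique(labels))
```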

\n

Full details of how to use the dataset and individual download links are available from:

\n

https://wiki.qut.edu.au/display/cyphy/KITTI+Semantic+Labels

\n","name":"KITTI images with semantic labels","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/KITTI+Semantic+Labels","url_type":"external","image":"./kitti_semantics.png","size":"31.8MB","_images":["/_next/static/images/kitti_semantics-f17afe36ed6d7a3faa570f932450244d.png.webp","/_next/static/images/kitti_semantics-b99130453265bfc7fe08428ef322d4ac.png"],"src":"/content/legacy_datasets/kitti_semantics.md","id":"kitti-semantics","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/low-light.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/low-light.json new file mode 100644 index 0000000000..9ccd204c39 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/low-light.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Example

\n

For 28 objects (22 within the ImageNet class set and 6 within the PASCAL VOC class set), a set of raw images (DNG format) has been obtained under a variety of lighting conditions (1-40 lx), ISO settings (3200-409600) and exposure times (1/8000-1/10 s), for comparing the influence of demosaicing techniques on feature point detectors and CNNs in low light and with noise. Each object set has a reference image captured at ~380 lx. All images were captured with a Sony α7s in a dark room with controlled lighting.
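As an illustration of working with the raw captures, the sketch below demosaics a single DNG with the rawpy package (a LibRaw wrapper); the filename and the post-processing options are assumptions, and comparing such choices is precisely what the dataset is intended for.

```python
# Hedged sketch: demosaic one raw DNG capture and save it as a PNG.
# "object01_iso3200_0001.dng" is a placeholder filename.
import rawpy
import imageio

with rawpy.imread("object01_iso3200_0001.dng") as raw:
    rgb = raw.postprocess(use_camera_wb=True,   # use the camera's white balance
                          no_auto_bright=True,  # keep the recorded exposure
                          output_bps=8)         # 8-bit output

imageio.imwrite("object01_demosaiced.png", rgb)
print(rgb.shape, rgb.dtype)
```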

\n

Presented in:

\n

D. Richards, J. Sergeant, M. Milford, P. Corke, \"Seeing in the Dark: the Demosaicing Difference\", IEEE Conference on Computer Vision and Pattern Recognition [under review], 2017.

\n","name":"Raw image low-light object dataset","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/index.php/s/gdJNon8OdEnQeXU/download","image":"./low_light.jpg","size":"23.3GB","_images":["/_next/static/images/low_light-e29b3b1c358fbb7bb15cf1075b2f1066.jpg.webp","/_next/static/images/low_light-5d50fd52e0698697f62132897dbf6bfa.jpg"],"src":"/content/legacy_datasets/low_light.md","id":"low-light","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/mt-cootha-day-night.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/mt-cootha-day-night.json new file mode 100644 index 0000000000..0b6d5925b8 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/mt-cootha-day-night.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Mt

\n

Description:

\n

Mt Cootha circuit day- and night-time laps with a Sony A7s camera (note this is a later dataset, not the original used in the paper below).

\n

The original datasets for this paper were recorded using a Nikon D5100 with long exposures.

\n

Download links for all of the datasets can be accessed using the button above.

\n

Paper reference:

\n

If you use this dataset, please cite the below paper:

\n

Milford, Michael, Turner, Ian, & Corke, Peter (2013) Long exposure localization in darkness using consumer cameras. In Vincze, M (Ed.) Proceedings of the 2013 IEEE International Conference on Robotics and Automation (ICRA). Institute of Electrical and Electronic Engineers (IEEE), United States, pp. 3755-3761.

\n","name":"Mt Cootha Day and Night Drives","type":"dataset","url":[{"name":"Daytime drive recorded on a Sony A7s","url":"https://mega.nz/#!1JVy0CxQ!kKxjcJB6Ma5ML4ERJZzWv3AcFV9j-V3vMpbZLX68JqM","size":"785.9MB"},{"name":"Nighttime drive recorded on a Sony A7s","url":"https://mega.nz/#!dAVG1KJD!2GHfMxj_kUiALfCjGDGH8ERQZO1qNmRaqNUHHTK5Pmo","size":"489.2MB"},{"name":"Daytime drive recorded on a Nikon D5100","url":"https://mega.nz/#!8Fdgkabb!S_FFvCmuH3RvebV9NBx5m28o8PMOp1eBwRVW0-LVcb4","size":"566.5MB"},{"name":"Nighttime drive recorded on a Niko D5100","url":"https://mega.nz/#!dQEQRSRI!cm_Xgm1ceGbHara8xX4vbn3X5gYtpPXamtS5WzABfJk","size":"1.11GB"}],"url_type":"list","image":"./mt_cootha.jpg","size":"2.8GB","_images":["/_next/static/images/mt_cootha-bd14dc6ef8b1122300e0b93391b69052.jpg.webp","/_next/static/images/mt_cootha-237af78c49ccc8dd88c237a761fbbfe0.jpg"],"src":"/content/legacy_datasets/mt_cootha_day_night.md","id":"mt-cootha-day-night","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multilane-sideways.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multilane-sideways.json new file mode 100644 index 0000000000..96db179564 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multilane-sideways.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Example

\n

We provide two vehicular datasets, acquired in Queensland, Australia, each consisting of multiple passes in different lanes across day and night on approximately 4 km, 4-lane, bidirectional road sections, divided by median strips. The first dataset was collected along the Gold Coast Highway in Palm Beach (\"highway\") and the second was collected along Christine Avenue in Robina and nearby suburbs (\"suburban\").

\n

The dataset can be downloaded using the button above, with further information contained in the Readme.txt.

\n","name":"Multi-Lane Road Sideways-Camera Datasets","type":"dataset","url":"https://mega.co.nz/#F!8FEQiDzC!B57HWocLSlSjgSDqNjEWvQ","image":"./multilane_sideways.png","size":"761MB","_images":["/_next/static/images/multilane_sideways-4450e7f81fc4a229197607668cd4b881.png.webp","/_next/static/images/multilane_sideways-640c09802edf13b2e1a9c6b032945c13.png"],"src":"/content/legacy_datasets/multilane_sideways.md","id":"multilane-sideways","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multimodal-rock-surface.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multimodal-rock-surface.json new file mode 100644 index 0000000000..7e1a65c78d --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multimodal-rock-surface.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Example

\n

Presented with the permission of NASA's Jet Propulsion Laboratory (JPL) in:

\n

J. Sergeant, G. Doran, D. R. Thompson, C. Lehnert, A. Allwood, B. Upcroft, M. Milford, \"Towards Multimodal and Condition-Invariant Vision-based Registration for Robot Positioning on Changing Surfaces,\" Proceedings of the Australasian Conference on Robotics and Automation, 2016.

\n

Further referenced in:

\n

J. Sergeant, G. Doran, D. R. Thompson, C. Lehnert, A. Allwood, B. Upcroft, M. Milford, \"Appearance-Invariant Surface Registration for Robot Positioning,\" International Conference on Robotics and Automation 2017, 2017.

\n

Associated code for the above papers can be obtained at the following repository:

\n

https://github.com/jamessergeant/seqreg_tpp.git

\n","name":"Multimodal Rock Surface Images","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/index.php/s/nX1rhsKMehp1h6N/download","image":"./multimodal_rock_surface.png","size":"2.0GB","_images":["/_next/static/images/multimodal_rock_surface-f19b647fbcffec77c0dc67dd2eef1984.png.webp","/_next/static/images/multimodal_rock_surface-75e156a88b63a6a1e4a9cb862b891742.png"],"src":"/content/legacy_datasets/multimodal_rock_surface.md","id":"multimodal-rock-surface","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/night-time-drive.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/night-time-drive.json new file mode 100644 index 0000000000..7ac91c1e15 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/night-time-drive.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Night

\n

Description:

\n

Night-time ~13 km drive in Brisbane with a Sony A7s camera mounted on the roof. A mixture of highway and suburban driving, with some light traffic and stop-start driving at traffic lights.

\n

Settings: 1080p 25 fps.

\n

Download links for both the full video and a highly compressed version are available through the button above.

\n

\"Night

\n

Paper reference:

\n

If you use this dataset, please cite the below paper:

\n

Michael Milford, Chunhua Shen, Stephanie Lowry, Niko Suenderhauf, Sareh Shirazi, Guosheng Lin, Fayao Liu, Edward Pepperell, Cesar Lerma, Ben Upcroft, Ian Reid, \"Sequence Searching With Deep-Learnt Depth for Condition- and Viewpoint-Invariant Route-Based Place Recognition\", in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2015, pp. 18-25.

\n

Paper web link:

\n

http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W11/html/Milford_Sequence_Searching_With_2015_CVPR_paper.html

\n","name":"Night time drive","type":"dataset","url":[{"name":"Full video","url":"https://mega.nz/#!IY90HQJY!HQrsdXRLN6FkeLpBIrPDZ3xX6k2ajaKO7OUbzpG7AzM","size":"2.50GB"},{"name":"Low resolution, highly compressed video","url":"https://mega.nz/#!lF90UBZS!Dhyt-DiY4PfuGB-HXG4XAjhMGu5rP0NRYJyrprIoBrA","size":"65.7MB"}],"url_type":"list","size":"2.57GB","image":"./night_drive_sample.jpg","_images":["/_next/static/images/night_drive_sample-ec428d6b27967b4c1bd2738263c5c4ce.jpg.webp","/_next/static/images/night_drive_sample-0be92f8f70cbdd291f029a387cac0565.jpg"],"src":"/content/legacy_datasets/night_time_drive.md","id":"night-time-drive","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/openratslam.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/openratslam.json new file mode 100644 index 0000000000..3dcd845ce5 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/openratslam.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

A ROS and OpenCV version of RatSLAM is available at: Google Code

\n

Please see the following paper for more information about openRatSLAM. We would appreciate a citation if you use the code.

\n

David Ball, Scott Heath, Janet Wiles, Gordon Wyeth, Peter Corke, Michael Milford (2013) OpenRatSLAM: an open source brain-based SLAM system. Autonomous Robots

\n

We provide the following datasets here, which can all be downloaded via the button above.

\n

iRat 2011 Australia

\n

The iRat - intelligent Rat animat technology

\n

St Lucia 2007

\n

See the YouTube video describing this dataset.

\n

Oxford's New College 2008

\n

We have re-encoded Oxford's dataset (available here) into a rosbag file. The rosbag file only includes the odometry and the panoramic image sensor data. The odometry has been integrated to match the rate (3 Hz) of the panoramic images. Note that this re-encoded dataset file has been created without permission and is maintained purely by us.
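A minimal sketch of iterating over the bag with the ROS 1 rosbag Python API is given below; the bag filename and the odometry topic name are assumptions, so check them first with `rosbag info`.

```python
# Hedged sketch: read odometry messages out of the re-encoded rosbag.
# "new_college_2008.bag" and the "odom" topic suffix are assumptions; this also
# assumes the odometry is published as nav_msgs/Odometry.
import rosbag

with rosbag.Bag("new_college_2008.bag") as bag:
    for topic, msg, t in bag.read_messages():
        if topic.endswith("odom"):
            p = msg.pose.pose.position
            print(f"{t.to_sec():.3f}  x={p.x:.2f}  y={p.y:.2f}")
```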

\n

If you use this dataset, please reference the original paper:

\n

Mike Smith, Ian Baldwin, Winston Churchill, Rohan Paul, Paul Newman (2009) The New College Vision and Laser Data Set. The International Journal of Robotics Research. 28:5

\n","name":"OpenRATSLAM","type":"dataset","url":[{"name":"iRat 2011 Australia","url":"https://mega.co.nz/#!FAlXyZbB!6rMpQ6EE4LQIKmZvy5zN7Stdu4pIzZm2h3TnHkG2wms","size":"861MB"},{"name":"St Lucia 2007","url":"https://mega.co.nz/#!od8xVbKJ!E81hKj-M1-CybBkX1dLe3htAJw-gP9MAQIEeZkPwuUY","size":"2.31GB"},{"name":"Oxford's New College 2008","url":"https://mega.co.nz/#!oJdwxTAJ!EB-M_gLWq8Sy2uFvmER-D_uTZ7_Rd4v-5ZUhu1YGNCQ","size":"1.18GB"}],"url_type":"list","size":"3.6GB","image":"/qcr_logo_light_filled.svg","_images":["/qcr_logo_light_filled.svg"],"src":"/content/legacy_datasets/openratslam.md","id":"openratslam","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_bene_dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_bene_dataset.json new file mode 100644 index 0000000000..74c2111ff8 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_bene_dataset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

The RT-BENE dataset is a new open-source dataset with annotations of the eye openness of more than 200,000 eye images, including more than 10,000 images where the eyes are closed. We annotate the RT-GENE dataset, which was proposed for gaze estimation, with blink labels. We define open eyes as images where at least some part of the sclera (the white part of the eye) or the pupil is visible. Closed eyes are those where the eyelids are fully closed. The uncertain category is used when the image cannot be clearly grouped into one of the other categories, due to e.g. extreme head poses, or when the two annotators labelled the image differently. Using this approach, we labelled a total of 243,714 images: 218,548 where the eyes are open, 10,444 where the eyes are closed, and 14,722 uncertain.

\n

\"RT-BENE

\n

The work in this project was done within the Personal Robotics Lab at Imperial College London.

\n","name":"RT-BENE: Real-Time Blink Estimation in Natural Environments Dataset","type":"dataset","url":"https://zenodo.org/record/3685316","url_type":"external","size":"600MB","id":"rt_bene_dataset","image":"repo:Tobias-Fischer/rt_gene/assets/rt_bene_labels.png","image_fit":"contain","_images":["/_next/static/images/rt_bene_labels-f79290be354a9a6ea6dfa387d60da1c1.png.webp","/_next/static/images/rt_bene_labels-4ac642446c5fd65a3d20b2b46f856cdc.png"],"src":"/content/rt-gene/rt-bene-dataset.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_gene_dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_gene_dataset.json new file mode 100644 index 0000000000..71b71d3db6 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_gene_dataset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

The RT-GENE dataset is a novel dataset of varied gaze and head pose images in a natural environment, addressing the issue of ground truth annotation by measuring head pose using a motion capture system and eye gaze using mobile eyetracking glasses. We apply semantic image inpainting to the area covered by the glasses to bridge the gap between training and testing images by removing the obtrusiveness of the glasses. The proposed RT-GENE dataset contains recordings of 15 participants (9 male, 6 female, 2 participants recorded twice), with a total of 122,531 labeled training images and 154,755 unlabeled images of the same subjects where the eyetracking glasses are not worn.

\n

\"RT-GENE

\n

The work in this project was done within the Personal Robotics Lab at Imperial College London.

\n","name":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Dataset","type":"dataset","url":"https://zenodo.org/record/2529036","url_type":"external","size":"45GB","id":"rt_gene_dataset","image":"repo:Tobias-Fischer/rt_gene/assets/dataset_figure.jpg","image_fit":"contain","_images":["/_next/static/images/dataset_figure-5572954dcf83fca94ae80fa38a0f36ab.jpg.webp","/_next/static/images/dataset_figure-024bff6ee75c09b3b9afd020a4e1467b.jpg"],"src":"/content/rt-gene/rt-gene-dataset.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/st-lucia-multiple.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/st-lucia-multiple.json new file mode 100644 index 0000000000..1f5be0662c --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/st-lucia-multiple.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Overview

\n

A vision dataset of a single route through the suburb of St Lucia, Queensland, Australia. The visual data was collected with a forward facing webcam attached to the roof of a car. The route was traversed at five different times during the day to capture the difference in appearance between early morning and late afternoon. The route was traversed again, another five times, two weeks later for a total of ten datasets. GPS data is included for each dataset.

\n

Full details of how to use the dataset and individual download links are available from:

\n

https://wiki.qut.edu.au/display/cyphy/St+Lucia+Multiple+Times+of+Day

\n","name":"St Lucia multiple times of day","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/St+Lucia+Multiple+Times+of+Day","url_type":"external","image":"./st_lucia_multiple.jpg","size":"1.46GB","_images":["/_next/static/images/st_lucia_multiple-2cbaafdfc8e48c3435bf99f7dd1664c7.jpg.webp","/_next/static/images/st_lucia_multiple-a633d97afbaa94da3e29001a6583b448.jpg"],"src":"/content/legacy_datasets/st_lucia_multiple.md","id":"st-lucia-multiple","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/trip-hazards.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/trip-hazards.json new file mode 100644 index 0000000000..8f773c4326 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/trip-hazards.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Example

\n

Contains RGB, depth and HHA images of a construction site with the trip hazards labelled.

\n

The dataset spans 2,000 m² of construction site over four floors and contains ~629 trip hazards.

\n

Presented in paper under review:

\n

McMahon, S., Sünderhauf, N., Upcroft, B. & Milford, M. (2017). Trip Hazard Detection On Construction Sites Using Colour and Depth Information. Submitted to the International Conference on Intelligent Robots and Systems (IROS) with RAL option, 2017.

\n","name":"Trip Hazards on a Construction Site","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/index.php/s/kVAh7G8V4mwdtp4/download","image":"./trip_hazards.png","size":"339MB","_images":["/_next/static/images/trip_hazards-c2e22a1e003c0abefb7e1182008b7d5d.png.webp","/_next/static/images/trip_hazards-0fb1c5950e6df6a57f0abb9f71e7b113.png"],"src":"/content/legacy_datasets/trip_hazards.md","id":"trip-hazards","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/uq-st-lucia.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/uq-st-lucia.json new file mode 100644 index 0000000000..057c7c984b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/uq-st-lucia.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

\"Overview

\n

A vision dataset gathered from a car driven in a 9.5 km circuit around the University of Queensland's St Lucia campus on 15/12/10. The data consists of visual data from a calibrated stereo pair, translation and orientation information as ground truth from an XSens Mti-g INS/GPS, and additional information from a USB NMEA GPS. The dataset traverses local roads and encounters a number of varying scenarios, including roadworks, speed bumps, bright scenes, dark scenes, reverse traverses, a number of loop closure events, multi-lane roads, roundabouts and speeds of up to 60 km/h.

\n

Please see the author's page for up-to-date details and documentation on the dataset:

\n

https://michaelwarren.info/docs/datasets/uq-st-lucia-stereo-dataset/

\n","name":"UQ St Lucia Vision","type":"dataset","url":"https://michaelwarren.info/docs/datasets/uq-st-lucia-stereo-dataset/","url_type":"external","image":"./uq_st_lucia.jpg","size":"38.1GB","_images":["/_next/static/images/uq_st_lucia-1d565cc939abfe002a470df1354ac6e8.jpg.webp","/_next/static/images/uq_st_lucia-cd1a4c432374b14373c4c98eb02e8d7c.jpg"],"src":"/content/legacy_datasets/uq_st_lucia.md","id":"uq-st-lucia","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/vprbench.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/vprbench.json new file mode 100644 index 0000000000..a03ea0a2fa --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/vprbench.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"

List of Supported Datasets in VPR-Bench

\n
    \n
  1. ESSEX3IN1 [Zaffar et al; https://ieeexplore.ieee.org/document/9126220]
  2. Tokyo24/7 [R. Arandjelović et al; https://arxiv.org/abs/1511.07247]
  3. SPEDTest [Chen et al; https://ieeexplore.ieee.org/document/8421024]
  4. Synthia [Ros et al; https://ieeexplore.ieee.org/document/7780721]
  5. Nordland [Skrede et al; https://bit.ly/2QVBOym]
  6. Gardens Point [Glover et al; https://doi.org/10.5281/zenodo.4590133]
  7. INRIA Holidays [Jegou et al; https://lear.inrialpes.fr/pubs/2008/JDS08/jegou_hewgc08.pdf]
  8. Pittsburgh Query [R. Arandjelović et al; https://arxiv.org/abs/1511.07247]
  9. Cross-Seasons [Larsson et al; https://ieeexplore.ieee.org/document/8953253]
  10. Corridor [Milford et al; https://journals.sagepub.com/doi/abs/10.1177/0278364913490323]
  11. Living Room [Milford et al; https://ieeexplore.ieee.org/document/7487686]
  12. 17 Places [Sahdev et al; https://ieeexplore.ieee.org/document/7801503]
\n

These can be downloaded from here. More details for benchmarking can be found here. Please cite the original sources of the datasets when using them in your work.

\n

\"VPR-Bench

\n","name":"VPR-Bench","type":"dataset","url":"https://github.com/MubarizZaffar/VPR-Bench","size":"20GB","id":"vprbench","image":"repo:MubarizZaffar/VPR-Bench/VPRBench.jpg","image_fit":"contain","_images":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"src":"/content/visual_place_recognition/vprbench_dataset.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/index.json b/_next/data/jRfPhdat00YV9X7T_v1K6/index.json new file mode 100644 index 0000000000..2e6d0e8b4f --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/index.json @@ -0,0 +1 @@ +{"pageProps":{"featured":[{"linkUrl":"/collection/python_robotics","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Python Robotics","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/vpr_overview","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Visual Place Recognition","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/rt_gene_overview","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"primaryText":"RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/human-cues","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Human Cues for Robot Navigation","secondaryText":"Collection","secondaryTransform":"capitalize"}],"mostPopular":[{"linkUrl":"/collection/vpr_overview","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Visual Place Recognition","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/benchbot-bear-data","mediaPosition":"center","mediaUrls":["/_next/static/images/all_envs-55ef0a35e02b68a820d9940edf6a1521.png.webp","/_next/static/images/all_envs-7573d0362a6d5ba5fc5e45e2542e99b9.png"],"primaryText":"BenchBot Environments for Active Robotics 
(BEAR)","secondaryText":"15.9GB","secondaryTransform":"capitalize"},{"linkUrl":"/code/robotics-toolbox-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Robotics Toolbox Python","secondaryText":"petercorke/robotics-toolbox-python","secondaryTransform":"lowercase"},{"linkUrl":"/dataset/aircraft-collision-course","mediaPosition":"center","mediaUrls":["/_next/static/images/aircraft_collision_course-5837e09e1d9c74d5172247fc1e45d485.png.webp","/_next/static/images/aircraft_collision_course-39c4d558d7857ed871ee5625ead09fe7.png"],"primaryText":"Aircraft Collision Course Dataset","secondaryText":"43.1GB","secondaryTransform":"capitalize"},{"linkUrl":"/code/swift","mediaPosition":"center","mediaUrls":["/_next/static/images/panda-f1735ad2d702ae9c686b2f0e727e9941.png.webp","/_next/static/images/panda-c3722217e520e43c10f1bc26fffcd0fd.png"],"primaryText":"Swift","secondaryText":"jhavl/swift","secondaryTransform":"lowercase"},{"linkUrl":"/collection/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/vprbench","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"20GB","secondaryTransform":"capitalize"},{"linkUrl":"/code/vprbench","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"MubarizZaffar/VPR-Bench","secondaryTransform":"lowercase"},{"linkUrl":"/collection/python_robotics","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Python Robotics","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/code/abstract-map","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Abstract Map (Python)","secondaryText":"btalb/abstract_map","secondaryTransform":"lowercase"}],"mostRecent":[{"linkUrl":"/code/gtsam-quadrics","mediaPosition":"center","mediaUrls":["/_next/static/images/gtsam_quadrics-9ce945399d611f449b8df8e1db6602ae.png.webp","/_next/static/images/gtsam_quadrics-cb27c37d5d64abed2e30e1523a8cec1a.png"],"primaryText":"GTSAM extension for 
quadrics","secondaryText":"qcr/gtsam-quadrics","secondaryTransform":"lowercase"},{"linkUrl":"/code/quadricslam","mediaPosition":"center","mediaUrls":["/_next/static/images/quadricslam_video-412d8ad8190b4f7eee1320faf254cd6f.png.webp","/_next/static/images/quadricslam_video-a4d673ea6414754e153004c137d2a2c1.png"],"primaryText":"QuadricSLAM","secondaryText":"qcr/quadricslam","secondaryTransform":"lowercase"},{"linkUrl":"/code/ros-trees","mediaPosition":"center","mediaUrls":["/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.webm","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.mp4","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.webp","/_next/static/images/frankie-d932493db407ac66026b9fb5968df6f2.jpg"],"primaryText":"Behaviour trees for ROS","secondaryText":"qcr/ros_trees","secondaryTransform":"lowercase"},{"linkUrl":"/code/vpr_snn","mediaPosition":"center","mediaUrls":["/_next/static/images/Ens_of_modularSNNs-b59ff02969917c2eb544fd14a2014936.png.webp","/_next/static/images/Ens_of_modularSNNs-2e12118a078b9b819e6e9169d4994b74.png"],"primaryText":"Spiking Neural Networks for Visual Place Recognition","secondaryText":"QVPR/VPRSNN","secondaryTransform":"lowercase"},{"linkUrl":"/dataset/air-traffic-occupancy","mediaPosition":"center","mediaUrls":["/_next/static/images/airspace_traffic_occupation-d8931212d7eaeaba10c2acf785a0ecd9.jpg.webp","/_next/static/images/airspace_traffic_occupation-01dcbf21de2f822245f4bd51c364dc3a.jpg"],"primaryText":"Air Traffic Occupancy Data","secondaryText":"19.5MB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/aircraft-collision-course","mediaPosition":"center","mediaUrls":["/_next/static/images/aircraft_collision_course-5837e09e1d9c74d5172247fc1e45d485.png.webp","/_next/static/images/aircraft_collision_course-39c4d558d7857ed871ee5625ead09fe7.png"],"primaryText":"Aircraft Collision Course Dataset","secondaryText":"43.1GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/alderley","mediaPosition":"center","mediaUrls":["/_next/static/images/alderley-b50cf86288ad8d46bfd22a4d44388409.jpg.webp","/_next/static/images/alderley-1ca139fb61a2f7e5f85056bd73b7be49.jpg"],"primaryText":"Alderley Day and Night","secondaryText":"2.07GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/cbd-and-highway","mediaPosition":"center","mediaUrls":["/_next/static/images/cbd_and_highway-68b114bb9789803999a0af2fe0a97d91.png.webp","/_next/static/images/cbd_and_highway-6e93fb1dae7cf6034cc07b783b8ca033.png"],"primaryText":"CBD and Highway Datasets","secondaryText":"4.66GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/city-sunset","mediaPosition":"center","mediaUrls":["/_next/static/images/city_sunset_sample-7d8be3853972a74f7ab4885baa118dba.jpg.webp","/_next/static/images/city_sunset_sample-970f5a9952e2126c37c4a7235d8b48b1.jpg"],"primaryText":"City sunset drive","secondaryText":"9.8GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/day-night-lateral","mediaPosition":"center","mediaUrls":["/_next/static/images/day_night_lateral-56ae9615a767dd2fc33a0b2c257f9f28.jpg.webp","/_next/static/images/day_night_lateral-ece5db3367c8eae1fb94458fafcb8e99.jpg"],"primaryText":"Day and Night with Lateral Pose Change","secondaryText":"67.9MB","secondaryTransform":"capitalize"}],"codeCount":34,"datasetCount":26,"collectionCount":5},"__N_SSG":true} \ No newline at end of file diff --git a/_next/static/chunks/framework-5f4595e5518b5600.js b/_next/static/chunks/framework-5f4595e5518b5600.js new file mode 100644 index 0000000000..4792da3331 
--- /dev/null +++ b/_next/static/chunks/framework-5f4595e5518b5600.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[774],{4448:function(e,t,n){var r=n(7294),l=n(6086),a=n(3840);function o(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n