diff --git a/404/index.html b/404/index.html new file mode 100644 index 0000000000..27cff86b14 --- /dev/null +++ b/404/index.html @@ -0,0 +1,8 @@ +
~ Please see the abstract map site for further details about the research publication ~
\nThis repository contains the mobile application used by human participants in the zoo experiments described in our IEEE TCDS journal article. The app, created with Android Studio, includes the following:
\nThe project should be directly openable using Android Studio.
\nPlease keep in mind that this app was last developed in 2019, and Android Studio often introduces minor breaking changes with new versions. You will often have to tweak things like Gradle versions and syntax to get the project working with newer versions. Android Studio is, however, very good at pointing out where it sees errors and offering suggestions for how to resolve them.
\nOnce you have the project open, you should be able to compile the app and load it directly onto a device without issues.
\nThis work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics.
\nIf you use this software in your research, or for comparisons, please kindly cite our work:
\n@ARTICLE{9091567, \n author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}}, \n journal={IEEE Transactions on Cognitive and Developmental Systems}, \n title={Robot Navigation in Unseen Spaces using an Abstract Map}, \n year={2020}, \n volume={}, \n number={}, \n pages={1-1},\n keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},\n doi={10.1109/TCDS.2020.2993855},\n ISSN={2379-8939},\n month={},\n}\n
\n","name":"Android App for Human Participants","type":"code","url":"https://github.com/btalb/abstract_map_app","image":"./docs/abstract_map_app.gif","_images":["/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webm","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.mp4","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webp","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.jpg"],"src":"/content/human_cues/abstract-map-app.md","id":"abstract-map-app","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-simulator.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-simulator.json
new file mode 100644
index 0000000000..7cb608807e
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map-simulator.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"~ Please see the abstract map site for further details about the research publication ~
\nThis package contains everything needed to simulate the zoo experiments performed in our IEEE TCDS journal article. The package includes:
\nNote: this is just the simulator; to use the abstract map with the simulator please make sure you use the abstract_map package
\nClone the repo & install all Python dependencies:
\ngit clone https://github.com/btalb/abstract_map_simulator\npip install -r abstract_map_simulator/requirements.txt\n
\nAdd the new package to your ROS workspace at <ROS_WS>/
by linking in the cloned repository:
ln -s <LOCATION_REPO_WAS_CLONED_ABOVE> <ROS_WS>/src/\n
\nInstall all of the listed ROS dependencies, and build the package:
\ncd <ROS_WS>/src/\nrosdep install abstract_map_simulator\ncd <ROS_WS>\ncatkin_make\n
\nThis work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics.
\nIf you use this software in your research, or for comparisons, please kindly cite our work:
\n@ARTICLE{9091567, \n author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}}, \n journal={IEEE Transactions on Cognitive and Developmental Systems}, \n title={Robot Navigation in Unseen Spaces using an Abstract Map}, \n year={2020}, \n volume={}, \n number={}, \n pages={1-1},\n keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},\n doi={10.1109/TCDS.2020.2993855},\n ISSN={2379-8939},\n month={},\n}\n
\n","name":"2D Simulator for Zoo Experiments","type":"code","url":"https://github.com/btalb/abstract_map_simulator","image":"./docs/abstract_map_simulation.png","_images":["/_next/static/images/abstract_map_simulation-55e32b58dd5e4ed9caf7a85baf98677c.png.webp","/_next/static/images/abstract_map_simulation-3a9dbfc04fa16e80a961cec841d316fc.png"],"src":"/content/human_cues/abstract-map-simulator.md","id":"abstract-map-simulator","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map.json
new file mode 100644
index 0000000000..4b7b48c62d
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/abstract-map.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"~ Please see the abstract map site for further details about the research publication ~
\nThis repository provides the implementation of the abstract map used in our IEEE TCDS journal article. The implementation, done in Python, includes the following features:
\nPlease see our other related repositories for further resources, and related parts of the abstract map studies:
\nNote: if you wish to run this in simulation (significantly easier than on a real robot platform), you will also need the abstract_map_simulator package
\nClone the repo & install all Python dependencies:
\ngit clone https://github.com/btalb/abstract_map\npip install -r abstract_map/requirements.txt\n
\nAdd the new package to your ROS workspace at <ROS_WS>/
by linking in the cloned repository:
ln -s <LOCATION_REPO_WAS_CLONED_ABOVE> <ROS_WS>/src/\n
\nInstall all of the listed ROS dependencies, and build the package:
\ncd <ROS_WS>/src/\nrosdep install abstract_map\ncd <ROS_WS>\ncatkin_make\n
\nStart the experiment (this will try & launch the 2D simulation back-end by default, so make sure you have that installed if you are using it):
\nroslaunch abstract_map experiment.launch\n
\n(please see this issue for details if you are flooded with TF-based errors, which arguably shouldn't even be errors in the first place)
\nIn another terminal, start the hierarchy publisher to give the abstract map the contextual symbolic spatial information to begin with:
\nrosrun abstract_map hierarchy_publisher\n
\nThis will use the hierarchy available in ./experiments/zoo_hierarchy.xml
by default. Feel free to make your own if you would like to do different experiments.
Start the visualiser in preparation for the experiment (pick either light or dark mode with one of the two commands):
\nrosrun abstract_map visualiser\n
\nrosrun abstract_map visualiser --dark\n
\nFinally, start the abstract map with a goal, and watch it attempt to complete the navigation task:
\nroslaunch abstract_map abstract_map.launch goal:=Lion\n
\nIf you want to manually drive the robot around and observe how the abstract map evolves over time, you can run the above command without a goal to start in \"observe mode\".
\nThis work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics.
\nIf you use this software in your research, or for comparisons, please kindly cite our work:
\n@ARTICLE{9091567, \n author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}}, \n journal={IEEE Transactions on Cognitive and Developmental Systems}, \n title={Robot Navigation in Unseen Spaces using an Abstract Map}, \n year={2020}, \n volume={}, \n number={}, \n pages={1-1},\n keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},\n doi={10.1109/TCDS.2020.2993855},\n ISSN={2379-8939},\n month={},\n}\n
\n","name":"Abstract Map (Python)","type":"code","url":"https://github.com/btalb/abstract_map","image":"./docs/assets/images/abstract_map_in_action.png","_images":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"src":"/content/human_cues/abstract-map.md","id":"abstract-map","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/armer.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/armer.json
new file mode 100644
index 0000000000..d246ac3289
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/armer.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"Armer aims to provide an interface layer between the hardware drivers of\na robotic arm giving the user control in several ways:
\nIn addition to providing multiple control methods, Armer is designed to be a compatibility layer, allowing the user to run the same code across different robotic platforms. Armer supports control of both physical and simulated arms, giving users the ability to develop even without access to a physical manipulator.
\nBelow is a gif of 3 different simulated arms moving with the same Cartesian velocity commands.
\n\nSeveral ROS action servers, topics and services are set up by Armer\nto enable this functionality. A summary of these can be found\nhere.
\nArmer is built on the Python Robotics Toolbox (RTB) and requires a URDF-loaded RTB model to calculate the required movement kinematics. RTB comes with the browser-based simulator Swift, which Armer uses as an out-of-the-box simulator.
\nDue to these supporting packages, using Armer with a manipulator has several requirements:
\nCopy and paste the following code snippet into a terminal to create a new catkin workspace and install Armer into it. Note that this script will also add the workspace so that it is sourced every time a bash terminal is opened. If RoboStack is preferred, please follow the steps in the next section.
\n# Install pip \nsudo apt install python3-pip\n\n# Make the workspace and clone armer and armer_msgs packages\nmkdir -p ~/armer_ws/src && cd ~/armer_ws/src \ngit clone https://github.com/qcr/armer.git && git clone https://github.com/qcr/armer_msgs \n\n# Install all required packages\npip install -r ~/armer_ws/src/armer/requirements.txt\ncd .. && rosdep install --from-paths src --ignore-src -r -y \n\n# Make and source the workspace \ncatkin_make \necho \"source ~/armer_ws/devel/setup.bash\" >> ~/.bashrc \nsource ~/armer_ws/devel/setup.bash\necho \"Installation complete!\"\n
\nTo enable easy use of ROS on these operating systems, it is recommended to use RoboStack; note that ROS 1 (noetic) is recommended at this stage. Please ensure you have mamba installed before proceeding. Please follow all required steps for the RoboStack install (as per their instructions) to enable the smoothest setup on your particular OS.
\n# --- Mamba Environment Setup --- #\n# Create and activate a new robostack (ros-env) environment\nmamba create -n ros-env ros-noetic-desktop python=3.9 -c robostack-staging -c conda-forge --no-channel-priority --override-channels\nmamba activate ros-env\n\n# Install some compiler packages\nmamba install compilers cmake pkg-config make ninja\n\n# FOR WINDOWS: Install the Visual Studio command prompt - if you use Visual Studio 2022\nmamba install vs2022_win-64\n\n# --- ARMer Setup --- #\n# Make the armer workspace and clone in armer and armer_msgs packages\n# FOR LINUX/MACOS\nmkdir -p ~/armer_ws/src && cd ~/armer_ws/src \n# FOR WINDOWS: Assumes you are in the home folder\nmkdir armer_ws\\src && cd armer_ws\\src\n# Clone in armer and armer_msgs\ngit clone https://github.com/qcr/armer.git && git clone https://github.com/qcr/armer_msgs \n# Install all required packages (into ros-env) - from current directory\n# FOR LINUX/MACOS\npip install -r armer/requirements.txt\n# FOR WINDOWS\npip install -r armer\\requirements.txt\n# Enter armer_ws folder and run rosdep commands\ncd .. && rosdep init && rosdep update && rosdep install --from-paths src --ignore-src -r -y \n\n# Make and source the workspace (including environment)\ncatkin_make \n\n# --- Default Activation of Environment --- #\n# FOR LINUX \necho \"mamba activate ros-env\" >> ~/.bashrc\n\n# FOR MACOS\necho \"mamba activate ros-env\" >> ~/.bash_profile\n\n# --- Workspace Source --- #\nsource ~/armer_ws/devel/setup.bash\n
\nArmer relies on the manipulator's ROS driver to communicate with the low-level hardware, so the ROS drivers must be started alongside Armer. NOTE: the below packages are required for control of a real robot - see below for simulation usage instructions.
\nCurrently, Armer provides packages that bundle together the launch of Armer and the target manipulator's drivers. If your arm model has a hardware package, control should be a fairly plug-and-play experience (an experience we are still working on, so please let us know if it isn't). Below are the GitHub pages for arms with hardware packages. Install directions can be found on their respective pages.
\n[Available] Franka Panda: https://github.com/qcr/armer_panda
\n[Available] Universal Robot (UR3/UR5/UR10/UR10e): https://github.com/qcr/armer_ur
\n[In Progress] UFactory XArm (6): https://github.com/qcr/armer_xarm
\n[In Progress] ABB IRB6700: https://github.com/qcr/armer_abb
\nFor more information on setting up manipulators not listed here see the Armer documentation, Supported Arms.
\nThe Armer interface can be launched with the following command for simulation (note, please replace USER with your own username):
\n\n\n\n# Example is using the panda_sim.yaml. Note, please update the below path if the install directory is different\nroslaunch armer armer.launch config:=/home/$USER/armer_ws/src/armer/cfg/panda_sim.yaml\n
Alternatively, the Armer interface can be launched for a real robot using the following command (Note that this can also support simulation if you wish via the sim parameter):
\n\n\n\n# Note this example launches the panda model in simulation mode (assumes you have this package cloned, see above)\nroslaunch armer_panda robot_bringup.launch sim:=true\n
After launching, an arm can be controlled in several ways. Some quick tutorials can be referenced below:
\nFor more information and examples see the Armer\ndocumentation
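\nAs a concrete illustration of what driving an arm through Armer can look like, the sketch below streams a Cartesian velocity command from Python using standard ROS messages. The topic name (/arm/cartesian/velocity) and message type are assumptions based on a typical Armer configuration; check the Armer documentation for the exact channels your setup exposes.
# Minimal velocity-control sketch; the topic name below is an assumption
import rospy
from geometry_msgs.msg import TwistStamped

rospy.init_node('armer_velocity_example')
pub = rospy.Publisher('/arm/cartesian/velocity', TwistStamped, queue_size=1)

rate = rospy.Rate(100)  # velocity commands are streamed continuously
while not rospy.is_shutdown():
    msg = TwistStamped()
    msg.header.stamp = rospy.Time.now()
    msg.twist.linear.z = 0.05  # raise the end-effector at 5 cm/s
    pub.publish(msg)
    rate.sleep()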
\n","name":"Armer Driver","type":"code","url":"https://github.com/qcr/armer","image":"https://github.com/qcr/armer/wiki/armer_example.gif","_images":["/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.webm","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.mp4","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.webp","/_next/static/images/armer_example-ff4e12b2ac663fa5fb394397d23d2681.jpg"],"src":"/content/armer.md","id":"armer","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-addons.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-addons.json new file mode 100644 index 0000000000..130afa3d4e --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/benchbot-addons.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation (although it can be installed independently through pip if desired). For a working BenchBot system, please install the BenchBot software stack by following the instructions here.
\nThe BenchBot Add-ons Manager allows you to use BenchBot with a wide array of additional content, and customise your installation to suit your needs. Semantic Scene Understanding not your thing? Install the Semantic Question Answering add-ons instead. Want to create your own content? Write some basic YAML files to make your own add-ons. Need to re-use existing content? Simply include a dependency on that add-on. Add-ons are all about making BenchBot whatever you need it to be—build a BenchBot for your research problems, exactly as you need it.
\nAdd-ons come in a variety of types. Anything that you may need to customise for your own experiments or research should be customisable through an add-on. If not, let us know, and we'll add more add-on-enabled functionality to BenchBot!
\nThe currently supported types of add-ons are:
\nbenchbot_batch
script. See the sections below for details of how to interact with installed add-ons, how to create your own add-ons, and formalisation of what's required in an add-on.
\nIn general, you won't use the add-ons manager directly. Instead you interact with the BenchBot software stack, which uses the add-ons manager to manage and access add-ons.
\nIf you do find you want to use the manager directly, it is a Python package installable with pip. Run the following in the root directory where the repository was cloned:
\nu@pc:~$ pip install .\n
\nThe manager can then be imported and used to manage installation, loading, accessing, processing, and updating of add-ons. Some samples of supported functionality are shown below:
\nfrom benchbot_addons import manager as bam\n\n# Check if example with 'name' = 'hello_scd' exists\nbam.exists('examples', [('name', 'hello_scd')])\n\n# Find all installed environments\nbam.find_all('environments')\n\n# Get a list of the names for all installed tasks\nbam.get_field('tasks', 'name')\n\n# Get a list of (name, variant) pairs for all installed environments\nbam.get_fields('environments', ['name', 'variant'])\n\n# Find a robot with 'name' = 'carter'\nbam.get_match('robots', [('name', 'carter')])\n\n# Get the 'results_format' value for the task called 'scd:passive:ground_truth'\nbam.get_value_by_name('tasks', 'scd:passive:ground_truth', 'results_format')\n\n# Load YAML data for all installed ground truths\nbam.load_yaml_list(bam.find_all('ground_truths', extension='json'))\n\n# Install a list of comma-separated add-ons\nbam.install_addons('benchbot-addons/ssu,benchbot-addons/sqa')\n\n# Install a specific add-on (& it's dependencies)\nbam.install_addon('tasks_ssu')\n\n# Print the list of currently installed add-ons, & officially available add-ons\nbam.print_state()\n\n# Uninstall all add-ons\nbam.remove_addons()\n\n# Uninstall a string separated list of add-ons\nbam.remove_addon('benchbot-addons/ssu,benchbot-addons/sqa')\n
\nAdd-ons are designed to make it easy to add your own content to a BenchBot installation. Local content goes in the \"local add-ons\" folder provided with your install. The location of this folder on your machine can be printed via the following:
\nfrom benchbot_addons import manager as bam\n\nprint(bam.local_addon_path())\n
\nBenchBot expects add-on content to be in named folders denoting the type of content. For example, robots must be in a folder called 'robots'
, tasks in a folder called 'tasks'
, and so on. A list of valid content types is available via the SUPPORTED_TYPES
field in the add-ons manager.
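\nFor instance, a quick way to check the recognised folder names is to print this field directly:
from benchbot_addons import manager as bam

# Folder names the add-ons manager recognises as valid content types
print(bam.SUPPORTED_TYPES)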
Below is an example of the process you would go through to create your own custom task locally:
\nu@pc:~$ python3 -c 'from benchbot_addons import manager as bam; print(bam.local_addon_path())'\n/home/ben/repos/benchbot/addons/benchbot_addons/.local/my_addons\n
\n/home/ben/repos/benchbot/addons/benchbot_addons/.local/my_addons/tasks/my_task.yaml
Done. Your new custom task should now be available for use in your BenchBot system (e.g. benchbot_run --list-tasks
).
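\nAs a sketch of what a minimal my_task.yaml might contain (the key names follow the task add-on specification later in this document; the channel and value names here are purely illustrative):
import yaml

# Illustrative content for tasks/my_task.yaml; all values are placeholders
my_task = {
    'name': 'my_task:passive:ground_truth',
    'actions': ['move_next'],
    'observations': ['image_rgb', 'image_depth', 'laser', 'poses'],
    'localisation': 'ground_truth',
    'description': 'A custom task created as a local add-on.',
}

with open('my_task.yaml', 'w') as f:
    yaml.safe_dump(my_task, f)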
Custom add-on content can be grouped together into an add-on package, of which there are two different types: 'official' and third-party.
\n'Official' packages are those we've verified, and are stored in our benchbot-addons GitHub organisation. You can get a full list of official add-on packages through the manager.official_addons()
helper function, or benchbot_install --list-addons
script in the BenchBot software stack.
Third-party add-on packages differ only in that we haven't looked at them, and they can be hosted anywhere on GitHub you please.
\nCreating an add-on package is exactly the same process for both; the only difference is whether the repository is inside or outside of the benchbot-addons GitHub organisation:
\nenvironments
directory at the root)..remote
metadata file described in the next section).dependencies*
filesNote: it's a good idea to only include one type of add-on per repository as it makes your add-on package more usable for others. It's not a hard rule though, so feel free to add multiple folders to your add-on if you require.
\nFeel free to have a look at any of the official add-ons for help and examples of how to work with add-ons.
\nHere are the technical details of what's expected in add-on content. The BenchBot system will assume these specifications are adhered to, and errors can be expected if you try to use add-ons that don't match the specifications.
\nAn add-on package has the following structure (technically none of the files are required, they just determine what functionality your add-on includes):
\nFilename | \nDescription | \n
---|---|
.dependencies | \nA list of add-on packages that must be installed with this package. Packages are specified by their GitHub identifier (i.e. github_username/repository_name ), with one per line | \n
.dependencies-python | \nA list of Python dependencies for your add-on. Syntax for file is exactly the same as requirements.txt files. | \n
.remote | \nSpecifies content that should be installed from a remote URL, rather than residing in this repository. A remote resource is specified as a URL and target directory separated by a space. One resource is specified per line. The add-ons manager will fetch the URL specified, and extract the contents to the target directory (e.g. http://myhost/my_content.zip environments ) | \n
<directory>/ | \nEach named directory corresponds to an add-on type described below. The directory will be ignored if its name doesn't exactly match any of those below. | \n
A YAML file, that must exist in a folder called batches
in the root of the add-on package (e.g. batches/my_batch.yaml
).
The following keys are supported for batch add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'name' | \nYes | \nA string used to refer to this batch (must be unique!). | \n
'environments' | \nYes | \nA list of environment strings of the format 'name':'variant' (e.g. 'miniroom:1' ). | \n
A YAML file, that must exist in a folder called environments
in the root of the add-on package (e.g. environments/my_environment.yaml
).
The following keys are supported for environment add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'name' | \nYes | \nA string used to refer to this environment's name (the ('name', 'variant') pair must be unique!). | \n
'variant' | \nYes | \nA string used to refer to this environment's variant (the ('name', 'variant') pair must be unique!). | \n
'type' | \nYes | \nA string describing the type of this environment ('sim_unreal' & 'real' are the only values currently used). | \n
'map_path' | \nYes | \nA path to the map for this environment, which will be used by either the simulator or real world system to load the environment. | \n
'start_pose' | \nYes | \nThe start pose of the robot that will be provided to users through the BenchBot API. The pose is specified as a list of 7 numbers: quaternion_w, quaternion_x, quaternion_y, quaternion_z, position_x, position_y, position_z. This must be accurate! | \n
'trajectory_poses' | \nNo | \nA list of poses for the robot to traverse through in order. Each pose is a list of 7 numbers: quaternion_w, quaternion_x, quaternion_y, quaternion_z, position_x, position_y, position_z. This environment won't be usable for tasks that use the 'move_next' action if this parameter isn't provided. | \n
'robots' | \nNo | \nA list of names for the robots supported in this environment. If this list isn't included, all robots with the same 'type' as this environment will be able to run. | \n
'object_labels' | \nNo | \nA list of labels for the objects that exist in the scene. Can be used with simulated sensors like segmentation sensors. | \n
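\nA sketch of a matching environments/my_environment.yaml, built from the keys above (every value is a placeholder; the pose lists follow the quaternion_w, quaternion_x, quaternion_y, quaternion_z, position_x, position_y, position_z ordering):
import yaml

# Illustrative environment definition; all values are placeholders
my_environment = {
    'name': 'my_environment',
    'variant': '1',
    'type': 'sim_unreal',
    'map_path': 'maps/my_environment_1.yaml',
    'start_pose': [1, 0, 0, 0, 0, 0, 0],
    'trajectory_poses': [
        [1, 0, 0, 0, 1.0, 0.0, 0.0],
        [1, 0, 0, 0, 2.0, 0.5, 0.0],
    ],
    'object_labels': ['chair', 'table'],
}

with open('my_environment.yaml', 'w') as f:
    yaml.safe_dump(my_environment, f)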
A YAML file, that must exist in a folder called evaluation_methods
in the root of the add-on package (e.g. evaluation_methods/my_evaluation_method.yaml
).
The following keys are supported for evaluation method add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'name' | \nYes | \nA string used to refer to this evaluation method (must be unique!) | \n
'valid_result_formats' | \nYes | \nList of strings denoting results formats supported by the evaluation method. Ideally these format definitions should also be installed. | \n
'valid_ground_truth_formats' | \nYes | \nList of strings denoting ground truth formats supported by the evaluation method. Ideally these format definitions should also be installed. | \n
'functions' | \nYes | \nDictionary of named functions provided by the evaluation method. The named methods are key value pairs where the key is the function name, and the value is a string describing how the function can be imported with Python. For example, evaluate: \"omq.evaluate_method\" declares a function called 'evaluate' that is imported via from omq import evaluate_method . Likewise \"omq.submodule.combine_method\" translates to from omq.submodule import combine_method . See below for the list of functions expected for evaluation methods. | \n
'description' | \nNo | \nA string describing what the evaluation method is and how it works. Should be included if you want users to understand where your method can be used. | \n
Evaluation methods expect the following named functions:
\nName | \nSignature | \nUsage | \n
---|---|---|
'evaluate' | \nfn(dict: results, list: ground_truths) -> dict | \nEvaluates the performance using a results dictionary, and returns a dictionary containing the scores. It also takes a list of dictionaries containing each ground truth that will be used in evaluation. | \n
'combine' | \nfn(list: scores) -> dict | \nTakes a list of scores dictionaries, and returns an aggregate score. If this method isn't declared, benchbot_eval won't return a summary score. | \n
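\nA minimal sketch of an evaluation method module exposing the two functions above (the module name and scoring logic are placeholders; a real method would implement an actual metric):
# my_eval.py -- referenced from the YAML via, e.g., evaluate: "my_eval.evaluate"

def evaluate(results, ground_truths):
    # fn(dict: results, list: ground_truths) -> dict
    # Placeholder scoring; a real method compares 'results' against each
    # ground truth dictionary and computes a meaningful score
    return {'score': 0.0}

def combine(scores):
    # fn(list: scores) -> dict
    # Placeholder aggregation: average the individual scores
    values = [s['score'] for s in scores]
    return {'summary_score': sum(values) / len(values) if values else 0.0}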
A YAML file, that must exist in a folder called examples
in the root of the add-on package (e.g. examples/my_example.yaml
).
The following keys are supported for example add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
name | \nYes | \nA string used to refer to this example (must be unique!) | \n
native_command | \nYes | \nA string describing the command used to run your example natively, relative to the directory of this YAML file! For example running your my_example.py file which is in the same directory as this YAML would be python3 ./my_example.py . | \n
container_directory | \nNo | \nDirectory to be used for Docker's build context. The submission process will automatically look for a file called Dockerfile in that directory unless the 'container_filename' key is also provided. | \n
container_filename | \nNo | \nCustom filename for your example's Dockerfile. Dockerfile in container_directory will be used if this key is not included. This path is relative to this YAML file, not 'container_directory' . | \n
description | \nNo | \nA string describing what the example is and how it works. Should be included if you want users to understand how your example can be expanded. | \n
A YAML file, that must exist in a folder called formats
in the root of the add-on package (e.g. formats/my_format.yaml
).
The following keys are supported for format add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'name' | \nYes | \nA string used to refer to this format (must be unique!) | \n
'functions' | \nYes | \nDictionary of named functions for use with this format. The named methods are key-value pairs where the key is the function name, and the value is a string describing how the function can be imported with Python. For example, create: \"object_map.create_empty\" declares a function called 'create' that is imported via from object_map import create_empty . Likewise \"object_map.submodule.validate\" translates to from object_map.submodule import validate . See below for the list of functions expected for format definitions. | \n
'description' | \nNo | \nA string describing what the format is and how it works. Should be included if you want users to understand what your format is supposed to capture. | \n
Format definitions expect the following named functions:
\nName | \nSignature | \nUsage | \n
---|---|---|
'create' | \nfn() -> dict | \nFunction that returns an empty instance of this format. As much as possible should be filled in to make it easy for users to create valid instances (especially when a format is used for results). | \n
'validate' | \nfn(dict: instance) -> None | \nTakes a proposed instance of this format and validates whether it meets the requirements. Will typically use a series of assert statements to confirm fields are valid. | \n
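\nA minimal sketch of a format definition module exposing these two functions (the module name and the format itself are placeholders):
# my_format.py -- referenced from the YAML via, e.g., create: "my_format.create"

def create():
    # fn() -> dict: return an empty, valid instance of the format
    return {'objects': []}

def validate(instance):
    # fn(dict: instance) -> None: assert that a proposed instance is well-formed
    assert isinstance(instance, dict), "instance must be a dict"
    assert 'objects' in instance, "instance must contain an 'objects' field"
    assert isinstance(instance['objects'], list), "'objects' must be a list"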
A JSON file, that must exist in a folder called ground_truths
in the root of the add-on package (e.g. ground_truths/my_ground_truth.json
).
The following keys are supported for ground truth add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'environment' | \nYes | \nA dictionary containing the definition data for the ground truth's reference environment. The data in this field should be a direct copy of an environment add-on. | \n
'format' | \nYes | \nA dictionary containing the definition data for the ground truth's format. The data in this field should be a direct copy of a format definition add-on. | \n
'ground_truth' | \nYes | \nA valid instance of the format described by the 'format' field. This is where your actual ground truth data should be stored. | \n
A lot of these keys should be copied from other valid definitions. Please see the GroundTruthCreator
helper class in BenchBot Evaluation for assistance in creating valid ground truths.
A YAML file, that must exist in a folder called robots
in the root of the add-on package (e.g. robots/my_robot.yaml
).
The following keys are supported for robot add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'name' | \nYes | \nA string used to refer to this robot (must be unique!). | \n
'type' | \nYes | \nA string describing the type of this robot ('sim_unreal' & 'real' are the only values currently used). | \n
'address' | \nYes | \nA string for the address where a running BenchBot Robot Controller can be accessed (e.g. 'localhost:10000' ) | \n
'global_frame' | \nYes | \nThe name of the global TF frame. All poses reported by the BenchBot API will be with respect to this frame. | \n
'robot_frame' | \nYes | \nThe name of the robot's TF frame. | \n
'poses' | \nYes | \nA list of named poses that this robot provides. This list of poses will be available in observations provided by the BenchBot API. | \n
persistent_cmds | \nYes | \nA list of commands that will be run and kept alive for the lifetime of the robot controller. The commands will be run in parallel, and executed via bash -c <your_command_string> | \n
persistent_status | \nYes | \nA command used to check the status of your persistent_cmds . This command should execute quickly, and terminate on completion, with the return code being used to evaluate the status. The command string is executed via bash -c <your_command_string> | \n
run_cmd | \nYes | \nA single command issued by the controller to run a simulation. This command must terminate on completion. The command string is executed via bash -c <your_command_string> | \n
stop_cmd | \nYes | \nA single command issued by the controller to stop a simulation. This command must terminate on completion. The command string is executed via bash -c <your_command_string> | \n
'connections' | \nYes | \nA dictionary of connections that your robot makes available to the BenchBot ecosystem. The name of the key-value pair is important, and should follow the recommendations provided on standard channels in the BenchBot API documentation. A description of connection definitions is provided below. | \n
Connections are the lifeblood of interaction between BenchBot and robot platforms. They are defined by named entries, with the following fields:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'connection' | \nYes | \nConnection type string, used by the BenchBot Robot Controller. Supported values are 'api_to_ros' (used for actions), 'ros_to_api' (used for observations), and 'roscache_to_api' (special value used for caching observation values). | \n
'ros_topic' | \nYes | \nTopic name for the ROS side of the connection. | \n
'ros_type' | \nYes | \nTopic type for the ROS side of the connection. | \n
'callback_api' | \nNo | \nA callback that is run on the HTTP encoded data received / sent on the API end of the connection. It takes in data, and returns transformed data based on the callback's action. Callbacks are specified by a string denoting how the callback can be accessed (e.g. 'api_callbacks.convert_to_rgb = from api_callbacks import convert_to_rgb ). No data transformation occurs if no callback is provided. | \n
'callback_ros' | \nNo | \nA callback that is run on the ROS data received / sent on the robot controller end of the connection. It takes in data and a reference to the robot controller. 'api_to_ros' connections use this data to act on the robot, whereas 'ros_to_api' connections turn this data into a dictionary that can be serialised into HTTP traffic. Callbacks are specified by a string denoting how the callback can be accessed (e.g. 'api_callbacks.convert_to_rgb = from api_callbacks import convert_to_rgb ). No action occurs at the ROS level if no callback is provided. | \n
A YAML file, that must exist in a folder called tasks
in the root of the add-on package (e.g. tasks/my_task.yaml
).
The following keys are supported for task add-ons:
\nKey | \nRequired | \nDescription | \n
---|---|---|
'name' | \nYes | \nA string used to refer to this task (must be unique!). | \n
'actions' | \nYes | \nA list of named connections to be provided as actions through the BenchBot API. Running this task will fail if the robot doesn't provide these named connections. | \n
'observations' | \nYes | \nA list of named connections to be provided as observations through the BenchBot API. Running this task will fail if the robot doesn't provide these named connections. | \n
'localisation' | \nNo | \nA string describing the level of localisation. Only supported values currently are 'ground_truth' and 'noisy' . The default value is 'ground_truth '. | \n
'results_format' | \nNo | \nA string naming the format for results. The format must be installed, as BenchBot API will use the format's functions to provide the user with empty results. | \n
'description' | \nNo | \nA string describing what the task is, and how it works. Should be included if you want users to understand what challenges your task is trying to capture. | \n
'type' | \nNo | \nA string describing what robot / environment types are valid for this task. For example, a task that provides a magic image segmentation sensor would only be made available for 'sim_unreal' type robots / environments. | \n
'scene_count' | \nNo | \nInteger representing the number of scenes (i.e. environment variations required for a task). If omitted, a default value of 1 will be used for the task. | \n
NOTE: this software needs to interface with a running instance of the BenchBot software stack. Unless you are running against a remote stack / robot, please install this software with the BenchBot software stack as described here.
\nThe BenchBot API provides a simple interface for controlling a robot or simulator through actions, and receiving data through observations. As shown above, the entire code required for running an agent in a realistic 3D simulator is only a handful of simple Python commands.
\nOpenAI Gym users will find the breakdown into actions, observations, and steps extremely familiar. BenchBot API allows researchers to develop and test novel algorithms with real robot systems and realistic 3D simulators, without the typical hassles arising when interfacing with complicated multi-component robot systems.
\nRunning a robot through an entire environment, with your own custom agent, is as simple as one line of code with the BenchBot API:
\nfrom benchbot_api import BenchBot\nfrom my_agent import MyAgent\n\nBenchBot(agent=MyAgent()).run()\n
\nThe above assumes you have created your own agent by overloading the abstract Agent
class provided with the API. Overloading the abstract class requires implementing 3 basic methods. Below is a basic example to spin on the spot:
from benchbot_api import Agent\nimport json\n\nclass MyAgent(Agent):\n\n def is_done(self, action_result):\n # Go forever\n return False\n\n def pick_action(self, observations, action_list):\n # Rotates on the spot indefinitely, 5 degrees at a time\n # (assumes we are running in passive mode)\n return 'move_angle', {'angle': 5}\n\n def save_result(self, filename, empty_results, results_format_fns):\n # Save some blank results\n with open(filename, 'w') as f:\n json.dump(empty_results, f)\n
\nIf you prefer to do things manually, a more exhaustive suite of functions is also available as part of the BenchBot API. Instead of using the BenchBot.run()
method, a large number of methods are available through the API. The snippet below highlights a handful of the capabilities of the BenchBot API:
from benchbot_api import BenchBot, RESULT_LOCATION\nimport json\nimport matplotlib.pyplot as plt\n\n# Create a BenchBot instance & reset the simulator / robot to starting state\nb = BenchBot()\nobservations, action_result = b.reset()\n\n# Print details of selected task & environment\nprint(b.task_details)\nprint(b.environment_details)\n\n# Visualise the current RGB image from the robot\nplt.imshow(observations['image_rgb'])\n\n# Move to the next pose if we have a 'move_next' action available\nif 'move_next' in b.actions:\n observations, action_result = b.step('move_next')\n\n# Save some empty results\nwith open(RESULT_LOCATION, 'w') as f:\n json.dump(b.empty_results(), f)\n
\nFor sample solutions that use the BenchBot API, see the examples add-ons available (e.g. benchbot-addons/examples_base
and benchbot-addons/examples_ssu
).
BenchBot API is a Python package, installable with pip. Run the following in the root directory where this repository was cloned:
\nu@pc:~$ pip install .\n
\nCommunication with the robot comes through a series of \"channels\" which are defined by the robot's definition file (e.g. carter). A task definition file (e.g. semantic_slam:passive:ground_truth) then declares which of these connections are provided to the API as either sensor observations or actions to be executed by a robot actuator.
\nThe API talks to the BenchBot Supervisor, which handles loading and managing the different kinds of back-end configuration files. This abstracts all of the underlying communication complexities away from the user, allowing the BenchBot API to remain a simple interface that focuses on getting observations and sending actions.
\nAn action is sent to the robot by calling the BenchBot.step()
method with a valid action (found by checking the BenchBot.actions
property):
from benchbot_api import BenchBot\n\nb = BenchBot()\navailable_actions = b.actions\nb.step(b.actions[0], {'action_arg': arg_value}) # Perform the first available action ('action_arg' & arg_value are placeholders)\n
\nThe second parameter is a dictionary of named arguments for the selected action. For example, moving 5m forward with the 'move_distance'
action is represented by the dictionary {'distance': 5}
.
Observations lists are received as return values from a BenchBot.step()
call (BenchBot.reset()
internally calls BenchBot.step(None)
, which means don't perform an action):
from benchbot_api import BenchBot\n\nb = BenchBot()\nobservations, action_result = b.reset()\nobservations, action_result = b.step('move_distance', {'distance': 5})\n
\nThe returned observations
variable holds a dictionary with key-value pairs corresponding to the name-data defined by each observation channel.
The action_result
is an enumerated value denoting the result of the action (use from benchbot_api import ActionResult
to access the Enum
class). You should use this result to guide the progression of your algorithm either manually or in the is_done()
method of your Agent
. Possible values for the returned action_result
are:
ActionResult.SUCCESS
: the action was carried out successfullyActionResult.FINISHED
: the action was carried out successfully, and the robot has now finished its traversal through the scene (only used in passive
actuation mode)ActionResult.COLLISION
: the action crashed the robot into an obstacle, and as a result it will not respond to any further actuation commands (at this point you should quit)Tasks and robot definition files declare actions and observations, and these files are include through BenchBot add-ons. The add-on creator is free to add and declare channels as they please, but it is a better experience for all if channel definitions are as consistent as possible across the BenchBot ecosystem.
\nSo if you're adding a robot that move between a set of poses, declare a channel called 'move_next
with no arguments. Likewise, a robot that receives image observations should use a channel named 'image_rgb'
with the same format as described below. Feel free to implement the channels however you please for your robot, but consistent interfaces should always be preferred.
If you encounter a task using non-standard channel configurations, the API has all the functionality you need as a user to handle them (actions
, config
, & observations
properties). On the other hand, maybe the non-standard channel should be a new standard. New standard communication channels are always welcome; please open a pull request with the details!
Name | \nRequired Arguments | \nDescription | \n
---|---|---|
'move_next' | \nNone | \nMoves the robot to the next pose in its list of pre-defined poses (only available in environments that declare a 'trajectory_poses' field). | \n
'move_distance' | \n{'distance': float} | \nMoves the robot 'distance' metres directly ahead. | \n
'move_angle' | \n{'angle': float} | \nRotates the robot on the spot by 'angle' degrees. | \n
Name | \nData format | \nDescription | \n
---|---|---|
'image_depth' | \nnumpy.ndarray(shape=(H,W), | \nDepth image from the default image sensor with depths in meters. | \n
'image_depth_info' | \n{ | \nSensor information for the depth image. 'matrix_instrinsics' is of the format:[fx 0 cx]for a camera with focal lengths (fx,fy) , & principal point (cx,cy) . Likewise, 'matrix_projection' is:[fx 0 cx Tx]where (Tx,Ty) is the translation between stereo sensors. See here for further information on fields. | \n
'image_rgb' | \nnumpy.ndarray(shape=(H,W,3), | \nRGB image from the default image sensor with colour values mapped to the 3 channels, in the 0-255 range. | \n
'image_rgb_info' | \n{ | \nSensor information for the RGB image. 'matrix_instrinsics' is of the format:[fx 0 cx]for a camera with focal lengths (fx,fy) , & principal point (cx,cy) . Likewise, 'matrix_projection' is:[fx 0 cx Tx]where (Tx,Ty) is the translation between stereo sensors. See here for further information on fields. | \n
'laser' | \n{ | \nSet of scan values from a laser sensor, between 'range_min' & 'range_max' (in meters). The 'scans' array consists of N scans of format [scan_value, scan_angle] . For example, scans[100, 0] would get the distance value & scans[100, 1] would get the angle of the 100th scan. | \n
'poses' | \n{ | \nDictionary of relative poses for the current system state. The pose of each system component is available at key 'frame_name' . Each pose has a 'parent_frame' which the pose is relative to (all poses are typically with respect to the global 'map' frame), & the pose values. 'rotation_rpy' is [roll,pitch,yaw] in ZYX order, 'rotation_xyzw' is the equivalent quaternion [x,y,z,w] , & 'translation_xyz' is the Cartesian [x,y,z] coordinates. | \n
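\nPutting the formats above together, here is a sketch of unpacking a few observation channels (the frame names in 'poses' depend on the robot definition, so they are iterated rather than assumed):
import numpy as np
from benchbot_api import BenchBot

b = BenchBot()
observations, _ = b.reset()

rgb = observations['image_rgb']      # numpy.ndarray, shape (H, W, 3)
depth = observations['image_depth']  # numpy.ndarray, shape (H, W), in meters

# Closest laser return & the angle it was observed at
scans = np.array(observations['laser']['scans'])  # N rows of [scan_value, scan_angle]
closest = scans[np.argmin(scans[:, 0])]
print('Closest return: %.2fm at %.2frad' % (closest[0], closest[1]))

# Relative pose of each frame with respect to its parent frame
for frame, pose in observations['poses'].items():
    print(frame, pose['parent_frame'], pose['translation_xyz'])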
A running BenchBot system manages many other elements besides simply getting data to and from a real / simulated robot. BenchBot encapsulates not just the robot, but also the environment it is operating in (whether that be simulator or real) and task that is currently being attempted.
\nThe API handles communication for all parts of the BenchBot system, including controlling the currently running environment and obtaining configuration information. Below are details for some of the more useful features of the API (all features are also documented in the benchbot.py
source code).
API method or property | \nDescription | \n
---|---|
config | \nReturns a dict exhaustively describing the current BenchBot configuration. Most of the information returned will not be useful for general BenchBot use. | \n
API method or property | \nDescription | \n
---|---|
reset() | \nResets the current environment scene. For the simulator, this means restarting the running simulator instance with the robot back at its initial position. The method returns initial observations , & the action_result (should always be BenchBot.ActionResult.SUCCESS ). | \n
next_scene() | \nStarts the next scene in the current environment (only relevant for tasks with multiple scenes). Note there is no going back once you have moved to the next scene. Returns the same as reset() . | \n
API method or property | \nDescription | \n
---|---|
actions | \nReturns the list of actions currently available to the agent. This will update as actions are performed in the environment (for example if the agent has collided with an obstacle this list will be empty). | \n
observations | \nReturns the list of observations available to the agent. | \n
step(action, **action_args) | \nPerforms the requested action with the provided named action arguments. See Using the API to communicate with a robot above for further details. | \n
API method or property | \nDescription | \n
---|---|
empty_results() | \nGenerates a dict with the required result metadata & empty results. Metadata ('task_details' & 'environment_details' ) is pre-filled. To create results, all a user needs to do is fill in the empty 'results' field using the format's results functions. These functions are available through the results_functions() method. | \n
results_functions() | \nReturns a dict of functions defined by the task's 'results_format' . Example use for calling a create() function is results_functions()['create']() . | \n
RESULT_LOCATION (outside of BenchBot class) | \nA static string denoting where results should be saved (/tmp/results ). Using this location ensures tools in the BenchBot software stack work as expected. | \n
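\nFor example, a results file can be created and saved with these helpers as follows (a sketch following the descriptions above; the 'create' function comes from the task's results format):
import json
from benchbot_api import BenchBot, RESULT_LOCATION

b = BenchBot()

# 'task_details' & 'environment_details' metadata is pre-filled; only the
# 'results' field needs to be populated via the format's own functions
results = b.empty_results()
results['results'] = b.results_functions()['create']()

with open(RESULT_LOCATION, 'w') as f:
    json.dump(results, f)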
NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation (although it can be installed independently through pip and run on results files if desired). For a working BenchBot system, please install the BenchBot software stack by following the instructions here.
\nBenchBot Evaluation is a library of functions used to call evaluation methods. These methods are installed through the BenchBot Add-ons Manager, and evaluate the performance of a BenchBot system against a chosen metric. The easiest way to use this module is through the helper scripts provided with the BenchBot software stack.
\nBenchBot Evaluation is a Python package, installable with pip. Run the following in the root directory where this repository was cloned:
\nu@pc:~$ pip install .\n
\nAlthough evaluation is best run from within the BenchBot software stack, it can be run in isolation if desired. The following code snippet shows how to perform evaluation with the 'omq'
method from Python:
from benchbot_eval.evaluator import Evaluator, Validator\n\nValidator(results_file).validate_results_data()\nEvaluator('omq', scores_file).evaluate()\n
\nThis prints the final scores to the screen and saves them to a file using the following inputs:
\nresults_file
: points to the JSON file with the output from your experimentground_truth_folder
: the directory containing the relevant environment ground truth JSON filessave_file
: is where final scores are to be savedTwo types of add-ons are used in the BenchBot Evaluation process: format definitions, and evaluation methods. An evaluation method's YAML file defines what results formats and ground truth formats the method supports. This means:
\nresults_file
must be a valid instance of a supported formatPlease see the BenchBot Add-ons Manager's documentation for further details on the different types of add-ons.
\nThe BenchBot software stack includes tools to assist in creating results and ground truth files:
\nresults: are best created using the empty_results()
and results_functions()
helper functions in the BenchBot API, which automatically populate metadata for your current task and environment.
ground truths: this package includes a GroundTruthCreator
class to aid in creating ground truths of a specific format, for a specific environment. Example use includes:
from benchbot_eval.ground_truth_creator import GroundTruthCreator\n\ngtc = GroundTruthCreator('object_map_ground_truth', 'miniroom:1')\ngt = gtc.create_empty();\nprint(gtc.functions()) # ['create', 'create_object']\ngt['ground_truth']['objects'][0] = gtc.functions('create_object')\n
\nNOTE: this simulator is retired. It was based on Isaac Sim 2019.2, which used Unreal Engine. We have migrated to the new Omniverse-powered Isaac Sim. See benchbot_sim_omni for details.
\nThis simulator is an extension of the NVIDIA Isaac SDK that establishes ROS communications to a running instance of an Unreal Engine-based NVIDIA Isaac SIM. This simulator is explicitly linked to version 2019.2 of Isaac, the last version with direct support for the Unreal Engine-based simulator. This simulator is retired, as we have moved to the latest Isaac Sim which uses Omniverse. See benchbot_sim_omni for details.
\nThis simulator provides direct access to the following data on a robot, simulated in Unreal Engine. Access is via ROS on the topic provided in brackets:
\n/camera/color/image_raw
)/camera/color/camera_info
)/camera/depth/image_raw
)/camera/depth/camera_info
)/scan_laser
)/odom
)/tf
)Direct control of the robot is also facilitated via:
\n/cmd_vel
Please see the note at the top of the page; installation of this Simulator in isolation is generally not what you want!
\nIf you are sure you need to install the simulator in isolation, the following steps should be sufficient. Note there are a significant number of driver & software requirements for your system:
\nDownload version 2019.2 of the Isaac SDK from the NVIDIA site. If creating your own environments, also download version 2019.2 of Isaac SIM (not NavSim). You will have to create / sign in to an NVIDIA developer account, and look in the \"Archive\" drop down for version 2019.2.
\nEither setup your system with Isaac SIM, or download our environments:
\na) Follow the install instructions for Isaac SIM 2019.2 to get Unreal Engine (through IsaacSimProject) running on your system. You will have to link Epic Games to your Github account to get access.
\nb) Download our latest environments: Isaac Development Environments, and Isaac Challenge Environments
\nInstall the Isaac SDK by following the instructions here.
\nClone this simulator wrapper, apply our patches to the installed Isaac SDK, & build the simulator using the Bazel wrapper script (ensure the environment variable ISAAC_SDK_PATH
is set to where you installed Isaac SDK):
u@pc:~$ git clone https://github.com/qcr/benchbot_sim_unreal && cd benchbot_sim_unreal\nu@pc:~$ .isaac_patches/apply_patches\nu@pc:~$ ./bazelros build //apps/benchbot_simulator\n
\nThis simulator interface is run alongside a running Isaac Unreal Engine Simulator. To get both components running:
\nStart the Unreal Engine Simulator, either via our precompiled environments or the IsaacSimProject Unreal Editor:
\nu@pc:~$ ./IsaacSimProject <map_name> \\\n -isaac_sim_config_json='<path_to_isaac>/apps/carter/carter_sim/bridge_config/carter_full.json' \\\n -windowed -ResX=960 -ResY=540 -vulkan -game\n
\nLaunch this simulator's Isaac application (you first need to hardcode the pose unfortunately...):
\nu@pc:~$ START_POSE=<robot_start_pose> \\\n sed -i \"0,/\\\"pose\\\":/{s/\\(\\\"pose\\\": \\)\\(.*\\)/\\1$START_POSE}\" \\\n <path_to_isaac>/apps/carter/carter_sim/bridge_config/carter_full_config.json\nu@pc:~$ ./bazelros run //apps/benchbot_simulator\n
\nAt this point you will have a running Isaac Unreal Engine Simulator, with sensorimotor data available from the robot in ROS!
\nThe BenchBot Robot Controller is a ROS / HTTP hybrid wrapper script that manages running robots and their required subprocesses. See the carter_sim.yaml
configuration in the BenchBot Supervisor for an example configuration of how to run BenchBot Simulator through the Robot Controller.
NOTE: this software is part of the BenchBot software stack, and not intended to be run in isolation. For a working BenchBot system, please install the BenchBot software stack by following the instructions here.
\nThe BenchBot Supervisor is an HTTP server facilitating communication between user-facing interfaces like the BenchBot API, and low-level robot components like the BenchBot Simulator or real robots. Communication is typically routed through a BenchBot Robot Controller, which provides automated process management for low-level components and wraps all ROS communications.
\nBenchBot Supervisor is a Python package containing a Supervisor
class that wraps an HTTP server for both upstream and downstream communication. Install by running the following in the root directory where this repository was cloned:
u@pc:~$ pip install .\n
\nOnce installed, the Python class can be used as follows:
\nfrom benchbot_supervisor import Supervisor\n\ns = Supervisor(...args...)\ns.run()\n
\nThe following parameters are typically required for a useful instantiation of the supervisor:
\nmanager.py
can be found)'name'
field of an installed task'name'
field of an installed robot'name':'variant'
field combination of an installed environment (the 'name'
must be the same for all environments in the list)The module can also be executed directly, which makes the passing of arguments from the command line simple (see python -m benchbot_supervisor --help
for argument details):
u@pc:~$ python -m benchbot_supervisor ...args...\n
\nAs an example, the below command runs the supervisor for a scene change detection task, where active control is employed with ground truth localisation on a simulated Carter robot, and environments miniroom:1 and miniroom:5 are used:
\nu@pc:~$ python -m benchbot_supervisor \\\n --task-name scd:active:ground_truth \\\n --robot-name carter \\\n --environment-names miniroom:1,miniroom:5\n
\nThe BenchBot Supervisor requires configuration details for the selected tasks, robots, and environments. It uses these details to manage each of the system components, like API interaction and control of the simulator / real robot. Configuration details are provided by YAML files, which are referenced via their 'name'
field as shown above.
The BenchBot Add-ons Manager manages the installation of, and access to, these files. See the documentation there for further details on configuration files. All you need to do to use add-ons with the supervisor is provide the location via the 'addons_path'
argument.
The supervisor includes a RESTful HTTP API for all interaction with a user-facing API. The RESTful API includes the following commands:
\nRequest Route | \nResponse Format | \nDescription | \n
---|---|---|
/ | \nHello, I am the BenchBot supervisor | \nArbitrary response to confirm connection. | \n
/config/ | \n{ | \nDictionary containing parameter values for all supervisor configuration settings. Keys correspond to parameter names, & values to parameter values. | \n
/config/<config> | \nconfig_value | \nDirectly retrieve the value of a supervisor configuration parameter with name 'config' . Returns param_value of 'config' . | \n
/connections/<connection> | \ndict | \nReturns the response of the connection (e.g. an image_rgb connection would return the image) as a dict . Format & style of the dict is defined by the methods described above in \"Defining environment, robot, & task configurations\". | \n
/results_functions/ | \nlist | \nReturns a list of the results function names that can be remotely executed via the route below. | \n
/results_functions/<function> | \ndict | \nCalls results function with name 'function' , and returns the result of the function call in the response's JSON body. | \n
/robot/ | \nHello, I am the BenchBot robot controller | \nArbitrary response confirming a robot controller is available. | \n
/robot/<command> | \ndict | \nPasses the command 'command' down to a running robot controller manager. See BenchBot Robot Controller for documentation of supported commands & expected responses. | \n
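\nAs a rough illustration of how a user-facing client interacts with these routes, the snippet below queries a running supervisor using Python's requests library (the address and port are assumptions; substitute whatever your supervisor is actually listening on):
import requests\n\nsupervisor = 'http://localhost:10000'  # assumed address/port; adjust to your setup\n\nprint(requests.get(supervisor + '/').text)            # connection check\nprint(requests.get(supervisor + '/config/').json())   # all configuration settings\nprint(requests.get(supervisor + '/config/task_name').text)  # a single configuration parameter\n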
~ Our Robotic Vision Scene Understanding (RVSU) Challenge is live on EvalAI ~
\n~ BenchBot is now powered by NVIDIA Omniverse and Isaac Sim. We are aware of some issues; please report any you encounter. ~
\n~ Our BenchBot tutorial is the best place to get started developing with BenchBot ~
\nThe BenchBot software stack is a collection of software packages that allow end users to control robots in real or simulated environments with a simple Python API. It leverages the simple \"observe, act, repeat\" approach to robot problems prevalent in reinforcement learning communities (OpenAI Gym users will find the BenchBot API interface very similar).
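\nTo give a flavour of that \"observe, act, repeat\" loop, here is a minimal sketch of what a user-side solution tends to look like (class, method, and action names here are assumptions for illustration only; the BenchBot tutorial and API documentation are the authoritative reference):
from benchbot_api import BenchBot, ActionResult  # assumed imports from the BenchBot API package\n\nbenchbot = BenchBot()\nobservations, action_result = benchbot.reset()  # observe the initial state\nwhile action_result == ActionResult.SUCCESS:\n    # act: a real solution would choose from benchbot.actions based on the observations\n    observations, action_result = benchbot.step('move_distance', distance=0.5)  # hypothetical action\n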
\nBenchBot was created as a tool to assist with the research challenges faced by the semantic scene understanding community: challenges like understanding a scene in simulation, transferring algorithms to real-world systems, and meaningfully evaluating algorithm performance. We've since realised that these challenges don't just exist for semantic scene understanding; they're prevalent in a wide range of robotic problems.
\nThis led us to create version 2 of BenchBot with a focus on allowing users to define their own functionality for BenchBot through add-ons. Want to integrate your own environments? Plug-in new robot platforms? Define new tasks? Share examples with others? Add evaluation measures? This is all now possible with add-ons, and you don't have to do anything more than add some YAML and Python files defining your new content!
\nThe \"bench\" in \"BenchBot\" refers to benchmarking, with our goal to provide a system that greatly simplifies the benchmarking of novel algorithms in both realistic 3D simulation and on real robot platforms. If there is something else you would like to use BenchBot for (like integrating different simulators), please let us know. We're very interested in BenchBot being the glue between your novel robotics research and whatever your robot platform may be.
\nThis repository contains the software stack needed to develop solutions for BenchBot tasks on your local machine. It installs and configures a significant amount of software for you, wraps software in stable Docker images (~50GB), and provides simple interaction with the stack through 4 basic scripts: benchbot_install
, benchbot_run
, benchbot_submit
, and benchbot_eval
.
The BenchBot software stack is designed to run seamlessly on a wide range of system configurations (currently limited to Ubuntu 18.04+). System hardware requirements are relatively high due to the software required for 3D simulation (e.g. the NVIDIA Omniverse-powered Isaac Sim):
\nHaving a system that meets the above hardware requirements is all that is required to begin installing the BenchBot software stack. The install script analyses your system configuration and offers to install any missing software components interactively. The list of 3rd party software components involved includes:
\nsim_omni
)Installation is simple:
\nu@pc:~$ git clone https://github.com/qcr/benchbot && cd benchbot\nu@pc:~$ ./install\n
\nAny missing software components, or configuration issues with your system, should be detected by the install script and resolved interactively (you may be prompted to manually reboot and restart the install script). The installation asks if you want to add BenchBot helper scripts to your PATH
. Choosing yes will make the following commands available from any directory: benchbot_install
(same as ./install
above), benchbot_run
, benchbot_submit
, benchbot_eval
, and benchbot_batch
.
The BenchBot software stack will frequently check for updates and can update itself automatically. To update simply run the install script again (add the --force-clean
flag if you would like to install from scratch):
u@pc:~$ benchbot_install\n
\nIf you decide to uninstall the BenchBot software stack, run:
\nu@pc:~$ benchbot_install --uninstall\n
\nThere are a number of other options to customise your BenchBot installation, which are all described by running:
\nu@pc:~$ benchbot_install --help\n
\nBenchBot installs a default set of add-ons, which is currently 'benchbot-addons/ssu'
(and all of its dependencies declared here). But you can also choose to install a different set of add-ons instead. For example, the following will also install the 'benchbot-addons/data_collect'
add-ons:
u@pc:~$ benchbot_install --addons benchbot-addons/ssu,benchbot-addons/data_collect\n
\nSee the BenchBot Add-ons Manager's documentation for more information on using add-ons. All of our official add-ons can be found in our benchbot-addons GitHub organisation. We're open to adding add-ons contributed by our users to the official list as well.
\nGetting a solution up and running with BenchBot is as simple as 1,2,3. Here's how to use BenchBot with content from the semantic scene understanding add-on:
\nRun a simulator with the BenchBot software stack by selecting an available robot, environment, and task definition:
\nu@pc:~$ benchbot_run --robot carter_omni --env miniroom:1 --task semantic_slam:active:ground_truth\n
\nA number of useful flags exist to help you explore what content is available in your installation (see --help
for full details). For example, you can list what tasks are available via --list-tasks
and view the task specification via --show-task TASK_NAME
.
Create a solution to a BenchBot task, and run it against the software stack. To run a solution you must select a mode. For example, if you've created a solution in my_solution.py
that you would like to run natively:
u@pc:~$ benchbot_submit --native python my_solution.py\n
\nSee --help
for other options. You also have access to all of the examples available in your installation. For instance, you can run the hello_active
example in containerised mode via:
u@pc:~$ benchbot_submit --containerised --example hello_active\n
\nSee --list-examples
and --show-example EXAMPLE_NAME
for full details on what's available out of the box.
Evaluate the performance of your system using a supported evaluation method (see --list-methods
). To use the omq
evaluation method on my_results.json
:
u@pc:~$ benchbot_eval --method omq my_results.json\n
\nYou can also simply run evaluation automatically after your submission completes:
\nu@pc:~$ benchbot_submit --evaluate-with omq --native --example hello_eval_semantic_slam\n
\nThe BenchBot Tutorial is a great place to start working with BenchBot; the tutorial takes you from a blank system to a working Semantic SLAM solution, with many educational steps along the way. Also remember the examples in your installation (benchbot-addons/examples_base
is a good starting point) which show how to get up and running with the BenchBot software stack.
Once you are confident your algorithm is a solution to the chosen task, the BenchBot software stack's power tools allow you to comprehensively explore your algorithm's performance. You can autonomously run your algorithm over multiple environments, and evaluate it holistically to produce a single summary statistic of your algorithm's performance. Here are some examples again with content from the semantic scene understanding add-on:
\nUse benchbot_batch
to run your algorithm in a number of environments and produce a set of results. The script has a number of toggles available to customise the process (see --help
for full details). To autonomously run your semantic_slam:active:ground_truth
algorithm over 3 environments:
u@pc:~$ benchbot_batch --robot carter_omni --task semantic_slam:active:ground_truth --envs miniroom:1,miniroom:3,house:5 --native python my_solution.py\n
\nOr you can use one of the pre-defined environment batches installed via add-ons (e.g. benchbot-addons/batches_isaac
):
u@pc:~$ benchbot_batch --robot carter_omni --task semantic_slam:active:ground_truth --envs-batch develop_1 --native python my_solution.py\n
\nAdditionally, you can create a results ZIP and request an overall evaluation score at the end of the batch:
\nu@pc:~$ benchbot_batch --robot carter_omni --task semantic_slam:active:ground_truth --envs miniroom:1,miniroom:3,house:5 --zip --evaluate-with omq --native python my_solution.py\n
\nLastly, both native and containerised submissions are supported exactly as in benchbot_submit
:
u@pc:~$ benchbot_batch --robot carter_omni --task semantic_slam:active:ground_truth --envs miniroom:1,miniroom:3,house:5 --containerised my_solution_folder/\n
\nYou can also directly call the holistic evaluation performed above by benchbot_batch
through the benchbot_eval
script. The script supports single result files, multiple results files, or a ZIP of multiple results files. See benchbot_eval --help
for full details. Below are examples calling benchbot_eval
with a series of results and a ZIP of results respectively:
u@pc:~$ benchbot_eval --method omq -o my_jsons_scores result_1.json result_2.json result_3.json\n
\nu@pc:~$ benchbot_eval --method omq -o my_zip_scores results.zip\n
\nBenchBot was made to enable and assist the development of high quality, repeatable research results. We welcome any and all use of the BenchBot software stack in your research.
\nTo use our system, we just ask that you cite our paper on the BenchBot system. This will help us follow uses of BenchBot in the research community, and understand how we can improve the system to help support future research results. Citation details are as follows:
\n@misc{talbot2020benchbot,\n title={BenchBot: Evaluating Robotics Research in Photorealistic 3D Simulation and on Real Robots},\n author={Ben Talbot and David Hall and Haoyang Zhang and Suman Raj Bista and Rohan Smith and Feras Dayoub and Niko Sünderhauf},\n year={2020},\n eprint={2008.00635},\n archivePrefix={arXiv},\n primaryClass={cs.RO}\n}\n
\nIf you use our benchbot environments for active robotics (BEAR) which are installed by default, we ask you please cite our data paper on BEAR. Citation details are as follows:
\n@article{hall2022bear,\nauthor = {David Hall and Ben Talbot and Suman Raj Bista and Haoyang Zhang and Rohan Smith and Feras Dayoub and Niko Sünderhauf},\ntitle ={BenchBot environments for active robotics (BEAR): Simulated data for active scene understanding research},\njournal = {The International Journal of Robotics Research},\nvolume = {41},\nnumber = {3},\npages = {259-269},\nyear = {2022},\ndoi = {10.1177/02783649211069404},\n}\n
\nThe BenchBot software stack is split into a number of standalone components, each with their own GitHub repository and documentation. This repository glues them all together for you into a working system. The components of the stack are:
\nDevelopment of the BenchBot software stack was directly supported by:
\n\nThis repository defines shared templates for commonly performed actions within the QUT Centre for Robotics (QCR). We've made this project public as most of the templates have a general use case, and aren't directly tied to QCR.
\nTemplates can be used through a single script, and new templates are created by writing some basic template script in a new folder. The template 'engine' is ~250 lines of (admittedly terse) Bash.
\nNote: QCR members can access templates directly using the qcr
script from our tools
Clone this Git repository:
\ngit clone https://github.com/qcr/code_templates\n
\nAnd add the qcr_templates
script to your path somewhere if you'd like to use it from any directory. We recommend adding it to the ~/bin
directory as follows:
mkdir ~/bin\nln -s /path/to/code_templates/qcr_templates ~/bin/\n
\nYour new projects can be created from a template simply by making a new folder and running the script with your chosen template inside that folder. For example:
\nqcr_templates ros_package\n
\nThis will retrieve the template, and start a prompt asking you for values for your project. In general, it's best to use snake_case for programming variable values (i.e. my_variable_value
not myVariableValue
as our modification function assumes snake_case).
We use a very basic custom templating method in this project, with templates being declared by creating a new folder in this repository. Templates are defined using named variables, the user is prompted at runtime for values for these variables, and then a project is created from the template with the runtime values applied. Variable values can be used to:
\nTemplate variable names are typically upper snake case (i.e. MY_VARIABLE
), can have default values which will be shown in the prompt, and are evaluated using Bash. This means that any variable with no value is considered false, and all other values are considered true. A current limitation is that variables with default values cannot be changed to have no value by the user at runtime.
Variables are declared in a special file called .variables.yaml
at the root of each template, with their syntax described below.
Variables are replaced in text using their runtime value, with the __CAMEL
and __PASCAL
modifiers supported. For example, the following Python template:
\nclass MY_VARIABLE__PASCAL:\n\n def __init__(self):\n self._MY_VARIABLE = None\n\ndef MY_VARIABLE__CAMEL():\n print(\"Hi\")\n
\nwhen given MY_VARIABLE='obstacle_detector'
, would produce:
\nclass ObstacleDetector:\n\n def __init__(self):\n self.obstacle_detector = None\n\ndef obstacleDetector():\n print(\"Hi\")\n
\nVariables can also be used to declare whether blocks of code should be included in the output. Blocks begin with a TEMPLATE_START variable_1 variable_2 ...
line, and end with a TEMPLATE_END
line. The block is included if any of variable_1 variable_2 ...
have a value, and will only be excluded if all are empty. For example, the following CMake template:
\ncatkin_package(\n TEMPLATE_START ADD_MSGS ADD_SERVICES ADD_ACTIONS\n CATKIN_DEPENDS message_runtime\n TEMPLATE_END\n )\n
\nincludes a dependency on message_runtime
if any of ADD_MSGS
, ADD_SERVICES
, ADD_ACTIONS
have a value. The TEMPLATE_*
lines are removed from the result, with the output being:
catkin_package(\n CATKIN_DEPENDS message_runtime\n )\n
\nThe opposite relationship (include only if all have a value) isn't yet supported, but may be added in the future.
\nFile names can be given variable values simply by using the variable name in the filename. For example, a file called MY_VARIABLE.cpp
with a runtime value of MY_VARIABLE='object_detector'
would be renamed to object_detector.cpp
.
Another special file called .files.yaml
marks files which should only exist under certain conditions. Its syntax is based on very basic key-value pairs (filename: variable_1 variable_2 ...
), with the file included if any of variable_1 variable_2 ...
have a value. See existing templates for examples.
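\nFor example, a hypothetical .files.yaml for a ROS package template might contain the following (filenames and variables are illustrative, and each file is only included if at least one of its listed variables has a value):
msg/MY_VARIABLE.msg: ADD_MSGS\nsrv/MY_VARIABLE.srv: ADD_SERVICES\naction/MY_VARIABLE.action: ADD_ACTIONS\n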
Creating your own templates is almost as simple as using templates. To create your own template:
\nClone this repository locally:
\ngit clone https://github.com/qcr/code_templates\n
\nMake a new folder with the name of your template. For example, a template called my_new_template
is denoted by a folder called my_new_template
.
Create a .variables.yaml
file in your new folder. The format is the following:
VARIABLE_NAME:\n text: \"Text to be displayed to user in prompt\"\n default: \"Default static value\"\nVARIABLE_WITH_DYNAMIC_DEFAULT:\n text: \"Variable with default value determined at runtime\"\n default: $(echo \"This Bash code will be executed\")\nOPTIONAL_VARIABLE:\n text: \"Variable will be left blank if the user provides no input\"\n default: \"\"\n
\nCreate the files for your template, taking advantage of whichever variable features your template requires.
\nTest your template locally before pushing to master (as soon as it's pushed everyone can use it). Test locally by directly running the use_template
script with local files instead of the remote:
LOCAL_LOCATION=/path/to/local/clone/of/this/repo ./use_template my_new_template\n
\nOnce it works, push to the master branch. Done!
\nPlease note: a very crude YAML parser is written in use_template
to keep the dependencies of this software as low as possible. I emphasise, crude. You should not expect full YAML functionality (keep values on the same line as the key, don't use line breaks, no escape characters, etc.).
Update 2021-Jun-02: A pytorch-based (GPU/CPU) implementation of Delta Descriptors is now available with our latest work SeqNet.
\nSource code for the paper - \"Delta Descriptors: Change-Based Place Representation for Robust Visual Localization\", published in IEEE Robotics and Automation Letters (RA-L) 2020 and to be presented at IROS 2020. [arXiv] [IEEE Xplore][YouTube]
\nWe propose Delta Descriptor, defined as a high-dimensional signed vector of change measured across the places observed along a route. Using a difference-based description, places can be effectively recognized despite significant appearance variations.\n
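\nAs a rough illustration of this idea (a simplified sketch only, not the implementation used by src/main.py), a delta descriptor can be formed by differencing sequence-smoothed global descriptors half a window apart:
import numpy as np\n\ndef delta_descriptors_sketch(desc, l=16):\n    # desc: [numImages, numDescDims] array of global descriptors (e.g. NetVLAD)\n    kernel = np.ones(l) / l\n    # smooth each descriptor dimension along the route with a boxcar of length l\n    smoothed = np.apply_along_axis(lambda x: np.convolve(x, kernel, mode='same'), 0, desc)\n    # signed change measured across places half a window apart\n    delta = np.roll(smoothed, -l // 2, axis=0) - np.roll(smoothed, l // 2, axis=0)\n    return delta / (np.linalg.norm(delta, axis=1, keepdims=True) + 1e-12)\n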
matplotlib==2.0.2\nnumpy==1.15.2\ntqdm==4.29.1\nscipy==1.1.0\nscikit_learn==0.23.1\n
\nSee requirements.txt
, generated using pipreqs==0.4.10
and python3.5.6
The dataset used in our paper is available here (or use commands as below). Note that the download only comprises a small part (~1 GB) of the original Nordland videos released here. These videos were first used for visual place recognition in this paper.
\ngit clone https://github.com/oravus/DeltaDescriptors.git\ncd DeltaDescriptors/\nmkdir data/\ncd data/\nwget https://zenodo.org/record/4016653/files/nordland-part-2020.zip\nunzip nordland-part-2020.zip\n
\nThe zip contains two folders: summer and winter, where each one of them comprises 1750 images which were used for experiments conducted in our paper.
\nDelta Descriptors are defined on top of global image descriptors, for example, NetVLAD (Update 05 Sep 2020: see our python wrapper). Given such descriptors, compute Delta Descriptors and match across two traverses as below:
\npython src/main.py --genDesc --genMatch -l 16 -d delta -ip1 <full_path_of_desc.npy> -ip2 <full_path_of_query_desc.npy>\n
\nThe input descriptor data is assumed to be a 2D tensor of shape [numImages,numDescDims]
. The computed descriptors are stored in .npy
format and the match results are stored in .npz
format comprising a dict of two arrays: matchInds
(matched reference index per query image) and matchDists
(corresponding distance value). By default, output is stored in the ./out
folder, but it can also be specified via the --outPath
argument. To see all the options, use:
python src/main.py --help\n
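\nThe .npz match output described above can then be inspected with a few lines of numpy (the filename below is illustrative; check the ./out folder for the name generated by your run):
import numpy as np\n\nmatches = np.load('./out/match_output.npz')  # illustrative filename\nmatchInds, matchDists = matches['matchInds'], matches['matchDists']\nfor q, (r, d) in enumerate(zip(matchInds, matchDists)):\n    print('query {} -> reference {} (distance {:.3f})'.format(q, r, d))\n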
\nThe options --genDesc
and --genMatch
can be used in isolation or together; see example usage below.
In order to compute only the descriptors for a single traverse, use:
\npython src/main.py --genDesc -l 16 -d delta -ip1 <full_path_of_desc.npy>\n
\nFor only computing matches, given the descriptors (Delta or some other), use:
\npython src/main.py --genMatch -ip1 <full_path_of_desc.npy> -ip2 <full_path_of_query_desc.npy>\n
\npython src/main.py --eval -mop <full_path_of_match_output.npz>\n
\nor evaluate directly with --genMatch
(and possibly --genDesc
) flag:
python src/main.py --eval --genMatch -ip1 <full_path_of_desc.npy> -ip2 <full_path_of_query_desc.npy>\n
\nCurrently, only Nordland dataset-style (1-to-1 frame correspondence) evaluation is supported; GPS/INS coordinate-based evaluation (for example, for the Oxford RobotCar dataset) will be added soon. The evaluation code can be used to generate PR curves, and in its current form it prints Precision @ 100% Recall for localization radii of 1, 5, 10 and 20 (frames).
\nIf you find this code or our work useful, cite it as below:
\n@article{garg2020delta,\n title={Delta Descriptors: Change-Based Place Representation for Robust Visual Localization},\n author={Garg, Sourav and Harwood, Ben and Anand, Gaurangi and Milford, Michael},\n journal={IEEE Robotics and Automation Letters},\n year={2020},\n publisher={IEEE},\n volume={5},\n number={4},\n pages={5120-5127}, \n}\n
\nThe code is released under MIT License.
\nThis code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use this dataset or the code in a scientific publication, please cite the following paper (preprint and additional material):
\n@article{fischer2020event,\n title={Event-Based Visual Place Recognition With Ensembles of Temporal Windows},\n author={Fischer, Tobias and Milford, Michael},\n journal={IEEE Robotics and Automation Letters},\n volume={5},\n number={4},\n pages={6924--6931},\n year={2020}\n}\n
\nThe Brisbane-Event-VPR dataset accompanies this code repository: https://zenodo.org/record/4302805
\nThe following code is available:
\nvideo_beginning
indicates the ROS timestamp within the bag file that corresponds to the first frame of the consumer camera video file.Please note that in our paper we used manually annotated and then interpolated correspondences; instead here we provide matches based on the GPS data. Therefore, the results between what is reported in the paper and what is obtained using the methods here will be slightly different.
\nClone this repository: git clone https://github.com/Tobias-Fischer/ensemble-event-vpr.git
Clone https://github.com/cedric-scheerlinck/rpg_e2vid and follow the instructions to create a conda environment and download the pretrained models.
\nDownload the Brisbane-Event-VPR dataset.
\nNow convert the bag files to txt/zip files that can be used by the event2video code: python convert_rosbags.py
. Make sure to adjust the path to the extract_events_from_rosbag.py
file from the rpg_e2vid repository.
Now do the event to video conversion: python reconstruct_videos.py
. Make sure to adjust the path to the run_reconstruction.py
file from the rpg_e2vid repository.
conda create --name brisbaneeventvpr tensorflow-gpu pynmea2 scipy matplotlib numpy tqdm jupyterlab opencv pip ros-noetic-rosbag ros-noetic-cv-bridge python=3.8 -c conda-forge -c robostack
conda activate brisbaneeventvpr
python export_frames_from_rosbag.py
Create a new conda environment with the dependencies: conda create --name brisbaneeventvpr tensorflow-gpu pynmea2 scipy matplotlib numpy tqdm jupyterlab opencv pip
conda activate brisbaneeventvpr
git clone https://github.com/QVPR/netvlad_tf_open.git
cd netvlad_tf_open && pip install -e .
Download the NetVLAD checkpoint here (1.1 GB). Extract the zip and move its contents to the checkpoints folder of the netvlad_tf_open
repository.
Open the Brisbane Event VPR.ipynb and adjust the path to the dataset_folder
.
You can now run the code in Brisbane Event VPR.ipynb.
\nPlease check out this collection of related works on place recognition.
\n","name":"Visual Place Recognition using Event Cameras","type":"code","url":"https://github.com/Tobias-Fischer/ensemble-event-vpr","id":"event_vpr_code","image":"./dataset.png","_images":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"src":"/content/visual_place_recognition/event-vpr-code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/gtsam-quadrics.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/gtsam-quadrics.json new file mode 100644 index 0000000000..a7b2e9fb17 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/gtsam-quadrics.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"~ Please see our QuadricSLAM repository for examples of full SLAM systems~
\nThis repository contains an extension to the popular Georgia Tech Smoothing and Mapping (GTSAM) factor graph optimisation library. We introduce constrained dual quadrics as GTSAM variables, and support the estimation of the quadric parameters using 2-D bounding box measurements. These tools are available in both C++ and Python, and are designed to be used in conjunction with GTSAM. The extensions power our QuadricSLAM library, where we use quadrics for simultaneous localisation and mapping (SLAM) problems.
\nWe expect this repository to be active and continually improved upon. If you have any feature requests or experience any bugs, don't hesitate to let us know. Our code is free to use, and licensed under BSD-3. We simply ask that you cite our work if you use QuadricSLAM in your own research.
\nNote: we are aware of some issues with the wheels. If you encounter issues, we recommend the \"Install from source via Pip\" steps below
\nPre-built wheels of this library are available on PyPI for most Linux systems, as well as source distributions. Install the library with:
\npip install gtsam_quadrics\n
\nThe Python library is built from a custom setup.py
, which uses CMake to build a custom C++ extension bound using both PyBind11 and Georgia Tech's wrap meta-library.
You can build from source if you want closer access to the C++ libraries, or are having trouble finding a pre-compiled wheel for your system. There are two levels you can build the package from source: the Python level using pip, and C++ level using CMake.
\nAll building from source methods expect the following system dependencies to be available:
\nInstructions for installing these dependencies vary across Linux systems, but the following should be sufficient on a relatively recent Ubuntu version:
\nsudo apt install build-essential cmake libboost-all-dev libmetis-dev\n
\nIf your distribution's CMake version is too old, it can easily be upgraded by following Kitware's instructions here.
\nInstall from source via Pip
\nSimply request the sdist
instead of a binary wheel:
pip install gtsam_quadrics --no-binary :all:\n
\nBuilding the Python package from source
\nInstalling from source is very similar to the pip
method above, accept installation is from a local copy:
Clone the repository, and initialise the gtsam
submodule:
git clone --recurse-submodules https://github.com/best-of-acrv/gtsam-quadrics\n
\nEnter the gtsam_quadrics
directory, and simply install via pip
(the build process will take a while):
pip install .\n
\nBuilding the C++ package with CMake
\nClone the repository, and initialise the gtsam
submodule:
git clone --recurse-submodules https://github.com/best-of-acrv/gtsam-quadrics\n
\nCreate an out-of-source build directory:
\ncd gtsam_quadrics\nmkdir build\ncd build\n
\nRun the configuration and generation CMake steps, optionally building the Python wrapper using the BUILD_PYTHON_WRAP
variable:
cmake -DBUILD_PYTHON_WRAP=ON ..\n
\nRun the build step:
\ncmake --build . -j$(nproc)\n
\nThen optionally run any of the other supported targets as described below:
\nTarget name | \nDescription | \n
---|---|
check | \ncompile and run optional unit tests | \n
examples | \ncompiles the c++ examples | \n
doc | \ngenerates the doxygen documentation | \n
doc_clean | \nremoves the doxygen documentation | \n
install | \ninstalls the gtsam_quadrics c++/python library | \n
Note: documentation requires Doxygen (sudo apt install doxygen
) and epstopdf (sudo apt install texlive-font-utils
)
GTSAM Quadrics and GTSAM can be used like native Python packages. Below are some examples to help get you started with using GTSAM Quadrics:
\nimport gtsam\nimport gtsam_quadrics\nimport numpy as np\n\n# setup constants\npose_key = int(gtsam.symbol(ord('x'), 0))\nquadric_key = int(gtsam.symbol(ord('q'), 5))\n\n# create calibration\ncalibration = gtsam.Cal3_S2(525.0, 525.0, 0.0, 160.0, 120.0)\n\n# create graph/values\ngraph = gtsam.NonlinearFactorGraph()\nvalues = gtsam.Values()\n\n# create noise model (SD=10)\nbbox_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([10]*4, dtype=np.float))\n\n# create quadric landmark (pose=eye(4), radii=[1,2,3])\ninitial_quadric = gtsam_quadrics.ConstrainedDualQuadric(gtsam.Pose3(), np.array([1.,2.,3.]))\n\n# create bounding-box measurement (xmin,ymin,xmax,ymax)\nbounds = gtsam_quadrics.AlignedBox2(15,12,25,50)\n\n# create bounding-box factor\nbbf = gtsam_quadrics.BoundingBoxFactor(bounds, calibration, pose_key, quadric_key, bbox_noise)\n\n# add landmark to values\ninitial_quadric.addToValues(values, quadric_key)\n\n# add bbf to graph\ngraph.add(bbf)\n\n\n# get quadric estimate from values (assuming the values have changed)\nquadric_estimate = gtsam_quadrics.ConstrainedDualQuadric.getFromValues(values, quadric_key)\n
\nIf you are using this library in academic work, please cite the publication:
\nL. Nicholson, M. Milford and N. Sünderhauf, \"QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM,\" in IEEE Robotics and Automation Letters, vol. 4, no. 1, pp. 1-8, Jan. 2019, doi: 10.1109/LRA.2018.2866205. PDF.
\n@article{nicholson2019,\n title={QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM},\n author={Nicholson, Lachlan and Milford, Michael and Sünderhauf, Niko},\n journal={IEEE Robotics and Automation Letters},\n year={2019},\n}\n
\n","name":"GTSAM extension for quadrics","type":"code","url":"https://github.com/qcr/gtsam-quadrics","image":"https://github.com/qcr/gtsam-quadrics/raw/master/doc/gtsam_quadrics.png","_images":["/_next/static/images/gtsam_quadrics-9ce945399d611f449b8df8e1db6602ae.png.webp","/_next/static/images/gtsam_quadrics-cb27c37d5d64abed2e30e1523a8cec1a.png"],"src":"/content/quadricslam/gtsam_quadrics.md","id":"gtsam-quadrics","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/heaputil_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/heaputil_code.json
new file mode 100644
index 0000000000..5c9a7fb2ce
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/heaputil_code.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"HEAPUtil is an IEEE RA-L & IROS 2021 research paper. In this work, we present a method for unsupervised estimation of the Environment-Specific (ES) and Place-Specific (PS) Utility of unique visual cues in a reference map represented as VLAD clusters. Furthermore, we employ this Utility in a unified hierarchical global-to-local VPR pipeline to enable better place recognition and localization capability for robots, with reduced storage and compute time requirements. This repo contains the official code for estimating the Utility of visual cues and the hierarchical global-to-local VPR pipeline.
\n\n
Utility-guided Hierarchical Visual Place Recognition.\n
For more details, please see:
\nFull paper PDF: A Hierarchical Dual Model of Environment- and Place-Specific Utility for Visual Place Recognition.
\nAuthors: Nikhil Varma Keetha, Michael Milford, Sourav Garg
\nSimply run the following command: pip install -r requirements.txt
conda create -n heaputil python=3.8 mamba -c conda-forge -y\nconda activate heaputil\nmamba install numpy opencv pytorch matplotlib faiss-gpu scipy scikit-image=0.18.2 torchvision scikit-learn h5py -c conda-forge\n
\nFor Data Loading, we use .mat
files which contain information regarding Reference Image Paths
, Query Image Paths
, Ground-truth Co-ordinates
for Reference and Query Images, and the Positive Localization Distance Threshold
. These .mat
files for the Berlin Kudamm, Nordland Summer Vs Winter and Oxford Day Vs Night datasets are present in the ./dataset-mat-files
folder.
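\nIf you want to peek inside one of these .mat files before running anything, scipy can load it (the filename below is a guess; list the ./dataset-mat-files folder to find the actual names):
from scipy.io import loadmat\n\nmat = loadmat('./dataset-mat-files/berlin.mat')  # filename is a guess\n# print the stored fields (image paths, ground-truth coordinates, localization threshold)\nprint([k for k in mat.keys() if not k.startswith('__')])\n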
We provide the Berlin Kudamm Dataset for Inference:
\nFor more details regarding the Berlin Kudamm dataset please refer to this paper.
\nFor all the scripts, apart from SuperPoint Extraction, you may use the --dataset
flag to mention the dataset to use. By default, it is set to 'berlin'
and the default choices are ['oxford', 'nordland', 'berlin']
.
Here's a Colab Notebook to effortlessly run tests on the Berlin Dataset.
\nPlease use the --help
flag to see all available arguments for the scripts.
Extract NetVLAD Descriptors, Predictions and Cluster Masks:
\npython NetVLAD/main.py --resume './data/NetVLAD/netvlad-checkpoint-cc16' --root_dir './data' --save --save_path './data/NetVLAD'\n
\nEstimate the Environment- and Place-Specific Utility of VLAD Clusters for the Reference Map:
\npython utility.py --root_dir './data' --netvlad_extracts_path './data/NetVLAD' --save_path './data/Utility' --save_viz\n
\nYou may use the --save_viz
flag to visualize the Environment-Specific and Place-Specific Utility as shown below:
\n \n
Visualizing ES (left) & PS (right) Utility (Red indicates low utility and blue/gray indicates high utility)\n
Generate path lists which are required for SuperPoint Extraction & SuperGlue:
\npython generate_path_lists.py --root_dir './data' --netvlad_predictions './data/NetVLAD' --save_path './data'\n
\nExtract SuperPoint features for the Reference Map:
\npython SuperGlue/superpoint_extraction.py --input_images './data/db_list.txt' --split 'db' --input_dir './data' --output_dir './data/SuperPoint'\n
\nExtract SuperPoint features for the Queries:
\npython SuperGlue/superpoint_extraction.py --input_images './data/q_list.txt' --split 'query' --input_dir './data' --output_dir './data/SuperPoint'\n
\nYou may use the --viz
flag to visualize the best matches as a gif.
Run Vanilla SuperPoint based Local Feature Matching:
\npython local_feature_matching.py --input_dir './data' --output_dir './data/LFM/Vanilla' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility'\n
\nRun ES-Utility guided Local Feature Matching:
\npython local_feature_matching.py --input_dir './data' --output_dir './data/LFM/ES_Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility' \\\n--es_utility\n
\nRun PS-Utility guided Local Feature Matching:
\npython local_feature_matching.py --input_dir './data' --output_dir './data/LFM/PS_Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility' \\\n--ps_utility\n
\nThe default number of top utility clusters to use for Local Feature Matching is 10
. Please use the --k
flag to use a different number of top utility clusters.
Run ES & PS-Utility guided Local Feature Matching:
\npython local_feature_matching.py --input_dir './data' --output_dir './data/LFM/Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --superpoint_extracts_path './data/SuperPoint' --utility_path './data/Utility' \\\n--es_utility --ps_utility --viz\n
\nThe default number of top utility clusters to use for Local Feature Matching is X-1
clusters, where X
is the number of useful clusters determined by the Environment-Specific system. To use a different number of top utility clusters please use the --non_default_k
and --k
flags.
We use the --viz
flag to visualize the best matches along with utility reference masks as a gif as shown below:
\n \n
ES & PS Utility-guided Local Feature Matching (Cyan mask represents regions with high utility)\n
Similar to Local Feature Matching, you may run the superglue_match_pairs.py
file for Vanilla SuperGlue & Utility-guided SuperGlue. You may use the --viz
flag to visualize all the matches and dump the SuperGlue-style plots.
Run ES & PS-Utility guided SuperGlue:
\npython superglue_match_pairs.py --input_pairs './data/berlin_netvlad_candidate_list.txt' --input_dir './data' --output_dir './data/SuperGlue/Utility' \\\n--netvlad_extracts_path './data/NetVLAD' --utility_path './data/Utility' \\\n--es_utility --ps_utility\n
\nIf any ideas from the paper or code from this repo are used, please consider citing:
\n@article{keetha2021hierarchical,\n author={Keetha, Nikhil Varma and Milford, Michael and Garg, Sourav},\n journal={IEEE Robotics and Automation Letters}, \n title={A Hierarchical Dual Model of Environment- and Place-Specific Utility for Visual Place Recognition}, \n year={2021},\n volume={6},\n number={4},\n pages={6969-6976},\n doi={10.1109/LRA.2021.3096751}}\n
\nThe code is licensed under the MIT License.
\nThe authors acknowledge the support from the Queensland University of Technology (QUT) through the Centre for Robotics.
\nFurthermore, we would like to acknowledge the Pytorch Implementation of NetVlad from Nanne and the original implementation of SuperGlue.
\nPlease check out this collection of related works on place recognition.
\n","name":"HEAPUtil","type":"code","url":"https://github.com/Nik-V9/HEAPUtil","id":"heaputil_code","image":"assets/overview.jpg","_images":["/_next/static/images/overview-8c193585e23714439d55f0227d88f923.jpg.webp","/_next/static/images/overview-fc609d6102a3c08cb20b14382e57ee50.jpg"],"src":"/content/visual_place_recognition/heaputil.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/lost_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/lost_code.json new file mode 100644 index 0000000000..60dc63b4ee --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/lost_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"This is the source code for the paper titled - \"LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics\", [arXiv][RSS 2018 Proceedings]
\nAn example output image showing Keypoint Correspondences:
\nFlowchart of the proposed approach:
\nIf you find this work useful, please cite it as:
\nSourav Garg, Niko Sunderhauf, and Michael Milford. LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics. Proceedings of Robotics: Science and Systems XIV, 2018.
\nbibtex:
@article{garg2018lost,\ntitle={LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics},\nauthor={Garg, Sourav and Suenderhauf, Niko and Milford, Michael},\njournal={Proceedings of Robotics: Science and Systems XIV},\nyear={2018}\n}\n
\nRefineNet's citation as mentioned on their Github page.
\ngit clone https://github.com/oravus/lostX.git\ncd lostX\ngit clone https://github.com/oravus/refinenet.git\n
\nNOTE: If you download this repository as a zip, the RefineNet fork will not be downloaded automatically, as it is a git submodule.
Place the trained RefineNet model's .mat
file in the refinenet/model_trained/
directory.
docker pull souravgarg/vpr-lost-kc:v1\n
\nGenerate and store semantic labels and dense convolutional descriptors from RefineNet's conv5 layer\nIn the MATLAB workspace, from the refinenet/main/
directory, run:
demo_predict_mscale_cityscapes\n
\nThe above will use the sample dataset from refinenet/datasets/
directory. You can set path to your data in demo_predict_mscale_cityscapes.m
through variable datasetName
and img_data_dir
.
\nYou might have to run vl_compilenn
before running the demo, please refer to the instructions for running refinenet in their official Readme.md
[For Docker users]
\nIf you have an environment with python and other dependencies installed, skip this step, otherwise run a docker container:
docker run -it -v PATH_TO_YOUR_HOME_DIRECTORY/:/workspace/ souravgarg/vpr-lost-kc:v1 /bin/bash\n
\nFrom within the docker container, navigate to lostX/lost_kc/
repository.
\n-v
option mounts the PATH_TO_YOUR_HOME_DIRECTORY to /workspace directory within the docker container.
Reformat and pre-process RefineNet's output from lostX/lost_kc/
directory:
python reformat_data.py -p $PATH_TO_REFINENET_OUTPUT\n
\n$PATH_TO_REFINENET_OUTPUT is set to be the parent directory of predict_result_full
, for example, ../refinenet/cache_data/test_examples_cityscapes/1-s_result_20180427152622_predict_custom_data/predict_result_1/
Compute LoST descriptor:
\npython LoST.py -p $PATH_TO_REFINENET_OUTPUT \n
\nRepeat step 1, 3, and 4 to generate output for the other dataset by setting the variable datasetName
to 2-s
.
Perform place matching using LoST descriptors based difference matrix and Keypoint Correspondences:
\npython match_lost_kc.py -n 10 -f 0 -p1 $PATH_TO_REFINENET_OUTPUT_1 -p2 $PATH_TO_REFINENET_OUTPUT_2\n
\nNote: Run python FILENAME -h
for any of the python source files in Step 3, 4, and 6 for description of arguments passed to those files.
The code is released under MIT License.
\nOpenSeqSLAM2.0 is a MATLAB toolbox that allows users to thoroughly explore the SeqSLAM method in addressing the visual place recognition problem. The visual place recognition problem is centred around recognising a previously traversed route, regardless of whether it is seen during the day or night, in clear or inclement conditions, or in summer or winter. Recognising previously traversed routes is a crucial capability of navigating robots. Through the graphical interfaces packaged in OpenSeqSLAM2 users are able to:
\nThe toolbox is open-source and downloadable from the releases tab. All we ask is that, if you use OpenSeqSLAM2 in any academic work, you include a reference to the corresponding publication (bibtex is available at the bottom of the page).
\nThe toolbox is designed to be simple to use (it runs out of the box without any initial configuration required). To run the toolbox, simply run the command below (with the toolbox root directory in your MATLAB path):
\nOpenSeqSLAM2();\n
\nThere are a number of default configuration files included in the .config
directory which showcase the capabilities of the toolbox. To use a configuration file, open the toolbox as described above, then use the Import config
button. A summary of the features showcased in each of the configuration files is included below:
'images_same'
: The trimmed Nordland dataset images, with the same dataset used as both reference and query. Trajectory based search is used, and a velocity-based ground truth is included, but not used for auto-optimisation of match threshold.'images_diff'
: The trimmed Nordland dataset images, with the summer traversal used as the reference dataset and the winter traversal as the query. Trajectory based search is used, and a *.csv based ground truth is used for auto-optimising the match threshold selection.'videos_same'
: The day night video dataset, with the same video used as both the reference and query dataset. Trajectory based search is used, with no ground truth provided.'videos_diff'
: The day night video dataset, with the day traversal used as the reference dataset and the night traversal as the query. Trajectory based search is used, with no ground truth provided.'hybrid_search'
: Same as 'videos_diff'
, but the hybrid search is used instead of trajectory search.'no_gui'
: Same as 'videos_diff'
, but the progress is presented in the console rather than GUI and no results GUI is shown (tip: run OpenSeqSLAM2(‘'batch_with_gui'
: Same as 'images_diff'
, but a batch parameter sweep of the sequence length parameter is performed. The progress GUI shows the progress of the individual iteration and overall in separate windows.'parrallelised_batch'
: Same as 'batch_with_gui'
, but the parameter sweep is done in parallel mode (which cannot be performed with the Progress GUI). The parallel mode will use a worker for each core available in the host CPU.'default'
: is set to 'images_diff'
Note: the programs in the ./bin
directory can be run standalone by providing the appropriate results / config structs as arguments if you would like to use only a specific part of the pipeline (i.e. only configuration, or progress wrapped execution, or viewing results).
If using the toolbox in any academic work, please include the following citation:
\n@ARTICLE{2018openseqslam2,\n author = {{Talbot}, B. and {Garg}, S. and {Milford}, M.},\n title = \"{OpenSeqSLAM2.0: An Open Source Toolbox for Visual Place Recognition Under Changing Conditions}\",\n journal = {ArXiv e-prints},\narchivePrefix = \"arXiv\",\n eprint = {1804.02156},\n primaryClass = \"cs.RO\",\n keywords = {Computer Science - Robotics, Computer Science - Computer Vision and Pattern Recognition},\n year = 2018,\n month = apr,\n adsurl = {http://adsabs.harvard.edu/abs/2018arXiv180402156T},\n adsnote = {Provided by the SAO/NASA Astrophysics Data System}\n}\n
\n","name":"OpenSeqSLAM2","type":"code","url":"https://github.com/qcr/openseqslam2","id":"openseqslam2_code","image":"./docs/openseqslam2.png","_images":["/_next/static/images/openseqslam2-c5079d59d4cff5bd652acb1652d047f6.png.webp","/_next/static/images/openseqslam2-f3755fc8e61c0d81c8f0b0f42c5e08ae.png"],"src":"/content/visual_place_recognition/openseqslam2.md","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/patchnetvlad_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/patchnetvlad_code.json
new file mode 100644
index 0000000000..f18f44843a
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/patchnetvlad_code.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"This repository contains code for the CVPR2021 paper \"Patch-NetVLAD: Multi-Scale Fusion of Locally-Global Descriptors for Place Recognition\"
\nThe article can be found on arXiv and the official proceedings.
\n\n
When using code within this repository, please refer the following paper in your publications:
\n@inproceedings{hausler2021patchnetvlad,\n title={Patch-NetVLAD: Multi-Scale Fusion of Locally-Global Descriptors for Place Recognition},\n author={Hausler, Stephen and Garg, Sourav and Xu, Ming and Milford, Michael and Fischer, Tobias},\n booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n pages={14141--14152},\n year={2021}\n}\n
\nThe code is licensed under the MIT License.
\nWe recommend using conda (or better: mamba) to install all dependencies. If you have not yet installed conda/mamba, please download and install mambaforge
.
# On Linux:\nconda create -n patchnetvlad python numpy pytorch-gpu torchvision natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge\n# On MacOS (x86 Intel processor):\nconda create -n patchnetvlad python numpy pytorch torchvision natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge\n# On MacOS (ARM M1/M2 processor):\nconda create -n patchnetvlad python numpy pytorch torchvision natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge -c tobiasrobotics\n# On Windows:\nconda create -n patchnetvlad python numpy natsort tqdm opencv pillow scikit-learn faiss matplotlib-base -c conda-forge\nconda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia\n\nconda activate patchnetvlad\n
\nWe provide several pre-trained models and configuration files. The pre-trained models will be downloaded automatically into the pretrained_models
the first time feature extraction is performed.
We recommend downloading the models into the pretrained_models
folder (which is setup in the config files within the configs
directory):
# Note: the pre-trained models will be downloaded automatically the first time feature extraction is performed\n# the steps below are optional!\n\n# You can use the download script which automatically downloads the models:\npython ./download_models.py\n\n# Manual download:\ncd pretrained_models\nwget -O mapillary_WPCA128.pth.tar https://cloudstor.aarnet.edu.au/plus/s/vvr0jizjti0z2LR/download\nwget -O mapillary_WPCA512.pth.tar https://cloudstor.aarnet.edu.au/plus/s/DFxbGgFwh1y1wAz/download\nwget -O mapillary_WPCA4096.pth.tar https://cloudstor.aarnet.edu.au/plus/s/ZgW7DMEpeS47ELI/download\nwget -O pittsburgh_WPCA128.pth.tar https://cloudstor.aarnet.edu.au/plus/s/2ORvaCckitjz4Sd/download\nwget -O pittsburgh_WPCA512.pth.tar https://cloudstor.aarnet.edu.au/plus/s/WKl45MoboSyB4SH/download\nwget -O pittsburgh_WPCA4096.pth.tar https://cloudstor.aarnet.edu.au/plus/s/1aoTGbFjsekeKlB/download\n
\nIf you want to use the shortcuts patchnetvlad-match-two
, patchnetvlad-feature-match
and patchnetvlad-feature-extract
, you also need to run (which also lets you use Patch-NetVLAD in a modular way):
pip3 install --no-deps -e .\n
\nReplace performance.ini
with speed.ini
or storage.ini
if you want, and adapt the dataset paths - examples are given for the Pittsburgh30k dataset (simply replace pitts30k
with tokyo247
or nordland
for these datasets).
python feature_extract.py \\\n --config_path patchnetvlad/configs/performance.ini \\\n --dataset_file_path=pitts30k_imageNames_index.txt \\\n --dataset_root_dir=/path/to/your/pitts/dataset \\\n --output_features_dir patchnetvlad/output_features/pitts30k_index\n
\nRepeat for the query images by replacing _index
with _query
. Note that you have to adapt dataset_root_dir
.
python feature_match.py \\\n --config_path patchnetvlad/configs/performance.ini \\\n --dataset_root_dir=/path/to/your/pitts/dataset \\\n --query_file_path=pitts30k_imageNames_query.txt \\\n --index_file_path=pitts30k_imageNames_index.txt \\\n --query_input_features_dir patchnetvlad/output_features/pitts30k_query \\\n --index_input_features_dir patchnetvlad/output_features/pitts30k_index \\\n --ground_truth_path patchnetvlad/dataset_gt_files/pitts30k_test.npz \\\n --result_save_folder patchnetvlad/results/pitts30k\n
\nNote that providing ground_truth_path
is optional.
This will create three output files in the folder specified by result_save_folder
:
recalls.txt
with a plain text output (only if ground_truth_path
is specified)NetVLAD_predictions.txt
with top 100 reference images for each query images obtained using \"vanilla\" NetVLAD in Kapture formatPatchNetVLAD_predictions.txt
with top 100 reference images from above re-ranked by Patch-NetVLAD, again in Kapture formatpython match_two.py \\\n--config_path patchnetvlad/configs/performance.ini \\\n--first_im_path=patchnetvlad/example_images/tokyo_query.jpg \\\n--second_im_path=patchnetvlad/example_images/tokyo_db.png\n
\nWe provide the match_two.py
script which computes the Patch-NetVLAD features for two given images and then determines the local feature matching between these images. While we provide example images, any image pair can be used.
The script will print a score value as an output, where a larger score indicates more similar images and a lower score means dissimilar images. The function also outputs a matching figure, showing the patch correspondances (after RANSAC) between the two images. The figure is saved as results/patchMatchings.png
.
python train.py \\\n--config_path patchnetvlad/configs/train.ini \\\n--cache_path=/path/to/your/desired/cache/folder \\\n--save_path=/path/to/your/desired/checkpoint/save/folder \\\n--dataset_root_dir=/path/to/your/mapillary/dataset\n
\nTo begin, request, download and unzip the Mapillary Street-level Sequences dataset (https://github.com/mapillary/mapillary_sls).\nThe provided script will train a new network from scratch; to resume training, add --resume_path set to the full path, filename and extension of an existing checkpoint file. Note that to resume our provided models, you must first remove the WPCA layers.
\nAfter training a model, PCA can be added using add_pca.py.
\npython add_pca.py \\\n--config_path patchnetvlad/configs/train.ini \\\n--resume_path=full/path/with/extension/to/your/saved/checkpoint \\\n--dataset_root_dir=/path/to/your/mapillary/dataset\n
\nThis will add an additional checkpoint file to the same folder as resume_path, except including a WPCA layer.
\nWe provide three ready-to-go ground truth files in the dataset_gt_files folder, however, for evaluation on other datasets you will need to create your own .npz ground truth data files.\nEach .npz stores three variables: utmQ
(a numpy array of floats), utmDb
(a numpy array of floats) and posDistThr
(a scalar numpy float).
Each successive element within utmQ
and utmDb
needs to correspond to the corresponding row of the image list file. posDistThr
is the ground truth tolerance value (typically in meters).
The following mock example details the steps required to create a new ground truth file:
\nnp.savez('dataset_gt_files/my_dataset.npz', utmQ=my_utmQ, utmDb=my_utmDb, posDistThr=my_posDistThr)
We would like to thank Gustavo Carneiro, Niko Suenderhauf and Mark Zolotas for their valuable comments in preparing this paper. This work received funding from the Australian Government, via grant AUSMURIB000001 associated with ONR MURI grant N00014-19-1-2571. The authors acknowledge continued support from the Queensland University of Technology (QUT) through the Centre for Robotics.
\nPlease check out this collection of related works on place recognition.
\n","name":"Patch-NetVLAD","type":"code","url":"https://github.com/QVPR/Patch-NetVLAD","id":"patchnetvlad_code","image":"./assets/patch_netvlad_method_diagram.png","_images":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"src":"/content/visual_place_recognition/patchnetvlad.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/pdq.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/pdq.json new file mode 100644 index 0000000000..777733cd52 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/pdq.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"\nThis repository contains the implementation of the probability-based detection quality (PDQ) evaluation measure.\nThis enables quantitative analysis of the spatial and semantic uncertainties output by a probabilistic object detecttion (PrOD) system.\nThis repository provides tools for analysing PrOD detections and classical detections using mAP, moLRP, and PDQ (note that PDQ results will be low for a classical detector and mAP and moLRP scores will likely be low for PrOD detections).\nEvaluation can be performed both on COCO formatted data and on RVC1 (PrOD challenge) formatted data.\nThe repository also provides visualization tools to enable fine-grained analysis of PDQ results as shown below.
\nThe code here, particularly for evaluating RVC1 data is based heavily on the PrOD challenge code which can be found\nhere: https://github.com/jskinn/rvchallenge-evaluation
\nNote that some extra funcitonality for PDQ outside of what is reported in the original paper and challenge is also provided such as evaluating results using the bounding boxes of the ground-truth segmentation masks, probabilistic segmentation evaluation, a greedy alternative to PDQ.
\nFor further details on the robotic vision challenges please see the following links for more details:
\nIf you are using PDQ in your research, please cite the paper below:
\n@inproceedings{hall2020probabilistic,\n title={Probabilistic object detection: Definition and evaluation},\n author={Hall, David and Dayoub, Feras and Skinner, John and Zhang, Haoyang and Miller, Dimity and Corke, Peter and Carneiro, Gustavo and Angelova, Anelia and S{\\\"u}nderhauf, Niko},\n booktitle={The IEEE Winter Conference on Applications of Computer Vision},\n pages={1031--1040},\n year={2020}\n}\n
\nThis code comes with a requirements.txt file.\nMake sure you have installed all libraries as part of your working environment.
\nAfter installing all requirements, you will need to have a fully installed implementation of the COCO API located\nsomewhere on your machine.\nYou can download this API here https://github.com/cocodataset/cocoapi.
\nOnce this is downloaded and installed, you need to adjust the system path on line 11 of coco_mAP.py and line 16 of\nread_files.py to match the PythonAPI folder of your COCO API installation.
\nYou will also require code for using LRP evaluation measures.\nTo do this you need to simply copy the cocoevalLRP.py file from the LRP github repository to the pycocotools folder within the PythonAPI.\nYou can download the specific file here https://github.com/cancam/LRP/blob/master/cocoLRPapi-master/PythonAPI/pycocotools/cocoevalLRP.py\nYou can clone the original repository here https://github.com/cancam/LRP.
\nAfter cocoevalLRP.py is located in your pycocotools folder, simply adjust the system path on line 11 of coco_LRP.py to match your PythonAPI folder.
\nAll evaluation code is run on detections saved in .json files formatted as required by the RVC outlined later on.\nA variation to this is also available for probabilistic segmentation format also described later.\nIf you are evaluating on COCO data and have saved detections in COCO format, you can convert to RVC1 format using\nfile_convert-coco_to_rvc1.py\nWhen you have the appropriate files, you can evaluate on mAP, moLRP, and PDQ with evaluate.py.\nAfter evaluation is complete, you can visualise your detections for a sequence of images w.r.t. PDQ using\nvisualise_pdq_analysis.py
\nEvaluation is currently organised so that you can evaluate either on COCO data, or on RVC1 data. Note that RVC1 data\nexpects multiple sequences rather than a single folder of data.
\nRVC1 detections are saved in a single .json file per sequence being evaluated. Each .json file is formatted as follows:
\n{\n \"classes\": [<an ordered list of class names>],\n \"detections\": [\n [\n {\n \"bbox\": [x1, y1, x2, y2],\n \"covars\": [\n [[xx1, xy1],[xy1, yy1]],\n [[xx2, xy2],[xy2, yy2]]\n ],\n \"label_probs\": [<an ordered list of probabilities for each class>]\n },\n {\n }\n ],\n [],\n []\n ...\n ]\n}\n
\nThe two covariance matrices in covars
need to be positive semi-definite in order for the code to work. A covariance matrix C
is positive semi-definite when its eigenvalues are not negative. You can easily check this condition in python with the following function:
def is_pos_semidefinite(C):\n return np.all(np.linalg.eigvals(C) >= 0)\n
\nWe now accommodate a way to submit probabilistic segmentation detections.\nFor this format, a .npy file for each image stores all detection probabilistic segmentation heatmaps for that image.\nThis 3D array's shape is m x h x w where m is the number of segmentation masks, h is the image height, and w is the\nimage width.\nEach detection dictionary now contains the location of the .npy file associated with the detection and the mask id for the\nspecific detection.\nYou may also define a bounding box to replace the probabilistic segmentation for bounding-box detections and define a\nchosen class to use for mAP and moLRP evaluation (rather than always using the max class of label_probs).
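\nA minimal sketch of producing such a file pair is shown here (all filenames, shapes, and values are illustrative; the expected .json fields are listed in the format below):
\nimport numpy as np\n\n# Two per-pixel probability heatmaps for one image, stored as an m x h x w array\nmasks = np.zeros((2, 480, 640), dtype=np.float32)\nmasks[0, 100:200, 150:300] = 0.9\nmasks[1, 300:400, 50:200] = 0.8\nnp.save('image_0000_masks.npy', masks)      # hypothetical filename\n\n# Matching detection entry for the first mask\ndetection = {\n    'label_probs': [0.6, 0.3, 0.1],          # ordered as in 'classes'\n    'masks_file': 'image_0000_masks.npy',\n    'mask_id': 0,                            # index into the saved array\n    'bbox': [150, 100, 300, 200]             # optional, used for mAP and moLRP\n}\n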
\nExpected format for probabilistic segmentation detection files is as follows:
\n{\n \"classes\": [<an ordered list of class names>],\n \"detections\": [\n [\n {\n \"label_probs\": [<an ordered list of probabilities for each class>],\n \"masks_file\": \"<location of .npy file holding probabilistic segmentation mask>\",\n \"mask_id\": <index of this detection's mask in mask_file's numpy array>,\n \"label\": <chosen label within label_probs> (optional),\n \"bbox\": [x1, y1, x2, y2] (optional for use in mAP and moLRP),\n },\n {\n }\n ],\n [],\n []\n ...\n ]\n}\n
\nTo convert coco detections to rvc format simply run:
\npython file_convert_coco_to_rvc1.py --coco_gt <gt_json_file> --coco_det <det_json_file> --rvc1_det <output_json_file>
where <gt_json_file>
is the coco format ground-truth json filename, det_json_file
is the coco format detection\njson filename, and output_json_file
is the json filename where your rvc1 formatted detections will be saved.
By default, coco json format does not come with the predicted scores for all the classes available, in which case the conversion script will just\nextract the score of the chosen class and distribute the remaining probability across all other classes. However, this will produce\nincorrect measures of label quality, because label quality is based on the probability estimated by the detector for the object's ground-truth class, which might not\ncorrespond to the chosen class. To facilitate correct measurements, if a detection element in the coco json file (det_json_file
) comes with a\nkey all_scores
, the conversion script will consider it as an array of all the scores, and use it instead of the default behaviour.
Also, by default, coco json format does not consider the existence of a covariance matrix, which is needed for PDQ calculations. The conversion\nscript assigns a zeroed covariance matrix by default, but if a detection element in the coco json file (det_json_file
) comes with a\nkey covars
, the conversion script will use that covariance matrix instead of the default one with zeros. Please refer to the previous section RVC1 Detection Format
for further information on how covars
should be formatted in the json file.
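\nAs a hedged sketch of what that looks like in practice (filenames and score values are purely illustrative; only the all_scores and covars keys come from the description above), the keys can be added to each COCO-format detection before running the conversion script:
\nimport json\n\nwith open('coco_detections.json', 'r') as f:      # hypothetical COCO-format detection file\n    detections = json.load(f)\n\nfor det in detections:\n    # Full per-class score vector from your detector (ordered by class)\n    det['all_scores'] = [0.05, 0.85, 0.10]         # illustrative values\n    # One 2x2 covariance per box corner, formatted as in the RVC1 Detection Format section\n    det['covars'] = [[[4.0, 0.0], [0.0, 4.0]],\n                     [[4.0, 0.0], [0.0, 4.0]]]\n\nwith open('coco_detections_with_extras.json', 'w') as f:\n    json.dump(detections, f)\n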
To perform full evaluation simply run:
\npython evaluate.py --test_set <test_type> --gt_loc <gt_location> --det_loc <det_location> --save_folder <save_folder> --set_cov <cov> --num_workers <num_workers>
Optional flags for new functionality include --bbox_gt
, --segment_mode
, --greedy_mode
, and --prob_seg
.\nThere is also an --mAP_heatmap
flag but that should not generally be used.
<test_type>
is a string defining whether we are evaluating COCO or RVC1 data. Options are 'coco' and 'rvc1'
<gt_location>
is a string defining either the location of a ground-truth .json file (coco tests) or a folder of\nground truth sequences (rvc1 data). Which one it is interpreted as is defined by <test_type>
<det_loc>
is a string defining either the location of a detection .json file (coco data) or a folder of .json files for\nmultiple sequences (rvc1 data). Which one it is interpreted as is defined by <test_type>
.\nNote that these detection files must be in rvc1 format.
<save_folder>
is a string defining the folder where analysis will be stored in form of scores.txt, and files for visualisations
<cov>
is an optional value defining set covariance for the corners of detections.
--bbox_gt
flag states that all ground-truth should be treated as bounding boxes for PDQ analysis.\nAll pixels within the bounding box will be used for analysis and there will be no \"ignored\" pixels. This enables\nuse of datasets with no segmentation information, provided they are stored in COCO ground-truth format.
--segment_mode
flag states that evaluation is performed per-pixel on the ground-truth segments with no \"ignored\"\npixels to accommodate box-shaped detections. This should only be used if evaluating a probabilistic segmentation\ndetection system.
--greedy_mode
flag states that assignment of detections to ground-truth objects based upon pPDQ scores is done\ngreedily rather than by optimal assignment. Greedy mode can be faster for some applications but does not match the \"official\"\nPDQ process, and there may be some minuscule differences in score/behaviour.
--prob_seg
flag states that the detection .json file is formatted for probabilistic segmentation detections as outlined\nabove.
--mAP_heatmap
flag should not generally be used, but enables mAP/moLRP evaluation to be based not upon the corners\ndefined by PBox/BBox detections, but upon corners that encompass all pixels of the detection above a given probability threshold\n(0.0027).
--num_workers
sets the number of parallel worker processes used when calculating the PDQ score. By default, this value is 6.
For further details, please consult the code.
\nFor consistency reasons, unlike the original rvc1 evaluation code, we do not multiply PDQ by 100 to provide it as a percentage.\nPDQ is also labelled as \"PDQ\" in scores.txt rather than simply \"score\".
\nFor anyone unfamiliar with moLRP based measures, these values are losses and not qualities like all other provided measures.\nTo transform these results from losses to qualities simply take 1 - moLRP.
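\nFor example (with an illustrative value):
\nmolrp_loss = 0.72                 # moLRP value reported in scores.txt\nmolrp_quality = 1 - molrp_loss    # 0.28, now comparable to the other quality measures\n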
\nNewly implemented modes --segment_mode
, --bbox_gt
, --greedy_mode
are not used for the RVC1 challenge but can be\nuseful for developing research in probabilistic segmentation, when your dataset does not have a segmentation mask, or\nwhen time is critical, respectively.
To create visualisations for probabilistic detections and PDQ analysis on a single sequence of images run:
\npython visualise_pdq_analysis.py --data_type <test_type> --ground_truth <gt_location> --gt_img_folder <gt_imgs_location> --det_json <det_json_file> --gt_analysis <gt_analysis_file> --det_analysis <det_analysis_file> --save_folder <save_folder_location> --set_cov <cov> --img_type <ext> --colour_mode <colour_mode> --corner_mode <corner_mode> --img_set <list_of_img_names> --full_info
where:
\n<test_type>
is a string defining whether we are evaluating COCO or RVC1 data. Options are 'coco' and 'rvc1'
<gt_location>
is a string defining either the location of a ground-truth .json file (coco tests) or a folder of\nground truth sequences (rvc1 data). Which one it is interpreted as is defined by <test_type>
<gt_imgs_location>
a string defining the folder where ground-truth images for the sequence are stored.
<det_json_file>
a string defining the detection .json file matching the sequence to be visualised
<gt_analysis>
a string defining the ground-truth analysis .json file matching the sequence to be visualised.\nMust also correspond to the detection .json file being visualised.
<det_analysis>
a string defining the detection analysis .json file matching the sequence to be visualised.\nMust also correspond to the detection .json file being visualised.
<save_folder_location>
a string defining the folder where image visualisations will be saved. Must be different to the <gt_imgs_location>
<cov>
is an optional value defining set covariance for the corners of the detections. This must match the set covariance used in evaluate.py
<img_type>
is a string defining what image type the ground-truth is provided in. For example 'jpg'.
<colour_mode>
is a string defining whether correct and incorrect results are coloured green and red ('gr') or blue and orange ('bo') respectively.\nDefault option is blue and orange.
<corner_mode>
is a string defining whether Gaussian corners are represented as three ellipses ('ellipse') or two arrows ('arrow').\nEllipses are drawn showing 1, 2, and 3 std deviation rings along the contours of the Gaussian.\nArrows show 2 x standard deviation along the major axes of the Gaussian.\nDefault option is 'ellipse'.
<list_of_img_names>
is an optional parameter where the user provides a set of image names and only these images will have visualisations drawn for them.\nFor example --img_set cat.jpg dog.jpg whale.jpg
would only draw visualisations for \"cat.jpg\", \"dog.jpg\", and \"whale.jpg\".
--full_info
is an optional flag defining whether full pairwise quality analysis should be written for TP detections. Recommended setting for in-depth analysis
For further details, please consult the code.
\nConsistency must be kept between ground-truth analysis, detection analysis, and detection .json files in order to provide meaningful visualisation.
\nIf the evaluation which produced the ground-truth analysis and detection analysis used a set covariance input, you must\nprovide that same set covariance when generating visualisations.
\nNew modes such as using probabilistic segmentation detections (--prob_seg
) in segment mode (--segment_mode
)\nor using bounding_box ground-truth (--bbox_gt
) in the evaluation code are NOT yet supported.
To create visualisations for probabilistic detections on a single sequence of images run:
\npython visualise_prob_detections.py --gt_img_folder <gt_imgs_location> --det_json <det_json_file> --save_folder <save_folder_location> --set_cov <cov> --img_type <ext> --corner_mode <corner_mode> --img_set <list_of_img_names>
where:
\n<gt_imgs_location>
a string defining the folder where ground-truth images for the sequence are stored.
<det_json_file>
a string defining the detection .json file matching the sequence to be visualised
<save_folder_location>
a string defining the folder where image visualisations will be saved. Must be different to the <gt_imgs_location>
<cov>
is an optional value defining set covariance for the corners of the detections.
<img_type>
is a string defining what image type the ground-truth is provided in. For example 'jpg'.
<corner_mode>
is a string defining whether Gaussian corners are represented as three ellipses ('ellipse') or two arrows ('arrow').\nEllipses are drawn showing 1, 2, and 3 std deviation rings along the contours of the Gaussian.\nArrows show 2 x standard deviation along the major axes of the Gaussian.\nDefault option is 'ellipse'.
<list_of_img_names>
is an optional parameter where the user provides a set of image names and only these images will have visualisations drawn for them.\nFor example --img_set cat.jpg dog.jpg whale.jpg
would only draw visualisations for \"cat.jpg\", \"dog.jpg\", and \"whale.jpg\".
For further details, please consult the code.
\nOrder of detections in detections.json file must match the order of the images as stored in the ground-truth images\nfolder.
\nNew modes such as using probabilistic segmentation detections (--prob_seg
) in the evaluation code are\nNOT yet supported.
Development of the probability-based detection quality evaluation measure was directly supported by:
\nnumpy
This Python package allows the manipulation of directed and non-directed graphs. Also supports embedded graphs. It is suitable for graphs with thousands of nodes.
\nfrom pgraph import *\nimport json\n\n# load places and routes\nwith open('places.json', 'r') as f:\n places = json.loads(f.read())\nwith open('routes.json', 'r') as f:\n routes = json.loads(f.read())\n\n# build the graph\ng = UGraph()\n\nfor name, info in places.items():\n g.add_vertex(name=name, coord=info[\"utm\"])\n\nfor route in routes:\n g.add_edge(route[0], route[1], cost=route[2])\n\n# plan a path from Hughenden to Brisbane\np = g.path_Astar('Hughenden', 'Brisbane')\ng.plot(block=False) # plot it\ng.highlight_path(p) # overlay the path\n
\nGraphs belong to the class UGraph
or DGraph
for undirected or directed graphs respectively. The graph is essentially a container for the vertices.
g.add_vertex()
add a vertex
g.n
the number of vertices
g
is an iterator over vertices, can be used as for vertex in g:
g[i]
reference a vertex by its index or name
g.add_edge()
connect two vertices
g.edges()
all edges in the graph
g.plot()
plots the vertices and edges
g.nc
the number of graph components, 1 if fully connected
g.component(v)
the component that vertex v
belongs to
g.path_BFS()
breadth-first search
g.path_Astar()
A* search
g.adjacency()
adjacency matrix
g.Laplacian()
Laplacian matrix
g.incidence()
incidence matrix
Vertices belong to the class UVertex
(for undirected graphs) or DVertex
(for directed graphs), which are each subclasses of Vertex
.
v.coord
the coordinate vector for embedded graph (optional)v.name
the name of the vertex (optional)v.neighbours()
is a list of the neighbouring verticesv1.samecomponent(v2)
predicate for vertices belonging to the same componentVertices can be named and referenced by name.
\nEdges are instances of the class Edge
.\nEdges are not referenced by the graph object; instead, each edge references a pair of vertices, and the vertices reference the edges. For a directed graph only the start vertex of an edge references the edge object, whereas for an undirected graph both vertices reference the edge object.
e.cost
cost of edge for planning methodse.next(v)
vertex on edge e
that is not v
e.v1
, e.v2
the two vertices that define the edge e
g.remove(v)
remove vertex v
e.remove()
remove edge e
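\nA short sketch exercising several of the methods listed above (the vertex names, coordinates, and costs are made up for illustration):
\nfrom pgraph import UGraph\n\ng = UGraph()\nfor name, coord in [('A', [0, 0]), ('B', [1, 0]), ('C', [1, 1])]:\n    g.add_vertex(name=name, coord=coord)\n\ng.add_edge('A', 'B', cost=1)\ng.add_edge('B', 'C', cost=2)\n\nprint(g.n)             # number of vertices -> 3\nprint(g.nc)            # number of components -> 1, fully connected\nprint(g.adjacency())   # adjacency matrix\n\na = g['A']                                  # reference a vertex by name\nprint([v.name for v in a.neighbours()])     # -> ['B']\n\nfor e in g.edges():                         # iterate over all edges\n    print(e.v1.name, e.v2.name, e.cost)\n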
Consider a user class Foo
that we would like to connect using a graph overlay, i.e.\ninstances of Foo
become vertices in a graph.
DVertex
or UVertex
depending on graph typeFoo
into the graph using add_vertex
and create edges as requiredclass Foo(UVertex):\n # foo stuff goes here\n \nf1 = Foo(...)\nf2 = Foo(...)\n\ng = UGraph() # create a new undirected graph\ng.add_vertex(f1)\ng.add_vertex(f2)\n\nf1.connect(f2, cost=3)\nfor f in f1.neighbours():\n # say hi to the neighbours\n
\nThe key objects and their interactions are shown below.
\nThis is a re-engineered version of PGraph.m which ships as part of the Spatial Math Toolbox for MATLAB. This class is used to support bundle adjustment, pose-graph SLAM and various planners such as PRM, RRT and Lattice.
\nThe Python version was designed from the start to work with directed and undirected graphs, whereas directed graphs were a late addition to the MATLAB version. Semantics are similar but not identical. In particular the use of subclassing rather than references to\nuser data is encouraged.
\n","name":"Graph classes (Python)","type":"code","url":"https://github.com/petercorke/pgraph-python","image":"https://github.com/petercorke/pgraph-python/raw/master/examples/roads.png","_images":["/_next/static/images/roads-8b68dd7b635af6f867a02be9d399b4bd.png.webp","/_next/static/images/roads-18739c10c6cf2a6dccbffb581fb9a183.png"],"src":"/content/pgraph-python.md","id":"pgraph-python","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/quadricslam.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/quadricslam.json new file mode 100644 index 0000000000..2ad6f59217 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/quadricslam.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"QuadricSLAM is a system for using quadrics to represent objects in a scene, leveraging common optimisation tools for simultaneous localisation and mapping (SLAM) problems to converge on stable object maps and camera trajectories. This library uses Georgia Tech's Smoothing and Mapping (GTSAM) library for factor graph optimisation, and adds support through our custom GTSAM quadrics extension.
\nTODO update with a more holistic reflection of the repository in its current state\n
The key features of this repository are:
\nq = QuadricSLAM(data_source=MyDataSource(), detector=MyDetector(), associator=MyDataAssociator())\nq.spin()\n
\nWe expect this repository to be active and continually improved upon. If you have any feature requests or experience any bugs, don't hesitate to let us know. Our code is free to use, and licensed under BSD-3. We simply ask that you cite our work if you use QuadricSLAM in your own research.
\nPre-built wheels of this library are available on PyPI for most Linux systems, as well as source distributions. Install the library with:
\npip install quadricslam\n
\nFrom here basic custom QuadricSLAM systems can be setup by implementing and integrating the following abstract classes:
\nfrom quadricslam import DataSource, Detector, Associator, visualise\n\nclass MyDataSource(DataSource):\n ...\n\nclass MyDetector(Detector):\n ...\n\nclass MyAssociator(Associator):\n ...\n\nq = QuadricSlam(data_source=MyDataSource(),\n detector=MyDetector(),\n associator=MyAssociator(),\n on_new_estimate=lambda vals, labels, done: visualise(vals, labels, done)))\n )\nq.spin()\n
\nThe examples described below also provide code showing how to create customisations for a range of different scenarios.
\nNote: in the spirit of keeping this package light, some dependencies may not be installed; please install those manually
\nThis repository contains a number of examples to demonstrate how QuadricSLAM systems can be set up in different contexts.
\nEach example is a file in the quadricslam_examples
module, with a standalone run()
function. There are two possible ways to run each example:
Directly through the command line:
\npython -m quadricslam_examples.EXAMPLE_NAME ARGS ...\n
\ne.g. for the hello_quadricslam
example:
python -m quadricslam_examples.hello_quadricslam\n
\nOr from within Python:
\nfrom quadricslam_examples.EXAMPLE_NAME import run\nrun()\n
\nhello_manual_quadricslam
Shows how to create a QuadricSLAM system from scratch using the primitives exposed by our GTSAM Quadrics library. The scenario is 4 viewpoints in a square around 2 quadrics in the middle of the square:
\nhello_quadricslam
Same scenario as the hello_manual_quadricslam
example, but uses the abstractions provided by this library. Shows how an entire QuadricSLAM system can be created with only a few lines of code when the appropriate components are available:
tum_rgbd_dataset
Re-creation of the TUM RGBD dataset experiments used in our initial publication. There is a script included for downloading the dataset.
\nNote: the paper used hand-annotated data to avoid the data association problem; as a result the example here requires a custom data associator to be created before it will run
\nrealsense_python
Demonstrates how a system can be run using an RGBD RealSense, the pyrealsense2 library, and a barebones OpenCV visual odometry algorithm.
\nThe example is a simple plug-n-play system, with weak localisation and data association:
\nrealsense_ros
Demonstrates how a ROS QuadricSLAM system can be put together with an RGBD RealSense, the ROS RealSense library, and Kimera VIO's visual odometry system.
\nThis example includes a script for creating an entire ROS workspace containing all the required packages built from source. Once installed, it runs the same as the realsense_python
example but with significantly better localisation:
If you are using this library in academic work, please cite the publication:
\nL. Nicholson, M. Milford and N. Sünderhauf, \"QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM,\" in IEEE Robotics and Automation Letters, vol. 4, no. 1, pp. 1-8, Jan. 2019, doi: 10.1109/LRA.2018.2866205. PDF.
\n@article{nicholson2019,\n title={QuadricSLAM: Dual Quadrics From Object Detections as Landmarks in Object-Oriented SLAM},\n author={Nicholson, Lachlan and Milford, Michael and Sünderhauf, Niko},\n journal={IEEE Robotics and Automation Letters},\n year={2019},\n}\n
\n","name":"QuadricSLAM","type":"code","url":"https://github.com/qcr/quadricslam","image":"https://github.com/qcr/gtsam-quadrics/raw/master/doc/quadricslam_video.png","_images":["/_next/static/images/quadricslam_video-412d8ad8190b4f7eee1320faf254cd6f.png.webp","/_next/static/images/quadricslam_video-a4d673ea6414754e153004c137d2a2c1.png"],"src":"/content/quadricslam/quadricslam.md","id":"quadricslam","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/robotics-toolbox-python.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/robotics-toolbox-python.json
new file mode 100644
index 0000000000..def235822c
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/robotics-toolbox-python.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"\n![]() | \n\nA Python implementation of the Robotics Toolbox for MATLAB®\n\n | \n
This toolbox brings robotics-specific functionality to Python, and leverages\nPython's advantages of portability, ubiquity and support, and the capability of\nthe open-source ecosystem for linear algebra (numpy, scipy), graphics\n(matplotlib, three.js, WebGL), interactive development (jupyter, jupyterlab,\nmybinder.org), and documentation (sphinx).
\nThe Toolbox provides tools for representing the kinematics and dynamics of\nserial-link manipulators - you can easily create your own in Denavit-Hartenberg\nform, import a URDF file, or use over 30 supplied models for well-known\ncontemporary robots from Franka-Emika, Kinova, Universal Robotics, Rethink as\nwell as classical robots such as the Puma 560 and the Stanford arm.
\nThe Toolbox contains fast implementations of kinematic operations. The forward\nkinematics and the manipulator Jacobian can be computed in less than 1 microsecond\nwhile numerical inverse kinematics can be solved in as little as 4 microseconds.
\nThe toolbox also supports mobile robots with functions for robot motion models\n(unicycle, bicycle), path planning algorithms (bug, distance transform, D*,\nPRM), kinodynamic planning (lattice, RRT), localization (EKF, particle filter),\nmap building (EKF) and simultaneous localization and mapping (EKF).
\nThe Toolbox provides:
\nThe Toolbox leverages the Spatial Maths Toolbox for Python to\nprovide support for data types such as SO(n) and SE(n) matrices, quaternions, twists and spatial vectors.
\nYou will need Python >= 3.6
\nInstall a snapshot from PyPI
\npip3 install roboticstoolbox-python\n
\nAvailable options are:
\ncollision
install collision checking with pybulletPut the options in a comma separated list like
\npip3 install roboticstoolbox-python[optionlist]\n
\nSwift, a web-based visualizer, is\ninstalled as part of Robotics Toolbox.
\nTo install the bleeding-edge version from GitHub
\ngit clone https://github.com/petercorke/robotics-toolbox-python.git\ncd robotics-toolbox-python\npip3 install -e .\n
\n![]() | \n![]() | \n\nDo you want to learn about manipulator kinematics, differential kinematics, inverse-kinematics and motion control? Have a look at our\ntutorial.\nThis tutorial comes with two articles to cover the theory and 12 Jupyter Notebooks providing full code implementations and examples. Most of the Notebooks are also Google Colab compatible allowing them to run online.\n | \n
We will load a model of the Franka-Emika Panda robot defined by a URDF file
\nimport roboticstoolbox as rtb\nrobot = rtb.models.Panda()\nprint(robot)\n\n\tERobot: panda (by Franka Emika), 7 joints (RRRRRRR), 1 gripper, geometry, collision\n\t┌─────┬──────────────┬───────┬─────────────┬────────────────────────────────────────────────┐\n\t│link │ link │ joint │ parent │ ETS: parent to link │\n\t├─────┼──────────────┼───────┼─────────────┼────────────────────────────────────────────────┤\n\t│ 0 │ panda_link0 │ │ BASE │ │\n\t│ 1 │ panda_link1 │ 0 │ panda_link0 │ SE3(0, 0, 0.333) ⊕ Rz(q0) │\n\t│ 2 │ panda_link2 │ 1 │ panda_link1 │ SE3(-90°, -0°, 0°) ⊕ Rz(q1) │\n\t│ 3 │ panda_link3 │ 2 │ panda_link2 │ SE3(0, -0.316, 0; 90°, -0°, 0°) ⊕ Rz(q2) │\n\t│ 4 │ panda_link4 │ 3 │ panda_link3 │ SE3(0.0825, 0, 0; 90°, -0°, 0°) ⊕ Rz(q3) │\n\t│ 5 │ panda_link5 │ 4 │ panda_link4 │ SE3(-0.0825, 0.384, 0; -90°, -0°, 0°) ⊕ Rz(q4) │\n\t│ 6 │ panda_link6 │ 5 │ panda_link5 │ SE3(90°, -0°, 0°) ⊕ Rz(q5) │\n\t│ 7 │ panda_link7 │ 6 │ panda_link6 │ SE3(0.088, 0, 0; 90°, -0°, 0°) ⊕ Rz(q6) │\n\t│ 8 │ @panda_link8 │ │ panda_link7 │ SE3(0, 0, 0.107) │\n\t└─────┴──────────────┴───────┴─────────────┴────────────────────────────────────────────────┘\n\n\t┌─────┬─────┬────────┬─────┬───────┬─────┬───────┬──────┐\n\t│name │ q0 │ q1 │ q2 │ q3 │ q4 │ q5 │ q6 │\n\t├─────┼─────┼────────┼─────┼───────┼─────┼───────┼──────┤\n\t│ qr │ 0° │ -17.2° │ 0° │ -126° │ 0° │ 115° │ 45° │\n\t│ qz │ 0° │ 0° │ 0° │ 0° │ 0° │ 0° │ 0° │\n\t└─────┴─────┴────────┴─────┴───────┴─────┴───────┴──────┘\n
\nThe symbol @
indicates the link as an end-effector, a leaf node in the rigid-body\ntree (Python prompts are not shown to make it easy to copy+paste the code, console output is indented).\nWe will compute the forward kinematics next
Te = robot.fkine(robot.qr) # forward kinematics\nprint(Te)\n\n\t0.995 0 0.09983 0.484\n\t0 -1 0 0\n\t0.09983 0 -0.995 0.4126\n\t0 0 0 1\n
\nWe can solve inverse kinematics very easily. We first choose an SE(3) pose\ndefined in terms of position and orientation (end-effector z-axis down (A=-Z) and finger\norientation parallel to y-axis (O=+Y)).
\nfrom spatialmath import SE3\n\nTep = SE3.Trans(0.6, -0.3, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])\nsol = robot.ik_LM(Tep) # solve IK\nprint(sol)\n\n\t(array([ 0.20592815, 0.86609481, -0.79473206, -1.68254794, 0.74872915,\n\t\t\t2.21764746, -0.10255606]), 1, 114, 7, 2.890164057230228e-07)\n\nq_pickup = sol[0]\nprint(robot.fkine(q_pickup)) # FK shows that desired end-effector pose was achieved\n\n\t 1 -8.913e-05 -0.0003334 0.5996\n\t-8.929e-05 -1 -0.0004912 -0.2998\n\t-0.0003334 0.0004912 -1 0.1001\n\t 0 0 0 1\n
\nWe can animate a path from the ready pose qr
configuration to this pickup configuration
qt = rtb.jtraj(robot.qr, q_pickup, 50)\nrobot.plot(qt.q, backend='pyplot', movie='panda1.gif')\n
\n\n\t\n
\nwhere we have specified the matplotlib pyplot
backend. Blue arrows show the joint axes and the coloured frame shows the end-effector pose.
We can also plot the trajectory in the Swift simulator (a browser-based 3d-simulation environment built to work with the Toolbox)
\nrobot.plot(qt.q)\n
\n\n\t\n
\nWe can also experiment with velocity controllers in Swift. Here is a resolved-rate motion control example
\nimport swift\nimport roboticstoolbox as rtb\nimport spatialmath as sm\nimport numpy as np\n\nenv = swift.Swift()\nenv.launch(realtime=True)\n\npanda = rtb.models.Panda()\npanda.q = panda.qr\n\nTep = panda.fkine(panda.q) * sm.SE3.Trans(0.2, 0.2, 0.45)\n\narrived = False\nenv.add(panda)\n\ndt = 0.05\n\nwhile not arrived:\n\n v, arrived = rtb.p_servo(panda.fkine(panda.q), Tep, 1)\n panda.qd = np.linalg.pinv(panda.jacobe(panda.q)) @ v\n env.step(dt)\n\n# Uncomment to stop the browser tab from closing\n# env.hold()\n
\n\n\t\n
\nThe notebooks
folder contains some tutorial Jupyter notebooks which you can browse on GitHub. Additionally, have a look in the examples
folder for many ready to run examples.
The toolbox is incredibly useful for developing and prototyping algorithms for research, thanks to the exhaustive set of well documented and mature robotic functions exposed through clean and painless APIs. Additionally, the ease at which a user can visualize their algorithm supports a rapid prototyping paradigm.
\nJ. Haviland, N. Sünderhauf and P. Corke, \"A Holistic Approach to Reactive Mobile Manipulation,\" in IEEE Robotics and Automation Letters, doi: 10.1109/LRA.2022.3146554. In the video, the robot is controlled using the Robotics toolbox for Python and features a recording from the Swift Simulator.
\n[Arxiv Paper] [IEEE Xplore] [Project Website] [Video] [Code Example]
\n\n \n
J. Haviland and P. Corke, \"NEO: A Novel Expeditious Optimisation Algorithm for Reactive Motion Control of Manipulators,\" in IEEE Robotics and Automation Letters, doi: 10.1109/LRA.2021.3056060. In the video, the robot is controlled using the Robotics toolbox for Python and features a recording from the Swift Simulator.
\n[Arxiv Paper] [IEEE Xplore] [Project Website] [Video] [Code Example]
\n\n \n
A Purely-Reactive Manipulability-Maximising Motion Controller, J. Haviland and P. Corke. In the video, the robot is controlled using the Robotics toolbox for Python.
\n[Paper] [Project Website] [Video] [Code Example]
\n\n \n
Check out our ICRA 2021 paper on IEEE Xplore or get the PDF from Peter's website.
\nIf the toolbox helped you in your research, please cite
\n@inproceedings{rtb,\n title={Not your grandmother’s toolbox--the Robotics Toolbox reinvented for Python},\n author={Corke, Peter and Haviland, Jesse},\n booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},\n pages={11357--11363},\n year={2021},\n organization={IEEE}\n}\n
\nIf you are using the Toolbox in your open source code, feel free to add our badge to your readme!
\nFor the powered by robotics toolbox badge
\n\ncopy the following
\n[](https://github.com/petercorke/robotics-toolbox-python)\n
\nFor the powered by python robotics badge
\n\ncopy the following
\n[](https://github.com/petercorke/robotics-toolbox-python)\n
\nSee the common issues with fixes here.
\n","name":"Robotics Toolbox Python","type":"code","url":"https://github.com/petercorke/robotics-toolbox-python","image":"repo:/docs/figs/RobToolBox_RoundLogoB.png","image_fit":"contain","_images":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"src":"/content/robotics_toolbox/robotics-toolbox-python.md","id":"robotics-toolbox-python","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-omron-driver.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-omron-driver.json new file mode 100644 index 0000000000..d6105be208 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-omron-driver.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"The OMRON LD-60 is a capable platform out of the box but has no ROS support. Fortunatelyt he LD-60 s still really a Pioneer at heart and there is significant resources in the public domain which can interface to the platform.
\nThis does not replace Mobile Planner. Mobile Planner is still used for map creation and robot configuration. Note: Mobile Planner will run inside Wine on Ubuntu 18.04.
\nThis driver currently assumes you have a user (which can be set via Mobile Planner) with no password.
\nHost IP: String e.g. 172.168.1.1
\nHost Port: String e.g. 7272
\nUser: String e.g. omron
\nComing Soon
\n","name":"ROS Omron Driver","type":"code","url":"https://github.com/qcr/ros_omron_driver","image":"./docs/omron_robot.jpg","_images":["/_next/static/images/omron_robot-6882a84f2dec840b5cba11e9f8f19e65.jpg.webp","/_next/static/images/omron_robot-542517e40cecf88333a4f6e07f854cc1.jpg"],"src":"/content/ros-omron-driver.md","id":"ros-omron-driver","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-trees.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-trees.json new file mode 100644 index 0000000000..880e5a5232 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/ros-trees.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"\n\n~ New to ROS Trees? We have guides for getting started, and solving problems using trees ~\n\n
\n\n \n
\nROS Trees makes behaviour trees accessible in ROS ecosystems, bringing all the compositional benefits of behaviour trees while facilitating interactions with the underlying state of robotic systems. We build on top of the capabilities provided by py_trees, with these extra features:
\nThis package has been used on a number of real world robots, allowing us to combine a range of useful robot capabilities into significant robot behaviours. Featured works include:
\nWe are always interested in hearing about where our software is used. If you've used ROS Trees in your work, we'd love to hear more about how it was used.
\nAll the work for interfacing with ROS topics, Services, and Action Servers is already done for you in ros_trees.leaves_ros.*
. It provides the following leaves, which extend the functionality of the base Leaf
class described below:
ActionLeaf
: calls a ROS Action Server with namespace action_namespace
PublisherLeaf
: publishes a message on topic topic_name
with type topic_class
ServiceLeaf
: calls a ROS Service with service name service_name
SubscriberLeaf
: receives a message on topic topic_name
with type topic_class
ROS can be tedious when it comes to passing data between processes. For example, a service response with a PoseStamped
field (e.g. MyServiceResponse
) and an Action Server with a PoseStamped
goal field (e.g. MyActionGoal
) cannot be used interchangeably. A manual conversion of output to input is needed even though they have exactly the same fields. This becomes extremely tiresome in the scope of a tree, where there are linking outputs to inputs is widespread. It makes it impossible to chain standalone leaves together, as they are inherently dependent on each other's input and output formats.
ros_trees
handles this pain \"automagically\" through the method ros_trees.data_management.auto_generate()
. This function tries as hard as possible to generate an instance of the desired object class from data (don't ask...). While this saves you from writing endless arbitrary linking code, it is also improves the quality of your leaves. Using this method makes leaves work with as many different types of input as possible, rather than requiring manual conversion code with every use case.
Leaves can be written as an instance or class, with class being generally preferred. Below are some basic examples of how to write your own leaves ():
\nA Leaf
which accepts no input data, and saves the result (output of leaf.result_fn()
) so the next leaf can access it via data_management.get_last_value()
:
data_generator_leaf = Leaf(\"Data Generator\", load=False, save=True)\n
\nA Leaf
which swaps success results for failures and vice versa (note: usually you would use py_trees.decorators for this):
inverted_leaf = Leaf(\n \"Inverted\", eval_fn=lambda leaf, value: not leaf._default_eval_fn(value))\n
\nA SubscriberLeaf
whose data on '/camera/image_raw'
is not yet ready for use (we don't yet have the camera), so instead it will instantly return failure via debug mode:
camera_leaf = SubscriberLeaf(\"Camera\",\n topic_name='/camera/image_raw',\n topic_class=sensor_msgs.Image,\n debug=debugging.DebugMode.INSTANT_FAILURE)\n
\nAn ActionLeaf
calling a '/move_to_location'
ROS Action Server to move an arm 'home'
, and saves True
as the result indicating it always succeeds:
move_home_leaf = ActionLeaf(\"Move home\",\n action_namespace='/move_to_location',\n load_value='home',\n save=True,\n save_value=True)\n
\nA ServiceLeaf
calling a '/detect_objects'
ROS Service by taking in an RGB image saved in key 'rgb_image'
, and returning the first object in the list of returned objects:
def first_object(leaf):\n return leaf._default_result_fn[0]\n\n\nfirst_object_leaf = ServiceLeaf(\"Get first object\",\n service_name='/detect_objects',\n load_key='rgb_image',\n result_fn=first_object)\n
\nThe previous example, but using a Lambda instead:
\nfirst_object_leaf = ServiceLeaf(\n \"Get first object\",\n service_name='/detect_objects',\n load_key='rgb_image',\n result_fn=lambda leaf: leaf._default_result_fn[0])\n
\nGeneral, non-robot-specific leaves that we create are provided through the ros_trees.leaves_common
submodule. Importing this provides you with a library of common leaves including:
py_trees
(note: leaves are called \"behaviours\" in their vocabulary)A good leaf is a leaf that is as general purpose as physically possible given what it does. It may be impossible to write a leaf that can perform object detection without an input image, but your leaf should be written to work with any type of input that contains an image. To achieve this, the following are some good guidelines to stick by:
\nclassdef MyLeaf(ros_trees.leaves.Leaf): ...
)ActionLeaf
and ServiceLeaf
implementations end up being).m()
is replacing BaseClass.m()
in the base leaf class, your implementation of m()
should aim to call BaseClass.m()
either directly or by super(MyClass, self).m()
).Leaf
) affects every leaf ever written by anyone... that should be weighed up when considering whether it is easier to just implement what you want in your own leaf.The above figure shows the leaf's lifecycle, from the start to end of execution by the behaviour tree. Details for each of the parts in the image above are provided below:
\nget_last_value()
from ros_trees.data_management
that will \"magically\" get the last saved result from a previous leaf.load_fn
. If the load_fn
argument is not provided, there is a default behaviour that should work for most cases. The default load_fn
(see Leaf._default_load_fn()
) will load from the blackboard key load_key
if provided, otherwise it uses the last saved value through get_last_value()
.load_fn
can be assumed to be available in the self.loaded_data
member from this point forward.is_leaf_done()
to control how the leaf decides its action is done, and override _extra_update()
to start your long running behaviour. It must not be blocking!result_fn
. Short running actions can simply create their result from scratch in this function.self.result
.save
flag is set, the leaf will attempt to save self.result
(or save_value
) according to save_fn
. The default save_fn
should be fine for most cases; it saves the result to key save_key
if it is set, otherwise the result is saved such that it is available to the next leaf in the tree with get_last_value()
.eval_fn
is called to determine whether the leaf's process was a success or failure. The function is provided with save_value
if set, otherwise self.result
. If no eval_fn
is provided, the default will return the first bool if the data provided is a list, otherwise the Boolean evaluation of the data._extra_terminate()
. For example, an ActionLeaf
sends the pre-empt signal by overloading extra_terminate()
.That's a general overview of the leaf lifecycle in ros_trees
. We have covered what you will need to know about leaves for 99% of cases, but there are also other methods you can override to control other parts of the process. See the class implementation ros_trees.leaves.Leaf
for full details.
Here we describe each of the input arguments to the leaf classes. Note that all classes have the Leaf
class arguments as they extend from it. If you need any more details, feel free to dig into the class implementations.
A barebones Leaf
every other leaf is built upon. The constructor has the following customisation parameters:
name
: the name of the leaf (required). The name does not have to be unique throughout the tree. It is safest to stick to numbers and letters here due to py_trees
handling other characters poorly in some of its visualisation methods.load
: whether the leaf should attempt to load data (default = True
).load_value
: static data to be loaded by the leaf (default = None
). If this exists it takes precedence over all other load methods (load_key
and loading from last result via data_management.get_last_value()
), so load_fn
should return this value.load_key
: a key to load data from the py_trees.Blackboard
(default = None
). If this exists, and load_value
is not set, load_fn
should return what is stored at this key.load_fn
: a function handle of the form fn(leaf)
, returning the loaded data (default = None
). It is used instead of Leaf._default_load_fn
if provided. The default method loads the first available from: load_value
, load_key
, data_management.get_last_value()
. Your implementation ideally should still call Leaf._default_load_fn
, and respect the priority order outlined.result_fn
: a function handle of the form fn(leaf)
, returning the result of the Leaf's action (default = None
). It is used instead of Leaf._default_result_fn
if provided. The default simply returns leaf.loaded_data
.save
: whether the leaf should save the result (default = False
).save_value
: static data to be saved instead of the leaf result (default = None
). If this is set save_fn
is called with a value
of save_value
instead of leaf.result
.save_key
: a key to load data from the py_trees.Blackboard
(default = None
). If this is present it takes precedence over saving to last result via data_management.set_last_value()
, so save_fn
should save to this key.save_fn
: a function handle of the form fn(leaf, value)
, which saves value
to the configured location. It is used instead of Leaf._default_save_fn
if provided. The default method saves value
at key save_key
if save_key
is set, otherwise it stores the value with data_management.set_last_value()
. Your implementation should respect this priority order.eval_fn
: a function handle of the form fn(leaf, value)
, returning a bool denoting if value
is a success (default = None
). It is used instead of Leaf._default_eval_fn
if provided. The default returns the first bool value if value
is a list, otherwise it returns bool(value)
. value
will either be leaf.result
or save_value
depending on if save_value
is set.debug
: sets debugging mode to one of the values from debugging.DebugMode
(default = debugging.DebugMode.OFF
). Other options are: INSTANT_SUCCESS
, INPUT_FOR_SUCCESS
, INSTANT_FAILURE
, and INPUT_FOR_FAILURE
. See debugging.DebugMode
for further details.An ActionLeaf
creates an actionlib.SimpleActionClient
for interfacing with an existing ROS Action Server. It extends the base Leaf
class, making the following changes:
_default_eval_fn()
: uses the action client state and Leaf._default_eval_fn
to evaluate if the Leaf succeeded_default_load_fn()
: uses the \"automagic\" of data_management.auto_generate()
to try generate a goal for the Action Server from Leaf._default_load_fn()
_default_result_fn()
: returns the result received by the action client_extra_setup
: sets up the actionlib.SimpleActionClient
and attempts contact with the ActionServer
_extra_terminate()
: handles pre-empt requests and all other manners of random failure, gracefully cancelling the goal_extra_update()
: sends the goal if not already sent_is_leaf_done()
: returns False
until the action client has a resultThe ActionLeaf
constructor defines one extra parameter:
action_namespace
: the namespace of the ROS Action Server the leaf will interact with (required)A PublisherLeaf
publishes a message to a topic each time a leaf is called by the tree. It makes the following extensions to the base Leaf
class:
_default_load_fn()
: uses the \"automagic\" of data_management.auto_generate()
to try generate a message of topic_class
from Leaf._default_load_fn()
_default_result_fn()
: does the publishing (as this is a short blocking call), throwing an exception if publishing fails_extra_setup()
: sets up the ROS Publisher and dirtily waits until ROS accepts the publisher (ROS for whatever reason will let you use a publisher before it has finished initialising and you just lose the message...)The PublisherLeaf
class defines two extra parameters:
topic_name
: the name of the topic that the leaf will publish to (required)topic_class
: the type of message that the leaf will publish (required)A ServiceLeaf
calls a service and returns the response. It makes the following extensions to the base Leaf
class:
_default_load_fn()
: uses the \"automagic\" of data_management.auto_generate()
to try generate a service request for the ROS service from Leaf._deafult_load_fn()
_default_result_fn()
: makes the service call (as this is a short blocking call), throwing an exception if the call fails_extra_setup()
: checks the service exists and sets up the rospy.ServiceProxy
for calling itThe ServiceLeaf
class defines one extra parameter:
service_name
: the name of the ROS Service the leaf will interact with (required)A SubscriberLeaf
attempts to get a message on a topic, with configurable timeout parameters. It makes the following extensions to the base Leaf
class:
_default_result_fn()
: blocks until the expiry time is complete or the Leaf receives a message on the configured topic (note: short times should be used as this is blocking currently...)_extra_setup()
: sets up the ROS SubscriberThe SubscriberLeaf
class defines four extra parameters:
topic_name
: the name of the topic that the leaf will subscribe to (required)topic_class
: the type of message that leaf will expect to receive (required)once_only
: only new messages are returned from this leaf (no message will be returned twice)expiry_time
: the time after which a message is deemed to have expired and is deemed to old to be returned by the leaf (default = None
). No value means all messages will be considered.timeout
: the max time the leaf will wait before declaring nothing was received (default=3.0
)This repository contains code and dataset references for two papers: RT-GENE (Gaze Estimation; ECCV2018) and RT-BENE (Blink Estimation; ICCV2019 Workshops).
\nThe RT-GENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use this dataset or the code in a scientific publication, please cite the following paper:
\n@inproceedings{FischerECCV2018,\nauthor = {Tobias Fischer and Hyung Jin Chang and Yiannis Demiris},\ntitle = {{RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments}},\nbooktitle = {European Conference on Computer Vision},\nyear = {2018},\nmonth = {September},\npages = {339--357}\n}\n
\nThis work was supported in part by the Samsung Global Research Outreach program, and in part by the EU Horizon 2020 Project PAL (643783-RIA).
\nThe code is split into four parts, each having its own README contained. There is also an accompanying dataset (alternative link) to the code. For more information, other datasets and more open-source software please visit the Personal Robotic Lab's website: https://www.imperial.ac.uk/personal-robotics/software/.
\nThe rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time.
\n\n \n
\nThe rt_gene_standalone directory contains instructions for eye gaze estimation given a set of images. It shares code with the rt_gene
package (above), in particular the code in rt_gene/src/rt_gene.
The rt_gene_inpainting directory contains code to inpaint the region covered by the eyetracking glasses.
\nThe rt_gene_model_training directory allows using the inpainted images to train a deep neural network for eye gaze estimation.
\n\n
The RT-BENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use our blink estimation code or dataset, please cite the relevant paper:
\n@inproceedings{CortaceroICCV2019W,\nauthor={Kevin Cortacero and Tobias Fischer and Yiannis Demiris},\nbooktitle = {Proceedings of the IEEE International Conference on Computer Vision Workshops},\ntitle = {RT-BENE: A Dataset and Baselines for Real-Time Blink Estimation in Natural Environments},\nyear = {2019},\n}\n
\nRT-BENE was supported by the EU Horizon 2020 Project PAL (643783-RIA) and a Royal Academy of Engineering Chair in Emerging Technologies to Yiannis Demiris.
\nThe code is split into several parts, each having its own README. There is also an associated RT-BENE dataset. For more information, other datasets and more open-source software please visit the Personal Robotic Lab's website: https://www.imperial.ac.uk/personal-robotics/software/. Please note that a lot of the code is shared with RT-GENE (see above), hence there are many references to RT-GENE below.
\nThe rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time. For blink estimation, please refer to the estimate_blink.py file.
\n\n \n
\nThe rt_bene_standalone directory contains instructions for blink estimation given a set of images. It makes use of the code in rt_gene/src/rt_bene.
\nThe rt_bene_model_training directory contains the code required to train models with the labels contained in the RT-BENE dataset (see below). We will soon at evaluation code in this directory, too.
\nWe manually annotated images contained in the \"noglasses\" part of the RT-GENE dataset. The RT-BENE dataset on Zenodo contains the eye image patches and associated annotations to train the blink models.
\n","name":"RT-BENE: Real-Time Blink Estimation in Natural Environments Codebase","type":"code","url":"https://github.com/Tobias-Fischer/rt_gene","image":"repo:/assets/rt_bene_best_poster_award.png","image_fit":"contain","id":"rt_bene_code","_images":["/_next/static/images/rt_bene_best_poster_award-5ac70111852de9eac6c94cd88ef726e0.png.webp","/_next/static/images/rt_bene_best_poster_award-d72f84610eb0050287dd856b52cc99c5.png"],"src":"/content/rt-gene/rt-bene-code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_gene_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_gene_code.json new file mode 100644 index 0000000000..3bb0d75bfb --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/rt_gene_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"This repository contains code and dataset references for two papers: RT-GENE (Gaze Estimation; ECCV2018) and RT-BENE (Blink Estimation; ICCV2019 Workshops).
\nThe RT-GENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use this dataset or the code in a scientific publication, please cite the following paper:
\n@inproceedings{FischerECCV2018,\nauthor = {Tobias Fischer and Hyung Jin Chang and Yiannis Demiris},\ntitle = {{RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments}},\nbooktitle = {European Conference on Computer Vision},\nyear = {2018},\nmonth = {September},\npages = {339--357}\n}\n
\nThis work was supported in part by the Samsung Global Research Outreach program, and in part by the EU Horizon 2020 Project PAL (643783-RIA).
\nThe code is split into four parts, each having its own README contained. There is also an accompanying dataset (alternative link) to the code. For more information, other datasets and more open-source software please visit the Personal Robotic Lab's website: https://www.imperial.ac.uk/personal-robotics/software/.
\nThe rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time.
\n\n \n
\nThe rt_gene_standalone directory contains instructions for eye gaze estimation given a set of images. It shares code with the rt_gene
package (above), in particular the code in rt_gene/src/rt_gene.
The rt_gene_inpainting directory contains code to inpaint the region covered by the eyetracking glasses.
\nThe rt_gene_model_training directory allows using the inpainted images to train a deep neural network for eye gaze estimation.
\n\n
The RT-BENE code is licensed under CC BY-NC-SA 4.0. Commercial usage is not permitted. If you use our blink estimation code or dataset, please cite the relevant paper:
\n@inproceedings{CortaceroICCV2019W,\nauthor={Kevin Cortacero and Tobias Fischer and Yiannis Demiris},\nbooktitle = {Proceedings of the IEEE International Conference on Computer Vision Workshops},\ntitle = {RT-BENE: A Dataset and Baselines for Real-Time Blink Estimation in Natural Environments},\nyear = {2019},\n}\n
\nRT-BENE was supported by the EU Horizon 2020 Project PAL (643783-RIA) and a Royal Academy of Engineering Chair in Emerging Technologies to Yiannis Demiris.
\nThe code is split into several parts, each having its own README. There is also an associated RT-BENE dataset. For more information, other datasets and more open-source software please visit the Personal Robotic Lab's website: https://www.imperial.ac.uk/personal-robotics/software/. Please note that a lot of the code is shared with RT-GENE (see above), hence there are many references to RT-GENE below.
\nThe rt_gene directory contains a ROS package for real-time eye gaze and blink estimation. This contains all the code required at inference time. For blink estimation, please refer to the estimate_blink.py file.
\n\n \n
\nThe rt_bene_standalone directory contains instructions for blink estimation given a set of images. It makes use of the code in rt_gene/src/rt_bene.
\nThe rt_bene_model_training directory contains the code required to train models with the labels contained in the RT-BENE dataset (see below). We will soon at evaluation code in this directory, too.
\nWe manually annotated images contained in the \"noglasses\" part of the RT-GENE dataset. The RT-BENE dataset on Zenodo contains the eye image patches and associated annotations to train the blink models.
\n","name":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Codebase","type":"code","url":"https://github.com/Tobias-Fischer/rt_gene","image":"repo:/assets/system_overview.jpg","image_fit":"contain","id":"rt_gene_code","_images":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"src":"/content/rt-gene/rt-gene-code.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/seq2single_code.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/seq2single_code.json new file mode 100644 index 0000000000..5c5ff9f77b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/seq2single_code.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"This is the source code for the paper titled: \"Look No Deeper: Recognizing Places from Opposing Viewpoints under Varying Scene Appearance using Single-View Depth Estimation\", [arXiv][IEEE Xplore].
\nIf you find this work useful, please cite it as:\nGarg, S., Babu V, M., Dharmasiri, T., Hausler, S., Suenderhauf, N., Kumar, S., Drummond, T., & Milford, M. (2019). Look no deeper: Recognizing places from opposing viewpoints under varying scene appearance using single-view depth estimation. In IEEE International Conference on Robotics and Automation (ICRA), 2019. IEEE.
\nbibtex:
\n@inproceedings{garg2019look,\ntitle={Look No Deeper: Recognizing Places from Opposing Viewpoints under Varying Scene Appearance using Single-View Depth Estimation},\nauthor={Garg, Sourav and Babu V, Madhu and Dharmasiri, Thanuja and Hausler, Stephen and Suenderhauf, Niko and Kumar, Swagat and Drummond, Tom and Milford, Michael},\nbooktitle={IEEE International Conference on Robotics and Automation (ICRA)},\nyear={2019}\n}\n
\nOptionally, for vis_results.ipynb:
\nIn seq2single/precomputed/
, download pre-computed representations (~10 GB). Please refer to the seq2single/precomputed/readme.md
for instructions on how to compute these representations.
[Optional] In seq2single/images/
, download images (~1 GB). These images are a subset of two different traverses from the Oxford Robotcar dataset.
(Note: These download links from Mega.nz require you to first create an account (free))
\nThe code is released under MIT License.
\n[ArXiv+Supplementary] [IEEE Xplore RA-L 2021] [ICRA 2021 YouTube Video]
\nand
\n[ArXiv] [CVPR 2021 Workshop 3DVR]
\n\n
Sequence-Based Hierarchical Visual Place Recognition.\n
Jan 18, 2022 : MSLS training setup included.
\nJan 07, 2022 : Single Image Vanilla NetVLAD feature extraction enabled.
\nOct 13, 2021 : Oxford & Brisbane Day-Night pretrained models download link.
\nAug 03, 2021 : Added Oxford dataset files and a direct link to download the Nordland dataset.
\nJun 23, 2021: CVPR 2021 Workshop 3DVR paper, \"SeqNetVLAD vs PointNetVLAD\", now available on arXiv.
\nconda create -n seqnet numpy pytorch=1.8.0 torchvision tqdm scikit-learn faiss tensorboardx h5py -c pytorch -c conda-forge\n
\nRun bash download.sh
to download single image NetVLAD descriptors (3.4 GB) for the Nordland-clean dataset [a] and the Oxford dataset (0.3 GB), and Nordland-trained model files (1.5 GB) [b]. Other pre-trained models for Oxford and Brisbane Day-Night can be downloaded from here.
To train sequential descriptors through SeqNet on the Nordland dataset:
\npython main.py --mode train --pooling seqnet --dataset nordland-sw --seqL 10 --w 5 --outDims 4096 --expName \"w5\"\n
\nor the Oxford dataset (set --dataset oxford-pnv
for pointnetvlad-like data split as described in the CVPR 2021 Workshop paper):
python main.py --mode train --pooling seqnet --dataset oxford-v1.0 --seqL 5 --w 3 --outDims 4096 --expName \"w3\"\n
\nor the MSLS dataset (specifying --msls_trainCity
and --msls_valCity
as default values):
python main.py --mode train --pooling seqnet --dataset msls --msls_trainCity melbourne --msls_valCity austin --seqL 5 --w 3 --outDims 4096 --expName \"msls_w3\"\n
\nTo train transformed single descriptors through SeqNet:
\npython main.py --mode train --pooling seqnet --dataset nordland-sw --seqL 1 --w 1 --outDims 4096 --expName \"w1\"\n
\nOn the Nordland dataset:
\npython main.py --mode test --pooling seqnet --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-22-44_l10_w5/ \n
\nOn the MSLS dataset (can change --msls_valCity
to melbourne
or austin
too):
python main.py --mode test --pooling seqnet --dataset msls --msls_valCity amman --seqL 5 --split test --resume ./data/runs/<modelName>/\n
\nThe above will reproduce results for SeqNet (S5) as per Supp. Table III on Page 10.
\n# Raw Single (NetVLAD) Descriptor\npython main.py --mode test --pooling single --dataset nordland-sf --seqL 1 --split test\n\n# SeqNet (S1)\npython main.py --mode test --pooling seqnet --dataset nordland-sf --seqL 1 --split test --resume ./data/runs/Jun03_15-07-46_l1_w1/\n\n# Raw + Smoothing\npython main.py --mode test --pooling smooth --dataset nordland-sf --seqL 5 --split test\n\n# Raw + Delta\npython main.py --mode test --pooling delta --dataset nordland-sf --seqL 5 --split test\n\n# Raw + SeqMatch\npython main.py --mode test --pooling single+seqmatch --dataset nordland-sf --seqL 5 --split test\n\n# SeqNet (S1) + SeqMatch\npython main.py --mode test --pooling s1+seqmatch --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-07-46_l1_w1/\n\n# HVPR (S5 to S1)\n# Run S5 first and save its predictions by specifying `resultsPath`\npython main.py --mode test --pooling seqnet --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-22-44_l10_w5/ --resultsPath ./data/results/\n# Now run S1 + SeqMatch using results from above (the timestamp of `predictionsFile` would be different in your case)\npython main.py --mode test --pooling s1+seqmatch --dataset nordland-sf --seqL 5 --split test --resume ./data/runs/Jun03_15-07-46_l1_w1/ --predictionsFile ./data/results/Jun03_16-07-36_l5_0.npz\n\n
\n# Setup Patch-NetVLAD submodule from the seqNet repo:\ncd seqNet \ngit submodule update --init\n\n# Download NetVLAD+PCA model\ncd thirdparty/Patch-NetVLAD/patchnetvlad/pretrained_models\nwget -O pitts_orig_WPCA4096.pth.tar https://cloudstor.aarnet.edu.au/plus/s/gJZvogRj4FUUQMy/download\n\n# Compute global descriptors\ncd ../../../Patch-NetVLAD/\npython feature_extract.py --config_path patchnetvlad/configs/seqnet.ini --dataset_file_path ../../structFiles/imageNamesFiles/oxford_2014-12-16-18-44-24_imagenames_subsampled-2m.txt --dataset_root_dir <PATH_TO_OXFORD_IMAGE_DIR> --output_features_fullpath ../../data/descData/netvlad-pytorch/oxford_2014-12-16-18-44-24_stereo_left.npy\n\n# example for MSLS (replace 'database' with 'query' and use different city names to compute all)\npython feature_extract.py --config_path patchnetvlad/configs/seqnet.ini --dataset_file_path ../../structFiles/imageNamesFiles/msls_melbourne_database_imageNames.txt --dataset_root_dir <PATH_TO_Mapillary_Street_Level_Sequences> --output_features_fullpath ../../data/descData/netvlad-pytorch/msls_melbourne_database.npy\n
\nThe code in this repository is based on Nanne/pytorch-NetVlad. Thanks to Tobias Fischer for his contributions to this code during the development of our project QVPR/Patch-NetVLAD.
\n@article{garg2021seqnet,\n title={SeqNet: Learning Descriptors for Sequence-based Hierarchical Place Recognition},\n author={Garg, Sourav and Milford, Michael},\n journal={IEEE Robotics and Automation Letters},\n volume={6},\n number={3},\n pages={4305-4312},\n year={2021},\n publisher={IEEE},\n doi={10.1109/LRA.2021.3067633}\n}\n\n@misc{garg2021seqnetvlad,\n title={SeqNetVLAD vs PointNetVLAD: Image Sequence vs 3D Point Clouds for Day-Night Place Recognition},\n author={Garg, Sourav and Milford, Michael},\n howpublished={CVPR 2021 Workshop on 3D Vision and Robotics (3DVR)},\n month={Jun},\n year={2021},\n}\n
\nSeqMatchNet (2021);\nPatch-NetVLAD (2021);\nDelta Descriptors (2020);\nCoarseHash (2020);\nseq2single (2019);\nLoST (2018)
\n[a] This is the clean version of the dataset that excludes images from the tunnels and red lights and can be downloaded from here.
\n[b] These will automatically save to ./data/
; you can modify this path in download.sh and get_datasets.py to point to your working directory.
\n![]() | \n\nA Python implementation of the Spatial Math Toolbox for MATLAB®\n\n | \n
Spatial mathematics capability underpins all of robotics and robotic vision where we need to describe the position, orientation or pose of objects in 2D or 3D spaces.
\nThe package provides classes to represent pose and orientation in 3D and 2D\nspace:
\nRepresents | \nin 3D | \nin 2D | \n
---|---|---|
pose | \nSE3 Twist3 UnitDualQuaternion | \nSE2 Twist2 | \n
orientation | \nSO3 UnitQuaternion | \nSO2 | \n
More specifically:
\nSE3
matrices belonging to the group $\\mathbf{SE}(3)$ for position and orientation (pose) in 3-dimensionsSO3
matrices belonging to the group $\\mathbf{SO}(3)$ for orientation in 3-dimensionsUnitQuaternion
belonging to the group $\\mathbf{S}^3$ for orientation in 3-dimensionsTwist3
vectors belonging to the group $\\mathbf{se}(3)$ for pose in 3-dimensionsUnitDualQuaternion
maps to the group $\\mathbf{SE}(3)$ for position and orientation (pose) in 3-dimensionsSE2
matrices belonging to the group $\\mathbf{SE}(2)$ for position and orientation (pose) in 2-dimensionsSO2
matrices belonging to the group $\\mathbf{SO}(2)$ for orientation in 2-dimensionsTwist2
vectors belonging to the group $\\mathbf{se}(2)$ for pose in 2-dimensionsThese classes provide convenience and type safety, as well as methods and overloaded operators to support:
\n*
operator*
operator**
operatorThese are layered over a set of base functions that perform many of the same operations but represent data explicitly in terms of numpy
arrays.
The class, method and functions names largely mirror those of the MATLAB toolboxes, and the semantics are quite similar.
\nCheck out our ICRA 2021 paper on IEEE Xplore or get the PDF from Peter's website. This describes the Robotics Toolbox for Python as well as Spatial Maths.
\nIf the toolbox helped you in your research, please cite
\n@inproceedings{rtb,\n title={Not your grandmother’s toolbox--the Robotics Toolbox reinvented for Python},\n author={Corke, Peter and Haviland, Jesse},\n booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},\n pages={11357--11363},\n year={2021},\n organization={IEEE}\n}\n
\nIf you are using the Toolbox in your open source code, feel free to add our badge to your readme!
\n\nSimply copy the following
\n[](https://github.com/bdaiinstitute/spatialmath-python)\n
\nInstall a snapshot from PyPI
\npip install spatialmath-python\n
\nInstall the current code base from GitHub and pip install a link to that cloned copy
\ngit clone https://github.com/bdaiinstitute/spatialmath-python.git\ncd spatialmath-python\npip install -e .\n# Optional: if you would like to contribute and commit code changes to the repository,\n# pre-commit install\n
\nnumpy
, scipy
, matplotlib
, ffmpeg
(if rendering animations as a movie)
These classes abstract the low-level numpy arrays into objects that obey the rules associated with the mathematical groups SO(2), SE(2), SO(3), SE(3) as well as twists and quaternions.
\nUsing classes ensures type safety, for example it stops us mixing a 2D homogeneous transformation with a 3D rotation matrix -- both of which are 3x3 matrices. It also ensures that the internal matrix representation is always a valid member of the relevant group.
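\nAs a minimal illustration of that validity checking (the exact exception raised for an invalid matrix is an assumption here and may vary between versions):
\nimport numpy as np\nfrom spatialmath import SO3\n\n# a proper rotation matrix is accepted as-is\nR = SO3(np.eye(3))\n\n# a 3x3 matrix that is not orthonormal is not a member of SO(3), so\n# construction with checking enabled is rejected\ntry:\n    SO3(2 * np.eye(3), check=True)\nexcept ValueError:\n    print(\"not a valid member of SO(3)\")\n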
\nFor example, creating an object that represents a rotation of 0.3 radians about the x-axis is as simple as
\n>>> from spatialmath import SO3, SE3\n>>> R1 = SO3.Rx(0.3)\n>>> R1\n 1 0 0 \n 0 0.955336 -0.29552 \n 0 0.29552 0.955336 \n
\nwhile a rotation of 30 deg about the z-axis is
\n>>> R2 = SO3.Rz(30, 'deg')\n>>> R2\n 0.866025 -0.5 0 \n 0.5 0.866025 0 \n 0 0 1 \n
\nand the composition of these two rotations is
\n>>> R = R1 * R2\n 0.866025 -0.5 0 \n 0.433013 0.75 -0.5 \n 0.25 0.433013 0.866025 \n
\nWe can find the corresponding Euler angles (in radians)
\n>> R.eul()\narray([-1.57079633, 0.52359878, 2.0943951 ])\n
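\nThe other overloaded operators listed earlier follow the same pattern; a short sketch (these calls reflect the documented API, with printed output omitted here):
\n>>> T = SE3(1, 2, 3) * SE3.Rx(0.3)   # composition with the * operator\n>>> p = T * [0, 0, 1]                # transform a point with the * operator\n>>> T2 = T ** 2                      # exponentiation, equivalent to T * T\n>>> Tinv = T.inv()                   # inverse\n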
\nFrequently in robotics we want a sequence, a trajectory, of rotation matrices or poses. These pose classes inherit capability from the list
class
>>> R = SO3() # the null rotation or identity matrix\n>>> R.append(R1)\n>>> R.append(R2)\n>>> len(R)\n 3\n>>> R[1]\n 1 0 0 \n 0 0.955336 -0.29552 \n 0 0.29552 0.955336 \n
\nand this can be used in for
loops and list comprehensions.
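\nFor instance, a list comprehension over the sequence built above (using the eul method shown earlier):
\n>>> [r.eul() for r in R]    # Euler angles of each rotation in the sequence\n>>> [r.inv() for r in R]    # element-wise inverse\n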
An alternative way of constructing this would be (R1
, R2
defined above)
>>> R = SO3( [ SO3(), R1, R2 ] ) \n>>> len(R)\n 3\n
\nMany of the constructors such as .Rx
, .Ry
and .Rz
support vectorization
>>> R = SO3.Rx( np.arange(0, 2*np.pi, 0.2))\n>>> len(R)\n 32\n
\nwhich has created, in a single line, a list of rotation matrices.
\nVectorization also applies to the operators, for instance
\n>>> A = R * SO3.Ry(0.5)\n>>> len(R)\n 32\n
\nwill produce a result where each element is the product of each element of the left-hand side with the right-hand side, ie. R[i] * SO3.Ry(0.5)
.
Similarly
\n>>> A = SO3.Ry(0.5) * R \n>>> len(R)\n 32\n
\nwill produce a result where each element is the product of the left-hand side with each element of the right-hand side , ie. SO3.Ry(0.5) * R[i]
.
Finally
\n>>> A = R * R \n>>> len(R)\n 32\n
\nwill produce a result where each element is the product of each element of the left-hand side with each element of the right-hand side , ie. R[i] * R[i]
.
The underlying representation of these classes is a numpy matrix, but the class ensures that the structure of that matrix is valid for the particular group represented: SO(2), SE(2), SO(3), SE(3). Any operation that is not valid for the group will return a matrix rather than a pose class, for example
\n>>> SO3.Rx(0.3) * 2\narray([[ 2. , 0. , 0. ],\n [ 0. , 1.91067298, -0.59104041],\n [ 0. , 0.59104041, 1.91067298]])\n\n>>> SO3.Rx(0.3) - 1\narray([[ 0. , -1. , -1. ],\n [-1. , -0.04466351, -1.29552021],\n [-1. , -0.70447979, -0.04466351]])\n
\nWe can print and plot these objects as well
\n>>> T = SE3(1,2,3) * SE3.Rx(30, 'deg')\n>>> T.print()\n 1 0 0 1 \n 0 0.866025 -0.5 2 \n 0 0.5 0.866025 3 \n 0 0 0 1 \n\n>>> T.printline()\nt = 1, 2, 3; rpy/zyx = 30, 0, 0 deg\n\n>>> T.plot()\n
\nprintline
is a compact single line format for tabular listing, whereas print
shows the underlying matrix and, for consoles that support it, is colorised, with rotational elements in red and translational elements in blue.
For more detail checkout the shipped Python notebooks:
\n\nYou can browse it statically through the links above, or clone the toolbox and run them interactively using Jupyter or JupyterLab.
\nImport the low-level transform functions
\n>>> from spatialmath.base import *\n
\nWe can create a 3D rotation matrix
\n>>> rotx(0.3)\narray([[ 1. , 0. , 0. ],\n [ 0. , 0.95533649, -0.29552021],\n [ 0. , 0.29552021, 0.95533649]])\n\n>>> rotx(30, unit='deg')\narray([[ 1. , 0. , 0. ],\n [ 0. , 0.8660254, -0.5 ],\n [ 0. , 0.5 , 0.8660254]])\n
\nThe results are numpy
arrays so to perform matrix multiplication you need to use the @
operator, for example
rotx(0.3) @ roty(0.2)\n
\nWe also support multiple ways of passing vector information to functions that require it:
\ntransl2(1, 2)\narray([[1., 0., 1.],\n [0., 1., 2.],\n [0., 0., 1.]])\n
\ntransl2( [1,2] )\narray([[1., 0., 1.],\n [0., 1., 2.],\n [0., 0., 1.]])\n\ntransl2( (1,2) )\nOut[444]: \narray([[1., 0., 1.],\n [0., 1., 2.],\n [0., 0., 1.]])\n
\nnumpy
arraytransl2( np.array([1,2]) )\nOut[445]: \narray([[1., 0., 1.],\n [0., 1., 2.],\n [0., 0., 1.]])\n
\nThere is a single module that deals with quaternions, unit or not, and the representation is a numpy
array of four elements. As above, functions can accept the numpy
array, a list, dict or numpy
row or column vectors.
>>> from spatialmath.base.quaternion import *\n>>> q = qqmul([1,2,3,4], [5,6,7,8])\n>>> q\narray([-60, 12, 30, 24])\n>>> qprint(q)\n-60.000000 < 12.000000, 30.000000, 24.000000 >\n>>> qnorm(q)\n72.24956747275377\n
\nThe functions support various plotting styles
\ntrplot( transl(1,2,3), frame='A', rviz=True, width=1, dims=[0, 10, 0, 10, 0, 10])\ntrplot( transl(3,1, 2), color='red', width=3, frame='B')\ntrplot( transl(4, 3, 1)@trotx(math.pi/3), color='green', frame='c', dims=[0,4,0,4,0,4])\n
\nAnimation is straightforward
\ntranimate(transl(4, 3, 4)@trotx(2)@troty(-2), frame='A', arrow=False, dims=[0, 5], nframes=200)\n
\nand it can be saved to a file by
\ntranimate(transl(4, 3, 4)@trotx(2)@troty(-2), frame='A', arrow=False, dims=[0, 5], nframes=200, movie='out.mp4')\n
\n\nAt the moment we can only save as an MP4, but the following incantation will convert that to an animated GIF for embedding in web pages
\nffmpeg -i out.mp4 -r 20 -vf \"fps=10,scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse\" out.gif\n
\nFor use in a Jupyter notebook, or on Colab, you can display an animation by
\nfrom IPython.core.display import HTML\nHTML(tranimate(transl(4, 3, 4)@trotx(2)@troty(-2), frame='A', arrow=False, dims=[0, 5], nframes=200, movie=True))\n
\nThe movie=True
option causes tranimate
to output an HTML5 fragment which\nis displayed inline by the HTML
function.
Some functions have support for symbolic variables, for example
\nimport sympy as sym\n\ntheta = sym.symbols('theta')\nT = rotx(theta)\nprint(T)\n[[1 0 0]\n [0 cos(theta) -sin(theta)]\n [0 sin(theta) cos(theta)]]\n
\nThe resulting numpy
array is an array of symbolic objects not numbers – the constants are also symbolic objects. You can read the elements of the matrix
a = T[0,0]\n\na\nOut[258]: 1\n\ntype(a)\nOut[259]: int\n\na = T[1,1]\na\nOut[256]: \ncos(theta)\ntype(a)\nOut[255]: cos\n
\nWe see that the symbolic constants are converted back to Python numeric types on read.
\nSimilarly when we assign an element or slice of the symbolic matrix to a numeric value, they are converted to symbolic constants on the way in.
\nThis package was originally created by Peter Corke and Jesse Haviland and was inspired by the Spatial Math Toolbox for MATLAB. It supports the textbook Robotics, Vision & Control in Python 3e.
\nThe package is now a collaboration with Boston Dynamics AI Institute.
\n","name":"Spatialmath Python","type":"code","url":"https://github.com/petercorke/spatialmath-python","image":"repo:/docs/figs/CartesianSnakes_LogoW.png","image_fit":"contain","_images":["/_next/static/images/CartesianSnakes_LogoW-7d2f987ca5432e1ce32ce72e90be7c64.png.webp","/_next/static/images/CartesianSnakes_LogoW-d72d60a588449aa6a08846bed694c0c9.png"],"src":"/content/robotics_toolbox/spatialmath-python.md","id":"spatialmath-python","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/swift.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/swift.json new file mode 100644 index 0000000000..361f3ac22a --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/swift.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"Swift is a light-weight browser-based simulator built on top of the Robotics Toolbox for Python. This simulator provides robotics-specific functionality for rapid prototyping of algorithms, research, and education. Built using Python and Javascript, Swift is cross-platform (Linux, MacOS, and Windows) while also leveraging the ubiquity and support of these languages.
\nThrough the Robotics Toolbox for Python, Swift can visualise over 30 supplied robot models: well-known contemporary robots from Franka-Emika, Kinova, Universal Robotics, Rethink as well as classical robots such as the Puma 560 and the Stanford arm. Swift is under development and will support mobile robots in the future.
\nSwift provides:
\nSwift is designed to be controlled through the Robotics Toolbox for Python. By installing the toolbox through PyPI, swift is installed as a dependency
\npip3 install roboticstoolbox-python\n
\nOtherwise, Swift can be installed by
\npip3 install swift-sim\n
\nAvailable options are:
\nnb
provides the ability for Swift to be embedded within a Jupyter Notebookvision
implements an RTC communication strategy allowing for visual feedback from Swift and allows Swift to be run on Google ColabPut the options in a comma-separated list like
\npip3 install swift-sim[optionlist]\n
\nTo install the latest version from GitHub
\ngit clone https://github.com/jhavl/swift.git\ncd swift\npip3 install -e .\n
\nWe will load a model of the Franka-Emika Panda robot and plot it. We set the joint angles of the robot into the ready joint configuration qr.
\nimport roboticstoolbox as rp\n\npanda = rp.models.Panda()\npanda.plot(q=panda.qr)\n
\n\n
We will load a model of the Franka-Emika Panda robot and make it travel towards a goal pose defined by the variable Tep.
\nimport roboticstoolbox as rtb\nimport spatialmath as sm\nimport numpy as np\nfrom swift import Swift\n\n\n# Make and instance of the Swift simulator and open it\nenv = Swift()\nenv.launch(realtime=True)\n\n# Make a panda model and set its joint angles to the ready joint configuration\npanda = rtb.models.Panda()\npanda.q = panda.qr\n\n# Set a desired and effector pose an an offset from the current end-effector pose\nTep = panda.fkine(panda.q) * sm.SE3.Tx(0.2) * sm.SE3.Ty(0.2) * sm.SE3.Tz(0.45)\n\n# Add the robot to the simulator\nenv.add(panda)\n\n# Simulate the robot while it has not arrived at the goal\narrived = False\nwhile not arrived:\n\n # Work out the required end-effector velocity to go towards the goal\n v, arrived = rtb.p_servo(panda.fkine(panda.q), Tep, 1)\n \n # Set the Panda's joint velocities\n panda.qd = np.linalg.pinv(panda.jacobe(panda.q)) @ v\n \n # Step the simulator by 50 milliseconds\n env.step(0.05)\n
\n\n \n
\nTo embed within a Jupyter Notebook Cell, use the browser=\"notebook\"
option when launching the simulator.
# Try this example within a Jupyter Notebook Cell!\nimport roboticstoolbox as rtb\nimport spatialmath as sm\nimport numpy as np\nfrom swift import Swift\n\n# Make and instance of the Swift simulator and open it\nenv = Swift()\nenv.launch(realtime=True, browser=\"notebook\")\n\n# Make a panda model and set its joint angles to the ready joint configuration\npanda = rtb.models.Panda()\npanda.q = panda.qr\n\n# Set a desired and effector pose an an offset from the current end-effector pose\nTep = panda.fkine(panda.q) * sm.SE3.Tx(0.2) * sm.SE3.Ty(0.2) * sm.SE3.Tz(0.45)\n\n# Add the robot to the simulator\nenv.add(panda)\n\n# Simulate the robot while it has not arrived at the goal\narrived = False\nwhile not arrived:\n\n # Work out the required end-effector velocity to go towards the goal\n v, arrived = rtb.p_servo(panda.fkine(panda.q), Tep, 1)\n \n # Set the Panda's joint velocities\n panda.qd = np.linalg.pinv(panda.jacobe(panda.q)) @ v\n \n # Step the simulator by 50 milliseconds\n env.step(0.05)\n
\n","name":"Swift","type":"code","url":"https://github.com/jhavl/swift","image":"repo:/.github/figures/panda.png","_images":["/_next/static/images/panda-f1735ad2d702ae9c686b2f0e727e9941.png.webp","/_next/static/images/panda-c3722217e520e43c10f1bc26fffcd0fd.png"],"src":"/content/robotics_toolbox/swift.md","id":"swift","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/teach_repeat.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/teach_repeat.json
new file mode 100644
index 0000000000..a7a343587e
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/teach_repeat.json
@@ -0,0 +1 @@
+{"pageProps":{"codeData":{"content":"This repository contains code for a low compute teach and repeat navigation approach which only requires monocular vision and wheel odometry. Teach the robot a route by teleoperation, then the robot will be able to repeat it - robust to lighting variation and moderate environmental changes. For full details see our IROS2021 paper, available on arXiv. You can view the conference presentation here as well as other multimedia material and a full 550 metre outdoor run.
\n\nIf you use the code in this repository, please cite our paper. The code is available under the BSD-2-Clause License.
\n@inproceedings{dallostoFastRobustBioinspired2021,\n title = {Fast and {{Robust Bio-inspired Teach}} and {{Repeat Navigation}}},\n booktitle = {2021 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}})},\n author = {Dall'Osto, Dominic and Fischer, Tobias and Milford, Michael},\n year = {2021},\n month = sep,\n pages = {500--507},\n publisher = {{IEEE}},\n address = {{Prague, Czech Republic}},\n doi = {10.1109/IROS51168.2021.9636334},\n}\n
\nThis approach can be used with any mobile robot with a monocular camera and odometry source.
\nFor the teach run, run both the data_collect.py
and data_save.py
nodes. Teleoperate the robot along the desired route and the teach run (odometry poses and images) will be recorded to a specified folder.
For the repeat run, use image_matcher.py
and localiser.py
. The localiser will publish Goal
messages on the topic goal
, containing a goal to navigate to in the robot's odometry frame. An example drive_to_pose_controller
is used here, but can be replaced with another controller as required.
In both cases, remap the odom
and image
topics to those provided by the robot. Note, the published odometry must also contain an integrated pose estimate.
Essential parameters for these nodes are shown below. Other parameters exist to save additional diagnostic data, or to wait for a ready signal before starting - if the robot needs to run a setup procedure for example. These are shown in the nodes and example usage is shown in the provided launch files.
\nParameter | \nDescription | \nDefault Value | \n
---|---|---|
/data_load_dir | \ndirectory in which the teach runs are saved | \n~/miro/data | \n
/data_save_dir | \ndirectory in which to save the results of a repeat run | \n~/miro/data/follow-straight_tests/5 | \n
/image_resize_width | \nwidth to resize images before comparison | \n115 | \n
/image_resize_height | \nheight to resize images before comparison | \n44 | \n
/patch_size | \npatch size to use for patch normalisation | \n(9,9) | \n
/goal_pose_separation | \ndistance between goals, should match ~distance_threshold in data_collect.py | \n0.2 | \n
/image_field_of_view_width_deg | \nhorizontal field of view of images (degrees) | \n175.2 | \n
/wait_for_ready | \nwhether the localiser waits for a service signal 'ready_localiser' before starting, allowing robot initialisation | \nfalse | \n
data_collect.py
Parameter | \nDescription | \nExample Value | \n
---|---|---|
~distance_threshold | \ndistance (metres) travelled from the previous pose after which a new pose is stored in the teach map | \n0.2 | \n
~angle_threshold_deg | \nangular distance (degrees) travelled from the previous pose after which a new pose is stored in the teach map | \n15.0 | \n
data_save.py
Parameter | \nDescription | \nExample Value | \n
---|---|---|
~save_dir | \ndirectory in which to save the teach run | \n~/miro/data | \n
~timestamp_folder | \nwhether to timestamp the folder name of the teach run, so multiple runs can be performed without overwriting | \ntrue | \n
localiser.py
Parameter | \nDescription | \nDefault Value | \n
---|---|---|
~rotation_correction_gain | \nproportional gain term to use for rotation corrections, $K_\\theta$, shouldn't need to be tuned | \n0.01 | \n
~path_correction_gain | \nproportional gain term to use for along-path corrections, $K_p$, shouldn't need to be tuned | \n0.01 | \n
~stop_at_end | \nwhether the robot should stop at the end of the route, otherwise it assumes the route is circular and restarts from the beginning | \ntrue | \n
~discrete-correction | \nreduce compute by only performing a correction at each goal pose, not continually | \nfalse | \n
~search-range | \nhow many teach images to search either side of the current to perform along-path correction | \n1 | \n
~global_localisation_init | \nwhen initialising, find the closest matching teach image to the current and start the route from there, otherwise start at the first goal | \nfalse | \n
~min_init_correlation | \nminimum correlation with a teach image at initialisation, otherwise the robot thinks it's not on the path and doesn't start repeating | \n0.0 | \n
First the robot needs to be taught a route via teleoperation. At regular distance intervals along the path the dead-reckoning position and and image will be saved, resulting in a topometric map of the route. Images are patch normalised to increase robustness to lighting variation.
\n\nHaving learnt a route, the robot can robustly repeat it. The robot initially follows the sequence of odometry poses stored during the teach run, but errors accumulate in this approach over time. Images are compared between the teach and repeat routes to make corrections to the route.
\nBoth rotational and lateral path errors result in horizontal image offsets that can't be distinguished, but this is not a problem because both require the same correction response. However, moving along the path can also cause horizontal image offsets. These must be accounted for by interpolating between the previous and next goal images.
\n\nIf an orientation error is detected by comparing teach and repeat images, an associated path correction is performed, modulated by a constant gain factor. This correction causes the robot to steer back onto the path.
\n\nRepeat images are compared to teach images within a certain search range of the current goal. If correlation values are stronger to images ahead or behind the robot's current estimated position, and along-path correction is performed. In this case, the goal is pulled towards the robot so it will be reached faster, allowing the estimated position to \"catch up\" to the real position.
\n\nroslaunch teach_repeat data_collection_miro.launch
roslaunch teach_repeat data_matching_miro.launch
rosnode kill twist_mux
(optional, only required for comparison with bearnav)roslaunch slam_toolbox localization.launch
(optional, only required for quantitative analysis)roslaunch stroll_bearnav mapping-core-jackal.launch
(optional, only required for comparison with bearnav)roslaunch stroll_bearnav mapping-gui-jackal.launch
(optional, only required for comparison with bearnav)roslaunch teach_repeat data_collection_jackal.launch
roslaunch slam_toolbox localization.launch
(optional, only required for quantitative analysis)roslaunch teach_repeat data_matching_jackal.launch
roslaunch slam_toolbox localization.launch
roslaunch stroll_bearnav navigation-core-jackal.launch
roslaunch stroll_bearnav navigation-gui-jackal.launch
This repository contains code related to the following paper - please cite it if you use this code:
\n@article{xu2021probabilistic,\n title={Probabilistic Appearance-Invariant Topometric Localization with New Place Awareness},\n author={Xu, Ming and Fischer, Tobias and S{\\\"u}nderhauf, Niko and Milford, Michael},\n journal={IEEE Robotics and Automation Letters},\n volume={6},\n number={4},\n pages={6985--6992},\n year={2021}\n}\n
\nRun the steps in the following order to get things working.
\nClone repo, and setup virtual environment. OpenVINO only works with Python 3.7, so run
\nconda create env -n topometricloc python=3.7\nconda activate topometricloc\n
\nThen run
\ncd TopometricLoc\nsh setup.sh\n
\nThis script will install the associated python package to this repository with dependencies. It will also download model weights for feature extraction (HF-Net with OpenVINO) and ask you to enter directories where data is stored (DATA_DIR
) and results (RESULTS_DIR
). Directories entered are stored in the topometricloc/settings.py
file and used as global variables in scripts in this repo.
To use this code for any dataset you like, simply adhere to the following data format.
\nYou will require a set of images with timestamps or frame order for filenames (sans file extension), corresponding global image descriptors for each image (e.g. NetVLAD, HF-Net), ground truth poses for each image (e.g. GPS) and finally odometry estimates between adjacent images.
\nThe base directory for all data is the DATA_DIR
directory. We assume the data is presented as a set of traverses, with each traverse occupying its own folder in DATA_DIR
. An example valid directory structure is given as follows:
----\n|-- DATA_DIR\n| |-- <traverse_1>\n| | |-- images\n| | | |-- 0001.png\n| | | |-- ...\n| | | |-- 1000.png\n| | |-- features\n| | | |-- 0001.npy\n| | | |-- ...\n| | | |-- 1000.npy\n| | |-- camera_poses.csv\n| | |-- odometry.csv\n| |-- ...\n| |-- <traverse_5>\n| | |-- images\n| | | |-- 0001.png\n| | | |-- ...\n| | | |-- 0500.png\n| | |-- features\n| | | |-- 0001.npy\n| | | |-- ...\n| | | |-- 0500.npy\n| | |-- camera_poses.csv\n| | |-- odometry.csv\n
\nFor a given traverse, raw images are stored in DATA_DIR/<traverse_name>/images/
with arbitrary filename extensions. We also assume image names have a corresponding numeric (at least have the ability to be cast into an int
!!) identifier which describes the order images are captured (e.g. timestamp). An example of a valid filename is given by 00001.png
.
Global features/descriptors are stored in DATA_DIR/<traverse_name>/features/
as .npy
files. Note, for a given traverse, each image in the images/
folder MUST have a corresponding feature. For example, 00001.png
must have a corresponding feature 00001.npy
in the features/
directory. Features are assumed to be stored as a 1D numpy array with shape (D,)
, e.g. (4096,)
for vanilla NetVLAD and HF-Net.
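\nFor example, a feature file can be written and verified with plain numpy (the paths below are purely illustrative):
\nimport os\nimport numpy as np\n\nfeature_dir = 'DATA_DIR/traverse_1/features'   # illustrative path only\nos.makedirs(feature_dir, exist_ok=True)\n\ndesc = np.random.rand(4096).astype(np.float32)  # stand-in for a NetVLAD / HF-Net global descriptor\nnp.save(os.path.join(feature_dir, '00001.npy'), desc)\n\nassert np.load(os.path.join(feature_dir, '00001.npy')).shape == (4096,)\n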
Ground truth poses for a trajectory must be stored in a single .csv
file located at DATA_DIR/<traverse_name>/camera_poses.csv
. The format of ground truth pose information is stored as a 6D pose with orientation given by a r, p, y
Euler angle representation (please have mercy on my soul :p). All ground truth poses are given in the world coordinate frame as a world-to-body transform.
We store 6D poses for the purposes of applying one of our comparison methods (MCL) which requires 6DoF poses. If you have an alternative (lower) number of DoFs, e.g. 3, 4, then simply save a 6DoF pose with zeros in dimensions that are not used.
\nts, x, y, z, r, p, y\n0001, 1.0, 100.0, 0.5, 0.003, -0.06, 0.07\n0002, 3.2, 105.0, 0.7, -0.01, -0.05, 0.075\n...\n
\nOdometry is defined as a relative pose between adjacent pairs (source_frame, destination_frame)
of images and is given as a 6D relative pose. We assume the origin of the transformation is at the position of the source frame. As a simple check, composing the global pose of the source frame with the relative pose estimate between source and dest should yield the pose of the dest frame. Example:
source_ts, dest_ts, x, y, z, r, p, y\n0001, 0002, 1.0, 100.0, 0.5, 0.003, -0.06, 0.07\n0002, 0003, 3.2, 105.0, 0.7, -0.01, -0.05, 0.075\n...\n
\nAgain, similar to ground truth poses, if odometry in a lower number of DoFs is provided, then fill in unused dimensions with zeros.
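\nThe composition check mentioned above can be written directly with homogeneous transforms; a small sketch (the x-y-z Euler order is an assumption here and the repository's convention may differ):
\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\n\ndef pose_to_matrix(x, y, z, r, p, yaw):\n    # 4x4 homogeneous transform from a 6D pose\n    T = np.eye(4)\n    T[:3, :3] = Rotation.from_euler('xyz', [r, p, yaw]).as_matrix()\n    T[:3, 3] = [x, y, z]\n    return T\n\nT_world_source = pose_to_matrix(1.0, 100.0, 0.5, 0.003, -0.06, 0.07)  # row from camera_poses.csv\nT_source_dest = pose_to_matrix(0.5, 0.1, 0.0, 0.0, 0.0, 0.01)         # odometry row (illustrative values)\nT_world_dest = T_world_source @ T_source_dest  # should match the dest row of camera_poses.csv\n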
\nWe provide a helpful utility to easily extract features from images assuming the data structure in section 2 has been adhered to. The feature extraction method provided is an OpenVINO version of HF-Net for GPU-free feature extraction. Our code has minor changes to original code found in this repo.
\nTo extract features from images, use the topometricloc/feature_extraction/extract_features.py
script. You simply provide the folder name of the traverse you wish to extract features from located inside the DATA_DIR
and it'll do its thing!
After raw feature extraction and data processing of the entire traverse, we subsample the traverses based on odometry (given by VO) for mapping and localization. To do this, use the src/data/subsample_traverse.py
script (use --help
for information).
Reference maps can be build from subsampled traverse data. Maps store the nodes with odometry constraints (segments) between them preprocessed before localization. Maps also store the global descriptors (NetVLAD from HF-Net) and timestamps (to load local descriptors from disk when required). This map object will be used frequently when localizing. To build a map, use the src/mapping.py
script (see --help
for information).
Baselines are stored in the src/baselines/
folder, and scripts include Localization objects which store state estimates, model parameters and can be iterated to update state estimates given odometry and appearance observations. Our method is stored in src/localization.py
. Both the comparison methods and our method have the same class structure for Localization
objects and are called in the src/evaluate.py
script.
Run src/evaluate.py
to generate results. Script uniformly (spatially) samples the full query traverse as a starting point for global localization and runs each method (ours or comparisons) until convergence. It stores results in RESULTS_DIR
with a description of the experiment which is automatically generated if none is provided (see --help
for more information).
Model parameters for each method are stored in the src/params/
folder as yaml files.
src/results.py
aggregates results into tables and outputs them as .tex
files using pandas. The input to this script is a csv file storing the traverse/method/experiment description information about the experiments to be aggregated.
There is a folder tests
with notebooks containing exploratory experiments. tests/off_map_classifier-geom.ipynb
contains a notebook for tuning the off-map detector parameters and allows you to change parameter values and evaluate detector performance on an on-map and off-map segment.
src/visualization.py
allows you to visualize localization for our method for any traverse. Outputs a multitude of useful diagnostic plots to understand how the state estimate (belief) is being updated, where the state proposals are with confidence scores, sensor data (measurement likelihoods, motion, off-map detector, retrieved images). Very handy for tuning parameters on the training set!
This repository contains code for three of our papers:
\nEnsembles of Modular SNNs with/without sequence matching: Applications of Spiking Neural Networks in Visual Place Recognition
\nModular SNN: Ensembles of Compact, Region-specific & Regularized Spiking Neural Networks for Scalable Place Recognition (ICRA 2023) DOI: 10.1109/ICRA48891.2023.10160749
\nNon-modular SNN: Spiking Neural Networks for Visual Place Recognition via Weighted Neuronal Assignments (RAL + ICRA2022) DOI: 10.1109/LRA.2022.3149030
\nDec 2023:
\nOct 2023:
\nMay 2023:
\nThis code is licensed under MIT License.
\nIf you use our Ensemble of Modular SNNs with/without sequence matching code, please cite the following paper:
\n@article{hussaini2023applications,\n title={Applications of Spiking Neural Networks in Visual Place Recognition},\n author={Hussaini, Somayeh and Milford, Michael and Fischer, Tobias},\n journal={arXiv preprint arXiv:2311.13186},\n year={2023}\n}\n
\nIf you use our Modular SNN code, please cite the following paper:
\n@inproceedings{hussaini2023ensembles,\n title={Ensembles of compact, region-specific \\& regularized spiking neural networks for scalable place recognition},\n author={Hussaini, Somayeh and Milford, Michael and Fischer, Tobias},\n booktitle={2023 IEEE International Conference on Robotics and Automation (ICRA)},\n pages={4200--4207},\n year={2023},\n organization={IEEE}\n}\n
\nIf you use our Non-modular SNN code, please cite the following paper:
\n@article{hussaini2022spiking,\n title={Spiking Neural Networks for Visual Place Recognition via Weighted Neuronal Assignments},\n author={Hussaini, Somayeh and Milford, Michael J and Fischer, Tobias},\n journal={IEEE Robotics and Automation Letters},\n year={2022},\n publisher={IEEE}\n}\n
\nPlease refer to the readme files of the Ensemble of Modular SNNs & sequence matching, Modular SNN and Non-modular SNN folders for instructions to run the code for each work respectively.
\n\n
Video: https://www.youtube.com/watch?v=TNDdfmPSe1U&t=137s
\n\n
Video: https://www.youtube.com/watch?v=VGfv4ZVOMkw
\n\n
This work is an adaptation of the spiking neural network model from \"Unsupervised Learning of Digit Recognition Using Spike-Timing-Dependent Plasticity\", Diehl and Cook, (2015) for Visual Place Recognition (VPR). DOI: 10.3389/fncom.2015.00099.\nVisual Place Recognition is the problem of how a robot can identify whether it has previously visited a place given an image of the place despite challenges including changes in appearance and perceptual aliasing (where two different places look similar).
\nThe code is based on the following repositories, which include the original code and modified versions of it.
\nOriginal code (Peter U. Diehl): https://github.com/peter-u-diehl/stdp-mnist
\nUpdated for Brian2: zxzhijia: https://github.com/zxzhijia/Brian2STDPMNIST
\nUpdated for Python3: sdpenguin: https://github.com/sdpenguin/Brian2STDPMNIST
\nPlease refer to the wiki tab for additional ablation studies.
\nThese works were supported by the Australian Government, Intel Labs, and the Queensland University of Technology (QUT) through the Centre for Robotics.
\n","name":"Spiking Neural Networks for Visual Place Recognition","type":"code","url":"https://github.com/QVPR/VPRSNN","id":"vpr_snn","image":"./resources/Ens_of_modularSNNs.png","_images":["/_next/static/images/Ens_of_modularSNNs-b59ff02969917c2eb544fd14a2014936.png.webp","/_next/static/images/Ens_of_modularSNNs-2e12118a078b9b819e6e9169d4994b74.png"],"src":"/content/visual_place_recognition/vpr_snn.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/code/vprbench.json b/_next/data/jRfPhdat00YV9X7T_v1K6/code/vprbench.json new file mode 100644 index 0000000000..7fc230e986 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/code/vprbench.json @@ -0,0 +1 @@ +{"pageProps":{"codeData":{"content":"VPR-Bench is an open-source Visual Place Recognition evaluation framework with quantifiable viewpoint and illumination invariance. This repository represents the open-source release relating to our VPR-Bench paper published in the International Journal of Computer Vision, which you can access here.\n
This repository allows you to do the following two things:
\nCompute the performance of 8 VPR techniques on 12 VPR datasets using multiple evaluation metrics, such as PR curves, ROC curves, RecallRate@N, True-Positive Distribution over a Trajectory etc.
\nCompute the quantified limits of viewpoint and illumination invariance of VPR techniques on Point Features dataset, QUT Multi-lane dataset and MIT Multi-illumination dataset.
\nSide Note: You can extend our codebase to include more datasets (or use full versions of some datasets) and techniques by following the templates described in the appendix of our paper. For further understanding these templates, dig into the 'VPR_techniques' and 'helper_functions' folders of this repository.
\nOur code was written in Python 2, tested in Ubuntu 18.04 LTS and Ubuntu 20.04 LTS both using Anaconda Python. Please follow the below steps for installing dependencies:
\nInstall Anaconda Python on your system (https://docs.anaconda.com/anaconda/install/). We are running conda 4.9.2 but other versions should also work.
\nClone this VPR-Bench Github repository (using git clone).
\ngit clone https://github.com/MubarizZaffar/VPR-Bench\n\n
\ncd YOURDIR/VPR-Bench/\nsh must_downloads.sh\n
\nconda env create -f environment.yml\n
\nThere is a known Caffe bug regarding 'mean shape incompatible with input shape' , so follow the solution in https://stackoverflow.com/questions/30808735/error-when-using-classify-in-caffe. That is, modify the lines 253-254 in {USER}/anaconda3/envs/myvprbenchenv/lib/python2.7/site-packages/caffe.
\nFinally activate your environment using the following and you should be good to go.
\n\nconda activate myvprbenchenv\n\n
\npython main.py -em 0 -sm 1 -dn Corridor -ddir datasets/corridor/ -mdir precomputed_matches/corridor/ -techs CoHOG CALC\n
\npython main.py -em 0 -sm 1 -dn Corridor -ddir datasets/corridor/ -mdir precomputed_matches/corridor/ -techs CoHOG CALC NetVLAD RegionVLAD AMOSNet HybridNet HOG AlexNet_VPR\n
\nIf you want to use any of the other 12 datasets in our work, download them from here (https://surfdrive.surf.nl/files/index.php/s/sbZRXzYe3l0v67W), and set the dataset fields (-dn and -ddir) accordingly.
\nIf you just want to use the matching data we had already computed for the 10 techniques on 12 datasets in our work, append '_Precomputed' to the name of a technique(s). This matching info is already available for corridor and SPEDTEST datasets in this repo, but for other datasets you would need to have downloaded this matching data (https://surfdrive.surf.nl/files/index.php/s/ThIgFycwwhRCVZv). Also set the dataset path via -ddir for access to ground-truth data. This ground-truth data is present for all datasets by default in the 'VPR-Bench/datasets/' folder. An example usage is given below.
\npython main.py -em 0 -sm 0 -dn SPEDTEST -ddir datasets/SPEDTEST/ -mdir precomputed_matches/SPEDTEST/ -techs CoHOG_Precomputed CALC_Precomputed NetVLAD_Precomputed RegionVLAD_Precomputed\n
\npython main.py -em 2 -techs NetVLAD RegionVLAD AMOSNet HybridNet CALC HOG CoHOG AlexNet_VPR\n\n
\nYou can send an email at mubarizzaffar at gmail dot com, m dot zaffar at tudelft dot nl or s dot garg at qut dot edu dot au for further guidance and/or questions.
\nImportant Note: For all the datasets and techniques, we have made our maximum effort to provide original citations and/or licenses within the respective folders, where possible and applicable. We request all users of VPR-Bench to be aware of (and use) the original citations and licenses in any of their works. If you have any concerns about this, please do send us an email.
\nIf you find this work useful, please cite as:
\n@article{zaffar2021vpr,\n title={Vpr-bench: An open-source visual place recognition evaluation framework with quantifiable viewpoint and appearance change},\n author={Zaffar, Mubariz and Garg, Sourav and Milford, Michael and Kooij, Julian and Flynn, David and McDonald-Maier, Klaus and Ehsan, Shoaib},\n journal={International Journal of Computer Vision},\n pages={1--39},\n year={2021},\n publisher={Springer}\n}\n
\n","name":"VPR-Bench","type":"code","url":"https://github.com/MubarizZaffar/VPR-Bench","id":"vprbench","image":"VPRBench.jpg","_images":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"src":"/content/visual_place_recognition/vprbench_code.md","image_position":"center"}},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection.json
new file mode 100644
index 0000000000..939e1a12fc
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection.json
@@ -0,0 +1 @@
+{"pageProps":{"listData":[{"linkUrl":"/collection/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/human-cues","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Human Cues for Robot Navigation","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/python_robotics","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Python Robotics","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/rt_gene_overview","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"primaryText":"RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments","secondaryText":"Collection","secondaryTransform":"capitalize"},{"linkUrl":"/collection/vpr_overview","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Visual Place Recognition","secondaryText":"Collection","secondaryTransform":"capitalize"}],"title":"Open source collections"},"__N_SSG":true}
\ No newline at end of file
diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/benchbot.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/benchbot.json
new file mode 100644
index 0000000000..c185492733
--- /dev/null
+++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/benchbot.json
@@ -0,0 +1 @@
+{"pageProps":{"code":[{"linkUrl":"/code/benchbot","mediaPosition":"100% center","mediaUrls":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"],"primaryText":"BenchBot Software Stack","secondaryText":"qcr/benchbot","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-api","mediaPosition":"center 100%","mediaUrls":["/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webm","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.mp4","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.webp","/_next/static/images/benchbot_api_web-9d335e4f90cddac6fb91d546b1d6dc20.jpg"],"primaryText":"BenchBot Python API","secondaryText":"qcr/benchbot_api","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-addons","mediaPosition":"center","mediaUrls":["/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webm","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.mp4","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.webp","/_next/static/images/benchbot_addons-39bf0e168760909371d48341ec57fbad.jpg"],"primaryText":"BenchBot Add-ons Manager","secondaryText":"qcr/benchbot_addons","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-eval","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"BenchBot Evaluation Tools","secondaryText":"qcr/benchbot_eval","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-supervisor","mediaPosition":"center 0%","mediaUrls":["/_next/static/images/benchbot_supervisor-3e4092b6584962e3e4529101ae489a08.jpg.webp","/_next/static/images/benchbot_supervisor-fb509eb331f3380fbf5da2c3035116b6.jpg"],"primaryText":"BenchBot Backend Supervisor","secondaryText":"qcr/benchbot_supervisor","secondaryTransform":"lowercase"},{"linkUrl":"/code/benchbot-simulator","mediaPosition":"center","mediaUrls":["/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webm","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.mp4","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.webp","/_next/static/images/benchbot_simulator-0194a01dd7a2f34b0b6a9e53bd88dc2e.jpg"],"primaryText":"BenchBot Simulator (Isaac)","secondaryText":"qcr/benchbot_simulator","secondaryTransform":"lowercase"}],"collectionData":{"content":"The BenchBot software stack is a collection of software packages that allow end users to control robots in real or simulated environments with a simple python API. It leverages the simple \"observe, act, repeat\" approach to robot problems prevalent in reinforcement learning communities (OpenAI Gym users will find the BenchBot API interface very similar).
\n","name":"BenchBot","type":"collection","url":"http://benchbot.org","id":"benchbot","code":["benchbot","benchbot-api","benchbot-addons","benchbot-eval","benchbot-supervisor","benchbot-simulator"],"datasets":["benchbot-bear-data"],"feature":1,"src":"/content/benchbot/collection.md","image_position":"100% center","_code":[],"_datasets":[],"image":"./docs/benchbot_web.gif","_images":["/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webm","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.mp4","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.webp","/_next/static/images/benchbot_web-0b702a0b6abfa7d90459998e6fa0ee8c.jpg"]},"datasets":[{"linkUrl":"/dataset/benchbot-bear-data","mediaPosition":"center","mediaUrls":["/_next/static/images/all_envs-55ef0a35e02b68a820d9940edf6a1521.png.webp","/_next/static/images/all_envs-7573d0362a6d5ba5fc5e45e2542e99b9.png"],"primaryText":"BenchBot Environments for Active Robotics (BEAR)","secondaryText":"15.9GB","secondaryTransform":"capitalize"}]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/human-cues.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/human-cues.json new file mode 100644 index 0000000000..bf1ad1225b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/human-cues.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/abstract-map","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"primaryText":"Abstract Map (Python)","secondaryText":"btalb/abstract_map","secondaryTransform":"lowercase"},{"linkUrl":"/code/abstract-map-simulator","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_simulation-55e32b58dd5e4ed9caf7a85baf98677c.png.webp","/_next/static/images/abstract_map_simulation-3a9dbfc04fa16e80a961cec841d316fc.png"],"primaryText":"2D Simulator for Zoo Experiments","secondaryText":"btalb/abstract_map_simulator","secondaryTransform":"lowercase"},{"linkUrl":"/code/abstract-map-app","mediaPosition":"center","mediaUrls":["/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webm","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.mp4","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.webp","/_next/static/images/abstract_map_app-d75baca0c5b7f59d88c7db6b1dff9e4d.jpg"],"primaryText":"Android App for Human Participants","secondaryText":"btalb/abstract_map_app","secondaryTransform":"lowercase"}],"collectionData":{"content":"The Human Cues for Robot Navigation ARC Discovery Project (DP140103216) investigated how a robot can navigate using the same navigation cues humans use when navigating built environments. Types of navigation cues targeted include labels, directional signs, signboards, maps & floor plans, navigational gestures, and spoken directions & descriptions. The main contribution from this work is the abstract map, a navigational tool that allows a robot to employ symbolic spatial information in its navigation of unseen spaces.
\n","name":"Human Cues for Robot Navigation","type":"collection","url":"https://btalb.github.io/abstract_map","code":["abstract-map","abstract-map-simulator","abstract-map-app"],"feature":0,"src":"/content/human_cues/human-cues.md","id":"human-cues","image_position":"center","_code":[],"image":"./docs/assets/images/abstract_map_in_action.png","_images":["/_next/static/images/abstract_map_in_action-51c5e1dcb68134fbb20baad53816b40f.png.webp","/_next/static/images/abstract_map_in_action-863c3403cb5be611fa8f5dcbdbb45c3f.png"],"_datasets":[]},"datasets":[]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/python_robotics.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/python_robotics.json new file mode 100644 index 0000000000..99e12aec99 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/python_robotics.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/spatialmath-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/CartesianSnakes_LogoW-7d2f987ca5432e1ce32ce72e90be7c64.png.webp","/_next/static/images/CartesianSnakes_LogoW-d72d60a588449aa6a08846bed694c0c9.png"],"primaryText":"Spatialmath Python","secondaryText":"petercorke/spatialmath-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/robotics-toolbox-python","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"primaryText":"Robotics Toolbox Python","secondaryText":"petercorke/robotics-toolbox-python","secondaryTransform":"lowercase"},{"linkUrl":"/code/swift","mediaPosition":"center","mediaUrls":["/_next/static/images/panda-f1735ad2d702ae9c686b2f0e727e9941.png.webp","/_next/static/images/panda-c3722217e520e43c10f1bc26fffcd0fd.png"],"primaryText":"Swift","secondaryText":"jhavl/swift","secondaryTransform":"lowercase"}],"collectionData":{"content":"Python Robotics is a collection of software packages providing robotics-specific functionality to Python. While leveraging Python's advantages of portability, ubiquity and support, and the capability of the open-source ecosystem for linear algebra (numpy, scipy), graphics (matplotlib, three.js, WebGL), interactive development (jupyter, jupyterlab, mybinder.org), and documentation (sphinx).
\nThe collection is built on top of Spatialmath which underpins all of robotics and robotic vision where we need to describe the position, orientation or pose of objects in 2D or 3D spaces. The core of the collection is the the Robotics Toolbox for Python while Swift provides a light-weight browser-based simulation environment.
\n","name":"Python Robotics","type":"collection","url":"https://petercorke.github.io/robotics-toolbox-python/","image":"repo:petercorke/robotics-toolbox-python/docs/figs/RobToolBox_RoundLogoB.png","image_fit":"contain","id":"python_robotics","code":["spatialmath-python","robotics-toolbox-python","swift"],"feature":99999,"_images":["/_next/static/images/RobToolBox_RoundLogoB-fd4fa9f238808ea84fa7ed15c039c58c.png.webp","/_next/static/images/RobToolBox_RoundLogoB-dd66a766d39b1761d4fba8db5bb28020.png"],"src":"/content/robotics_toolbox/collection.md","image_position":"center","_code":[],"_datasets":[]},"datasets":[]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/rt_gene_overview.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/rt_gene_overview.json new file mode 100644 index 0000000000..3993343a0f --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/rt_gene_overview.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/rt_gene_code","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"primaryText":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Codebase","secondaryText":"Tobias-Fischer/rt_gene","secondaryTransform":"lowercase"},{"linkUrl":"/code/rt_bene_code","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/rt_bene_best_poster_award-5ac70111852de9eac6c94cd88ef726e0.png.webp","/_next/static/images/rt_bene_best_poster_award-d72f84610eb0050287dd856b52cc99c5.png"],"primaryText":"RT-BENE: Real-Time Blink Estimation in Natural Environments Codebase","secondaryText":"Tobias-Fischer/rt_gene","secondaryTransform":"lowercase"}],"collectionData":{"content":"This project contains code + datasets for real-time eye gaze and blink estimation.
\nThe work done in this project was done within the Personal Robotics Lab at Imperial College London.
\n\n\n","name":"RT-GENE & RT-BENE: Real-Time Eye Gaze and Blink Estimation in Natural Environments","type":"collection","url":"https://github.com/Tobias-Fischer/rt_gene","id":"rt_gene_overview","code":["rt_gene_code","rt_bene_code"],"datasets":["rt_gene_dataset","rt_bene_dataset"],"feature":3,"src":"/content/rt-gene/project.md","image_position":"center","_code":[],"_datasets":[],"image":"repo:/assets/system_overview.jpg","_images":["/_next/static/images/system_overview-e905413b7b8a569c769b893296ea5aa3.jpg.webp","/_next/static/images/system_overview-f550cd56b0872bdc54bc11c36db2eaf5.jpg"],"image_fit":"contain"},"datasets":[{"linkUrl":"/dataset/rt_gene_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset_figure-5572954dcf83fca94ae80fa38a0f36ab.jpg.webp","/_next/static/images/dataset_figure-024bff6ee75c09b3b9afd020a4e1467b.jpg"],"primaryText":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Dataset","secondaryText":"45GB","secondaryTransform":"capitalize"},{"linkUrl":"/dataset/rt_bene_dataset","mediaFit":"contain","mediaPosition":"center","mediaUrls":["/_next/static/images/rt_bene_labels-f79290be354a9a6ea6dfa387d60da1c1.png.webp","/_next/static/images/rt_bene_labels-4ac642446c5fd65a3d20b2b46f856cdc.png"],"primaryText":"RT-BENE: Real-Time Blink Estimation in Natural Environments Dataset","secondaryText":"600MB","secondaryTransform":"capitalize"}]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/collection/vpr_overview.json b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/vpr_overview.json new file mode 100644 index 0000000000..5e03480269 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/collection/vpr_overview.json @@ -0,0 +1 @@ +{"pageProps":{"code":[{"linkUrl":"/code/patchnetvlad_code","mediaPosition":"center","mediaUrls":["/_next/static/images/patch_netvlad_method_diagram-a9187148aad4ff631ce8f55f695459ec.png.webp","/_next/static/images/patch_netvlad_method_diagram-26dab363c927eaf0c0020decf330646e.png"],"primaryText":"Patch-NetVLAD","secondaryText":"QVPR/Patch-NetVLAD","secondaryTransform":"lowercase"},{"linkUrl":"/code/seqnet_code","mediaPosition":"center","mediaUrls":["/_next/static/images/seqnet-cfc1aecd3cd2b268af41400a4fb86e6a.jpg.webp","/_next/static/images/seqnet-69de71978f2b7f0ffbcefcbb976010d3.jpg"],"primaryText":"SeqNet","secondaryText":"oravus/seqNet","secondaryTransform":"lowercase"},{"linkUrl":"/code/vprbench","mediaPosition":"center","mediaUrls":["/_next/static/images/VPRBench-a4fbe919a2ac5fc851261353f3fbdd9a.jpg.webp","/_next/static/images/VPRBench-5db45a25afa26692b0958cbf579b9a77.jpg"],"primaryText":"VPR-Bench","secondaryText":"MubarizZaffar/VPR-Bench","secondaryTransform":"lowercase"},{"linkUrl":"/code/delta_descriptors_code","mediaPosition":"center","mediaUrls":["/_next/static/images/ral-iros-2020-delta-descriptors-schematic-b5f57732c327f2f8546715b5dc3643af.png.webp","/_next/static/images/ral-iros-2020-delta-descriptors-schematic-95f5d1a50f3d92aa3344d9782ac13c32.png"],"primaryText":"Delta Descriptors","secondaryText":"oravus/DeltaDescriptors","secondaryTransform":"lowercase"},{"linkUrl":"/code/event_vpr_code","mediaPosition":"center","mediaUrls":["/_next/static/images/dataset-77ee27292f9a639c3024670f2a9939e2.png.webp","/_next/static/images/dataset-179d4dc0b9d40cbdc11117c78f1d45de.png"],"primaryText":"Visual Place Recognition using Event 
Cameras","secondaryText":"Tobias-Fischer/ensemble-event-vpr","secondaryTransform":"lowercase"},{"linkUrl":"/code/lost_code","mediaPosition":"center","mediaUrls":["/_next/static/images/day-night-keypoint-correspondence-place-recognition-38203057bf036a1e9271b0a7647119fa.jpg.webp","/_next/static/images/day-night-keypoint-correspondence-place-recognition-bed6f778b7ec1ce4edaa346e24fb33bf.jpg"],"primaryText":"LoST-X","secondaryText":"oravus/lostX","secondaryTransform":"lowercase"},{"linkUrl":"/code/openseqslam2_code","mediaPosition":"center","mediaUrls":["/_next/static/images/openseqslam2-c5079d59d4cff5bd652acb1652d047f6.png.webp","/_next/static/images/openseqslam2-f3755fc8e61c0d81c8f0b0f42c5e08ae.png"],"primaryText":"OpenSeqSLAM2","secondaryText":"qcr/openseqslam2","secondaryTransform":"lowercase"},{"linkUrl":"/code/seq2single_code","mediaPosition":"center","mediaUrls":["/_next/static/images/illustration-73bec1a3cac56819cdbea1268b711fa4.png.webp","/_next/static/images/illustration-1e185173132d7d8138449660ac905c04.png"],"primaryText":"seq2single","secondaryText":"oravus/seq2single","secondaryTransform":"lowercase"},{"linkUrl":"/code/teach_repeat","mediaPosition":"center","mediaUrls":["/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webm","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.mp4","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.webp","/_next/static/images/outdoor-run-c6d0f9054f19ca3ca4a9c32ae5089b50.jpg"],"primaryText":"Visual Teach and Repeat","secondaryText":"QVPR/teach-repeat","secondaryTransform":"lowercase"},{"linkUrl":"/code/heaputil_code","mediaPosition":"center","mediaUrls":["/_next/static/images/overview-8c193585e23714439d55f0227d88f923.jpg.webp","/_next/static/images/overview-fc609d6102a3c08cb20b14382e57ee50.jpg"],"primaryText":"HEAPUtil","secondaryText":"Nik-V9/HEAPUtil","secondaryTransform":"lowercase"},{"linkUrl":"/code/topometric_localization","mediaPosition":"center","mediaUrls":["/qcr_logo_light_filled.svg"],"primaryText":"Place-aware Topometric Localization","secondaryText":"mingu6/TopometricLoc","secondaryTransform":"lowercase"},{"linkUrl":"/code/vpr_snn","mediaPosition":"center","mediaUrls":["/_next/static/images/Ens_of_modularSNNs-b59ff02969917c2eb544fd14a2014936.png.webp","/_next/static/images/Ens_of_modularSNNs-2e12118a078b9b819e6e9169d4994b74.png"],"primaryText":"Spiking Neural Networks for Visual Place Recognition","secondaryText":"QVPR/VPRSNN","secondaryTransform":"lowercase"}],"collectionData":{"content":"This collection features code related to Visual Place Recognition (VPR) research, which is concerned with the fundamental problem of how a robot or autonomous vehicle uses perception to create maps and calculates and tracks its location in the world. Research questions include addressing how:
\nA dataset of aircraft operating around an airport and an efficient implementation of an optimal quickest change detection rule for detecting changes in airspace traffic.
\nThe raw data is supplied by Airservices Australia and consists of merged track surveillance data acquired from ADS-B and Radar for aircraft operating around an airport. The data was processed to extract the aircraft patterns on a grid at certain sampling points, and aircraft count information was used to identify busy and quiet traffic environments. The aircraft patterns and traffic environment classes are made available in this dataset.
\n","name":"Air Traffic Occupancy Data","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/s/wPCSZ6Znipez1vC","image":"./airspace_traffic_occupation.jpg","size":"19.5MB","id":"air-traffic-occupancy","_images":["/_next/static/images/airspace_traffic_occupation-d8931212d7eaeaba10c2acf785a0ecd9.jpg.webp","/_next/static/images/airspace_traffic_occupation-01dcbf21de2f822245f4bd51c364dc3a.jpg"],"src":"/content/aircraft_detection/airspace_traffic_occupancy.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/aircraft-collision-course.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/aircraft-collision-course.json new file mode 100644 index 0000000000..f29ee38e56 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/aircraft-collision-course.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A dataset of stationary, fixed-wing aircraft on a collision course for vision-based sense and avoid.
\nThe dataset consists of 15 uncompressed, high-resolution image sequences containing 55,521 images of a fixed-wing aircraft approaching a stationary, grounded camera.
\nGround truth labels and videos of the image sequences are also provided.
\nThis dataset is licensed under the BSD-3 license. If you use this dataset, please cite the following paper:
\nMartin, Jasmin, Jenna Riseley, and Jason J. Ford. \"A Dataset of Stationary, Fixed-wing Aircraft on a Collision Course for Vision-Based Sense and Avoid.\" arXiv preprint arXiv:2112.02735 (2021).
\n","name":"Aircraft Collision Course Dataset","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/s/qHjCKYrFagWBHL5","image":"./aircraft_collision_course.png","size":"43.1GB","_images":["/_next/static/images/aircraft_collision_course-5837e09e1d9c74d5172247fc1e45d485.png.webp","/_next/static/images/aircraft_collision_course-39c4d558d7857ed871ee5625ead09fe7.png"],"src":"/content/aircraft_detection/aircraft_collision_course.md","id":"aircraft-collision-course","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/alderley.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/alderley.json new file mode 100644 index 0000000000..1359e1fbc5 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/alderley.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A vision dataset gathered from a car driven around Alderley, Queensland in two different conditions for the same route: one on a sunny day and one during a rainy night. The dataset includes extracted frames from the original .avi video files, as well as manually ground-truthed frame correspondences. The dataset was first used in the ICRA2012 Best Robot Vision Paper:
\nM. Milford, G. Wyeth, \"SeqSLAM: Visual route-based navigation for sunny summer days and stormy winter nights\", in IEEE International Conference on Robotics and Automation, St Paul, United States, 2012.
\nIf you use this dataset please cite the above paper. BibTeX, Endnote, RefMan and CSV citation options available by clicking here.
\n","name":"Alderley Day and Night","type":"dataset","url":"https://wiki.qut.edu.au/pages/viewpage.action?pageId=181178395","url_type":"external","size":"2.07GB","image":"./alderley.jpg","_images":["/_next/static/images/alderley-b50cf86288ad8d46bfd22a4d44388409.jpg.webp","/_next/static/images/alderley-1ca139fb61a2f7e5f85056bd73b7be49.jpg"],"src":"/content/legacy_datasets/alderley.md","id":"alderley","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/benchbot-bear-data.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/benchbot-bear-data.json new file mode 100644 index 0000000000..bbe50de40d --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/benchbot-bear-data.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"The BenchBot Environments for Active Robotics (BEAR) are a set of Unreal Engine environments for use with the BenchBot software stack in the ACRV Semantic Scene Understanding Challenge. A collage of the robot starting position for each of the environments is shown below:
\nFeatures of the dataset include:
\nThe primary and easiest way to utilise the dataset is through BenchBot software stack. For full instructions on using an active agent within the environments with BenchBot we refer users to the BenchBot documentation. The link above gives access to the packaged Unreal \"games\" (not raw assets) for all environments, split into a development and challenge set, in line with the original scene understanding challenge. Develop contains house and miniroom. Challenge contains apartment, company, and office. Note that these ae just the environments. Ground truth object cuboid maps are located in the BenchBot add-ons ground_truths_isaac_develop and ground_truths_isaac_challenge respectively.
\nFor more details of the dataset, challenge, BenchBot, and how it all fits together, please see our summary video below:
\n\n","name":"BenchBot Environments for Active Robotics (BEAR)","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/s/pr8Bthtj2OFbg4R/download","size":"15.9GB","image":"./all_envs.png","_images":["/_next/static/images/all_envs-55ef0a35e02b68a820d9940edf6a1521.png.webp","/_next/static/images/all_envs-7573d0362a6d5ba5fc5e45e2542e99b9.png"],"src":"/content/benchbot/benchbot-bear-data.md","id":"benchbot-bear-data","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/brisbane_event_vpr_dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/brisbane_event_vpr_dataset.json new file mode 100644 index 0000000000..4bc323e4cf --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/brisbane_event_vpr_dataset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Brisbane-Event-VPR was captured in the Brookfield and Kenmore Hills outer suburbs of Brisbane. The route is approx. 8km long and contains a variety of different roads, ranging from single-lane roads without much build-up over dual carriageways to built-up areas. Some areas contain many trees that cast shadows on the street and lead to challenging lighting conditions. The dataset includes 6 traverses recorded at different times of the day and under varying weather conditions. A DAVIS346 event camera was used to record the dataset; it was mounted forward-facing on the inside of the windshield of a Honda Civic. The DAVIS346 allows recording of events and aligned RGB frames with 346x260 pixels resolution.
\nPaper is currently in press and expected to be published in 2016. Please cite the following paper if you use these datasets (use the correct year after it has been published):
\nPepperell, E., Corke, P. & Milford, M. (in press). Routed Roads: Probabilistic Vision-Based Place Recognition for Changing Conditions, Split Streets and Varied Viewpoints. The International Journal of Robotics Research (IJRR).
\nLinks to datasets can be found through the download button above.
\n","name":"CBD and Highway Datasets","type":"dataset","url":[{"name":"CBD","url":"https://mega.co.nz/#F!FEM2zBzb!D72oxkUG2jDhaIDxsig1iQ","size":"2.20GB"},{"name":"Highway","url":"https://mega.co.nz/#F!xRsxCZ4Y!s1Lq4KmtmZfR5MLBLw4a2g","size":"2.46GB"}],"url_type":"list","image":"./cbd_and_highway.png","size":"4.66GB","_images":["/_next/static/images/cbd_and_highway-68b114bb9789803999a0af2fe0a97d91.png.webp","/_next/static/images/cbd_and_highway-6e93fb1dae7cf6034cc07b783b8ca033.png"],"src":"/content/legacy_datasets/cbd_and_highway.md","id":"cbd-and-highway","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/city-sunset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/city-sunset.json new file mode 100644 index 0000000000..99f9c2cf28 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/city-sunset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Description:
\nGoPro vision-only dataset gathered on a late afternoon / evening approximately 10 km drive (one way) into and out of the Brisbane metropolitan area. Lots of varied traffic and interesting pedestrian situations. Map shows inbound route (part 1), return route (part 2) is approximately in reverse but has some extra suburban streets at the end.
\nSettings: 1080p 30 fps wide FOV setting on a GoPro 4 Silver.
\nDownload links for both parts can be accessed via the button above.
\nPaper reference:
\nIf you use this dataset, please cite the below paper:
\nMichael Milford, Chunhua Shen, Stephanie Lowry, Niko Suenderhauf, Sareh Shirazi, Guosheng Lin, Fayao Liu, Edward Pepperell, Cesar Lerma, Ben Upcroft, Ian Reid, \"Sequence Searching With Deep-Learnt Depth for Condition- and Viewpoint-Invariant Route-Based Place Recognition\", in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2015, pp. 18-25.
\nPaper web link:
\n\n","name":"City sunset drive","type":"dataset","url":[{"name":"Inbound route (part 1)","url":"https://mega.nz/#!UBkhiL7L!xppCjeRaadUqK1ESk36O_ZdObpC0C3ETXmXaonweIF0","size":"4.07GB"},{"name":"Outbound route (part 2)","url":"https://mega.nz/#!8ZFQEZDC!mDcOPs5g6V1Ad4SSJ5_6SCUcxIveI8JnK7LEZe696Mg","size":"5.72GB"}],"url_type":"list","size":"9.8GB","image":"./city_sunset_sample.jpg","_images":["/_next/static/images/city_sunset_sample-7d8be3853972a74f7ab4885baa118dba.jpg.webp","/_next/static/images/city_sunset_sample-970f5a9952e2126c37c4a7235d8b48b1.jpg"],"src":"/content/legacy_datasets/city_sunset.md","id":"city-sunset","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/day-night-lateral.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/day-night-lateral.json new file mode 100644 index 0000000000..00e861c556 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/day-night-lateral.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Two vision datasets of a single route through the Gardens Point Campus, Queensland University of Technology and along the Brisbane River, Brisbane, Australia. One route is traversed on the left-hand side of the path during the day and the other day route is traversed on the right-hand side of the path during the night, to capture both pose and condition change.
\nFull details of how to use the dataset and individual download links are available from:
\nhttps://wiki.qut.edu.au/pages/viewpage.action?pageId=175739622
\n","name":"Day and Night with Lateral Pose Change","type":"dataset","url":"https://wiki.qut.edu.au/pages/viewpage.action?pageId=175739622","url_type":"external","image":"./day_night_lateral.jpg","size":"67.9MB","_images":["/_next/static/images/day_night_lateral-56ae9615a767dd2fc33a0b2c257f9f28.jpg.webp","/_next/static/images/day_night_lateral-ece5db3367c8eae1fb94458fafcb8e99.jpg"],"src":"/content/legacy_datasets/day_night_lateral.md","id":"day-night-lateral","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/fish-image.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/fish-image.json new file mode 100644 index 0000000000..acc745f039 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/fish-image.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"This fish dataset currently consisting of 3,960 images collected from 468 species. This data consists of real-world images of fish captured in conditions defined as \"controlled\", \"out-of-the-water\" and \"in-situ\". The \"controlled\", images consists of fish specimens, with their fins spread, taken against a constant background with controlled illumination. The \"in-situ\" images are underwater images of fish in their natural habitat and so there is no control over background or illumination. The \"out-of-the-water\" images consist of fish specimens, taken out of the water with a varying background and limited control over the illumination conditions. A tight red bounding box is annotated around the fish.
\nFull details of how to use the dataset and individual download links are available from:
\nhttps://wiki.qut.edu.au/display/cyphy/Fish+Dataset
\n","name":"Fish images","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/Fish+Dataset","url_type":"external","image":"./fish_image.png","size":"512MB","_images":["/_next/static/images/fish_image-a252b1d6bfcba0ada070ce6af73fc92b.png.webp","/_next/static/images/fish_image-d74fa618dc09390794a0785ebede2291.png"],"src":"/content/legacy_datasets/fish_image.md","id":"fish-image","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/floor-and-lawn.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/floor-and-lawn.json new file mode 100644 index 0000000000..afcf7014c9 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/floor-and-lawn.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Vision datasets gathered within a townhouse (Indooroopilly, Brisbane), and a suburban backyard (Gaythorne, Brisbane) in varying conditions over the same area: one set during the day, and one during night time. The dataset includes the all the extracted frames, as well as a text document containing their ground truthed locations. The dataset was used in a paper that is accepted to ICRA2016:
\nJ. Mount, M. Milford, \"2D Vision Place Recognition for Domestic Service Robots at Night\", in IEEE International Conference on Robotics and Automation, Stockholm, Sweden, 2016.
\nThe code used to compare images and perform place recognition is also contained within the files.
\nIf you use this dataset, or the provided code, please cite the above paper.
\n","name":"Day-night vacuum-cleaner robot and lawn datasets","type":"dataset","url":"https://mega.nz/#!ZUVhjCjK!E5vxmbVDwo18_bVkuz5vVMV_5Fiu3GJo9M0Z8YUufNs","image":"./floor_and_lawn.jpg","size":"400MB","_images":["/_next/static/images/floor_and_lawn-17b4af12dd8360d83e5f91a61c0f043e.jpg.webp","/_next/static/images/floor_and_lawn-93eb7282e76ae159b32f1cdb983f4317.jpg"],"src":"/content/legacy_datasets/floor_and_lawn.md","id":"floor-and-lawn","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/gold-coast-drive.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/gold-coast-drive.json new file mode 100644 index 0000000000..1d33a45e39 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/gold-coast-drive.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Description:
\nGoPro vision-only dataset gathered along an approximately 87 km drive from Brisbane to the Gold Coast, in sunny weather (no ground truth but a reference trajectory provided in the image on the left. Lots of varied traffic conditions, some interesting pedestrian and dangerous driving situations captured on the camera.
\nSettings: 1080p 30 fps wide FOV setting on a GoPro 4 Silver .
\nDownload links are available through the button above for:
\nPaper reference:
\nIf you use this dataset, please cite the below paper:
\nMichael Milford, Chunhua Shen, Stephanie Lowry, Niko Suenderhauf, Sareh Shirazi, Guosheng Lin, Fayao Liu, Edward Pepperell, Cesar Lerma, Ben Upcroft, Ian Reid, \"Sequence Searching With Deep-Learnt Depth for Condition- and Viewpoint-Invariant Route-Based Place Recognition\", in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2015, pp. 18-25.
\nPaper web link:
\n\n","name":"Gold Coast drive","type":"dataset","url":[{"name":"Full video","url":"https://mega.nz/#!4NFzmJyI!fkgjjEN-OJ9ceYRtUMT5VXYVHo8GhakpbuIs-Ih5FjE","size":"15.24GB"},{"name":"Low resolution, highly compressed video","url":"https://mega.nz/#!EUlmQRqD!pOD6Ob7i2G5SDdmC7cvcjBK0K4cxx-drjHlFqWhBMgo","size":"257.2MB"},{"name":"Short sample segment (~314MB)","url":"https://mega.nz/#!1YUHzTIR!46f0xwKy57_9Zdbay466u-vWkMUgIJjgPbJW5lqLjyQ","size":"314.8MB"}],"url_type":"list","size":"15.8GB","image":"./gold_coast_sample.jpg","_images":["/_next/static/images/gold_coast_sample-5c2190ed72cc8f3cb58d4a4d8c6f1e66.jpg.webp","/_next/static/images/gold_coast_sample-403d54f648160fcbabca61f64c22ba22.jpg"],"src":"/content/legacy_datasets/gold_coast_drive.md","id":"gold-coast-drive","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/indoor-level-7.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/indoor-level-7.json new file mode 100644 index 0000000000..5d3fada407 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/indoor-level-7.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A vision dataset was taken on level 7 of S-Block QUT Gardens Point campus. The data contains stereo images, laser data and wheel odometry in addition to secondary data such as camera calibrations and transforms between sensors. This data was collected over a single continuous run over the level with the Guiabot platform under manual control.
\nFull details of how to use the dataset and individual download links are available from:
\nhttps://wiki.qut.edu.au/display/cyphy/Indoor+Level+7+S-Block+Dataset
\n","name":"Indoor Level 7 S-Block","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/Indoor+Level+7+S-Block+Dataset","url_type":"external","image":"./indoor_level_7.png","size":"42.2GB","_images":["/_next/static/images/indoor_level_7-5206991317d188dcc5c74a9fcb49af3a.png.webp","/_next/static/images/indoor_level_7-0af1bc519fe9a2884ef2d8b0423c7c55.png"],"src":"/content/legacy_datasets/indoor_level_7.md","id":"indoor-level-7","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kagaru.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kagaru.json new file mode 100644 index 0000000000..09e366294b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kagaru.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A vision dataset gathered from a radio-controlled aircraft flown at Kagaru, Queensland, Australia on 31/08/10. The data consists of visual data from a pair of downward facing cameras, translation and orientation information as a ground truth from an XSens Mti-g INS/GPS and additional information from a USB NMEA GPS. The dataset traverses over farmland and includes views of grass, an air-strip, roads, trees, ponds, parked aircraft and buildings.
\nPlease see the author's page for up-to-date details and documentation on the dataset:
\nhttps://michaelwarren.info/docs/datasets/kagaru-airborne-stereo/
\n","name":"Kagaru Airborne Vision","type":"dataset","url":"https://michaelwarren.info/docs/datasets/kagaru-airborne-stereo/","url_type":"external","image":"./kagaru.png","size":"22.9GB","_images":["/_next/static/images/kagaru-f2c2fb6210e37c5a307667580f126176.png.webp","/_next/static/images/kagaru-a9f63225cd629de63260ecebd1aadf5c.png"],"src":"/content/legacy_datasets/kagaru.md","id":"kagaru","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kitti-semantics.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kitti-semantics.json new file mode 100644 index 0000000000..28eaae70f5 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/kitti-semantics.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Dataset contains 41 original KITTI images and the corresponding manually semantically labelled data. The labelled data is in the form of a Matlab .mat file with each entry in the array corresponding to the class label.
\nFull details of how to use the dataset and individual download links are available from:
\nhttps://wiki.qut.edu.au/display/cyphy/KITTI+Semantic+Labels
\n","name":"KITTI images with semantic labels","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/KITTI+Semantic+Labels","url_type":"external","image":"./kitti_semantics.png","size":"31.8MB","_images":["/_next/static/images/kitti_semantics-f17afe36ed6d7a3faa570f932450244d.png.webp","/_next/static/images/kitti_semantics-b99130453265bfc7fe08428ef322d4ac.png"],"src":"/content/legacy_datasets/kitti_semantics.md","id":"kitti-semantics","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/low-light.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/low-light.json new file mode 100644 index 0000000000..9ccd204c39 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/low-light.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"For 28 objects (22 within the ImageNet class set and 6 within the PASCAL VOC class set), a set of raw images (DNG format) has been obtained at a variety of lighting conditions (1-40lx), ISO settings (3200 - 409600) and exposure times (1/8000 - 1/10) for comparison of the influence of demosaicing techniques on feature point detectors and CNNs at low-light and with noise. Each object set has a reference image captured at ~380lx. All images were captured with a Sony α7s in a dark room with controlled lighting.
\nPresented in:
\n\n","name":"Raw image low-light object dataset","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/index.php/s/gdJNon8OdEnQeXU/download","image":"./low_light.jpg","size":"23.3GB","_images":["/_next/static/images/low_light-e29b3b1c358fbb7bb15cf1075b2f1066.jpg.webp","/_next/static/images/low_light-5d50fd52e0698697f62132897dbf6bfa.jpg"],"src":"/content/legacy_datasets/low_light.md","id":"low-light","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/mt-cootha-day-night.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/mt-cootha-day-night.json new file mode 100644 index 0000000000..0b6d5925b8 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/mt-cootha-day-night.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Description:
\nMt Cootha circuit day and night time laps with a Sony A7s camera (note this is a later dataset, not the original in the paper below).
\nThe original datasets for this paper were recorded using a Nikon D5100 with long exposures.
\nDownload links for all of the datasets can be accessed using the button above.
\nPaper reference:
\nIf you use this dataset, please cite the below paper:
\nMilford, Michael, Turner, Ian, & Corke, Peter (2013) Long exposure localization in darkness using consumer cameras. In Vincze, M (Ed.) Proceedings of the 2013 IEEE International Conference on Robotics and Automation (ICRA). Institute of Electrical and Electronic Engineers (IEEE), United States, pp. 3755-3761.
\n","name":"Mt Cootha Day and Night Drives","type":"dataset","url":[{"name":"Daytime drive recorded on a Sony A7s","url":"https://mega.nz/#!1JVy0CxQ!kKxjcJB6Ma5ML4ERJZzWv3AcFV9j-V3vMpbZLX68JqM","size":"785.9MB"},{"name":"Nighttime drive recorded on a Sony A7s","url":"https://mega.nz/#!dAVG1KJD!2GHfMxj_kUiALfCjGDGH8ERQZO1qNmRaqNUHHTK5Pmo","size":"489.2MB"},{"name":"Daytime drive recorded on a Nikon D5100","url":"https://mega.nz/#!8Fdgkabb!S_FFvCmuH3RvebV9NBx5m28o8PMOp1eBwRVW0-LVcb4","size":"566.5MB"},{"name":"Nighttime drive recorded on a Niko D5100","url":"https://mega.nz/#!dQEQRSRI!cm_Xgm1ceGbHara8xX4vbn3X5gYtpPXamtS5WzABfJk","size":"1.11GB"}],"url_type":"list","image":"./mt_cootha.jpg","size":"2.8GB","_images":["/_next/static/images/mt_cootha-bd14dc6ef8b1122300e0b93391b69052.jpg.webp","/_next/static/images/mt_cootha-237af78c49ccc8dd88c237a761fbbfe0.jpg"],"src":"/content/legacy_datasets/mt_cootha_day_night.md","id":"mt-cootha-day-night","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multilane-sideways.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multilane-sideways.json new file mode 100644 index 0000000000..96db179564 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/multilane-sideways.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"We provide two vehicular datasets, acquired in Queensland, Australia, each consisting of multiple passes in different lanes across day and night on approximately 4 km, 4-lane, bidirectional road sections, divided by median strips. The first dataset was collected along the Gold Coast Highway in Palm Beach (\"highway\") and the second was collected along Christine Avenue in Robina and nearby suburbs (\"suburban\").
\nThe dataset can be downloaded using the button above, with further information contained in the Readme.txt
.
Presented with the permission of NASA's Jet Propulsion Laboratory (JPL) in:
\n\nFurther referenced in:
\n\nAssociated code for the above papers can be obtained at the following repository:
\nhttps://github.com/jamessergeant/seqreg_tpp.git
\n","name":"Multimodal Rock Surface Images","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/index.php/s/nX1rhsKMehp1h6N/download","image":"./multimodal_rock_surface.png","size":"2.0GB","_images":["/_next/static/images/multimodal_rock_surface-f19b647fbcffec77c0dc67dd2eef1984.png.webp","/_next/static/images/multimodal_rock_surface-75e156a88b63a6a1e4a9cb862b891742.png"],"src":"/content/legacy_datasets/multimodal_rock_surface.md","id":"multimodal-rock-surface","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/night-time-drive.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/night-time-drive.json new file mode 100644 index 0000000000..7ac91c1e15 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/night-time-drive.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Description:
\nNight time ~13 km drive with Sony A7s camera mounted on the roof in Brisbane. Mixture of highway and suburban driving, some light traffic and stop go at traffic lights.
\nSettings: 1080p 25 fps.
\nDownload links for both the full video and a highly compressed version are available through the button above.
\nPaper reference:
\nIf you use this dataset, please cite the below paper:
\nMichael Milford, Chunhua Shen, Stephanie Lowry, Niko Suenderhauf, Sareh Shirazi, Guosheng Lin, Fayao Liu, Edward Pepperell, Cesar Lerma, Ben Upcroft, Ian Reid, \"Sequence Searching With Deep-Learnt Depth for Condition- and Viewpoint-Invariant Route-Based Place Recognition\", in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2015, pp. 18-25.
\nPaper web link:
\n\n","name":"Night time drive","type":"dataset","url":[{"name":"Full video","url":"https://mega.nz/#!IY90HQJY!HQrsdXRLN6FkeLpBIrPDZ3xX6k2ajaKO7OUbzpG7AzM","size":"2.50GB"},{"name":"Low resolution, highly compressed video","url":"https://mega.nz/#!lF90UBZS!Dhyt-DiY4PfuGB-HXG4XAjhMGu5rP0NRYJyrprIoBrA","size":"65.7MB"}],"url_type":"list","size":"2.57GB","image":"./night_drive_sample.jpg","_images":["/_next/static/images/night_drive_sample-ec428d6b27967b4c1bd2738263c5c4ce.jpg.webp","/_next/static/images/night_drive_sample-0be92f8f70cbdd291f029a387cac0565.jpg"],"src":"/content/legacy_datasets/night_time_drive.md","id":"night-time-drive","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/openratslam.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/openratslam.json new file mode 100644 index 0000000000..3dcd845ce5 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/openratslam.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A ROS and OpenCV version of RatSLAM is available at: Google Code
\nPlease see the following paper for more information about openRatSLAM. We would appreciate cites if you use the code.
\n\nWe provide the following datasets here, which can all be downloaded via the button above.
\niRat 2011 Australia
\nThe iRat - intelligent Rat animat technology
\nSt Lucia 2007
\nSee the youtube video describing this dataset.
\nOxford's New College 2008
\nWe have re-encoded Oxford's dataset that is available here into a rosbag file. The rosbag file only include the odometry and the panoramic image sensor data. The odometry has been integrated to be at the same rate (3Hz) as the panoramic images. Note that this re-encoded datset file has been created without permission and is maintained purely by us.
\nIf you use this dataset, please reference the original paper:
\nMike Smith, Ian Baldwin, Winston Churchill, Rohan Paul, Paul Newman (2009) The New College Vision and Laser Data Set. The International Journal of Robotics Research. 28:5
\n","name":"OpenRATSLAM","type":"dataset","url":[{"name":"iRat 2011 Australia","url":"https://mega.co.nz/#!FAlXyZbB!6rMpQ6EE4LQIKmZvy5zN7Stdu4pIzZm2h3TnHkG2wms","size":"861MB"},{"name":"St Lucia 2007","url":"https://mega.co.nz/#!od8xVbKJ!E81hKj-M1-CybBkX1dLe3htAJw-gP9MAQIEeZkPwuUY","size":"2.31GB"},{"name":"Oxford's New College 2008","url":"https://mega.co.nz/#!oJdwxTAJ!EB-M_gLWq8Sy2uFvmER-D_uTZ7_Rd4v-5ZUhu1YGNCQ","size":"1.18GB"}],"url_type":"list","size":"3.6GB","image":"/qcr_logo_light_filled.svg","_images":["/qcr_logo_light_filled.svg"],"src":"/content/legacy_datasets/openratslam.md","id":"openratslam","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_bene_dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_bene_dataset.json new file mode 100644 index 0000000000..74c2111ff8 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_bene_dataset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"The RT-BENE dataset is a new open-sourced dataset with annotations of the eye-openness of more than 200,000 eye images, including more than 10,000 images where the eyes are closed. We annotate the RT-GENE dataset, which was proposed for gaze estimation, with blink labels. We define open eyes as images where at least some part of the sclera (white part of the eye) or pupil is visible. Closed eyes are those where the eyelids are fully closed. The uncertain category is used when the image cannot be clearly grouped into one of the other categories due to e.g. extreme head poses, or when the two annotators labelled the image differently. Using this approach, we labelled in total 243,714 images, 218,548 of them where the eyes are open, 10,444 where the eyes are closed and 14,722 uncertain images.
\nThe work done in this project was done within the Personal Robotics Lab at Imperial College London.
\n","name":"RT-BENE: Real-Time Blink Estimation in Natural Environments Dataset","type":"dataset","url":"https://zenodo.org/record/3685316","url_type":"external","size":"600MB","id":"rt_bene_dataset","image":"repo:Tobias-Fischer/rt_gene/assets/rt_bene_labels.png","image_fit":"contain","_images":["/_next/static/images/rt_bene_labels-f79290be354a9a6ea6dfa387d60da1c1.png.webp","/_next/static/images/rt_bene_labels-4ac642446c5fd65a3d20b2b46f856cdc.png"],"src":"/content/rt-gene/rt-bene-dataset.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_gene_dataset.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_gene_dataset.json new file mode 100644 index 0000000000..71b71d3db6 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/rt_gene_dataset.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"The RT-GENE dataset is a novel dataset of varied gaze and head pose images in a natural environment, addressing the issue of ground truth annotation by measuring head pose using a motion capture system and eye gaze using mobile eyetracking glasses. We apply semantic image inpainting to the area covered by the glasses to bridge the gap between training and testing images by removing the obtrusiveness of the glasses. The proposed RT-GENE dataset contains recordings of 15 participants (9 male, 6 female, 2 participants recorded twice), with a total of 122,531 labeled training images and 154,755 unlabeled images of the same subjects where the eyetracking glasses are not worn.
\nThe work done in this project was done within the Personal Robotics Lab at Imperial College London.
\n","name":"RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments Dataset","type":"dataset","url":"https://zenodo.org/record/2529036","url_type":"external","size":"45GB","id":"rt_gene_dataset","image":"repo:Tobias-Fischer/rt_gene/assets/dataset_figure.jpg","image_fit":"contain","_images":["/_next/static/images/dataset_figure-5572954dcf83fca94ae80fa38a0f36ab.jpg.webp","/_next/static/images/dataset_figure-024bff6ee75c09b3b9afd020a4e1467b.jpg"],"src":"/content/rt-gene/rt-gene-dataset.md","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/st-lucia-multiple.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/st-lucia-multiple.json new file mode 100644 index 0000000000..1f5be0662c --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/st-lucia-multiple.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A vision dataset of a single route through the suburb of St Lucia, Queensland, Australia. The visual data was collected with a forward facing webcam attached to the roof of a car. The route was traversed at five different times during the day to capture the difference in appearance between early morning and late afternoon. The route was traversed again, another five times, two weeks later for a total of ten datasets. GPS data is included for each dataset.
\nFull details of how to use the dataset and individual download links are available from:
\nhttps://wiki.qut.edu.au/display/cyphy/St+Lucia+Multiple+Times+of+Day
\n","name":"St Lucia multiple times of day","type":"dataset","url":"https://wiki.qut.edu.au/display/cyphy/St+Lucia+Multiple+Times+of+Day","url_type":"external","image":"./st_lucia_multiple.jpg","size":"1.46GB","_images":["/_next/static/images/st_lucia_multiple-2cbaafdfc8e48c3435bf99f7dd1664c7.jpg.webp","/_next/static/images/st_lucia_multiple-a633d97afbaa94da3e29001a6583b448.jpg"],"src":"/content/legacy_datasets/st_lucia_multiple.md","id":"st-lucia-multiple","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/trip-hazards.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/trip-hazards.json new file mode 100644 index 0000000000..8f773c4326 --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/trip-hazards.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"Contains RGB, Depth and HHA images of a Construction Site with the trip hazards labelled.
\nThe dataset spans 2'000m2 of construction site over four floors, contains ~629 trip hazards.
\nPresented in paper under review:
\nMcMahon, S., Sϋnderhauf, N., Upcroft, B & Milford, M. (2017). Trip Hazard Detection On Construction Sites Using Colour and Depth Information. Submitted to International Conference on Intelligent Robotics and Systems (IROS) with RAL option 2017
\n","name":"Trip Hazards on a Construction Site","type":"dataset","url":"https://cloudstor.aarnet.edu.au/plus/index.php/s/kVAh7G8V4mwdtp4/download","image":"./trip_hazards.png","size":"339MB","_images":["/_next/static/images/trip_hazards-c2e22a1e003c0abefb7e1182008b7d5d.png.webp","/_next/static/images/trip_hazards-0fb1c5950e6df6a57f0abb9f71e7b113.png"],"src":"/content/legacy_datasets/trip_hazards.md","id":"trip-hazards","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/uq-st-lucia.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/uq-st-lucia.json new file mode 100644 index 0000000000..057c7c984b --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/uq-st-lucia.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"A vision dataset gathered from a car driven in a 9.5km circuit around the University of Queensland's St Lucia campus on 15/12/10. The data consists of visual data from a calibrated stereo pair, translation and orientation information as a ground truth from an XSens Mti-g INS/GPS and additional information from a USB NMEA GPS. The dataset traverses local roads and encounters a number of varying scenarios including roadworks, speed bumps, bright scenes, dark scenes, reverse traverses, a number of loop closure events, multi-lane roads, roundabouts and speeds of up to 60 km/h.
\nPlease see the author's page for up-to-date details and documentation on the dataset:
\nhttps://michaelwarren.info/docs/datasets/uq-st-lucia-stereo-dataset/
\n","name":"UQ St Lucia Vision","type":"dataset","url":"https://michaelwarren.info/docs/datasets/uq-st-lucia-stereo-dataset/","url_type":"external","image":"./uq_st_lucia.jpg","size":"38.1GB","_images":["/_next/static/images/uq_st_lucia-1d565cc939abfe002a470df1354ac6e8.jpg.webp","/_next/static/images/uq_st_lucia-cd1a4c432374b14373c4c98eb02e8d7c.jpg"],"src":"/content/legacy_datasets/uq_st_lucia.md","id":"uq-st-lucia","image_position":"center"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/vprbench.json b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/vprbench.json new file mode 100644 index 0000000000..a03ea0a2fa --- /dev/null +++ b/_next/data/jRfPhdat00YV9X7T_v1K6/dataset/vprbench.json @@ -0,0 +1 @@ +{"pageProps":{"datasetData":{"content":"These can be downloaded from here. More details for benchmarking can be found here. Please cite the original sources of the datasets when using them in your work.
\nd){const e=l-d;i-=e,n.vertical+=e}if(a
f){const e=c-f;a-=e,n.horizontal+=e}return{top:`${Math.round(i)}px`,left:`${Math.round(a)}px`,transformOrigin:Rf(n)}}),[r,s,k,R,p]),P=I.useCallback((()=>{const e=Z.current;if(!e)return;const t=M(e);null!==t.top&&(e.style.top=t.top),null!==t.left&&(e.style.left=t.left),e.style.transformOrigin=t.transformOrigin}),[M]);I.useEffect((()=>{f&&P()})),I.useImperativeHandle(o,(()=>f?{updatePosition:()=>{P()}}:null),[f,P]),I.useEffect((()=>{if(!f)return;const e=G((()=>{P()})),t=ee(r);return t.addEventListener("resize",e),()=>{e.clear(),t.removeEventListener("resize",e)}}),[r,f,P]);let $=v;"auto"!==v||g.muiSupportAuto||($=void 0);const j=u||(r?Q(Mf(r)).body:void 0);return(0,D.jsx)(Pf,(0,T.Z)({BackdropProps:{invisible:!0},className:(0,A.Z)(C.root,c),container:j,open:f,ref:t,ownerState:S},x,{children:(0,D.jsx)(g,(0,T.Z)({appear:!0,in:f,onEntering:(e,t)=>{b&&b(e,t),P()},timeout:$},y,{children:(0,D.jsx)(Tf,(0,T.Z)({elevation:d},m,{ref:w,className:(0,A.Z)(C.paper,m.className),children:l}))}))}))}));var $f=If;function Of(e){return(0,N.Z)("MuiMenu",e)}var Af=(0,z.Z)("MuiMenu",["root","paper","list"]);const Ef=["onEntering"],Lf=["autoFocus","children","disableAutoFocusItem","MenuListProps","onClose","open","PaperProps","PopoverClasses","transitionDuration","TransitionProps","variant"],jf={vertical:"top",horizontal:"right"},Nf={vertical:"top",horizontal:"left"},zf=(0,j.ZP)($f,{shouldForwardProp:e=>(0,j.FO)(e)||"classes"===e,name:"MuiMenu",slot:"Root",overridesResolver:(e,t)=>t.root})({}),Bf=(0,j.ZP)(it,{name:"MuiMenu",slot:"Paper",overridesResolver:(e,t)=>t.paper})({maxHeight:"calc(100% - 96px)",WebkitOverflowScrolling:"touch"}),Ff=(0,j.ZP)(yf,{name:"MuiMenu",slot:"List",overridesResolver:(e,t)=>t.list})({outline:0});var Df=I.forwardRef((function(e,t){const n=(0,L.Z)({props:e,name:"MuiMenu"}),{autoFocus:o=!0,children:r,disableAutoFocusItem:i=!1,MenuListProps:a={},onClose:s,open:l,PaperProps:c={},PopoverClasses:u,transitionDuration:d="auto",TransitionProps:{onEntering:p}={},variant:f="selectedMenu"}=n,m=(0,O.Z)(n.TransitionProps,Ef),h=(0,O.Z)(n,Lf),g=(0,Ve.Z)(),v="rtl"===g.direction,b=(0,T.Z)({},n,{autoFocus:o,disableAutoFocusItem:i,MenuListProps:a,onEntering:p,PaperProps:c,transitionDuration:d,TransitionProps:m,variant:f}),y=(e=>{const{classes:t}=e;return(0,E.Z)({root:["root"],paper:["paper"],list:["list"]},Of,t)})(b),x=o&&!i&&l,Z=I.useRef(null);let w=-1;return I.Children.map(r,((e,t)=>{I.isValidElement(e)&&(e.props.disabled||("selectedMenu"===f&&e.props.selected||-1===w)&&(w=t))})),(0,D.jsx)(zf,(0,T.Z)({classes:u,onClose:s,anchorOrigin:{vertical:"bottom",horizontal:v?"right":"left"},transformOrigin:v?jf:Nf,PaperProps:(0,T.Z)({component:Bf},c,{classes:(0,T.Z)({},c.classes,{root:y.paper})}),className:y.root,open:l,ref:t,transitionDuration:d,TransitionProps:(0,T.Z)({onEntering:(e,t)=>{Z.current&&Z.current.adjustStyleForScrollbar(e,g),p&&p(e,t)}},m),ownerState:b},h,{children:(0,D.jsx)(Ff,(0,T.Z)({onKeyDown:e=>{"Tab"===e.key&&(e.preventDefault(),s&&s(e,"tabKeyDown"))},actions:Z,autoFocus:o&&(-1===w||i),autoFocusItem:x,variant:f},a,{className:(0,A.Z)(y.list,a.className),children:r}))}))}));function Wf(e){return(0,N.Z)("MuiMenuItem",e)}var Hf=(0,z.Z)("MuiMenuItem",["root","focusVisible","dense","disabled","divider","gutters","selected"]);const 
Vf=["autoFocus","component","dense","divider","disableGutters","focusVisibleClassName","role","tabIndex"],_f=(0,j.ZP)(Kt,{shouldForwardProp:e=>(0,j.FO)(e)||"classes"===e,name:"MuiMenuItem",slot:"Root",overridesResolver:(e,t)=>{const{ownerState:n}=e;return[t.root,n.dense&&t.dense,n.divider&&t.divider,!n.disableGutters&&t.gutters]}})((({theme:e,ownerState:t})=>(0,T.Z)({},e.typography.body1,{display:"flex",justifyContent:"flex-start",alignItems:"center",position:"relative",textDecoration:"none",minHeight:48,paddingTop:6,paddingBottom:6,boxSizing:"border-box",whiteSpace:"nowrap"},!t.disableGutters&&{paddingLeft:16,paddingRight:16},t.divider&&{borderBottom:`1px solid ${e.palette.divider}`,backgroundClip:"padding-box"},{"&:hover":{textDecoration:"none",backgroundColor:e.palette.action.hover,"@media (hover: none)":{backgroundColor:"transparent"}},[`&.${Hf.selected}`]:{backgroundColor:(0,Je.Fq)(e.palette.primary.main,e.palette.action.selectedOpacity),[`&.${Hf.focusVisible}`]:{backgroundColor:(0,Je.Fq)(e.palette.primary.main,e.palette.action.selectedOpacity+e.palette.action.focusOpacity)}},[`&.${Hf.selected}:hover`]:{backgroundColor:(0,Je.Fq)(e.palette.primary.main,e.palette.action.selectedOpacity+e.palette.action.hoverOpacity),"@media (hover: none)":{backgroundColor:(0,Je.Fq)(e.palette.primary.main,e.palette.action.selectedOpacity)}},[`&.${Hf.focusVisible}`]:{backgroundColor:e.palette.action.focus},[`&.${Hf.disabled}`]:{opacity:e.palette.action.disabledOpacity},[`& + .${rc.root}`]:{marginTop:e.spacing(1),marginBottom:e.spacing(1)},[`& + .${rc.inset}`]:{marginLeft:52},[`& .${cf.root}`]:{marginTop:0,marginBottom:0},[`& .${cf.inset}`]:{paddingLeft:36},[`& .${of.root}`]:{minWidth:36}},!t.dense&&{[e.breakpoints.up("sm")]:{minHeight:"auto"}},t.dense&&(0,T.Z)({minHeight:32,paddingTop:4,paddingBottom:4},e.typography.body2,{[`& .${of.root} svg`]:{fontSize:"1.25rem"}}))));var Uf=I.forwardRef((function(e,t){const n=(0,L.Z)({props:e,name:"MuiMenuItem"}),{autoFocus:o=!1,component:r="li",dense:i=!1,divider:a=!1,disableGutters:s=!1,focusVisibleClassName:l,role:c="menuitem",tabIndex:u}=n,d=(0,O.Z)(n,Vf),p=I.useContext(Mp),f={dense:i||p.dense||!1,disableGutters:s},m=I.useRef(null);ie((()=>{o&&m.current&&m.current.focus()}),[o]);const h=(0,T.Z)({},n,{dense:f.dense,divider:a,disableGutters:s}),g=(e=>{const{disabled:t,dense:n,divider:o,disableGutters:r,selected:i,classes:a}=e,s={root:["root",n&&"dense",t&&"disabled",!r&&"gutters",o&&"divider",i&&"selected"]},l=(0,E.Z)(s,Wf,a);return(0,T.Z)({},a,l)})(n),v=ge(m,t);let b;return n.disabled||(b=void 0!==u?u:-1),(0,D.jsx)(Mp.Provider,{value:f,children:(0,D.jsx)(_f,(0,T.Z)({ref:v,role:c,tabIndex:b,component:r,focusVisibleClassName:(0,A.Z)(g.focusVisible,l)},d,{ownerState:h,classes:g}))})}));function qf(e){return(0,N.Z)("MuiMobileStepper",e)}var Gf=(0,z.Z)("MuiMobileStepper",["root","positionBottom","positionTop","positionStatic","dots","dot","dotActive","progress"]);const 
Xf=["activeStep","backButton","className","LinearProgressProps","nextButton","position","steps","variant"],Kf=(0,j.ZP)(it,{name:"MuiMobileStepper",slot:"Root",overridesResolver:(e,t)=>{const{ownerState:n}=e;return[t.root,t[`position${(0,R.Z)(n.position)}`]]}})((({theme:e,ownerState:t})=>(0,T.Z)({display:"flex",flexDirection:"row",justifyContent:"space-between",alignItems:"center",background:e.palette.background.default,padding:8},"bottom"===t.position&&{position:"fixed",bottom:0,left:0,right:0,zIndex:e.zIndex.mobileStepper},"top"===t.position&&{position:"fixed",top:0,left:0,right:0,zIndex:e.zIndex.mobileStepper}))),Yf=(0,j.ZP)("div",{name:"MuiMobileStepper",slot:"Dots",overridesResolver:(e,t)=>t.dots})((({ownerState:e})=>(0,T.Z)({},"dots"===e.variant&&{display:"flex",flexDirection:"row"}))),Qf=(0,j.ZP)("div",{name:"MuiMobileStepper",slot:"Dot",shouldForwardProp:e=>(0,j.Dz)(e)&&"dotActive"!==e,overridesResolver:(e,t)=>{const{dotActive:n}=e;return[t.dot,n&&t.dotActive]}})((({theme:e,ownerState:t,dotActive:n})=>(0,T.Z)({},"dots"===t.variant&&(0,T.Z)({transition:e.transitions.create("background-color",{duration:e.transitions.duration.shortest}),backgroundColor:e.palette.action.disabled,borderRadius:"50%",width:8,height:8,margin:"0 2px"},n&&{backgroundColor:e.palette.primary.main})))),Jf=(0,j.ZP)(yp,{name:"MuiMobileStepper",slot:"Progress",overridesResolver:(e,t)=>t.progress})((({ownerState:e})=>(0,T.Z)({},"progress"===e.variant&&{width:"50%"})));var em=I.forwardRef((function(e,t){const n=(0,L.Z)({props:e,name:"MuiMobileStepper"}),{activeStep:o=0,backButton:r,className:i,LinearProgressProps:a,nextButton:s,position:l="bottom",steps:c,variant:u="dots"}=n,d=(0,O.Z)(n,Xf),p=(0,T.Z)({},n,{activeStep:o,position:l,variant:u}),f=(e=>{const{classes:t,position:n}=e,o={root:["root",`position${(0,R.Z)(n)}`],dots:["dots"],dot:["dot"],dotActive:["dotActive"],progress:["progress"]};return(0,E.Z)(o,qf,t)})(p);return(0,D.jsxs)(Kf,(0,T.Z)({square:!0,elevation:0,className:(0,A.Z)(f.root,i),ref:t,ownerState:p},d,{children:[r,"text"===u&&(0,D.jsxs)(I.Fragment,{children:[o+1," / ",c]}),"dots"===u&&(0,D.jsx)(Yf,{ownerState:p,className:f.dots,children:[...new Array(c)].map(((e,t)=>(0,D.jsx)(Qf,{className:(0,A.Z)(f.dot,t===o&&f.dotActive),ownerState:p,dotActive:t===o},t)))}),"progress"===u&&(0,D.jsx)(Jf,(0,T.Z)({ownerState:p,className:f.progress,variant:"determinate",value:Math.ceil(o/(c-1)*100)},a)),s]}))}));function tm(e){return(0,N.Z)("MuiNativeSelect",e)}var nm=(0,z.Z)("MuiNativeSelect",["root","select","multiple","filled","outlined","standard","disabled","icon","iconOpen","iconFilled","iconOutlined","iconStandard","nativeInput"]);const om=["className","disabled","IconComponent","inputRef","variant"],rm=({ownerState:e,theme:t})=>(0,T.Z)({MozAppearance:"none",WebkitAppearance:"none",userSelect:"none",borderRadius:0,cursor:"pointer","&:focus":{backgroundColor:"light"===t.palette.mode?"rgba(0, 0, 0, 0.05)":"rgba(255, 255, 255, 0.05)",borderRadius:0},"&::-ms-expand":{display:"none"},[`&.${nm.disabled}`]:{cursor:"default"},"&[multiple]":{height:"auto"},"&:not([multiple]) option, &:not([multiple]) 
optgroup":{backgroundColor:t.palette.background.paper},"&&&":{paddingRight:24,minWidth:16}},"filled"===e.variant&&{"&&&":{paddingRight:32}},"outlined"===e.variant&&{borderRadius:t.shape.borderRadius,"&:focus":{borderRadius:t.shape.borderRadius},"&&&":{paddingRight:32}}),im=(0,j.ZP)("select",{name:"MuiNativeSelect",slot:"Select",shouldForwardProp:j.FO,overridesResolver:(e,t)=>{const{ownerState:n}=e;return[t.select,t[n.variant],{[`&.${nm.multiple}`]:t.multiple}]}})(rm),am=({ownerState:e,theme:t})=>(0,T.Z)({position:"absolute",right:0,top:"calc(50% - .5em)",pointerEvents:"none",color:t.palette.action.active,[`&.${nm.disabled}`]:{color:t.palette.action.disabled}},e.open&&{transform:"rotate(180deg)"},"filled"===e.variant&&{right:7},"outlined"===e.variant&&{right:7}),sm=(0,j.ZP)("svg",{name:"MuiNativeSelect",slot:"Icon",overridesResolver:(e,t)=>{const{ownerState:n}=e;return[t.icon,n.variant&&t[`icon${(0,R.Z)(n.variant)}`],n.open&&t.iconOpen]}})(am);var lm=I.forwardRef((function(e,t){const{className:n,disabled:o,IconComponent:r,inputRef:i,variant:a="standard"}=e,s=(0,O.Z)(e,om),l=(0,T.Z)({},e,{disabled:o,variant:a}),c=(e=>{const{classes:t,variant:n,disabled:o,multiple:r,open:i}=e,a={select:["select",n,o&&"disabled",r&&"multiple"],icon:["icon",`icon${(0,R.Z)(n)}`,i&&"iconOpen",o&&"disabled"]};return(0,E.Z)(a,tm,t)})(l);return(0,D.jsxs)(I.Fragment,{children:[(0,D.jsx)(im,(0,T.Z)({ownerState:l,className:(0,A.Z)(c.select,n),disabled:o,ref:i||t},s)),e.multiple?null:(0,D.jsx)(sm,{as:r,ownerState:l,className:c.icon})]})}));const cm=["className","children","classes","IconComponent","input","inputProps","variant"],um=["root"],dm=(0,D.jsx)(Hd,{}),pm=I.forwardRef((function(e,t){const n=(0,L.Z)({name:"MuiNativeSelect",props:e}),{className:o,children:r,classes:i={},IconComponent:a=$r,input:s=dm,inputProps:l}=n,c=(0,O.Z)(n,cm),u=Nc({props:n,muiFormControl:fs(),states:["variant"]}),d=(e=>{const{classes:t}=e;return(0,E.Z)({root:["root"]},tm,t)})((0,T.Z)({},n,{classes:i})),p=(0,O.Z)(i,um);return I.cloneElement(s,(0,T.Z)({inputComponent:lm,inputProps:(0,T.Z)({children:r,classes:p,IconComponent:a,variant:u.variant,type:void 0},l,s?s.props.inputProps:{}),ref:t},c,{className:(0,A.Z)(d.root,s.props.className,o)}))}));pm.muiName="Select";var fm=pm;var mm,hm=function(e){const{children:t,defer:n=!1,fallback:o=null}=e,[r,i]=I.useState(!1);return re((()=>{n||i(!0)}),[n]),I.useEffect((()=>{n&&i(!0)}),[n]),(0,D.jsx)(I.Fragment,{children:r?t:o})};const gm=["children","classes","className","label","notched"],vm=(0,j.ZP)("fieldset")({textAlign:"left",position:"absolute",bottom:0,right:0,top:-5,left:0,margin:0,padding:"0 8px",pointerEvents:"none",borderRadius:"inherit",borderStyle:"solid",borderWidth:1,overflow:"hidden",minWidth:"0%"}),bm=(0,j.ZP)("legend")((({ownerState:e,theme:t})=>(0,T.Z)({float:"unset",overflow:"hidden"},!e.withLabel&&{padding:0,lineHeight:"11px",transition:t.transitions.create("width",{duration:150,easing:t.transitions.easing.easeOut})},e.withLabel&&(0,T.Z)({display:"block",width:"auto",padding:0,height:11,fontSize:"0.75em",visibility:"hidden",maxWidth:.01,transition:t.transitions.create("max-width",{duration:50,easing:t.transitions.easing.easeOut}),whiteSpace:"nowrap","& > span":{paddingLeft:5,paddingRight:5,display:"inline-block"}},e.notched&&{maxWidth:"100%",transition:t.transitions.create("max-width",{duration:100,easing:t.transitions.easing.easeOut,delay:50})}))));const 
ym=["components","fullWidth","inputComponent","label","multiline","notched","type"],xm=(0,j.ZP)(Hc,{shouldForwardProp:e=>(0,j.FO)(e)||"classes"===e,name:"MuiOutlinedInput",slot:"Root",overridesResolver:Dc})((({theme:e,ownerState:t})=>{const n="light"===e.palette.mode?"rgba(0, 0, 0, 0.23)":"rgba(255, 255, 255, 0.23)";return(0,T.Z)({position:"relative",borderRadius:e.shape.borderRadius,[`&:hover .${Pr.notchedOutline}`]:{borderColor:e.palette.text.primary},"@media (hover: none)":{[`&:hover .${Pr.notchedOutline}`]:{borderColor:n}},[`&.${Pr.focused} .${Pr.notchedOutline}`]:{borderColor:e.palette[t.color].main,borderWidth:2},[`&.${Pr.error} .${Pr.notchedOutline}`]:{borderColor:e.palette.error.main},[`&.${Pr.disabled} .${Pr.notchedOutline}`]:{borderColor:e.palette.action.disabled}},t.startAdornment&&{paddingLeft:14},t.endAdornment&&{paddingRight:14},t.multiline&&(0,T.Z)({padding:"16.5px 14px"},"small"===t.size&&{padding:"8.5px 14px"}))})),Zm=(0,j.ZP)((function(e){const{className:t,label:n,notched:o}=e,r=(0,O.Z)(e,gm),i=null!=n&&""!==n,a=(0,T.Z)({},e,{notched:o,withLabel:i});return(0,D.jsx)(vm,(0,T.Z)({"aria-hidden":!0,className:t,ownerState:a},r,{children:(0,D.jsx)(bm,{ownerState:a,children:i?(0,D.jsx)("span",{children:n}):mm||(mm=(0,D.jsx)("span",{className:"notranslate",children:"\u200b"}))})}))}),{name:"MuiOutlinedInput",slot:"NotchedOutline",overridesResolver:(e,t)=>t.notchedOutline})((({theme:e})=>({borderColor:"light"===e.palette.mode?"rgba(0, 0, 0, 0.23)":"rgba(255, 255, 255, 0.23)"}))),wm=(0,j.ZP)(Vc,{name:"MuiOutlinedInput",slot:"Input",overridesResolver:Wc})((({theme:e,ownerState:t})=>(0,T.Z)({padding:"16.5px 14px","&:-webkit-autofill":{WebkitBoxShadow:"light"===e.palette.mode?null:"0 0 0 100px #266798 inset",WebkitTextFillColor:"light"===e.palette.mode?null:"#fff",caretColor:"light"===e.palette.mode?null:"#fff",borderRadius:"inherit"}},"small"===t.size&&{padding:"8.5px 14px"},t.multiline&&{padding:0},t.startAdornment&&{paddingLeft:0},t.endAdornment&&{paddingRight:0}))),Sm=I.forwardRef((function(e,t){var n;const o=(0,L.Z)({props:e,name:"MuiOutlinedInput"}),{components:r={},fullWidth:i=!1,inputComponent:a="input",label:s,multiline:l=!1,notched:c,type:u="text"}=o,d=(0,O.Z)(o,ym),p=(e=>{const{classes:t}=e,n=(0,E.Z)({root:["root"],notchedOutline:["notchedOutline"],input:["input"]},Mr,t);return(0,T.Z)({},t,n)})(o),f=Nc({props:o,muiFormControl:fs(),states:["required"]});return(0,D.jsx)(qc,(0,T.Z)({components:(0,T.Z)({Root:xm,Input:wm},r),renderSuffix:e=>(0,D.jsx)(Zm,{className:p.notchedOutline,label:null!=s&&""!==s&&f.required?n||(n=(0,D.jsxs)(I.Fragment,{children:[s,"\xa0","*"]})):s,notched:"undefined"!==typeof c?c:Boolean(e.startAdornment||e.filled||e.focused)}),fullWidth:i,inputComponent:a,multiline:l,ref:t,type:u},d,{classes:(0,T.Z)({},p,{notchedOutline:null})}))}));Sm.muiName="Input";var Cm=Sm;function km(e){return(0,N.Z)("MuiPagination",e)}var Rm=(0,z.Z)("MuiPagination",["root","ul","outlined","text"]);const Mm=["boundaryCount","componentName","count","defaultPage","disabled","hideNextButton","hidePrevButton","onChange","page","showFirstButton","showLastButton","siblingCount"];function Pm(e={}){const{boundaryCount:t=1,componentName:n="usePagination",count:o=1,defaultPage:r=1,disabled:i=!1,hideNextButton:a=!1,hidePrevButton:s=!1,onChange:l,page:c,showFirstButton:u=!1,showLastButton:d=!1,siblingCount:p=1}=e,f=(0,O.Z)(e,Mm),[m,h]=de({controlled:c,default:r,name:n,state:"page"}),g=(e,t)=>{c||h(t),l&&l(e,t)},v=(e,t)=>{const n=t-e+1;return 
Array.from({length:n},((t,n)=>e+n))},b=v(1,Math.min(t,o)),y=v(Math.max(o-t+1,t+1),o),x=Math.max(Math.min(m-p,o-t-2*p-1),t+2),Z=Math.min(Math.max(m+p,t+2*p+2),y.length>0?y[0]-2:o-1),w=[...u?["first"]:[],...s?[]:["previous"],...b,...x>t+2?["start-ellipsis"]:t+1 ~ Please see the abstract map site for further details about the research publication ~ This repository contains the mobile application used by human participants in the zoo experiments described in our IEEE TCDS journal. The app, created with Android Studio, includes the following: The project should be directly openable using Android Studio. Please keep in mind that this app was last developed in 2019, and Android Studio often introduces minor breaking changes with new versions. Often you will have to tweak things like Gradle versions / syntax etc. to get a project working with newer versions. Android Studio is very good though with pointing out where it sees errors and offering suggestions for how to resolve them. Once you have the project open, you should be able to compile the app and load it directly onto a device without issues. This work was supported by the Australian Research Council's Discovery Projects Funding Scheme under Project DP140103216. The authors are with the QUT Centre for Robotics. If you use this software in your research, or for comparisons, please kindly cite our work: CRICOS No. 00213JAndroid App for Human Participants
btalb/abstract_map_app
App for the Human vs Abstract Map Zoo Experiments
+
+
+
+Developing & producing the app
+Acknowledgements & Citing our work
+
+@ARTICLE{9091567,
+ author={B. {Talbot} and F. {Dayoub} and P. {Corke} and G. {Wyeth}},
+ journal={IEEE Transactions on Cognitive and Developmental Systems},
+ title={Robot Navigation in Unseen Spaces using an Abstract Map},
+ year={2020},
+ volume={},
+ number={},
+ pages={1-1},
+ keywords={Navigation;Robot sensing systems;Measurement;Linguistics;Visualization;symbol grounding;symbolic spatial information;abstract map;navigation;cognitive robotics;intelligent robots.},
+ doi={10.1109/TCDS.2020.2993855},
+ ISSN={2379-8939},
+ month={},
+}
+