Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove obsolete extrinsic hand eye optimizations #48

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 20 additions & 15 deletions rct_examples/src/examples/camera_on_wrist.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
#include <rct_image_tools/image_observation_finder.h>
// This header brings in the calibration function for 'moving camera' on robot wrist - what we
// want to calibrate here.
#include <rct_optimizations/extrinsic_camera_on_wrist.h>
#include <rct_optimizations/extrinsic_hand_eye.h>
// This header brings in the ros::package::getPath() command so I can find the data set
// on your computer.
#include <ros/package.h>
Expand All @@ -36,7 +36,7 @@ int extrinsicWristCameraCalibration()
// - Guesses for the 'base to target' and 'wrist to camera'
// - The wrist poses
// - Correspondences (2D in image to 3D in target) for each wrist pose
rct_optimizations::ExtrinsicCameraOnWristProblem problem_def;
rct_optimizations::ExtrinsicHandEyeProblem2D3D problem_def;

// Step 1: Define the camera intrinsics - I know mine so I write them here. If you don't know
// yours, use the opencv calibration tool to figure them out!
Expand All @@ -52,16 +52,16 @@ int extrinsicWristCameraCalibration()
wrist_to_camera_rot << 0, 1, 0,
-1, 0, 0,
0, 0, 1;
problem_def.wrist_to_camera_guess.translation() = wrist_to_camera_tx;
problem_def.wrist_to_camera_guess.linear() = wrist_to_camera_rot;
problem_def.camera_mount_to_camera_guess.translation() = wrist_to_camera_tx;
problem_def.camera_mount_to_camera_guess.linear() = wrist_to_camera_rot;

Eigen::Vector3d base_to_target_tx (1, 0, 0); // Guess for base-to-target
Eigen::Matrix3d base_to_target_rot;
base_to_target_rot << 0, 1, 0,
-1, 0, 0,
0, 0, 1;
problem_def.base_to_target_guess.translation() = base_to_target_tx;
problem_def.base_to_target_guess.linear() = base_to_target_rot;
problem_def.target_mount_to_target_guess.translation() = base_to_target_tx;
problem_def.target_mount_to_target_guess.linear() = base_to_target_rot;

// Step 3: We need to specify the wrist-poses and the correspondences. I'm going to use a simple
// tool I wrote to load images and wrist poses from a file. You need to implement the data
Expand Down Expand Up @@ -96,6 +96,7 @@ int extrinsicWristCameraCalibration()
// and 3) push them into the problem definition.
// This process of finding the dots can fail, so we make sure they are found before adding the
// wrist location for that image.
problem_def.observations.reserve(image_set.size());
for (std::size_t i = 0; i < image_set.size(); ++i)
{
// Try to find the circle grid in this image:
Expand All @@ -108,7 +109,8 @@ int extrinsicWristCameraCalibration()

// We found the target! Let's "zip-up" the correspondence pairs for this image - one for each
// dot. So we create a data structure:
rct_optimizations::CorrespondenceSet correspondences;
rct_optimizations::Observation2D3D obs;
obs.correspondence_set.reserve(maybe_obs->size());

// And loop over each detected dot:
for (std::size_t j = 0; j < maybe_obs->size(); ++j)
Expand All @@ -119,17 +121,20 @@ int extrinsicWristCameraCalibration()
rct_optimizations::Correspondence2D3D pair;
pair.in_image = maybe_obs->at(j); // The obs finder and target define their points in the same order!
pair.in_target = target.points[j];
correspondences.push_back(pair);
obs.correspondence_set.push_back(pair);
}

// Finally, let's add the wrist pose for this image, and all of the images correspondences
// to the problem!
problem_def.wrist_poses.push_back(wrist_poses[i]);
problem_def.image_observations.push_back(correspondences);
// Let's add the wrist pose for this image as the "to_camera_mount" transform
// Since the target is statically mounted to the robot base (its mount does not move), set the "to_target_mount" transform to identity
obs.to_camera_mount = wrist_poses[i];
obs.to_target_mount = Eigen::Isometry3d::Identity();

// Finally let's add this observation to the problem!
problem_def.observations.push_back(obs);
}

// Step 4: You defined a problem, let the tools solve it! Call 'optimize()'.
rct_optimizations::ExtrinsicCameraOnWristResult opt_result = rct_optimizations::optimize(problem_def);
rct_optimizations::ExtrinsicHandEyeResult opt_result = rct_optimizations::optimize(problem_def);

// Step 5: Do something with your results. Here I just print the results, but you might want to
// update a data structure, save to a file, push to a mutable joint or mutable state publisher in
Expand All @@ -139,11 +144,11 @@ int extrinsicWristCameraCalibration()

// Note: Convergence and low cost does not mean a good calibration. See the calibration primer
// readme on the main page of this repo.
Eigen::Isometry3d c = opt_result.wrist_to_camera;
Eigen::Isometry3d c = opt_result.camera_mount_to_camera;
rct_ros_tools::printTransform(c, "Wrist", "Camera", "WRIST TO CAMERA");
rct_ros_tools::printNewLine();

Eigen::Isometry3d t = opt_result.base_to_target;
Eigen::Isometry3d t = opt_result.target_mount_to_target;
rct_ros_tools::printTransform(t, "Base", "Target", "BASE TO TARGET");
rct_ros_tools::printNewLine();

Expand Down
42 changes: 26 additions & 16 deletions rct_examples/src/tools/camera_on_wrist_extrinsic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
#include <rct_image_tools/image_observation_finder.h>
#include <rct_image_tools/image_utils.h>
// The calibration function for 'moving camera' on robot wrist
#include <rct_optimizations/extrinsic_camera_on_wrist.h>
#include <rct_optimizations/extrinsic_hand_eye.h>
#include <rct_optimizations/experimental/pnp.h>

// For display of found targets
Expand All @@ -16,21 +16,20 @@
#include <rct_optimizations/ceres_math_utilities.h>

static void reproject(const Eigen::Isometry3d& wrist_to_camera, const Eigen::Isometry3d& base_to_target,
const Eigen::Isometry3d& base_to_wrist, const rct_optimizations::CameraIntrinsics& intr,
const rct_image_tools::ModifiedCircleGridTarget& target, const cv::Mat& image,
const rct_optimizations::CorrespondenceSet& corr)
const rct_optimizations::Observation2D3D& obs, const rct_optimizations::CameraIntrinsics& intr,
const rct_image_tools::ModifiedCircleGridTarget& target, const cv::Mat& image)
{
// We want to compute the "positional error" as well
// So first we compute the "camera to target" transform based on the calibration...
Eigen::Isometry3d camera_to_target = (base_to_wrist * wrist_to_camera).inverse() * base_to_target;
Eigen::Isometry3d camera_to_target = (obs.to_camera_mount * wrist_to_camera).inverse() * base_to_target;
std::vector<cv::Point2d> reprojections = rct_image_tools::getReprojections(camera_to_target, intr, target.points);

cv::Mat frame = image.clone();
rct_image_tools::drawReprojections(reprojections, 3, cv::Scalar(0, 0, 255), frame);

rct_optimizations::PnPProblem pb;
pb.camera_to_target_guess = camera_to_target;
pb.correspondences = corr;
pb.correspondences = obs.correspondence_set;
pb.intr = intr;
rct_optimizations::PnPResult r = rct_optimizations::optimize(pb);

Expand Down Expand Up @@ -88,17 +87,17 @@ int main(int argc, char** argv)
rct_image_tools::ModifiedCircleGridObservationFinder obs_finder(target);

// Now we create our calibration problem
rct_optimizations::ExtrinsicCameraOnWristProblem problem_def;
rct_optimizations::ExtrinsicHandEyeProblem2D3D problem_def;
problem_def.intr = intr; // Set the camera properties

// Our 'base to camera guess': A camera off to the side, looking at a point centered in front of the robot
if (!rct_ros_tools::loadPose(pnh, "base_to_target_guess", problem_def.base_to_target_guess))
if (!rct_ros_tools::loadPose(pnh, "base_to_target_guess", problem_def.target_mount_to_target_guess))
{
ROS_WARN_STREAM("Unable to load guess for base to camera from the 'base_to_target_guess' parameter struct");
return 1;
}

if (!rct_ros_tools::loadPose(pnh, "wrist_to_camera_guess", problem_def.wrist_to_camera_guess))
if (!rct_ros_tools::loadPose(pnh, "wrist_to_camera_guess", problem_def.camera_mount_to_camera_guess))
{
ROS_WARN_STREAM("Unable to load guess for wrist to target from the 'wrist_to_camera_guess' parameter struct");
return 1;
Expand All @@ -107,6 +106,7 @@ int main(int argc, char** argv)
// Finally, we need to process our images into correspondence sets: for each dot in the
// target this will be where that dot is in the target and where it was seen in the image.
// Repeat for each image. We also tell where the wrist was when the image was taken.
problem_def.observations.reserve(data_set.images.size());
for (std::size_t i = 0; i < data_set.images.size(); ++i)
{
// Try to find the circle grid in this image:
Expand All @@ -125,35 +125,45 @@ int main(int argc, char** argv)
cv::waitKey();
}

rct_optimizations::Observation2D3D obs;
obs.correspondence_set.reserve(maybe_obs->size());

// So for each image we need to:
//// 1. Record the wrist position
problem_def.wrist_poses.push_back(data_set.tool_poses[i]);
obs.to_camera_mount = data_set.tool_poses[i];
obs.to_target_mount = Eigen::Isometry3d::Identity();

//// And finally add that to the problem
problem_def.image_observations.push_back(rct_image_tools::getCorrespondenceSet(*maybe_obs, target.points));
obs.correspondence_set = rct_image_tools::getCorrespondenceSet(*maybe_obs, target.points);

problem_def.observations.push_back(obs);
}

// Now we have a defined problem, run optimization:
rct_optimizations::ExtrinsicCameraOnWristResult opt_result = rct_optimizations::optimize(problem_def);
rct_optimizations::ExtrinsicHandEyeResult opt_result = rct_optimizations::optimize(problem_def);

// Report results
rct_ros_tools::printOptResults(opt_result.converged, opt_result.initial_cost_per_obs, opt_result.final_cost_per_obs);
rct_ros_tools::printNewLine();

Eigen::Isometry3d c = opt_result.wrist_to_camera;
Eigen::Isometry3d c = opt_result.camera_mount_to_camera;
rct_ros_tools::printTransform(c, "Wrist", "Camera", "WRIST TO CAMERA");
rct_ros_tools::printNewLine();

Eigen::Isometry3d t = opt_result.base_to_target;
Eigen::Isometry3d t = opt_result.target_mount_to_target;
rct_ros_tools::printTransform(t, "Base", "Target", "BASE TO TARGET");
rct_ros_tools::printNewLine();

rct_ros_tools::printTitle("REPROJECTION ERROR");
for (std::size_t i = 0; i < data_set.images.size(); ++i)
{
rct_ros_tools::printTitle("REPROJECT IMAGE " + std::to_string(i));
reproject(opt_result.wrist_to_camera, opt_result.base_to_target, data_set.tool_poses[i],
intr, target, data_set.images[i], problem_def.image_observations[i]);
reproject(opt_result.camera_mount_to_camera,
opt_result.target_mount_to_target,
problem_def.observations[i],
intr,
target,
data_set.images[i]);
}

return 0;
Expand Down
45 changes: 26 additions & 19 deletions rct_examples/src/tools/static_camera_extrinsic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
#include <rct_image_tools/image_observation_finder.h>
#include <rct_image_tools/image_utils.h>
// The calibration function for 'static camera' on robot wrist
#include <rct_optimizations/extrinsic_static_camera.h>
#include <rct_optimizations/extrinsic_hand_eye.h>

#include <opencv2/highgui.hpp>
#include <ros/ros.h>
Expand All @@ -16,11 +16,10 @@
#include <rct_optimizations/experimental/pnp.h>

static void reproject(const Eigen::Isometry3d& wrist_to_target, const Eigen::Isometry3d& base_to_camera,
const Eigen::Isometry3d& base_to_wrist, const rct_optimizations::CameraIntrinsics& intr,
const rct_image_tools::ModifiedCircleGridTarget& target, const cv::Mat& image,
const rct_optimizations::CorrespondenceSet& corr)
const rct_optimizations::Observation2D3D& obs, const rct_optimizations::CameraIntrinsics& intr,
const rct_image_tools::ModifiedCircleGridTarget& target, const cv::Mat& image)
{
Eigen::Isometry3d camera_to_target = base_to_camera.inverse() * (base_to_wrist * wrist_to_target);
Eigen::Isometry3d camera_to_target = base_to_camera.inverse() * (obs.to_target_mount * wrist_to_target);
std::vector<cv::Point2d> reprojections = rct_image_tools::getReprojections(camera_to_target, intr, target.points);

cv::Mat frame = image.clone();
Expand All @@ -30,7 +29,7 @@ static void reproject(const Eigen::Isometry3d& wrist_to_target, const Eigen::Iso
// So first we compute the "camera to target" transform based on the calibration...
rct_optimizations::PnPProblem pb;
pb.camera_to_target_guess = camera_to_target;
pb.correspondences = corr;
pb.correspondences = obs.correspondence_set;
pb.intr = intr;
rct_optimizations::PnPResult r = rct_optimizations::optimize(pb);

Expand Down Expand Up @@ -85,18 +84,18 @@ int main(int argc, char** argv)
rct_image_tools::ModifiedCircleGridObservationFinder obs_finder(target);

// Now we construct our problem:
rct_optimizations::ExtrinsicStaticCameraMovingTargetProblem problem_def;
rct_optimizations::ExtrinsicHandEyeProblem2D3D problem_def;
// Our camera intrinsics to use
problem_def.intr = intr;

// Our 'base to camera guess': A camera off to the side, looking at a point centered in front of the robot
if (!rct_ros_tools::loadPose(pnh, "base_to_camera_guess", problem_def.base_to_camera_guess))
if (!rct_ros_tools::loadPose(pnh, "base_to_camera_guess", problem_def.camera_mount_to_camera_guess))
{
ROS_WARN_STREAM("Unable to load guess for base to camera from the 'base_to_camera_guess' parameter struct");
return 1;
}

if (!rct_ros_tools::loadPose(pnh, "wrist_to_target_guess", problem_def.wrist_to_target_guess))
if (!rct_ros_tools::loadPose(pnh, "wrist_to_target_guess", problem_def.target_mount_to_target_guess))
{
ROS_WARN_STREAM("Unable to load guess for wrist to target from the 'wrist_to_target_guess' parameter struct");
return 1;
Expand All @@ -106,6 +105,7 @@ int main(int argc, char** argv)
// target this will be where that dot is in the target and where it was seen in the image.
// Repeat for each image. We also tell where the wrist was when the image was taken.
rct_ros_tools::ExtrinsicDataSet found_images;
problem_def.observations.reserve(data_set.images.size());
for (std::size_t i = 0; i < data_set.images.size(); ++i)
{
// Try to find the circle grid in this image:
Expand All @@ -128,37 +128,44 @@ int main(int argc, char** argv)
found_images.images.push_back(data_set.images[i]);
found_images.tool_poses.push_back(data_set.tool_poses[i]);

rct_optimizations::Observation2D3D obs;

// So for each image we need to:
//// 1. Record the wrist position
problem_def.wrist_poses.push_back(data_set.tool_poses[i]);
obs.to_target_mount = data_set.tool_poses[i];
obs.to_camera_mount = Eigen::Isometry3d::Identity();

//// And finally add that to the problem
problem_def.image_observations.push_back(rct_image_tools::getCorrespondenceSet(*maybe_obs, target.points));
obs.correspondence_set = rct_image_tools::getCorrespondenceSet(*maybe_obs, target.points);

problem_def.observations.push_back(obs);
}

// Run optimization
rct_optimizations::ExtrinsicStaticCameraMovingTargetResult
opt_result = rct_optimizations::optimize(problem_def);
rct_optimizations::ExtrinsicHandEyeResult opt_result = rct_optimizations::optimize(problem_def);

// Report results
rct_ros_tools::printOptResults(opt_result.converged, opt_result.initial_cost_per_obs, opt_result.final_cost_per_obs);
rct_ros_tools::printNewLine();

Eigen::Isometry3d c = opt_result.base_to_camera;
Eigen::Isometry3d c = opt_result.camera_mount_to_camera;
rct_ros_tools::printTransform(c, "Base", "Camera", "BASE TO CAMERA");
rct_ros_tools::printNewLine();

Eigen::Isometry3d t = opt_result.wrist_to_target;
rct_ros_tools::printTransform(t, "Base", "Camera", "BASE TO CAMERA");
Eigen::Isometry3d t = opt_result.target_mount_to_target;
rct_ros_tools::printTransform(t, "Wrist", "Target", "WRIST_TO_TARGET");
rct_ros_tools::printNewLine();

for (std::size_t i = 0; i < found_images.images.size(); ++i)
{
rct_ros_tools::printTitle("REPROJECT IMAGE " + std::to_string(i));
reproject(opt_result.wrist_to_target, opt_result.base_to_camera, found_images.tool_poses[i],
intr, target, found_images.images[i], problem_def.image_observations[i]);
reproject(opt_result.target_mount_to_target,
opt_result.camera_mount_to_camera,
problem_def.observations[i],
intr,
target,
found_images.images[i]);
}


return 0;
}
6 changes: 1 addition & 5 deletions rct_optimizations/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,11 @@ find_package(Ceres REQUIRED)
add_library(${PROJECT_NAME} SHARED
# Utilities
src/${PROJECT_NAME}/eigen_conversions.cpp
# Optimizations (monocular camera)
src/${PROJECT_NAME}/extrinsic_camera_on_wrist.cpp
src/${PROJECT_NAME}/extrinsic_static_camera.cpp
# Optimizations (multiple cameras)
src/${PROJECT_NAME}/extrinsic_multi_static_camera.cpp
src/${PROJECT_NAME}/extrinsic_multi_static_camera_only.cpp
src/${PROJECT_NAME}/extrinsic_multi_static_camera_wrist_only.cpp
# Optimizations (3D sensor)
src/${PROJECT_NAME}/extrinsic_3d_camera_on_wrist.cpp
# Optimizations (extrinsic hand-eye, 2D and 3D cameras)
src/${PROJECT_NAME}/extrinsic_hand_eye.cpp
# Optimizations (Experimental) - Intrinsic
src/${PROJECT_NAME}/camera_intrinsic.cpp
Expand Down
Loading