From b08e175901c2b98aa999858fad971ac10cda1d97 Mon Sep 17 00:00:00 2001 From: PGgit08 Date: Sun, 16 Feb 2025 21:16:37 -0500 Subject: [PATCH] updated notes and ideas on ambiguity after adding noise to vision sim, gonna fix unit test tmrw --- notes.md | 2 +- .../frc/robot/utils/VisionPoseEstimator.java | 2 ++ .../frc/robot/VisionPoseEstimatorTest.java | 31 +++++++++++++------ 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/notes.md b/notes.md index 9e4b8bc..906ebbe 100644 --- a/notes.md +++ b/notes.md @@ -21,4 +21,4 @@ To actually find intrinsics, camera calibration is necessary. A checkerboard pat Noise is innaccurate measurements that occur from time to time from a sensor or signal. When detecting corners, there may be some noise in the camera sensor, and the detected image-coordinates of a corner can be different than what they are expected to be given the camera's current extrinsics and calibrated intrinsics. ### Ambiguity -When detecting pose using an AprilTag (4 corners), it is often possible to get two pose solutions from solvePnP, each with their own re-projection errors. If the re-projection errors are very close or equal, it is hard to tell which pose solution is the correct one, making the problem ambigious. In this case, the correct pose can be chosen by comparing the two to a different robot sensor, the gyro for example, and determining which one is correct. In terms of filtering, measurements with high ambiguity are typically correlated with high noise (a tag seen at a large distance would cause both ambiguity and corner noise), so those get rejected. At lower ambiguities, the measurement with better re-projection error may not necessarily be the correct measurement. Corner noise may cause for one innaccurate low re-projection error pose along with a more accurate higher re-projection error pose, so the correct one for lower ambiguities should also be determined using the gyro. 
+Often when detecting a single tag, solvePnP may produce two candidate pose solutions for that single tag frame. Each solution has its own reprojection error. Ambiguity occurs when both solutions have very similar reprojection error, meaning both poses fit the image corners equally well, and it can be unclear which of the two candidate poses is the "correct" one. When ambiguity is low, the two reprojection errors are very different, and the two poses differ strongly both in translation and heading, so comparing both poses to the gyro heading (which is assumed to be accurate) can identify the correct pose (the lower-reprojection-error solution is not necessarily the correct pose, it just means its projected tag corners are closer to the image tag corners). In the case where ambiguity is high, the two poses are very similar, and their heading distance from the gyro may be very similar, so after gyro disambiguation, the wrong pose may still be chosen (incorrect translation); therefore it is best to reject higher-ambiguity measurements. Higher noise can also increase ambiguity, as the lower-reprojection-error estimate would increase in reprojection error and move closer to the higher-reprojection-error estimate. 
diff --git a/src/main/java/frc/robot/utils/VisionPoseEstimator.java b/src/main/java/frc/robot/utils/VisionPoseEstimator.java index ce9777b..a859d5f 100644 --- a/src/main/java/frc/robot/utils/VisionPoseEstimator.java +++ b/src/main/java/frc/robot/utils/VisionPoseEstimator.java @@ -223,6 +223,8 @@ public VisionPoseEstimator( if (Robot.isSimulation()) { var cameraProps = new SimCameraProperties(); + cameraProps.setCalibError(0.01, 0.001); + _cameraSim = new PhotonCameraSim(_camera, cameraProps, fieldLayout); } else { _cameraSim = null; diff --git a/src/test/java/frc/robot/VisionPoseEstimatorTest.java b/src/test/java/frc/robot/VisionPoseEstimatorTest.java index 102ae7e..6997889 100644 --- a/src/test/java/frc/robot/VisionPoseEstimatorTest.java +++ b/src/test/java/frc/robot/VisionPoseEstimatorTest.java @@ -10,6 +10,7 @@ import edu.wpi.first.math.geometry.Rotation2d; import edu.wpi.first.math.geometry.Rotation3d; import edu.wpi.first.math.geometry.Transform3d; +import edu.wpi.first.math.geometry.Translation2d; import edu.wpi.first.math.geometry.Translation3d; import edu.wpi.first.math.util.Units; import frc.robot.utils.VisionPoseEstimator; @@ -74,7 +75,9 @@ public void setup() { 3, 7); - _testCam = VisionPoseEstimator.buildFromConstants(testCam, getNtInst(), _fieldLayout); + _testCam = + VisionPoseEstimator.buildFromConstants( + testCam, getNtInst(), _fieldLayout, this::dummyGyroHeading); _visionSystemSim = new VisionSystemSim(""); _visionSystemSim.addCamera(_testCam.getCameraSim(), _testCam.robotToCam); @@ -89,7 +92,7 @@ public void close() { public void noResults() { _visionSystemSim.update(Pose2d.kZero); - _testCam.update(this::dummyGyroHeading); + _testCam.update(); // no targets, no new estimates assertEquals(0, _testCam.getNewEstimates().size()); @@ -102,7 +105,7 @@ public void singleTagResult() { _visionSystemSim.update(Pose2d.kZero); // should see the single target - _testCam.update(this::dummyGyroHeading); + _testCam.update(); assertEquals(1, 
_testCam.getNewEstimates().size()); } @@ -115,7 +118,7 @@ public void singleTagResults() { _visionSystemSim.update(Pose2d.kZero); _visionSystemSim.update(Pose2d.kZero); // should see two new results of the single target - _testCam.update(this::dummyGyroHeading); + _testCam.update(); assertEquals(2, _testCam.getNewEstimates().size()); } @@ -127,7 +130,7 @@ public void singleTagEstimate() { _visionSystemSim.update(Pose2d.kZero); - _testCam.update(this::dummyGyroHeading); + _testCam.update(); var estimates = _testCam.getNewEstimates(); // 1 estimate assertEquals(1, estimates.size()); @@ -167,7 +170,7 @@ public void multiTagEstimate() { _visionSystemSim.addAprilTags(_fieldLayout); _visionSystemSim.update(Pose2d.kZero); - _testCam.update(this::dummyGyroHeading); + _testCam.update(); var estimates = _testCam.getNewEstimates(); // 1 estimate assertEquals(1, estimates.size()); @@ -220,7 +223,7 @@ public void boundsFilter() { // float the robot above the ground by 0.3 meters _visionSystemSim.update(new Pose3d(0, 0, 0.3, Rotation3d.kZero)); - _testCam.update(this::dummyGyroHeading); + _testCam.update(); var estimates = _testCam.getNewEstimates(); // 1 estimate assertEquals(1, estimates.size()); @@ -266,7 +269,7 @@ public void singleTagDistanceFilter() { _visionSystemSim.update(Pose2d.kZero); - _testCam.update(this::dummyGyroHeading); + _testCam.update(); var estimates = _testCam.getNewEstimates(); // 1 estimate assertEquals(1, estimates.size()); @@ -312,7 +315,15 @@ public void estimateSort() { for (int i = 3; i > 0; i--) { newEstimates.add( new VisionPoseEstimate( - Pose3d.kZero, i, 0.03, new int[] {1}, 1.2, new double[] {0.3, 0.1, 0.2}, true)); + Pose3d.kZero, + i, + 0.03, + Pose3d.kZero, + new Translation2d[] {Translation2d.kZero}, + new int[] {1}, + 1.2, + new double[] {0.3, 0.1, 0.2}, + true)); } newEstimates.add( @@ -320,6 +331,8 @@ public void estimateSort() { Pose3d.kZero, 2, // same timestamp case 0.03, + Pose3d.kZero, + new Translation2d[] {Translation2d.kZero}, 
new int[] {1}, 1.2, new double[] {