process(CVMat in) {
+ var imgMat = in.getMat();
+ var detections = photonDetector.detect(imgMat);
+ // manually do corner refinement ourselves
+ if (params.useCornerRefinement) {
+ for (var detection : detections) {
+ double[] xCorners = detection.getXCorners();
+ double[] yCorners = detection.getYCorners();
+ Point[] cornerPoints =
+ new Point[] {
+ new Point(xCorners[0], yCorners[0]),
+ new Point(xCorners[1], yCorners[1]),
+ new Point(xCorners[2], yCorners[2]),
+ new Point(xCorners[3], yCorners[3])
+ };
+ double bltr =
+ Math.hypot(
+ cornerPoints[2].x - cornerPoints[0].x, cornerPoints[2].y - cornerPoints[0].y);
+ double brtl =
+ Math.hypot(
+ cornerPoints[3].x - cornerPoints[1].x, cornerPoints[3].y - cornerPoints[1].y);
+ double minDiag = Math.min(bltr, brtl);
+ int halfWindowLength =
+ (int) Math.ceil(kRefineWindowImageRatio * Math.min(imgMat.rows(), imgMat.cols()));
+ halfWindowLength += (int) (minDiag * kRefineWindowMarkerRatio);
+ // don't do refinement on small markers
+ if (halfWindowLength < 4) continue;
+ var halfWindowSize = new Size(halfWindowLength, halfWindowLength);
+ var ptsMat = new MatOfPoint2f(cornerPoints);
+ var criteria =
+ new TermCriteria(3, params.refinementMaxIterations, params.refinementMinErrorPx);
+ Imgproc.cornerSubPix(imgMat, ptsMat, halfWindowSize, new Size(-1, -1), criteria);
+ cornerPoints = ptsMat.toArray();
+ for (int i = 0; i < cornerPoints.length; i++) {
+ var pt = cornerPoints[i];
+ xCorners[i] = pt.x;
+ yCorners[i] = pt.y;
+ // If we want to debug the refinement window, draw a rectangle on the image
+ if (params.debugRefineWindow) {
+ drawCornerRefineWindow(imgMat, pt, halfWindowLength);
+ }
+ }
+ }
+ }
+ return List.of(detections);
}
@Override
- public void setParams(ArucoDetectionPipeParams params) {
- super.setParams(params);
+ public void setParams(ArucoDetectionPipeParams newParams) {
+ if (this.params == null || !this.params.equals(newParams)) {
+ photonDetector
+ .getDetector()
+ .setDictionary(Objdetect.getPredefinedDictionary(newParams.tagFamily));
+ var detectParams = photonDetector.getParams();
+
+ detectParams.set_adaptiveThreshWinSizeMin(newParams.threshMinSize);
+ detectParams.set_adaptiveThreshWinSizeStep(newParams.threshStepSize);
+ detectParams.set_adaptiveThreshWinSizeMax(newParams.threshMaxSize);
+ detectParams.set_adaptiveThreshConstant(newParams.threshConstant);
+
+ detectParams.set_errorCorrectionRate(newParams.errorCorrectionRate);
+
+ detectParams.set_useAruco3Detection(newParams.useAruco3);
+ detectParams.set_minSideLengthCanonicalImg(newParams.aruco3MinCanonicalImgSide);
+ detectParams.set_minMarkerLengthRatioOriginalImg((float) newParams.aruco3MinMarkerSideRatio);
+
+ photonDetector.setParams(detectParams);
+ }
+
+ super.setParams(newParams);
+ }
+
+ public PhotonArucoDetector getPhotonDetector() {
+ return photonDetector;
}
- public DetectorParameters getParameters() {
- return params == null ? null : params.detectorParams.getDetectorParameters();
+ private void drawCornerRefineWindow(Mat outputMat, Point corner, int windowSize) {
+ int thickness = (int) (Math.ceil(Math.max(outputMat.cols(), outputMat.rows()) * 0.003));
+ var pt1 = new Point(corner.x - windowSize, corner.y - windowSize);
+ var pt2 = new Point(corner.x + windowSize, corner.y + windowSize);
+ Imgproc.rectangle(outputMat, pt1, pt2, new Scalar(0, 0, 255), thickness);
}
}
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoDetectionPipeParams.java b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoDetectionPipeParams.java
index 62d3214030..34b1384e92 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoDetectionPipeParams.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoDetectionPipeParams.java
@@ -18,40 +18,90 @@
package org.photonvision.vision.pipe.impl;
import java.util.Objects;
-import org.opencv.objdetect.ArucoDetector;
-import org.photonvision.vision.calibration.CameraCalibrationCoefficients;
+import org.opencv.objdetect.Objdetect;
+/** Detector parameters. See https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html. */
public class ArucoDetectionPipeParams {
- public ArucoDetector detectorParams;
- public final CameraCalibrationCoefficients cameraCalibrationCoefficients;
+ /** Tag family. Default: {@link Objdetect#DICT_APRILTAG_16h5}. */
+ public int tagFamily = Objdetect.DICT_APRILTAG_16h5;
- public ArucoDetectionPipeParams(
- ArucoDetector detector, CameraCalibrationCoefficients cameraCalibrationCoefficients) {
- this.detectorParams = detector;
- this.cameraCalibrationCoefficients = cameraCalibrationCoefficients;
- }
+ public int threshMinSize = 11;
+ public int threshStepSize = 40;
+ public int threshMaxSize = 91;
+ public double threshConstant = 10;
+
+ /**
+ * Bits allowed to be corrected, expressed as a ratio of the tag family's theoretical maximum.
+ *
+ * <p>E.g. 36h11 -> 11 * errorCorrectionRate = Max error bits
+ */
+ public double errorCorrectionRate = 0;
+
+ /**
+ * If obtained corners should be iteratively refined. This should always be on for 3D estimation.
+ */
+ public boolean useCornerRefinement = true;
+
+ /** Maximum corner refinement iterations. */
+ public int refinementMaxIterations = 30;
+
+ /**
+ * Minimum error (accuracy) for corner refinement in pixels. When a corner refinement iteration
+ * moves the corner by less than this value, the refinement is considered finished.
+ */
+ public double refinementMinErrorPx = 0.005;
+
+ public boolean debugRefineWindow = false;
+
+ /**
+ * If the 'Aruco3' speedup should be used. This is similar to AprilTag's 'decimate' value, but
+ * automatically determined with the given parameters.
+ *
+ *
T_i = aruco3MinMarkerSideRatio, and T_c = aruco3MinCanonicalImgSide
+ *
+ *
Scale factor = T_c / (T_c + T_i * max(img_width, img_height))
+ */
+ public boolean useAruco3 = false;
+
+ /** Minimum side length of markers expressed as a ratio of the largest image dimension. */
+ public double aruco3MinMarkerSideRatio = 0.02;
+
+ /** Minimum side length of the canonical image (marker after undoing perspective distortion). */
+ public int aruco3MinCanonicalImgSide = 32;
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ArucoDetectionPipeParams that = (ArucoDetectionPipeParams) o;
- return Objects.equals(detectorParams, that.detectorParams)
- && Objects.equals(cameraCalibrationCoefficients, that.cameraCalibrationCoefficients);
+ return tagFamily == that.tagFamily
+ && threshMinSize == that.threshMinSize
+ && threshStepSize == that.threshStepSize
+ && threshMaxSize == that.threshMaxSize
+ && threshConstant == that.threshConstant
+ && errorCorrectionRate == that.errorCorrectionRate
+ && useCornerRefinement == that.useCornerRefinement
+ && refinementMaxIterations == that.refinementMaxIterations
+ && refinementMinErrorPx == that.refinementMinErrorPx
+ && useAruco3 == that.useAruco3
+ && aruco3MinMarkerSideRatio == that.aruco3MinMarkerSideRatio
+ && aruco3MinCanonicalImgSide == that.aruco3MinCanonicalImgSide;
}
@Override
public int hashCode() {
- return Objects.hash(detectorParams, cameraCalibrationCoefficients);
- }
-
- @Override
- public String toString() {
- return "ArucoDetectionPipeParams{"
- + "detectorParams="
- + detectorParams
- + ", cameraCalibrationCoefficients="
- + cameraCalibrationCoefficients
- + '}';
+ return Objects.hash(
+ tagFamily,
+ threshMinSize,
+ threshStepSize,
+ threshMaxSize,
+ threshConstant,
+ errorCorrectionRate,
+ useCornerRefinement,
+ refinementMaxIterations,
+ refinementMinErrorPx,
+ useAruco3,
+ aruco3MinMarkerSideRatio,
+ aruco3MinCanonicalImgSide);
}
}
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoPoseEstimatorPipe.java b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoPoseEstimatorPipe.java
new file mode 100644
index 0000000000..579996890e
--- /dev/null
+++ b/photon-core/src/main/java/org/photonvision/vision/pipe/impl/ArucoPoseEstimatorPipe.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) Photon Vision.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package org.photonvision.vision.pipe.impl;
+
+import edu.wpi.first.apriltag.AprilTagPoseEstimate;
+import edu.wpi.first.math.VecBuilder;
+import edu.wpi.first.math.geometry.Rotation3d;
+import edu.wpi.first.math.geometry.Transform3d;
+import edu.wpi.first.math.geometry.Translation3d;
+import java.util.ArrayList;
+import java.util.List;
+import org.opencv.calib3d.Calib3d;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfPoint2f;
+import org.opencv.core.MatOfPoint3f;
+import org.opencv.core.Point3;
+import org.photonvision.vision.aruco.ArucoDetectionResult;
+import org.photonvision.vision.calibration.CameraCalibrationCoefficients;
+import org.photonvision.vision.pipe.CVPipe;
+
+public class ArucoPoseEstimatorPipe
+ extends CVPipe<
+ ArucoDetectionResult,
+ AprilTagPoseEstimate,
+ ArucoPoseEstimatorPipe.ArucoPoseEstimatorPipeParams> {
+ // image points of marker corners
+ private final MatOfPoint2f imagePoints = new MatOfPoint2f(Mat.zeros(4, 1, CvType.CV_32FC2));
+ // rvec/tvec estimations from solvepnp
+ private final List<Mat> rvecs = new ArrayList<>();
+ private final List<Mat> tvecs = new ArrayList<>();
+ // unused parameters
+ private final Mat rvec = Mat.zeros(3, 1, CvType.CV_32F);
+ private final Mat tvec = Mat.zeros(3, 1, CvType.CV_32F);
+ // reprojection error of solvepnp estimations
+ private final Mat reprojectionErrors = Mat.zeros(2, 1, CvType.CV_32F);
+
+ private final int kNaNRetries = 1;
+
+ private Translation3d tvecToTranslation3d(Mat mat) {
+ double[] tvec = new double[3];
+ mat.get(0, 0, tvec);
+ return new Translation3d(tvec[0], tvec[1], tvec[2]);
+ }
+
+ private Rotation3d rvecToRotation3d(Mat mat) {
+ double[] rvec = new double[3];
+ mat.get(0, 0, rvec);
+ return new Rotation3d(VecBuilder.fill(rvec[0], rvec[1], rvec[2]));
+ }
+
+ @Override
+ protected AprilTagPoseEstimate process(ArucoDetectionResult in) {
+ // We receive 2d corners as (BL, BR, TR, TL) but we want (BR, BL, TL, TR)
+ double[] xCorn = in.getXCorners();
+ double[] yCorn = in.getYCorners();
+ imagePoints.put(0, 0, new float[] {(float) xCorn[1], (float) yCorn[1]});
+ imagePoints.put(1, 0, new float[] {(float) xCorn[0], (float) yCorn[0]});
+ imagePoints.put(2, 0, new float[] {(float) xCorn[3], (float) yCorn[3]});
+ imagePoints.put(3, 0, new float[] {(float) xCorn[2], (float) yCorn[2]});
+
+ float[] reprojErrors = new float[2];
+ // very rarely the solvepnp solver returns NaN results, so we retry with slight noise added
+ for (int i = 0; i < kNaNRetries + 1; i++) {
+ // SolvePnP with SOLVEPNP_IPPE_SQUARE solver
+ Calib3d.solvePnPGeneric(
+ params.objectPoints,
+ imagePoints,
+ params.calibration.getCameraIntrinsicsMat(),
+ params.calibration.getDistCoeffsMat(),
+ rvecs,
+ tvecs,
+ false,
+ Calib3d.SOLVEPNP_IPPE_SQUARE,
+ rvec,
+ tvec,
+ reprojectionErrors);
+
+ // check if we got a NaN result
+ reprojectionErrors.get(0, 0, reprojErrors);
+ if (!Double.isNaN(reprojErrors[0])) break;
+ else { // add noise and retry
+ double[] br = imagePoints.get(0, 0);
+ br[0] -= 0.001;
+ br[1] -= 0.001;
+ imagePoints.put(0, 0, br);
+ }
+ }
+
+ // create AprilTagPoseEstimate with results
+ if (tvecs.isEmpty())
+ return new AprilTagPoseEstimate(new Transform3d(), new Transform3d(), 0, 0);
+ return new AprilTagPoseEstimate(
+ new Transform3d(tvecToTranslation3d(tvecs.get(0)), rvecToRotation3d(rvecs.get(0))),
+ new Transform3d(tvecToTranslation3d(tvecs.get(1)), rvecToRotation3d(rvecs.get(1))),
+ reprojErrors[0],
+ reprojErrors[1]);
+ }
+
+ @Override
+ public void setParams(ArucoPoseEstimatorPipe.ArucoPoseEstimatorPipeParams newParams) {
+ super.setParams(newParams);
+ }
+
+ public static class ArucoPoseEstimatorPipeParams {
+ final CameraCalibrationCoefficients calibration;
+ final double tagSize;
+ // object vertices defined by tag size
+ final MatOfPoint3f objectPoints;
+
+ public ArucoPoseEstimatorPipeParams(CameraCalibrationCoefficients cal, double tagSize) {
+ this.calibration = cal;
+ this.tagSize = tagSize;
+
+ // This order is relevant for SOLVEPNP_IPPE_SQUARE
+ // The expected 2d correspondences with a tag facing the camera would be (BR, BL, TL, TR)
+ objectPoints =
+ new MatOfPoint3f(
+ new Point3(-tagSize / 2, tagSize / 2, 0),
+ new Point3(tagSize / 2, tagSize / 2, 0),
+ new Point3(tagSize / 2, -tagSize / 2, 0),
+ new Point3(-tagSize / 2, -tagSize / 2, 0));
+ }
+ }
+}
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java
index d1837ff770..97ec1f2f3c 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipeline.java
@@ -125,8 +125,7 @@ protected CVPipelineResult process(Frame frame, AprilTagPipelineSettings setting
long sumPipeNanosElapsed = 0L;
if (frame.type != FrameThresholdType.GREYSCALE) {
- // TODO so all cameras should give us GREYSCALE -- how should we handle if not?
- // Right now, we just return nothing
+ // We asked for a GREYSCALE frame, but didn't get one -- best we can do is give up
return new CVPipelineResult(0, 0, List.of(), frame);
}
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipelineSettings.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipelineSettings.java
index 47faf82e29..6db514d005 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipelineSettings.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/AprilTagPipelineSettings.java
@@ -32,7 +32,7 @@ public class AprilTagPipelineSettings extends AdvancedPipelineSettings {
public int numIterations = 40;
public int hammingDist = 0;
public int decisionMargin = 35;
- public boolean doMultiTarget = true;
+ public boolean doMultiTarget = false;
public boolean doSingleTargetAlways = false;
// 3d settings
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java
index 26a5c60151..4d80029cbd 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipeline.java
@@ -34,29 +34,36 @@
package org.photonvision.vision.pipeline;
+import edu.wpi.first.apriltag.AprilTagPoseEstimate;
+import edu.wpi.first.math.geometry.CoordinateSystem;
+import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Transform3d;
+import edu.wpi.first.math.util.Units;
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Mat;
+import org.opencv.imgproc.Imgproc;
+import org.opencv.objdetect.Objdetect;
+import org.photonvision.common.configuration.ConfigManager;
+import org.photonvision.common.util.math.MathUtils;
+import org.photonvision.targeting.MultiTargetPNPResults;
import org.photonvision.vision.aruco.ArucoDetectionResult;
-import org.photonvision.vision.aruco.ArucoDetectorParams;
import org.photonvision.vision.frame.Frame;
import org.photonvision.vision.frame.FrameThresholdType;
import org.photonvision.vision.pipe.CVPipe.CVPipeResult;
import org.photonvision.vision.pipe.impl.*;
+import org.photonvision.vision.pipe.impl.ArucoPoseEstimatorPipe.ArucoPoseEstimatorPipeParams;
+import org.photonvision.vision.pipe.impl.MultiTargetPNPPipe.MultiTargetPNPPipeParams;
import org.photonvision.vision.pipeline.result.CVPipelineResult;
import org.photonvision.vision.target.TrackedTarget;
import org.photonvision.vision.target.TrackedTarget.TargetCalculationParameters;
public class ArucoPipeline extends CVPipeline<ArucoPipelineSettings> {
- private final RotateImagePipe rotateImagePipe = new RotateImagePipe();
- private final GrayscalePipe grayscalePipe = new GrayscalePipe();
-
private final ArucoDetectionPipe arucoDetectionPipe = new ArucoDetectionPipe();
+ private final ArucoPoseEstimatorPipe singleTagPoseEstimatorPipe = new ArucoPoseEstimatorPipe();
+ private final MultiTargetPNPPipe multiTagPNPPipe = new MultiTargetPNPPipe();
private final CalculateFPSPipe calculateFPSPipe = new CalculateFPSPipe();
- ArucoDetectorParams m_arucoDetectorParams = new ArucoDetectorParams();
-
public ArucoPipeline() {
super(FrameThresholdType.GREYSCALE);
settings = new ArucoPipelineSettings();
@@ -69,59 +76,171 @@ public ArucoPipeline(ArucoPipelineSettings settings) {
@Override
protected void setPipeParamsImpl() {
- // Sanitize thread count - not supported to have fewer than 1 threads
- settings.threads = Math.max(1, settings.threads);
-
- RotateImagePipe.RotateImageParams rotateImageParams =
- new RotateImagePipe.RotateImageParams(settings.inputImageRotationMode);
- rotateImagePipe.setParams(rotateImageParams);
-
- m_arucoDetectorParams.setDecimation((float) settings.decimate);
- m_arucoDetectorParams.setCornerRefinementMaxIterations(settings.numIterations);
- m_arucoDetectorParams.setCornerAccuracy(settings.cornerAccuracy);
+ var params = new ArucoDetectionPipeParams();
+ // sanitize and record settings
+
+ switch (settings.tagFamily) {
+ case kTag36h11:
+ params.tagFamily = Objdetect.DICT_APRILTAG_36h11;
+ break;
+ case kTag25h9:
+ params.tagFamily = Objdetect.DICT_APRILTAG_25h9;
+ break;
+ default:
+ params.tagFamily = Objdetect.DICT_APRILTAG_16h5;
+ }
- arucoDetectionPipe.setParams(
- new ArucoDetectionPipeParams(
- m_arucoDetectorParams.getDetector(), frameStaticProperties.cameraCalibration));
+ int threshMinSize = Math.max(3, settings.threshWinSizes.getFirst());
+ settings.threshWinSizes.setFirst(threshMinSize);
+ params.threshMinSize = threshMinSize;
+ int threshStepSize = Math.max(2, settings.threshStepSize);
+ settings.threshStepSize = threshStepSize;
+ params.threshStepSize = threshStepSize;
+ int threshMaxSize = Math.max(threshMinSize, settings.threshWinSizes.getSecond());
+ settings.threshWinSizes.setSecond(threshMaxSize);
+ params.threshMaxSize = threshMaxSize;
+ params.threshConstant = settings.threshConstant;
+
+ params.useCornerRefinement = settings.useCornerRefinement;
+ params.refinementMaxIterations = settings.refineNumIterations;
+ params.refinementMinErrorPx = settings.refineMinErrorPx;
+ params.useAruco3 = settings.useAruco3;
+ params.aruco3MinMarkerSideRatio = settings.aruco3MinMarkerSideRatio;
+ params.aruco3MinCanonicalImgSide = settings.aruco3MinCanonicalImgSide;
+ arucoDetectionPipe.setParams(params);
+
+ if (frameStaticProperties.cameraCalibration != null) {
+ var cameraMatrix = frameStaticProperties.cameraCalibration.getCameraIntrinsicsMat();
+ if (cameraMatrix != null && cameraMatrix.rows() > 0) {
+ var estimatorParams =
+ new ArucoPoseEstimatorPipeParams(
+ frameStaticProperties.cameraCalibration, Units.inchesToMeters(6));
+ singleTagPoseEstimatorPipe.setParams(estimatorParams);
+
+ // TODO global state ew
+ var atfl = ConfigManager.getInstance().getConfig().getApriltagFieldLayout();
+ multiTagPNPPipe.setParams(
+ new MultiTargetPNPPipeParams(frameStaticProperties.cameraCalibration, atfl));
+ }
+ }
}
@Override
protected CVPipelineResult process(Frame frame, ArucoPipelineSettings settings) {
long sumPipeNanosElapsed = 0L;
- Mat rawInputMat;
- rawInputMat = frame.colorImage.getMat();
- List<TrackedTarget> targetList;
- CVPipeResult<List<ArucoDetectionResult>> tagDetectionPipeResult;
+ if (frame.type != FrameThresholdType.GREYSCALE) {
+ // We asked for a GREYSCALE frame, but didn't get one -- best we can do is give up
+ return new CVPipelineResult(0, 0, List.of(), frame);
+ }
- if (rawInputMat.empty()) {
- return new CVPipelineResult(sumPipeNanosElapsed, 0, List.of(), frame);
+ CVPipeResult<List<ArucoDetectionResult>> tagDetectionPipeResult;
+ tagDetectionPipeResult = arucoDetectionPipe.run(frame.processedImage);
+ sumPipeNanosElapsed += tagDetectionPipeResult.nanosElapsed;
+
+ // If we want to debug the thresholding steps, draw the first step to the color image
+ if (settings.debugThreshold) {
+ drawThresholdFrame(
+ frame.processedImage.getMat(),
+ frame.colorImage.getMat(),
+ settings.threshWinSizes.getFirst(),
+ settings.threshConstant);
}
- tagDetectionPipeResult = arucoDetectionPipe.run(rawInputMat);
- targetList = new ArrayList<>();
+ List<TrackedTarget> targetList = new ArrayList<>();
for (ArucoDetectionResult detection : tagDetectionPipeResult.output) {
- // TODO this should be in a pipe, not in the top level here (Matt)
-
- // populate the target list
- // Challenge here is that TrackedTarget functions with OpenCV Contour
+ // Populate target list for multitag
+ // (TODO: Address circular dependencies. Multitag only requires corners and IDs, this should
+ // not be necessary.)
TrackedTarget target =
new TrackedTarget(
detection,
+ null,
new TargetCalculationParameters(
false, null, null, null, null, frameStaticProperties));
- var correctedBestPose = target.getBestCameraToTarget3d();
+ targetList.add(target);
+ }
- target.setBestCameraToTarget3d(
- new Transform3d(correctedBestPose.getTranslation(), correctedBestPose.getRotation()));
+ // Do multi-tag pose estimation
+ MultiTargetPNPResults multiTagResult = new MultiTargetPNPResults();
+ if (settings.solvePNPEnabled && settings.doMultiTarget) {
+ var multiTagOutput = multiTagPNPPipe.run(targetList);
+ sumPipeNanosElapsed += multiTagOutput.nanosElapsed;
+ multiTagResult = multiTagOutput.output;
+ }
- targetList.add(target);
+ // Do single-tag pose estimation
+ if (settings.solvePNPEnabled) {
+ // Clear target list that was used for multitag so we can add target transforms
+ targetList.clear();
+ // TODO global state again ew
+ var atfl = ConfigManager.getInstance().getConfig().getApriltagFieldLayout();
+
+ for (ArucoDetectionResult detection : tagDetectionPipeResult.output) {
+ AprilTagPoseEstimate tagPoseEstimate = null;
+ // Do single-tag estimation when "always enabled" or if a tag was not used for multitag
+ if (settings.doSingleTargetAlways
+ || !multiTagResult.fiducialIDsUsed.contains(detection.getId())) {
+ var poseResult = singleTagPoseEstimatorPipe.run(detection);
+ sumPipeNanosElapsed += poseResult.nanosElapsed;
+ tagPoseEstimate = poseResult.output;
+ }
+
+ // If single-tag estimation was not done, this is a multi-target tag from the layout
+ if (tagPoseEstimate == null) {
+ // compute this tag's camera-to-tag transform using the multitag result
+ var tagPose = atfl.getTagPose(detection.getId());
+ if (tagPose.isPresent()) {
+ var camToTag =
+ new Transform3d(
+ new Pose3d().plus(multiTagResult.estimatedPose.best), tagPose.get());
+ // match expected OpenCV coordinate system
+ camToTag =
+ CoordinateSystem.convert(camToTag, CoordinateSystem.NWU(), CoordinateSystem.EDN());
+
+ tagPoseEstimate = new AprilTagPoseEstimate(camToTag, camToTag, 0, 0);
+ }
+ }
+
+ // populate the target list
+ // Challenge here is that TrackedTarget functions with OpenCV Contour
+ TrackedTarget target =
+ new TrackedTarget(
+ detection,
+ tagPoseEstimate,
+ new TargetCalculationParameters(
+ false, null, null, null, null, frameStaticProperties));
+
+ var correctedBestPose =
+ MathUtils.convertOpenCVtoPhotonTransform(target.getBestCameraToTarget3d());
+ var correctedAltPose =
+ MathUtils.convertOpenCVtoPhotonTransform(target.getAltCameraToTarget3d());
+
+ target.setBestCameraToTarget3d(
+ new Transform3d(correctedBestPose.getTranslation(), correctedBestPose.getRotation()));
+ target.setAltCameraToTarget3d(
+ new Transform3d(correctedAltPose.getTranslation(), correctedAltPose.getRotation()));
+
+ targetList.add(target);
+ }
}
var fpsResult = calculateFPSPipe.run(null);
var fps = fpsResult.output;
- return new CVPipelineResult(sumPipeNanosElapsed, fps, targetList, frame);
+ return new CVPipelineResult(sumPipeNanosElapsed, fps, targetList, multiTagResult, frame);
+ }
+
+ private void drawThresholdFrame(Mat greyMat, Mat outputMat, int windowSize, double constant) {
+ if (windowSize % 2 == 0) windowSize++;
+ Imgproc.adaptiveThreshold(
+ greyMat,
+ outputMat,
+ 255,
+ Imgproc.ADAPTIVE_THRESH_MEAN_C,
+ Imgproc.THRESH_BINARY_INV,
+ windowSize,
+ constant);
}
}
diff --git a/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipelineSettings.java b/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipelineSettings.java
index 265a8d202e..94eb963435 100644
--- a/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipelineSettings.java
+++ b/photon-core/src/main/java/org/photonvision/vision/pipeline/ArucoPipelineSettings.java
@@ -18,23 +18,35 @@
package org.photonvision.vision.pipeline;
import com.fasterxml.jackson.annotation.JsonTypeName;
+import org.photonvision.common.util.numbers.IntegerCouple;
+import org.photonvision.vision.apriltag.AprilTagFamily;
import org.photonvision.vision.target.TargetModel;
@JsonTypeName("ArucoPipelineSettings")
public class ArucoPipelineSettings extends AdvancedPipelineSettings {
- public double decimate = 1;
- public int threads = 2;
- public int numIterations = 100;
- public double cornerAccuracy = 25.0;
- public boolean useAruco3 = true;
+ public AprilTagFamily tagFamily = AprilTagFamily.kTag16h5;
- // 3d settings
+ public IntegerCouple threshWinSizes = new IntegerCouple(11, 91);
+ public int threshStepSize = 40;
+ public double threshConstant = 10;
+ public boolean debugThreshold = false;
+
+ public boolean useCornerRefinement = true;
+ public int refineNumIterations = 30;
+ public double refineMinErrorPx = 0.005;
+
+ public boolean useAruco3 = false;
+ public double aruco3MinMarkerSideRatio = 0.02;
+ public int aruco3MinCanonicalImgSide = 32;
+
+ public boolean doMultiTarget = false;
+ public boolean doSingleTargetAlways = false;
public ArucoPipelineSettings() {
super();
pipelineType = PipelineType.Aruco;
outputShowMultipleTargets = true;
- targetModel = TargetModel.kAruco6in_16h5;
+ targetModel = TargetModel.k6in_16h5;
cameraExposure = -1;
cameraAutoExposure = true;
ledMode = false;
diff --git a/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java b/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java
index 198cbd5139..de6635b588 100644
--- a/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java
+++ b/photon-core/src/main/java/org/photonvision/vision/target/TargetModel.java
@@ -114,14 +114,8 @@ public enum TargetModel implements Releasable {
new Point3(-Units.inchesToMeters(3.25), -Units.inchesToMeters(3.25), 0),
new Point3(Units.inchesToMeters(3.25), -Units.inchesToMeters(3.25), 0)),
Units.inchesToMeters(3.25 * 2)),
- kAruco6in_16h5( // Corners of the tag's inner black square (excluding white border)
- List.of(
- new Point3(Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
- new Point3(Units.inchesToMeters(3), -Units.inchesToMeters(3), 0),
- new Point3(-Units.inchesToMeters(3), -Units.inchesToMeters(3), 0),
- new Point3(Units.inchesToMeters(3), -Units.inchesToMeters(3), 0)),
- Units.inchesToMeters(3 * 2)),
- k6in_16h5( // Corners of the tag's inner black square (excluding white border)
+ k6in_16h5( // Nominal edge length of 200 mm includes the white border, but solvePNP corners
+ // do not
List.of(
new Point3(Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
new Point3(-Units.inchesToMeters(3), Units.inchesToMeters(3), 0),
diff --git a/photon-core/src/main/java/org/photonvision/vision/target/TrackedTarget.java b/photon-core/src/main/java/org/photonvision/vision/target/TrackedTarget.java
index 9be1e9162c..2a28d05e56 100644
--- a/photon-core/src/main/java/org/photonvision/vision/target/TrackedTarget.java
+++ b/photon-core/src/main/java/org/photonvision/vision/target/TrackedTarget.java
@@ -18,10 +18,7 @@
import edu.wpi.first.apriltag.AprilTagDetection;
import edu.wpi.first.apriltag.AprilTagPoseEstimate;
-import edu.wpi.first.math.VecBuilder;
-import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;
-import edu.wpi.first.math.geometry.Translation3d;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -147,25 +144,12 @@ public TrackedTarget(
// TODO implement skew? or just yeet
m_skew = 0;
-
- var tvec = new Mat(3, 1, CvType.CV_64FC1);
- tvec.put(
- 0,
- 0,
- new double[] {
- bestPose.getTranslation().getX(),
- bestPose.getTranslation().getY(),
- bestPose.getTranslation().getZ()
- });
- setCameraRelativeTvec(tvec);
-
- // Opencv expects a 3d vector with norm = angle and direction = axis
- var rvec = new Mat(3, 1, CvType.CV_64FC1);
- MathUtils.rotationToOpencvRvec(bestPose.getRotation(), rvec);
- setCameraRelativeRvec(rvec);
}
- public TrackedTarget(ArucoDetectionResult result, TargetCalculationParameters params) {
+ public TrackedTarget(
+ ArucoDetectionResult result,
+ AprilTagPoseEstimate tagPose,
+ TargetCalculationParameters params) {
m_targetOffsetPoint = new Point(result.getCenterX(), result.getCenterY());
m_robotOffsetPoint = new Point();
var yawPitch =
@@ -179,8 +163,8 @@ public TrackedTarget(ArucoDetectionResult result, TargetCalculationParameters pa
m_yaw = yawPitch.getFirst();
m_pitch = yawPitch.getSecond();
- double[] xCorners = result.getxCorners();
- double[] yCorners = result.getyCorners();
+ double[] xCorners = result.getXCorners();
+ double[] yCorners = result.getYCorners();
Point[] cornerPoints =
new Point[] {
@@ -198,25 +182,38 @@ public TrackedTarget(ArucoDetectionResult result, TargetCalculationParameters pa
m_shape = null;
// TODO implement skew? or just yeet
+ m_skew = 0;
- var tvec = new Mat(3, 1, CvType.CV_64FC1);
- tvec.put(0, 0, result.getTvec());
- setCameraRelativeTvec(tvec);
+ var bestPose = new Transform3d();
+ var altPose = new Transform3d();
+ if (tagPose != null) {
+ if (tagPose.error1 <= tagPose.error2) {
+ bestPose = tagPose.pose1;
+ altPose = tagPose.pose2;
+ } else {
+ bestPose = tagPose.pose2;
+ altPose = tagPose.pose1;
+ }
- var rvec = new Mat(3, 1, CvType.CV_64FC1);
- rvec.put(0, 0, result.getRvec());
- setCameraRelativeRvec(rvec);
+ m_bestCameraToTarget3d = bestPose;
+ m_altCameraToTarget3d = altPose;
+
+ m_poseAmbiguity = tagPose.getAmbiguity();
- {
- Translation3d translation =
- // new Translation3d(tVec.get(0, 0)[0], tVec.get(1, 0)[0], tVec.get(2, 0)[0]);
- new Translation3d(result.getTvec()[0], result.getTvec()[1], result.getTvec()[2]);
- var axisangle =
- VecBuilder.fill(result.getRvec()[0], result.getRvec()[1], result.getRvec()[2]);
- Rotation3d rotation = new Rotation3d(axisangle, axisangle.normF());
+ var tvec = new Mat(3, 1, CvType.CV_64FC1);
+ tvec.put(
+ 0,
+ 0,
+ new double[] {
+ bestPose.getTranslation().getX(),
+ bestPose.getTranslation().getY(),
+ bestPose.getTranslation().getZ()
+ });
+ setCameraRelativeTvec(tvec);
- m_bestCameraToTarget3d =
- MathUtils.convertOpenCVtoPhotonTransform(new Transform3d(translation, rotation));
+ var rvec = new Mat(3, 1, CvType.CV_64FC1);
+ MathUtils.rotationToOpencvRvec(bestPose.getRotation(), rvec);
+ setCameraRelativeRvec(rvec);
}
}
@@ -410,8 +407,8 @@ public static List simpleFromTrackedTargets(List