spotless
amquake committed Aug 15, 2023
1 parent bb1b8a4 commit a084abc
Showing 7 changed files with 89 additions and 82 deletions.
@@ -193,7 +193,7 @@ public static List<TargetCorner> pointsToCorners(MatOfPoint2f matInput) {
var corners = new ArrayList<TargetCorner>();
float[] data = new float[(int) matInput.total() * matInput.channels()];
matInput.get(0, 0, data);
for (int i = 0; i < (int)matInput.total(); i++) {
for (int i = 0; i < (int) matInput.total(); i++) {
corners.add(new TargetCorner(data[0 + 2 * i], data[1 + 2 * i]));
}
return corners;
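For reference, the loop above walks OpenCV's interleaved point buffer: a MatOfPoint2f stores its points as [x0, y0, x1, y1, ...]. A minimal round-trip sketch (assuming OpenCV natives are loaded and OpenCVHelp, MatOfPoint2f, and Point are imported):

// two pixels in, two TargetCorners out
var mat = new MatOfPoint2f(new Point(10, 20), new Point(30, 40));
var corners = OpenCVHelp.pointsToCorners(mat);
// corners.get(0) is (10, 20); corners.get(1) is (30, 40)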
@@ -307,9 +307,9 @@ public static Point[] projectPoints(
/**
* Undistort 2d image points using a given camera's intrinsics and distortion.
*
* <p>2d image points from {@link #projectPoints(Matrix, Matrix, RotTrlTransform3d, List) projectPoints()} will
* naturally be distorted, so this operation is important if the image points need to be directly
* used (e.g. 2d yaw/pitch).
* <p>2d image points from {@link #projectPoints(Matrix, Matrix, RotTrlTransform3d, List)
* projectPoints()} will naturally be distorted, so this operation is important if the image
* points need to be directly used (e.g. 2d yaw/pitch).
*
* @param cameraMatrix The camera intrinsics matrix in standard opencv form
* @param distCoeffs The camera distortion matrix in standard opencv form
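A minimal sketch of the project-then-undistort flow described above, using only calls that appear elsewhere in this commit (prop, camRt, and fieldCorners are assumed to be a SimCameraProperties, a RotTrlTransform3d, and a List<Translation3d> set up beforehand):

// project 3d field points into the (naturally distorted) 2d image
Point[] imagePoints =
    OpenCVHelp.projectPoints(prop.getIntrinsics(), prop.getDistCoeffs(), camRt, fieldCorners);
// undistort before using the points directly, e.g. for 2d yaw/pitch
imagePoints = OpenCVHelp.undistortPoints(prop.getIntrinsics(), prop.getDistCoeffs(), imagePoints);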
@@ -412,8 +412,8 @@ public static Point[] getConvexHull(Point[] points) {
* <li>Point 3: [0, -squareLength / 2, -squareLength / 2]
* </ul>
*
* @param imagePoints The projection of these 3d object points into the 2d camera image. The
* order should match the given object point translations.
* @param imagePoints The projection of these 3d object points into the 2d camera image. The order
* should match the given object point translations.
* @return The resulting transformation that maps the camera pose to the target pose and the
* ambiguity if an alternate solution is available.
*/
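A hedged usage sketch for the square-target solver documented above; its full signature is cut off by this diff, so the parameter order (intrinsics, distortion, the four model corner translations, image points) and the PNPResults fields (isPresent, best, alt, ambiguity) are assumptions here:

// hypothetical call; squareCorners holds the Point 0..3 translations listed above
PNPResults pnp = OpenCVHelp.solvePNP_SQUARE(
    prop.getIntrinsics(), prop.getDistCoeffs(), squareCorners, imagePoints);
if (pnp.isPresent) {
  var camToTarget = pnp.best; // check pnp.ambiguity before preferring this over pnp.alt
}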
@@ -519,8 +519,8 @@ public static PNPResults solvePNP_SQUARE(
* @param cameraMatrix The camera intrinsics matrix in standard opencv form
* @param distCoeffs The camera distortion matrix in standard opencv form
* @param objectTrls The translations of the object corners, relative to the field.
* @param imagePoints The projection of these 3d object points into the 2d camera image. The
* order should match the given object point translations.
* @param imagePoints The projection of these 3d object points into the 2d camera image. The order
* should match the given object point translations.
* @return The resulting transformation that maps the camera pose to the target pose. If the 3d
* model points are supplied relative to the origin, this transformation brings the camera to
* the origin.
@@ -71,8 +71,7 @@ public TargetModel(double widthMeters, double heightMeters) {
}

/**
* Creates a cuboid target model given the length, width, height. The model has eight
* vertices:
* Creates a cuboid target model given the length, width, height. The model has eight vertices:
*
* <ul>
* <li>Point 0: [length/2, -width/2, -height/2]
@@ -86,16 +85,16 @@ public TargetModel(double widthMeters, double heightMeters) {
* </ul>
*/
public TargetModel(double lengthMeters, double widthMeters, double heightMeters) {
this(List.of(
new Translation3d(lengthMeters / 2.0, -widthMeters / 2.0, -heightMeters / 2.0),
new Translation3d(lengthMeters / 2.0, widthMeters / 2.0, -heightMeters / 2.0),
new Translation3d(lengthMeters / 2.0, widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(lengthMeters / 2.0, -widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, -widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, widthMeters / 2.0, -heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, -widthMeters / 2.0, -heightMeters / 2.0)
));
this(
List.of(
new Translation3d(lengthMeters / 2.0, -widthMeters / 2.0, -heightMeters / 2.0),
new Translation3d(lengthMeters / 2.0, widthMeters / 2.0, -heightMeters / 2.0),
new Translation3d(lengthMeters / 2.0, widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(lengthMeters / 2.0, -widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, -widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, widthMeters / 2.0, heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, widthMeters / 2.0, -heightMeters / 2.0),
new Translation3d(-lengthMeters / 2.0, -widthMeters / 2.0, -heightMeters / 2.0)));
}
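A quick usage example of this constructor, e.g. a roughly 14 x 14 x 2 inch target, converting with WPILib's edu.wpi.first.math.util.Units:

var cuboidModel =
    new TargetModel(Units.inchesToMeters(14), Units.inchesToMeters(14), Units.inchesToMeters(2));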

/**
@@ -111,8 +110,8 @@ public TargetModel(double lengthMeters, double widthMeters, double heightMeters)
*
* <i>Q: Why these vertices?</i> A: This target should be oriented to the camera every frame, much
* like a sprite/decal, and these vertices represent the ellipse vertices (maxima). These vertices
* are used for drawing the image of this sphere, but do not match the corners that will be published
* by photonvision.
* are used for drawing the image of this sphere, but do not match the corners that will be
* published by photonvision.
*/
public TargetModel(double diameterMeters) {
double radius = diameterMeters / 2.0;
@@ -35,7 +35,6 @@
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

import org.opencv.core.Point;
import org.photonvision.targeting.PhotonTrackedTarget;
import org.photonvision.targeting.TargetCorner;
@@ -68,7 +67,7 @@ public static List<AprilTag> getVisibleLayoutTags(
*
* @param cameraMatrix The camera intrinsics matrix in standard opencv form
* @param distCoeffs The camera distortion matrix in standard opencv form
* @param visTags The visible tags reported by PV. Non-tag targets are automatically excluded.
* @param visTags The visible tags reported by PV. Non-tag targets are automatically excluded.
* @param tagLayout The known tag layout on the field
* @return The transformation that maps the field origin to the camera pose. Ensure the {@link
* PNPResults} are present before utilizing them.
@@ -90,10 +89,13 @@ public static PNPResults estimateCamPosePNP(
// ensure these are AprilTags in our layout
for (var tgt : visTags) {
int id = tgt.getFiducialId();
tagLayout.getTagPose(id).ifPresent(pose -> {
knownTags.add(new AprilTag(id, pose));
corners.addAll(tgt.getDetectedCorners());
});
tagLayout
.getTagPose(id)
.ifPresent(
pose -> {
knownTags.add(new AprilTag(id, pose));
corners.addAll(tgt.getDetectedCorners());
});
}
if (knownTags.size() == 0 || corners.size() == 0 || corners.size() % 4 != 0) {
return new PNPResults();
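A sketch of feeding this estimator from a pipeline result, assuming the parameter order given in the Javadoc and that PNPResults exposes isPresent and best:

// visTags can come straight from PhotonCamera; non-tag targets are excluded above
var pnp =
    VisionEstimation.estimateCamPosePNP(cameraMatrix, distCoeffs, result.getTargets(), tagLayout);
if (pnp.isPresent) {
  // pnp.best maps the field origin to the camera pose
  Pose3d cameraPose = new Pose3d().plus(pnp.best);
}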
@@ -40,7 +40,6 @@
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.RotatedRect;
import org.opencv.core.Scalar;
@@ -56,7 +55,6 @@
import org.photonvision.estimation.TargetModel;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;
import org.photonvision.targeting.TargetCorner;

/**
* A handle for simulating {@link PhotonCamera} values. Processing simulated targets through this
@@ -369,43 +367,45 @@ public PhotonPipelineResult process(
var fieldCorners = tgt.getFieldVertices();
if (tgt.getModel().isSpherical) { // target is spherical
var model = tgt.getModel();
// orient the model to the camera (like a sprite/decal) so it appears similar regardless of view
// orient the model to the camera (like a sprite/decal) so it appears similar regardless of
// view
fieldCorners =
model.getFieldVertices(
TargetModel.getOrientedPose(tgt.getPose().getTranslation(), cameraPose.getTranslation()));
TargetModel.getOrientedPose(
tgt.getPose().getTranslation(), cameraPose.getTranslation()));
}
// project 3d target points into 2d image points
var imagePoints =
OpenCVHelp.projectPoints(prop.getIntrinsics(), prop.getDistCoeffs(), camRt, fieldCorners);
// spherical targets need a rotated rectangle of their midpoints for visualization
if(tgt.getModel().isSpherical) {
if (tgt.getModel().isSpherical) {
var center = OpenCVHelp.avgPoint(imagePoints);
int l = 0, t, b, r = 0;
// reference point (left side midpoint)
for(int i = 1; i < 4; i++) {
if(imagePoints[i].x < imagePoints[l].x) l = i;
for (int i = 1; i < 4; i++) {
if (imagePoints[i].x < imagePoints[l].x) l = i;
}
var lc = imagePoints[l];
// determine top, right, bottom midpoints
double[] angles = new double[4];
t = (l+1) % 4;
b = (l+1) % 4;
for(int i = 0; i < 4; i++) {
if(i == l) continue;
t = (l + 1) % 4;
b = (l + 1) % 4;
for (int i = 0; i < 4; i++) {
if (i == l) continue;
var ic = imagePoints[i];
angles[i] = Math.atan2(lc.y-ic.y, ic.x-lc.x);
if(angles[i] >= angles[t]) t = i;
if(angles[i] <= angles[b]) b = i;
angles[i] = Math.atan2(lc.y - ic.y, ic.x - lc.x);
if (angles[i] >= angles[t]) t = i;
if (angles[i] <= angles[b]) b = i;
}
for(int i = 0; i < 4; i++) {
if(i != t && i != l && i != b) r = i;
for (int i = 0; i < 4; i++) {
if (i != t && i != l && i != b) r = i;
}
// create RotatedRect from midpoints
var rect = new RotatedRect(
new Point(center.x, center.y),
new Size(imagePoints[r].x - lc.x, imagePoints[b].y - imagePoints[t].y),
Math.toDegrees(-angles[r])
);
var rect =
new RotatedRect(
new Point(center.x, center.y),
new Size(imagePoints[r].x - lc.x, imagePoints[b].y - imagePoints[t].y),
Math.toDegrees(-angles[r]));
// set target corners to rect corners
Point[] points = new Point[4];
rect.points(points);
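In short, the block above picks the leftmost midpoint as a reference, sorts the remaining three midpoints into top, bottom, and right by their angle from that reference, and then sizes the RotatedRect from the left-right width and top-bottom height with the right-midpoint angle as its rotation, so the sphere's projected ellipse keeps a sensible orientation on screen.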
@@ -473,8 +473,7 @@ public PhotonPipelineResult process(
var corn = pair.getSecond();

if (tgt.fiducialID >= 0) { // apriltags
VideoSimUtil.warp16h5TagImage(
tgt.fiducialID, corn, true, videoSimFrameRaw);
VideoSimUtil.warp16h5TagImage(tgt.fiducialID, corn, true, videoSimFrameRaw);
} else if (!tgt.getModel().isSpherical) { // non-spherical targets
var contour = corn;
if (!tgt.getModel()
@@ -514,12 +513,11 @@ public PhotonPipelineResult process(
Imgproc.LINE_AA);

VideoSimUtil.drawPoly(
OpenCVHelp.cornersToPoints(tgt.getDetectedCorners()),
(int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
new Scalar(255, 20, 20),
true,
videoSimFrameProcessed
);
OpenCVHelp.cornersToPoints(tgt.getDetectedCorners()),
(int) VideoSimUtil.getScaledThickness(1, videoSimFrameProcessed),
new Scalar(255, 20, 20),
true,
videoSimFrameProcessed);
}
}
videoSimProcessed.putFrame(videoSimFrameProcessed);
@@ -322,7 +322,9 @@ public SimCameraProperties copy() {
* @param points Points of the contour
*/
public double getContourAreaPercent(Point[] points) {
return Imgproc.contourArea(new MatOfPoint2f(OpenCVHelp.getConvexHull(points))) / getResArea() * 100;
return Imgproc.contourArea(new MatOfPoint2f(OpenCVHelp.getConvexHull(points)))
/ getResArea()
* 100;
}
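As a quick sanity check of the expression above: at 640x480 the frame area is 307,200 px², so a convex hull covering 3,072 px² returns 1.0 (percent).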

/** The yaw from the principal point of this camera to the pixel x value. Positive values left. */
@@ -563,13 +565,13 @@ public Point[] estPixelNoise(Point[] points) {
if (avgErrorPx == 0 && errorStdDevPx == 0) return points;

Point[] noisyPts = new Point[points.length];
for(int i = 0; i < points.length; i++) {
for (int i = 0; i < points.length; i++) {
var p = points[i];
// error pixels in random direction
double error = avgErrorPx + rand.nextGaussian() * errorStdDevPx;
double errorAngle = rand.nextDouble() * 2 * Math.PI - Math.PI;
noisyPts[i] = new Point(
p.x + error * Math.cos(errorAngle), p.y + error * Math.sin(errorAngle));
noisyPts[i] =
new Point(p.x + error * Math.cos(errorAngle), p.y + error * Math.sin(errorAngle));
}
return noisyPts;
}
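Note the noise model in this loop: the error magnitude is Gaussian around avgErrorPx while the direction is uniform over [-pi, pi), so each point is displaced isotropically rather than with independent x/y noise.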
@@ -317,8 +317,8 @@ public static double getScaledThickness(double thickness480p, Mat destinationImg
/**
* Draw a filled ellipse in the destination image.
*
* @param dstPoints The points in the destination image representing the rectangle in which
* the ellipse is inscribed.
* @param dstPoints The points in the destination image representing the rectangle in which the
* ellipse is inscribed.
* @param color The color of the ellipse. This is a scalar with BGR values (0-255)
* @param destination The destination image to draw onto. The image should be in the BGR color
* space.
@@ -474,8 +474,8 @@ private static List<List<Translation3d>> getFieldFloorLines(int subdivisions) {
}

/**
* Convert 3D lines represented by the given series of translations into a polygon(s) in the camera's
* image.
* Convert 3D lines represented by the given series of translations into a polygon(s) in the
* camera's image.
*
* @param camRt The change in basis from world coordinates to camera coordinates. See {@link
* RotTrlTransform3d#makeRelativeTo(Pose3d)}.
@@ -518,8 +518,10 @@ public static List<Point[]> polyFrom3dLines(

// project points into 2d
var poly = new ArrayList<Point>();
poly.addAll(Arrays.asList(OpenCVHelp.projectPoints(
prop.getIntrinsics(), prop.getDistCoeffs(), camRt, List.of(pta, ptb))));
poly.addAll(
Arrays.asList(
OpenCVHelp.projectPoints(
prop.getIntrinsics(), prop.getDistCoeffs(), camRt, List.of(pta, ptb))));
var pxa = poly.get(0);
var pxb = poly.get(1);

@@ -533,7 +535,10 @@ }
}
if (subPts.size() > 0) {
poly.addAll(
1, Arrays.asList(OpenCVHelp.projectPoints(prop.getIntrinsics(), prop.getDistCoeffs(), camRt, subPts)));
1,
Arrays.asList(
OpenCVHelp.projectPoints(
prop.getIntrinsics(), prop.getDistCoeffs(), camRt, subPts)));
}

polyPointList.add(poly.toArray(Point[]::new));
@@ -572,24 +577,24 @@ public static void drawFieldWireframe(
Mat destination) {
for (var trls : getFieldFloorLines(floorSubdivisions)) {
var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination);
for(var poly : polys) {
for (var poly : polys) {
drawPoly(
poly,
(int) Math.round(getScaledThickness(floorThickness, destination)),
floorColor,
false,
destination);
poly,
(int) Math.round(getScaledThickness(floorThickness, destination)),
floorColor,
false,
destination);
}
}
for (var trls : getFieldWallLines()) {
var polys = VideoSimUtil.polyFrom3dLines(camRt, prop, trls, resolution, false, destination);
for(var poly : polys) {
for (var poly : polys) {
drawPoly(
poly,
(int) Math.round(getScaledThickness(wallThickness, destination)),
wallColor,
false,
destination);
poly,
(int) Math.round(getScaledThickness(wallThickness, destination)),
wallColor,
false,
destination);
}
}
}
5 changes: 3 additions & 2 deletions photon-lib/src/test/java/org/photonvision/OpenCVTest.java
@@ -159,8 +159,9 @@ public void testProjection() {
assertTrue(circulation > 0, "2d fiducial points aren't counter-clockwise");

// undo projection distortion
imagePoints = OpenCVHelp.undistortPoints(prop.getIntrinsics(), prop.getDistCoeffs(), imagePoints);

imagePoints =
OpenCVHelp.undistortPoints(prop.getIntrinsics(), prop.getDistCoeffs(), imagePoints);

// test projection results after moving camera
var avgCenterRot1 = prop.getPixelRot(OpenCVHelp.avgPoint(imagePoints));
cameraPose =