diff --git a/src/main/java/frc/robot/vision/PhotonRunnable.java b/src/main/java/frc/robot/vision/PhotonRunnable.java
new file mode 100644
index 0000000..3b5b417
--- /dev/null
+++ b/src/main/java/frc/robot/vision/PhotonRunnable.java
@@ -0,0 +1,233 @@
+package frc.robot.vision;
+
+import static edu.wpi.first.units.Units.Meters;
+import static org.photonvision.PhotonPoseEstimator.PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR;
+
+import static frc.robot.vision.VisionConfig.*;
+
+import edu.wpi.first.apriltag.AprilTagFieldLayout;
+import edu.wpi.first.math.Matrix;
+import edu.wpi.first.math.VecBuilder;
+import edu.wpi.first.math.geometry.Pose2d;
+import edu.wpi.first.math.geometry.Pose3d;
+import edu.wpi.first.math.geometry.Transform3d;
+import edu.wpi.first.math.numbers.N1;
+import edu.wpi.first.math.numbers.N3;
+import edu.wpi.first.networktables.NetworkTableInstance;
+import edu.wpi.first.networktables.PubSubOption;
+import edu.wpi.first.networktables.RawSubscriber;
+import edu.wpi.first.networktables.StructArrayPublisher;
+import edu.wpi.first.util.WPIUtilJNI;
+import edu.wpi.first.wpilibj.DriverStation;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Supplier;
+import org.photonvision.PhotonCamera;
+import org.photonvision.PhotonPoseEstimator;
+import org.photonvision.targeting.PhotonPipelineResult;
+import org.photonvision.targeting.PhotonTrackedTarget;
+
+/** Runnable that gets AprilTag data from PhotonVision. */
+public class PhotonRunnable implements Runnable {
+
+  // Array of photon pose estimators for photon cameras
+  private final PhotonPoseEstimator[] photonPoseEstimators;
+  // Array of subscribers for camera subtables
+  private final RawSubscriber[] rawBytesSubscribers;
+  // Array of subscriber wait handles for camera subtables, used to look up the camera index from a wait handle
+  private final int[] waitHandles;
+  // Array of PhotonCameras for reading results
+  private final PhotonCamera[] photonCameras;
+
+  // Consumer of pose estimates
+  private final AddVisionMeasurement poseConsumer;
+
+  private final Supplier<Pose2d> poseSupplier;
+
+  @SuppressWarnings("unchecked")
+  private final StructArrayPublisher<Pose3d>[] aprilTagPublishers = new StructArrayPublisher[2];
+
+  public PhotonRunnable(
+      String[] cameraNames,
+      Transform3d[] robotToCameras,
+      AddVisionMeasurement poseConsumer,
+      Supplier<Pose2d> poseSupplier) {
+    this.poseConsumer = poseConsumer;
+    this.poseSupplier = poseSupplier;
+
+    // NT publishers to send data to AdvantageScope
+    for (int i = 0; i < cameraNames.length; i++) {
+      aprilTagPublishers[i] = NetworkTableInstance.getDefault()
+          .getStructArrayTopic("AprilTags-" + cameraNames[i], Pose3d.struct)
+          .publish();
+    }
+
+    rawBytesSubscribers = new RawSubscriber[cameraNames.length];
+    photonCameras = new PhotonCamera[cameraNames.length];
+    photonPoseEstimators = new PhotonPoseEstimator[cameraNames.length];
+    waitHandles = new int[cameraNames.length];
+
+    AprilTagFieldLayout aprilTagFieldLayout;
+    try {
+      aprilTagFieldLayout = AprilTagFieldLayout
+          .loadFromResource("/frc/robot/vision/2025-reefscape-welded-reef-only.json");
+
+      for (int i = 0; i < cameraNames.length; i++) {
+        var cameraTable = NetworkTableInstance.getDefault().getTable("photonvision").getSubTable(cameraNames[i]);
+        rawBytesSubscribers[i] = cameraTable.getRawTopic("rawBytes")
+            .subscribe(
+                PhotonPipelineResult.photonStruct.getTypeString(),
+                new byte[] {},
+                PubSubOption.periodic(0.01),
+                PubSubOption.sendAll(true),
+                PubSubOption.pollStorage(20));
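+        // Keep the raw-bytes handle so run() can block until this camera publishes a new result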
+        waitHandles[i] = rawBytesSubscribers[i].getHandle();
+        photonPoseEstimators[i] = new PhotonPoseEstimator(
+            aprilTagFieldLayout,
+            MULTI_TAG_PNP_ON_COPROCESSOR,
+            robotToCameras[i]);
+        photonCameras[i] = new PhotonCamera(cameraNames[i]);
+      }
+    } catch (IOException e) {
+      DriverStation.reportError("Failed to load AprilTag field layout", true);
+    }
+  }
+
+  @Override
+  public void run() {
+    var emptyAprilTagArray = new Pose3d[0];
+    while (!Thread.interrupted()) {
+      // Block the thread until new data comes in from PhotonVision
+      int[] signaledHandles = null;
+      try {
+        signaledHandles = WPIUtilJNI.waitForObjects(waitHandles);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        continue; // let the loop condition see the interrupt instead of using the null handle array
+      }
+
+      var currentRobotPose = poseSupplier.get();
+      for (int i = 0; i < signaledHandles.length; i++) {
+        int cameraIndex = getCameraIndex(signaledHandles[i]);
+        var aprilTagPublisher = aprilTagPublishers[cameraIndex];
+        var photonPoseEstimator = photonPoseEstimators[cameraIndex];
+        var photonCamera = photonCameras[cameraIndex];
+
+        // Get AprilTag data
+        var photonResults = photonCamera.getAllUnreadResults();
+        photonResults.forEach(photonResult -> {
+          if (photonResult.hasTargets() && (photonResult.targets.size() > 1
+              || (photonResult.targets.get(0).getPoseAmbiguity() < AMBIGUITY_THRESHOLD))) {
+
+            // Send the AprilTag(s) to NT for AdvantageScope
+            aprilTagPublisher.accept(
+                photonResult.targets.stream()
+                    .map(
+                        target -> getTargetPose(
+                            target,
+                            currentRobotPose,
+                            photonPoseEstimator.getRobotToCameraTransform()))
+                    .toArray(Pose3d[]::new));
+
+            photonPoseEstimator.update(photonResult).ifPresent(estimatedRobotPose -> {
+              var estimatedPose = estimatedRobotPose.estimatedPose;
+              // Make sure the measurement is on the field
+              if (estimatedPose.getX() > 0.0 && estimatedPose.getX() <= FIELD_LENGTH.in(Meters)
+                  && estimatedPose.getY() > 0.0 && estimatedPose.getY() <= FIELD_WIDTH.in(Meters)) {
+
+                var stdDevs = getEstimationStdDevs(
+                    estimatedPose.toPose2d(),
+                    photonResult.getTargets(),
+                    photonPoseEstimator.getFieldTags());
+                poseConsumer
+                    .addVisionMeasurement(estimatedPose.toPose2d(), estimatedRobotPose.timestampSeconds, stdDevs);
+              }
+            });
+          } else {
+            // No usable tags, send an empty array to NT
+            aprilTagPublisher.accept(emptyAprilTagArray);
+          }
+        });
+      }
+    }
+    Arrays.stream(rawBytesSubscribers).forEach(RawSubscriber::close);
+    Arrays.stream(aprilTagPublishers).forEach(StructArrayPublisher::close);
+  }
+
+  /**
+   * Transform a target from PhotonVision to a pose on the field
+   *
+   * @param target target data from PhotonVision
+   * @param robotPose current pose of the robot
+   * @param robotToCamera transform from robot to the camera that saw the target
+   * @return the pose of the AprilTag on the field
+   */
+  private static Pose3d getTargetPose(PhotonTrackedTarget target, Pose2d robotPose, Transform3d robotToCamera) {
+    return new Pose3d(robotPose).transformBy(robotToCamera).transformBy(target.getBestCameraToTarget());
+  }
+
+  /**
+   * Find the camera index for a table wait handle
+   *
+   * @param signaledHandle handle
+   * @return index, or -1 if not found
+   */
+  private int getCameraIndex(int signaledHandle) {
+    for (int i = 0; i < waitHandles.length; i++) {
+      if (waitHandles[i] == signaledHandle) {
+        return i;
+      }
+    }
+    return -1;
+  }
+
+  /**
+   * Scales the standard deviation based on the number of targets and their distance.
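+   * Multiple visible tags switch to the multi-tag baseline, a single tag farther than
+   * SINGLE_TAG_DISTANCE_THRESHOLD is effectively rejected, and otherwise the baseline is
+   * scaled by 1 + (avgDist^2 / 30).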
+   *
+   * @param estimatedPose estimated pose
+   * @param targets targets from PhotonVision
+   * @param fieldLayout tag poses
+   * @return standard deviations to use for the vision measurement
+   */
+  private static Matrix<N3, N1> getEstimationStdDevs(
+      Pose2d estimatedPose,
+      List<PhotonTrackedTarget> targets,
+      AprilTagFieldLayout fieldLayout) {
+
+    var estStdDevs = SINGLE_TAG_STD_DEVS;
+    int numTags = 0;
+    double avgDist = 0;
+    for (var tgt : targets) {
+      var tagPose = fieldLayout.getTagPose(tgt.getFiducialId());
+      if (tagPose.isEmpty())
+        continue;
+      numTags++;
+      avgDist += tagPose.get().toPose2d().getTranslation().getDistance(estimatedPose.getTranslation());
+    }
+
+    if (numTags == 0) {
+      return estStdDevs;
+    }
+    avgDist /= numTags;
+
+    // Decrease std devs if multiple targets are visible
+    if (numTags > 1) {
+      estStdDevs = MULTI_TAG_STD_DEVS;
+    }
+
+    // Increase std devs based on (average) distance
+    if (numTags == 1 && avgDist > SINGLE_TAG_DISTANCE_THRESHOLD.in(Meters)) {
+      estStdDevs = VecBuilder.fill(Double.MAX_VALUE, Double.MAX_VALUE, Double.MAX_VALUE);
+    } else {
+      estStdDevs = estStdDevs.times(1 + (avgDist * avgDist / 30));
+    }
+    return estStdDevs;
+  }
+
+  @FunctionalInterface
+  public interface AddVisionMeasurement {
+    void addVisionMeasurement(
+        Pose2d visionRobotPoseMeters,
+        double timestampSeconds,
+        Matrix<N3, N1> visionMeasurementStdDevs);
+  }
+}
\ No newline at end of file
diff --git a/src/main/java/frc/robot/vision/SwerveVisionEstimator.java b/src/main/java/frc/robot/vision/SwerveVisionEstimator.java
deleted file mode 100644
index 2f7e5ba..0000000
--- a/src/main/java/frc/robot/vision/SwerveVisionEstimator.java
+++ /dev/null
@@ -1,98 +0,0 @@
-package frc.robot.vision;
-
-import static frc.robot.vision.VisionConfig.camNames;
-import static frc.robot.vision.VisionConfig.robotToCamTransforms;
-
-import java.util.Arrays;
-import java.util.function.Supplier;
-
-import com.ctre.phoenix6.swerve.SwerveRequest;
-
-import edu.wpi.first.math.estimator.SwerveDrivePoseEstimator;
-import edu.wpi.first.math.geometry.Pose2d;
-import edu.wpi.first.math.geometry.Rotation2d;
-import edu.wpi.first.math.kinematics.SwerveModulePosition;
-import edu.wpi.first.wpilibj.smartdashboard.Field2d;
-import edu.wpi.first.wpilibj.smartdashboard.SmartDashboard;
-import edu.wpi.first.wpilibj2.command.SubsystemBase;
-import frc.robot.drivetrain.CommandSwerveDrivetrain;
-import frc.robot.drivetrain.TunerConstants;
-
-
-public class SwerveVisionEstimator extends SubsystemBase{
-
-    private final Vision[] visions;
-    private static SwerveVisionEstimator mInstance;
-    private final Supplier<SwerveModulePosition[]> modSupplier;
-    private final Supplier<Rotation2d> rotationSupplier;
-    private final SwerveDrivePoseEstimator poseEstimator;
-    private final Field2d field2d = new Field2d();
-
-
-    public SwerveVisionEstimator(
-        Supplier<Rotation2d> rotSup, Supplier<SwerveModulePosition[]> modSup){
-        this.modSupplier = modSup;
-        this.rotationSupplier = rotSup;
-
-        poseEstimator = new SwerveDrivePoseEstimator(
-            CommandSwerveDrivetrain.getInstance().getKinematics(),
-            rotationSupplier.get(),
-            modSupplier.get(),
-            new Pose2d());
-
-        visions = new Vision[camNames.length];
-        for (int i = 0; i < camNames.length; i++){
-            visions[i] = new Vision(camNames[i], robotToCamTransforms[i]);
-        }
-    }
-
-    public static SwerveVisionEstimator getInstance(){
-        if (mInstance == null){
-            final var drivetrain = CommandSwerveDrivetrain.getInstance();
-            //mInstance = new SwerveVisionEstimator(drivetrain::getGyroYaw, drivetrain::getModulePositions);
-            mInstance = new SwerveVisionEstimator(() ->
-                Rotation2d.fromDegrees(drivetrain.mGyro.getYaw().getValueAsDouble()),
-                ()-> TunerConstants.K_SWERVE_MODULE_POSITIONS);
-        }
-        return mInstance;
-    }
-
-    public Pose2d getCurrrentPose(){
-        return poseEstimator.getEstimatedPosition();
-    }
-
-    public void estimatorUpdate(){
-        Arrays.stream(visions).forEach(vision -> {
-            var visionEst = vision.getEstimatedGlobalPose();
-
-            visionEst.ifPresent(est -> {
-                var estStdDevs = vision.getEstimationStdDevs();
-
-                poseEstimator.addVisionMeasurement(est.estimatedPose.toPose2d(), est.timestampSeconds, estStdDevs);
-            });
-        });
-    }
-
-    private String getFormattedPose(){
-        var pose = getCurrrentPose();
-        return String.format("(%.3f, %.3f) %.2f degrees",
-            pose.getX(),
-            pose.getY(),
-            pose.getRotation().getDegrees());
-    }
-
-    @Override
-    public void periodic(){
-        // //Update swerve pose estimator using drivetrain
-        poseEstimator.update(rotationSupplier.get(), modSupplier.get());
-        //Add vision measurements to swerve pose estimator
-        estimatorUpdate();
-
-        //log to dashboard
-        var dashboardPose = poseEstimator.getEstimatedPosition();
-        SmartDashboard.putString("Estimated Pose", getFormattedPose());
-        SmartDashboard.putData(field2d);
-        field2d.setRobotPose(getCurrrentPose());
-        // }
-    }
-}
\ No newline at end of file
diff --git a/src/main/java/frc/robot/vision/Vision.java b/src/main/java/frc/robot/vision/Vision.java
deleted file mode 100644
index e3c2af9..0000000
--- a/src/main/java/frc/robot/vision/Vision.java
+++ /dev/null
@@ -1,137 +0,0 @@
-package frc.robot.vision;
-
-import static frc.robot.vision.VisionConfig.multiTagStdDevs;
-import static frc.robot.vision.VisionConfig.singleTagStdDevs;
-import static frc.robot.vision.VisionConfig.tagLayout;
-
-import java.util.List;
-import java.util.Optional;
-
-import org.photonvision.EstimatedRobotPose;
-import org.photonvision.PhotonCamera;
-import org.photonvision.PhotonPoseEstimator;
-import org.photonvision.PhotonPoseEstimator.PoseStrategy;
-import org.photonvision.targeting.PhotonTrackedTarget;
-
-import edu.wpi.first.math.Matrix;
-import edu.wpi.first.math.VecBuilder;
-import edu.wpi.first.math.geometry.Transform3d;
-import edu.wpi.first.math.numbers.N1;
-import edu.wpi.first.math.numbers.N3;
-import frc.robot.vision.Vision;
-import frc.robot.vision.VisionConfig.*;
-
-/*
- * This code is a modified version of Photonvision's own example and 7028's vision code:
- * https://github.com/PhotonVision/photonvision/blob/main/photonlib-java-examples/poseest
- * https://github.com/STMARobotics/frc-7028-2025
- */
-
-public class Vision {
-
-    private final PhotonCamera cam;
-    private final PhotonPoseEstimator photonEstimator;
-    private Matrix<N3, N1> curStdDevs;
-    private static Vision[] mInstance;
-
-    //Construct Vision Instance
-    public Vision(String cameraName, Transform3d camTransform){
-        cam = new PhotonCamera(cameraName);
-
-        photonEstimator = new PhotonPoseEstimator(
-            tagLayout,
-            PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR,
-            camTransform);
-        photonEstimator.setMultiTagFallbackStrategy(PoseStrategy.LOWEST_AMBIGUITY);
-    }
-
-    public static Vision[] getInstance() {
-        if (mInstance == null) {
-            mInstance = new Vision[VisionConfig.camNames.length];
-            for (int i = 0; i < VisionConfig.camNames.length; i++){
-                mInstance[i] = new Vision(VisionConfig.camNames[i], VisionConfig.robotToCamTransforms[i]);
-            }
-        }
-        return mInstance;
-    }
-
-    /**
-     * The latest estimated robot pose on the field from vision data. This may be empty. This should
-     * only be called once per loop.
-     *
-     * <p>
-     * Also includes updates for the standard deviations, which can (optionally) be retrieved with
-     * {@link getEstimationStdDevs}
-     *
-     * @return An {@link EstimatedRobotPose} with an estimated pose, estimate timestamp, and targets
-     *     used for estimation.
-     */
-    public Optional<EstimatedRobotPose> getEstimatedGlobalPose() {
-        Optional<EstimatedRobotPose> visionEst = Optional.empty();
-        for (var change : cam.getAllUnreadResults()) {
-            visionEst = photonEstimator.update(change);
-            updateEstimationStdDevs(visionEst, change.getTargets());
-        }
-        return visionEst;
-    }
-
-    /**
-     * Calculates new standard deviations. This algorithm is a heuristic that creates dynamic standard
-     * deviations based on number of tags, estimation strategy, and distance from the tags.
-     *
-     * @param estimatedPose The estimated pose to guess standard deviations for.
-     * @param targets All targets in this camera frame
-     */
-    private void updateEstimationStdDevs(Optional<EstimatedRobotPose> estimatedPose, List<PhotonTrackedTarget> targets){
-        if (estimatedPose.isEmpty()){
-            //No pose input. Default to single-tag std devs
-            curStdDevs = singleTagStdDevs;
-
-        } else {
-            //Pose present, start Heuristic
-            var estStdDevs = singleTagStdDevs;
-            int numTags = 0;
-            double avgDist = 0;
-
-            //Precalculation - see how many tags we found, and calculate an average-distance metric
-            for (var tgt : targets) {
-                var tagPose = photonEstimator.getFieldTags().getTagPose(tgt.getFiducialId());
-                if (tagPose.isEmpty()){
-                    continue;
-                }
-                numTags++;
-                avgDist += tagPose.get()
-                    .toPose2d()
-                    .getTranslation()
-                    .getDistance(estimatedPose.get().estimatedPose.toPose2d().getTranslation());
-            }
-
-            if (numTags == 0){
-                //No tags visible. Default to single-tag std devs
-                curStdDevs = singleTagStdDevs;
-            } else {
-                // One or more tags visible, run the full heuristic.
-                avgDist /= numTags;
-                // Decrease std devs if multiple targets are visible
-                if (numTags > 1)
-                    estStdDevs = multiTagStdDevs;
-                // Increase std devs based on (average) distance
-                if (numTags == 1 && avgDist > 4) {
-                    estStdDevs = VecBuilder.fill(Double.MAX_VALUE, Double.MAX_VALUE, Double.MAX_VALUE);
-                } else {
-                    estStdDevs = estStdDevs.times(1 + (avgDist * avgDist / 30));
-                }
-                curStdDevs = estStdDevs;
-            }
-        }
-    }
-
-    /**
-     * Returns the latest standard deviations of the estimated pose from {@link
-     * #getEstimatedGlobalPose()}, for use with {@link
-     * edu.wpi.first.math.estimator.SwerveDrivePoseEstimator SwerveDrivePoseEstimator}. This should
-     * only be used when there are targets visible.
-     */
-    public Matrix<N3, N1> getEstimationStdDevs() {
-        return curStdDevs;
-    }
-}
\ No newline at end of file
diff --git a/src/main/java/frc/robot/vision/VisionConfig.java b/src/main/java/frc/robot/vision/VisionConfig.java
index f5ba356..50a73b4 100644
--- a/src/main/java/frc/robot/vision/VisionConfig.java
+++ b/src/main/java/frc/robot/vision/VisionConfig.java
@@ -1,5 +1,7 @@
 package frc.robot.vision;
 
+import static edu.wpi.first.units.Units.Meters;
+
 import org.photonvision.PhotonCamera;
 
 import edu.wpi.first.apriltag.AprilTagFieldLayout;
@@ -13,16 +15,16 @@ import edu.wpi.first.math.numbers.N3;
 import edu.wpi.first.math.util.Units;
 
 import edu.wpi.first.units.Unit;
+import edu.wpi.first.units.measure.Distance;
 
 public class VisionConfig {
     // Creates camera names; ensure these all match with the correct camera on the Photonvison Dashboard
-    public static final int TOTAL_CAMERAS = 4;
-    public static final String[] camNames = new String[] {"Left_Cam", "Right_Cam"}; //TODO: add center cam and drive cam
+    public static final int TOTAL_CAMS = 2; //TODO: change to 4 when 4 cams are available
+    public static final String[] CAM_NAMES = new String[] {"Left_Cam", "Right_Cam"}; //TODO: add center cam and drive cam
 
     //Camera Positions
-    // TODO: config camera transforms
-    public static final Transform3d[] robotToCamTransforms = new Transform3d[] {
+    public static final Transform3d[] ROBOT_TO_CAM_TRANSFORMS = new Transform3d[] {
         //left cam
         new Transform3d(
             new Translation3d(Units.inchesToMeters(-12.436), Units.inchesToMeters(11.677), Units.inchesToMeters(7.413)),
@@ -34,12 +36,16 @@ public class VisionConfig {
     };
 
     // Creates field layout for AprilTags
-    public static final AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadField(AprilTagFields.k2025ReefscapeWelded);
+    public static AprilTagFieldLayout TAG_FIELD_LAYOUT = AprilTagFieldLayout.loadField(AprilTagFields.k2025ReefscapeWelded);
 
     // Standard deviation of vision poses, this helps with correction or something idk thats what photon said
     // TODO: experiment with standard deviation values and set them to whatever gives the most correct pose
-    public static final Matrix<N3, N1> singleTagStdDevs = VecBuilder.fill(4, 4, 8); // TODO: example values, change when testing
-    public static final Matrix<N3, N1> multiTagStdDevs = VecBuilder.fill(0.5, 0.5, 1); //TODO: change values when testing
+    public static final Matrix<N3, N1> SINGLE_TAG_STD_DEVS = VecBuilder.fill(4, 4, 8); // TODO: example values, change when testing
+    public static final Matrix<N3, N1> MULTI_TAG_STD_DEVS = VecBuilder.fill(0.5, 0.5, 1); //TODO: change values when testing
 
-
+    public static final double AMBIGUITY_THRESHOLD = 0.2;
+    public static final Distance SINGLE_TAG_DISTANCE_THRESHOLD = Meters.of(4.5);
+
+    public static final Distance FIELD_LENGTH = Meters.of(17.548);
+    public static final Distance FIELD_WIDTH = Meters.of(8.052);
 }
\ No newline at end of file
diff --git a/src/main/java/frc/robot/vision/VisionConsumer.java b/src/main/java/frc/robot/vision/VisionConsumer.java
deleted file mode 100644
index 6bd0019..0000000
--- a/src/main/java/frc/robot/vision/VisionConsumer.java
+++ /dev/null
@@ -1,15 +0,0 @@
-// package frc.robot.vision;
-
-// import edu.wpi.first.math.Matrix;
-// import edu.wpi.first.math.geometry.Pose2d;
-// import edu.wpi.first.math.numbers.N1;
-// import edu.wpi.first.math.numbers.N3;
-
-// @FunctionalInterface
-// public interface VisionConsumer {
-//     public void addVisionMeasurement(
-//         Pose2d visionRobotPoseM,
-//         double timestampSeconds,
-//         Matrix<N3, N1> visionMeasurementStdDevs
-//     );
-// }
diff --git a/src/main/java/frc/robot/vision/commands/VisionCommand.java b/src/main/java/frc/robot/vision/commands/VisionCommand.java
deleted file mode 100644
index 1306e8c..0000000
--- a/src/main/java/frc/robot/vision/commands/VisionCommand.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// package frc.robot.vision.commands;
-
-// import static frc.robot.vision.VisionConfig.camNames;
-// import static frc.robot.vision.VisionConfig.robotToCamTransforms;
-
-// import java.nio.channels.NetworkChannel;
-
-// import edu.wpi.first.math.geometry.Pose2d;
-// import edu.wpi.first.networktables.NetworkTableInstance;
-// import edu.wpi.first.networktables.StructPublisher;
-// import edu.wpi.first.wpilibj2.command.Command;
-// import frc.robot.vision.Vision;
-// import frc.robot.vision.VisionConsumer;
-
-// public class VisionCommand extends Command {
-//     private final Vision[] visions;
-//     private final VisionConsumer visionConsumer;
-
-//     public VisionCommand(VisionConsumer consumer){
-//         visions = new Vision[camNames.length];
-//         for (int i = 0; i < camNames.length; i++){
-//             visions[i] = new Vision(camNames[i], robotToCamTransforms[i]);
-//         }
-//         this.visionConsumer = consumer;
-//     }
-
-//     @Override
-//     public void execute(){
-//         for (int i = 0; i < visions.length; i++){
-//             var vision = visions[i];
-//             var visionEst = vision.getEstimatedGlobalPose();
-//             visionEst.ifPresent(est -> {
-//                 var estStdDevs = vision.getEstimationStdDevs();
-//                 visionConsumer.addVisionMeasurement(est.estimatedPose.toPose2d(), est.timestampSeconds, estStdDevs);
-//             });
-//         }
-//     }
-
-//     @Override
-//     public boolean runsWhenDisabled(){
-//         return true;
-//     }
-// }