More PhotonVision work, a bunch of comments/docstrings

This commit is contained in:
2026-02-14 22:05:29 -05:00
parent 91a5281202
commit 7f291e42a1
9 changed files with 192 additions and 79 deletions

View File

@@ -21,15 +21,14 @@ import edu.wpi.first.math.kinematics.SwerveDriveKinematics;
import edu.wpi.first.math.kinematics.SwerveModulePosition;
import edu.wpi.first.math.kinematics.SwerveModuleState;
import edu.wpi.first.wpilibj.DriverStation;
import edu.wpi.first.wpilibj.DriverStation.Alliance;
import edu.wpi.first.wpilibj2.command.Command;
import edu.wpi.first.wpilibj2.command.PrintCommand;
import edu.wpi.first.wpilibj2.command.SubsystemBase;
import frc.robot.constants.AutoConstants;
import frc.robot.constants.CompetitionConstants;
import frc.robot.constants.DrivetrainConstants;
import frc.robot.constants.OIConstants;
import frc.robot.utilities.SwerveModule;
import frc.robot.utilities.Utilities;
import frc.robot.utilities.VisualPose;
public class Drivetrain extends SubsystemBase {
@@ -180,6 +179,19 @@ public class Drivetrain extends SubsystemBase {
});
}
/**
 * Rotates the robot to face a given Pose2d position on the field
*
* Note that this Command does not provide a means of timeout. If you are
* using this in an auto context, this Command should be decorated with
* withTimeout(<some_value>). Otherwise, you will be waiting for the PID
* Controller doing the work to report that it is at the desired setpoint.
*
* @param targetPose The Pose2d object to rotate the robot towards
* @param rotate180 When false, the front of the robot faces the specified pose, when true
* the back of the robot faces the specified pose
* @return A complete Command structure that performs the specified action
*/
public Command rotateToPose(Pose2d targetPose, boolean rotate180) {
return runOnce(yawRotationController::reset).andThen(
drive(
@@ -205,21 +217,30 @@ public class Drivetrain extends SubsystemBase {
.until(yawRotationController::atSetpoint);
}
/**
 * Locks the robot's rotation to face the Alliance Hub on the field.
*
* This method is innately aware of which hub to face based on the assigned alliance color.
*
* This method is <i>NOT</i> for autonomous, see rotateToPose
*
* This method provides a field oriented mechanism of driving the robot, such that the robot
* is always facing the point on the field that is the center of the alliance hub. This
 * method assumes that the robot's estimated pose is reasonably accurate.
*
* @param xSpeed The X (forward/backward) translational speed of the robot
* @param ySpeed The Y (left/right) translational speed of the robot
* @param rotate180 When false, the front of the robot faces the hub, when true, the back
* of the robot faces the hub
* @return A complete Command structure that performs the specified action
*/
public Command lockRotationToHub(DoubleSupplier xSpeed, DoubleSupplier ySpeed, boolean rotate180) {
return runOnce(yawRotationController::reset).andThen(
drive(
xSpeed,
ySpeed,
() -> {
Optional<Alliance> alliance = DriverStation.getAlliance();
if(alliance.isEmpty()) {
return 0;
}
Pose2d faceTowards = alliance.get() == Alliance.Blue ?
CompetitionConstants.kBlueHubLocation :
CompetitionConstants.kRedHubLocation;
Pose2d faceTowards = Utilities.getHubPose();
Rotation2d targetRotation = new Rotation2d(
faceTowards.getX() - getPose().getX(),

View File

@@ -1,32 +1,47 @@
package frc.robot.subsystems;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.function.Consumer;
import java.util.stream.Stream;
import org.photonvision.EstimatedRobotPose;
import org.photonvision.PhotonCamera;
import org.photonvision.PhotonPoseEstimator;
import org.photonvision.PhotonPoseEstimator.PoseStrategy;
import org.photonvision.PhotonUtils;
import org.photonvision.targeting.PhotonPipelineResult;
import org.photonvision.targeting.PhotonTrackedTarget;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Transform3d;
import edu.wpi.first.math.util.Units;
import edu.wpi.first.wpilibj2.command.SubsystemBase;
import edu.wpi.first.wpilibj2.command.button.Trigger;
import frc.robot.constants.CompetitionConstants;
import frc.robot.constants.PhotonConstants;
import frc.robot.interfaces.IAprilTagProvider;
import frc.robot.utilities.PhotonVisionConfig;
import frc.robot.utilities.VisualPose;
/**
* This "Subsystem" is not actually a Subsystem. The intent is for this to be treated as
* a "resource", that is, something that is not inherently a physical mechanism to be controlled.
*
* A "resource" in this instance should be thought of as something that can be safely shared
 * by other Subsystems generally without collision if more than one Subsystem requires the
* "resource" at any given time.
*
* Resources should <i>NOT</i> produce Commands, they should not have a default Command.
 * Resources do not have behaviors, and because Commands are in and of themselves behaviors,
* this class should not have Commands.
*
* Part of the thinking behind creating the PhotonVision components this way is to rely
* on the CommandScheduler to call periodic. If this weren't the case, some other subsystem
* would have to manage calling for periodic updates, while still sharing the resource with
* other subsystems <i>somehow</i>.
*
* This class is dynamic, by adding or removing PhotonVisionConfig objects to the "configs"
* List in the PhotonConstants file, you change what is set up internally in this class.
* 1 config means 1 camera, 1 estimator, 1 stored pipeline result, 2 configs means 2 cameras,
* 2 estimators, etc. etc.
*/
public class PhotonVision extends SubsystemBase {
private PhotonCamera[] cameras;
private PhotonPoseEstimator[] estimators;
@@ -65,6 +80,7 @@ public class PhotonVision extends SubsystemBase {
if(!pose.isEmpty()) {
VisualPose visualPose = new VisualPose(
cameras[i].getName(),
pose.get().estimatedPose.toPose2d(),
pose.get().timestampSeconds
);
@@ -76,22 +92,68 @@ public class PhotonVision extends SubsystemBase {
}
}
public void testMethod(int targetID) {
Optional<PhotonTrackedTarget> target = latestResults.stream()
.filter((p) -> p != null)
.map(PhotonPipelineResult::getTargets)
.map(List::stream)
.reduce(Stream::concat)
.get()
.filter((p) -> p.getFiducialId() == targetID)
.max(
Comparator.comparingDouble((ptt) -> {
return (double)ptt.getDetectedObjectConfidence();
})
);
/**
 * Returns the best 3D pose for a given AprilTag ID as seen by the cameras on the robot.
 *
 * Every camera's latest pipeline result is scanned; if no camera currently reports
 * the requested tag ID, this method returns Optional.empty().
 *
 * Note that no minimum confidence threshold is applied. A detection with very low
 * confidence from a single camera is still enough to produce a pose.
 *
 * @param tagID The fiducial ID of the tag to look for in the latest results from all cameras
 * @return An Optional object containing a Pose3d object, or Optional.empty() if
 *         the tag is not present anywhere in the robot's field of view.
 */
public Optional<Pose3d> getBestPoseForTag(int tagID) {
    PhotonVisionConfig bestConfig = null;
    Transform3d bestCameraToTag = null;
    float highestConfidence = -1;
    // Scan every camera's latest result, keeping the highest-confidence sighting
    // of the requested tag. Ties on confidence are won by the later camera/target.
    for(int i = 0; i < latestResults.size(); i++) {
        PhotonPipelineResult result = latestResults.get(i);
        if(result == null) {
            continue;
        }
        for(PhotonTrackedTarget candidate: result.getTargets()) {
            boolean matchesId = candidate.getFiducialId() == tagID;
            if(matchesId && candidate.getDetectedObjectConfidence() >= highestConfidence) {
                bestConfig = PhotonConstants.configs.get(i);
                bestCameraToTag = candidate.bestCameraToTarget;
                highestConfidence = candidate.getDetectedObjectConfidence();
            }
        }
    }
    if(bestCameraToTag == null) {
        return Optional.empty();
    }
    // This is based on what PhotonVision does for multitag Pose estimation
    // See PhotonPoseEstimator.multiTagOnCoprocStrategy
    // TODO This doesn't currently account for the offset of the tag relative to say the hub
    return Optional.of(Pose3d.kZero
        .plus(bestCameraToTag.inverse())
        .relativeTo(CompetitionConstants.kTagLayout.getOrigin())
        .plus(bestConfig.robotToCamera().inverse())
        .plus(CompetitionConstants.kRobotToShooter));
}
/**
 * Add a Consumer of VisualPose records to the PhotonVision resource.
 *
 * Each consumer will receive a VisualPose object when any camera produces a new
 * VisualPose.
 *
 * The number of Poses produced in a given 20ms cycle matches the number of
 * cameras on the robot, as currently every camera's configuration will generate
 * a Pose2d.
 *
 * @param consumer The lambda or functional reference that will consume Poses produced
 *                 by the PhotonVision resource. Must not be null.
 * @throws NullPointerException if consumer is null
 */
public void addPoseEstimateConsumer(Consumer<VisualPose> consumer) {
    // Fail fast here rather than throwing later, mid-dispatch, when poses are
    // handed out to every registered consumer.
    poseEstimateConsumers.add(Objects.requireNonNull(consumer, "consumer"));
}