math for solving the triangle in the dual tag triangulation

Tylr-J42 2023-10-09 02:35:12 -04:00
parent 68262d5edd
commit daf90bcd94
4 changed files with 42 additions and 16 deletions


@@ -12,6 +12,7 @@ from networktables import NetworkTables
import argparse
from TagObj import TagObj
from PiVid import PiVid
from Pose_Estimation import PoseEstimation
RAD2DEG = 180/pi
@@ -19,6 +20,7 @@ RAD2DEG = 180/pi
parser = argparse.ArgumentParser(description="Select display")
parser.add_argument("--display", action='store_true', help="enable a display of the camera")
parser.add_argument("--high_res", action='store_true', help="enable resolution 1088x720 vs 640x480")
parser.add_argument("--pose_estimation", action='store_true', help="estimate pose based on detected tags")
parser.add_argument("--ip_add", type=str, required=True)
args = parser.parse_args()
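Taken together, a typical invocation might look like the following (the script name main.py and the IP are assumptions for illustration; only the flags come from this diff):

python3 main.py --ip_add 10.0.0.2 --display --pose_estimation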
@@ -28,8 +30,8 @@ args = parser.parse_args()
FOCAL_LEN_PIXELS = 621.5827338
# camera matrix from Calibrate_Camera.py.
camera_matrix = np.array([[FOCAL_LEN_PIXELS, 0., 308.94165115],
                          [0., FOCAL_LEN_PIXELS, 221.9470321],
                          [0., 0., 1.]])
# from Camera_Calibration.py
dist = np.array([ 2.32929183e-01, -1.35534844e+00, -1.51912733e-03, -2.17960810e-03, 2.25537289e+00])
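These intrinsics follow the standard pinhole model: a camera-frame point (X, Y, Z) lands on the sensor at

u = f_x * X/Z + c_x,    v = f_y * Y/Z + c_y

with f_x = f_y = FOCAL_LEN_PIXELS and principal point (c_x, c_y) ≈ (308.94, 221.95), while dist holds the (k1, k2, p1, p2, k3) distortion coefficients in OpenCV's order.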
@@ -57,10 +59,8 @@ vision_table = NetworkTables.getTable("Fiducial")
FPS = 0
TARGET_ID = 1
# 2023 field AprilTag coordinates, indexed by tag id
-# format = [x, y, z, z-rotation] in inches
+# format = [id, x, y, z, z-rotation] in inches
tag_coords = [0, [1, 610.77, 42.19, 18.22, 180], [2, 610.77, 108.19, 18.22, 180], [3, 610.77, 174.19, 18.22, 180],
[4, 636.96, 265.74, 27.38, 180], [5, 14.25, 265.74, 27.38, 0], [6, 40.45, 174.19, 18.22, 0], [7, 40.45, 108.19, 18.22, 0],
[8, 40.45, 42.19, 18.22, 0]]
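With the id added at index 0 of each entry, every positional lookup shifts by one. For illustration (det here stands for any detection object carrying a tag_id; the unpacking is a sketch, not code from this diff):

# hypothetical lookup: unpack a detected tag's field pose, in inches/degrees
_, tag_x, tag_y, tag_z, tag_zrot = tag_coords[det.tag_id]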
@@ -122,7 +122,6 @@ while True:
        # points of the tag to be tracked
        tag_points = np.array([[det.center[0], det.center[1]],
                               [det.corners[0][0], det.corners[0][1]],
                               [det.corners[1][0], det.corners[1][1]],
                               [det.corners[2][0], det.corners[2][1]],
                               [det.corners[3][0], det.corners[3][1]]], dtype=np.float32)
        ret, rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist, flags=0)
        # pack the translation and rotation vectors into a format NetworkTables can carry
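A minimal sketch of what that packing could look like (an assumption, not the repo's code; it presumes import math and the vision_table handle created above):

rot_matrix, _ = cv2.Rodrigues(rvecs)  # 3x3 rotation matrix from the Rodrigues vector
pitch = math.degrees(math.atan2(-rot_matrix[2][0], math.hypot(rot_matrix[2][1], rot_matrix[2][2])))
yaw = math.degrees(math.atan2(rot_matrix[1][0], rot_matrix[0][0]))
vision_table.putNumberArray("tvecs", [float(v) for v in tvecs.flatten()])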

Pose_Estimation.py

@@ -17,9 +17,11 @@ class PoseEstimation:
        coord_array = []
        if(len(self.visible_tags) == 1):
            for i in self.visible_tags:
-                tvecXCoord = math.sqrt(self.data_array[i].tvec_z**2 + self.data_array[i].tvec_x**2) * math.cos(self.tag_coords[self.data_array[i].tag_id][4] - self.data_array[i].rvec_y - self.robo_space_pose[5]) - self.robo_space_pose[0]
-                tvecYCoord = math.sqrt(self.data_array[i].tvec_z**2 + self.data_array[i].tvec_y**2) * math.sin(90 - self.data_array[i].rvec_x - self.robo_space_pose[3])
-                tvecZCoord = math.sqrt(self.data_array[i].tvec_z**2 + self.data_array[i].tvec_x**2) * math.sin(self.tag_coords[self.data_array[i].tag_id][4] - self.data_array[i].rvec_y - self.robo_space_pose[5]) - self.robo_space_pose[2]
+                tagTvec = cam_pose_to_robot_tvec(self.data_array[i], self.robo_space_pose)
+                tvecXCoord = math.sqrt((tagTvec[2]**2) + (tagTvec[0]**2)) * math.cos(math.radians(self.tag_coords[self.data_array[i].tag_id][4] - self.data_array[i].rvec_y - self.robo_space_pose[5])) - self.robo_space_pose[0]
+                tvecYCoord = math.sqrt((tagTvec[2]**2) + (tagTvec[1]**2)) * math.sin(math.radians(90 - self.data_array[i].rvec_x - self.robo_space_pose[3]))
+                tvecZCoord = math.sqrt((tagTvec[2]**2) + (tagTvec[0]**2)) * math.sin(math.radians(self.tag_coords[self.data_array[i].tag_id][4] - self.data_array[i].rvec_y - self.robo_space_pose[5])) - self.robo_space_pose[2]
                self.coordinates = [self.data_array[i].tag_id, tvecXCoord, tvecYCoord, tvecZCoord]
                coord_array.append(self.coordinates)
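In words: sqrt(t_z^2 + t_x^2) is the planar range from camera to tag, the angle theta_tag - rvec_y - yaw turns that range from the tag's known field heading into field axes, and the robo_space_pose entries shift from camera origin to robot origin, so

x_field = sqrt(t_z^2 + t_x^2) * cos(theta_tag - rvec_y - yaw) - x_offset
z_field = sqrt(t_z^2 + t_x^2) * sin(theta_tag - rvec_y - yaw) - z_offset

where theta_tag is tag_coords[tag_id][4] (assuming, as the literals 90 and 180 suggest, that all angles here are in degrees).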
@@ -31,20 +33,45 @@ class PoseEstimation:
                self.orientation = [self.data_array[i].tag_id, rvecPitch, rvecRoll, rvecYaw]
                coord_array.append(self.orientation)
        elif(len(self.visible_tags) == 2):
-            tvecXCoord
+            tag0Tvec = cam_pose_to_robot_tvec(self.data_array[0], self.robo_space_pose)
+            tag1Tvec = cam_pose_to_robot_tvec(self.data_array[1], self.robo_space_pose)
+            tag0Coords = [self.tag_coords[self.data_array[0].tag_id][1], self.tag_coords[self.data_array[0].tag_id][2], self.tag_coords[self.data_array[0].tag_id][3]]
+            tag1Coords = [self.tag_coords[self.data_array[1].tag_id][1], self.tag_coords[self.data_array[1].tag_id][2], self.tag_coords[self.data_array[1].tag_id][3]]
+            tag_line_dist = distance(tag0Coords[0], tag0Coords[1], tag1Coords[0], tag1Coords[1])
+            tag0_cam_dist = math.hypot(tag0Tvec[0], tag0Tvec[2])
+            tag1_cam_dist = math.hypot(tag1Tvec[0], tag1Tvec[2])
+            cam_angle = math.degrees(math.atan(tag0Tvec[0] / tag0Tvec[2]) - math.atan(tag1Tvec[0] / tag1Tvec[2]))
+            tag0_angle = math.degrees(math.acos(((tag0_cam_dist**2) + (tag_line_dist**2) - (tag1_cam_dist**2)) / (2 * tag0_cam_dist * tag_line_dist)))
+            tag1_angle = 180 - (cam_angle + tag0_angle)
+            tvecZCoord = math.hypot(tag0Tvec[0], tag0Tvec[1])
        return coord_array
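The law-of-cosines line above recovers the interior angle at tag 0 of the camera/tag0/tag1 triangle from its three known side lengths. What remains to finish the triangulation is to rotate the tag0-to-tag1 baseline by that angle and walk tag0_cam_dist along it; a sketch of that remaining step (the helper and its two-solution return are assumptions, not code from this commit):

def triangulate_cam_xy(t0, t1, d0, d1):
    # t0, t1: planar field positions of the two tags; d0, d1: planar
    # camera-to-tag distances (tag0_cam_dist and tag1_cam_dist above)
    c = math.hypot(t1[0] - t0[0], t1[1] - t0[1])              # baseline length
    alpha = math.acos((d0**2 + c**2 - d1**2) / (2 * d0 * c))  # angle at tag 0, radians
    ux, uy = (t1[0] - t0[0]) / c, (t1[1] - t0[1]) / c         # unit baseline vector
    # rotating the baseline by +alpha or -alpha gives the two mirror
    # solutions, one on each side of the tag line
    cam_a = (t0[0] + d0 * (ux * math.cos(alpha) - uy * math.sin(alpha)),
             t0[1] + d0 * (ux * math.sin(alpha) + uy * math.cos(alpha)))
    cam_b = (t0[0] + d0 * (ux * math.cos(alpha) + uy * math.sin(alpha)),
             t0[1] + d0 * (uy * math.cos(alpha) - ux * math.sin(alpha)))
    return cam_a, cam_b

Since the tags along each 2023 field wall all face the same way, the solution in front of the tags can be chosen by sign; cam_angle above could serve that disambiguation role.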
-    def cam_pose_to_robot(self, tag, robot_space_pose):
-        robot_tvec_x = cam_rotation_comp(tag.tvec_x, tag.tvec_z, robot_space_pose[4])[0]
-        robot_tvec_y = cam_rotation_comp(tag.tvec_y, tag.tvec_z, robot_space_pose[3])[0]
-        robot_tvec_z = cam_rotation_comp(tag.tvec_y, tag.tvec_z, robot_space_pose[3])[1]
+# convert a camera-frame detection's translation vector into robot-relative form
+def cam_pose_to_robot_tvec(tag, robot_space_pose):
+    robot_tvec_x = cam_rotation_comp(tag.tvec_x, tag.tvec_z, robot_space_pose[4])[0]
+    robot_tvec_y = cam_rotation_comp(tag.tvec_y, tag.tvec_z, robot_space_pose[3])[0]
+    robot_tvec_z = cam_rotation_comp(tag.tvec_y, tag.tvec_z, robot_space_pose[3])[1]
+    return [robot_tvec_x, robot_tvec_y, robot_tvec_z]
# planar Euclidean distance between two field points
def distance(d1x, d1y, d2x, d2y):
    return math.sqrt((d2x - d1x)**2 + (d2y - d1y)**2)
# angle of the line through two points, in degrees from the x-axis
def slope_angle(x1, x2, y1, y2):
    if(x2 - x1 != 0):
        return math.degrees(math.atan((y2 - y1) / (x2 - x1)))
    else:
        return 90.0
# math for converting one camera translation-vector component pair into its robot-relative equivalent
def cam_rotation_comp(opposite, adjacent, tilt_theta):
-    opposite_out = math.sin(tilt_theta - math.atan(opposite/adjacent)) * math.hypot(opposite, adjacent)
-    adjacent_out = math.cos(tilt_theta - math.atan(opposite/adjacent)) * math.hypot(opposite, adjacent)
+    # tilt_theta arrives in degrees, so convert before the trig; the outputs
+    # are lengths, not angles, and need no degrees conversion
+    opposite_out = math.sin(math.radians(tilt_theta) - math.atan(opposite / adjacent)) * math.hypot(opposite, adjacent)
+    adjacent_out = math.cos(math.radians(tilt_theta) - math.atan(opposite / adjacent)) * math.hypot(opposite, adjacent)
    return [opposite_out, adjacent_out]
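cam_rotation_comp is a planar rotation in disguise: the pair (opposite, adjacent) has polar angle atan(opposite/adjacent) and magnitude hypot(opposite, adjacent), and the function re-emits that magnitude at tilt_theta minus the original angle, undoing the camera's mount tilt for one component pair. A quick sanity check with made-up values:

o, a = cam_rotation_comp(3.0, 4.0, 30.0)    # hypothetical 30-degree camera tilt
assert abs(math.hypot(o, a) - 5.0) < 1e-9   # a rotation preserves vector length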

Binary file not shown.