Working on pose estimation

This commit is contained in:
Tylr-J42 2023-12-23 19:11:10 -05:00
parent 762163757f
commit aadab84836
4 changed files with 36 additions and 72 deletions

View File

@ -66,6 +66,8 @@ tag_coords = [[0, 0.0, 0.0, 0.0, 0.0], [1, 610.77, 42.19, 18.22, 180], [2, 610.7
[4, 636.96, 265.74, 27.38, 180], [5, 14.25, 265.74, 27.38, 0], [6, 40.45, 174.19, 18.22, 0], [7, 40.45, 108.19, 18.22, 0],
[8, 40.45, 42.19, 18.22, 0]]
testing_tags = [[0, 0.0, 0.0, 0.0, 0.0], [1, 12.0, 0.0, 0.0, 0.0], [2, -12.0, 0.0, 0.0, 0.0]]
# x,y,z,rx,ry,rz
robo_space_pose = [0, 0, 0, 0, 0, 0]
@ -76,23 +78,24 @@ def tag_corners(tag_coords):
x = tag_coords[i][1]
y = tag_coords[i][2]
z = tag_coords[i][3]
z_rotation = tag_coords[i][4]
y_rotation = tag_coords[i][4]
coordinates = [[], [], [] ,[], []]
x_offset = (b/2)*math.cos(math.radians(z_rotation))
y_offset = (b/2)*math.sin(math.radians(z_rotation))
x_offset = (b/2)*math.cos(math.radians(y_rotation))
z_offset = (b/2)*math.sin(math.radians(y_rotation))
coordinates[0] = tag_coords[i][0]
coordinates[1] = [x-x_offset, y+y_offset, z+b/2]
coordinates[2] = [x+x_offset, y+y_offset, z+b/2]
coordinates[3] = [x+x_offset, y+y_offset, z-b/2]
coordinates[4] = [x-x_offset, y+y_offset, z-b/2]
coordinates[1] = [x-x_offset, y+b/2, z+z_offset]
coordinates[2] = [x+x_offset, y+b/2, z+z_offset]
coordinates[3] = [x+x_offset, y-b/2, z+z_offset]
coordinates[4] = [x-x_offset, y-b/2, z+z_offset]
corners = corners + [coordinates]
return corners
field_tag_coords = tag_corners(tag_coords)
#field_tag_coords = tag_corners(tag_coords)
testing_tag_coords = tag_corners(testing_tags)
def getTagCoords(tag_id):
return tag_coords[tag_id]
@ -125,11 +128,11 @@ def display_features(image, imgpts):
# setting up apriltag detection. Make sure this is OUTSIDE the loop next time
options = apriltag.DetectorOptions(families='tag36h11', border=1, nthreads=4,
quad_decimate=2.0, quad_blur=0.0, refine_edges=True,
quad_decimate=1.0, quad_blur=0.0, refine_edges=True,
refine_decode=False, refine_pose=False, debug=False, quad_contours=True)
detector = apriltag.Detector(options)
pose_estimator = PNPPose(field_tag_coords, robo_space_pose, camera_matrix, dist)
pose_estimator = PNPPose(testing_tag_coords, robo_space_pose, camera_matrix, dist)
# main vision processing code
time.sleep(0.1)
@ -149,8 +152,8 @@ while True:
if det[4]>30:
# points of the tag to be tracked
image_corners = list(image_corners)
image_corners.append(list(det.corners))
image_corners = np.array(image_corners[0])
image_corners = image_corners+(list(det.corners))
image_corners = np.array(image_corners)
tags_detected.append(det.tag_id)
# only show display if you use --display for argparse

View File

@ -19,27 +19,39 @@ class PNPPose:
yaw = 180*atan2(-R[1][0], R[0][0])/pi
rot_params= [roll,pitch,yaw]
return rot_params
def cam_to_field(self, tvecs, euler_angles):
    """Convert a camera-space pose into field-space coordinates.

    NOTE(review): this method is clearly unfinished — `position` and
    `rotation` are returned as all zeros, `euler_angles` is never read,
    and `total_dist` is computed but unused. Confirm intended behavior
    before relying on it.
    """
    position = [0, 0, 0]
    rotation = [0, 0, 0]
    # Planar (x/y only) distance from camera to target; presumably the
    # z component was meant to be included too — TODO confirm.
    total_dist = math.sqrt(tvecs[0]**2 + tvecs[1]**2)
    # NOTE(review): bare name expression is a no-op — looks like an
    # unfinished statement.
    rotation
    return position, rotation
def calculate_coords(self, image_corners, tags_detected):
tag_corners = self.tag_corners
PNP_obj_input = np.array([])
PNP_obj_input = []
if len(tags_detected)>0:
for i in range(len(tags_detected)):
if(PNP_obj_input.size == 0):
PNP_obj_input = np.array([tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3], tag_corners[tags_detected[i]][4]])
if(len(PNP_obj_input) == 0):
PNP_obj_input = [tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3], tag_corners[tags_detected[i]][4]]
else:
PNP_obj_input = np.stack([PNP_obj_input, np.array([tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3], tag_corners[tags_detected[i]][4]])])
PNP_obj_input = PNP_obj_input + [tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3], tag_corners[tags_detected[i]][4]]
PNP_obj_input = np.array(PNP_obj_input)
#print("PNP_obj_input: ", PNP_obj_input, ", image_corners: ", image_corners, "tags_detected: ", tags_detected)
ret, rvecs, tvecs = cv2.solvePnP(PNP_obj_input, image_corners, self.camera_matrix, self.dist, flags=0)
for i in range(len(rvecs)):
rvecs[i] = math.degrees(rvecs[i])
#cameraMatx, rotMatx, transVect, S
euler_angles = self.rot_params_rv(rvecs)
total_dist = math.hypot(math.hypot(tvecs[0], tvecs[1]), tvecs[2])
print("euler ZYX: ", euler_angles, "rvecs: ", rvecs,"tvecs: ", tvecs)
print("euler XYZ: ", euler_angles, "tvecs: ", tvecs, "total_dist: ", total_dist)
world_angleXYZ = [-euler_angles[0], -euler_angles[1], -euler_angles[2]]
z_line_offset_coord = []

View File

@ -1,51 +0,0 @@
import math
from PNP_Pose_Estimation import PNPPose
import numpy as np
# Camera focal length in pixels, presumably measured by the calibration
# script referenced below — TODO confirm.
FOCAL_LEN_PIXELS = 621.5827338
# camera matrix from Calibrate_Camera.py.
# 3x3 intrinsic matrix: focal lengths on the diagonal, principal point
# (cx, cy) in the last column.
camera_matrix = np.array([[FOCAL_LEN_PIXELS, 0., 308.94165115],
[0., FOCAL_LEN_PIXELS, 221.9470321],
[0., 0.,1.]])
# from Camera_Calibration.py
# Lens distortion coefficients (k1, k2, p1, p2, k3) as used by OpenCV.
dist = np.array([ 2.32929183e-01, -1.35534844e+00, -1.51912733e-03, -2.17960810e-03, 2.25537289e+00])
# 2023 Field Apriltag Coordinates index = tag id
# format = [id, x, y, z, z-rotation] in inches
# Entry 0 is a placeholder so that list index == tag id.
tag_coords = [[0, 0.0, 0.0, 0.0, 0.0], [1, 610.77, 42.19, 18.22, 180], [2, 610.77, 108.19, 18.22, 180], [3, 610.77, 174.19, 18.22, 180],
[4, 636.96, 265.74, 27.38, 180], [5, 14.25, 265.74, 27.38, 0], [6, 40.45, 174.19, 18.22, 0], [7, 40.45, 108.19, 18.22, 0],
[8, 40.45, 42.19, 18.22, 0]]
# x,y,z,rx,ry,rz
# Robot pose in field space; all zeros here, presumably updated at
# runtime — TODO confirm.
robo_space_pose = [0, 0, 0, 0, 0, 0]
# Apriltag edge length in inches (presumably; used as the tag size when
# computing corner offsets) — TODO confirm.
b = 6.5
def tag_corners(tag_coords, tag_size=6.5):
    """Compute the 3D corner coordinates of each apriltag.

    Parameters:
        tag_coords: list of ``[id, x, y, z, z_rotation_deg]`` entries,
            one per tag (positions in inches, rotation in degrees).
        tag_size: edge length of the square tag in inches. Defaults to
            6.5, the value of the module-level constant ``b`` that the
            original implementation hard-coded.

    Returns:
        A list with one entry per tag: ``[id, c1, c2, c3, c4]`` where
        each corner is an ``[x, y, z]`` triple.
    """
    corners = []
    half = tag_size / 2
    for tag_id, x, y, z, z_rotation in tag_coords:
        # Horizontal half-edge offsets produced by rotating the tag
        # about the vertical axis.
        x_offset = half * math.cos(math.radians(z_rotation))
        y_offset = half * math.sin(math.radians(z_rotation))
        # NOTE(review): y_offset is applied with the same sign to every
        # corner, so the corners only span the tag face for rotations of
        # 0/180 degrees (the only values used on the field) — TODO
        # confirm intent for other rotations.
        corners.append([
            tag_id,
            [x - x_offset, y + y_offset, z + half],
            [x + x_offset, y + y_offset, z + half],
            [x + x_offset, y + y_offset, z - half],
            [x - x_offset, y + y_offset, z - half],
        ])
    return corners
# Precompute the 3D corner coordinates for every field tag (index ==
# tag id).
field_tag_coords = tag_corners(tag_coords)
# Build the PnP pose estimator from the field map, robot pose, and the
# camera calibration constants defined above.
pose_estimator = PNPPose(field_tag_coords, robo_space_pose, camera_matrix, dist)
# One-shot test call: hard-coded pixel corners for a single detection of
# tag id 0 — presumably sample data for debugging, TODO confirm.
pose_estimator.calculate_coords(np.array([[341.3, 157.0], [220.99, 152.41], [140.93 , 83.28], [284.02, 81.93]]), np.array([0]))