test things and some problems fixed no worko
This commit is contained in:
parent 3203f340d1
commit 3c651154a7
@@ -93,8 +93,6 @@ def tag_corners(tag_coords):
field_tag_coords = tag_corners(tag_coords)

print(str(tag_corners(tag_coords)[1]))

def getTagCoords(tag_id):
    return tag_coords[tag_id]
@@ -62,7 +62,7 @@ FPS = 0
# 2023 Field Apriltag Coordinates index = tag id
# format = [id, x, y, z, z-rotation] in inches
-tag_coords = [[0, 0, 0, 0, 0], [1, 610.77, 42.19, 18.22, 180], [2, 610.77, 108.19, 18.22, 180], [3, 610.77, 174.19, 18.22, 180],
+tag_coords = [[0, 69.0, 420.0, 69.0, 0.0], [1, 610.77, 42.19, 18.22, 180], [2, 610.77, 108.19, 18.22, 180], [3, 610.77, 174.19, 18.22, 180],
              [4, 636.96, 265.74, 27.38, 180], [5, 14.25, 265.74, 27.38, 0], [6, 40.45, 174.19, 18.22, 0], [7, 40.45, 108.19, 18.22, 0],
              [8, 40.45, 42.19, 18.22, 0]]
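[Aside, not part of the diff] Each tag_coords entry is [id, x, y, z, z-rotation] in inches and its index equals the tag id, so the getTagCoords helper further down is a plain list lookup. A minimal standalone sketch using values copied from the table above:

import math  # unused here, kept to mirror the repo's imports

# [id, x, y, z, z-rotation] in inches, list index == tag id
tag_coords = [[0, 69.0, 420.0, 69.0, 0.0], [1, 610.77, 42.19, 18.22, 180]]

def getTagCoords(tag_id):
    return tag_coords[tag_id]

print(getTagCoords(1))   # -> [1, 610.77, 42.19, 18.22, 180]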
@@ -88,14 +88,11 @@ def tag_corners(tag_coords):
        coordinates[3] = [x+x_offset, y+y_offset, z-b/2]
        coordinates[4] = [x-x_offset, y+y_offset, z-b/2]

-        corners.append(coordinates)
+        corners = corners + [coordinates]
    return corners

field_tag_coords = tag_corners(tag_coords)

print(str(tag_corners(tag_coords)[1]))

def getTagCoords(tag_id):
    return tag_coords[tag_id]
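[Aside, not part of the diff] For context on the hunk above: each corner is offset from the tag centre by half the tag width b along the tag's z-rotation, and by plus or minus b/2 in z. A standalone sketch of that arithmetic; the corner ordering and the sign on the y term are assumptions for illustration, not necessarily what the repo intends:

import math

def tag_corner_sketch(x, y, z, z_rotation_deg, b=6.5):
    # half-width components along the tag's facing direction
    x_offset = (b / 2) * math.cos(math.radians(z_rotation_deg))
    y_offset = (b / 2) * math.sin(math.radians(z_rotation_deg))
    # four 3-D corners: +/- offset in the tag plane, +/- b/2 in z (assumed ordering)
    return [[x - x_offset, y - y_offset, z + b / 2],
            [x + x_offset, y + y_offset, z + b / 2],
            [x + x_offset, y + y_offset, z - b / 2],
            [x - x_offset, y - y_offset, z - b / 2]]

print(tag_corner_sketch(610.77, 42.19, 18.22, 180))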
@@ -107,7 +104,7 @@ def connectionListener(connected, info):
NetworkTables.addConnectionListener(connectionListener, immediateNotify=True)

# create overlay on camera feed
-def display_features(image, imgpts, totalDist):
+def display_features(image, imgpts):
    # making red lines around fiducial
    for i in range(0,4):
        f = i+1
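[Aside, not part of the diff] On the addConnectionListener line above: with pynetworktables the listener is a plain callback taking (connected, info). A minimal self-contained sketch; the server address is a placeholder, not the repo's configuration:

from networktables import NetworkTables

def connectionListener(connected, info):
    # fires on registration (because of immediateNotify=True) and on every connect/disconnect
    print("connected" if connected else "disconnected", info)

NetworkTables.initialize(server="127.0.0.1")   # placeholder address
NetworkTables.addConnectionListener(connectionListener, immediateNotify=True)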
@@ -122,7 +119,7 @@ def display_features(image, imgpts, totalDist):
        image = cv2.line(image, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)
    # draw top layer in red color
    image = cv2.drawContours(image, [imgpts[4:]],-1,(0,0,255),3)
-    image = cv2.putText(image, "#"+str(det.tag_id)+", "+str(round(totalDist, 4))+"in", (int(det.center[0]),int(det.center[1])+25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2, cv2.LINE_AA)
+    image = cv2.putText(image, "#"+str(det.tag_id)+", "+"in", (int(det.center[0]),int(det.center[1])+25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2, cv2.LINE_AA)
    return image

# setting up apriltag detection. Make sure this is OUTSIDE the loop next time
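[Aside, not part of the diff] If a distance readout ever goes back into that label, one hedged way to derive it is from the solvePnP translation vector. A sketch under that assumption; the tvec, pixel anchor, and dummy frame are made-up values for illustration:

import numpy as np
import cv2

tvec = np.array([[12.0], [-3.5], [48.0]])        # example translation from solvePnP, in inches
totalDist = float(np.linalg.norm(tvec))          # straight-line camera-to-tag distance

image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy frame for the sketch
label = "#1, " + str(round(totalDist, 4)) + "in"
image = cv2.putText(image, label, (320, 265), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (255, 0, 0), 2, cv2.LINE_AA)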
@@ -138,7 +135,7 @@ time.sleep(0.1)
while True:
    frame_start = time.time()
    image = cam.read()
-    image_corners = []
+    image_corners = np.array([])
    tags_detected = []

    #detecting april tags
@@ -150,17 +147,20 @@ while True:
        # if the confidence is less than 30% exclude the tag from being processed.
        if det[4]>30:
            # points of the tag to be tracked
            tag_points = np.array([[det.center[0], det.center[1]], [det.corners[0][0], det.corners[0][1]], [det.corners[1][0], det.corners[1][1]], [det.corners[2][0], det.corners[2][1]], [det.corners[3][0], det.corners[3][1]]], dtype=np.float32)

-            image_corners.append(det.corners)
-            tags_detected.append(det.id)
+            image_corners = list(image_corners)
+            image_corners.append(list(det.corners))
+            image_corners = np.array(image_corners[0])
+            tags_detected.append(det.tag_id)

            # only show display if you use --display for argparse
            if args.display:
                tag_points = np.array([[det.center[0], det.center[1]], [det.corners[0][0], det.corners[0][1]], [det.corners[1][0], det.corners[1][1]], [det.corners[2][0], det.corners[2][1]], [det.corners[3][0], det.corners[3][1]]], dtype=np.float32)
                ret,rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist, flags=0)
                imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, camera_matrix, dist)
                image = display_features(image, imgpts)

+    pose_coords = pose_estimator.calculate_coords(image_corners, tags_detected)

    #Showing image. use --display to show image
    if args.display:
        image = cv2.putText(image, "FPS: "+str(round(FPS, 4)), (25,440), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2, cv2.LINE_AA)
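[Aside, not part of the diff] The image_corners / tags_detected bookkeeping above ultimately feeds cv2.solvePnP, which wants matched (N,3) object points and (N,2) image points, N >= 4, as float arrays. A self-contained sketch of that pairing; the pixel values are made up, the camera intrinsics are copied from the repo's calibration constants, and the distortion is zeroed as a placeholder:

import numpy as np
import cv2

object_points = np.array([[65.75, 420.0, 72.25],
                          [72.25, 420.0, 72.25],
                          [72.25, 420.0, 65.75],
                          [65.75, 420.0, 65.75]], dtype=np.float32)   # 3-D field corners, inches
image_points = np.array([[398.4, 154.2],
                         [260.0, 150.7],
                         [212.1, 77.6],
                         [387.1, 77.8]], dtype=np.float32)            # matching pixel corners

camera_matrix = np.array([[621.5827338, 0.0, 308.94165115],
                          [0.0, 621.5827338, 221.9470321],
                          [0.0, 0.0, 1.0]])
dist = np.zeros(5)   # placeholder: no lens distortion in this sketch

ok, rvec, tvec = cv2.solvePnP(object_points, image_points, camera_matrix, dist)
print(ok, tvec.ravel())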
@@ -1,5 +1,6 @@
import math
import cv2
import numpy as np

class PNPPose:
@@ -12,11 +13,14 @@ class PNPPose:
    def calculate_coords(self, image_corners, tags_detected):
        tag_corners = self.tag_corners
-        PNP_obj_input = []
+        PNP_obj_input = np.array([])

        for i in range(len(tags_detected)):
-            PNP_obj_input.append([tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3]])
+            if(PNP_obj_input.size == 0):
+                PNP_obj_input = np.array([tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3], tag_corners[tags_detected[i]][4]])
+            else:
+                PNP_obj_input = np.stack([PNP_obj_input, np.array([tag_corners[tags_detected[i]][1], tag_corners[tags_detected[i]][2], tag_corners[tags_detected[i]][3], tag_corners[tags_detected[i]][4]])])

        print("PNP_obj_input: ", PNP_obj_input, ", image_corners: ", image_corners)
        ret, rvecs, tvecs = cv2.solvePnP(PNP_obj_input, image_corners, self.camera_matrix, self.dist, flags=0)

-        print(tvecs)
+        print(tvecs,"poop")
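[Aside, not part of the diff] On the accumulation pattern above: growing a NumPy array with np.stack needs the empty-array special case, whereas collecting rows in a Python list and converting once at the end avoids it. A sketch of that alternative, with a hypothetical per-tag corner table (not the class's real state):

import numpy as np

# hypothetical lookup: tag id -> four [x, y, z] corners
tag_corners = {1: [[608.0, 39.0, 21.5], [614.5, 39.0, 21.5],
                   [614.5, 39.0, 15.0], [608.0, 39.0, 15.0]]}
tags_detected = [1]

rows = []
for tag_id in tags_detected:
    rows.extend(tag_corners[tag_id])               # four corners per detected tag

PNP_obj_input = np.array(rows, dtype=np.float32)   # shape (4 * n_tags, 3)
print(PNP_obj_input.shape)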
FRC_Fiducial_Tracking/WTFTEST.py (new file, 58 lines)
@@ -0,0 +1,58 @@
import math
from PNP_Pose_Estimation import PNPPose
import numpy as np

FOCAL_LEN_PIXELS = 621.5827338
# camera matrix from Calibrate_Camera.py.
camera_matrix = np.array([[FOCAL_LEN_PIXELS, 0., 308.94165115],
                          [0., FOCAL_LEN_PIXELS, 221.9470321],
                          [0., 0., 1.]])

# from Camera_Calibration.py
dist = np.array([ 2.32929183e-01, -1.35534844e+00, -1.51912733e-03, -2.17960810e-03, 2.25537289e+00])

# 2023 Field Apriltag Coordinates index = tag id
# format = [id, x, y, z, z-rotation] in inches
tag_coords = [[0, 69.0, 420.0, 69.0, 0.0], [1, 610.77, 42.19, 18.22, 180], [2, 610.77, 108.19, 18.22, 180], [3, 610.77, 174.19, 18.22, 180],
              [4, 636.96, 265.74, 27.38, 180], [5, 14.25, 265.74, 27.38, 0], [6, 40.45, 174.19, 18.22, 0], [7, 40.45, 108.19, 18.22, 0],
              [8, 40.45, 42.19, 18.22, 0]]

# x,y,z,rx,ry,rz
robo_space_pose = [0, 0, 0, 0, 0, 0]

b = 6.5

def tag_corners(tag_coords):
    corners = []

    for i in range(len(tag_coords)):
        x = tag_coords[i][1]
        y = tag_coords[i][2]
        z = tag_coords[i][3]
        z_rotation = tag_coords[i][4]

        coordinates = [[], [], [] ,[], []]

        x_offset = (b/2)*math.cos(math.radians(z_rotation))
        y_offset = (b/2)*math.sin(math.radians(z_rotation))
        coordinates[0] = tag_coords[i][0]
        coordinates[1] = [x-x_offset, y+y_offset, z+b/2]
        coordinates[2] = [x+x_offset, y+y_offset, z+b/2]
        coordinates[3] = [x+x_offset, y+y_offset, z-b/2]
        coordinates[4] = [x-x_offset, y+y_offset, z-b/2]

        corners = corners + [coordinates]
    return corners

field_tag_coords = tag_corners(tag_coords)
robo_space_pose = [0, 0, 0, 0, 0, 0]

pose_estimator = PNPPose(field_tag_coords, robo_space_pose, camera_matrix, dist)

pose_estimator.calculate_coords( np.array([[ 65.75, 420., 72.25],
    [ 72.25, 420., 72.25],
    [ 72.25, 420., 65.75],
    [ 65.75, 420., 65.75]]), np.array([[398.38015747, 154.20915222],
    [260.04067993, 150.67947388],
    [212.12528992 , 77.55628204],
    [387.06124878, 77.81692505]]))
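[Aside, not part of the diff] The test call above passes a 4x3 array and a 4x2 array, and cv2.solvePnP downstream expects (N,3) object points paired with (N,2) image points. A small shape check one could run before wiring up a harness like this, with the values copied from the call above:

import numpy as np

object_points = np.array([[65.75, 420.0, 72.25],
                          [72.25, 420.0, 72.25],
                          [72.25, 420.0, 65.75],
                          [65.75, 420.0, 65.75]], dtype=np.float32)
image_points = np.array([[398.38015747, 154.20915222],
                         [260.04067993, 150.67947388],
                         [212.12528992, 77.55628204],
                         [387.06124878, 77.81692505]], dtype=np.float32)

# solvePnP wants matched counts: (N, 3) object points against (N, 2) image points, N >= 4
assert object_points.shape == (4, 3) and image_points.shape == (4, 2)
print("shapes OK:", object_points.shape, image_points.shape)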
Binary file not shown.