Fixed pose estimation so the center of the tag is now being tracked. The translation vector unit conversion is no longer needed.

This commit is contained in:
Tyler Jacques 2022-10-05 04:37:13 +00:00
parent 31bf5aadcd
commit e182f19700
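What the commit message above means in practice: the 3D object points are now defined around the tag center in units of the tag size (b = 6.5), so cv2.solvePnP returns a translation straight to the tag center already in those units, and the old TVEC2IN scale factor collapses to 1. A minimal sketch of that relationship; the camera matrix and detected points below are made-up placeholders, only objp matches the commit:

import cv2
import numpy as np

b = 6.5  # tag edge length; tvec comes out in the same units as these points
objp = np.array([[0, 0, 0],                         # tag center
                 [ b/2,  b/2, 0], [-b/2,  b/2, 0],  # the four corners around it
                 [-b/2, -b/2, 0], [ b/2, -b/2, 0]], dtype=np.float32)

# hypothetical 2D detections: center first, then corners, in pixels
tag_points = np.array([[320, 240],
                       [360, 280], [280, 280],
                       [280, 200], [360, 200]], dtype=np.float32)
camera_matrix = np.array([[600., 0., 320.],
                          [0., 600., 240.],
                          [0.,   0.,   1.]])
dist = np.zeros(5)

ret, rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist)
# tvecs is the camera-frame position of the tag center, already in tag units,
# so no extra TVEC2IN scaling is required afterwards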


@@ -18,7 +18,7 @@ from threading import Thread
# translation vector units to inches: tvec/71.22. This constant will differ
# according to your camera. Space an apriltag at intervals, note the distance
# in pixels and divide it by the real world distance
TVEC2IN = 1/71.22
TVEC2IN = 1
# Rotational vector radians to degrees
RAD2DEG = 180/pi
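The comment at the top of this hunk describes how the old constant was measured; a toy version of that arithmetic, where only the 71.22 comes from the source and the other numbers are invented:

# hypothetical calibration for the old corner-pixel objp: two tags placed a known
# real-world distance apart, compared against the raw solvePnP translation
real_separation_in = 10.0    # measured with a tape, made-up value
raw_separation = 712.2       # separation in raw translation units, made-up value
TVEC2IN = real_separation_in / raw_separation   # ~= 1/71.22
# with the new tag-centered objp already in inches, this factor becomes 1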
@@ -31,10 +31,11 @@ camera_matrix = np.array([[FOCAL_LEN_PIXELS, 0., 308.94165115],
[0., FOCAL_LEN_PIXELS, 221.9470321],
[0., 0.,1.]])
b=6.5
# 3d object array. The points of the 3d april tag that correspond to tag_points which we detect
objp = np.array([[240, 240, 0], [0, 0, 0], [480, 0, 0], [480, 480, 0], [0, 480, 0]], dtype=np.float32)
objp = np.array([[0,0,0], [b/2, b/2, 0], [-b/2, b/2, 0], [-b/2, -b/2, 0], [b/2, -b/2, 0]], dtype=np.float32)
# 2d axis array points for drawing cube overlay
axis = np.array([[0,0,0], [0,480,0], [480,480,0], [480,0,0], [0,0,-480], [0,480,-480], [480,480,-480], [480,0,-480]], dtype=np.float32)
axis = np.array([[b/2, b/2, 0], [-b/2, b/2, 0], [-b/2, -b/2, 0], [b/2, -b/2, 0], [b/2, b/2, -b], [-b/2, b/2, -b], [-b/2, -b/2, -b], [b/2, -b/2, -b]], dtype=np.float32)
# put your RoboRio IP here
NetworkTables.initialize(server="1234567890")
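The first row of the new objp is the tag center, so the matching 2D array has to start with the detected center. The line that builds tag_points is outside the hunks shown, but it presumably looks roughly like this (hypothetical reconstruction):

# hypothetical: 2D points ordered to match the new objp (center first, then corners)
tag_points = np.array([[det.center[0], det.center[1]],
                       [det.corners[0][0], det.corners[0][1]],
                       [det.corners[1][0], det.corners[1][1]],
                       [det.corners[2][0], det.corners[2][1]],
                       [det.corners[3][0], det.corners[3][1]]], dtype=np.float32)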
@@ -47,6 +48,8 @@ args = parser.parse_args()
FPS = 0
TARGET_ID = 1
# class for allocating a thread to only update the camera stream,
# the other thread is used for detection processing
class PiVid:
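The PiVid class referenced here dedicates one thread to pulling frames while the main thread runs detection; its body is not part of this diff, but the pattern is roughly the following, with cv2.VideoCapture standing in for the actual Pi camera code:

# rough stand-in for the threaded camera-stream pattern; not the actual PiVid body
class ThreadedStream:
    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.cap.read()
        self.stopped = False

    def start(self):
        Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        # camera thread: keep overwriting the latest frame
        while not self.stopped:
            self.grabbed, self.frame = self.cap.read()

    def read(self):
        # detection thread: grab whatever frame is newest
        return self.frame

    def stop(self):
        self.stopped = True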
@@ -95,13 +98,13 @@ def connectionListener(connected, info):
NetworkTables.addConnectionListener(connectionListener, immediateNotify=True)
# create overlay on camera feed
def display_features(image, imgpts):
def display_features(image, imgpts, totalDist):
# making red lines around fiducial
for i in range(0,4):
f = i+1
if f>3: f=0
cv2.line(image, (int(det.corners[i][0]), int(det.corners[i][1])), (int(det.corners[f][0]), int(det.corners[f][1])), (0,0,255), 3)
imgpts = np.int32(imgpts).reshape(-1,2)
# draw ground floor in green
image = cv2.drawContours(image, [imgpts[:4]],-1,(0,255,0),-3)
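The function signature above now takes totalDist, but no use of it appears in the hunks shown; presumably the distance is (or will be) drawn near the tag, along these lines (hypothetical):

# hypothetical use of the new totalDist argument inside display_features
image = cv2.putText(image, "dist: " + str(round(totalDist, 2)),
                    (int(det.center[0]), int(det.center[1]) - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2, cv2.LINE_AA)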
@@ -110,13 +113,15 @@ def display_features(image, imgpts):
image = cv2.line(image, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)
# draw top layer in red color
image = cv2.drawContours(image, [imgpts[4:]],-1,(0,0,255),3)
# image = cv2.putText(image, str((round(tvecDist[0], 4), round(tvecDist[1], 4), round(tvecDist[2], 4))), (int(det.center[0])+100,int(det.center[1])+100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2, cv2.LINE_AA)
# image = cv2.putText(image, str((round(rvecDeg[0], 4), round(rvecDeg[1], 4), round(rvecDeg[2], 4))), (int(det.center[0])+100,int(det.center[1])+100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2, cv2.LINE_AA)
return image
# setting up apriltag detection. Make sure this is OUTSIDE the loop next time
options = apriltag.DetectorOptions(families='tag36h11', border=1, nthreads=16,
quad_decimate=100.0, quad_blur=0.0, refine_edges=True,
options = apriltag.DetectorOptions(families='tag36h11', border=1, nthreads=4,
quad_decimate=2.0, quad_blur=0.0, refine_edges=True,
refine_decode=False, refine_pose=False, debug=False, quad_contours=True)
detector = apriltag.Detector()
detector = apriltag.Detector(options)
# main vision processing code
time.sleep(0.1)
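The detector is now actually constructed from these options (previously they were built and then ignored), with nthreads dropped from 16 to 4 and quad_decimate from 100 to 2, so frames are only lightly downsampled before quad detection. A rough usage sketch on a single grayscale frame, using only fields this file already relies on:

# rough usage of the configured detector on one grayscale frame
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
output = detector.detect(gray)
for det in output:
    # each detection carries the fields used elsewhere in this file
    print(det.tag_id, det.center, det.corners)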
@@ -139,7 +144,7 @@ while True:
ret,rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist, flags=0)
# making translation and rotation vectors into a format good for networktables
tvecDist = (tvecs*TVEC2IN).tolist()
tvecDist = tvecs.tolist()
rvecDeg = (rvecs*RAD2DEG).tolist()
for i in range(0,len(tvecDist)):
tvecDist[i] = float(tvecDist[i][0])
@@ -151,29 +156,36 @@ while True:
# only show display if you use --display for argparse
if args.display:
imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, camera_matrix, dist)
image = display_features(image, imgpts)
image = display_features(image, imgpts, totalDist)
data_array.append([tvecDist, rvecDeg, totalDist])
data_array.append([det.tag_id, tvecDist, rvecDeg, totalDist])
# writing data to networktables and ordering tags
target_detected = False
for i in range(len(data_array)):
orderVal = 0
for d in range(len(data_array)):
if data_array[d][2]>data_array[i][2] and d!=i and output[d].tag_id==output[i].tag_id:
orderVal += 1
vision_table.putNumber("tag"+str(det.tag_id)+"tvecX("+str(orderVal)+")", tvecDist[0])
vision_table.putNumber("tag"+str(det.tag_id)+"tvecY("+str(orderVal)+")", tvecDist[1])
vision_table.putNumber("tag"+str(det.tag_id)+"tvecZ"+str(orderVal)+")", tvecDist[2])
vision_table.putNumber("tag"+str(data_array[i][0])+"tvecX("+str(orderVal)+")", tvecDist[0])
vision_table.putNumber("tag"+str(data_array[i][0])+"tvecY("+str(orderVal)+")", tvecDist[1])
vision_table.putNumber("tag"+str(data_array[i][0])+"tvecZ("+str(orderVal)+")", tvecDist[2])
vision_table.putNumber("tag"+str(det.tag_id)+"rvecX("+str(orderVal)+")", rvecDeg[0])
vision_table.putNumber("tag"+str(det.tag_id)+"rvecY("+str(orderVal)+")", rvecDeg[1])
vision_table.putNumber("tag"+str(det.tag_id)+"rvecZ("+str(orderVal)+")", rvecDeg[2])
vision_table.putNumber("tag"+str(data_array[i][0])+"rvecX("+str(orderVal)+")", rvecDeg[0])
vision_table.putNumber("tag"+str(data_array[i][0])+"rvecY("+str(orderVal)+")", rvecDeg[1])
vision_table.putNumber("tag"+str(data_array[i][0])+"rvecZ("+str(orderVal)+")", rvecDeg[2])
if TARGET_ID == data_array[i][0]:
target_detected = True
vision_table.putBoolean("targetDetected", target_detected)
# Showing image. Use --display to show the image
if args.display:
image = cv2.putText(image, "FPS: "+str(round(FPS, 4)), (25,440), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2, cv2.LINE_AA)
image = cv2.putText(image, "FPS: "+str(round(FPS, 4)), (25,440), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2, cv2.LINE_AA)
cv2.imshow("Frame", image)
key = cv2.waitKey(1) & 0xFF
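totalDist is appended and published above but is computed outside the hunks shown in this commit; it is presumably the straight-line camera-to-tag distance taken from the translation vector, roughly:

# hypothetical reconstruction of totalDist: euclidean length of the translation
# vector, i.e. straight-line distance from the camera to the tag center
totalDist = float(np.linalg.norm(tvecs))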