diff --git a/FRC_Fiducial_Tracking/April_PNP_Live.py b/FRC_Fiducial_Tracking/April_PNP_Live.py
index c0828ec..fbba18e 100644
--- a/FRC_Fiducial_Tracking/April_PNP_Live.py
+++ b/FRC_Fiducial_Tracking/April_PNP_Live.py
@@ -156,29 +156,30 @@ while True:
     output = detector.detect(tagFrame)
 
     for det in output:
-        # points of the tag to be tracked
-        tag_points = np.array([[det.center[0], det.center[1]], [det.corners[0][0], det.corners[0][1]], [det.corners[1][0], det.corners[1][1]], [det.corners[2][0], det.corners[2][1]], [det.corners[3][0], det.corners[3][1]]], dtype=np.float32)
+        # if the confidence is less than 40% exclude the tag from being processed.
+        if det[4]>40:
+            # points of the tag to be tracked
+            tag_points = np.array([[det.center[0], det.center[1]], [det.corners[0][0], det.corners[0][1]], [det.corners[1][0], det.corners[1][1]], [det.corners[2][0], det.corners[2][1]], [det.corners[3][0], det.corners[3][1]]], dtype=np.float32)
 
-        ret,rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist, flags=0)
+            ret,rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist, flags=0)
 
-        # making translation and rotation vectors into a format good for networktables
-        tvecDist = tvecs.tolist()
-        rvecDeg = (rvecs*RAD2DEG).tolist()
-        for i in range(0,len(tvecDist)):
-            tvecDist[i] = float(tvecDist[i][0])
-        for i in range(0,len(rvecDeg)):
-            rvecDeg[i] = float(rvecDeg[i][0])
+            # making translation and rotation vectors into a format good for networktables
+            tvecDist = tvecs.tolist()
+            rvecDeg = (rvecs*RAD2DEG).tolist()
+            for i in range(0,len(tvecDist)):
+                tvecDist[i] = float(tvecDist[i][0])
+            for i in range(0,len(rvecDeg)):
+                rvecDeg[i] = float(rvecDeg[i][0])
 
-        totalDist = sqrt((tvecDist[0]**2)+(tvecDist[1]**2)+(tvecDist[2]**2))
+            totalDist = sqrt((tvecDist[0]**2)+(tvecDist[1]**2)+(tvecDist[2]**2))
 
-        # only show display if you use --display for argparse
-        if args.display:
-            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, camera_matrix, dist)
-            image = display_features(image, imgpts, totalDist)
+            # only show display if you use --display for argparse
+            if args.display:
+                imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, camera_matrix, dist)
+                image = display_features(image, imgpts, totalDist)
 
 
-        data_array.append([det.tag_id, tvecDist, rvecDeg, totalDist])
-
+            data_array.append([det.tag_id, tvecDist, rvecDeg, totalDist])
 
     for i in range(len(data_array)):
        orderVal = 0
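
For reference, here is a minimal standalone sketch of the per-tag path this hunk introduces: skip low-confidence detections, solve the tag pose with `cv2.solvePnP`, flatten the vectors for NetworkTables, and compute the straight-line distance. The names `process_detection`, `det_confidence`, and `CONFIDENCE_THRESHOLD` are hypothetical and used only for illustration; `objp`, `camera_matrix`, `dist`, and `RAD2DEG` stand in for the values already defined elsewhere in `April_PNP_Live.py`.

```python
import numpy as np
import cv2

RAD2DEG = 180.0 / np.pi

# Hypothetical name; the hunk hard-codes the value 40 in `if det[4]>40`.
CONFIDENCE_THRESHOLD = 40

def process_detection(det_confidence, tag_points, objp, camera_matrix, dist):
    """Return (tvecDist, rvecDeg, totalDist) for one tag, or None if the
    detection confidence does not exceed the threshold."""
    if det_confidence <= CONFIDENCE_THRESHOLD:
        return None

    # Solve for the tag pose from the 5 tracked image points (center + 4 corners).
    ret, rvecs, tvecs = cv2.solvePnP(objp, tag_points, camera_matrix, dist, flags=0)
    if not ret:
        return None

    # Flatten the column vectors into plain float lists for NetworkTables.
    tvecDist = [float(v) for v in tvecs.ravel()]
    rvecDeg = [float(v) for v in (rvecs * RAD2DEG).ravel()]

    # Straight-line distance from the camera to the tag.
    totalDist = float(np.sqrt(sum(v ** 2 for v in tvecDist)))
    return tvecDist, rvecDeg, totalDist
```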