diff --git a/mycobot_280/mycobot_280/camera_calibration/camera_calibration.py b/mycobot_280/mycobot_280/camera_calibration/camera_calibration.py
index 4162cba..3f37039 100644
--- a/mycobot_280/mycobot_280/camera_calibration/camera_calibration.py
+++ b/mycobot_280/mycobot_280/camera_calibration/camera_calibration.py
@@ -4,11 +4,23 @@
 import numpy as np
 import json
 import time
+import os
 from scipy.linalg import svd
 from pymycobot import *
+from marker_utils import *
+import shutil
+import glob
 
-mc = MyCobot("/dev/ttyUSB0")  # set the serial port
+ports = glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
+print(ports)
+if ports:
+    arm_port = ports[0]
+else:
+    raise Exception("No MyCobot device found")
+
+
+mc = MyCobot(port=arm_port)  # set the serial port
 
 np.set_printoptions(suppress=True, formatter={'float_kind': '{:.2f}'.format})
@@ -29,6 +41,9 @@ def __init__(self, camera_id, marker_size, mtx, dist):
         # Initialize EyesInHand_matrix to None or load from a document if available
         self.EyesInHand_matrix = None
+        file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+        self.matrix_file_path = os.path.join(file_dir, "config", "EyesInHand_matrix.json")
+        self.load_matrix()
 
     def save_matrix(self, filename="EyesInHand_matrix.json"):
@@ -36,6 +51,13 @@ def save_matrix(self, filename="EyesInHand_matrix.json"):
         if self.EyesInHand_matrix is not None:
             with open(filename, 'w') as f:
                 json.dump(self.EyesInHand_matrix.tolist(), f)
+
+            try:
+                # Copy the saved file to the configured destination path
+                shutil.copy(filename, self.matrix_file_path)
+                print(f"File copied to {self.matrix_file_path}")
+            except IOError as e:
+                print(f"Failed to copy file: {e}")
 
     def load_matrix(self, filename="EyesInHand_matrix.json"):
         # Load the EyesInHand_matrix from a JSON file, if it exists
diff --git a/mycobot_280/mycobot_280/camera_calibration/marker_utils.py b/mycobot_280/mycobot_280/camera_calibration/marker_utils.py
new file mode 100644
index 0000000..21c325b
--- /dev/null
+++ b/mycobot_280/mycobot_280/camera_calibration/marker_utils.py
@@ -0,0 +1,59 @@
+import cv2
+import numpy as np
+import typing as T
+from numpy.typing import NDArray, ArrayLike
+
+
+class MarkerInfo(T.TypedDict):
+    corners: np.ndarray
+    tvec: np.ndarray
+    rvec: np.ndarray
+    num_id: int
+
+
+def solve_marker_pnp(corners: NDArray, marker_size: int, mtx: NDArray, dist: NDArray):
+    """
+    Estimate the rvec and tvec for each set of marker corners detected by:
+        corners, ids, rejectedImgPoints = detector.detectMarkers(image)
+    corners - array of detected corners for each detected marker in the image
+    marker_size - side length of the detected markers
+    mtx - camera intrinsic matrix
+    dist - camera distortion coefficients
+    RETURN arrays of rvecs and tvecs (replacement for the removed
+    cv2.aruco.estimatePoseSingleMarkers())
+    """
+    marker_points = np.array(
+        [
+            [-marker_size / 2, marker_size / 2, 0],
+            [marker_size / 2, marker_size / 2, 0],
+            [marker_size / 2, -marker_size / 2, 0],
+            [-marker_size / 2, -marker_size / 2, 0],
+        ],
+        dtype=np.float32,
+    )
+    rvecs = []
+    tvecs = []
+    for corner in corners:
+        retval, rvec, tvec = cv2.solvePnP(
+            marker_points,
+            corner,
+            mtx,
+            dist,
+            flags=cv2.SOLVEPNP_IPPE_SQUARE,
+        )
+        if retval:
+            rvecs.append(rvec)
+            tvecs.append(tvec)
+
+    rvecs = np.array(rvecs)  # type: ignore
+    tvecs = np.array(tvecs)  # type: ignore
+    return rvecs, tvecs
+
+
+def draw_marker(frame: np.ndarray, corners, tvecs, rvecs, ids, mtx, dist) -> None:
+    # cv2.aruco.drawDetectedMarkers(frame, corners, None, borderColor=(0, 255, 0))
+    cv2.aruco.drawDetectedMarkers(frame, corners, ids, borderColor=(0, 200, 200))
+    for i in range(len(ids)):
+        corner, tvec, rvec, marker_id = corners[i], tvecs[i], rvecs[i], ids[i]
+        cv2.drawFrameAxes(frame, mtx, dist, rvec, tvec, 60, 2)
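
For context, a minimal usage sketch of the new marker_utils helpers (not part of this patch). It assumes an OpenCV >= 4.7 ArUco detector, the DICT_6X6_250 dictionary, a 32 mm marker, and placeholder camera intrinsics; all of these are illustrative choices, not values taken from this repository.

import cv2
import numpy as np
from marker_utils import solve_marker_pnp, draw_marker

# Placeholder intrinsics -- replace with the calibrated mtx/dist of the real camera.
mtx = np.array([[600.0, 0.0, 320.0],
                [0.0, 600.0, 240.0],
                [0.0, 0.0, 1.0]])
dist = np.zeros(5)

detector = cv2.aruco.ArucoDetector(cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250))

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
cap.release()
if ok:
    corners, ids, _ = detector.detectMarkers(frame)
    if ids is not None:
        # marker_size sets the unit of the returned tvecs (here: mm for a 32 mm marker).
        rvecs, tvecs = solve_marker_pnp(np.array(corners), 32, mtx, dist)
        draw_marker(frame, corners, tvecs, rvecs, ids, mtx, dist)
        cv2.imshow("markers", frame)
        cv2.waitKey(0)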