b/src/UR_move.py |
|
|
# navigate the robot to the target 3D coordinates

import open3d as o3d
import math3d as m3d
import numpy as np  # used throughout but not guaranteed by the wildcard imports below
import URBasic
import time
# read_trajectory and rotation_align (used below) come in via these wildcard imports
from utils.pose_conversion import *
from utils.trajectory_io import *
import pickle
from utils.triangulation import *

from subject_info import SUBJECT_NAME, SCAN_POSE
import argparse

parser = argparse.ArgumentParser(description='Compute target')
parser.add_argument('--pose_model', type=str, default='ViTPose_large', help='pose model')
args = parser.parse_args()
POSE_MODEL = args.pose_model
|
|
folder_path = 'src/data/' + SUBJECT_NAME + '/' + SCAN_POSE + '/'
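# expected layout under folder_path (everything read below): intrinsics/*.pickle,
# color_images/, depth_images/, odometry.log, <POSE_MODEL>/final_target.pickle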
|
|
# read intrinsics
with open(folder_path + 'intrinsics/cam_1_intrinsics.pickle', 'rb') as f:
    cam1_intr = pickle.load(f)
print("cam1_intr: \n", cam1_intr)

with open(folder_path + 'intrinsics/cam_2_intrinsics.pickle', 'rb') as f:
    cam2_intr = pickle.load(f)
print("cam2_intr: \n", cam2_intr)
|
|
# read extrinsics
camera_poses = read_trajectory(folder_path + "odometry.log")
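# NOTE (assumption): odometry.log is in the Open3D/Redwood trajectory format; each
# entry's .pose is a 4x4 camera-to-world matrix, which is why it is inverted to a
# world-to-camera extrinsic when integrating below.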
|
|
# TSDF volume
volume = o3d.pipelines.integration.ScalableTSDFVolume(
    voxel_length=1 / 512.0,
    sdf_trunc=0.04,
    color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
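# NOTE (assumption): depths follow Open3D's default metric convention, so
# voxel_length = 1/512 m (~2 mm voxels) and sdf_trunc = 0.04 m (4 cm truncation band).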
|
|
# integrate the RGBD image from each view into the TSDF volume
for i in range(len(camera_poses)):
    print("Integrate {:d}-th image into the volume.".format(i))
    color = o3d.io.read_image(folder_path + "color_images/cam_{}.jpg".format(i+1))
    depth = o3d.io.read_image(folder_path + "depth_images/cam_{}.png".format(i+1))

    rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
        color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)

    # the first view comes from camera 1, all later views from camera 2
    cam_intr = cam1_intr if i == 0 else cam2_intr

    intr = o3d.camera.PinholeCameraIntrinsic(
        width=640,
        height=480,
        fx=cam_intr[0, 0],
        fy=cam_intr[1, 1],
        cx=cam_intr[0, 2],
        cy=cam_intr[1, 2]
    )

    # integrate() expects the world-to-camera extrinsic, hence the inverse
    volume.integrate(rgbd,
                     intr,
                     np.linalg.inv(camera_poses[i].pose))
|
|
# point cloud generation: extract the surface, downsample to 1 cm, estimate normals
pcd = volume.extract_point_cloud()
downpcd = pcd.voxel_down_sample(voxel_size=0.01)

downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
# o3d.visualization.draw_geometries([downpcd])
# o3d.visualization.draw_geometries([downpcd], point_show_normal=True)
coordinates = np.asarray(downpcd.points)
normals = np.asarray(downpcd.normals)
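# NOTE: Open3D's estimate_normals does not guarantee a consistent normal
# orientation; the alignment step below assumes the stored normal points outward
# from the surface (it is negated before aligning the tool z-axis).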
|
|
# UR3e configuration
ROBOT_IP = '169.254.147.11'  # real robot IP
ACCELERATION = 0.5  # robot acceleration
VELOCITY = 0.5  # robot speed value

robot_start_position = (np.radians(-99.73), np.radians(-171.50), np.radians(85.98),
                        np.radians(-1.81), np.radians(90.23), np.radians(221.59))  # front

# add a waypoint to avoid collision
robot_waypoint = (np.radians(98.57), np.radians(-113.38), np.radians(-38.86),
                  np.radians(-90.), np.radians(91.28), np.radians(155.32))
|
|
# initialise robot with URBasic
print("initialising robot")
robotModel = URBasic.robotModel.RobotModel()
robot = URBasic.urScriptExt.UrScriptExt(host=ROBOT_IP, robotModel=robotModel)

robot.reset_error()
print("robot initialised")
time.sleep(1)
robot.init_realtime_control()
time.sleep(1)
robot.movej(q=robot_start_position, a=ACCELERATION, v=VELOCITY)
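# NOTE (assumption): reset_error() clears any protective stop and
# init_realtime_control() starts URBasic's realtime-control loop; each is given a
# second to settle before the first motion command.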
|
|
# load the triangulated 3D target points for the selected pose model
with open(folder_path + POSE_MODEL + '/final_target.pickle', 'rb') as f:
    X_targets = pickle.load(f)
|
|
for i, X_target in enumerate(X_targets):
    print("========================================")
    print(f"{i}-th target point: {X_target.squeeze()}")

    # snap the target estimate to the nearest reconstructed surface point,
    # matching on the x-y coordinates only
    init_estimate = X_target.squeeze()  # an initial estimate of the target point
    idx = np.argmin(np.square(coordinates[:, 0:2] - init_estimate[0:2]).sum(axis=1))
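    # NOTE (assumption): matching in x-y only lets the reconstructed surface supply
    # the z coordinate (and the normal), rather than trusting the triangulated depth.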
|
|
    t_target_base = coordinates[idx]
    target_normal_base = normals[idx]

    print('closest point in base frame:', t_target_base)
    print('closest point normal in base frame:', target_normal_base)
|
|
    TCP_6d_pose_base = robot.get_actual_tcp_pose()
    print("robot pose: ", TCP_6d_pose_base)

    # align the TCP z-axis with the target surface normal so the tool points at the target
    T_tcp_base = np.asarray(m3d.Transform(TCP_6d_pose_base).get_matrix())  # 4x4 matrix
    R_tcp_base = T_tcp_base[0:3, 0:3]
    R_base_tcp = np.linalg.inv(R_tcp_base)

    # express the surface normal in the TCP frame
    target_normal_tcp = R_base_tcp @ target_normal_base.reshape(-1, 1)
|
|
    # rotation from the TCP z-axis (0, 0, 1) to the negated target normal,
    # expressed in the TCP frame, so the tool faces into the surface
    R_target_tcp = rotation_align(np.array([0, 0, 1]), -target_normal_tcp.squeeze())
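    # NOTE (assumption): rotation_align(a, b) returns the 3x3 rotation taking unit
    # vector a onto unit vector b, e.g. R = I + [v]_x + [v]_x^2 / (1 + a.b) with
    # v = a x b (undefined when a == -b); it comes from the utils wildcard imports.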
|
|
    # compose back into the base frame and assemble the full 4x4 target pose
    R_target_base = R_tcp_base @ R_target_tcp
    T_target_base = np.vstack([np.hstack([R_target_base, t_target_base.reshape(-1, 1)]),
                               np.array([0, 0, 0, 1])])

    target_6d_pose_base = m3d.Transform(T_target_base).get_pose_vector()
    print("Final 6d pose base: ", target_6d_pose_base)
|
|
    # approach via the waypoint, then move to the computed target pose
    robot.movej(q=robot_waypoint, a=ACCELERATION, v=VELOCITY)
    robot.movej(pose=target_6d_pose_base, a=ACCELERATION, v=VELOCITY)

    # dwell at the target, then retreat the way we came
    time.sleep(3)
    robot.movej(q=robot_waypoint, a=ACCELERATION, v=VELOCITY)
    robot.movej(q=robot_start_position, a=ACCELERATION, v=VELOCITY)
|
|
robot.close()

exit(0)