-
Notifications
You must be signed in to change notification settings - Fork 1
/
realsense_basic.py
110 lines (89 loc) · 3.3 KB
/
realsense_basic.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import pyrealsense2 as rs
import numpy as np
import cv2
from config import image_shape
# Custom data
# Manual calibration constants used by Camera.getXYZ (calibrated 2020-06-30,
# per the comment in getXYZ). These are NOT the camera's reported intrinsics.
# Presumably millimeters-per-pixel over a 640 px wide image — TODO confirm units.
scale = 170 / 640
# Effective focal length used for the pinhole back-projection in getXYZ.
focus = 150
# Frame rate (frames per second) requested for both color and depth streams.
fps = 30
class Camera():
    """Thin wrapper around an Intel RealSense pipeline.

    Streams color (bgr8) and depth (z16) at the module-level ``image_shape``
    and ``fps``, with depth aligned to the color stream.

    Usage:
        cam = Camera()
        color_image, depth_image = cam.read()
        xyz = cam.getXYZ(x, y, d)
        cam.stop()
    """

    def __init__(self):
        self.pipeline = None
        self.aligned = None
        self.image_shape = image_shape
        # Configure depth and color streams.
        # NOTE(review): enable_stream expects (width, height); this passes
        # image_shape[0], image_shape[1] — assumes image_shape is
        # (width, height). TODO confirm against config.py.
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.color, image_shape[0], image_shape[1],
                             rs.format.bgr8, fps)
        config.enable_stream(rs.stream.depth, image_shape[0], image_shape[1],
                             rs.format.z16, fps)
        # Align depth frames onto the color stream's viewpoint.
        self.aligned = rs.align(rs.stream.color)
        # Start streaming
        self.pipeline.start(config)
        print("Start Camera")

    def read(self):
        """Read one aligned frame pair.

        Returns:
            (color_image, depth_image) as numpy arrays, or (None, None)
            when either frame is missing (alignment can drop frames).
        """
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.aligned.process(frames)
        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()
        # Guard against missing frames: without this check, get_data() on an
        # invalid frame crashes, and the caller's `if color_image is None`
        # guard could never fire.
        if not color_frame or not depth_frame:
            return None, None
        color_image = np.asanyarray(color_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        return color_image, depth_image

    def readWithIntrinsics(self):
        """Like read(), but also captures the depth stream intrinsics.

        NOTE(review): the original author marked this as not working; kept
        for reference. Intended to be called before using the (commented-out)
        rs2_deproject_pixel_to_point path in getXYZ.
        """
        frames = self.pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        # color_frame = frames.get_color_frame()
        # Save intrinsics of the (unaligned) depth stream.
        self.depth_intrinsics = rs.video_stream_profile(depth_frame.profile).get_intrinsics()
        # Convert images to numpy arrays
        aligned_frames = self.aligned.process(frames)
        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()
        if not color_frame or not depth_frame:
            return None, None
        color_image = np.asanyarray(color_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        return color_image, depth_image

    def getXYZ(self, x, y, d):
        """Back-project image pixel (x, y) with depth d to real-world XYZ.

        Uses the hand-calibrated ``scale``/``focus`` constants
        (calibrated 2020-06-30) in a pinhole model: the pixel offset from
        the image center is scaled, then multiplied by depth over focal
        length. Units follow the units of ``d`` — presumably depth sensor
        units (TODO confirm).
        """
        # Centered camera-plane coordinates; note the axis swap and the
        # sign flip on cy from the calibration.
        cy = (x - image_shape[1] / 2) * -scale
        cx = (y - image_shape[0] / 2) * scale
        z = d
        x = cx * z / focus
        y = cy * z / focus
        # The built-in deprojection was found to give wrong results here;
        # it would require readWithIntrinsics() to have been called first.
        # return rs.rs2_deproject_pixel_to_point(self.depth_intrinsics, [x, y], d)
        return [x, y, z]

    def stop(self):
        """Stop the RealSense pipeline and release the device."""
        print("Stop camera")
        self.pipeline.stop()
if __name__ == "__main__":
    # Demo: show aligned depth (colormapped) next to the color image.
    camera = Camera()
    print("Enter q to exit")
    # Create the window once, outside the loop.
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    try:
        # Run until 'q' or Esc is pressed.
        while True:
            color_image, depth_image = camera.read()
            if color_image is None:
                continue
            # Depth is 16-bit; scale down (alpha=0.3) into 8-bit before
            # applying the JET colormap for display.
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.3),
                cv2.COLORMAP_JET)
            cv2.imshow('RealSense', np.hstack((depth_colormap, color_image)))
            key = cv2.waitKey(10)
            if key & 0xFF == ord('q') or key == 27:  # 27 == Esc
                cv2.destroyAllWindows()
                break
    finally:
        # Always release the pipeline, even if the loop raises —
        # otherwise the device is left streaming.
        camera.stop()