Commit ac5486be authored by Inès EL HADRI

Add OpenCV visualization to camera

parent d86fea75
#!/usr/bin/env python3
## Doc: https://dev.intelrealsense.com/docs/python2
###############################################
## Open CV and Numpy integration             ##
###############################################
import pyrealsense2 as rs
import signal, time, numpy as np
import sys, cv2, rclpy
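# pyrealsense2: Intel RealSense SDK bindings; cv2: OpenCV; rclpy: ROS 2 client library.
# Note: rclpy is imported but not used in this standalone viewer yet.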
# Configure depth and color streams
pipeline = rs.pipeline()
...@@ -17,5 +20,75 @@ device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
print( f"Connect: {device_product_line}" )
# Check that the device exposes an RGB color sensor
found_rgb = False
for s in device.sensors:
    print( "Name:" + s.get_info(rs.camera_info.name) )
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
if not (found_rgb):
    print("RGB camera required !!!")
    exit(0)

config.enable_stream(rs.stream.color, width=848, height=480, format=rs.format.bgr8, framerate=60)
config.enable_stream(rs.stream.depth, width=848, height=480, format=rs.format.z16, framerate=60)
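# Note: z16 depth frames hold 16-bit values in device-specific depth units
# (typically 1 mm). If metric distances were needed, the scale could be
# queried from the SDK, e.g. (illustrative, not used below):
#   depth_scale = device.first_depth_sensor().get_depth_scale()
#   distance_m = depth_value * depth_scale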
# Capture ctrl-c event
isOk= True
def signalInterruption(signum, frame):
    global isOk
    print( "\nCtrl-c pressed" )
    isOk= False

signal.signal(signal.SIGINT, signalInterruption)
# Start streaming
pipeline.start(config)
count= 1
refTime= time.process_time()
freq= 60
sys.stdout.write("-")
while isOk:
    # Wait for a coherent pair of frames: depth and color
    frames = pipeline.wait_for_frames()
    color_frame = frames.first(rs.stream.color)
    depth_frame = frames.first(rs.stream.depth)
    if not (depth_frame and color_frame):
        continue

    # Convert images to numpy arrays
    depth_image = np.asanyarray(depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
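    # convertScaleAbs maps the 16-bit depth to 8-bit as min(|alpha * depth|, 255),
    # so with alpha=0.03 anything beyond roughly 8.5 m (assuming 1 mm depth units)
    # saturates at the far end of the JET colormap.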
    depth_colormap_dim = depth_colormap.shape
    color_colormap_dim = color_image.shape
    sys.stdout.write( f"\r- {color_colormap_dim} - {depth_colormap_dim} - ({round(freq)} fps)" )

    # Stack both images side by side
    images = np.hstack((color_image, depth_colormap))

    # Show images
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', images)
    cv2.waitKey(1)
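    # waitKey(1) also pumps the HighGUI event loop; without it the window would
    # never refresh. An optional variant (illustrative) could close on 'q':
    #   if cv2.waitKey(1) & 0xFF == ord('q'):
    #       isOk = False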
    # Frequency:
    if count == 10 :
        newTime= time.process_time()
        freq= 10/((newTime-refTime))
        refTime= newTime
        count= 0
    count+= 1
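    # Note: time.process_time() counts CPU time only, so this fps figure reflects
    # processing load rather than wall-clock frame rate; time.perf_counter()
    # could be substituted for a wall-clock estimate.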
# Stop streaming
print("\nEnding...")
pipeline.stop()
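# (Optional) cv2.destroyAllWindows() could be called here to close the preview window.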
\ No newline at end of file