Commit 9e82dc26 authored by Inès EL HADRI

add cam depth driver

parent a21ec2c5
@@ -2,8 +2,9 @@
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import Float32MultiArray, MultiArrayDimension, MultiArrayLayout
import pyrealsense2 as rs
import signal, time, numpy as np
import signal, time, numpy as np, math
import sys, cv2
from cv_bridge import CvBridge
@@ -16,6 +17,7 @@ class CameraDriver(Node):
# Initialize publishers
self._rgb_publisher = self.create_publisher(Image, 'cam_rgb', 10)
self._depth_publisher = self.create_publisher(Image, 'cam_depth', 10)
self._depth_dist_publisher = self.create_publisher(Float32MultiArray, 'cam_depth_dist', 10)
self._infra_publisher_1 = self.create_publisher(Image, 'cam_infra_1',10)
self._infra_publisher_2 = self.create_publisher(Image, 'cam_infra_2',10)
@@ -28,6 +30,12 @@ class CameraDriver(Node):
## Configure depth and color streams
self._pipeline = rs.pipeline()
self._config = rs.config()
self._colorizer = rs.colorizer()        # renders depth frames as color images for visualization
self._align_to = rs.stream.depth
self._align = rs.align(self._align_to)  # align the other streams onto the depth stream
self._color_info = (0, 0, 255)
## Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(self._pipeline)
@@ -63,17 +71,25 @@ class CameraDriver(Node):
# Split frames
color_frame = frames.first(rs.stream.color)
depth_frame = frames.first(rs.stream.depth)
aligned_frames = self._align.process(frames)
depth_dist_frame = aligned_frames.get_depth_frame()
#color_frame = aligned_frames.get_color_frame()
infra_frame_1 = frames.get_infrared_frame(1)
infra_frame_2 = frames.get_infrared_frame(2)
if not (depth_frame and color_frame and infra_frame_1 and infra_frame_2):
print("eh oh le truc y marche pas")
if not (depth_dist_frame and aligned_frames and color_frame and infra_frame_1 and infra_frame_2):
return
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
# Intrinsics of the aligned depth stream, needed to deproject pixels into 3D points
depth_intrin = depth_dist_frame.profile.as_video_stream_profile().intrinsics
depth_image = np.asanyarray(depth_dist_frame.get_data())
# Euclidean distance (in meters) from the camera to each pixel, stored as (width, height)
depth_dist = np.zeros((848, 480), dtype=np.float32)
for y in range(480):
    for x in range(848):
        depth = depth_dist_frame.get_distance(x, y)
        dx, dy, dz = rs.rs2_deproject_pixel_to_point(depth_intrin, [x, y], depth)
        depth_dist[x, y] = math.sqrt(dx**2 + dy**2 + dz**2)
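# Possible optimization (sketch, assuming the 848x480 depth stream used above and
# negligible lens distortion): the per-pixel loop makes ~407k get_distance()/deproject
# calls per frame; the same distance map can be computed in a single numpy pass:
#   depth_m = depth_image * depth_dist_frame.get_units()    # raw uint16 depth -> meters
#   ys, xs = np.indices(depth_m.shape)                       # (480, 848) pixel index grids
#   px = (xs - depth_intrin.ppx) / depth_intrin.fx * depth_m
#   py = (ys - depth_intrin.ppy) / depth_intrin.fy * depth_m
#   depth_dist = np.sqrt(px**2 + py**2 + depth_m**2).T       # transpose to the (x, y) layout used below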
color_image = np.asanyarray(color_frame.get_data())
infra_image_1 = np.asanyarray(infra_frame_1.get_data())
infra_image_2 = np.asanyarray(infra_frame_2.get_data())
@@ -89,10 +105,23 @@ class CameraDriver(Node):
self._rgb_image = bridge.cv2_to_imgmsg(color_image,"bgr8")
self._rgb_image.header.stamp = self.get_clock().now().to_msg()
self._rgb_image.header.frame_id = "image"
# - Depth Image
# - Depth Image Colormap
self._depth_image = bridge.cv2_to_imgmsg(depth_colormap, "bgr8")
self._depth_image.header.stamp = self._rgb_image.header.stamp
self._depth_image.header.frame_id = "depth"
# - Depth Image Values
val = Float32MultiArray()
val.layout.dim = [MultiArrayDimension(), MultiArrayDimension()]
val.layout.dim[0].label = 'width'
val.layout.dim[1].label = 'height'
val.layout.dim[0].size = 848
val.layout.dim[1].size = 480
# Row-major flatten of the (848, 480) array: pixel (x, y) lands at data[x * 480 + y]
val.data = depth_dist.reshape(848 * 480).astype(np.float32).tolist()
self._depth_dist = val
# - IR1 Image
self._infra1_image = bridge.cv2_to_imgmsg(infra_colormap_1,"bgr8")
self._infra1_image.header.stamp = self._rgb_image.header.stamp
@@ -125,6 +154,7 @@ class CameraDriver(Node):
"""
self._depth_publisher.publish(self._depth_image)
self._depth_dist_publisher.publish(self._depth_dist)
return 0
@@ -153,7 +183,7 @@ def main():
signal.signal(signal.SIGINT, signalInterruption)
rclpy.init()
rosNode = CameraDriver(timerFreq=1.0/120)
rosNode = CameraDriver()
isOk = True
while isOk:
rosNode.read_imgs()
@@ -4,10 +4,11 @@ from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import String, Float32MultiArray
import numpy as np
import sys, cv2, os, pathlib
import sys, cv2, pathlib, statistics
from cv_bridge import CvBridge
class CameraVision(Node):
def __init__(self, name="cam_vision", timerFreq = 1/60.0):
@@ -21,14 +22,17 @@ class CameraVision(Node):
# Initialize subscribers
self.create_subscription(Image, 'cam_rgb', self.rgb_callback, 10)
self.create_subscription(Float32MultiArray, 'cam_depth_dist', self.depth_callback, 10)
# Initialize subcriber variables
self._rgb_img = None
self._depth_img = None
# Initialize publishers
self._rgb_detection_publisher = self.create_publisher(Image, 'rgb_detection', 10)
self._mask_detection_publisher = self.create_publisher(Image, 'mask_detection', 10)
self._detection_publisher = self.create_publisher(String, 'detection', 10)
#self._depth_point_publisher = self.create_publisher(String, 'depth_point', 10)
# Initialize a clock for the publisher
self.create_timer(timerFreq, self.loop)
@@ -40,6 +44,12 @@ class CameraVision(Node):
def rgb_callback(self, img):
self._rgb_img = self._bridge.imgmsg_to_cv2(img, desired_encoding='passthrough')
def depth_callback(self, msg):
# 'cam_depth_dist' carries a flattened (width, height) array of distances in meters
width = msg.layout.dim[0].size
height = msg.layout.dim[1].size
self._depth_img = np.array(msg.data, dtype=np.float32).reshape((width, height))
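# Defensive variant (sketch): if the driver's resolution ever changes, the reshape
# above would raise; a size check could guard it:
#   if width * height != len(msg.data):
#       self.get_logger().warn('cam_depth_dist: unexpected data size, dropping message')
#       return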
def publish_msgs(self, image, mask):
# Convert cv2 images to ROS2 Image messages
@@ -63,9 +73,13 @@ class CameraVision(Node):
self._rgb_detection_publisher.publish(ros2_rgb_image)
self._mask_detection_publisher.publish(ros2_mask_image)
# Depth publish
#self._depth_point_publisher.publish(ros2_mask_image)
# Node internal loop
def loop(self):
if self._rgb_img is None:
    return
# Convert image to HSV for easier thresholding
@@ -96,6 +110,9 @@ class CameraVision(Node):
sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
findPt = False
x, y = 0, 0
for c in contours:
# Compare our mask with our templates
@@ -107,8 +124,13 @@ class CameraVision(Node):
# If we find a correct match that is big enough, we publish data
if min(match, match2) < 0.3 and radius > 30 and y > 2.0/5 * 480:
# print(f'Match : {min(match, match2)}')
findPt = True
break # Stop checking other contours
# point found
if findPt:
# Circle the bottle on the image
cv2.circle(image, (int(x), int(y)), int(radius), self._color_info, 2)
cv2.circle(image, (int(x), int(y)), 5, self._color_info, 10)
@@ -116,7 +138,28 @@ class CameraVision(Node):
# Publish images and string message
self.publish_msgs(image, mask)
break # Stop checking other contours
# Depth program: distance (in meters) at the detected point
if self._depth_img is None:
    return
depth = self._depth_img
# depth is indexed (x, y), matching the layout published by the camera driver
print(depth[int(x), int(y)])
"""
if findPt:
    circle_points = []
    D_RADIUS = 10
    for X in range(int(x) - D_RADIUS, int(x) + D_RADIUS):
        for Y in range(int(y) - D_RADIUS, int(y) + D_RADIUS):
            if (X - int(x))**2 + (Y - int(y))**2 <= D_RADIUS**2:
                circle_points.append(depth[X, Y])
    # publish mean(circle_points)
    print(statistics.mean(circle_points))
"""