Commit 9592cb01 authored by Inès EL HADRI

merge cam_driver and cam_vision

parent 160556fa
@@ -2,3 +2,4 @@
 **/build
 **/install
 **/log
+**/__pycache__
\ No newline at end of file
@@ -45,7 +45,7 @@ install(DIRECTORY
 # Python scripts
 install( PROGRAMS
-  src/cam_driver
+  src/cam_driver.py
   src/cam_vision
   src/reactive_move
   src/scan2point_cloud
...
@@ -45,9 +45,6 @@ launch:
       from: '/cam_rgb'
       to: '/camera/image_raw'
-- executable:
-    cmd: gnome-terminal --tab -e 'ros2 run teleop_twist_keyboard teleop_twist_keyboard --ros-args --remap turtle1/cmd_vel:=/cmd_vel'
 # Launch a SLAM algorithm
 - include:
     file: "$(find-pkg-share slam_toolbox)/launch/online_sync_launch.py"
\ No newline at end of file
@@ -20,12 +20,6 @@ launch:
       from: '/cmd_vel'
       to: '/multi/cmd_nav'
-# Publish camera views
-- node:
-    pkg: "grp_astro"
-    exec: "cam_driver"
-    name: "cam_driver"
 # Publish if there is a bottle in the field of view
 - node:
     pkg: "grp_astro"
...
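Since cam_vision now inherits the camera handling from cam_driver, the launch files only need to start the single cam_vision node. A rough Python-launch equivalent of what the YAML files now do (a sketch only; the file name and node list are assumptions, not the repository's actual launch files):

# hypothetical_cam_vision_launch.py -- illustrative only, the project uses YAML launch files
from launch import LaunchDescription
from launch_ros.actions import Node


def generate_launch_description():
    return LaunchDescription([
        Node(
            package='grp_astro',       # package name taken from the diff
            executable='cam_vision',   # single node: camera driver + bottle detection
            name='cam_vision',
        ),
    ])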
@@ -14,29 +14,38 @@ class CameraDriver(Node):
     def __init__(self, name="cam_driver", timerFreq = 1/60.0):
         super().__init__(name) # Create the node
 
-        # Initialize publishers
+        # Initialize publishers:
+        self.init_publishers(timerFreq)
+
+        # Initialize camera
+        self._init_camera()
+
+    def init_publishers(self, timerFreq : float):
+        """
+        Initialize Publishers
+        """
         self._rgb_publisher = self.create_publisher(Image, 'cam_rgb', 10)
         self._depth_publisher = self.create_publisher(Image, 'cam_depth', 10)
-        self._depth_dist_publisher = self.create_publisher(Float32MultiArray, 'cam_depth_dist', 10)
         self._infra_publisher_1 = self.create_publisher(Image, 'cam_infra_1',10)
         self._infra_publisher_2 = self.create_publisher(Image, 'cam_infra_2',10)
 
         # Initialize a clock for the publisher
         self.create_timer(timerFreq, self.publish_imgs)
 
-        # ------------------------------ Initialize camera ------------------------------ #
+    def _init_camera(self):
+        """
+        Initialize camera
+        """
         ## Configure depth and color streams
         self._pipeline = rs.pipeline()
         self._config = rs.config()
         self._colorizer = rs.colorizer()
-        self._align_to = rs.stream.depth
+        self._align_to = rs.stream.color
         self._align = rs.align(self._align_to)
         self._color_info=(0, 0, 255)
 
         ## Get device product line for setting a supporting resolution
         pipeline_wrapper = rs.pipeline_wrapper(self._pipeline)
         pipeline_profile = self._config.resolve(pipeline_wrapper)
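Changing self._align_to from rs.stream.depth to rs.stream.color means the depth frame is reprojected into the color camera's image plane, so a pixel found in the RGB image can be queried directly in the aligned depth frame. A self-contained sketch of that pattern (stream resolution and frame rate are assumptions):

import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)  # assumed profile
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)   # assumed profile
pipeline.start(config)

align = rs.align(rs.stream.color)   # warp depth onto the color image
try:
    frames = align.process(pipeline.wait_for_frames())
    color_frame = frames.get_color_frame()
    depth_frame = frames.get_depth_frame()
    if color_frame and depth_frame:
        # (424, 240) is the center of the color image; distance is returned in meters
        print(depth_frame.get_distance(424, 240))
finally:
    pipeline.stop()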
@@ -63,36 +72,42 @@ class CameraDriver(Node):
         ## Start the acquisition
         self._pipeline.start(self._config)
 
-    def read_imgs(self):
-        """lire et traduire images camera"""
+    def _update_imgs(self):
+        """
+        Update self images parameters
+        self._color_frame, self._depth_dist_frame, self._infra_frame_1, self._infra_frame_2
+        """
         # Get frames
         frames = self._pipeline.wait_for_frames()
 
         # Split frames
-        color_frame = frames.first(rs.stream.color)
         aligned_frames = self._align.process(frames)
-        depth_dist_frame = aligned_frames.get_depth_frame()
-        #color_frame = aligned_frames.get_color_frame()
-        infra_frame_1 = frames.get_infrared_frame(1)
-        infra_frame_2 = frames.get_infrared_frame(2)
+        #self._color_frame = frames.first(rs.stream.color)
+        self._color_frame = aligned_frames.get_color_frame()
+        self._depth_dist_frame = aligned_frames.get_depth_frame()
+        self._infra_frame_1 = frames.get_infrared_frame(1)
+        self._infra_frame_2 = frames.get_infrared_frame(2)
+
+        return None if (self._color_frame and self._depth_dist_frame and self._infra_frame_1 and self._infra_frame_2) is None else 1
 
-        if not (depth_dist_frame and aligned_frames and color_frame and infra_frame_1 and infra_frame_2):
+    def read_imgs(self):
+        """
+        Read and convert images to ROS - only to publish ros images
+        """
+        self._update_imgs()
+
+        if not (self._depth_dist_frame and self._color_frame and self._infra_frame_1 and self._infra_frame_2):
             return
 
         # Convert images to numpy arrays
-        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
-        depth_image = np.asanyarray(depth_dist_frame.get_data())
-        depth_dist = np.zeros((848, 480))
-        for y in range(480):
-            for x in range(848):
-                depth = depth_dist_frame.get_distance(x, y)
-                dx ,dy, dz = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], depth)
-                depth_dist[x, y] = math.sqrt(dx**2 + dy**2 + dz**2)
-        color_image = np.asanyarray(color_frame.get_data())
-        infra_image_1 = np.asanyarray(infra_frame_1.get_data())
-        infra_image_2 = np.asanyarray(infra_frame_2.get_data())
+        color_image = np.asanyarray(self._color_frame.get_data())
+        infra_image_1 = np.asanyarray(self._infra_frame_1.get_data())
+        infra_image_2 = np.asanyarray(self._infra_frame_2.get_data())
 
         # Apply colormap on depth and IR images (image must be converted to 8-bit per pixel first)
         depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
@@ -134,6 +149,9 @@ class CameraDriver(Node):
         return self._rgb_image, self._depth_image, self._infra1_image, self._infra2_image
 
     def publish_imgs(self):
+        """
+        publish ros images
+        """
         self.publish_rgb()
         self.publish_depth()
         self.publish_IR()
...
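The publish_rgb / publish_depth / publish_IR bodies are outside the visible hunks; from the publishers created above they presumably convert the numpy images with cv_bridge and publish them. A minimal sketch of that conversion step (field names and frame_id are assumptions):

from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()

def publish_rgb(node, color_image):
    """Convert a BGR numpy array to sensor_msgs/Image and publish it on 'cam_rgb'."""
    msg = bridge.cv2_to_imgmsg(color_image, encoding="bgr8")
    msg.header.stamp = node.get_clock().now().to_msg()
    msg.header.frame_id = "camera_link"   # assumed frame id
    node._rgb_publisher.publish(msg)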
@@ -4,30 +4,31 @@ from rclpy.node import Node
 from sensor_msgs.msg import Image
 from std_msgs.msg import String, Float32MultiArray
 import numpy as np
-import sys, cv2, pathlib, statistics
+import cv2, pathlib, statistics
 from cv_bridge import CvBridge
+from cam_driver import CameraDriver
+import pyrealsense2 as rs
+import math
 
-class CameraVision(Node):
+class CameraVision(CameraDriver):
 
     def __init__(self, name="cam_vision", timerFreq = 1/60.0):
-        super().__init__(name) # Create the node
+        super().__init__(name, timerFreq) # Create the node
 
-        # Initialiaze node variables
+        # Initialize node variables
         self._low = np.array([60-25, 50, 50])
         self._high = np.array([65+25, 255, 255])
         self._kernel = np.ones((5, 5), np.uint8)
         self._color_info = (0, 0, 255)
 
-        # Initialize subscribers
-        self.create_subscription(Image, 'cam_rgb', self.rgb_callback, 10)
-        self.create_subscription(Float32MultiArray, 'cam_depth_dist', self.depth_callback, 10)
-        # Initialize subcriber variables
-        self._rgb_img = None
-        self._depth_img = None
+        # Initialize a ROS2 Image to CV2 image converter
+        self._bridge = CvBridge()
 
+    def init_publishers(self, timerFreq: float):
         # Initialize publishers
         self._rgb_detection_publisher = self.create_publisher(Image, 'rgb_detection', 10)
         self._mask_detection_publisher = self.create_publisher(Image, 'mask_detection', 10)
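The core of the merge: CameraVision now derives from CameraDriver, so the vision node opens the RealSense pipeline itself instead of subscribing to cam_rgb and cam_depth_dist, and it only overrides init_publishers to expose its detection topics. A trimmed sketch of that structure (bodies reduced to the calls visible in the diff, camera and image handling stubbed out):

from rclpy.node import Node
from sensor_msgs.msg import Image

class CameraDriver(Node):
    def __init__(self, name="cam_driver", timerFreq=1/60.0):
        super().__init__(name)
        self.init_publishers(timerFreq)   # overridable hook
        self._init_camera()               # real code opens the RealSense pipeline

    def init_publishers(self, timerFreq: float):
        self._rgb_publisher = self.create_publisher(Image, 'cam_rgb', 10)
        self.create_timer(timerFreq, self.publish_imgs)

    def _init_camera(self):
        pass  # stub: configure and start the pyrealsense2 pipeline

    def publish_imgs(self):
        pass  # stub: convert frames and publish them

class CameraVision(CameraDriver):
    def __init__(self, name="cam_vision", timerFreq=1/60.0):
        super().__init__(name, timerFreq)  # camera and publishers come from the parent

    def init_publishers(self, timerFreq: float):
        # the override replaces the raw-image publishers with the detection topics
        self._rgb_detection_publisher = self.create_publisher(Image, 'rgb_detection', 10)
        self._mask_detection_publisher = self.create_publisher(Image, 'mask_detection', 10)
        self.create_timer(timerFreq, self.loop)

    def loop(self):
        pass  # stub: grab frames, threshold, match contours, publish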
@@ -37,20 +38,6 @@ class CameraVision(Node):
         # Initialize a clock for the publisher
         self.create_timer(timerFreq, self.loop)
 
-        # Initialize a ROS2 Image to CV2 image converter
-        self._bridge = CvBridge()
-
-    # Callbacks
-    def rgb_callback(self, img):
-        print('coucou')
-        self._rgb_img = self._bridge.imgmsg_to_cv2(img, desired_encoding='passthrough')
-
-    def depth_callback(self, img):
-        width = img.dim[0].size
-        height = img.dim[1].size
-        self._depth_img = np.array(img.data).reshape((width, height))
-
     def publish_msgs(self, image, mask):
         # Convert cv2 images to ROS2 RGB Image20
@@ -79,12 +66,17 @@ class CameraVision(Node):
     # Node internal loop
     def loop(self):
-        if self._rgb_img is None :
-            print('a')
+        """
+        Main loop to call
+        """
+        # update images
+        if self._update_imgs() is None :
             return
 
+        color_image = np.asanyarray(self._color_frame.get_data())
+
         # Convert image to HSV for easier thresholding
-        image = cv2.cvtColor(self._rgb_img, cv2.COLOR_BGR2HSV)
+        image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
 
         # Seuillage par colorimétrie
         mask = cv2.inRange(image, self._low, self._high)
@@ -114,7 +106,7 @@ class CameraVision(Node):
         findPt = False
         (x, y) = 0, 0
-        for c in contours:
+        for c in sorted_contours:
             # Compare our mask with our templates
             match = cv2.matchShapes(template_contour, c, 3, 0.0)
@@ -124,7 +116,7 @@ class CameraVision(Node):
             (x, y), radius = cv2.minEnclosingCircle(c)
 
             # If we find a correct match that is big enough, we publish data
-            if min(match, match2) < 0.3 and radius > 30 and y > 2.0/5 * 480:
+            if min(match, match2) < 0.3 and radius > 30:
                 findPt = True
                 break # Stop checking other contours
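For the detection test itself: each candidate contour from the color mask is compared to a bottle template with cv2.matchShapes, and the first big-enough match wins; with the frames now aligned to the color stream, the old y > 2.0/5 * 480 band restriction is dropped. A condensed single-template sketch of that check (the diff uses two templates via min(match, match2); thresholds mirror the visible values):

import cv2

def find_bottle(mask, template_contour, match_threshold=0.3, min_radius=30):
    """Return (x, y, radius) of the first contour matching the template, or None."""
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # biggest blobs first, like the sorted_contours used in the diff
    for c in sorted(contours, key=cv2.contourArea, reverse=True):
        match = cv2.matchShapes(template_contour, c, 3, 0.0)   # method 3: CONTOURS_MATCH_I3
        (x, y), radius = cv2.minEnclosingCircle(c)
        if match < match_threshold and radius > min_radius:
            return int(x), int(y), radius
    return None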
@@ -139,23 +131,19 @@ class CameraVision(Node):
         # Publish images and string message
         self.publish_msgs(image, mask)
 
-        # Depth Program
-        if self._depth_img is None :
-            return
-        depth = self._depth_img
-        print(depth[int(y), int(x)])
-        """
-        if findPt:
+        # Depth calculus
+        x,y = int(x), int(y)
+        color_intrin = self._color_frame.profile.as_video_stream_profile().intrinsics
             circle_points = []
             D_RADIUS = 10
-            for X in range(int(x)-D_RADIUS, int(x)+D_RADIUS):
-                for Y in range(int(y)-D_RADIUS, int(y)+D_RADIUS):
-                    if (X-int(x))**2 + (Y-int(y))**2 <= D_RADIUS:
-                        circle_points.append(depth[Y, X])
+            for X in range(x-D_RADIUS, x+D_RADIUS):
+                for Y in range(y-D_RADIUS, y+D_RADIUS):
+                    if (X-x)**2 + (Y-y)**2 <= D_RADIUS:
+                        depth = self._depth_dist_frame.get_distance(x, y)
+                        dx ,dy, dz = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], depth)
+                        circle_points.append(math.sqrt(dx**2 + dy**2 + dz**2))
             #publier moy(circle_points)
             print(statistics.mean(circle_points))
-        """
...
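The new depth step deprojects pixels around the detected point through the color intrinsics and averages their Euclidean distance. The same computation as a standalone helper (a sketch; unlike the diff it samples each pixel (X, Y) rather than the fixed center (x, y) and compares against the squared radius):

import math
import statistics
import pyrealsense2 as rs

def mean_distance(depth_frame, intrinsics, x, y, d_radius=10):
    """Average 3D distance (meters) over the pixels inside a disc around (x, y)."""
    samples = []
    for X in range(x - d_radius, x + d_radius):
        for Y in range(y - d_radius, y + d_radius):
            if (X - x) ** 2 + (Y - y) ** 2 <= d_radius ** 2:
                depth = depth_frame.get_distance(X, Y)
                dx, dy, dz = rs.rs2_deproject_pixel_to_point(intrinsics, [X, Y], depth)
                samples.append(math.sqrt(dx**2 + dy**2 + dz**2))
    return statistics.mean(samples) if samples else 0.0

# Usage with an aligned frameset:
#   intrin = color_frame.profile.as_video_stream_profile().intrinsics
#   print(mean_distance(depth_frame, intrin, x, y))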