Commit 9592cb01 authored by Inès EL HADRI

merge cam_driver and cam_vision

parent 160556fa
......@@ -2,3 +2,4 @@
**/build
**/install
**/log
**/__pycache__
\ No newline at end of file
......@@ -45,7 +45,7 @@ install(DIRECTORY
# Python scripts
install( PROGRAMS
src/cam_driver
src/cam_driver.py
src/cam_vision
src/reactive_move
src/scan2point_cloud
......
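The rename from src/cam_driver to src/cam_driver.py is what lets the vision script import the driver class as an ordinary Python module once both files are installed side by side. A minimal sketch of the resulting import, assuming the installed scripts end up in the same lib directory:

# Sketch: with the driver installed as cam_driver.py, the vision node can
# subclass it directly instead of subscribing to its topics.
from cam_driver import CameraDriver  # resolves because the file now has a .py suffix

class CameraVision(CameraDriver):
    """Vision node built on top of the camera driver (body elided; see the diff below)."""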
......@@ -45,9 +45,6 @@ launch:
from: '/cam_rgb'
to: '/camera/image_raw'
- executable:
cmd: gnome-terminal --tab -e 'ros2 run teleop_twist_keyboard teleop_twist_keyboard --ros-args --remap turtle1/cmd_vel:=/cmd_vel'
# Launch a SLAM algorithm
- include:
file: "$(find-pkg-share slam_toolbox)/launch/online_sync_launch.py"
\ No newline at end of file
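For reference, the YAML above remaps /cam_rgb to /camera/image_raw and includes slam_toolbox's online_sync_launch.py. A hypothetical Python-launch equivalent of that include (not part of this repository) could look like:

# Hypothetical Python launch equivalent of the slam_toolbox include above.
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare

def generate_launch_description():
    slam = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            PathJoinSubstitution([
                FindPackageShare('slam_toolbox'), 'launch', 'online_sync_launch.py',
            ])
        )
    )
    return LaunchDescription([slam])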
......@@ -20,12 +20,6 @@ launch:
from: '/cmd_vel'
to: '/multi/cmd_nav'
# Publish camera views
- node:
pkg: "grp_astro"
exec: "cam_driver"
name: "cam_driver"
# Publish if there is a bottle in the field of view
- node:
pkg: "grp_astro"
......
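The cam_driver node entry is dropped here because the merged cam_vision node now reads the camera directly instead of consuming the driver's topics. For illustration only, the remaining entry corresponds to a launch_ros action along these lines (the executable name is assumed from the install list above):

# Hypothetical launch_ros equivalent of the remaining cam_vision node entry.
from launch_ros.actions import Node

cam_vision = Node(package='grp_astro', executable='cam_vision', name='cam_vision')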
......@@ -14,29 +14,38 @@ class CameraDriver(Node):
def __init__(self, name="cam_driver", timerFreq = 1/60.0):
super().__init__(name) # Create the node
# Initialize publishers
# Initialize publishers:
self.init_publishers(timerFreq)
# Initialize camera
self._init_camera()
def init_publishers(self, timerFreq : float):
"""
Initialize Publishers
"""
self._rgb_publisher = self.create_publisher(Image, 'cam_rgb', 10)
self._depth_publisher = self.create_publisher(Image, 'cam_depth', 10)
self._depth_dist_publisher = self.create_publisher(Float32MultiArray, 'cam_depth_dist', 10)
self._infra_publisher_1 = self.create_publisher(Image, 'cam_infra_1',10)
self._infra_publisher_2 = self.create_publisher(Image, 'cam_infra_2',10)
# Initialize a clock for the publisher
self.create_timer(timerFreq, self.publish_imgs)
# ------------------------------ Initialize camera ------------------------------ #
def _init_camera(self):
"""
Initialize camera
"""
## Configure depth and color streams
self._pipeline = rs.pipeline()
self._config = rs.config()
self._colorizer = rs.colorizer()
self._align_to = rs.stream.depth
self._align_to = rs.stream.color
self._align = rs.align(self._align_to)
self._color_info=(0, 0, 255)
## Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(self._pipeline)
pipeline_profile = self._config.resolve(pipeline_wrapper)
......@@ -63,36 +72,42 @@ class CameraDriver(Node):
## Start the acquisition
self._pipeline.start(self._config)
def read_imgs(self):
"""lire et traduire images camera"""
def _update_imgs(self):
"""
Update the cached camera frames:
self._color_frame, self._depth_dist_frame, self._infra_frame_1, self._infra_frame_2
"""
# Get frames
frames = self._pipeline.wait_for_frames()
# Split frames
color_frame = frames.first(rs.stream.color)
aligned_frames = self._align.process(frames)
depth_dist_frame = aligned_frames.get_depth_frame()
#color_frame = aligned_frames.get_color_frame()
infra_frame_1 = frames.get_infrared_frame(1)
infra_frame_2 = frames.get_infrared_frame(2)
#self._color_frame = frames.first(rs.stream.color)
self._color_frame = aligned_frames.get_color_frame()
self._depth_dist_frame = aligned_frames.get_depth_frame()
self._infra_frame_1 = frames.get_infrared_frame(1)
self._infra_frame_2 = frames.get_infrared_frame(2)
return None if not (self._color_frame and self._depth_dist_frame and self._infra_frame_1 and self._infra_frame_2) else 1
if not (depth_dist_frame and aligned_frames and color_frame and infra_frame_1 and infra_frame_2):
def read_imgs(self):
"""
Read the camera frames and convert them to ROS-ready images (used only for publishing)
"""
self._update_imgs()
if not (self._depth_dist_frame and self._color_frame and self._infra_frame_1 and self._infra_frame_2):
return
# Convert images to numpy arrays
color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
depth_image = np.asanyarray(depth_dist_frame.get_data())
depth_dist = np.zeros((848, 480))
for y in range(480):
for x in range(848):
depth = depth_dist_frame.get_distance(x, y)
dx ,dy, dz = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], depth)
depth_dist[x, y] = math.sqrt(dx**2 + dy**2 + dz**2)
color_image = np.asanyarray(color_frame.get_data())
infra_image_1 = np.asanyarray(infra_frame_1.get_data())
infra_image_2 = np.asanyarray(infra_frame_2.get_data())
color_image = np.asanyarray(self._color_frame.get_data())
infra_image_1 = np.asanyarray(self._infra_frame_1.get_data())
infra_image_2 = np.asanyarray(self._infra_frame_2.get_data())
# Apply colormap on depth and IR images (image must be converted to 8-bit per pixel first)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
......@@ -134,6 +149,9 @@ class CameraDriver(Node):
return self._rgb_image, self._depth_image, self._infra1_image, self._infra2_image
def publish_imgs(self):
"""
Publish the ROS images (RGB, depth and IR)
"""
self.publish_rgb()
self.publish_depth()
self.publish_IR()
......
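Both the removed per-pixel loop in read_imgs and the new vision code measure metric distance by deprojecting a pixel through the colour-stream intrinsics. A self-contained sketch of that pyrealsense2 pattern, assuming a RealSense device with its default depth and colour streams:

# Sketch: Euclidean distance to the scene point seen at pixel (x, y),
# using frames aligned to the colour stream as in _init_camera above.
import math
import pyrealsense2 as rs

pipeline = rs.pipeline()
pipeline.start()                                   # default config: depth + colour
align = rs.align(rs.stream.color)

frames = align.process(pipeline.wait_for_frames())
color_frame = frames.get_color_frame()
depth_frame = frames.get_depth_frame()

intrinsics = color_frame.profile.as_video_stream_profile().intrinsics
x, y = 424, 240                                    # example pixel (image centre for 848x480)
depth = depth_frame.get_distance(x, y)             # raw depth in metres
dx, dy, dz = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], depth)
distance = math.sqrt(dx**2 + dy**2 + dz**2)        # straight-line distance to the point

pipeline.stop()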
......@@ -4,30 +4,31 @@ from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import String, Float32MultiArray
import numpy as np
import sys, cv2, pathlib, statistics
import cv2, pathlib, statistics
from cv_bridge import CvBridge
from cam_driver import CameraDriver
import pyrealsense2 as rs
import math
class CameraVision(Node):
class CameraVision(CameraDriver):
def __init__(self, name="cam_vision", timerFreq = 1/60.0):
super().__init__(name) # Create the node
# Initialiaze node variables
super().__init__(name, timerFreq) # Create the node
# Initialize node variables
self._low = np.array([60-25, 50, 50])
self._high = np.array([65+25, 255, 255])
self._kernel = np.ones((5, 5), np.uint8)
self._color_info = (0, 0, 255)
# Initialize subscribers
self.create_subscription(Image, 'cam_rgb', self.rgb_callback, 10)
self.create_subscription(Float32MultiArray, 'cam_depth_dist', self.depth_callback, 10)
# Initialize subscriber variables
self._rgb_img = None
self._depth_img = None
# Initialize a ROS2 Image to CV2 image converter
self._bridge = CvBridge()
def init_publishers(self, timerFreq: float):
# Initialize publishers
self._rgb_detection_publisher = self.create_publisher(Image, 'rgb_detection', 10)
self._mask_detection_publisher = self.create_publisher(Image, 'mask_detection', 10)
......@@ -37,20 +38,6 @@ class CameraVision(Node):
# Initialize a clock for the publisher
self.create_timer(timerFreq, self.loop)
# Initialize a ROS2 Image to CV2 image converter
self._bridge = CvBridge()
# Callbacks
def rgb_callback(self, img):
print('coucou')
self._rgb_img = self._bridge.imgmsg_to_cv2(img, desired_encoding='passthrough')
def depth_callback(self, img):
width = img.dim[0].size
height = img.dim[1].size
self._depth_img = np.array(img.data).reshape((width, height))
def publish_msgs(self, image, mask):
# Convert cv2 images to ROS2 RGB Image
......@@ -79,12 +66,17 @@ class CameraVision(Node):
# Node internal loop
def loop(self):
if self._rgb_img is None :
print('a')
"""
Main processing loop, called by the node timer
"""
# update images
if self._update_imgs() is None :
return
color_image = np.asanyarray(self._color_frame.get_data())
# Convert image to HSV for easier thresholding
image = cv2.cvtColor(self._rgb_img, cv2.COLOR_BGR2HSV)
image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
# Colour-based thresholding
mask = cv2.inRange(image, self._low, self._high)
......@@ -114,7 +106,7 @@ class CameraVision(Node):
findPt = False
(x, y) = 0, 0
for c in contours:
for c in sorted_contours:
# Compare our mask with our templates
match = cv2.matchShapes(template_contour, c, 3, 0.0)
......@@ -124,7 +116,7 @@ class CameraVision(Node):
(x, y), radius = cv2.minEnclosingCircle(c)
# If we find a correct match that is big enough, we publish data
if min(match, match2) < 0.3 and radius > 30 and y > 2.0/5 * 480:
if min(match, match2) < 0.3 and radius > 30:
findPt = True
break # Stop checking other contours
......@@ -139,23 +131,19 @@ class CameraVision(Node):
# Publish images and string message
self.publish_msgs(image, mask)
# Depth Program
if self._depth_img is None :
return
depth = self._depth_img
print(depth[int(y), int(x)])
"""
if findPt:
# Depth calculus
x,y = int(x), int(y)
color_intrin = self._color_frame.profile.as_video_stream_profile().intrinsics
circle_points = []
D_RADIUS = 10
for X in range(int(x)-D_RADIUS, int(x)+D_RADIUS):
for Y in range(int(y)-D_RADIUS, int(y)+D_RADIUS):
if (X-int(x))**2 + (Y-int(y))**2 <= D_RADIUS:
circle_points.append(depth[Y, X])
for X in range(x-D_RADIUS, x+D_RADIUS):
for Y in range(y-D_RADIUS, y+D_RADIUS):
if (X-x)**2 + (Y-y)**2 <= D_RADIUS:
depth = self._depth_dist_frame.get_distance(x, y)
dx ,dy, dz = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], depth)
circle_points.append(math.sqrt(dx**2 + dy**2 + dz**2))
# publish mean(circle_points)
print(statistics.mean(circle_points))
"""
......
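The loop above thresholds the colour frame in HSV, compares the resulting contours against bottle templates with cv2.matchShapes, and only samples depth once a large enough match is found. A condensed sketch of that detection stage, reusing the HSV bounds and acceptance test from the diff; the input frame, the single template contour and the morphological clean-up are placeholders and assumptions for illustration:

# Sketch of the colour-threshold + contour-matching stage of loop().
import cv2
import numpy as np

low = np.array([60 - 25, 50, 50])            # HSV lower bound (greenish hues)
high = np.array([65 + 25, 255, 255])         # HSV upper bound
kernel = np.ones((5, 5), np.uint8)

color_image = np.zeros((480, 848, 3), np.uint8)   # stand-in for the camera frame
template_contour = np.array([[[0, 0]], [[0, 40]], [[20, 40]], [[20, 0]]], dtype=np.int32)  # stand-in template

hsv = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, low, high)                       # colour thresholding
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)    # assumed clean-up with the 5x5 kernel

contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in sorted(contours, key=cv2.contourArea, reverse=True):  # assumed area sort for sorted_contours
    match = cv2.matchShapes(template_contour, c, 3, 0.0)
    (x, y), radius = cv2.minEnclosingCircle(c)
    if match < 0.3 and radius > 30:                      # same acceptance test as above
        break                                            # (x, y) is the detection centre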