diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
new file mode 100644
index 0000000..af5a8df
--- /dev/null
+++ b/.vscode/c_cpp_properties.json
@@ -0,0 +1,20 @@
+{
+ "configurations": [
+ {
+ "browse": {
+ "databaseFilename": "${default}",
+ "limitSymbolsToIncludedHeaders": false
+ },
+ "includePath": [
+ "/opt/ros/humble/include/**",
+ "/usr/include/**"
+ ],
+ "name": "ROS",
+ "intelliSenseMode": "gcc-x64",
+ "compilerPath": "/usr/bin/gcc",
+ "cStandard": "gnu11",
+ "cppStandard": "c++17"
+ }
+ ],
+ "version": 4
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..02090d7
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,15 @@
+{
+ "python.autoComplete.extraPaths": [
+ "/home/pragya/ros2_ws/build/selection_task",
+ "/home/pragya/ros2_ws/install/selection_task/lib/python3.10/site-packages",
+ "/opt/ros/humble/lib/python3.10/site-packages",
+ "/opt/ros/humble/local/lib/python3.10/dist-packages"
+ ],
+ "python.analysis.extraPaths": [
+ "/home/pragya/ros2_ws/build/selection_task",
+ "/home/pragya/ros2_ws/install/selection_task/lib/python3.10/site-packages",
+ "/opt/ros/humble/lib/python3.10/site-packages",
+ "/opt/ros/humble/local/lib/python3.10/dist-packages"
+ ],
+ "ros.distro": "humble"
+}
\ No newline at end of file
diff --git a/deeplabv3.tflite b/deeplabv3.tflite
new file mode 100644
index 0000000..420a934
Binary files /dev/null and b/deeplabv3.tflite differ
diff --git a/person_follower/package.xml b/person_follower/package.xml
new file mode 100644
index 0000000..2be282c
--- /dev/null
+++ b/person_follower/package.xml
@@ -0,0 +1,21 @@
+
+
+
+ person_follower
+ 0.0.0
+ TODO: Package description
+ pragya
+ TODO: License declaration
+
+ rclpy
+ sensor_msgs
+
+ ament_copyright
+ ament_flake8
+ ament_pep257
+ python3-pytest
+
+
+ ament_python
+
+
diff --git a/person_follower/person_follower/__init__.py b/person_follower/person_follower/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/person_follower/person_follower/__pycache__/__init__.cpython-310.pyc b/person_follower/person_follower/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..e0e0c29
Binary files /dev/null and b/person_follower/person_follower/__pycache__/__init__.cpython-310.pyc differ
diff --git a/person_follower/person_follower/__pycache__/camera_subscriber.cpython-310.pyc b/person_follower/person_follower/__pycache__/camera_subscriber.cpython-310.pyc
new file mode 100644
index 0000000..ad77447
Binary files /dev/null and b/person_follower/person_follower/__pycache__/camera_subscriber.cpython-310.pyc differ
diff --git a/person_follower/person_follower/__pycache__/livefeed_centroid.cpython-310.pyc b/person_follower/person_follower/__pycache__/livefeed_centroid.cpython-310.pyc
new file mode 100644
index 0000000..7433eb2
Binary files /dev/null and b/person_follower/person_follower/__pycache__/livefeed_centroid.cpython-310.pyc differ
diff --git a/person_follower/person_follower/__pycache__/person_follower.cpython-310.pyc b/person_follower/person_follower/__pycache__/person_follower.cpython-310.pyc
new file mode 100644
index 0000000..b99763f
Binary files /dev/null and b/person_follower/person_follower/__pycache__/person_follower.cpython-310.pyc differ
diff --git a/person_follower/person_follower/camera_subscriber.py b/person_follower/person_follower/camera_subscriber.py
new file mode 100755
index 0000000..91f6aba
--- /dev/null
+++ b/person_follower/person_follower/camera_subscriber.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+import cv2
+from cv_bridge import CvBridge
+
+class CameraSubscriber(Node):
+ def __init__(self):
+ super().__init__('camera_subscriber_node')
+ self.subscription = self.create_subscription(
+ Image,'/kinect_camera/image_raw', self.image_callback, 10)
+ self.bridge = CvBridge()
+ self.get_logger().info('camera_subscriber node started')
+
+ def image_callback(self, msg):
+ cv_image = self.bridge.imgmsg_to_cv2(msg, encoding='bgr8')
+ cv2.imshow('Kinect Camera Feed', cv_image)
+ cv2.waitKey(1)
+
+def main(args=None):
+ rclpy.init(args=args)
+ camera_subscriber = CameraSubscriber()
+ rclpy.spin(camera_subscriber)
+ camera_subscriber.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/person_follower/person_follower/final_code.py b/person_follower/person_follower/final_code.py
new file mode 100644
index 0000000..4d0c98d
--- /dev/null
+++ b/person_follower/person_follower/final_code.py
@@ -0,0 +1,73 @@
+#! /usr/bin/env python3
+
+import cv2
+import mediapipe as mp
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import numpy as np
+
+
+class CameraSubscriber(Node):
+ def __init__(self):
+ super().__init__('camera_subscriber_node')
+ self.bridge = CvBridge()
+ self.subscription = self.create_subscription(Image, '/kinect_camera/image_raw', self.image_callback, 10)
+ self.cv_image = None
+ self.mp_holistic = mp.solutions.holistic.Holistic()
+ self.mp_drawing = mp.solutions.drawing_utils
+
+ def calculate_centroid(self, keypoints):
+ x_sum, y_sum = 0, 0
+ num_points = len(keypoints)
+ for point in keypoints:
+ x_sum += point[0]
+ y_sum += point[1]
+ centroid_x = x_sum / num_points
+ centroid_y = y_sum / num_points
+ return [int(centroid_x), int(centroid_y)]
+
+ def perform_segmentation(self, image):
+ gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ _, segmented_mask = cv2.threshold(gray_image, 128, 255, cv2.THRESH_BINARY)
+ return segmented_mask
+
+ def webcam_centroid(self):
+ try:
+ results = self.mp_holistic.process(self.cv_image)
+ if results.pose_landmarks:
+ keypoints = []
+ for landmark in results.pose_landmarks.landmark:
+ h, w, c = self.cv_image.shape
+ x, y = int(landmark.x * w), int(landmark.y * h)
+ keypoints.append((x, y))
+ centroid = self.calculate_centroid(keypoints)
+ cv2.circle(self.cv_image, centroid, 5, (0, 255, 0), -1)
+ self.mp_drawing.draw_landmarks(self.cv_image, results.pose_landmarks, mp.solutions.holistic.POSE_CONNECTIONS)
+
+ #image segmentation
+ segmented_mask = self.perform_segmentation(self.cv_image)
+ cv2.imshow('Segmentation Mask', segmented_mask)
+ cv2.imshow('Kinect Camera Feed', self.cv_image)
+
+ except Exception as e:
+ print(e)
+
+ def image_callback(self, msg):
+ self.cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
+ self.webcam_centroid()
+ cv2.waitKey(3)
+
+def main(args=None):
+ rclpy.init(args=args)
+ camera_subscriber = CameraSubscriber()
+ rclpy.spin(camera_subscriber)
+ camera_subscriber.mp_holistic.close()
+ cv2.destroyAllWindows()
+ camera_subscriber.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == "__main__":
+ main()
+
diff --git a/person_follower/person_follower/image_segmentation.py b/person_follower/person_follower/image_segmentation.py
new file mode 100644
index 0000000..6284e0c
--- /dev/null
+++ b/person_follower/person_follower/image_segmentation.py
@@ -0,0 +1,180 @@
+#! /usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+from geometry_msgs.msg import Twist
+from cv_bridge import CvBridge, CvBridgeError
+import cv2
+from sensor_msgs.msg import Image
+import mediapipe as mp
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+import numpy as np
+
+class PersonFollower(Node):
+ def __init__(self):
+ super().__init__("person_follower")
+ self.bridge = CvBridge()
+ self.image_sub = self.create_subscription(Image, "/kinect_camera/image_raw",self.callback, 10)
+ self.depth_sub=self.create_subscription(Image,"/kinect_camera/depth/image_raw",self.depth_callback,10)
+ self.pub = self.create_publisher(Twist, '/cmd_vel', 10)
+ self.velocity_msg = Twist()
+ self.depth_image = None
+ self.velocity_msg.linear.y = 0.0
+ self.velocity_msg.linear.z = 0.0
+ self.velocity_msg.angular.x = 0.0
+ self.velocity_msg.angular.y = 0.0
+
+ self.mp_drawing = mp.solutions.drawing_utils
+ self.mp_holistic = mp.solutions.holistic
+ self.mp_pose = mp.solutions.pose.Pose(
+ min_detection_confidence=0.5,
+ min_tracking_confidence=0.5
+ )
+ self.kp_angular=0.005
+ self.kp_linear=0.7
+ self.x_center=None
+ self.image_center=None
+ self.buffer=10
+
+ # Create the options that will be used for ImageSegmenter
+ base_options = python.BaseOptions(model_asset_path='/home/pragya/personfollower_ws/src/Person-Follower/person_follower/person_follower/deeplabv3.tflite')
+ options = vision.ImageSegmenterOptions(base_options=base_options,output_category_mask=True)
+ self.segmenter = vision.ImageSegmenter.create_from_options(options)
+
+ self.BG_COLOR = (192, 192, 192) # gray
+ self.MASK_COLOR = (0, 255, 0) # green
+
+ def depth_callback(self,data):
+ try:
+ self.depth_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
+ except Exception as e:
+ self.get_logger().error(f"Error converting depth image: {e}")
+
+
+ def callback(self,data):
+ self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
+ # self.cv_image= cv2.resize(self.cv_image,(720,600))
+ rgb_cv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2RGB)
+ self.segmentation_frame=self.cv_image
+ self.results = self.mp_pose.process(rgb_cv_image)
+
+ if self.results.pose_landmarks is not None:
+ landmarks = self.results.pose_landmarks.landmark
+ # Get the average of all landmark x and y coordinates to find the centroid
+ x_centroid = sum([landmark.x for landmark in landmarks]) / len(landmarks)
+ y_centroid = sum([landmark.y for landmark in landmarks]) / len(landmarks)
+ centroid = (x_centroid, y_centroid)
+ self.x_center=x_centroid * self.cv_image.shape[1]
+ self.y_center=y_centroid * self.cv_image.shape[0]
+ x_length=self.cv_image.shape[1]
+ self.image_center=x_length/2
+
+ x =int(x_length/2)
+ # if self.depth_image is not None:
+ # depth_mm = self.depth_image[int(self.y_center),int(self.x_center)]
+ # print("depth is :",depth_mm)
+
+ cv2.circle(self.cv_image, (int(x_centroid * self.cv_image.shape[1]), int(y_centroid * self.cv_image.shape[0])), 5, (0, 0, 255), -1)
+
+ x_min = min([landmark.x for landmark in landmarks])
+ x_max = max([landmark.x for landmark in landmarks])
+ y_min = min([landmark.y for landmark in landmarks])
+ y_max = max([landmark.y for landmark in landmarks])
+
+ cv2.rectangle(self.cv_image, (int(x_min * self.cv_image.shape[1]), int(y_min * self.cv_image.shape[0])),
+ (int(x_max * self.cv_image.shape[1]), int(y_max * self.cv_image.shape[0])), (0, 255, 0), 2)
+ self.segmentation_frame = mp.Image(image_format=mp.ImageFormat.SRGB, data=self.segmentation_frame)
+ # mask for the segmented image
+ segmentation_result = self.segmenter.segment(self.segmentation_frame)
+ category_mask = segmentation_result.category_mask
+
+ image_data = self.segmentation_frame.numpy_view()
+ fg_image = np.zeros(image_data.shape, dtype=np.uint8)
+ fg_image[:] = self.MASK_COLOR
+ bg_image = np.zeros(image_data.shape, dtype=np.uint8)
+ bg_image[:] = self.BG_COLOR
+
+ condition = np.stack((category_mask.numpy_view(),) * 3, axis=-1) > 0.2
+
+ self.segmentation_frame = np.where(condition, fg_image, bg_image)
+ self.mp_drawing.draw_landmarks(self.segmentation_frame, self.results.pose_landmarks,self.mp_holistic.POSE_CONNECTIONS)
+
+ if self.results.pose_landmarks is not None:
+ cv2.line(self.cv_image, (int(self.x_center-15), int(self.y_center)), (int(self.x_center+15), int(self.y_center)), (255, 0, 0), 3)
+ cv2.line(self.cv_image, (int(self.x_center), int(self.y_center-15)), (int(self.x_center), int(self.y_center+15)), (255, 0, 0), 3)
+ cv2.line(self.segmentation_frame, (int(self.x_center-15), int(self.y_center)), (int(self.x_center+15), int(self.y_center)), (255, 0, 0), 3)
+ cv2.line(self.segmentation_frame, (int(self.x_center), int(self.y_center-15)), (int(self.x_center), int(self.y_center+15)), (255, 0, 0), 3)
+ cv2.line(self.segmentation_frame, (int(350), int(0)), (int(350), int(500)), (0, 0, 255), 2)
+ cv2.line(self.segmentation_frame, (int(0), int(self.y_center)), (int(700), int(self.y_center)), (0, 0, 255), 2)
+
+
+ self.control_loop()
+
+ def control_loop(self):
+ if self.results.pose_landmarks is None:
+ self.velocity_control(0.0,-0.5)
+ self.top="Searching for Person"
+ self.bottom="<=Rotating=>"
+
+ else:
+ if self.x_center > 700:
+ self.x_center=699.0
+ if self.y_center> 500:
+ self.y_center=499.0
+ depth_mm = self.depth_image[int(self.y_center),int(self.x_center)] if self.depth_image is not None else 3.0  # guard: depth frame may not have arrived yet
+
+ if (3.0 + self.x_center) < self.image_center : #person is left means positive ang vel
+ self.top="<==Left"
+ self.bottom="Going Forward"
+
+ self.error_linear= depth_mm - 3.0
+
+ self.error= (self.image_center - self.x_center)
+ self.angular_vel= self.kp_angular * self.error
+ if self.error_linear < 0.3:
+ self.error_linear=0.0
+ self.linear_vel= self.kp_linear * self.error_linear
+ self.velocity_control(self.linear_vel,self.angular_vel)
+
+ elif self.x_center > self.image_center + 3.0 : #person is right means negative ang vel
+ self.top="Right==>"
+ self.bottom="Going Right"
+
+ self.error_linear= depth_mm - 3.0
+ self.error= (self.image_center - self.x_center)
+ self.angular_vel= self.kp_angular * self.error
+
+ if self.error_linear < 0.3:
+ self.error_linear=0.0
+ self.linear_vel= self.kp_linear * self.error_linear
+ self.velocity_control(self.linear_vel,self.angular_vel)
+
+ elif abs(self.x_center - self.image_center)<= 3.0 :
+ self.top="Stop"
+ self.bottom="Reached Goal position"
+ self.velocity_control(0.0,0.0)
+
+
+ cv2.putText(self.cv_image,self.top,(200,50),cv2.FONT_HERSHEY_DUPLEX,0.8,(0, 0,255),2)
+ cv2.putText(self.cv_image,self.bottom,(200,450),cv2.FONT_HERSHEY_DUPLEX,0.8,(0, 0, 255),2)
+ cv2.imshow('Person Detection', self.cv_image)
+ cv2.imshow('Person Segmentation', self.segmentation_frame)
+ cv2.waitKey(3)
+ # NOTE(review): status text is rendered on the frames above; an unconditional "No person detected" print here fired even when a person was tracked
+
+ def velocity_control(self,linear,angular):
+ # linear = min(linear,2)
+ self.velocity_msg.linear.x = float(linear)
+ self.velocity_msg.angular.z = float(angular)
+ self.pub.publish(self.velocity_msg)
+
+def main():
+ rclpy.init()
+ Mynode = PersonFollower()
+ rclpy.spin(Mynode)
+ cv2.destroyAllWindows()
+ rclpy.shutdown()
+
+if __name__=="__main__" :
+ main()
diff --git a/person_follower/person_follower/livefeed_centroid.py b/person_follower/person_follower/livefeed_centroid.py
new file mode 100644
index 0000000..86d35c5
--- /dev/null
+++ b/person_follower/person_follower/livefeed_centroid.py
@@ -0,0 +1,72 @@
+#! /usr/bin/env python3
+
+import cv2
+import mediapipe as mp
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+
+
+class CameraSubscriber(Node):
+ def __init__(self):
+ super().__init__('camera_subscriber_node')
+ self.bridge = CvBridge()
+ self.subscription = self.create_subscription(Image,'/kinect_camera/image_raw', self.image_callback, 10)
+ self.cv_image=None
+
+ #take webcam feed and display
+ def calculate_centroid(self,keypoints):
+ x_sum, y_sum = 0, 0
+ num_points = len(keypoints)
+ for point in keypoints:
+ x_sum += point[0]
+ y_sum += point[1]
+ centroid_x = x_sum / num_points
+ centroid_y = y_sum / num_points
+ return [int(centroid_x), int(centroid_y)]
+
+ def webcam_centroid(self):
+ #while True:
+ #self.cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
+ try:
+ mp_holistic = mp.solutions.holistic
+ mp_drawing = mp.solutions.drawing_utils
+ holistic = mp_holistic.Holistic()
+ results = holistic.process(self.cv_image)
+ if results.pose_landmarks:
+ #keypoints for centroid
+ keypoints = []
+ for landmark in results.pose_landmarks.landmark:
+ h, w, c = self.cv_image.shape
+ x, y = int(landmark.x * w), int(landmark.y * h)
+ keypoints.append((x, y))
+ centroid = self.calculate_centroid(keypoints) #centroid calculation
+ cv2.circle(self.cv_image, centroid, 5, (0, 255, 0), -1)
+ mp_drawing.draw_landmarks(self.cv_image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
+ cv2.imshow('Kinect Camera Feed', self.cv_image)
+ #cv2.waitkey(3)
+ except Exception as e:
+ print(e)
+ #if cv2.waitKey(3) & 0xFF == ord('q'):
+ #break
+ #self.cv_image.release()
+ #cv2.destroyAllWindows()
+
+
+ def image_callback(self, msg):
+ self.cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
+ #cv2.imshow('Kinect Camera Feed', self.cv_image)
+ self.webcam_centroid()
+ cv2.waitKey(3)
+
+def main(args=None):
+ rclpy.init(args=args)
+ camera_subscriber = CameraSubscriber()
+ rclpy.spin(camera_subscriber)
+ #camera_subscriber.webcam_centroid()
+ camera_subscriber.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == "__main__":
+ main()
diff --git a/person_follower/person_follower/person_follower.py b/person_follower/person_follower/person_follower.py
new file mode 100644
index 0000000..19f9311
--- /dev/null
+++ b/person_follower/person_follower/person_follower.py
@@ -0,0 +1,83 @@
+#! /usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+from geometry_msgs.msg import Twist
+from cv_bridge import CvBridge, CvBridgeError
+import cv2
+from sensor_msgs.msg import Image
+import mediapipe as mp
+
+
+class PersonFollower(Node):
+ def __init__(self):
+ super().__init__("person_follower")
+ self.bridge = CvBridge()
+ self.image_sub = self.create_subscription(Image, "/kinect_camera/image_raw",self.callback, 10)
+ self.depth_sub=self.create_subscription(Image,"/kinect_camera/depth/image_raw",self.depth_callback,10)
+ self.velocity_msg = Twist()
+ self.depth_image = None
+
+
+ self.mp_pose = mp.solutions.pose.Pose(
+ min_detection_confidence=0.5,
+ min_tracking_confidence=0.5
+ )
+
+ def depth_callback(self,data):
+ try:
+ self.depth_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
+ except Exception as e:
+ self.get_logger().error(f"Error converting depth image: {e}")
+
+
+ def callback(self,data):
+ self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
+ # self.cv_image= cv2.resize(self.cv_image,(720,600))
+ rgb_cv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2RGB)
+ self.results = self.mp_pose.process(rgb_cv_image)
+ if self.results.pose_landmarks is not None:
+ landmarks = self.results.pose_landmarks.landmark
+
+ # Get the average of all landmark x and y coordinates to find the centroid
+ x_centroid = sum([landmark.x for landmark in landmarks]) / len(landmarks)
+ y_centroid = sum([landmark.y for landmark in landmarks]) / len(landmarks)
+ centroid = (x_centroid, y_centroid)
+ self.x_center=x_centroid * self.cv_image.shape[1]
+ self.y_center=y_centroid * self.cv_image.shape[0]
+ x_length=self.cv_image.shape[1]
+ self.image_center=x_length/2
+
+ x =int(x_length/2)
+ if self.depth_image is not None:
+ depth_mm = self.depth_image[int(self.y_center),int(self.x_center)]
+ print("depth is :",depth_mm)
+
+ cv2.circle(self.cv_image, (int(x_centroid * self.cv_image.shape[1]), int(y_centroid * self.cv_image.shape[0])), 5, (0, 0, 255), -1)
+
+ x_min = min([landmark.x for landmark in landmarks])
+ x_max = max([landmark.x for landmark in landmarks])
+ y_min = min([landmark.y for landmark in landmarks])
+ y_max = max([landmark.y for landmark in landmarks])
+
+ cv2.rectangle(self.cv_image, (int(x_min * self.cv_image.shape[1]), int(y_min * self.cv_image.shape[0])),
+ (int(x_max * self.cv_image.shape[1]), int(y_max * self.cv_image.shape[0])), (0, 255, 0), 2)
+ cv2.imshow('Person Detection', self.cv_image)
+ cv2.waitKey(3)
+ print("Person detected in the image")
+
+ else:
+ cv2.imshow('Person Detection', self.cv_image)
+ cv2.waitKey(3)
+ print("No person detected in the image")
+
+
+def main():
+ rclpy.init()
+ Mynode = PersonFollower()
+ rclpy.spin(Mynode)
+ cv2.destroyAllWindows()
+ rclpy.shutdown()
+
+if __name__=="__main__" :
+ main()
\ No newline at end of file
diff --git a/person_follower/person_follower/thresholding.py b/person_follower/person_follower/thresholding.py
new file mode 100644
index 0000000..4eb95f0
--- /dev/null
+++ b/person_follower/person_follower/thresholding.py
@@ -0,0 +1,26 @@
+import cv2
+import numpy as np
+
+# Function to perform basic image segmentation using thresholding
+def segment_image(image_path, threshold_value=128):
+
+ # Read the image in grayscale
+ image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
+
+ # Apply thresholding
+ _, binary_mask = cv2.threshold(image, threshold_value, 255, cv2.THRESH_BINARY_INV)
+
+ # Apply bitwise AND to get the segmented image
+ segmented_image = cv2.bitwise_and(image, image, mask=binary_mask)
+
+ return segmented_image
+
+# Example usage
+image_path = '/home/pragya/Downloads/gazebopose2.jpg'
+segmented_image = segment_image(image_path)
+
+# Display the original and segmented images
+cv2.imshow('Original Image', cv2.imread(image_path))
+cv2.imshow('Segmented Image', segmented_image)
+cv2.waitKey(0)
+cv2.destroyAllWindows()
diff --git a/person_follower/resource/person_follower b/person_follower/resource/person_follower
new file mode 100644
index 0000000..e69de29
diff --git a/person_follower/setup.cfg b/person_follower/setup.cfg
new file mode 100644
index 0000000..2116ce3
--- /dev/null
+++ b/person_follower/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/person_follower
+[install]
+install_scripts=$base/lib/person_follower
diff --git a/person_follower/setup.py b/person_follower/setup.py
new file mode 100644
index 0000000..1bf5772
--- /dev/null
+++ b/person_follower/setup.py
@@ -0,0 +1,29 @@
+from setuptools import find_packages, setup
+
+package_name = 'person_follower'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(exclude=['test']),
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='pragya',
+ maintainer_email='pragya@todo.todo',
+ description='TODO: Package description',
+ license='TODO: License declaration',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ "camera_subscriber= person_follower.camera_subscriber:main",
+ "livefeed_centroid=person_follower.livefeed_centroid:main",
+ "person_follower_py=person_follower.person_follower:main",
+ "final_code=person_follower.final_code:main"
+ ],
+ },
+)
diff --git a/person_follower/test/test_copyright.py b/person_follower/test/test_copyright.py
new file mode 100644
index 0000000..97a3919
--- /dev/null
+++ b/person_follower/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/person_follower/test/test_flake8.py b/person_follower/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/person_follower/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/person_follower/test/test_pep257.py b/person_follower/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/person_follower/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'
diff --git a/person_follower_sim/gazebo/mr_robot_plugins.gazebo b/person_follower_sim/gazebo/mr_robot_plugins.gazebo
index 988201f..fc06276 100644
--- a/person_follower_sim/gazebo/mr_robot_plugins.gazebo
+++ b/person_follower_sim/gazebo/mr_robot_plugins.gazebo
@@ -9,28 +9,24 @@
-
+
false
50.0
left_motor_joint
right_motor_joint
0.199
- ${2*0.0535}
+ 0.1070
base_link
-
cmd_vel
odom
odom
- tf
+ tf
true
true
5.0
-
-
@@ -45,12 +41,12 @@
1.089
R8G8B8
- 640
- 480
+ 700
+ 500
0.05
- 8.0
+ 100.0
diff --git a/person_follower_sim/launch/gazebo.launch.py b/person_follower_sim/launch/gazebo.launch.py
index 2fbabac..b2f7329 100644
--- a/person_follower_sim/launch/gazebo.launch.py
+++ b/person_follower_sim/launch/gazebo.launch.py
@@ -15,7 +15,7 @@ def generate_launch_description():
#path to xacro file
xacro_file=get_package_share_directory('person_follower_sim')+'/urdf/mr_robot.xacro'
my_pkg_dir = join(get_package_share_directory('person_follower_sim'), 'worlds', 'model.sdf')
- actor_world = join(get_package_share_directory("person_follower_sim"),'worlds','walk.world')
+ actor_world = join(get_package_share_directory("person_follower_sim"),'worlds','stand.world')
# Include the gazebo.launch.py file
gazebo=IncludeLaunchDescription(
PythonLaunchDescriptionSource([get_package_share_directory('gazebo_ros'), '/launch/gazebo.launch.py']),
@@ -51,9 +51,10 @@ def generate_launch_description():
arguments=[
'-entity', 'mr_robot',
'-topic', '/robot_description',
- '-x', '2.0', # Set the initial X position
- '-y', '0.0', # Set the initial Y position
- '-z', '0.0' # Set the initial Z position
+ '-x', '6.0', # Set the initial X position
+ '-y', '2.0', # Set the initial Y position
+ '-z', '0.0' , # Set the initial Z position
+ '-Y', '-3.14' # Set the initial Yaw (radians)
]
)
return LaunchDescription([
diff --git a/person_follower_sim/urdf/mr_robot.xacro b/person_follower_sim/urdf/mr_robot.xacro
index fe411c4..e20b4cc 100644
--- a/person_follower_sim/urdf/mr_robot.xacro
+++ b/person_follower_sim/urdf/mr_robot.xacro
@@ -124,7 +124,7 @@
-
+
diff --git a/person_follower_sim/worlds/stand.world b/person_follower_sim/worlds/stand.world
new file mode 100644
index 0000000..2b9cf22
--- /dev/null
+++ b/person_follower_sim/worlds/stand.world
@@ -0,0 +1,18 @@
+
+
+
+
+ model://sun
+
+
+ model://ground_plane
+
+
+ -6 0 0 0 0 0
+
+ stand.dae
+ 1
+
+
+
+
diff --git a/segment.py b/segment.py
new file mode 100644
index 0000000..fbc9bd1
--- /dev/null
+++ b/segment.py
@@ -0,0 +1,45 @@
+'''import cv2
+import mediapipe as mp
+
+mp_holistic = mp.solutions.holistic
+holistic = mp_holistic.Holistic()
+
+def segment_live_camera():
+ cap = cv2.VideoCapture(0)
+
+ while cap.isOpened():
+ ret, frame = cap.read()
+ if not ret:
+ break
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+ results = holistic.process(frame_rgb)
+ if results.segmentation_mask is not None:
+ frame = cv2.cvtColor(cv2.addWeighted(frame, 0.5, cv2.cvtColor(results.segmentation_mask, cv2.COLOR_GRAY2BGR), 0.5, 0), cv2.COLOR_BGR2RGB)
+ cv2.imshow('Live Segmentation', frame)
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+ cap.release()
+ cv2.destroyAllWindows()
+segment_live_camera()
+
+import cv2
+import mediapipe as mp
+
+mp_holistic = mp.solutions.holistic
+holistic = mp_holistic.Holistic()
+
+def segment_image(image_path):
+ image = cv2.imread(image_path)
+ image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ results = holistic.process(image_rgb)
+ if results.segmentation_mask is not None:
+ image = cv2.cvtColor(cv2.addWeighted(image, 0.5, cv2.cvtColor(results.segmentation_mask, cv2.COLOR_GRAY2BGR), 0.5, 0), cv2.COLOR_BGR2RGB)
+ return image
+
+image_path = '/home/pragya/Downloads/gazebopose2.jpg'
+segmented_image = segment_image(image_path)
+
+cv2.imshow('Original Image', cv2.imread(image_path))
+cv2.imshow('Segmented Image', cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR))
+cv2.waitKey(0)
+cv2.destroyAllWindows()'''
\ No newline at end of file