diff --git a/catkin_ws/src/bowser_cam/CMakeLists.txt b/catkin_ws/src/bowser_cam/CMakeLists.txt new file mode 100644 index 0000000..0575710 --- /dev/null +++ b/catkin_ws/src/bowser_cam/CMakeLists.txt @@ -0,0 +1,207 @@ +cmake_minimum_required(VERSION 2.8.3) +project(bowser_cam) + +## Compile as C++11, supported in ROS Kinetic and newer +# add_compile_options(-std=c++11) + +## Find catkin macros and libraries +## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) +## is used, also find other catkin packages +find_package(catkin REQUIRED COMPONENTS + cv2 + numpy + rospy + std_msgs +) + +## System dependencies are found with CMake's conventions +# find_package(Boost REQUIRED COMPONENTS system) + + +## Uncomment this if the package has a setup.py. This macro ensures +## modules and global scripts declared therein get installed +## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html +# catkin_python_setup() + +################################################ +## Declare ROS messages, services and actions ## +################################################ + +## To declare and build messages, services or actions from within this +## package, follow these steps: +## * Let MSG_DEP_SET be the set of packages whose message types you use in +## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). +## * In the file package.xml: +## * add a build_depend tag for "message_generation" +## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET +## * If MSG_DEP_SET isn't empty the following dependency has been pulled in +## but can be declared for certainty nonetheless: +## * add a exec_depend tag for "message_runtime" +## * In this file (CMakeLists.txt): +## * add "message_generation" and every package in MSG_DEP_SET to +## find_package(catkin REQUIRED COMPONENTS ...) +## * add "message_runtime" and every package in MSG_DEP_SET to +## catkin_package(CATKIN_DEPENDS ...) +## * uncomment the add_*_files sections below as needed +## and list every .msg/.srv/.action file to be processed +## * uncomment the generate_messages entry below +## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) + +## Generate messages in the 'msg' folder +# add_message_files( +# FILES +# Message1.msg +# Message2.msg +# ) + +## Generate services in the 'srv' folder +# add_service_files( +# FILES +# Service1.srv +# Service2.srv +# ) + +## Generate actions in the 'action' folder +# add_action_files( +# FILES +# Action1.action +# Action2.action +# ) + +## Generate added messages and services with any dependencies listed here +# generate_messages( +# DEPENDENCIES +# std_msgs +# ) + +################################################ +## Declare ROS dynamic reconfigure parameters ## +################################################ + +## To declare and build dynamic reconfigure parameters within this +## package, follow these steps: +## * In the file package.xml: +## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" +## * In this file (CMakeLists.txt): +## * add "dynamic_reconfigure" to +## find_package(catkin REQUIRED COMPONENTS ...) 
+## * uncomment the "generate_dynamic_reconfigure_options" section below +## and list every .cfg file to be processed + +## Generate dynamic reconfigure parameters in the 'cfg' folder +# generate_dynamic_reconfigure_options( +# cfg/DynReconf1.cfg +# cfg/DynReconf2.cfg +# ) + +################################### +## catkin specific configuration ## +################################### +## The catkin_package macro generates cmake config files for your package +## Declare things to be passed to dependent projects +## INCLUDE_DIRS: uncomment this if your package contains header files +## LIBRARIES: libraries you create in this project that dependent projects also need +## CATKIN_DEPENDS: catkin_packages dependent projects also need +## DEPENDS: system dependencies of this project that dependent projects also need +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES bowser_cam +# CATKIN_DEPENDS cv2 numpy rospy std_msgs +# DEPENDS system_lib +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( +# include + ${catkin_INCLUDE_DIRS} +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/bowser_cam.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +# add_executable(${PROJECT_NAME}_node src/bowser_cam_node.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +# target_link_libraries(${PROJECT_NAME}_node +# ${catkin_LIBRARIES} +# ) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) 
for installation +## in contrast to setup.py, you can choose the destination +# install(PROGRAMS +# scripts/my_python_script +# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark executables for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html +# install(TARGETS ${PROJECT_NAME}_node +# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark libraries for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html +# install(TARGETS ${PROJECT_NAME} +# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} +# ) + +## Mark cpp header files for installation +# install(DIRECTORY include/${PROJECT_NAME}/ +# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +# FILES_MATCHING PATTERN "*.h" +# PATTERN ".svn" EXCLUDE +# ) + +## Mark other files for installation (e.g. launch and bag files, etc.) +# install(FILES +# # myfile1 +# # myfile2 +# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# ) + +############# +## Testing ## +############# + +## Add gtest based cpp test target and link libraries +# catkin_add_gtest(${PROJECT_NAME}-test test/test_bowser_cam.cpp) +# if(TARGET ${PROJECT_NAME}-test) +# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) +# endif() + +## Add folders to be run by python nosetests +# catkin_add_nosetests(test) diff --git a/catkin_ws/src/bowser_cam/docs/reference code/LaneDetection.py b/catkin_ws/src/bowser_cam/docs/reference code/LaneDetection.py new file mode 100644 index 0000000..379a9f4 --- /dev/null +++ b/catkin_ws/src/bowser_cam/docs/reference code/LaneDetection.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python +#from __future__ import print_function + +import roslib +import numpy as np +import sys +import rospy +import rosunit +import math +import cv2 +import random +#import cv2.cv as cv + +from std_msgs.msg import String +import std_msgs.msg +from sensor_msgs.msg import Image +from cv_bridge import CvBridge, CvBridgeError + +class image_processor: + + #global timer + timer= None + # global end_time + end_time= None + #global averaging + averaging = False + # global lines + lines = None + def __init__(self): + self.image_pub = rospy.Publisher("Lane Distance",Float64, queue_size = 1) + self.bridge = CvBridge() + self.timer = rospy.Time.now() + self.end_time = self.timer + rospy.Duration(.2) + + self.image_sub = rospy.Subscriber("/usb_cam_node/image_raw",Image,self.callback) + + def callback(self,data): + + try: + cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") + except CvBridgeError as e: + print(e) + + #create copy of image + original = cv_image + cv2.imshow('Original' , original) + #apply gaussian + cv_image = cv2.GaussianBlur(cv_image, (9,9),0) + cv2.imshow('Gaussian', cv_image) + + # Decrease Brightness + value = 30 # decrease image brightness by + hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV) + h, s, v = cv2.split(hsv) + lim = 0 + value + v[v < lim] = 0 + v[v >= lim] -= value + final_hsv = cv2.merge((h, s, v)) + cv_image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR) + cv2.imshow('Reduced Brightness', cv_image) + + #get Canny edge detecton + gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY) + can = cv2.Canny(gray, 30, 120) + ## use edge detection data as mask on to color image + cv_image = cv2.bitwise_and(cv_image, cv_image,mask=can) + cv2.imshow('Cannyed', cv_image) + + # Color thresholding + lower = np.array([105,120,105]) #B 
G R + upper = np.array([230,200,230]) + mask = cv2.inRange(cv_image, lower, upper) + cMasked = cv2.bitwise_and(cv_image,cv_image, mask= mask) + cv2.imshow('Color Masked', cMasked) + + #threshold to remove things that are too bright or not bright enough + brightnessMin = np.array([30]) + brightnessMax = np.array([230]) + grayimg = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY) + brightMask = cv2.inRange(grayimg, brightnessMin, brightnessMax) + bMasked = cv2.bitwise_and(cMasked,cMasked, mask= brightMask) + cv2.imshow('Brightness Masked', bMasked) + #res = cv2.bitwise_and(cv_image,cv_image, mask= brightMask) + + #lines = cv2.HoughLines(can,1,np.pi/180,200) + + # Probabilistic Line Transform + + bMasked = cv2.cvtColor(bMasked, cv2.COLOR_BGR2GRAY) + self.lines = cv2.HoughLines(bMasked,1,np.pi/180,40) + ''' + if self.lines == None: + + self.lines = temp + print self.lines + print ("initialize") + else: + self.lines = np.append( self.lines,temp,0) + print self.lines + print("Append") + ''' + centers = [] + rhothresh = 90 + minRhoThresh = 15 + thetaThresh = .5 + #np.concatenate((self.lines, temp), axis=0) + + + if( rospy.Time.now() > self.end_time): # get lines over time to + if self.lines is not None: + + + while( len(self.lines) > 1) : + + outer = self.lines[0] + #print ("outer ") + #print(outer) + cur = outer[0] + + currho = cur[0] + curtheta = cur[1] + + a = np.cos(curtheta) + b = np.sin(curtheta) + x0 = a*currho + y0 = b*currho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + + + for index in range ( 1, len(self.lines)) : + + selected = self.lines[index] + line = selected[0] + + linerho = line[0] + linetheta = line[1] + + if (linerho < currho + rhothresh and linerho > currho - rhothresh)and ( linerho > currho +minRhoThresh or linerho < currho - minRhoThresh) and (linetheta < curtheta + thetaThresh and linetheta > curtheta - thetaThresh) : + + la = np.cos(linetheta) + lb = np.sin(linetheta) + lx0 = la*linerho + ly0 = lb*linerho + lx1 = int(lx0 + 1000*(-lb)) + ly1 = int(ly0 + 1000*(la)) + lx2 = int(lx0 - 1000*(-lb)) + ly2 = int(ly0 - 1000*(la)) + + linCenter = (ly1 +ly2)/2 + curCenter = (y1 + y2)/2 + + if( linCenter > curCenter) : + centers.append(linCenter) + else: + centers.append(curCenter) + + color = (random.randint(0,255),random.randint(0,255),random.randint(0,255)) + + #pairs.append([cur, line]) + cv2.line(original, (lx1,ly1),(lx2,ly2), color, 2) + cv2.line(original, (x1,y1),(x2,y2),color,2) + self.lines = np.delete(self.lines, index,0) + break + + self.lines = np.delete(self.lines, 0,0) + + + + + + self.lines = None + self.end_time = rospy.Time.now() + rospy.Duration(.2) + #end editing + cv2.imshow('Line view', original) + + distancePx = 0 + if( len(centers) > 0): + + for center in centers: + distancePx += center + + distancePx /= len(centers) + + + distanceIn = 187 - 29.7* math.log1p(distancePx) + distanceIn -=1 + + if distanceIn < 80 : + print(distanceIn) + + distanceMeters = distanceIn * 0.254 + + cv2.waitKey(3) + + + self.image_pub.publish(distanceMeters) + + +def main(args): + rospy.init_node('laneDistance', anonymous=True) + ic = image_processor() + + try: + rospy.spin() + except KeyboardInterrupt: + print("Shutting down") + cv2.destroyAllWindows() + + +if __name__ == '__main__': + main(sys.argv) diff --git a/catkin_ws/src/bowser_cam/docs/reference code/PotholeLocator.md b/catkin_ws/src/bowser_cam/docs/reference code/PotholeLocator.md new file mode 100644 index 0000000..5db9fc9 --- /dev/null +++ 
b/catkin_ws/src/bowser_cam/docs/reference code/PotholeLocator.md @@ -0,0 +1,4 @@ +# Pothole Locator +Loads the camera using magic numbers to find circles. + +Determines the distance from the pothole. diff --git a/catkin_ws/src/bowser_cam/docs/reference code/PotholeLocator.py b/catkin_ws/src/bowser_cam/docs/reference code/PotholeLocator.py new file mode 100644 index 0000000..17d7e17 --- /dev/null +++ b/catkin_ws/src/bowser_cam/docs/reference code/PotholeLocator.py @@ -0,0 +1,87 @@ +import numpy as np +import sys +import math +import time +import cv2 as cv + +class pothole_locator: + + class camera: + def __init__(self, image, fov, cameraHeight, cameraAngle): + h = cameraHeight*math.cos(math.radians(cameraAngle)) + self.baseDist = cameraHeight*math.tan(math.radians(cameraAngle-(fov/2))) + topDist = cameraHeight*math.tan(math.radians(cameraAngle+(fov/2))) + self.verticalDist = topDist - self.baseDist + self.horizontalDist = h*math.tan(math.radians(fov/2)) + self.height = image.shape[0] + #actually half the width, but it makes stuff so much easier + self.width = image.shape[1]/2 + + def __init__(self): + return + + def whiteThreshold(self, image): + hsv = cv.GaussianBlur(image, (5,5), 0) + hsv = cv.cvtColor(hsv, cv.COLOR_BGR2HSV) + lower = np.array([0,0,0]) + upper = np.array([0,0,256]) + + mask = cv.inRange(hsv, lower, upper) + return cv.bitwise_and(image, image, mask= mask) + return mask + + def pixelToDistance(self, x, y, g): + forward = g.baseDist + g.verticalDist*(y/g.height); + x = x-g.width + side = g.horizontalDist*(x/g.width); + return (forward, side) + + def findPotholes(self, image): + g = self.camera(image, 43.3, 22, 90-15) + circles = self.locateInFrame(image) + if circles is not None: + potholes = [self.pixelToDistance(i[0], i[1], g) for i in circles[0]] + return potholes + return None + + def locateInFrame(self, image): + image = self.topDown(image) + image = self.whiteThreshold(image) + img = cv.cvtColor(image, cv.COLOR_BGR2GRAY) + circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1.2, 300, param1=100, param2=20, maxRadius=250) + return circles + + def topDown(self, image): + height, width = image.shape[:2] + dest = np.array([[0,0],[width, 0],[0, height],[width, height]], np.float32) + warpDist = math.cos(math.radians(15))*width + corners = np.array([[0-warpDist,0],[width+warpDist, 0],[0, height],[width, height]], np.float32) + warp = cv.getPerspectiveTransform(corners, dest) + return cv.warpPerspective(image, warp, (width, height)) + + + +def plotFrame(image, circles): + if circles is not None: + circles = np.round(circles[0,:]).astype("int") + for (x, y, r) in circles: + cv.circle(image, (x,y), r, color=(0,0,255), thickness=2) + cv.imshow("window", image) + +def videoIn(): + print('loading video source...') + try: + cap = cv.VideoCapture(0) + time.sleep(2) + if cap is None or cap == None: + raise IOError + except IOError: + sys.exit('video load failure') + loc = pothole_locator() + while(True): + ret, frame = cap.read() + print(loc.findPotholes(frame)) + if cv.waitKey(1) & 0xFF == ord('q'): + break + +videoIn() diff --git a/catkin_ws/src/bowser_cam/docs/reference code/conversion.md b/catkin_ws/src/bowser_cam/docs/reference code/conversion.md new file mode 100644 index 0000000..2fe947b --- /dev/null +++ b/catkin_ws/src/bowser_cam/docs/reference code/conversion.md @@ -0,0 +1,18 @@ +# Conversion +Creates a publisher topic ```LineDistance``` and subscriber topic ``` image_raw```. + +Converts an image message to open cv2. 
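+
+A minimal, stripped-down sketch of this conversion step (same topic, encoding, and cv_bridge calls as `conversion.py`, but as a bare script without the rest of the pipeline or the node's class structure):
+
+```python
+import rospy
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+
+bridge = CvBridge()
+
+def callback(msg):
+    try:
+        # ROS Image message -> BGR OpenCV (numpy) array
+        frame = bridge.imgmsg_to_cv2(msg, "bgr8")
+    except CvBridgeError as e:
+        rospy.logerr(e)
+        return
+    # lane detection works on `frame` from here on
+
+rospy.init_node('image_converter', anonymous=True)
+rospy.Subscriber("/camera/image_raw", Image, callback)
+rospy.spin()
+```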
+
+Scans the image and creates a mask that leaves only the color white behind.
+
+Greyscales the image and applies Canny edge detection.
+
+Finds the intersection between the greyscaled image and the masked image.
+
+Removes glare using HSV.
+
+Extracts lines using the Hough transform.
+
+Finds parallel lines.
+
+Publishes the distance from the closest line.
diff --git a/catkin_ws/src/bowser_cam/docs/reference code/conversion.py b/catkin_ws/src/bowser_cam/docs/reference code/conversion.py
new file mode 100755
index 0000000..eb59627
--- /dev/null
+++ b/catkin_ws/src/bowser_cam/docs/reference code/conversion.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+
+
+import roslib
+import numpy as np
+import sys
+import rospy
+import cv2
+import random
+#import cv2.cv as cv
+from std_msgs.msg import String, Float32
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+
+class image_converter:
+
+    timer = None
+    end_time = None
+    averaging = False
+    lines = None
+
+    def __init__(self):
+        self.image_pub = rospy.Publisher("lineDistance", Float32, queue_size=1)
+        self.bridge = CvBridge()
+        self.timer = rospy.Time.now()
+        self.end_time = self.timer + rospy.Duration(.2)
+        self.image_sub = rospy.Subscriber("/camera/image_raw", Image, self.callback)
+
+    def callback(self, data):
+
+        try:
+            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
+        except CvBridgeError as e:
+            print(e)
+            return
+
+        # we want to do a threshold to zero on all values that are not white
+        # define range of color
+        lower = np.array([130,130,130])
+        upper = np.array([240,240,240])
+
+        ReduceBrightnessBy = 25
+
+        # Threshold BGR to get only whitish shades
+        mask = cv2.inRange(cv_image, lower, upper)
+
+        # threshold to remove things that are too bright, to get rid of glare (HSV V = brightness)
+        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
+
+        can = cv2.Canny(gray, 45, 100)
+
+        image = cv2.bitwise_and(cv_image, cv_image, mask=can)
+
+        hsvimg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+
+        # reduce the V (brightness) channel, clamping at zero so uint8 values do not wrap around
+        v = hsvimg[:, :, 2]
+        v[v < ReduceBrightnessBy] = 0
+        v[v >= ReduceBrightnessBy] -= ReduceBrightnessBy
+
+        image = cv2.cvtColor(hsvimg, cv2.COLOR_HSV2BGR)
+
+        colorMask = cv2.inRange(image, lower, upper)
+
+        image = cv2.bitwise_and(image, image, mask=colorMask)
+
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        self.lines = cv2.HoughLines(image, 1, np.pi/180, 105)
+
+        rhothresh = 70
+        minRhoThresh = 13
+        thetaThresh = .5
+
+        parallels = []
+
+        if rospy.Time.now() > self.end_time:  # accumulate lines over time
+            if self.lines is not None:
+
+                while len(self.lines) > 1:
+
+                    outer = self.lines[0]
+                    cur = outer[0]
+
+                    currho = cur[0]
+                    curtheta = cur[1]
+
+                    a = np.cos(curtheta)
+                    b = np.sin(curtheta)
+                    x0 = a*currho
+                    y0 = b*currho
+                    x1 = int(x0 + 1000*(-b))
+                    y1 = int(y0 + 1000*(a))
+                    x2 = int(x0 - 1000*(-b))
+                    y2 = int(y0 - 1000*(a))
+
+                    for index in range(1, len(self.lines)):
+
+                        selected = self.lines[index]
+                        line = selected[0]
+
+                        linerho = line[0]
+                        linetheta = line[1]
+
+                        if (linerho < currho + rhothresh and linerho > currho - rhothresh) and (linerho > currho + minRhoThresh or linerho < currho - minRhoThresh) and (linetheta < curtheta + thetaThresh and linetheta > curtheta - thetaThresh):
+
+                            la = np.cos(linetheta)
+                            lb = np.sin(linetheta)
+                            lx0 = la*linerho
+                            ly0 = lb*linerho
+                            lx1 = int(lx0 + 1000*(-lb))
+                            ly1 = int(ly0 + 1000*(la))
+                            lx2 = int(lx0 - 1000*(-lb))
+                            ly2 = int(ly0 - 1000*(la))
+
+                            color = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
+
+                            parallels.append((cur, line))
+
+                            self.lines = np.delete(self.lines, index, 0)
+                            break
+
+                    self.lines = np.delete(self.lines, 0, 0)
+
+            self.lines = None
+            self.end_time = rospy.Time.now() + rospy.Duration(.1)
+
+        numline = len(parallels)
+
+        averagey = 0
+
+        for i in range(numline):
+
+            l1 = parallels[i][0]
+            l1rho = l1[0]
+            l1theta = l1[1]
+            l1a = np.cos(l1theta)
+            l1b = np.sin(l1theta)
+            l1x0 = l1a*l1rho
+            l1y0 = l1b*l1rho
+
+            l2 = parallels[i][1]
+            l2rho = l2[0]
+            l2theta = l2[1]
+            l2a = np.cos(l2theta)
+            l2b = np.sin(l2theta)
+            l2x0 = l2a*l2rho
+            l2y0 = l2b*l2rho
+
+            # keep whichever line of the pair sits lower in the image (closer to the robot)
+            closest = l1y0 if l2y0 < l1y0 else l2y0
+            averagey += closest
+
+        meters = None
+        if numline > 0:
+            averagey /= numline
+
+            # empirical calibration from averaged line position (pixels) to inches, then to metres
+            distanceInInches = 176 - 27.3*np.log(averagey)
+
+            meters = distanceInInches * 0.0254
+
+        cv2.imshow("edges window", can)
+        cv2.imshow("color seg", colorMask)
+        cv2.imshow("Image window", cv_image)
+        cv2.waitKey(3)
+
+        if meters is not None:
+            self.image_pub.publish(meters)
+
+
+def main(args):
+    rospy.init_node('image_converter', anonymous=True)
+    ic = image_converter()
+
+    try:
+        rospy.spin()
+    except KeyboardInterrupt:
+        print("Shutting down")
+    cv2.destroyAllWindows()
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/catkin_ws/src/bowser_cam/package.xml b/catkin_ws/src/bowser_cam/package.xml
new file mode 100644
index 0000000..a6cf766
--- /dev/null
+++ b/catkin_ws/src/bowser_cam/package.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>bowser_cam</name>
+  <version>0.0.0</version>
+  <description>The bowser_cam package</description>
+
+  <maintainer email="darien@todo.todo">darien</maintainer>
+
+  <license>TODO</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>cv2</build_depend>
+  <build_depend>numpy</build_depend>
+  <build_depend>rospy</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_export_depend>cv2</build_export_depend>
+  <build_export_depend>numpy</build_export_depend>
+  <build_export_depend>rospy</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+  <exec_depend>cv2</exec_depend>
+  <exec_depend>numpy</exec_depend>
+  <exec_depend>rospy</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+
+  <export>
+  </export>
+</package>
diff --git a/catkin_ws/src/bowser_cam/src/AngleToLaneLine.py b/catkin_ws/src/bowser_cam/src/AngleToLaneLine.py
new file mode 100644
index 0000000..379a9f4
--- /dev/null
+++ b/catkin_ws/src/bowser_cam/src/AngleToLaneLine.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+#from __future__ import print_function
+
+import roslib
+import numpy as np
+import sys
+import rospy
+import rosunit
+import math
+import cv2
+import random
+#import cv2.cv as cv
+
+from std_msgs.msg import String, Float64
+import std_msgs.msg
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+
+class image_processor:
+
+    timer = None
+    end_time = None
+    averaging = False
+    lines = None
+
+    def __init__(self):
+        # ROS topic names cannot contain spaces, so "Lane Distance" is published as "lane_distance"
+        self.image_pub = rospy.Publisher("lane_distance", Float64, queue_size=1)
+        self.bridge = CvBridge()
+        self.timer = rospy.Time.now()
+        self.end_time = self.timer + rospy.Duration(.2)
+
+        self.image_sub = rospy.Subscriber("/usb_cam_node/image_raw", Image, self.callback)
+
+    def callback(self, data):
+
+        try:
+            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
+        except CvBridgeError as e:
+            print(e)
+            return
+
+        #create copy of image
+        original = cv_image
+        cv2.imshow('Original', original)
+        #apply gaussian
+        cv_image = cv2.GaussianBlur(cv_image, (9,9), 0)
+        cv2.imshow('Gaussian', cv_image)
+
+        # Decrease Brightness
+        value = 30  # decrease image brightness by this amount
+        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
+        h, s, v =
cv2.split(hsv) + lim = 0 + value + v[v < lim] = 0 + v[v >= lim] -= value + final_hsv = cv2.merge((h, s, v)) + cv_image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR) + cv2.imshow('Reduced Brightness', cv_image) + + #get Canny edge detecton + gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY) + can = cv2.Canny(gray, 30, 120) + ## use edge detection data as mask on to color image + cv_image = cv2.bitwise_and(cv_image, cv_image,mask=can) + cv2.imshow('Cannyed', cv_image) + + # Color thresholding + lower = np.array([105,120,105]) #B G R + upper = np.array([230,200,230]) + mask = cv2.inRange(cv_image, lower, upper) + cMasked = cv2.bitwise_and(cv_image,cv_image, mask= mask) + cv2.imshow('Color Masked', cMasked) + + #threshold to remove things that are too bright or not bright enough + brightnessMin = np.array([30]) + brightnessMax = np.array([230]) + grayimg = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY) + brightMask = cv2.inRange(grayimg, brightnessMin, brightnessMax) + bMasked = cv2.bitwise_and(cMasked,cMasked, mask= brightMask) + cv2.imshow('Brightness Masked', bMasked) + #res = cv2.bitwise_and(cv_image,cv_image, mask= brightMask) + + #lines = cv2.HoughLines(can,1,np.pi/180,200) + + # Probabilistic Line Transform + + bMasked = cv2.cvtColor(bMasked, cv2.COLOR_BGR2GRAY) + self.lines = cv2.HoughLines(bMasked,1,np.pi/180,40) + ''' + if self.lines == None: + + self.lines = temp + print self.lines + print ("initialize") + else: + self.lines = np.append( self.lines,temp,0) + print self.lines + print("Append") + ''' + centers = [] + rhothresh = 90 + minRhoThresh = 15 + thetaThresh = .5 + #np.concatenate((self.lines, temp), axis=0) + + + if( rospy.Time.now() > self.end_time): # get lines over time to + if self.lines is not None: + + + while( len(self.lines) > 1) : + + outer = self.lines[0] + #print ("outer ") + #print(outer) + cur = outer[0] + + currho = cur[0] + curtheta = cur[1] + + a = np.cos(curtheta) + b = np.sin(curtheta) + x0 = a*currho + y0 = b*currho + x1 = int(x0 + 1000*(-b)) + y1 = int(y0 + 1000*(a)) + x2 = int(x0 - 1000*(-b)) + y2 = int(y0 - 1000*(a)) + + + for index in range ( 1, len(self.lines)) : + + selected = self.lines[index] + line = selected[0] + + linerho = line[0] + linetheta = line[1] + + if (linerho < currho + rhothresh and linerho > currho - rhothresh)and ( linerho > currho +minRhoThresh or linerho < currho - minRhoThresh) and (linetheta < curtheta + thetaThresh and linetheta > curtheta - thetaThresh) : + + la = np.cos(linetheta) + lb = np.sin(linetheta) + lx0 = la*linerho + ly0 = lb*linerho + lx1 = int(lx0 + 1000*(-lb)) + ly1 = int(ly0 + 1000*(la)) + lx2 = int(lx0 - 1000*(-lb)) + ly2 = int(ly0 - 1000*(la)) + + linCenter = (ly1 +ly2)/2 + curCenter = (y1 + y2)/2 + + if( linCenter > curCenter) : + centers.append(linCenter) + else: + centers.append(curCenter) + + color = (random.randint(0,255),random.randint(0,255),random.randint(0,255)) + + #pairs.append([cur, line]) + cv2.line(original, (lx1,ly1),(lx2,ly2), color, 2) + cv2.line(original, (x1,y1),(x2,y2),color,2) + self.lines = np.delete(self.lines, index,0) + break + + self.lines = np.delete(self.lines, 0,0) + + + + + + self.lines = None + self.end_time = rospy.Time.now() + rospy.Duration(.2) + #end editing + cv2.imshow('Line view', original) + + distancePx = 0 + if( len(centers) > 0): + + for center in centers: + distancePx += center + + distancePx /= len(centers) + + + distanceIn = 187 - 29.7* math.log1p(distancePx) + distanceIn -=1 + + if distanceIn < 80 : + print(distanceIn) + + distanceMeters = distanceIn * 0.254 + + 
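+        # Note: the mapping above (187 - 29.7*log1p(px)) is an empirical calibration from the
+        # averaged line position in pixels to a distance in inches. One inch is 0.0254 m, so the
+        # 0.254 factor above yields roughly ten times the metre value (assumed to be a leftover
+        # centimetre conversion).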
+        cv2.waitKey(3)
+
+        if len(centers) > 0:
+            self.image_pub.publish(distanceMeters)
+
+
+def main(args):
+    rospy.init_node('laneDistance', anonymous=True)
+    ic = image_processor()
+
+    try:
+        rospy.spin()
+    except KeyboardInterrupt:
+        print("Shutting down")
+    cv2.destroyAllWindows()
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/catkin_ws/src/bowser_nav/src/LineFollowing.py b/catkin_ws/src/bowser_nav/src/LineFollowing.py
new file mode 100644
index 0000000..12f9d0c
--- /dev/null
+++ b/catkin_ws/src/bowser_nav/src/LineFollowing.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+#from __future__ import print_function
+
+import roslib
+import numpy as np
+import sys
+import rospy
+import rosunit
+import math
+from std_msgs.msg import Float32MultiArray, Float64
+import cv2
+import random
+#import bowser_msg.msg
+#import cv2.cv as cv
+
+def main(args):
+    rospy.init_node('line_following', anonymous=True)
+    lineFollower = line_follower()
+    try:
+        rospy.spin()
+    except KeyboardInterrupt:
+        print("Shutting down")
+
+# 2cm
+
+class line_follower:
+
+    def __init__(self):
+        self.cmd_vel_pub = rospy.Publisher("Cmd_Vel", Float32MultiArray, queue_size=1)
+        self.distance_sub = rospy.Subscriber("lane_distance", Float64, self.callback)
+
+    def callback(self, data):
+        # "lane_distance" carries a single Float64: the distance to the lane line in metres.
+        # No angle is published yet, so 90 degrees (straight ahead) is assumed here.
+        angle = 90
+        distance = data.data
+
+        cmd_vel = Float32MultiArray()
+        cmd_vel.data = [-1 * ((angle - 90) / 90.0),  # assume we are making a left turn initially
+                        distance]
+
+        if distance < .2:
+            cmd_vel.data[0] = -1
+
+        if distance < .4:
+            cmd_vel.data[1] = .4
+
+        self.cmd_vel_pub.publish(cmd_vel)
+
+if __name__ == '__main__':
+    main(sys.argv)