From 4ac3b05aec6c24a9972ead1cdcdccdd9776e6498 Mon Sep 17 00:00:00 2001 From: vorkorro14 Date: Mon, 25 Jan 2021 15:06:22 +0300 Subject: [PATCH 01/11] Baseline --- .gitignore | 1 - final_project/main.py | 4 + final_project/server.py | 120 +++++++++++++++ final_project/src/model/__init__.py | 4 + final_project/src/model/kal.json | 208 ++++++++++++++++++++++++++ final_project/src/model/kondomv.py | 32 ++++ final_project/src/model/model.py | 160 ++++++++++++++++++++ final_project/src/model/servo_dict.py | 37 +++++ final_project/src/vision/README.md | 20 +++ final_project/src/vision/__init__.py | 1 + final_project/src/vision/vision.py | 13 ++ 11 files changed, 599 insertions(+), 1 deletion(-) create mode 100644 final_project/main.py create mode 100644 final_project/server.py create mode 100644 final_project/src/model/__init__.py create mode 100644 final_project/src/model/kal.json create mode 100644 final_project/src/model/kondomv.py create mode 100644 final_project/src/model/model.py create mode 100644 final_project/src/model/servo_dict.py create mode 100644 final_project/src/vision/README.md create mode 100755 final_project/src/vision/__init__.py create mode 100755 final_project/src/vision/vision.py diff --git a/.gitignore b/.gitignore index 2e23669..56c9615 100644 --- a/.gitignore +++ b/.gitignore @@ -28,7 +28,6 @@ __pycache__ .#* .coverage .pytest_cache -src *.swp *.map diff --git a/final_project/main.py b/final_project/main.py new file mode 100644 index 0000000..7b76a87 --- /dev/null +++ b/final_project/main.py @@ -0,0 +1,4 @@ +from server import Server + +server = Server() +server.run() diff --git a/final_project/server.py b/final_project/server.py new file mode 100644 index 0000000..4aeb329 --- /dev/null +++ b/final_project/server.py @@ -0,0 +1,120 @@ +import sys +import time +import math +import json +import os +import warnings + +from src.simulation import sensor +from src.simulation import image +warnings.warn("CV reload imported") + +from src.localization.tools 
import median +from src.vision import Vision, VisionPostProcessing +from src.localization import Localization +from src.model import KondoMV + +class Server: + def __init__(self): + self.vision = Vision() + self.vision_postprocessing = VisionPostProcessing() + self.vision.load_detectors("vision/detectors_config.json") + + + with open("calibration/camera_calibration.json") as f: + calibration = json.load(f) + + side = "left" + + self.localization = Localization(-0.7, -1.3, math.pi/2, side) + self.model = KondoMV() + + # setting model parametrs + self.model.setParams(calib["cam_col"], + self.model.robot_height, + [0, 0, 0, 0, 0, 0], + [0, 0]) + + + + + + def run(self): + while True: + if not self.no_vision: + self.clock = time.clock() + self.clock.tick() + curr_t = pyb.millis() + #print (curr_t - t) + t = curr_t + selfData = {} + for i in range(12): + # motion part. Head movement. + self.motion.apply({'name': 'head'}) + if not self.no_vision: + self.model.update_camera_pan_tilt(self.motion.head.pan, self.motion.head.tilt) + # vision part. Taking picture. + img = sensor.snapshot().lens_corr(strength=1.2, zoom=1.0) + + cameraDataRaw = self.vision.get(img, objects_list=["yellow_posts", "blue_posts", "ball"], + drawing_list=["yellow_posts", "blue_posts", "ball"]) + + # cameraDataRaw=vision.get(img, objects_list=["yellow_posts", "ball", "white_posts_support"], + # drawing_list=["yellow_posts", "ball", "white_posts_support"]) + + # vision_postprocessing.process (cameraDataRaw, "yellow_posts", "white_posts_support") + cameraDataProcessed = cameraDataRaw + # model part. Mapping to world coords. 
+ + # self means in robots coords + for observationType in cameraDataProcessed: + if observationType not in selfData.keys(): + selfData[observationType] = [] + selfPoints = [] + + # if (len (cameraDataProcessed[observationType]) > 0): + # print ("obser", cameraDataProcessed[observationType] [0]) + + for observation in cameraDataProcessed[observationType]: + cx = observation[5] + cy = observation[6] + w = observation[2] + h = observation[3] + + selfPoints.append(self.model.pic2r(cx, cy + (w+h)/4)) + selfData[observationType] += selfPoints + + print("eto self yello points", #can be turned off, but now thats needs for debug + selfData['yellow_posts'], "eto self blue points", selfData["blue_posts"]) + + if len(selfData['yellow_posts']) != 0: + general = [] + first_side = [] + second_side = [] + k = selfData['yellow_posts'][0] + for pep in selfData['yellow_posts']: + if math.fabs(math.atan(pep[0]/pep[1]) - math.atan(k[0]/k[1])) < 0.3: + first_side.append(list(pep)) + else: + second_side.append(list(pep)) + if len(first_side) != 1: + first_side = median(first_side) + else: + first_side = first_side[0] + + if second_side != 0 and len(second_side) != 1: + second_side = median(second_side) + elif len(second_side) == 1: + second_side = second_side[0] + + general.append(first_side) + if len(second_side) != 0: + general.append(second_side) + selfData['yellow_posts'] = general + print("eto self yello points", #can be turned off, but now thats needs for debug + selfData['yellow_posts'], "eto self blue points", selfData["blue_posts"]) + + self.localization.update(selfData) + self.localization.update_ball(selfData) + self.localization.localized = True #can be turned off, but now thats needs for debug + # print(loc.ballPosSelf) diff --git a/final_project/src/model/__init__.py b/final_project/src/model/__init__.py new file mode 100644 index 0000000..9817666 --- /dev/null +++ b/final_project/src/model/__init__.py @@ -0,0 +1,4 @@ +from .model import Model +from .kondomv import KondoMV 
+from .servo_dict import ServoDict +__all__ = ["Model", "KondoMV", "ServoDict"] \ No newline at end of file diff --git a/final_project/src/model/kal.json b/final_project/src/model/kal.json new file mode 100644 index 0000000..6d5a348 --- /dev/null +++ b/final_project/src/model/kal.json @@ -0,0 +1,208 @@ +{ + "torso": { + "id": 0, + "sio": 2, + "simulation_joint_name" : "Tors_0", + "zero": 0, + "limits": [-180, 180], + "inverse": 1 + }, + "left_shoulder_pitch": + { + "id": 1, + "sio": 1, + "simulation_joint_name" : "hand_left_1", + "zero": 0, + "limits": [-180, 180], + "inverse": -1 + }, + "right_shoulder_pitch": + { + "id": 1, + "sio": 2, + "simulation_joint_name" : "hand_right_1", + "zero": 0, + "limits": [-180, 180], + "inverse": -1 + }, + "left_shoulder_roll": + { + "id": 2, + "sio": 1, + "simulation_joint_name" : "hand_left_2", + "zero": 0, + "limits": [-180, 180], + "inverse": 1 + }, + "right_shoulder_roll": + { + "id": 2, + "sio": 2, + "simulation_joint_name" : "hand_right_2", + "zero": 0, + "limits": [-180, 180], + "inverse": 1 + }, + "left_elbow_yaw": + { + "id": 3, + "sio": 1, + "simulation_joint_name" : "hand_left_3", + "zero": 0, + "limits": [-180, 180], + "inverse": 1 + }, + "right_elbow_yaw": + { + "id": 3, + "sio": 2, + "simulation_joint_name" : "hand_right_3", + "zero": 0, + "limits": [-180, 180], + "inverse": 1 + }, + "left_elbow_pitch": + { + "id": 4, + "sio": 1, + "simulation_joint_name" : "hand_left_4", + "zero": 0, + "limits": [-180, 180], + "inverse": -1 + }, + "right_elbow_pitch": + { + "id": 4, + "sio": 2, + "simulation_joint_name" : "hand_right_4", + "zero": 0, + "limits": [-180, 180], + "inverse": -1 + }, + "left_hip_yaw": + { + "id": 5, + "sio": 1, + "simulation_joint_name" : "leg_left_5", + "zero": 0, + "limits": [-90, 90], + "inverse": -1 + }, + "right_hip_yaw": + { + "id": 5, + "sio": 2, + "simulation_joint_name" : "leg_right_5", + "zero": 0, + "limits": [-90, 90], + "inverse": -1 + }, + "left_hip_roll": + { + "id": 6, + "sio": 1, + 
"simulation_joint_name" : "leg_left_6", + "zero": 0, + "limits": [-100, 25], + "inverse": -1 + }, + "right_hip_roll": + { + "id": 6, + "sio": 2, + "simulation_joint_name" : "leg_right_6", + "zero": 0, + "limits": [-100, 25], + "inverse": -1 + }, + "left_hip_pitch": + { + "id": 7, + "sio": 1, + "simulation_joint_name" : "leg_left_7", + "zero": 0, + "limits": [-120, 110], + "inverse": 1 + }, + "right_hip_pitch": + { + "id": 7, + "sio": 2, + "simulation_joint_name" : "leg_right_7", + "zero": 0, + "limits": [-120, 110], + "inverse": 1 + }, + "left_knee": + { + "id": 8, + "sio": 1, + "simulation_joint_name" : "leg_left_8", + "zero": 0, + "limits": [-140, 0], + "inverse": 1 + }, + "right_knee": + { + "id": 8, + "sio": 2, + "simulation_joint_name" : "leg_right_8", + "zero": 0, + "limits": [-140, 0], + "inverse": -1 + }, + "left_ankle_pitch": + { + "id": 9, + "sio": 1, + "simulation_joint_name" : "leg_left_9", + "zero": 0, + "limits": [-135, 100], + "inverse": -1 + }, + "right_ankle_pitch": + { + "id": 9, + "sio": 2, + "simulation_joint_name" : "leg_right_9", + "zero": 0, + "limits": [-135, 100], + "inverse": -1 + }, + "left_ankle_roll": + { + "id": 10, + "sio": 1, + "simulation_joint_name" : "leg_left_10", + "zero": 0, + "limits": [-95, 20], + "inverse": 1 + }, + "right_ankle_roll": + { + "id": 10, + "sio": 2, + "simulation_joint_name" : "leg_right_10", + "zero": 0, + "limits": [-95, 20], + "inverse": 1 + }, + "head_yaw": + { + "id": 0, + "sio": 1, + "simulation_joint_name" : "head0", + "zero": 0, + "limits": [-180, 180], + "inverse": -1 + }, + "head_pitch": + { + "id": 12, + "sio": 2, + "simulation_joint_name" : "head12", + "zero": 0, + "limits": [-180, 180], + "inverse": 1 + } +} \ No newline at end of file diff --git a/final_project/src/model/kondomv.py b/final_project/src/model/kondomv.py new file mode 100644 index 0000000..c28bc6d --- /dev/null +++ b/final_project/src/model/kondomv.py @@ -0,0 +1,32 @@ +import sys +import json +from .model import Model +from 
..odometry import Odometry +from .servo_dict import ServoDict + +class KondoMV(Model): + def __init__(self, imu=None): + super().__init__() + with open("model/kal.json") as f: + self.servos = ServoDict(json.loads(f.read())) + + self.robot_height = 0.42 + + self.sizes = { + "a5": 0.0215, # m distance from symmetry axis to servo 5 + "b5": 0.0185, # м расстояние от оси сервы 5 до оси сервы 6 по горизонтали + "c5": 0, # м расстояние от оси сервы 6 до нуля Z по вертикали + "a6": 0.042, # м расстояние от оси сервы 6 до оси сервы 7 + "a7": 0.0655, # м расстояние от оси сервы 7 до оси сервы 8 + "a8": 0.0638, # м расстояние от оси сервы 8 до оси сервы 9 + "a9": 0.0355, # м расстояние от оси сервы 9 до оси сервы 10 + "a10": 0.0254, # м расстояние от оси сервы 10 до центра стопы по горизонтали + "b10": 0.0164, # м расстояние от оси сервы 10 до низа стопы + "c10": 0.012 # м расстояние от оси сервы 6 до оси сервы 10 по горизонтали + } + + self.odometry = Odometry(imu) + +if __name__ == "__main__": + kondo = KondoMV() + print(kondo.servos['torso']) diff --git a/final_project/src/model/model.py b/final_project/src/model/model.py new file mode 100644 index 0000000..296abd9 --- /dev/null +++ b/final_project/src/model/model.py @@ -0,0 +1,160 @@ +import math + +class Model: + def __init__(self): + self.camera_pan = 0 + self.camera_tilt = 0 + self.active_servos = {} + + def update_camera_pan_tilt(self, camera_pan, camera_tilt): + self.camera_pan = camera_pan + self.camera_tilt = camera_tilt + + def setParams(self, cam_matrix, h, k_coefs, p_coefs): + # cam_matrix - camera matrix + # h - camera height + # k_coefs, p_coefs - coefficients of lense distortion + self.cam_matrix = cam_matrix + self.h = h + self.k_coefs = k_coefs + self.p_coefs = p_coefs + self.head_to_base_matrix = [[0.99936382, 0.034978, -0.00696373], + [-0.03510436, 0.99920425, -0.01893518], + [0.00629587, 0.01916759, 0.99979646]] + + # function calculating dot product of matrix and vector + def matrix_dot_vec(self, matr, 
vect): + n = len(vect) + res_vect = [] + for i in range(n): + res_vect.append(0.0) + for j in range(n): + res_vect[i] += matr[i][j] * vect[j] + + return res_vect + + # transformation from robot coords to camera coords + def r2cam(self, xb, yb): + r_coords = [xb, yb, -self.h] + a = self.camera_pan + b = self.camera_tilt + + r2cam_rot_matrix = [[math.cos(a) * math.cos(b), + math.sin(a), + -math.cos(a) * math.sin(b)], + [-math.sin(a) * math.cos(b), + math.cos(a), + math.sin(a) * math.sin(b)], + [math.sin(b), + 0.0, + math.cos(b)]] + + return self.matrix_dot_vec(r2cam_rot_matrix, r_coords) + + # transformation from camera coords to robot coords + def cam2r(self, cam_coords): + a = self.camera_pan + b = self.camera_tilt + + cam2r_rot_matrix = [[math.cos(a) * math.cos(b), + -math.sin(a), + math.cos(a) * math.sin(b)], + [math.sin(a) * math.cos(b), + math.cos(a), + math.sin(a) * math.sin(b)], + [-math.sin(b), + 0.0, + math.cos(b)]] + + return self.matrix_dot_vec(cam2r_rot_matrix, cam_coords) + + # transformation from robot coords to pixel picture coords + def r2pic(self, xb, yb): + # xb, yb - coords of the ball in the robot system + + # transformation to camera coords and back to apply head-to-base matrix + cam_coords = self.r2cam(xb, yb) + fixed_cam_coords = self.matrix_dot_vec(self.head_to_base_matrix, + cam_coords) + xb, yb, zb = self.cam2r(fixed_cam_coords) + + # rotating the coordinate system by the cameraPan + # xbi,ybi - coords of the ball in the same system after rotation + xbi = xb * math.cos(self.camera_pan) + yb * math.sin(self.camera_pan) + ybi = yb * math.cos(self.camera_pan) - xb * math.sin(self.camera_pan) + + # taking into account the case when the ball is behind the camera + if xbi <= 0: + print("Ball behind the camera") + return (-1, -1) + + # alp - artificially created angle, used for simplification + # x,y - coords of the ball in system, parallel to the camera screen + # u,v - pixel coords of the ball in the screen system + alp = math.atan(self.h / xbi) 
+ self.camera_tilt + x = ybi / math.cos(alp) / math.sqrt(xbi**2 + self.h ** 2) + y = math.tan(alp) + + # applying the lense distortion-fix formula + r_sq = x ** 2 + y ** 2 + coef_numerator = (1 + + self.k_coefs[0] * r_sq + + self.k_coefs[1] * (r_sq ** 2) + + self.k_coefs[2] * (r_sq ** 3)) + + coef_denominator = (1 + + self.k_coefs[3] * r_sq + + self.k_coefs[4] * (r_sq ** 2) + + self.k_coefs[5] * (r_sq ** 3)) + + coef = coef_numerator / coef_denominator + + # x_cor, y_cor - ball cords in artificially created coord system + x_cor_parts = [x * coef, + 2 * self.p_coefs[0] * x * y, + self.p_coefs[1] * (r_sq + 2 * x ** 2)] + x_cor = sum(x_cor_parts) + + y_cor_parts = [y * coef, + 2 * self.p_coefs[1] * x * y, + self.p_coefs[0] * (r_sq + 2 * y ** 2)] + y_cor = sum(y_cor_parts) + + # u, v - pixel coords of the ball + u = -x_cor * self.cam_matrix[0][0] + self.cam_matrix[0][2] + v = y_cor * self.cam_matrix[1][1] + self.cam_matrix[1][2] + return (int(u), int(v)) + + # transformation from pixel coords to robot coords + def pic2r(self, u, v): + # u,v - pixel coords of the ball in the screen system + + # x,y - coords of the ball in the coordinate system, + # parallel to the camera screen + x = (float(u) - self.cam_matrix[0][2]) / self.cam_matrix[0][0] + y = (float(v) - self.cam_matrix[1][2]) / self.cam_matrix[1][1] + + # alp,bet - vertical and horizontal angles + # of the ball radius-vector relative to the camera 0,0 direction + alp = math.atan(y) + bet = math.atan(x) + + robot_alp = alp - self.camera_tilt + + # xb, yb - coordinates of the ball in the system turned by cameraPan + xb = self.h / math.tan(robot_alp) + yb = math.tan(bet) * math.sqrt(self.h ** 2 + xb ** 2) + + # xb_r, yb_r - coordinates of the ball in the robot system + xb_r = xb * math.cos(self.camera_pan) + yb * math.sin(self.camera_pan) + yb_r = yb * math.cos(self.camera_pan) - xb * math.sin(self.camera_pan) + + # transformation to camera coords and back to apply head-to-base matrix + cam_coords = self.r2cam(xb_r, 
-yb_r) + fixed_cam_coords = self.matrix_dot_vec(self.head_to_base_matrix, + cam_coords) + + # final coords in robot system + xb_r, yb_r, zb_r = self.cam2r(fixed_cam_coords) + + return (xb_r, yb_r) diff --git a/final_project/src/model/servo_dict.py b/final_project/src/model/servo_dict.py new file mode 100644 index 0000000..16bbbc9 --- /dev/null +++ b/final_project/src/model/servo_dict.py @@ -0,0 +1,37 @@ +class ServoDict: + def __init__(self, base_dict): + self.container = base_dict + + def __getitem__(self, key): + if isinstance(key, tuple) or isinstance(key, list): + if len(key) == 2: + for item in self.container.values(): + if item['id'] == key[0] and item['sio'] == key[1]: + return item + else: + raise KeyError + else: + return self.container[key] + + def __setitem__(self, key, item): + self.container[key] = item + + def __iter__(self): + return iter(self.container) + + def __contains__(self, item): + return self.container.__contains__(item) + + def __getattr__(self, attr): + if attr == 'ids': + values = self.container.values + return [(value['id'], value['sio']) for value in values] + + def __repr__(self): + return self.container + +if __name__ == "__main__": + d = {'a': 0, 'b': 1} + servos = ServoDict(d) + for servo in servos: + print(servo, servos[servo]) \ No newline at end of file diff --git a/final_project/src/vision/README.md b/final_project/src/vision/README.md new file mode 100644 index 0000000..2e08f08 --- /dev/null +++ b/final_project/src/vision/README.md @@ -0,0 +1,20 @@ +# Vision + +All the detection relies on the usage of the Blob analysis from the OpenMV SDK + +Classes: + +Detectors: + -Detector - basic visualization, placeholders for common methods + + -ColoredObjectDetector [inherited from Detector] - blob analysis, + particular filtration methods + + -SurroundedObjectDetector [inherited from ColoredObjectDetector] - + takes surrounding pixels of the object into account to check if + the detected object has proper background + +Containers/other: 
+ -Vision - container for the detectors + + -VisionPostprocessing - mutual spatial relations of the detected objects diff --git a/final_project/src/vision/__init__.py b/final_project/src/vision/__init__.py new file mode 100755 index 0000000..5ad0567 --- /dev/null +++ b/final_project/src/vision/__init__.py @@ -0,0 +1 @@ +from .vision import Vision \ No newline at end of file diff --git a/final_project/src/vision/vision.py b/final_project/src/vision/vision.py new file mode 100755 index 0000000..ebac38a --- /dev/null +++ b/final_project/src/vision/vision.py @@ -0,0 +1,13 @@ +import json + + +class Vision: + """ + """ + + def __init__(self): + pass + + def get(self, img): + result = "" + return result From 286291b64171068cd933fcd9937c542fa73e4e74 Mon Sep 17 00:00:00 2001 From: vorkorro14 Date: Mon, 25 Jan 2021 15:13:49 +0300 Subject: [PATCH 02/11] CV sensor --- final_project/src/vision/cv_sensor.py | 161 ++++++++++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 final_project/src/vision/cv_sensor.py diff --git a/final_project/src/vision/cv_sensor.py b/final_project/src/vision/cv_sensor.py new file mode 100644 index 0000000..93b1779 --- /dev/null +++ b/final_project/src/vision/cv_sensor.py @@ -0,0 +1,161 @@ +# Bug that sometimes appears on several computers +try: + import cv2 +except ImportError: + import cv2.cv2 as cv2 + +import math +import numpy as np + + +class Blob: + def __init__(self, x_, y_, w_, h_): + self.x_ = x_ + self.y_ = y_ + self.w_ = w_ + self.h_ = h_ + + def x(self): + return self.x_ + + def y(self): + return self.y_ + + def w(self): + return self.w_ + + def h(self): + return self.h_ + + def cx(self): + return int(self.x_ + self.w_ / 2) + + def cy(self): + return int(self.y_ + self.h_ / 2) + + def rect(self): + return self.x_, self.y_, self.w_, self.h_ + + +class Line: + def __init__(self, x1_, y1_, x2_, y2_, theta_): + self.x1_ = x1_ + self.y1_ = y1_ + self.x2_ = x2_ + self.y2_ = y2_ + self.theta_ = theta_ + + def x1(self): + 
return self.x1_ + + def y1(self): + return self.y1_ + + def x2(self): + return self.x2_ + + def y2(self): + return self.y2_ + + def theta(self): + return self.theta_ + + def line(self): + return self.x1_, self.y1_, self.x2_, self.y2_ + + +class Image: + def __init__(self, img_): + self.img = img_ + + def find_blobs(self, th, pixels_threshold, area_threshold, merge, margin): + low_th = (int(th[0] * 2.55), th[2] + 128, th[4] + 128) + high_th = (int(th[1] * 2.55), th[3] + 128, th[5] + 128) + + labimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB) + + mask = cv2.inRange(labimg, low_th, high_th) + + #cv2.imshow ("a", mask) + + output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S) + + #labels_count = output [0] + #labels = output [1] + stats = output [2] + #labels_count, labels, stats = output[:3] + sz = stats.shape[0] + + blobs = [] + + for label_num in range(1, sz): + area = stats[label_num, cv2.CC_STAT_AREA] + + if area >= pixels_threshold: + new_blob = Blob(stats[label_num, cv2.CC_STAT_LEFT], + stats[label_num, cv2.CC_STAT_TOP], + stats[label_num, cv2.CC_STAT_WIDTH], + stats[label_num, cv2.CC_STAT_HEIGHT]) + + #print ("append", area) + blobs.append(new_blob) + + return blobs + + def binary(self, th): + low = (th[0], th[2], th[4]) + high = (th[1], th[3], th[5]) + + mask = cv2.inRange(self.img, low, high) + + sh = mask.shape + + result = np.zeros((sh[0], sh[1], 3), self.img.dtype) + + for i in range(0, 3): + result[:, :, i] = mask.copy() + + return result + + def find_lines(self): + # - Почему Колумб приплыл в Америку, а не в Индию? 
+ # - Потому что он плавал по одометрии + + gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) + edges = cv2.Canny(gray, 50, 150, apertureSize=3) + + #cv2.imshow ("a", edges) + + #lines = cv2.HoughLines(edges, 5, np.pi / 18, 20) + lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 150) + + resultant_lines = [] + + #print (lines) + + for line in lines: + x1, y1, x2, y2 = line [0] + + theta = math.atan((y2 - y1) / (x2 - x1)) + + new_line = Line(x1, y1, x2, y2, theta) + + resultant_lines.append(new_line) + + return resultant_lines + + def draw_line(self, line): + x1, y1, x2, y2 = line + cv2.line(self.img, (x1, y1), (x2, y2), (0, 0, 255), 2) + + def draw_rectangle(self, rect): + x, y, w, h = rect + cv2.rectangle(self.img, (x, y), (x+w, y+h), (255, 0, 0), 3) + +class Sensor: + def __init__(self, filename_): + self.filename = filename_ + self.img = cv2.imread(self.filename) + + def snapshot(self): + return Image(self.img.copy()) \ No newline at end of file From 93b799c66a93244f1fc690f31d12c55ef75b2670 Mon Sep 17 00:00:00 2001 From: vorkorro14 Date: Mon, 25 Jan 2021 18:40:43 +0300 Subject: [PATCH 03/11] Refactoring model --- final_project/src/model/__init__.py | 5 +- final_project/src/model/kal.json | 208 -------------------------- final_project/src/model/kondomv.py | 32 ---- final_project/src/model/servo_dict.py | 37 ----- final_project/src/vision/__init__.py | 3 +- 5 files changed, 3 insertions(+), 282 deletions(-) delete mode 100644 final_project/src/model/kal.json delete mode 100644 final_project/src/model/kondomv.py delete mode 100644 final_project/src/model/servo_dict.py diff --git a/final_project/src/model/__init__.py b/final_project/src/model/__init__.py index 9817666..7040c63 100644 --- a/final_project/src/model/__init__.py +++ b/final_project/src/model/__init__.py @@ -1,4 +1 @@ -from .model import Model -from .kondomv import KondoMV -from .servo_dict import ServoDict -__all__ = ["Model", "KondoMV", "ServoDict"] \ No newline at end of file +from .model import Model \ 
No newline at end of file diff --git a/final_project/src/model/kal.json b/final_project/src/model/kal.json deleted file mode 100644 index 6d5a348..0000000 --- a/final_project/src/model/kal.json +++ /dev/null @@ -1,208 +0,0 @@ -{ - "torso": { - "id": 0, - "sio": 2, - "simulation_joint_name" : "Tors_0", - "zero": 0, - "limits": [-180, 180], - "inverse": 1 - }, - "left_shoulder_pitch": - { - "id": 1, - "sio": 1, - "simulation_joint_name" : "hand_left_1", - "zero": 0, - "limits": [-180, 180], - "inverse": -1 - }, - "right_shoulder_pitch": - { - "id": 1, - "sio": 2, - "simulation_joint_name" : "hand_right_1", - "zero": 0, - "limits": [-180, 180], - "inverse": -1 - }, - "left_shoulder_roll": - { - "id": 2, - "sio": 1, - "simulation_joint_name" : "hand_left_2", - "zero": 0, - "limits": [-180, 180], - "inverse": 1 - }, - "right_shoulder_roll": - { - "id": 2, - "sio": 2, - "simulation_joint_name" : "hand_right_2", - "zero": 0, - "limits": [-180, 180], - "inverse": 1 - }, - "left_elbow_yaw": - { - "id": 3, - "sio": 1, - "simulation_joint_name" : "hand_left_3", - "zero": 0, - "limits": [-180, 180], - "inverse": 1 - }, - "right_elbow_yaw": - { - "id": 3, - "sio": 2, - "simulation_joint_name" : "hand_right_3", - "zero": 0, - "limits": [-180, 180], - "inverse": 1 - }, - "left_elbow_pitch": - { - "id": 4, - "sio": 1, - "simulation_joint_name" : "hand_left_4", - "zero": 0, - "limits": [-180, 180], - "inverse": -1 - }, - "right_elbow_pitch": - { - "id": 4, - "sio": 2, - "simulation_joint_name" : "hand_right_4", - "zero": 0, - "limits": [-180, 180], - "inverse": -1 - }, - "left_hip_yaw": - { - "id": 5, - "sio": 1, - "simulation_joint_name" : "leg_left_5", - "zero": 0, - "limits": [-90, 90], - "inverse": -1 - }, - "right_hip_yaw": - { - "id": 5, - "sio": 2, - "simulation_joint_name" : "leg_right_5", - "zero": 0, - "limits": [-90, 90], - "inverse": -1 - }, - "left_hip_roll": - { - "id": 6, - "sio": 1, - "simulation_joint_name" : "leg_left_6", - "zero": 0, - "limits": [-100, 25], - 
"inverse": -1 - }, - "right_hip_roll": - { - "id": 6, - "sio": 2, - "simulation_joint_name" : "leg_right_6", - "zero": 0, - "limits": [-100, 25], - "inverse": -1 - }, - "left_hip_pitch": - { - "id": 7, - "sio": 1, - "simulation_joint_name" : "leg_left_7", - "zero": 0, - "limits": [-120, 110], - "inverse": 1 - }, - "right_hip_pitch": - { - "id": 7, - "sio": 2, - "simulation_joint_name" : "leg_right_7", - "zero": 0, - "limits": [-120, 110], - "inverse": 1 - }, - "left_knee": - { - "id": 8, - "sio": 1, - "simulation_joint_name" : "leg_left_8", - "zero": 0, - "limits": [-140, 0], - "inverse": 1 - }, - "right_knee": - { - "id": 8, - "sio": 2, - "simulation_joint_name" : "leg_right_8", - "zero": 0, - "limits": [-140, 0], - "inverse": -1 - }, - "left_ankle_pitch": - { - "id": 9, - "sio": 1, - "simulation_joint_name" : "leg_left_9", - "zero": 0, - "limits": [-135, 100], - "inverse": -1 - }, - "right_ankle_pitch": - { - "id": 9, - "sio": 2, - "simulation_joint_name" : "leg_right_9", - "zero": 0, - "limits": [-135, 100], - "inverse": -1 - }, - "left_ankle_roll": - { - "id": 10, - "sio": 1, - "simulation_joint_name" : "leg_left_10", - "zero": 0, - "limits": [-95, 20], - "inverse": 1 - }, - "right_ankle_roll": - { - "id": 10, - "sio": 2, - "simulation_joint_name" : "leg_right_10", - "zero": 0, - "limits": [-95, 20], - "inverse": 1 - }, - "head_yaw": - { - "id": 0, - "sio": 1, - "simulation_joint_name" : "head0", - "zero": 0, - "limits": [-180, 180], - "inverse": -1 - }, - "head_pitch": - { - "id": 12, - "sio": 2, - "simulation_joint_name" : "head12", - "zero": 0, - "limits": [-180, 180], - "inverse": 1 - } -} \ No newline at end of file diff --git a/final_project/src/model/kondomv.py b/final_project/src/model/kondomv.py deleted file mode 100644 index c28bc6d..0000000 --- a/final_project/src/model/kondomv.py +++ /dev/null @@ -1,32 +0,0 @@ -import sys -import json -from .model import Model -from ..odometry import Odometry -from .servo_dict import ServoDict - -class 
KondoMV(Model): - def __init__(self, imu=None): - super().__init__() - with open("model/kal.json") as f: - self.servos = ServoDict(json.loads(f.read())) - - self.robot_height = 0.42 - - self.sizes = { - "a5": 0.0215, # m distance from symmetry axis to servo 5 - "b5": 0.0185, # м расстояние от оси сервы 5 до оси сервы 6 по горизонтали - "c5": 0, # м расстояние от оси сервы 6 до нуля Z по вертикали - "a6": 0.042, # м расстояние от оси сервы 6 до оси сервы 7 - "a7": 0.0655, # м расстояние от оси сервы 7 до оси сервы 8 - "a8": 0.0638, # м расстояние от оси сервы 8 до оси сервы 9 - "a9": 0.0355, # м расстояние от оси сервы 9 до оси сервы 10 - "a10": 0.0254, # м расстояние от оси сервы 10 до центра стопы по горизонтали - "b10": 0.0164, # м расстояние от оси сервы 10 до низа стопы - "c10": 0.012 # м расстояние от оси сервы 6 до оси сервы 10 по горизонтали - } - - self.odometry = Odometry(imu) - -if __name__ == "__main__": - kondo = KondoMV() - print(kondo.servos['torso']) diff --git a/final_project/src/model/servo_dict.py b/final_project/src/model/servo_dict.py deleted file mode 100644 index 16bbbc9..0000000 --- a/final_project/src/model/servo_dict.py +++ /dev/null @@ -1,37 +0,0 @@ -class ServoDict: - def __init__(self, base_dict): - self.container = base_dict - - def __getitem__(self, key): - if isinstance(key, tuple) or isinstance(key, list): - if len(key) == 2: - for item in self.container.values(): - if item['id'] == key[0] and item['sio'] == key[1]: - return item - else: - raise KeyError - else: - return self.container[key] - - def __setitem__(self, key, item): - self.container[key] = item - - def __iter__(self): - return iter(self.container) - - def __contains__(self, item): - return self.container.__contains__(item) - - def __getattr__(self, attr): - if attr == 'ids': - values = self.container.values - return [(value['id'], value['sio']) for value in values] - - def __repr__(self): - return self.container - -if __name__ == "__main__": - d = {'a': 0, 'b': 1} - 
servos = ServoDict(d) - for servo in servos: - print(servo, servos[servo]) \ No newline at end of file diff --git a/final_project/src/vision/__init__.py b/final_project/src/vision/__init__.py index 5ad0567..5b573b5 100755 --- a/final_project/src/vision/__init__.py +++ b/final_project/src/vision/__init__.py @@ -1 +1,2 @@ -from .vision import Vision \ No newline at end of file +from .vision import Vision +from .cv_sensor import Sensor, Image \ No newline at end of file From cc131ec21295160893670b03df28d79b583e0a62 Mon Sep 17 00:00:00 2001 From: Ilya Ryakin Date: Mon, 25 Jan 2021 23:53:25 +0300 Subject: [PATCH 04/11] add loc files --- final_project/src/localization/__init__.py | 1 + final_project/src/localization/localization.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) create mode 100755 final_project/src/localization/__init__.py create mode 100644 final_project/src/localization/localization.py diff --git a/final_project/src/localization/__init__.py b/final_project/src/localization/__init__.py new file mode 100755 index 0000000..d515567 --- /dev/null +++ b/final_project/src/localization/__init__.py @@ -0,0 +1 @@ +from .localization import Localization \ No newline at end of file diff --git a/final_project/src/localization/localization.py b/final_project/src/localization/localization.py new file mode 100644 index 0000000..a79e667 --- /dev/null +++ b/final_project/src/localization/localization.py @@ -0,0 +1,15 @@ + + +class Localization: + def __init__(self): + pass + + + def update(self, data): + pass + + def return_robot_position(self): + pass + + def return_ball_position(self): + pass From d4ee59598e254842e48900f9d0527be227414a5f Mon Sep 17 00:00:00 2001 From: Ilya Osokin Date: Tue, 26 Jan 2021 16:52:17 +0300 Subject: [PATCH 05/11] Added io lib --- notebooks/input_output.py | 294 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 notebooks/input_output.py diff --git a/notebooks/input_output.py 
b/notebooks/input_output.py new file mode 100644 index 0000000..f3644ab --- /dev/null +++ b/notebooks/input_output.py @@ -0,0 +1,294 @@ +import cv2 +from pathlib import Path +import numpy as np + + + +def get_available_cameras(upper_bound=10, lower_bound=0): + available = [] + + for i in range(lower_bound, upper_bound): + cap = cv2.VideoCapture(i) + + if cap.isOpened(): + available.append(i) + + cap.release() + + return available + + +def folder_files(path): + files_png = sorted(Path(path).glob('*.png')) + files_jpg = sorted(Path(path).glob('*.jpg')) + files_bmp = sorted(Path(path).glob('*.bmp')) + + files = files_png + files_jpg + files_bmp + + return files + +# Incapsulates reading frames from the following types of sources: +# photo (single image), photos series, video, camera, ROS node (?) + +# If an input file or camera number is given, desired input type +# can be omitted. Numeric value given in initialization is considered +# as a number of camera desired to be opened. Value "-1" opens camera +# with minimal available id, "-2" - with maximal. + +# TODO: semi-automatic source type detection +# TODO: implementation of reading from all the sources (except for ROS) [later] +# TODO: output [later] + + +class Source: + #type = "" + #path = "" + + # Taking sample image requires reading, in case of non-constant + # sources like camera or video it can lead to a loss of a single + # frame. 
These is the fix :) + sample_image_obtained = False + sample_image_incoherency_fixed = False + + def __init__(self, path_, type_="", instant_init=True): + if (not isinstance(path_, str) and not isinstance(path_, unicode)): + self.path = path_["data_path"] + + else: + self.path = path_ + + if type_ == "": + if (self.path.endswith(".jpg") or + self.path.endswith(".png") or + self.path.endswith(".bmp")): + self.type = "photo" + + elif (self.path.endswith(".webm") or + self.path.endswith(".mp4") or + self.path.endswith(".MTS") or + self.path.endswith(".mkv") or + self.path.endswith(".mov") or + self.path.endswith(".avi")): + self.type = "video" + + elif self.path.endswith("/"): + self.type = "photo series" + + elif (self.path.isnumeric() or + self.path[1:].isnumeric()): + self.type = "camera" + + num = int(self.path) + + if num < 0: + cameras = get_available_cameras() + + if num == -1: + self.cam_num = min(cameras) + + else: + self.cam_num = max(cameras) + + else: + self.cam_num = num + + else: + self.type = "ros flex" + + else: + self.type = type_ + + #print (self.type) + + if instant_init: + self.init_source() + + def shape(self): + return self.sample_image().shape + + def sample_image(self): + if not self.sample_image_obtained: + self.sample_image = self.get_frame() + + self.sample_image_obtained = True + + return self.sample_image + + def init_source(self): + self.sources = {} + + self.sources.update( + {"photo": (self.init_photo, self.get_frame_photo)}) + self.sources.update( + {"photo series": (self.init_photo_series, self.get_frame_photo_series)}) + self.sources.update( + {"video": (self.init_video, self.get_frame_video)}) + self.sources.update( + {"camera": (self.init_camera, self.get_frame_camera)}) + #self.sources.update ({"ros flex" : (self.init_ros_flex, self.get_frame_ros_flex)}) + + self.sources[self.type][0]() + + def init_photo(self): + self.img = cv2.imread(self.path) + if self.img is None: + print("Failed to load img{}".format(self.path)) + + def 
init_photo_series(self): + self.file_num = 0 + self.files = folder_files(self.path) + + #print (self.files) + #print (len (self.files), " files") + + def init_video(self): + self.video = cv2.VideoCapture(self.path) + + def init_camera(self): + self.camera = cv2.VideoCapture(self.cam_num) + + # def init_photo (self): + # self.img = cv2.imread (self.path) + + def get_frame(self): + if (self.sample_image_obtained and not + self.sample_image_incoherency_fixed): + self.sample_image_incoherency_fixed = True + + return True, self.sample_image + + return self.sources[self.type][1]() + + def get_frame_photo(self): + return True, self.img.copy() + + def get_frame_photo_series(self): + filename = str(self.files[self.file_num]) + + #print (filename) + + img = cv2.imread(filename) + + self.file_num += 1 + + if (self.file_num == len(self.files)): + self.file_num = 0 + + #print ("big shap ", img.shape) + + return img + + def get_frame_video(self): + reading_success, frame = self.video.read() + + if not reading_success: + self.video.release() + self.init_video() + + reading_success, frame = self.video.read() + + return reading_success, frame + + def get_frame_camera(self): + reading_success, frame = self.camera.read() + + return reading_success, frame + + def get_frame_photo(self): + return self.img.copy() + + def release(self): + if self.type == "camera": + self.camera.release() + +# output (stream to video file) + +# generalize to the desired a by b cells grid (?) +# generalize to the desired acpect ratio (?) 
+ + +def form_grid(images_, window_x_sz=-1, one_img_x_sz=-1, names=[]): + images = [] + forms = { + # (number of images x, number of images y, number of empty images) + 1: [1, 1, 0], + 2: [2, 1, 0], + 3: [3, 1, 0], + 4: [2, 2, 0], + 5: [3, 2, 1], + 6: [3, 2, 0], + 7: [4, 2, 1], + 8: [4, 2, 0], + 9: [3, 3, 0], + 10: [4, 3, 2], + 11: [4, 3, 1], + 12: [4, 3, 0] + } + if len(images) <= 12: + form = forms[len(images_)] + else: + print("Can process only 12 images") + return 0 + + #print ("images0 shape", images_) + shape = images_[0].shape + if one_img_x_sz != -1: + rescale_factor = one_img_x_sz/shape[1] + shape = [int(x*rescale_factor) for x in shape] + + if window_x_sz != -1: + rescale_factor = window_x_sz/shape[1]/form[0] + shape = [int(x*rescale_factor) for x in shape] + + #print ("len", len (images_)) + + #print ("0", images_ [0].shape) + #print ("1", images_ [1].shape) + #print ("2", images_ [2].shape) + #print ("3", images_ [3].shape) + + for img, i in zip(images_, range(len(images_))): + #print ("before resize", img.shape) + + img = cv2.resize(img, (shape[1], shape[0])) + if len(img.shape) == 2: # gray_scale + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + if img.shape[2] == 4: # rgba + img = img[:, :, :3] + + if len(names) != 0: + cv2.putText(img, names[i], (30, 30), + cv2.FONT_HERSHEY_SIMPLEX, 0.4, (20, 250, 231), 1, cv2.LINE_AA) + + images.append(img) + + for j in range(form[2]): + images.append(np.zeros_like(images[0])) + + rows = [] + for i in range(form[1]): + rows.append(np.concatenate(images[i*form[0]:(i+1)*form[0]], axis=1)) + return np.concatenate(rows) + + +class Writer: + def __init__(self, name_, xsz_, ysz_, fps_=30): + self.name = name_ + self.xsz = xsz_ + self.ysz = ysz_ + self.fps = fps_ + + #self.fourcc = cv2.VideoWriter_fourcc(*'MP4V') + #self.out = cv2.VideoWriter(self.name, self.fourcc, self.fps, (self.xsz, self.ysz)) + self.fourcc = cv2.VideoWriter_fourcc(*'mp4v') + self.out = cv2.VideoWriter( + self.name, self.fourcc, self.fps, (self.xsz, 
self.ysz)) + + def write(self, frame): + self.out.write(frame) + + def __del__(self): + self.release() + + def release(self): + self.out.release() From 9722f14141c1bc8c09e5aea895d5ea1290864be1 Mon Sep 17 00:00:00 2001 From: Ilya Ryakin Date: Tue, 26 Jan 2021 17:05:07 +0300 Subject: [PATCH 06/11] add var to loc --- final_project/src/localization/localization.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/final_project/src/localization/localization.py b/final_project/src/localization/localization.py index a79e667..7f32583 100644 --- a/final_project/src/localization/localization.py +++ b/final_project/src/localization/localization.py @@ -2,14 +2,16 @@ class Localization: def __init__(self): - pass + self.robot_position = None + self.ball_position_local = None + self.ball_position_global = None def update(self, data): pass - def return_robot_position(self): - pass + def get_robot_position(self): + return self.robot_position - def return_ball_position(self): - pass + def get_ball_position(self): + return self.ball_position_global From 51d76a93f41494645d3ddcb02cfb6ec5d5d865bb Mon Sep 17 00:00:00 2001 From: vorkorro14 Date: Tue, 26 Jan 2021 17:25:32 +0300 Subject: [PATCH 07/11] final baseline --- final_project/src/vision/cv_sensor.py | 13 +++++++++---- .../src/vision}/input_output.py | 0 2 files changed, 9 insertions(+), 4 deletions(-) rename {notebooks => final_project/src/vision}/input_output.py (100%) diff --git a/final_project/src/vision/cv_sensor.py b/final_project/src/vision/cv_sensor.py index 93b1779..e080457 100644 --- a/final_project/src/vision/cv_sensor.py +++ b/final_project/src/vision/cv_sensor.py @@ -150,12 +150,17 @@ def draw_line(self, line): def draw_rectangle(self, rect): x, y, w, h = rect - cv2.rectangle(self.img, (x, y), (x+w, y+h), (255, 0, 0), 3) + cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0), 3) + class Sensor: - def __init__(self, filename_): - self.filename = filename_ - self.img = 
cv2.imread(self.filename) + def __init__(self): + self.img = None def snapshot(self): + if self.img is not None: + return Image(self.img.copy()) + + def get_frame(self, filename): + self.img = cv2.imread(filename) return Image(self.img.copy()) \ No newline at end of file diff --git a/notebooks/input_output.py b/final_project/src/vision/input_output.py similarity index 100% rename from notebooks/input_output.py rename to final_project/src/vision/input_output.py From 7a8057cac804bf4d357e0e64328588f198f4e594 Mon Sep 17 00:00:00 2001 From: vorkorro14 Date: Tue, 26 Jan 2021 17:28:02 +0300 Subject: [PATCH 08/11] final baseline --- final_project/server.py | 150 ++++-------- final_project/src/model/model.py | 3 +- final_project/src/vision/input_output.py | 294 ----------------------- 3 files changed, 46 insertions(+), 401 deletions(-) delete mode 100644 final_project/src/vision/input_output.py diff --git a/final_project/server.py b/final_project/server.py index 4aeb329..9a95b08 100644 --- a/final_project/server.py +++ b/final_project/server.py @@ -3,118 +3,56 @@ import math import json import os -import warnings -from src.simulation import sensor -from src.simulation import image -warnings.warn("CV reload imported") - -from src.localization.tools import median -from src.vision import Vision, VisionPostProcessing +from src.vision import Vision, Sensor from src.localization import Localization -from src.model import KondoMV +from src.model import Model + class Server: def __init__(self): - self.vision = Vision() - self.vision_postprocessing = VisionPostProcessing() - self.vision.load_detectors("vision/detectors_config.json") - - with open("calibration/camera_calibration.json") as f: calibration = json.load(f) + self.camera = Sensor() + self.vision = Vision() + self.model = Model() + robot_height = 0.42 + # setting model parameters + self.model.set_parameters(calibration["cam_col"], + robot_height, + [0, 0, 0, 0, 0, 0], + [0, 0]) + self.localization = Localization() + with 
open("logs/data.json") as f: + self.logs_data = json.load(f) - side = "left" - - self.localization = Localization(-0.7, -1.3, math.pi/2, side) - self.model = KondoMV() - - # setting model parametrs - self.model.setParams(calib["cam_col"], - self.model.robot_height, - [0, 0, 0, 0, 0, 0], - [0, 0]) - - - - - def run(self): - while True: - if not self.no_vision: - self.clock = time.clock() - self.clock.tick() - curr_t = pyb.millis() - #print (curr_t - t) - t = curr_t - selfData = {} - for i in range(12): - # motion part. Head movement. - self.motion.apply({'name': 'head'}) - if not self.no_vision: - self.model.update_camera_pan_tilt(self.motion.head.pan, self.motion.head.tilt) - # vision part. Taking picture. - img = sensor.snapshot().lens_corr(strength=1.2, zoom=1.0) - - cameraDataRaw = self.vision.get(img, objects_list=["yellow_posts", "blue_posts", "ball"], - drawing_list=["yellow_posts", "blue_posts", "ball"]) - - # cameraDataRaw=vision.get(img, objects_list=["yellow_posts", "ball", "white_posts_support"], - # drawing_list=["yellow_posts", "ball", "white_posts_support"]) - - # vision_postprocessing.process (cameraDataRaw, "yellow_posts", "white_posts_support") - cameraDataProcessed = cameraDataRaw - # model part. Mapping to world coords. 
- - # self means in robots coords - for observationType in cameraDataProcessed: - if observationType not in selfData.keys(): - selfData[observationType] = [] - selfPoints = [] - - # if (len (cameraDataProcessed[observationType]) > 0): - # print ("obser", cameraDataProcessed[observationType] [0]) - - for observation in cameraDataProcessed[observationType]: - cx = observation[5] - cy = observation[6] - w = observation[2] - h = observation[3] - - selfPoints.append(self.model.pic2r(cx, cy + (w+h)/4)) - selfData[observationType] += selfPoints - - print("eto self yello points", #can be turned off, but now thats needs for debug - selfData['yellow_posts'], "eto self blue points", selfData["blue_posts"]) - - if len(selfData['yellow_posts']) != 0: - general = [] - first_side = [] - second_side = [] - k = selfData['yellow_posts'][0] - for pep in selfData['yellow_posts']: - if math.fabs(math.atan(pep[0]/pep[1]) - math.atan(k[0]/k[1])) < 0.3: - first_side.append(list(pep)) - else: - second_side.append(list(pep)) - if len(first_side) != 1: - first_side = median(first_side) - else: - first_side = first_side[0] - - if second_side != 0 and len(second_side) != 1: - second_side = median(second_side) - elif len(second_side) == 1: - second_side = second_side[0] - - general.append(first_side) - if len(second_side) != 0: - general.append(second_side) - selfData['yellow_posts'] = general - print("eto self yello points", #can be turned off, but now thats needs for debug - selfData['yellow_posts'], "eto self blue points", selfData["blue_posts"]) - - self.localization.update(selfData) - self.localization.update_ball(selfData) - self.localization.localized = True #can be turned off, but now thats needs for debug - # print(loc.ballPosSelf) + for sample in self.logs_data: + self_data = {} + self.model.update_camera_pan_tilt( + sample['head_yaw'], + sample['head_pitch']) + + # Taking picture. 
+ img = self.camera.get_frame(sample['img']) + # Processing image + camera_data = self.vision.get(img) + + for observation_type in camera_data: + # self means in robots coords + if observation_type not in self_data.keys(): + self_data[observation_type] = [] + self_points = [] + + for observation in camera_data[observation_type]: + cx = observation[5] + cy = observation[6] + w = observation[2] + h = observation[3] + + self_points.append(self.model.pic2r(cx, cy + (w + h) / 4)) + self_data[observation_type] += self_points + + self.localization.update(self_data) + print(f'ETO ball: {self.localization.ball_position_global}, \ + ETO robot: {self.localization.robot_position}') diff --git a/final_project/src/model/model.py b/final_project/src/model/model.py index 296abd9..e2cfa61 100644 --- a/final_project/src/model/model.py +++ b/final_project/src/model/model.py @@ -1,5 +1,6 @@ import math + class Model: def __init__(self): self.camera_pan = 0 @@ -10,7 +11,7 @@ def update_camera_pan_tilt(self, camera_pan, camera_tilt): self.camera_pan = camera_pan self.camera_tilt = camera_tilt - def setParams(self, cam_matrix, h, k_coefs, p_coefs): + def set_parameters(self, cam_matrix, h, k_coefs, p_coefs): # cam_matrix - camera matrix # h - camera height # k_coefs, p_coefs - coefficients of lense distortion diff --git a/final_project/src/vision/input_output.py b/final_project/src/vision/input_output.py deleted file mode 100644 index f3644ab..0000000 --- a/final_project/src/vision/input_output.py +++ /dev/null @@ -1,294 +0,0 @@ -import cv2 -from pathlib import Path -import numpy as np - - - -def get_available_cameras(upper_bound=10, lower_bound=0): - available = [] - - for i in range(lower_bound, upper_bound): - cap = cv2.VideoCapture(i) - - if cap.isOpened(): - available.append(i) - - cap.release() - - return available - - -def folder_files(path): - files_png = sorted(Path(path).glob('*.png')) - files_jpg = sorted(Path(path).glob('*.jpg')) - files_bmp = sorted(Path(path).glob('*.bmp')) 
- - files = files_png + files_jpg + files_bmp - - return files - -# Incapsulates reading frames from the following types of sources: -# photo (single image), photos series, video, camera, ROS node (?) - -# If an input file or camera number is given, desired input type -# can be omitted. Numeric value given in initialization is considered -# as a number of camera desired to be opened. Value "-1" opens camera -# with minimal available id, "-2" - with maximal. - -# TODO: semi-automatic source type detection -# TODO: implementation of reading from all the sources (except for ROS) [later] -# TODO: output [later] - - -class Source: - #type = "" - #path = "" - - # Taking sample image requires reading, in case of non-constant - # sources like camera or video it can lead to a loss of a single - # frame. These is the fix :) - sample_image_obtained = False - sample_image_incoherency_fixed = False - - def __init__(self, path_, type_="", instant_init=True): - if (not isinstance(path_, str) and not isinstance(path_, unicode)): - self.path = path_["data_path"] - - else: - self.path = path_ - - if type_ == "": - if (self.path.endswith(".jpg") or - self.path.endswith(".png") or - self.path.endswith(".bmp")): - self.type = "photo" - - elif (self.path.endswith(".webm") or - self.path.endswith(".mp4") or - self.path.endswith(".MTS") or - self.path.endswith(".mkv") or - self.path.endswith(".mov") or - self.path.endswith(".avi")): - self.type = "video" - - elif self.path.endswith("/"): - self.type = "photo series" - - elif (self.path.isnumeric() or - self.path[1:].isnumeric()): - self.type = "camera" - - num = int(self.path) - - if num < 0: - cameras = get_available_cameras() - - if num == -1: - self.cam_num = min(cameras) - - else: - self.cam_num = max(cameras) - - else: - self.cam_num = num - - else: - self.type = "ros flex" - - else: - self.type = type_ - - #print (self.type) - - if instant_init: - self.init_source() - - def shape(self): - return self.sample_image().shape - - def 
sample_image(self): - if not self.sample_image_obtained: - self.sample_image = self.get_frame() - - self.sample_image_obtained = True - - return self.sample_image - - def init_source(self): - self.sources = {} - - self.sources.update( - {"photo": (self.init_photo, self.get_frame_photo)}) - self.sources.update( - {"photo series": (self.init_photo_series, self.get_frame_photo_series)}) - self.sources.update( - {"video": (self.init_video, self.get_frame_video)}) - self.sources.update( - {"camera": (self.init_camera, self.get_frame_camera)}) - #self.sources.update ({"ros flex" : (self.init_ros_flex, self.get_frame_ros_flex)}) - - self.sources[self.type][0]() - - def init_photo(self): - self.img = cv2.imread(self.path) - if self.img is None: - print("Failed to load img{}".format(self.path)) - - def init_photo_series(self): - self.file_num = 0 - self.files = folder_files(self.path) - - #print (self.files) - #print (len (self.files), " files") - - def init_video(self): - self.video = cv2.VideoCapture(self.path) - - def init_camera(self): - self.camera = cv2.VideoCapture(self.cam_num) - - # def init_photo (self): - # self.img = cv2.imread (self.path) - - def get_frame(self): - if (self.sample_image_obtained and not - self.sample_image_incoherency_fixed): - self.sample_image_incoherency_fixed = True - - return True, self.sample_image - - return self.sources[self.type][1]() - - def get_frame_photo(self): - return True, self.img.copy() - - def get_frame_photo_series(self): - filename = str(self.files[self.file_num]) - - #print (filename) - - img = cv2.imread(filename) - - self.file_num += 1 - - if (self.file_num == len(self.files)): - self.file_num = 0 - - #print ("big shap ", img.shape) - - return img - - def get_frame_video(self): - reading_success, frame = self.video.read() - - if not reading_success: - self.video.release() - self.init_video() - - reading_success, frame = self.video.read() - - return reading_success, frame - - def get_frame_camera(self): - reading_success, 
frame = self.camera.read() - - return reading_success, frame - - def get_frame_photo(self): - return self.img.copy() - - def release(self): - if self.type == "camera": - self.camera.release() - -# output (stream to video file) - -# generalize to the desired a by b cells grid (?) -# generalize to the desired acpect ratio (?) - - -def form_grid(images_, window_x_sz=-1, one_img_x_sz=-1, names=[]): - images = [] - forms = { - # (number of images x, number of images y, number of empty images) - 1: [1, 1, 0], - 2: [2, 1, 0], - 3: [3, 1, 0], - 4: [2, 2, 0], - 5: [3, 2, 1], - 6: [3, 2, 0], - 7: [4, 2, 1], - 8: [4, 2, 0], - 9: [3, 3, 0], - 10: [4, 3, 2], - 11: [4, 3, 1], - 12: [4, 3, 0] - } - if len(images) <= 12: - form = forms[len(images_)] - else: - print("Can process only 12 images") - return 0 - - #print ("images0 shape", images_) - shape = images_[0].shape - if one_img_x_sz != -1: - rescale_factor = one_img_x_sz/shape[1] - shape = [int(x*rescale_factor) for x in shape] - - if window_x_sz != -1: - rescale_factor = window_x_sz/shape[1]/form[0] - shape = [int(x*rescale_factor) for x in shape] - - #print ("len", len (images_)) - - #print ("0", images_ [0].shape) - #print ("1", images_ [1].shape) - #print ("2", images_ [2].shape) - #print ("3", images_ [3].shape) - - for img, i in zip(images_, range(len(images_))): - #print ("before resize", img.shape) - - img = cv2.resize(img, (shape[1], shape[0])) - if len(img.shape) == 2: # gray_scale - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - if img.shape[2] == 4: # rgba - img = img[:, :, :3] - - if len(names) != 0: - cv2.putText(img, names[i], (30, 30), - cv2.FONT_HERSHEY_SIMPLEX, 0.4, (20, 250, 231), 1, cv2.LINE_AA) - - images.append(img) - - for j in range(form[2]): - images.append(np.zeros_like(images[0])) - - rows = [] - for i in range(form[1]): - rows.append(np.concatenate(images[i*form[0]:(i+1)*form[0]], axis=1)) - return np.concatenate(rows) - - -class Writer: - def __init__(self, name_, xsz_, ysz_, fps_=30): - self.name = 
name_ - self.xsz = xsz_ - self.ysz = ysz_ - self.fps = fps_ - - #self.fourcc = cv2.VideoWriter_fourcc(*'MP4V') - #self.out = cv2.VideoWriter(self.name, self.fourcc, self.fps, (self.xsz, self.ysz)) - self.fourcc = cv2.VideoWriter_fourcc(*'mp4v') - self.out = cv2.VideoWriter( - self.name, self.fourcc, self.fps, (self.xsz, self.ysz)) - - def write(self, frame): - self.out.write(frame) - - def __del__(self): - self.release() - - def release(self): - self.out.release() From 8fdf98e61f89b69b9afc4b20bd471d2a5caed866 Mon Sep 17 00:00:00 2001 From: ar0usel Date: Tue, 26 Jan 2021 19:44:29 +0300 Subject: [PATCH 09/11] setuped to work with logs --- final_project/calibration/calib.json | 31 ++++ final_project/main.py | 4 +- final_project/server.py | 32 ++--- .../src/localization/localization.py | 6 +- final_project/src/model/model.py | 6 +- final_project/src/vision/cv_sensor.py | 132 ------------------ final_project/src/vision/vision.py | 15 +- 7 files changed, 67 insertions(+), 159 deletions(-) create mode 100644 final_project/calibration/calib.json diff --git a/final_project/calibration/calib.json b/final_project/calibration/calib.json new file mode 100644 index 0000000..b4dce68 --- /dev/null +++ b/final_project/calibration/calib.json @@ -0,0 +1,31 @@ +{ + "camera_matrix": [ + [ + 587.467217162, + 0, + 746.834662176 + ], + [ + 0, + 583.279916347, + 560.059131772 + ], + [ + 0, + 0, + 1 + ] + ], + "k_coef": [ + 0.172686496, + -0.092024968, + -0.002735723, + 0.494416241, + -0.10974415, + -0.017876025 + ], + "p_coef": [ + 0.000299857, + -0.000261183 + ] +} \ No newline at end of file diff --git a/final_project/main.py b/final_project/main.py index 7b76a87..de17222 100644 --- a/final_project/main.py +++ b/final_project/main.py @@ -1,4 +1,6 @@ from server import Server -server = Server() +SEQ_NUMBER = 1 # 1, 2 or 3 + +server = Server(SEQ_NUMBER) server.run() diff --git a/final_project/server.py b/final_project/server.py index 9a95b08..e8a0a14 100644 --- a/final_project/server.py 
+++ b/final_project/server.py @@ -10,49 +10,43 @@ class Server: - def __init__(self): - with open("calibration/camera_calibration.json") as f: + def __init__(self, sequence_number): + with open("calibration/calib.json") as f: calibration = json.load(f) self.camera = Sensor() self.vision = Vision() self.model = Model() - robot_height = 0.42 + robot_height = 0.67 # setting model parameters - self.model.set_parameters(calibration["cam_col"], + self.model.set_parameters(calibration["camera_matrix"], robot_height, - [0, 0, 0, 0, 0, 0], - [0, 0]) + calibration["k_coef"], + calibration["p_coef"]) self.localization = Localization() - with open("logs/data.json") as f: + with open(f"cvr_logs/seq_{sequence_number}.json") as f: self.logs_data = json.load(f) def run(self): for sample in self.logs_data: self_data = {} self.model.update_camera_pan_tilt( - sample['head_yaw'], - sample['head_pitch']) + float(sample['head_yaw']), + float(sample['head_pitch'])) # Taking picture. - img = self.camera.get_frame(sample['img']) + img = self.camera.get_frame(os.path.join("cvr_logs", sample['img_path'])) # Processing image camera_data = self.vision.get(img) - for observation_type in camera_data: # self means in robots coords if observation_type not in self_data.keys(): self_data[observation_type] = [] self_points = [] - for observation in camera_data[observation_type]: - cx = observation[5] - cy = observation[6] - w = observation[2] - h = observation[3] - + cx, cy, w, h = observation self_points.append(self.model.pic2r(cx, cy + (w + h) / 4)) self_data[observation_type] += self_points self.localization.update(self_data) - print(f'ETO ball: {self.localization.ball_position_global}, \ - ETO robot: {self.localization.robot_position}') + print(f'ETO ball: {self.localization.get_ball_position()}, \ + ETO robot: {self.localization.get_robot_position()}') diff --git a/final_project/src/localization/localization.py b/final_project/src/localization/localization.py index 7f32583..4b60440 100644 --- 
a/final_project/src/localization/localization.py +++ b/final_project/src/localization/localization.py @@ -3,8 +3,8 @@ class Localization: def __init__(self): self.robot_position = None - self.ball_position_local = None - self.ball_position_global = None + self.ball_pos_self = None + self.ball_pos_world = None def update(self, data): @@ -14,4 +14,4 @@ def get_robot_position(self): return self.robot_position def get_ball_position(self): - return self.ball_position_global + return self.ball_pos_world diff --git a/final_project/src/model/model.py b/final_project/src/model/model.py index e2cfa61..9673e3b 100644 --- a/final_project/src/model/model.py +++ b/final_project/src/model/model.py @@ -19,9 +19,9 @@ def set_parameters(self, cam_matrix, h, k_coefs, p_coefs): self.h = h self.k_coefs = k_coefs self.p_coefs = p_coefs - self.head_to_base_matrix = [[0.99936382, 0.034978, -0.00696373], - [-0.03510436, 0.99920425, -0.01893518], - [0.00629587, 0.01916759, 0.99979646]] + self.head_to_base_matrix = [[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]] # function calculating dot product of matrix and vector def matrix_dot_vec(self, matr, vect): diff --git a/final_project/src/vision/cv_sensor.py b/final_project/src/vision/cv_sensor.py index e080457..9d465c9 100644 --- a/final_project/src/vision/cv_sensor.py +++ b/final_project/src/vision/cv_sensor.py @@ -8,142 +8,10 @@ import numpy as np -class Blob: - def __init__(self, x_, y_, w_, h_): - self.x_ = x_ - self.y_ = y_ - self.w_ = w_ - self.h_ = h_ - - def x(self): - return self.x_ - - def y(self): - return self.y_ - - def w(self): - return self.w_ - - def h(self): - return self.h_ - - def cx(self): - return int(self.x_ + self.w_ / 2) - - def cy(self): - return int(self.y_ + self.h_ / 2) - - def rect(self): - return self.x_, self.y_, self.w_, self.h_ - - -class Line: - def __init__(self, x1_, y1_, x2_, y2_, theta_): - self.x1_ = x1_ - self.y1_ = y1_ - self.x2_ = x2_ - self.y2_ = y2_ - self.theta_ = theta_ - - def x1(self): - return 
self.x1_ - - def y1(self): - return self.y1_ - - def x2(self): - return self.x2_ - - def y2(self): - return self.y2_ - - def theta(self): - return self.theta_ - - def line(self): - return self.x1_, self.y1_, self.x2_, self.y2_ - - class Image: def __init__(self, img_): self.img = img_ - def find_blobs(self, th, pixels_threshold, area_threshold, merge, margin): - low_th = (int(th[0] * 2.55), th[2] + 128, th[4] + 128) - high_th = (int(th[1] * 2.55), th[3] + 128, th[5] + 128) - - labimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB) - - mask = cv2.inRange(labimg, low_th, high_th) - - #cv2.imshow ("a", mask) - - output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S) - - #labels_count = output [0] - #labels = output [1] - stats = output [2] - #labels_count, labels, stats = output[:3] - sz = stats.shape[0] - - blobs = [] - - for label_num in range(1, sz): - area = stats[label_num, cv2.CC_STAT_AREA] - - if area >= pixels_threshold: - new_blob = Blob(stats[label_num, cv2.CC_STAT_LEFT], - stats[label_num, cv2.CC_STAT_TOP], - stats[label_num, cv2.CC_STAT_WIDTH], - stats[label_num, cv2.CC_STAT_HEIGHT]) - - #print ("append", area) - blobs.append(new_blob) - - return blobs - - def binary(self, th): - low = (th[0], th[2], th[4]) - high = (th[1], th[3], th[5]) - - mask = cv2.inRange(self.img, low, high) - - sh = mask.shape - - result = np.zeros((sh[0], sh[1], 3), self.img.dtype) - - for i in range(0, 3): - result[:, :, i] = mask.copy() - - return result - - def find_lines(self): - # - Почему Колумб приплыл в Америку, а не в Индию? 
- # - Потому что он плавал по одометрии - - gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) - edges = cv2.Canny(gray, 50, 150, apertureSize=3) - - #cv2.imshow ("a", edges) - - #lines = cv2.HoughLines(edges, 5, np.pi / 18, 20) - lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 150) - - resultant_lines = [] - - #print (lines) - - for line in lines: - x1, y1, x2, y2 = line [0] - - theta = math.atan((y2 - y1) / (x2 - x1)) - - new_line = Line(x1, y1, x2, y2, theta) - - resultant_lines.append(new_line) - - return resultant_lines - def draw_line(self, line): x1, y1, x2, y2 = line cv2.line(self.img, (x1, y1), (x2, y2), (0, 0, 255), 2) diff --git a/final_project/src/vision/vision.py b/final_project/src/vision/vision.py index ebac38a..dae5eb6 100755 --- a/final_project/src/vision/vision.py +++ b/final_project/src/vision/vision.py @@ -7,7 +7,20 @@ class Vision: def __init__(self): pass + + def get_ball(self, img): + """ + Find bounding box for ball + Args: + img(Image) + + Returns: + list[tuple(int, int, int, int)]: return list of all founded balls(cx, cy - center of bounding box; w, h - width, height of bounding box) + """ + cx, cy, w, h = (0, 0, 0, 0) + return [(cx, cy, w, h)] def get(self, img): - result = "" + result = {} + result['ball'] = self.get_ball(img) return result From 17981a8cc91ab82c5839e4ee65b6b79120686413 Mon Sep 17 00:00:00 2001 From: ar0usel Date: Tue, 26 Jan 2021 20:07:25 +0300 Subject: [PATCH 10/11] Added task description --- final_project/requirements.txt | 2 ++ final_project/task.md | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 final_project/requirements.txt create mode 100644 final_project/task.md diff --git a/final_project/requirements.txt b/final_project/requirements.txt new file mode 100644 index 0000000..fc4586c --- /dev/null +++ b/final_project/requirements.txt @@ -0,0 +1,2 @@ +numpy +opencv-python \ No newline at end of file diff --git a/final_project/task.md b/final_project/task.md new file mode 100644 
index 0000000..fccc773 --- /dev/null +++ b/final_project/task.md @@ -0,0 +1,26 @@ +## Дано +Для быстрого старта в выполнении проекта Вам дан класс Server, в котором уже реализован весь пайплайн обработки. Вам остается реализовать конкретные модули зрения и локализации. Для связки их в единое целое реализован модуль Model, который переводит координаты пикселя в собственную систему координат робота. Считывание логов также реализовано. Они состоят из: + +1. 3 последовательности кадров с разных точек поля +2. Положение головы в пространстве для каждого кадра + +В файлах **seq_N.json** хранится путь до картинки и соответствующий наклон головы. Скачать их и картинки можно [здесь](https://drive.google.com/file/d/18doF65lbSbgEXF4T12vHGWH3LrI3JTB5/view?usp=sharing). Папка **cvr_logs** должна лежать рядом с ```main.py```. + +Быстрый старт: + +``` +pip3 install -r requirements.txt +python3 main.py +``` +## Задание +Зрение: +1. Найти мяч на всех кадрах, где есть мяч *3 балла* +2. Найти синие стойки ворот на всех кадрах, где есть стойки ворот *3 балла* +3. *Найти белые стойки ворот на всех кадрах, где есть стойки ворот *4 балла* +4. *Найти линии разметки *5 баллов* + +Локализация: +1. Реализовать триангуляцию и альфа-бета фильтр с использованием стоек ворот *5 баллов* +2. 
*Добавить в альфа-бета информацию о линиях разметки *10 баллов* + +*- доп задачи \ No newline at end of file From b2bab4cb4e7182df3b64f3c0a6cf416a3724633c Mon Sep 17 00:00:00 2001 From: Merrah Date: Tue, 11 May 2021 21:54:49 +0300 Subject: [PATCH 11/11] model fixes --- final_project/src/model/model.py | 169 +++++++++++++++++++------------ 1 file changed, 105 insertions(+), 64 deletions(-) diff --git a/final_project/src/model/model.py b/final_project/src/model/model.py index 9673e3b..d04c55a 100644 --- a/final_project/src/model/model.py +++ b/final_project/src/model/model.py @@ -41,13 +41,13 @@ def r2cam(self, xb, yb): b = self.camera_tilt r2cam_rot_matrix = [[math.cos(a) * math.cos(b), - math.sin(a), - -math.cos(a) * math.sin(b)], - [-math.sin(a) * math.cos(b), + math.sin(a) * math.cos(b), + math.sin(b)], + [-math.sin(a), math.cos(a), - math.sin(a) * math.sin(b)], - [math.sin(b), - 0.0, + 0.0], + [-math.sin(b) * math.cos(a), + -math.sin(a) * math.sin(b), math.cos(b)]] return self.matrix_dot_vec(r2cam_rot_matrix, r_coords) @@ -59,44 +59,18 @@ def cam2r(self, cam_coords): cam2r_rot_matrix = [[math.cos(a) * math.cos(b), -math.sin(a), - math.cos(a) * math.sin(b)], + -math.cos(a) * math.sin(b)], [math.sin(a) * math.cos(b), math.cos(a), - math.sin(a) * math.sin(b)], - [-math.sin(b), + -math.sin(a) * math.sin(b)], + [math.sin(b), 0.0, math.cos(b)]] return self.matrix_dot_vec(cam2r_rot_matrix, cam_coords) - # transformation from robot coords to pixel picture coords - def r2pic(self, xb, yb): - # xb, yb - coords of the ball in the robot system - - # transformation to camera coords and back to apply head-to-base matrix - cam_coords = self.r2cam(xb, yb) - fixed_cam_coords = self.matrix_dot_vec(self.head_to_base_matrix, - cam_coords) - xb, yb, zb = self.cam2r(fixed_cam_coords) - - # rotating the coordinate system by the cameraPan - # xbi,ybi - coords of the ball in the same system after rotation - xbi = xb * math.cos(self.camera_pan) + yb * math.sin(self.camera_pan) - ybi = yb 
* math.cos(self.camera_pan) - xb * math.sin(self.camera_pan) - # taking into account the case when the ball is behind the camera - if xbi <= 0: - print("Ball behind the camera") - return (-1, -1) - - # alp - artificially created angle, used for simplification - # x,y - coords of the ball in system, parallel to the camera screen - # u,v - pixel coords of the ball in the screen system - alp = math.atan(self.h / xbi) + self.camera_tilt - x = ybi / math.cos(alp) / math.sqrt(xbi**2 + self.h ** 2) - y = math.tan(alp) - - # applying the lense distortion-fix formula + def distortion_straight(self, x, y): r_sq = x ** 2 + y ** 2 coef_numerator = (1 + self.k_coefs[0] * r_sq + @@ -108,54 +82,121 @@ def r2pic(self, xb, yb): self.k_coefs[4] * (r_sq ** 2) + self.k_coefs[5] * (r_sq ** 3)) - coef = coef_numerator / coef_denominator + g = coef_numerator / coef_denominator # x_cor, y_cor - ball cords in artificially created coord system - x_cor_parts = [x * coef, + x_cor_parts = [x * g, 2 * self.p_coefs[0] * x * y, self.p_coefs[1] * (r_sq + 2 * x ** 2)] x_cor = sum(x_cor_parts) - y_cor_parts = [y * coef, + y_cor_parts = [y * g, 2 * self.p_coefs[1] * x * y, self.p_coefs[0] * (r_sq + 2 * y ** 2)] y_cor = sum(y_cor_parts) + return [x_cor, y_cor] + + def dist_error(self, x, y, c1, c2): + r1, r2 = self.distortion_straight(x, y) + return [r1 - c1, r2 - c2] + + def dist_er_jac_inv(self, x, y): + r_sq = x ** 2 + y ** 2 + coef_numerator = (1 + + self.k_coefs[0] * r_sq + + self.k_coefs[1] * (r_sq ** 2) + + self.k_coefs[2] * (r_sq ** 3)) + + coef_denominator = (1 + + self.k_coefs[3] * r_sq + + self.k_coefs[4] * (r_sq ** 2) + + self.k_coefs[5] * (r_sq ** 3)) + + g = coef_numerator / coef_denominator + gdx = ((2 * self.k_coefs[0] * x + + 4 * self.k_coefs[1] * x * r_sq + + 6 * self.k_coefs[2] * x * r_sq**2) / + coef_denominator - + (coef_numerator * + (2 * self.k_coefs[3] * x + + 4 * self.k_coefs[4] * x * r_sq + + 6 * self.k_coefs[5] * x * r_sq**2)) / + coef_denominator**2) + + gdy = ((2 * 
self.k_coefs[0] * y + + 4 * self.k_coefs[1] * y * r_sq + + 6 * self.k_coefs[2] * y * r_sq**2) / + coef_denominator - + (coef_numerator * + (2 * self.k_coefs[3] * y + + 4 * self.k_coefs[4] * y * r_sq + + 6 * self.k_coefs[5] * y * r_sq**2)) / + coef_denominator**2) + + a11 = g + x * gdx + 2 * self.p_coefs[0] * y + 6 * self.p_coefs[1] * x + a12 = y * gdx + 2 * self.p_coefs[0] * x + 2 * self.p_coefs[1] * y + a21 = x * gdy + 2 * self.p_coefs[0] * x + 2 * self.p_coefs[1] * y + a22 = g + y * gdy + 2 * self.p_coefs[1] * x + 6 * self.p_coefs[0] * y + + det = a11 * a22 - a12 * a21 + return [[a22 / det, -a12 / det], [-a21 / det, a11 / det]] + + + def distortion_inverse(self, c1, c2): + x = c1 + y = c2 + for i in range(100): + x_new = (x - + (self.dist_er_jac_inv(x, y)[0][0] * self.dist_error(x, y, c1, c2)[0] + + self.dist_er_jac_inv(x, y)[0][1] * self.dist_error(x, y, c1, c2)[1])) + y_new = (y - + (self.dist_er_jac_inv(x, y)[1][0] * self.dist_error(x, y, c1, c2)[0] + + self.dist_er_jac_inv(x, y)[1][1] * self.dist_error(x, y, c1, c2)[1])) + + if abs(x_new - x) + abs(y_new - y) < 0.00001: + break + else: + x = x_new + y = y_new + return [x, y] + + + def r2pic(self, xb, yb): + # xb, yb - coords of the ball in the robot system + + # transformation to camera coords and back to apply head-to-base matrix + cam_coords = self.r2cam(xb, yb) + x_c, y_c, z_c = self.matrix_dot_vec(self.head_to_base_matrix, + cam_coords) + x = y_c / x_c + y = z_c / x_c + + # applying the lense distortion-fix formula + x_cor, y_cor = self.distortion_straight(x, y) + # u, v - pixel coords of the ball u = -x_cor * self.cam_matrix[0][0] + self.cam_matrix[0][2] - v = y_cor * self.cam_matrix[1][1] + self.cam_matrix[1][2] + v = -y_cor * self.cam_matrix[1][1] + self.cam_matrix[1][2] return (int(u), int(v)) - # transformation from pixel coords to robot coords + def pic2r(self, u, v): # u,v - pixel coords of the ball in the screen system # x,y - coords of the ball in the coordinate system, # parallel to the camera 
screen - x = (float(u) - self.cam_matrix[0][2]) / self.cam_matrix[0][0] - y = (float(v) - self.cam_matrix[1][2]) / self.cam_matrix[1][1] + x = -(float(u) - self.cam_matrix[0][2]) / self.cam_matrix[0][0] + y = -(float(v) - self.cam_matrix[1][2]) / self.cam_matrix[1][1] - # alp,bet - vertical and horizontal angles - # of the ball radius-vector relative to the camera 0,0 direction - alp = math.atan(y) - bet = math.atan(x) + # transformation of x and y using the distortion coefficients + x, y = self.distortion_inverse(x, y) - robot_alp = alp - self.camera_tilt - - # xb, yb - coordinates of the ball in the system turned by cameraPan - xb = self.h / math.tan(robot_alp) - yb = math.tan(bet) * math.sqrt(self.h ** 2 + xb ** 2) - - # xb_r, yb_r - coordinates of the ball in the robot system - xb_r = xb * math.cos(self.camera_pan) + yb * math.sin(self.camera_pan) - yb_r = yb * math.cos(self.camera_pan) - xb * math.sin(self.camera_pan) - - # transformation to camera coords and back to apply head-to-base matrix - cam_coords = self.r2cam(xb_r, -yb_r) - fixed_cam_coords = self.matrix_dot_vec(self.head_to_base_matrix, - cam_coords) + x_cam = -self.h / (math.sin(self.camera_tilt) + y * math.cos(self.camera_tilt)) + x_cam = abs(x_cam) + y_cam = x_cam * x + z_cam = x_cam * y - # final coords in robot system - xb_r, yb_r, zb_r = self.cam2r(fixed_cam_coords) + x_r, y_r, _ = self.cam2r([x_cam, y_cam, z_cam]) - return (xb_r, yb_r) + return (x_r, y_r) \ No newline at end of file