"""
Photomosaic.

Module storing an implementation of the cube photomosaic task.
"""

import typing as _typing

import cv2 as _cv2
import numpy as _np

from ..constants.vision import MOSAIC_HEIGHT, COLOR_DICT


def _filter_color(lower: _np.ndarray, upper: _np.ndarray, images: list) -> list:
    """
    Filter the color according to the threshold.

    :param lower: Lower HSV threshold for the filter
    :param upper: Upper HSV threshold for the filter
    :param images: List of HSV images
    :return: Masks after applying the filter
    """
    return [_cv2.inRange(image_in_range, lower, upper) for image_in_range in images]


def _cut_images(images: list) -> list:
    """
    Cut the square in the images.

    :param images: list of images
    :return: the list of cut images
    """
    # Region of interest used to initialise grabCut
    rect = (30, 30, 220, 220)

    img_white = list()
    # Generalised from a hard-coded range(5) so any number of faces works.
    for idx in range(len(images)):
        # FIX: these three accesses previously used the stale name ``i``
        # while the loop iterates over ``idx``, raising NameError at runtime.
        images[idx] = _cv2.resize(images[idx], (256, 256))
        mask = _np.zeros(images[idx].shape[:2], _np.uint8)

        # Background/foreground models required by grabCut
        bgd_model = _np.zeros((1, 65), _np.float64)
        fgd_model = _np.zeros((1, 65), _np.float64)

        _cv2.grabCut(images[idx], mask, rect, bgd_model, fgd_model, 5, _cv2.GC_INIT_WITH_RECT)
        # grabCut marks sure/probable background as 0/2 and foreground as 1/3
        mask2 = _np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        img_white.append(images[idx] * mask2[:, :, _np.newaxis])

    img_white = _find_rectangle(img_white)

    for index, image_to_resize in enumerate(img_white):
        # NOTE(review): dsize is (width, height) but shape is (rows, cols);
        # for non-square crops this transposes the dimensions — confirm intent.
        img_white[index] = _cv2.resize(image_to_resize, (int(image_to_resize.shape[0] / 2),
                                                         int(image_to_resize.shape[1] / 2)))
    print(len(img_white))

    return img_white


def _find_rectangle(images: list) -> list:
    """
    Use thresholding to find the face of the object, then crop each image to
    the biggest rectangle returned by :func:`_get_contours`.

    :param images: List of found sides of the object
    :return: Cut images with the biggest rectangle
    """
    for idx, image_to_find_rectangle in enumerate(images):
        image_gray = _cv2.cvtColor(image_to_find_rectangle, _cv2.COLOR_BGR2GRAY)
        thresh = _cv2.threshold(image_gray, 45, 255, _cv2.THRESH_BINARY)[1]
        img_dilate = _cv2.dilate(thresh, _np.ones((5, 5)), iterations=1)
        _x, _y, _w, _h = _get_contours(img_dilate)
        images[idx] = images[idx][_y: _y + _h, _x:_x + _w]
    return images


def _get_contours(cut_image):
    """
    Get the bounding rectangle of the biggest contour of the cut image.

    After finding the biggest contour, the function approximates its polygon
    and returns the bounding-rectangle origin and side lengths.

    :param cut_image: masked image of a side
    :return: x, y, width, height of the biggest rectangle in the masked photo
    """
    contours, _ = _cv2.findContours(cut_image, _cv2.RETR_EXTERNAL, _cv2.CHAIN_APPROX_NONE)
    # Pick the largest contour directly instead of a manual running-maximum
    # scan; also avoids passing the integer 0 to arcLength when every
    # contour has zero area.
    biggest_contour = max(contours, key=_cv2.contourArea)
    peri = _cv2.arcLength(biggest_contour, True)
    approx = _cv2.approxPolyDP(biggest_contour, 0.02 * peri, True)
    x_point, y_point, width, height = _cv2.boundingRect(approx)
    print(x_point, y_point, width, height)
    return x_point, y_point, width, height


def _resize_images(images: list) -> list:
    """
    Resize the cut images to a common height for combining.

    :param images: list of images
    :return: the resized cut images
    """
    for index, _img in enumerate(images):
        # Dimensions of object = 120 cm long, 60 cm wide, and 60 cm tall.
        # It is better to divide by height of the image than width.
        width = int(_img.shape[0] * MOSAIC_HEIGHT / _img.shape[1])
        images[index] = _cv2.resize(src=_img, dsize=(width, MOSAIC_HEIGHT))
    return images


def _type_division(dict_color_map: list) -> _typing.Tuple[list, int]:
    """
    Divide the type of squares (upper and lower squares).

    Function assumes that the first photo is taken of the top side of the box.

    :param dict_color_map: the color map for squares
    :return: the index list of bottom squares, the index of the top square
    """
    # Index 0 is the top face; every remaining index is a bottom face.
    bottom_index = list(range(1, len(dict_color_map)))
    top_index = 0
    return bottom_index, top_index


def _combine_images(img_white: list, dict_color_map: list, bottom_index: list, top_index: int) -> _np.ndarray:
    """
    Combine the squares into one image.

    :param img_white: the cut images
    :param dict_color_map: the color map for squares
    :param bottom_index: the index list of bottom squares
    :param top_index: the index of the top square
    :return: the combined picture
    """
    left_img = img_white[bottom_index[0]]
    length_top = 0
    # Edge positions in the colour map: 0 = top, 1 = right, 2 = bottom, 3 = left.
    connect_color = _get_key(dict_color_map[bottom_index[0]], 1)
    for _ in range(3):
        for idx in range(3):
            img_index = idx + 1
            # Attach the face whose left-edge colour matches the current
            # right-edge colour of the strip built so far.
            if connect_color == _get_key(dict_color_map[bottom_index[img_index]], 3):
                left_img = _np.concatenate((left_img, img_white[bottom_index[img_index]]), axis=1)
                connect_color = _get_key(dict_color_map[bottom_index[img_index]], 1)
                # Remember where the top face lines up with the bottom strip.
                if _get_key(dict_color_map[bottom_index[img_index]], 0) == _get_key(dict_color_map[top_index], 2):
                    length_top = left_img.shape[0] - img_white[bottom_index[img_index]].shape[0]

    # Black canvas on top of the strip, into which the top face is pasted.
    canvas_top = _np.zeros((left_img.shape[0], left_img.shape[1], 3), dtype="uint8")
    top_img = img_white[top_index]
    width_top = top_img.shape[0] + length_top
    height_top = top_img.shape[1] + MOSAIC_HEIGHT
    result = _np.concatenate((canvas_top, left_img), axis=0)
    result[length_top: width_top, MOSAIC_HEIGHT:height_top] = top_img
    # The result has to hold 2 widths of the smaller face and 2 widths of the bigger face.
    return result[:, :2 * img_white[1].shape[1] + 2 * img_white[2].shape[1]]


def _color_detect(images: list) -> list:
    """
    Detect the color in images.

    :param images: the list of images
    :return: the color map of squares
    """
    # One dict per face; kept at a fixed length of 5 because downstream code
    # (_type_division/_combine_images) indexes all five faces.
    color_content = [{}, {}, {}, {}, {}]
    for color, value in COLOR_DICT.items():
        masks = _filter_color(value[0], value[1], images)
        index_mask = 0
        for mask in masks:
            contours, _ = _cv2.findContours(mask, _cv2.RETR_TREE, _cv2.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                if _cv2.contourArea(contour) > int(mask.shape[0] / 4):
                    # FIX: moments were previously always taken from
                    # contours[0] regardless of which contour passed the
                    # area test; use the qualifying contour itself.
                    moments = _cv2.moments(contour)
                    if moments["m00"] != 0:
                        c_x = int(moments["m10"] / moments["m00"])
                        c_y = int(moments["m01"] / moments["m00"])
                    else:
                        c_x, c_y = 0, 0
                    horizontal = c_x / mask.shape[1]
                    vertical = c_y / mask.shape[0]
                    if color != "white":
                        check_for_color(horizontal, vertical, color_content, index_mask, color)
            index_mask += 1
    return color_content


def check_for_color(horizontal, vertical, color_content, index_mask, color):
    """
    Record on which edge of the face the colour patch lies.

    Edge codes stored in the colour map: 0 = top, 1 = right, 2 = bottom,
    3 = left, based on the normalised centroid position.
    """
    # Chained comparisons with ``and`` instead of bitwise ``&`` on booleans.
    if vertical < 0.2 and 0.3 < horizontal < 0.7:
        color_content[index_mask][color] = 0
    elif vertical > 0.8 and 0.3 < horizontal < 0.7:
        color_content[index_mask][color] = 2
    elif horizontal > 0.8 and 0.3 < vertical < 0.7:
        color_content[index_mask][color] = 1
    elif horizontal < 0.2 and 0.3 < vertical < 0.7:
        color_content[index_mask][color] = 3
    else:
        print("error")


def _get_key(dictionary: dict, value: int) -> list:
    """
    Get the key of dict() by value.

    :param dictionary: the dict()
    :param value: the value of dict()
    :return: list of keys whose value equals ``value``
    """
    return [key for key, val in dictionary.items() if val == value]


def helper_display(tag, image_helper):
    """
    Use helper function for images for displaying.

    Shows each image in its own window titled ``tag`` plus its index.
    """
    for index, name in enumerate(image_helper):
        _cv2.imshow(tag + str(index), name)


def create_photomosaic(images: list) -> _typing.Tuple[list, _np.ndarray, list, list]:
    """
    Process the images and combine them by their color into a photomosaic.

    :param images: List of images in OpenCV format
    :return: Original images, Combined picture, the list of original pictures, the list of cut images
    """
    # Convert images to HSV color from a copy of the original images
    images_hsv = [_cv2.cvtColor(_image, _cv2.COLOR_BGR2HSV) for _image in images.copy()]
    # cut the useless part of the image
    img_cut = _cut_images(images_hsv)

    # NOTE(review): only the first (top) face is colour-mapped here — the
    # remaining faces' dicts stay empty. Confirm this is intended.
    dict_color_map = _color_detect(img_cut[:1])
    print(dict_color_map)
    # resize the images for combining
    img_white = _resize_images(img_cut)

    # divide the top and bottom image
    bottom_index, top_index = _type_division(dict_color_map)
    print("Image count", len(img_cut))
    print(bottom_index, top_index)

    # combine the images
    result = _combine_images(img_white, dict_color_map, bottom_index, top_index)

    # FIX: previously returned ``img``, which exists only in the __main__
    # block below — a NameError when this function is imported.
    return images, _cv2.cvtColor(result, _cv2.COLOR_HSV2BGR), images, img_cut


if __name__ == "__main__":
    img = []
    # PHOTOS MUST BE TAKEN IN CORRECT ORDER: TOP -> LEFT -> FRONT -> RIGHT -> BACK
    for i in range(5):
        # This line to be changed when we get the camera
        img.append(_cv2.imread("./proper_samples/" + str(i + 1) + ".png"))
    print(len(img))
    _, result_img, image, image_cut = create_photomosaic(img.copy())

    _cv2.imshow("result", _cv2.resize(result_img, (0, 0), fx=0.5, fy=0.5))
    k = _cv2.waitKey(0)
    if k == 27:  # wait for ESC key to exit
        _cv2.destroyAllWindows()
def _remove_circles(mask: np.ndarray) -> np.ndarray:
    """
    Remove the small circles (mussels) from the image.

    Contours with an area below 2000 px are painted over with black,
    modifying ``mask`` in place.

    :param mask: binary mask to clean up
    :return: the same mask with small contours removed
    """
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    # Plain iteration: the previous ``for _, contour in enumerate(contours)``
    # requested and discarded an index it never used.
    for contour in contours:
        if cv2.contourArea(contour) < 2000:
            # Fill (thickness -1) the small contour with colour 0 to erase it.
            cv2.drawContours(mask, [contour], 0, 0, -1)

    return mask
Radzikowski Date: Sat, 10 Jul 2021 17:08:33 +0200 Subject: [PATCH 13/17] Specify version of opencv-python --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d70c0a5..f0143f6 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ "numpy", "sklearn", "pandas", - "opencv-python", + "opencv-python==4.5.1.48", "inputs" ], python_requires=">=3.6", From f1efaf6e606944b8db812b961cdf319513f33465 Mon Sep 17 00:00:00 2001 From: Mateusz Radzikowski Date: Mon, 12 Jul 2021 13:36:01 +0200 Subject: [PATCH 14/17] Change imports not to be underscored MOSAIC_HEIGHT is referred to the result picture --- surface/constants/vision.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/surface/constants/vision.py b/surface/constants/vision.py index 70bbe42..af7eeff 100644 --- a/surface/constants/vision.py +++ b/surface/constants/vision.py @@ -1,17 +1,18 @@ """ Computer vision constants. """ -import numpy as _np +import numpy as np -# The height of the cut picture +# The height of result cut picture (when combining this is set to be default height, so it is easier to combine 5 faces) MOSAIC_HEIGHT = 300 + # The filter map for each color -COLOR_DICT = {"white": (_np.array([0, 0, 50]), _np.array([255, 255, 255])), - "yellow": (_np.array([15, 0, 0]), _np.array([30, 255, 255])), - "green": (_np.array([60, 0, 0]), _np.array([75, 255, 255])), - "blue": (_np.array([105, 0, 0]), _np.array([120, 255, 255])), - "purple": (_np.array([145, 0, 0]), _np.array([160, 255, 255])), - "orange": (_np.array([5, 0, 0]), _np.array([15, 255, 255])), - "red": (_np.array([175, 0, 0]), _np.array([190, 255, 255])), - "pink": (_np.array([160, 0, 0]), _np.array([175, 255, 255])), - "light_blue": (_np.array([90, 0, 0]), _np.array([105, 255, 255]))} +COLOR_DICT = {"white": (np.array([0, 0, 50]), np.array([255, 255, 255])), + "yellow": (np.array([15, 0, 0]), np.array([30, 255, 255])), + "green": (np.array([60, 0, 0]), 
np.array([75, 255, 255])), + "blue": (np.array([105, 0, 0]), np.array([120, 255, 255])), + "purple": (np.array([145, 0, 0]), np.array([160, 255, 255])), + "orange": (np.array([5, 0, 0]), np.array([15, 255, 255])), + "red": (np.array([175, 0, 0]), np.array([190, 255, 255])), + "pink": (np.array([160, 0, 0]), np.array([175, 255, 255])), + "light_blue": (np.array([90, 0, 0]), np.array([105, 255, 255]))} From 0e5138976fc1a4e363c1e3fdb1d09d1f6201bbbd Mon Sep 17 00:00:00 2001 From: Mateusz Radzikowski Date: Mon, 12 Jul 2021 13:37:26 +0200 Subject: [PATCH 15/17] Change opencv installation version to be default Change in mussels.py parsing to integers by function int() instead of astype() --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f0143f6..d70c0a5 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ "numpy", "sklearn", "pandas", - "opencv-python==4.5.1.48", + "opencv-python", "inputs" ], python_requires=">=3.6", From 5ae6c1c2bed538c6f3eb6fde60f436ca6badfd7d Mon Sep 17 00:00:00 2001 From: Mateusz Radzikowski Date: Mon, 12 Jul 2021 13:41:46 +0200 Subject: [PATCH 16/17] Refactor code to be more clear and consistent Added logger and got rid of print statements. Changed imports to not be underscored. Added documentation about _cut_images function, _find_rectangle, _get_contours, _combine_images, _color_detect. _get_key function is to help with combining images and getting colors of interest to match while combining. --- surface/vision/photomosaic_photo.py | 132 ++++++++++++++++------------ 1 file changed, 74 insertions(+), 58 deletions(-) diff --git a/surface/vision/photomosaic_photo.py b/surface/vision/photomosaic_photo.py index 05566c5..138f373 100644 --- a/surface/vision/photomosaic_photo.py +++ b/surface/vision/photomosaic_photo.py @@ -4,13 +4,14 @@ Module storing an implementation of the cube photomosaic task. 
""" -import typing as _typing -import cv2 as _cv2 -import numpy as _np +import typing +import cv2 +import numpy as np from ..constants.vision import MOSAIC_HEIGHT, COLOR_DICT +from ..utils import logger -def _filter_color(lower: _np.ndarray, upper: _np.ndarray, images: list) -> list: +def _filter_color(lower: np.ndarray, upper: np.ndarray, images: list) -> list: """ Filter the color according to the threshold. @@ -19,7 +20,7 @@ def _filter_color(lower: _np.ndarray, upper: _np.ndarray, images: list) -> list: :param images: List of HSV images :return: Masks after applying the filter """ - return [_cv2.inRange(image_in_range, lower, upper) for image_in_range in images] + return [cv2.inRange(image_in_range, lower, upper) for image_in_range in images] def _cut_images(images: list) -> list: @@ -29,69 +30,75 @@ def _cut_images(images: list) -> list: :param images: list of images :return: the list of cut images """ - # Used for grabCut function + # Used for grabCut function, rectangle of points of predited foreground rect = (30, 30, 220, 220) img_white = list() + # Iterating through the images and resizing them to preprocess the images with grabCut for idx in range(5): - images[idx] = _cv2.resize(images[i], (256, 256)) - mask = _np.zeros(images[idx].shape[:2], _np.uint8) + images[idx] = cv2.resize(images[idx], (256, 256)) + mask = np.zeros(images[idx].shape[:2], np.uint8) # Extract background and foreground images - bgd_model = _np.zeros((1, 65), _np.float64) - fgd_model = _np.zeros((1, 65), _np.float64) + bgd_model = np.zeros((1, 65), np.float64) + fgd_model = np.zeros((1, 65), np.float64) - _cv2.grabCut(images[i], mask, rect, bgd_model, fgd_model, 5, _cv2.GC_INIT_WITH_RECT) - mask2 = _np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8') - img_white.append(images[idx] * mask2[:, :, _np.newaxis]) + # grabCut extracts background from the image and gives foreground when combining masks + cv2.grabCut(images[idx], mask, rect, bgd_model, fgd_model, 5, 
cv2.GC_INIT_WITH_RECT) + mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8') + img_white.append(images[idx] * mask2[:, :, np.newaxis]) + # List of found images is passed to the function that finds contours and biggest rectangle img_white = _find_rectangle(img_white) for index, image_to_resize in enumerate(img_white): - img_white[index] = cv2.resize(image_to_resize, (int(image_to_resize.shape[0] / 2), - int(image_to_resize.shape[1]/2))) - print(len(img_white)) + img_white[index] = cv2.resize(image_to_resize, (int(image_to_resize.shape[0] / 2), + int(image_to_resize.shape[1] / 2))) return img_white def _find_rectangle(images: list) -> list: """ - Use function thresholding to find face of the object. Then it passes this masked image to function _get_contours. + Use thresholding function to find face of the object. :param images: List of found sides of the object :return: Cut images with the biggest rectangle """ for idx, image_to_find_rectangle in enumerate(images): - image_gray = _cv2.cvtColor(image_to_find_rectangle, _cv2.COLOR_BGR2GRAY) - thresh = _cv2.threshold(image_gray, 45, 255, _cv2.THRESH_BINARY)[1] - img_dilate = _cv2.dilate(thresh, _np.ones((5, 5)), iterations=1) + # Preprocessing of the image with thresholding and dilation + image_gray = cv2.cvtColor(image_to_find_rectangle, cv2.COLOR_BGR2GRAY) + thresh = cv2.threshold(image_gray, 45, 255, cv2.THRESH_BINARY)[1] + img_dilate = cv2.dilate(thresh, np.ones((5, 5)), iterations=1) + + # Preprocessed image is passed to get points of interest (points of rectangle with width and height) _x, _y, _w, _h = _get_contours(img_dilate) + + # Slice the points of interest (face of the object from photo) images[idx] = images[idx][_y: _y + _h, _x:_x + _w] + return images def _get_contours(cut_image): """ - Add function to get the biggest contours of cut image. + Add function to get the biggest contours of cut image. Function returns points of rectangle and lengths of sides. 
- After finding the biggest contour, - function returns points of rectangle and lengths of sides. :param cut_image: masked images of sides :return: Points of the biggest rectangle in masked photo """ - contours, _ = _cv2.findContours(cut_image, _cv2.RETR_EXTERNAL, _cv2.CHAIN_APPROX_NONE) - area_list = [[_cv2.contourArea(cnt), cnt] for cnt in contours] + contours, _ = cv2.findContours(cut_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + area_list = [[cv2.contourArea(cnt), cnt] for cnt in contours] biggest_contour_area = 0 biggest_contour = 0 for elem in area_list: if elem[0] > biggest_contour_area: biggest_contour_area = elem[0] biggest_contour = elem[1] - peri = _cv2.arcLength(biggest_contour, True) - approx = _cv2.approxPolyDP(biggest_contour, 0.02 * peri, True) - x_point, y_point, width, height = _cv2.boundingRect(approx) - print(x_point, y_point, width, height) + peri = cv2.arcLength(biggest_contour, True) + approx = cv2.approxPolyDP(biggest_contour, 0.02 * peri, True) + x_point, y_point, width, height = cv2.boundingRect(approx) + return x_point, y_point, width, height @@ -103,18 +110,17 @@ def _resize_images(images: list) -> list: :return: the resized cut images """ index = 0 - for _img in images: + for image_to_resize in images: # Dimensions of object = 120 cm long, 60 cm wide, and 60 cm tall # It is better to divide by height of the image than width - width = int(_img.shape[0] * MOSAIC_HEIGHT / _img.shape[1]) - images[index] = _cv2.resize(src=_img, dsize=(width, MOSAIC_HEIGHT)) + width = int(image_to_resize.shape[0] * MOSAIC_HEIGHT / image_to_resize.shape[1]) + images[index] = cv2.resize(src=image_to_resize, dsize=(width, MOSAIC_HEIGHT)) index += 1 return images -def _type_division(dict_color_map: list) -> \ - _typing.Tuple[list, int]: +def _type_division(dict_color_map: list) -> typing.Tuple[list, int]: """ Divide the type of squares(upper and lower squares). 
@@ -131,7 +137,7 @@ def _type_division(dict_color_map: list) -> \ return bottom_index, top_index -def _combine_images(img_white: list, dict_color_map: list, bottom_index: list, top_index: int) -> _np.ndarray: +def _combine_images(img_white: list, dict_color_map: list, bottom_index: list, top_index: int) -> np.ndarray: """ Combine the squares to a image. @@ -147,19 +153,28 @@ def _combine_images(img_white: list, dict_color_map: list, bottom_index: list, t for _ in range(3): for idx in range(3): img_index = idx + 1 + # Connect colors for side faces, color has to match with the next side if connect_color == _get_key(dict_color_map[bottom_index[img_index]], 3): - left_img = _np.concatenate((left_img, img_white[bottom_index[img_index]]), axis=1) + left_img = np.concatenate((left_img, img_white[bottom_index[img_index]]), axis=1) connect_color = _get_key(dict_color_map[bottom_index[img_index]], 1) + + # Connect top side with the face and get the length of the top face to be matched if _get_key(dict_color_map[bottom_index[img_index]], 0) == _get_key(dict_color_map[top_index], 2): length_top = left_img.shape[0] - img_white[bottom_index[img_index]].shape[0] - canvas_top = _np.ones((left_img.shape[0], left_img.shape[1], 3), dtype="uint8") + # Get black background for matching photos + canvas_top = np.ones((left_img.shape[0], left_img.shape[1], 3), dtype="uint8") canvas_top[:] = (0, 0, 0) + + # Get dimension to combine correctly top image with the bottom top_img = img_white[top_index] width_top = top_img.shape[0] + length_top height_top = top_img.shape[1] + MOSAIC_HEIGHT - result = _np.concatenate((canvas_top, left_img), axis=0) + + # Concatenate side faces with the black background and then add top face to correct position + result = np.concatenate((canvas_top, left_img), axis=0) result[length_top: width_top, MOSAIC_HEIGHT:height_top] = top_img + # Changed due to the width of result image that has to have 2 widths of smaller face and 2 widths of bigger face return result[:, 
:2 * img_white[1].shape[1] + 2 * img_white[2].shape[1]] @@ -173,13 +188,16 @@ def _color_detect(images: list) -> list: """ color_content = [{}, {}, {}, {}, {}] for color, value in COLOR_DICT.items(): + # Mask the images to get the colors we are interested in masks = _filter_color(value[0], value[1], images) - index_mask = 0 - for mask in masks: - contours, _ = _cv2.findContours(mask, _cv2.RETR_TREE, _cv2.CHAIN_APPROX_SIMPLE) + + for index_mask, mask in enumerate(masks): + # Get contours of the masked image, focus on colors + contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + for _, contour in enumerate(contours): - if _cv2.contourArea(contour) > int(mask.shape[0] / 4): - moments = _cv2.moments(contours[0]) + if cv2.contourArea(contour) > int(mask.shape[0] / 4): + moments = cv2.moments(contours[0]) if moments["m00"] != 0: c_x = int(moments["m10"] / moments["m00"]) c_y = int(moments["m01"] / moments["m00"]) @@ -189,7 +207,7 @@ def _color_detect(images: list) -> list: vertical = c_y / mask.shape[0] if color != "white": check_for_color(horizontal, vertical, color_content, index_mask, color) - index_mask += 1 + return color_content @@ -206,12 +224,12 @@ def check_for_color(horizontal, vertical, color_content, index_mask, color): elif (horizontal < 0.2) & (vertical < 0.7) & (vertical > 0.3): color_content[index_mask][color] = 3 else: - print("error") + logger.info("Error while trying to find the colour.") def _get_key(dictionary: dict, value: int) -> list: """ - Get the key of dict() by value. + Get the key of dict() by value. Used for getting colours of object's faces to match. :param dictionary: the dict() :param value: the value of dict() @@ -225,10 +243,10 @@ def helper_display(tag, image_helper): Use helper function for images for displaying. 
""" for index, name in enumerate(image_helper): - _cv2.imshow(tag + str(index), name) + cv2.imshow(tag + str(index), name) -def create_photomosaic(images: list) -> _typing.Tuple[list, _np.ndarray, list, list]: +def create_photomosaic(images: list) -> typing.Tuple[list, np.ndarray, list, list]: """ Process the images and combine them by their color into a photomosaic. @@ -236,24 +254,22 @@ def create_photomosaic(images: list) -> _typing.Tuple[list, _np.ndarray, list, l :return: Original images, Combined picture, the list of original pictures, the list of cut images """ # Convert images to HSV color from a copy of the original images - images_hsv = [_cv2.cvtColor(_image, _cv2.COLOR_BGR2HSV) for _image in images.copy()] + images_hsv = [cv2.cvtColor(_image, cv2.COLOR_BGR2HSV) for _image in images.copy()] # cut the useless part of the image img_cut = _cut_images(images_hsv) dict_color_map = _color_detect(img_cut[:1]) - print(dict_color_map) + # resize the images for combining img_white = _resize_images(img_cut) # divide the top and bottom image bottom_index, top_index = _type_division(dict_color_map) - print("Image count", len(img_cut)) - print(bottom_index, top_index) # combine the images result = _combine_images(img_white, dict_color_map, bottom_index, top_index) - return images, _cv2.cvtColor(result, _cv2.COLOR_HSV2BGR), img, img_cut + return images, cv2.cvtColor(result, cv2.COLOR_HSV2BGR), img, img_cut if __name__ == "__main__": @@ -262,11 +278,11 @@ def create_photomosaic(images: list) -> _typing.Tuple[list, _np.ndarray, list, l # PHOTOS MUST BE TAKEN IN CORRECT ORDER: TOP -> LEFT -> FRONT -> RIGHT -> BACK for i in range(5): # This line to be changed when we get the camera - img.append(_cv2.imread("./proper_samples/" + str(i + 1) + ".png")) - print(len(img)) + img.append(cv2.imread("./proper_samples/" + str(i + 1) + ".png")) + _, result_img, image, image_cut = create_photomosaic(img.copy()) - _cv2.imshow("result", _cv2.resize(result_img, (0, 0), fx=0.5, fy=0.5)) - 
k = _cv2.waitKey(0) + cv2.imshow("result", cv2.resize(result_img, (0, 0), fx=0.5, fy=0.5)) + k = cv2.waitKey(0) if k == 27: # wait for ESC key to exit - _cv2.destroyAllWindows() + cv2.destroyAllWindows() From 37ead0b26512b0937dec42cdc7018fa7f2759221 Mon Sep 17 00:00:00 2001 From: Mateusz Radzikowski Date: Mon, 12 Jul 2021 13:43:51 +0200 Subject: [PATCH 17/17] Add type-hinting and change parsing to integers Changed parsing to be compatible with newer versions of OpenCV --- surface/vision/mussels.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/surface/vision/mussels.py b/surface/vision/mussels.py index 3f5a8cc..0329249 100644 --- a/surface/vision/mussels.py +++ b/surface/vision/mussels.py @@ -43,7 +43,7 @@ def _gaussian_blur_smooth(mask: np.ndarray) -> np.ndarray: return blurred -def _get_edge_points(mask) -> list: +def _get_edge_points(mask: np.ndarray) -> list: """ Get the list of points on the edge of the square. """ @@ -122,12 +122,11 @@ def _find_mussels(image_greyscale: ndarray, mask: ndarray, hull_rect: ndarray) - # Draw the circles on the image (and count the circles) num = 0 for i in circles[0, :]: - i = i.astype(np.int32) - if cv2.pointPolygonTest(hull_rect, (i[0], i[1]), measureDist=True) > (-i[2] / 3): + if cv2.pointPolygonTest(hull_rect, (int(i[0]), int(i[1])), measureDist=True) > (-int(i[2]) / 3): # Draw the outer circle, the center of the circle and increment the counter - cv2.circle(mask, (i[0], i[1]), i[2], (0, 255, 0), 2) - cv2.circle(mask, (i[0], i[1]), 2, (0, 0, 255), 3) + cv2.circle(mask, (int(i[0]), int(i[1])), int(i[2]), (0, 255, 0), 2) + cv2.circle(mask, (int(i[0]), int(i[1])), 2, (0, 0, 255), 3) num += 1 return num, mask @@ -165,7 +164,7 @@ def count_mussels(image: np.ndarray) -> Tuple[int, ndarray, ndarray, ndarray, nd hull_rect = _get_corner_points(points) # Draw hull rect on the original image - convex_hull = image.copy() + convex_hull: np.ndarray = image.copy() cv2.drawContours(convex_hull, [hull_rect], 0, 
(0, 0, 255), 3) # Find, count and draw the circles and square on the image