diff --git a/apperception/utils/__init__.py b/apperception/utils/__init__.py index 5f71b01b..7e4b38c7 100644 --- a/apperception/utils/__init__.py +++ b/apperception/utils/__init__.py @@ -14,7 +14,7 @@ from .ingest_road import ingest_road from .join import join from .overlay_bboxes import overlay_bboxes -from .overlay_trajectory import fetch_camera_config, overlay_trajectory +from .overlay_trajectory import fetch_camera_config, fetch_camera_trajectory, overlay_trajectory from .query_to_str import query_to_str from .recognize import recognize from .reformat_bbox_trajectories import reformat_bbox_trajectories @@ -40,6 +40,7 @@ "get_video_roi", "get_video_box", "fetch_camera_config", + "fetch_camera_trajectory", "join", "transformation", "fetch_camera", diff --git a/apperception/utils/overlay_trajectory.py b/apperception/utils/overlay_trajectory.py index 61e5ca27..50696346 100644 --- a/apperception/utils/overlay_trajectory.py +++ b/apperception/utils/overlay_trajectory.py @@ -185,13 +185,16 @@ def fetch_camera_config(filename: str, database): frameNum, fileName, cameraHeading, - egoHeading + egoHeading, + timestamp, + roadDirection FROM Cameras WHERE fileName = '{filename}' ORDER BY cameraId ASC, frameNum ASC; """ result = database.execute(query)[0] + print(result) camera_config = { "cameraId": result[0], "egoTranslation": result[1], @@ -199,14 +202,54 @@ def fetch_camera_config(filename: str, database): "cameraTranslation": result[3], "cameraRotation": result[4], "cameraIntrinsic": result[5], - "frameNum": result[5], + "frameNum": result[6], "fileName": result[7], "cameraHeading": result[8], "egoHeading": result[9], + "timestamp": result[10], + "roadDirection": result[11], } return camera_config +def fetch_camera_trajectory(video_name: str, database): + query = f""" + CREATE OR REPLACE FUNCTION ST_XYZ (g geometry) RETURNS real[] AS $$ + BEGIN + RETURN ARRAY[ST_X(g), ST_Y(g), ST_Z(g)]; + END; + $$ LANGUAGE plpgsql; + + SELECT + cameraId, + ST_XYZ(egoTranslation), + frameNum, + timestamp, + fileName, + cameraHeading, + egoHeading + FROM Cameras + WHERE + fileName LIKE '%{video_name}%' + ORDER BY cameraId ASC, frameNum ASC; + """ + result = database._execute_query(query) + camera_config = [] + for row in result: + camera_config.append( + { + "cameraId": row[0], + "egoTranslation": row[1], + "frameNum": row[2], + "timestamp": row[3], + "fileName": row[4], + "cameraHeading": row[5], + "egoHeading": row[6], + } + ) + return camera_config + + def fetch_trajectory(itemId: str, time: str, database): query = f""" CREATE OR REPLACE FUNCTION ST_XYZ (g geometry) RETURNS real[] AS $$ diff --git a/apperception/utils/transformation.py b/apperception/utils/transformation.py index 789c92de..43c98813 100644 --- a/apperception/utils/transformation.py +++ b/apperception/utils/transformation.py @@ -1,16 +1,18 @@ -from typing import Any, Dict, Tuple, Union +from typing import Any, Dict, Tuple import numpy as np +import numpy.typing as npt from pyquaternion import Quaternion def transformation( - copy_centroid_3d: Union[np.ndarray, Tuple[float, float, float]], camera_config: Dict[str, Any] -) -> np.ndarray: + copy_centroid_3d: "npt.NDArray[np.floating] | Tuple[float, float, float]", + camera_config: Dict[str, Any], +) -> "npt.NDArray[np.floating]": """ - TODO: transformation from 3d world coordinate to 2d frame coordinate given the camera config + Transformation from 3d world coordinate to 2d frame coordinate given the camera config """ - centroid_3d: np.ndarray = np.copy(copy_centroid_3d) + centroid_3d: 
npt.NDArray[np.floating] = np.copy(copy_centroid_3d) centroid_3d -= camera_config["egoTranslation"] centroid_3d = np.dot( diff --git a/detection_estimation.ipynb b/detection_estimation.ipynb new file mode 100644 index 00000000..37555da6 --- /dev/null +++ b/detection_estimation.ipynb @@ -0,0 +1,126 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "c5c9badc", + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import os\n", + "import pickle\n", + "\n", + "from optimized_ingestion.camera_config import camera_config\n", + "from optimized_ingestion.payload import Payload\n", + "from optimized_ingestion.pipeline import Pipeline\n", + "from optimized_ingestion.stages.decode_frame.parallel_decode_frame import ParallelDecodeFrame\n", + "from optimized_ingestion.stages.detection_2d.yolo_detection import YoloDetection\n", + "from optimized_ingestion.stages.filter_car_facing_sideway import FilterCarFacingSideway\n", + "from optimized_ingestion.stages.detection_estimation import DetectionEstimation\n", + "from optimized_ingestion.stages.tracking_2d.strongsort import StrongSORT\n", + "from optimized_ingestion.stages.tracking_2d.tracking_2d import Tracking2D\n", + "from optimized_ingestion.stages.tracking_3d.from_2d_and_road import From2DAndRoad\n", + "from optimized_ingestion.stages.tracking_3d.tracking_3d import Tracking3DResult\n", + "# from optimized_ingestion.trackers.yolov5_strongsort_osnet_tracker import TrackingResult\n", + "from optimized_ingestion.video import Video" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75cd29a3", + "metadata": {}, + "outputs": [], + "source": [ + "BOSTON_VIDEOS = [\n", + "# \"scene-0757-CAM_FRONT\",\n", + " # \"scene-0103-CAM_FRONT\",\n", + " # \"scene-0553-CAM_FRONT\",\n", + " # \"scene-0665-CAM_FRONT\",\n", + "# \"scene-0655-CAM_FRONT_RIGHT\",\n", + " \"scene-0655-CAM_BACK_RIGHT\",\n", + "]\n", + "\n", + "NUSCENES_PROCESSED_DATA = \"NUSCENES_PROCESSED_DATA\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "436432cc", + "metadata": {}, + "outputs": [], + "source": [ + "pipeline = Pipeline()\n", + "# pipeline.add_filter(filter=InView(distance=10, segment_type=\"intersection\"))\n", + "# pipeline.add_filter(filter=Stopped(min_stopped_frames=2, stopped_threshold=1.0))\n", + "pipeline.add_filter(filter=ParallelDecodeFrame())\n", + "pipeline.add_filter(filter=YoloDetection())\n", + "# pipeline.add_filter(filter=DetectionEstimation())\n", + "pipeline.add_filter(filter=StrongSORT())\n", + "pipeline.add_filter(filter=From2DAndRoad())\n", + "pipeline.add_filter(filter=FilterCarFacingSideway())\n", + "\n", + "if NUSCENES_PROCESSED_DATA in os.environ:\n", + " DATA_DIR = os.environ[NUSCENES_PROCESSED_DATA]\n", + "else:\n", + " DATA_DIR = \"/work/apperception/data/nuScenes/full-dataset-v1.0/Mini\"\n", + "with open(os.path.join(DATA_DIR, \"videos/boston-seaport\", \"frames.pickle\"), \"rb\") as f:\n", + " videos = pickle.load(f)\n", + "\n", + "for name, video in videos.items():\n", + "# if name not in BOSTON_VIDEOS:\n", + "# continue\n", + "\n", + " print(name)\n", + " frames = Video(\n", + " os.path.join(DATA_DIR, \"videos/boston-seaport\", video[\"filename\"]),\n", + " [camera_config(*f, 0) for f in video[\"frames\"]],\n", + " video[\"start\"],\n", + " )\n", + "\n", + " output = pipeline.run(Payload(frames))\n", + "\n", + " benchmark = []\n", + " for stage in pipeline.stages:\n", + " benchmark.append({\n", + " \"stage\": stage.classname(),\n", + " \"runtimes\": 
stage.runtimes,\n", + " })\n", + "\n", + " with open(\"./outputs/benchmark.json\", \"w\") as f3:\n", + " json.dump(benchmark, f3)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b1d2293", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (apperception)", + "language": "python", + "name": "apperception" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/optimization_playground/detection_estimation.ipynb b/optimization_playground/detection_estimation.ipynb new file mode 100644 index 00000000..3a171548 --- /dev/null +++ b/optimization_playground/detection_estimation.ipynb @@ -0,0 +1,3508 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 4, + "id": "3a4ea91f", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import time\n", + "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))\n", + "from detection_estimation.segment_mapping import *\n", + "import matplotlib.pyplot as plt\n", + "from apperception.utils import fetch_camera_config, fetch_camera_trajectory" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ae32e753", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "ad4f95a7", + "metadata": {}, + "outputs": [], + "source": [ + "# test_config = fetch_camera_config('samples/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385108912404.jpg', database)\n", + "# test_config" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e1f3e155", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# mapping = map_imgsegment_roadsegment(test_config)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a081db0c", + "metadata": {}, + "outputs": [], + "source": [ + "# test_file_path = '/home/yongming/workspace/research/apperception/v1.0-mini/samples/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385108912404.jpg'\n", + "# visualization(test_file_path, test_config, mapping)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "d8abf4e6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Errno 2] No such file or directory: '../Yolov5_StrongSORT_OSNet'\n", + "/data/chanwutk/code/apperception/optimization_playground\n" + ] + }, + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'sample_frame_tracker'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn [8], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m get_ipython()\u001b[38;5;241m.\u001b[39mrun_line_magic(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcd\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m../Yolov5_StrongSORT_OSNet\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m----> 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01msample_frame_tracker\u001b[39;00m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'sample_frame_tracker'" + ] + } + ], 
+ "source": [ + "%cd ../Yolov5_StrongSORT_OSNet\n", + "import sample_frame_tracker\n", + "# full_img_detection = sample_frame_tracker.run(test_file_path, save_vid=True, detect_only=True)\n", + "# full_img_detection" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "a96223b4", + "metadata": {}, + "outputs": [], + "source": [ + "def display_detection(test_file_path, full_img_detection):\n", + " import cv2\n", + " test_frame = cv2.imread(test_file_path)\n", + " for obj_idx, detection in full_img_detection.items():\n", + " obj_cls, bbox = detection\n", + " if obj_cls == 'car':\n", + " x,y,w,h = list(map(int,bbox))\n", + " cv2.rectangle(test_frame,(x-w//2,y-h//2),(x+w//2,y+h//2),(0,255,0),2)\n", + " cv2.putText(test_frame, '_'.join([obj_cls, str(obj_idx)]), (x+w//2+5,y+h//2+5),0,0.3,(0,255,0))\n", + " cv2.imshow('detection', test_frame)\n", + " cv2.waitKey(0)\n", + " cv2.destroyAllWindows()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "6e94f5a8", + "metadata": {}, + "outputs": [], + "source": [ + "from detection_estimation.utils import *\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "b6e8f502", + "metadata": {}, + "outputs": [], + "source": [ + "# ego car trajectory\n", + "def prepare_ego(test_video):\n", + " ego_trajectory = get_ego_trajectory(test_video)\n", + " video_trajectory = fetch_camera_trajectory(test_video, database)\n", + " sorted_ego_configs = [fetch_camera_config(e['fileName'], database) for e in video_trajectory]\n", + " return sorted_ego_configs, ego_trajectory" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "72b0672d", + "metadata": {}, + "outputs": [], + "source": [ + "# ego_speed = get_ego_speed(ego_trajectory)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "8f57a456", + "metadata": {}, + "outputs": [], + "source": [ + "# [time_to_nearest_frame(test_video, point.timestamp) for point in ego_trajectory]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "8d6659ca", + "metadata": {}, + "outputs": [], + "source": [ + "import datetime\n", + "# test_timestamp = datetime.datetime(2018, 8, 27, 8, 51, 32, 162404, tzinfo=datetime.timezone.utc)\n", + "# timestamp_to_nearest_trajectory(ego_trajectory, test_timestamp)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "764ed7ba", + "metadata": {}, + "outputs": [], + "source": [ + "# test_point = [1772, 865, 0.0]\n", + "# point_to_nearest_trajectory(test_point, ego_trajectory)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "d0fa4e88", + "metadata": {}, + "outputs": [], + "source": [ + "from detection_estimation.sample_plan_algorithms import *" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "73351a20", + "metadata": {}, + "outputs": [], + "source": [ + "base_dir = '/home/yongming/workspace/research/apperception_new_local/boston-seaport'\n", + "test_img_base_dir = '/home/yongming/workspace/research/apperception/v1.0-mini/'" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "af8f35b9", + "metadata": {}, + "outputs": [], + "source": [ + "# import pickle\n", + "# with open(os.path.join(base_dir, f'frames.pickle'), \"rb\") as f:\n", + "# df_sample_data = pickle.loads(f.read())\n", + "# df_sample_data\n", + "# i = 0\n", + "# for frame in df_sample_data['scene-0655-CAM_FRONT']['frames']:\n", + "# if frame[2] == 1194:\n", + "# print(i)\n", + "# break\n", + "# i += 1" + ] + }, + { + "cell_type": 
"code", + "execution_count": 16, + "id": "848684da", + "metadata": {}, + "outputs": [], + "source": [ + "from detection_estimation.detection_estimation import *\n", + "# ### Integration\n", + "# target_config_idx = 218\n", + "# video = 'scene-0655-CAM_FRONT'\n", + "# configs = df_sample_data[video]\n", + "# sorted_ego_config = [dict(zip(configs['columns'], frame))\n", + "# for frame in configs['frames']]\n", + "# len(sorted_ego_config)\n", + "# # all_car_loc3d = " + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "3071563e", + "metadata": {}, + "outputs": [], + "source": [ + "# current_ego_config = sorted_ego_config[target_config_idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "6e88fbab", + "metadata": {}, + "outputs": [], + "source": [ + "car_loc3d_ground_truth = [(1991, 874), (1949.181, 873.164)]" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "8942c719", + "metadata": {}, + "outputs": [], + "source": [ + "from shapely.geometry import Point, Polygon\n", + "def generate_sample_plan_once(video, ego_config, mapping, next_frame_num, car_loc3d=None, target_car_detection=None, all_detection_info=None):\n", + " if all_detection_info is None:\n", + " assert target_car_detection and car_loc3d\n", + " x,y,w,h = list(map(int, target_car_detection))\n", + " car_loc2d = (x, y+h//2)\n", + " car_bbox2d = (x-w//2,y-h//2,x+w//2,y+h//2)\n", + " car_bbox3d = None\n", + " all_detections = []\n", + " all_detections.append(obj_detection('car_1', car_loc3d, car_loc2d, car_bbox3d, car_bbox2d))\n", + " all_detection_info = construct_all_detection_info(\n", + " current_frame, cam_segment_mapping, ego_trajectory, ego_config, all_detections)\n", + " if all_detection_info:\n", + " print(all_detection_info[0].road_type)\n", + " next_sample_plan = generate_sample_plan(video, next_frame_num, all_detection_info, 50)\n", + " next_frame = None\n", + " if next_sample_plan.get_next_sample_frame_info():\n", + " next_sample_frame_name, next_sample_frame_num, next_sample_frame_time = (\n", + " next_sample_plan.get_next_sample_frame_info())\n", + " print(\"next frame name\", next_sample_frame_name)\n", + " print(\"next frame num\", next_sample_frame_num)\n", + " # print(next_sample_plan.action)\n", + " next_frame = cv2.imread(test_img_base_dir+next_sample_frame_name)\n", + "# cv2.imshow(\"next_frame\", next_frame)\n", + "# cv2.waitKey(0)\n", + "# cv2.destroyAllWindows()\n", + " return next_sample_plan, next_frame\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "b40ad4a8", + "metadata": {}, + "outputs": [], + "source": [ + "def construct_estimated_all_detection_info(current_frame, cam_segment_mapping, ego_config, ego_trajectory):\n", + " all_detections = []\n", + " full_img_detection = sample_frame_tracker.run(current_frame, save_vid=True, detect_only=True)\n", + "# display_detection(current_frame, full_img_detection)\n", + " for obj_idx, detection in full_img_detection.items():\n", + " obj_cls, bbox = detection\n", + " x,y,w,h = list(map(int,bbox))\n", + " car_loc2d = (x,y+h//2)\n", + "# print(car_loc2d)\n", + " car_bbox2d = (x-w//2,y-h//2,x+w//2,y+h//2)\n", + " car_bbox3d = None\n", + " estimate_3d = detection_to_img_segment(car_loc2d, cam_segment_mapping)\n", + " if estimate_3d and estimate_3d.road_segment_info.segment_type in ['lane', 'laneSection']:\n", + " car_loc3d = tuple(Polygon(estimate_3d.road_segment_info.segment_polygon).centroid.coords)\n", + "# print(tuple(car_loc3d))\n", + " all_detections.append(obj_detection('car_1', 
car_loc3d, car_loc2d, car_bbox3d, car_bbox2d))\n", + " print(\"all_detections\", all_detections)\n", + " all_detection_info = construct_all_detection_info(\n", + " current_frame, cam_segment_mapping, ego_trajectory, ego_config, all_detections)\n", + " return all_detection_info" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "71f2624b", + "metadata": {}, + "outputs": [], + "source": [ + "def dry_run(sorted_ego_configs, start_frame_num, ego_trajectory, video):\n", + " skipped_frame_num = []\n", + " next_frame_num = start_frame_num\n", + " action_type_counts = {}\n", + " fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n", + " display_video = cv2.VideoWriter(f'sampled_frames_{video.replace(\"/\", \"_\")}.avi',fourcc, 10, (1600, 900))\n", + " start_time = time.time()\n", + " total_detection_time = 0\n", + " total_sample_plan_time = 0\n", + " for i in range(len(sorted_ego_configs)-1):\n", + " current_ego_config = sorted_ego_configs[i]\n", + " if current_ego_config['frameNum'] != next_frame_num:\n", + " skipped_frame_num.append(current_ego_config['frameNum'])\n", + " continue\n", + " next_frame_num = sorted_ego_configs[i+1]['frameNum']\n", + " cam_segment_mapping = map_imgsegment_roadsegment(current_ego_config)\n", + " print(\"mapping length\", len(cam_segment_mapping))\n", + " current_frame = test_img_base_dir + current_ego_config['fileName']\n", + " display_video.write(cv2.imread(current_frame))\n", + " start_detection_time = time.time()\n", + " all_detection_info = construct_estimated_all_detection_info(\n", + " current_frame, cam_segment_mapping, current_ego_config, ego_trajectory)\n", + " total_detection_time += time.time()-start_detection_time\n", + " start_generate_sample_plan = time.time()\n", + " next_sample_plan, next_frame = generate_sample_plan_once(\n", + " video, current_ego_config, cam_segment_mapping, next_frame_num, all_detection_info=all_detection_info)\n", + " total_sample_plan_time += time.time() - start_generate_sample_plan\n", + " next_action_type = next_sample_plan.get_action_type()\n", + " if next_action_type not in action_type_counts:\n", + " action_type_counts[next_action_type] = 1\n", + " else:\n", + " action_type_counts[next_action_type] += 1\n", + " next_frame_num = next_sample_plan.get_next_frame_num(next_frame_num)\n", + " display_video.release()\n", + " print(\"sorted_ego_config_length\", len(sorted_ego_configs))\n", + " print(\"number of skipped\", len(skipped_frame_num))\n", + " print(skipped_frame_num)\n", + " print(action_type_counts)\n", + " total_run_time = time.time()-start_time\n", + " num_runs = len(sorted_ego_configs) - len(skipped_frame_num)\n", + " print(\"total_run_time\", total_run_time)\n", + " print(\"avg run time\", total_run_time/num_runs)\n", + " print(\"total_detection_time\", total_detection_time)\n", + " print(\"avg detection time\", total_detection_time/num_runs)\n", + " print(\"total_generate_sample_plan_time\", total_sample_plan_time)\n", + " print(\"avg generate_sample_plan time\", total_sample_plan_time/num_runs)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "b25f486a", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[condensed cell output: the ShapelyDeprecationWarning above (utils.py:45, plus utils.py:85 on some frames) repeats before every sampled frame. Each iteration logs the segment-mapping time (0.19-0.84 s), the mapping length (22-66 segments), the OSNet banner (Model: osnet_x0_25, params: 203,568, flops: 82,316,000, weights osnet_x0_25_msmt17.pt), the estimated all_detections, the road type 'lane', and the next sample plan. The chosen next frame nums are 27, 51, 130, 239, 317, 358, 397, 437, 463, 566, 763, 1112, 1346, 1449, 1566, 1668, 1749, 1843, 1949, 1988, 2026, 2120; several intermediate frames yield empty all_detections and no new plan, and one frame raises TopologyException: Input geom 1 is invalid: Self-intersection at 1902.9833080953297 882.54139685454481.]\n" + ] + }
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "TopologyException: Input geom 1 is invalid: Self-intersection at 1902.9833080953297 882.54139685454481\n", + "Self-intersection at or near point 1902.9833080953297 882.54139685454481\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "all_detections [obj_detection(id='car_1', car_loc3d=((1914.8829541031619, 873.3757827536206),), car_loc2d=(610, 557), car_bbox3d=None, car_bbox2d=(534, 461, 686, 557)), obj_detection(id='car_1', car_loc3d=((1900.6347723897259, 879.0371752935228),), car_loc2d=(214, 565), car_bbox3d=None, car_bbox2d=(0, 415, 428, 565))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385106162404.jpg\n", + "next frame num 2222\n", + "total mapping time: 0.7268157005310059\n", + "mapping length 61\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1914.8829541031619, 873.3757827536206),), car_loc2d=(541, 564), car_bbox3d=None, car_bbox2d=(435, 436, 647, 564))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385106762404.jpg\n", + "next frame num 2317\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.8110146522521973\n", + "mapping length 61\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1914.8829541031619, 873.3757827536206),), car_loc2d=(459, 601), car_bbox3d=None, car_bbox2d=(314, 435, 604, 601))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385107362404.jpg\n", + "next frame num 2411\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.9105303287506104\n", + "mapping length 68\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1931.91239670335, 886.3316907920956),), car_loc2d=(46, 523), car_bbox3d=None, car_bbox2d=(6, 447, 86, 523))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385107612404.jpg\n", + "next frame num 2450\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.7521004676818848\n", + "mapping length 68\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.7843153476715088\n", + "mapping length 68\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.8346855640411377\n", + "mapping length 67\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.7926990985870361\n", + "mapping length 67\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1972.356772971168, 874.6494952773176),), car_loc2d=(781, 496), car_bbox3d=None, car_bbox2d=(764, 472, 798, 496))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385108512404.jpg\n", + "next frame num 2585\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 1.447481632232666\n", + "mapping length 91\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1972.356772971168, 874.6494952773176),), car_loc2d=(774, 497), car_bbox3d=None, car_bbox2d=(756, 467, 792, 497))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385110012404.jpg\n", + "next frame num 2826\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.9953773021697998\n", + "mapping length 76\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1972.356772971168, 874.6494952773176),), car_loc2d=(739, 509), car_bbox3d=None, car_bbox2d=(703, 453, 775, 509))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385110862404.jpg\n", + "next frame num 2956\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.859342098236084\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1972.356772971168, 874.6494952773176),), car_loc2d=(684, 549), car_bbox3d=None, car_bbox2d=(623, 453, 745, 549))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.8328063488006592\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1972.356772971168, 874.6494952773176),), car_loc2d=(678, 550), car_bbox3d=None, car_bbox2d=(613, 450, 743, 550))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.8429522514343262\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((1972.356772971168, 874.6494952773176),), car_loc2d=(664, 556), car_bbox3d=None, car_bbox2d=(594, 448, 734, 556))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.9003217220306396\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((2054.228685288684, 858.414282187199),), car_loc2d=(1111, 505), car_bbox3d=None, car_bbox2d=(1091, 477, 1131, 505))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.9277088642120361\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.9272370338439941\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.9185504913330078\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.7918155193328857\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.7939267158508301\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.8257570266723633\n", + "mapping length 57\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.7872545719146729\n", + "mapping length 58\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.8464281558990479\n", + "mapping length 58\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.6866118907928467\n", + "mapping length 58\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n", + "sorted_ego_config_length 237\n", + "number of skipped 180\n", + "[13, 41, 66, 81, 90, 105, 118, 143, 160, 168, 185, 199, 210, 224, 263, 278, 289, 303, 328, 342, 367, 384, 408, 421, 446, 477, 488, 502, 515, 527, 541, 557, 582, 596, 607, 636, 645, 660, 674, 685, 698, 713, 722, 737, 752, 883, 896, 912, 921, 937, 952, 962, 977, 990, 1002, 1015, 1032, 1042, 1057, 1071, 1082, 1096, 1121, 1137, 1150, 1161, 1174, 1189, 1198, 1214, 1229, 1239, 1253, 1266, 1277, 1290, 1306, 1315, 1331, 1370, 1386, 1395, 1411, 1425, 1435, 1474, 1487, 1502, 1514, 1527, 1542, 1551, 1582, 1591, 1607, 1620, 1631, 1644, 1658, 1684, 1698, 1709, 1723, 1738, 1763, 1778, 1789, 1803, 1818, 1829, 1859, 1868, 1884, 1898, 1909, 1924, 1937, 1962, 1978, 2002, 2017, 2041, 2056, 2066, 2081, 2095, 2105, 2136, 2143, 2159, 2172, 2184, 2197, 2212, 2237, 2252, 2261, 2277, 2292, 2302, 2332, 2342, 2357, 2371, 2382, 2396, 2421, 2436, 2506, 2522, 2531, 2547, 2560, 2572, 2602, 2610, 2627, 2641, 2652, 2666, 2680, 2691, 2706, 2721, 2730, 2745, 2759, 2770, 2785, 2801, 2810, 2839, 2851, 2864, 2878, 2887, 2903, 2917, 2929, 2941]\n", + "{'car_exit_segment': 20, 'ego_exit_segment': 8, None: 27, 'meet_up': 1}\n", + "total_run_time 68.98414993286133\n", + "avg run time 
1.2102482444361637\n", + "total_detection_time 31.918983459472656\n", + "avg detection time 0.5599821659556606\n", + "total_generate_sample_plan_time 2.0482518672943115\n", + "avg generate_sample_plan time 0.035934243285865114\n" + ] + } + ], + "source": [ + "test_video1 = 'CAM_FRONT/n008-2018-08-27'\n", + "sorted_ego_configs1, ego_trajectory1 = prepare_ego(test_video1)\n", + "dry_run(sorted_ego_configs1, 2, ego_trajectory1, test_video1)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "c133465f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.4129672050476074\n", + "mapping length 60\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((615.4809011304164, 1637.3069001205674),), car_loc2d=(1036, 531), car_bbox3d=None, car_bbox2d=(999, 483, 1073, 531)), obj_detection(id='car_1', car_loc3d=((548.168727975707, 1674.931277335449),), car_loc2d=(827, 526), car_bbox3d=None, car_bbox2d=(802, 484, 852, 526)), obj_detection(id='car_1', car_loc3d=((548.168727975707, 1674.931277335449),), car_loc2d=(885, 528), car_bbox3d=None, car_bbox2d=(853, 480, 917, 528)), obj_detection(id='car_1', car_loc3d=((618.8886816744962, 1640.5713210085441),), car_loc2d=(599, 554), car_bbox3d=None, car_bbox2d=(586, 482, 612, 554)), obj_detection(id='car_1', car_loc3d=((618.8886816744962, 1640.5713210085441),), car_loc2d=(550, 550), car_bbox3d=None, car_bbox2d=(537, 488, 563, 550))]\n", + "lane\n", + "next frame name samples/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151604012404.jpg\n", + "next frame num 82\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
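+  {
+   "cell_type": "markdown",
+   "id": "prepare-ego-sketch-note",
+   "metadata": {},
+   "source": [
+    "`prepare_ego` and `dry_run` are defined outside this diff, so only their call sites are visible above. The cell below is a minimal, hypothetical sketch of the contract the call above appears to assume: `prepare_ego` plausibly loads the per-frame ego camera configs for a video (the new `fetch_camera_trajectory` in this diff returns exactly such a list, ordered by `cameraId` and `frameNum`) and pulls out the ego trajectory, while `dry_run` walks those configs, estimates detections, and prints the `total_run_time` / `avg detection time` summary seen in the output. The `database` import and every name in the sketch are assumptions, not the actual implementation.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "prepare-ego-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hypothetical sketch only -- prepare_ego / dry_run live outside this diff.\n",
+    "from apperception.database import database  # assumed location of the shared connection\n",
+    "from apperception.utils import fetch_camera_trajectory\n",
+    "\n",
+    "\n",
+    "def prepare_ego(video_name):\n",
+    "    # fetch_camera_trajectory returns one dict per frame (egoTranslation,\n",
+    "    # frameNum, timestamp, ...) already ordered by cameraId and frameNum.\n",
+    "    configs = fetch_camera_trajectory(video_name, database)\n",
+    "    # Assumed: the ego trajectory is just the per-frame ego translation.\n",
+    "    ego_trajectory = [c[\"egoTranslation\"] for c in configs]\n",
+    "    return configs, ego_trajectory\n"
+   ]
+  },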
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "id": "c133465f",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n",
+      "  a, b = line.boundary\n",
+      "[... the same ShapelyDeprecationWarning messages repeat on stderr throughout this run ...]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "total mapping time: 0.4129672050476074\n",
+      "mapping length 60\n",
+      "Model: osnet_x0_25\n",
+      "- params: 203,568\n",
+      "- flops: 82,316,000\n",
+      "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n",
+      "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n",
+      "all_detections [obj_detection(id='car_1', car_loc3d=((615.4809011304164, 1637.3069001205674),), car_loc2d=(1036, 531), car_bbox3d=None, car_bbox2d=(999, 483, 1073, 531)), obj_detection(id='car_1', car_loc3d=((548.168727975707, 1674.931277335449),), car_loc2d=(827, 526), car_bbox3d=None, car_bbox2d=(802, 484, 852, 526)), obj_detection(id='car_1', car_loc3d=((548.168727975707, 1674.931277335449),), car_loc2d=(885, 528), car_bbox3d=None, car_bbox2d=(853, 480, 917, 528)), obj_detection(id='car_1', car_loc3d=((618.8886816744962, 1640.5713210085441),), car_loc2d=(599, 554), car_bbox3d=None, car_bbox2d=(586, 482, 612, 554)), obj_detection(id='car_1', car_loc3d=((618.8886816744962, 1640.5713210085441),), car_loc2d=(550, 550), car_bbox3d=None, car_bbox2d=(537, 488, 563, 550))]\n",
+      "lane\n",
+      "next frame name samples/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151604012404.jpg\n",
+      "next frame num 82\n",
+      "[... analogous per-frame mapping, detection, and next-frame logs continue ...]\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.22597336769104004\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.26027512550354004\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((736.4570932870629, 1564.663622038146),), car_loc2d=(715, 513), car_bbox3d=None, car_bbox2d=(692, 471, 738, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.26091504096984863\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((736.4570932870629, 1564.663622038146),), car_loc2d=(713, 513), car_bbox3d=None, car_bbox2d=(690, 471, 736, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2595832347869873\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((736.4570932870629, 1564.663622038146),), car_loc2d=(714, 513), car_bbox3d=None, car_bbox2d=(690, 473, 738, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.23244047164916992\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.24914789199829102\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections []\n", + "total mapping time: 0.20069265365600586\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((736.4570932870629, 1564.663622038146),), car_loc2d=(711, 515), car_bbox3d=None, car_bbox2d=(688, 473, 734, 515))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.25981664657592773\n", + "mapping length 40\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(946, 552), car_bbox3d=None, car_bbox2d=(894, 466, 998, 552))]\n", + "lane\n", + "next frame name samples/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151616412404.jpg\n", + "next frame num 2040\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2560694217681885\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(935, 544), car_bbox3d=None, car_bbox2d=(893, 474, 977, 544))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151617512404.jpg\n", + "next frame num 2210\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.23214268684387207\n", + "mapping length 32\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(912, 546), car_bbox3d=None, car_bbox2d=(874, 488, 950, 546))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151618512404.jpg\n", + "next frame num 2366\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2241225242614746\n", + "mapping length 37\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((712.0948640676032, 1554.7660721968286),), car_loc2d=(897, 533), car_bbox3d=None, car_bbox2d=(864, 483, 930, 533))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151619162404.jpg\n", + "next frame num 2469\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.1921672821044922\n", + "mapping length 36\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((712.0948640676032, 1554.7660721968286),), car_loc2d=(890, 532), car_bbox3d=None, car_bbox2d=(858, 482, 922, 532)), obj_detection(id='car_1', car_loc3d=((692.0359155683578, 1578.1208580935452),), car_loc2d=(447, 606), car_bbox3d=None, car_bbox2d=(425, 542, 469, 606))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151619362404.jpg\n", + "next frame num 2500\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.18963003158569336\n", + "mapping length 32\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((749.0654936630655, 1529.1136572934306),), car_loc2d=(751, 522), car_bbox3d=None, car_bbox2d=(727, 482, 775, 522)), obj_detection(id='car_1', car_loc3d=((712.0948640676032, 1554.7660721968286),), car_loc2d=(887, 529), car_bbox3d=None, car_bbox2d=(855, 477, 919, 529)), obj_detection(id='car_1', car_loc3d=((692.0359155683578, 1578.1208580935452),), car_loc2d=(380, 626), car_bbox3d=None, car_bbox2d=(349, 538, 411, 626))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151620012404.jpg\n", + "next frame num 2606\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.18906855583190918\n", + "mapping length 29\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((715.4230810897625, 1558.1658004638898),), car_loc2d=(734, 533), car_bbox3d=None, car_bbox2d=(709, 477, 759, 533))]\n", + "lane\n", + "next frame name sweeps/CAM_FRONT/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151620362404.jpg\n", + "next frame num 2655\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.3010718822479248\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((715.4230810897625, 1558.1658004638898),), car_loc2d=(720, 538), car_bbox3d=None, car_bbox2d=(687, 476, 753, 538)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(879, 520), car_bbox3d=None, car_bbox2d=(849, 474, 909, 520))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.30506229400634766\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((715.4230810897625, 1558.1658004638898),), car_loc2d=(718, 538), car_bbox3d=None, car_bbox2d=(685, 478, 751, 538)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(879, 520), car_bbox3d=None, car_bbox2d=(849, 474, 909, 520))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.29217529296875\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((715.4230810897625, 1558.1658004638898),), car_loc2d=(714, 543), car_bbox3d=None, car_bbox2d=(677, 479, 751, 543)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 519), car_bbox3d=None, car_bbox2d=(849, 475, 907, 519)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(843, 500), car_bbox3d=None, car_bbox2d=(828, 472, 858, 500))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.31339144706726074\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((715.4230810897625, 1558.1658004638898),), car_loc2d=(709, 546), car_bbox3d=None, car_bbox2d=(672, 482, 746, 546)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 519), car_bbox3d=None, car_bbox2d=(849, 475, 907, 519)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(957, 513), car_bbox3d=None, car_bbox2d=(942, 473, 972, 513)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(842, 500), car_bbox3d=None, car_bbox2d=(826, 472, 858, 500))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.31870174407958984\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 518), car_bbox3d=None, car_bbox2d=(849, 474, 907, 518)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(958, 513), car_bbox3d=None, car_bbox2d=(943, 475, 973, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.3290998935699463\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 519), car_bbox3d=None, car_bbox2d=(849, 477, 907, 519)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(958, 513), car_bbox3d=None, car_bbox2d=(943, 473, 973, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.28633713722229004\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 519), car_bbox3d=None, car_bbox2d=(849, 475, 907, 519)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(956, 513), car_bbox3d=None, car_bbox2d=(942, 475, 970, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2955052852630615\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(881, 518), car_bbox3d=None, car_bbox2d=(854, 476, 908, 518)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(928, 505), car_bbox3d=None, car_bbox2d=(908, 473, 948, 505)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(949, 510), car_bbox3d=None, car_bbox2d=(929, 474, 969, 510))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.30309438705444336\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 516), car_bbox3d=None, car_bbox2d=(850, 476, 906, 516)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(955, 511), car_bbox3d=None, car_bbox2d=(940, 473, 970, 511))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.3199498653411865\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 514), car_bbox3d=None, car_bbox2d=(849, 470, 905, 514)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(953, 510), car_bbox3d=None, car_bbox2d=(937, 472, 969, 510))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.28388428688049316\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 513), car_bbox3d=None, car_bbox2d=(848, 469, 906, 513)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(930, 504), car_bbox3d=None, car_bbox2d=(912, 472, 948, 504)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(953, 510), car_bbox3d=None, car_bbox2d=(938, 472, 968, 510)), obj_detection(id='car_1', car_loc3d=((730.7364787561393, 1556.0959890031118),), car_loc2d=(575, 529), car_bbox3d=None, car_bbox2d=(552, 481, 598, 529))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.3065972328186035\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(876, 516), car_bbox3d=None, car_bbox2d=(848, 472, 904, 516)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(956, 510), car_bbox3d=None, car_bbox2d=(943, 472, 969, 510))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.31461000442504883\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 516), car_bbox3d=None, car_bbox2d=(849, 474, 905, 516)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(954, 510), car_bbox3d=None, car_bbox2d=(938, 474, 970, 510))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.3222355842590332\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 518), car_bbox3d=None, car_bbox2d=(849, 478, 905, 518)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(932, 508), car_bbox3d=None, car_bbox2d=(910, 476, 954, 508))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2623636722564697\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(880, 520), car_bbox3d=None, car_bbox2d=(852, 478, 908, 520)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(974, 518), car_bbox3d=None, car_bbox2d=(957, 474, 991, 518)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(948, 513), car_bbox3d=None, car_bbox2d=(929, 477, 967, 513))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.28440308570861816\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(880, 521), car_bbox3d=None, car_bbox2d=(851, 477, 909, 521)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(973, 518), car_bbox3d=None, car_bbox2d=(956, 476, 990, 518))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2580528259277344\n", + "mapping length 41\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 519), car_bbox3d=None, car_bbox2d=(849, 475, 905, 519)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(934, 510), car_bbox3d=None, car_bbox2d=(914, 478, 954, 510)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(948, 512), car_bbox3d=None, car_bbox2d=(929, 478, 967, 512)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(972, 517), car_bbox3d=None, car_bbox2d=(956, 477, 988, 517))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.3000328540802002\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 519), car_bbox3d=None, car_bbox2d=(849, 477, 905, 519)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(948, 513), car_bbox3d=None, car_bbox2d=(929, 479, 967, 513)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(972, 518), car_bbox3d=None, car_bbox2d=(954, 478, 990, 518))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.24942946434020996\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 525), car_bbox3d=None, car_bbox2d=(848, 481, 906, 525)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(933, 514), car_bbox3d=None, car_bbox2d=(915, 480, 951, 514))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2863450050354004\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 528), car_bbox3d=None, car_bbox2d=(848, 482, 906, 528))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.25943422317504883\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((745.7063084178615, 1522.755975416549),), car_loc2d=(877, 524), car_bbox3d=None, car_bbox2d=(848, 482, 906, 524))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.30635714530944824\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(876, 516), car_bbox3d=None, car_bbox2d=(848, 472, 904, 516)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(967, 514), car_bbox3d=None, car_bbox2d=(945, 472, 989, 514))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2474358081817627\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(875, 511), car_bbox3d=None, car_bbox2d=(848, 465, 902, 511)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(952, 504), car_bbox3d=None, car_bbox2d=(930, 468, 974, 504)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(968, 510), car_bbox3d=None, car_bbox2d=(951, 470, 985, 510))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2865324020385742\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(875, 511), car_bbox3d=None, car_bbox2d=(848, 467, 902, 511))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.26761794090270996\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 518), car_bbox3d=None, car_bbox2d=(849, 474, 905, 518))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.30632710456848145\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(876, 522), car_bbox3d=None, car_bbox2d=(848, 478, 904, 522))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.31458282470703125\n", + "mapping length 39\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(877, 521), car_bbox3d=None, car_bbox2d=(850, 479, 904, 521)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(981, 522), car_bbox3d=None, car_bbox2d=(952, 476, 1010, 522))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.2899599075317383\n", + "mapping length 43\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(878, 520), car_bbox3d=None, car_bbox2d=(851, 478, 905, 520)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(976, 520), car_bbox3d=None, car_bbox2d=(951, 476, 1001, 520))]\n", + "lane\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n", + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:85: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " for intersect in intersection:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total mapping time: 0.28183746337890625\n", + "mapping length 43\n", + "Model: osnet_x0_25\n", + "- params: 203,568\n", + "- flops: 82,316,000\n", + "Successfully loaded pretrained weights from \"osnet_x0_25_msmt17.pt\"\n", + "** The following layers are discarded due to unmatched keys or layer size: ['classifier.weight', 'classifier.bias']\n", + "all_detections [obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(954, 515), car_bbox3d=None, car_bbox2d=(931, 477, 977, 515)), obj_detection(id='car_1', car_loc3d=((687.5019146385094, 1576.3385317678726),), car_loc2d=(880, 521), car_bbox3d=None, car_bbox2d=(853, 479, 907, 521)), obj_detection(id='car_1', car_loc3d=((730.7364787561393, 1556.0959890031118),), car_loc2d=(493, 552), car_bbox3d=None, car_bbox2d=(457, 506, 529, 552)), obj_detection(id='car_1', car_loc3d=((744.777968055767, 1519.4513720645464),), car_loc2d=(972, 522), car_bbox3d=None, car_bbox2d=(947, 476, 997, 522))]\n", + "lane\n", + "sorted_ego_config_length 229\n", + "number of skipped 158\n", + "[18, 27, 43, 57, 68, 96, 108, 121, 138, 145, 176, 187, 201, 214, 226, 239, 256, 281, 295, 307, 320, 334, 345, 359, 376, 384, 401, 415, 424, 438, 451, 463, 477, 493, 516, 531, 541, 556, 571, 581, 636, 650, 660, 688, 699, 816, 831, 846, 856, 871, 886, 896, 925, 935, 949, 964, 974, 990, 1004, 1029, 1044, 1053, 1069, 1083, 1094, 1108, 1132, 1146, 1161, 1171, 1187, 1200, 1212, 1241, 1250, 1266, 1281, 1291, 1306, 1319, 1344, 1361, 1368, 1385, 1398, 1410, 1423, 1448, 1462, 1479, 1489, 1504, 1517, 1529, 1558, 1568, 1583, 1598, 1608, 1622, 1637, 1661, 1674, 1698, 1713, 1723, 1738, 1897, 1913, 1922, 1938, 1951, 1962, 1976, 1990, 2001, 2016, 2030, 2054, 2068, 2079, 2093, 2109, 2118, 2134, 2147, 2157, 2171, 2185, 2225, 2235, 2248, 2262, 2272, 2286, 2301, 2311, 2326, 2341, 2350, 2381, 2391, 2406, 2419, 2431, 2444, 2461, 2487, 2512, 2525, 2538, 2550, 2563, 2581, 2588, 2617, 2628, 2641]\n", + "{'car_exit_segment': 18, 'ego_exit_segment': 4, None: 48}\n", + "total_run_time 66.34341382980347\n", + "avg run time 0.9344142792930066\n", + "total_detection_time 41.99688220024109\n", + "avg detection time 0.5915053831019872\n", + "total_generate_sample_plan_time 2.615060329437256\n", + "avg generate_sample_plan time 0.036831835625876844\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/yongming/workspace/research/apperception_new_local/apperception/optimization_playground/detection_estimation/utils.py:45: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. 
Use the `geoms` property to access the constituent parts of a multi-part geometry.\n", + " a, b = line.boundary\n" + ] + } + ], + "source": [ + "test_video2 = 'CAM_FRONT/n008-2018-08-01'\n", + "sorted_ego_configs2, ego_trajectory2 = prepare_ego(test_video2)\n", + "dry_run(sorted_ego_configs2, 2, ego_trajectory2, test_video2)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa5ef874", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (apperception)", + "language": "python", + "name": "apperception" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + }, + "vscode": { + "interpreter": { + "hash": "ef986073a7322f2daa7cef2e5604e6018e5522cc159657af8e7aa863491a7631" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/optimization_playground/segment_mapping.py b/optimization_playground/segment_mapping.py deleted file mode 100644 index 158aa455..00000000 --- a/optimization_playground/segment_mapping.py +++ /dev/null @@ -1,332 +0,0 @@ -""" Goal to map the road segment to the frame segment - Now only get the segment of type lane and intersection - except for the segment that contains the ego camera - -Usage example: - from optimization_playground.segment_mapping import map_imgsegment_roadsegment - from apperception.utils import fetch_camera_config - - test_config = fetch_camera_config(test_img, database) - mapping = map_imgsegment_roadsegment(test_config) -""" - -from typing import Any, Dict, Tuple, List, Set, Union - -from collections import namedtuple -import os -import math -import time -import sys -sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir))) - -import cv2 -import numpy as np -import pandas as pd -from matplotlib import pyplot as plt -from shapely.geometry import Point, Polygon, LineString -from plpygis import Geometry -pd.get_option("display.max_columns") - -from apperception.database import database -from apperception.utils import F, transformation, fetch_camera_config - -data_path = '/home/yongming/workspace/research/apperception/v1.0-mini/' -input_video_dir = os.path.join(data_path, 'sample_videos/') -input_video_name = 'CAM_FRONT_n008-2018-08-27.mp4' -input_date = input_video_name.split('_')[-1][:-4] -test_img = 'samples/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385108412412.jpg' - -CAMERA_COLUMNS = [ - "cameraId", - "frameId", - "frameNum", - "filename", - "cameraTranslation", - "cameraRotation", - "cameraIntrinsic", - "egoTranslation", - "egoRotation", - "timestamp", - "cameraHeading", - "egoHeading", - "cameraTranslationAbs", - "roadDirection"] -CAM_CONFIG_QUERY = """SELECT * FROM Cameras - WHERE filename like 'samples/CAM_FRONT/%{date}%' - ORDER BY frameNum""" - -camera_config = database.execute(CAM_CONFIG_QUERY.format(date=input_date)) -camera_config_df = pd.DataFrame(camera_config, columns=CAMERA_COLUMNS) -camera_config_df - -SEGMENT_CONTAIN_QUERY = """SELECT segmentpolygon.*, segment.heading FROM segmentpolygon - LEFT OUTER JOIN segment ON segmentpolygon.elementid = segment.elementid - WHERE ST_Contains(segmentpolygon.elementpolygon, \'{ego_translation}\'::geometry);""" -SEGMENT_DWITHIN_QUERY = """SELECT segmentpolygon.*, segment.heading FROM segmentpolygon - LEFT OUTER JOIN segment ON segmentpolygon.elementid = segment.elementid - 
WHERE ST_DWithin(elementpolygon, \'{start_segment}\'::geometry, {view_distance}) - AND segmentpolygon.segmenttypes in (ARRAY[\'lane\'], ARRAY[\'intersection\']);""" - -cam_segment_mapping = namedtuple('cam_segment_mapping', ['cam_segment', 'road_segment_info']) - -class roadSegmentInfo: - def __init__( - self, - segment_id: int, - segment_polygon: Tuple[Tuple[float, float]], - segment_type: str, - segment_heading: float, - contains_ego: bool, - ego_config: Dict[str, Any], - fov_lines: Tuple[Tuple[Tuple[float, float], Tuple[float, float]], - Tuple[Tuple[float, float], Tuple[float, float]]]): - """ - segment_id: unique segment id - segment_polygon: tuple of (x, y) coordinates - segment_type: road segment type - contains_ego: whether the segment contains ego camera - ego_config: ego camfig for the frame we asks info for - facing_relative: float - fov_lines: field of view lines - """ - self.segment_id = segment_id - self.segment_polygon = segment_polygon - self.segment_type = segment_type - self.segment_heading = segment_heading - self.contains_ego = contains_ego - self.ego_config = ego_config - self.facing_relative = self.facing_relative(ego_config['egoHeading'], segment_id) - self.fov_lines = fov_lines - - def facing_relative(self, ego_heading: float, segment_id: str) -> float: - return - -def road_segment_contains(ego_config: Dict[str, Any])\ - -> Tuple[Tuple[str, str, set]]: - query = SEGMENT_CONTAIN_QUERY.format( - ego_translation=Point(*ego_config['egoTranslation']).wkb_hex) - - return database.execute(query) - -def find_segment_dwithin(start_segment: Tuple[str], - view_distance=50) -> Tuple[Tuple[str, str, set]]: - start_segment_id, start_segment_polygon, segmenttype, segmentheading, contains_ego = start_segment - query = SEGMENT_DWITHIN_QUERY.format( - start_segment=start_segment_polygon, view_distance=view_distance) - - return database.execute(query) - -def reformat_return_segment(segments: Tuple[str, str, set])\ - -> List[Tuple[str, str, Tuple[str]]]: - return list(map( - lambda x: (x[0], x[1], tuple(x[2]) if x[2] is not None else None, x[3]), segments)) - -def annotate_contain(segments: tuple, contain=False): - for i in range(len(segments)): - segments[i] = segments[i] + (contain,) - -def construct_search_space(ego_config: Dict[str, Any], - view_distance=50) -> Set[Tuple[str, str, Tuple[str]]]: - ''' - road segment: (elementid, elementpolygon, segmenttype, contains_ego?) 
- view_distance: in meters, default 50 because scenic standard - return: set(road_segment) - ''' - search_space = set() - all_contain_segment = reformat_return_segment(road_segment_contains(ego_config)) - annotate_contain(all_contain_segment, contain=True) - search_space.update(all_contain_segment) - start_segment = all_contain_segment[0] - - segment_within_distance = reformat_return_segment(find_segment_dwithin(start_segment, view_distance)) - annotate_contain(segment_within_distance, contain=False) - search_space.update(segment_within_distance) - return search_space - -def get_fov_lines(ego_config: Dict[str, Any], - ego_fov=70) -> Tuple[Tuple[Tuple[float, float], Tuple[float, float]], - Tuple[Tuple[float, float], Tuple[float, float]]]: - ''' - return: two lines representing fov in world coord - ''' - ego_heading = ego_config['egoHeading'] - x_ego, y_ego = ego_config['egoTranslation'][:2] - left_degree = math.radians(ego_heading + ego_fov/2 + 90) - left_fov_line = ((x_ego, y_ego), - (x_ego + math.cos(left_degree)*50, - y_ego + math.sin(left_degree)*50)) - right_degree = math.radians(ego_heading - ego_fov/2 + 90) - right_fov_line = ((x_ego, y_ego), - (x_ego + math.cos(right_degree)*50, - y_ego + math.sin(right_degree)*50)) - return left_fov_line, right_fov_line - -def intersection(fov_line: Tuple[Tuple[Tuple[float, float], Tuple[float, float]], - Tuple[Tuple[float, float], Tuple[float, float]]], - segmentpolygon: Polygon) -> Tuple[Tuple[float, float]]: - ''' - return: intersection point: tuple[tuple] - ''' - left_fov_line, right_fov_line = fov_line - left_intersection = tuple(LineString(left_fov_line).intersection(segmentpolygon).coords) - right_intersection = tuple(LineString(right_fov_line).intersection(segmentpolygon).coords) - return left_intersection + right_intersection - -def in_frame(transformed_point: np.array, frame_size: Tuple[int, int]): - return transformed_point[0] > 0 and transformed_point[0] < frame_size[0] and \ - transformed_point[1] < frame_size[1] and transformed_point[1] > 0 - -def in_view( - road_point: Tuple, - ego_translation: List[float], - fov_lines: Tuple[Tuple[Tuple[float, float], Tuple[float, float]], - Tuple[Tuple[float, float], Tuple[float, float]]]) -> bool: - ''' - return if the road_point is on the left of the left fov line and - on the right of the right fov line - ''' - left_fov_line, right_fov_line = fov_lines - Ax, Ay = ego_translation[:2] - Mx, My = road_point - left_fov_line_x, left_fov_line_y = left_fov_line[1] - right_fov_line_x, right_fov_line_y = right_fov_line[1] - return (left_fov_line_x - Ax) * (My - Ay) - (left_fov_line_y - Ay) * (Mx - Ax) <= 0 and \ - (right_fov_line_x - Ax) * (My - Ay) - (right_fov_line_y - Ay) * (Mx - Ax) >= 0 - -def construct_mapping( - decoded_road_segment: Tuple[Tuple[float, float]], - frame_size: Tuple[int, int], - fov_lines: Tuple[Tuple[Tuple[float, float], Tuple[float, float]], - Tuple[Tuple[float, float], Tuple[float, float]]], - segmentid: str, - segmenttype: str, - segmentheading: float, - contains_ego: bool, - ego_config: Dict[str, Any]) -> Tuple[bool, cam_segment_mapping]: - """ - Given current road segment - determine whether add it to the mapping - """ - ego_translation = ego_config['egoTranslation'][:2] - if contains_ego: - decoded_road_segment += (ego_translation,) - deduced_cam_segment = tuple(map( - lambda point: transformation(tuple(point)+(0,), ego_config), decoded_road_segment)) - assert len(deduced_cam_segment) == len(decoded_road_segment) - keep_cam_segment_point = [] - keep_road_segment_point = [] - 
for i in range(len(decoded_road_segment)): - current_cam_point = deduced_cam_segment[i] - current_road_point = decoded_road_segment[i] - if in_frame(current_cam_point, frame_size) and \ - in_view(current_road_point, ego_translation, fov_lines): - keep_cam_segment_point.append(current_cam_point) - keep_road_segment_point.append(current_road_point) - return (len(keep_cam_segment_point) > 2 \ - and Polygon(tuple(keep_cam_segment_point)).area > 100, - cam_segment_mapping( - keep_cam_segment_point, - roadSegmentInfo( - segmentid, - keep_road_segment_point, - segmenttype, - segmentheading, - contains_ego, - ego_config, - fov_lines - )) - ) - -def map_imgsegment_roadsegment(ego_config: Dict[str, Any], - frame_size=(1600, 900)) -> List[cam_segment_mapping]: - ''' - FULL ALGORITHM: - road_segment_info: {segmentid, - segmentpolygon, - segment_type, - ego_in_segment?, - ego_config, - fov_line, - facing_relative(ego_heading, segment_direction)} - 1. Get the lines of fov in world coord - 2. For road_segment in search_space: - intersect_point = intersection(fov_lines, road_segment) - cam_segment = filter([road_segment_world_coord, intersection_point], - lambda point: point in view) - if cam_segment is valid: - append_to_mapping({cam_segments:road_segment}) - ''' - fov_lines = get_fov_lines(ego_config) - start_time = time.time() - search_space = construct_search_space(ego_config, view_distance=100) - mapping = [] - for road_segment in search_space: - segmentid, segmentpolygon, segmenttype, segmentheading, contains_ego = road_segment - segmentpolygon_points = tuple(zip(*Geometry(segmentpolygon).exterior.shapely.xy)) - segmentpolygon = Polygon(segmentpolygon_points) - - road_filter = all(map( - lambda point: not in_view( - point, ego_config['egoTranslation'], fov_lines), - segmentpolygon_points)) - if road_filter: - continue - - intersection_points = tuple( - intersection(fov_lines, segmentpolygon)) - decoded_road_segment = segmentpolygon_points+intersection_points - - valid_mapping, current_mapping = construct_mapping( - decoded_road_segment, frame_size, fov_lines, segmentid, - segmenttype, segmentheading, contains_ego, ego_config) - if valid_mapping: - mapping.append(current_mapping) - - print('total mapping time: ', time.time() - start_time) - return mapping - -def visualization(test_img_path: str, test_config: Dict[str, Any], mapping: Tuple): - """ - visualize the mapping from camera segment to road segment - for testing only - """ - from moviepy.editor import VideoClip - from moviepy.video.io.bindings import mplfig_to_npimage - frame = cv2.imread(test_img_path) - fig, axs = plt.subplots() - axs.set_aspect('equal', 'datalim') - x_ego, y_ego = test_config['egoTranslation'][:2] - axs.plot(x_ego, y_ego, color='green', marker='o', markersize=5) - colormap = plt.cm.get_cmap('hsv', len(mapping)) - i = 0 - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - display_video = cv2.VideoWriter('in_videw_test_display.avi',fourcc, 1, (1600, 900)) - for cam_segment, road_segment_info in mapping: - color = colormap(i) - xs = [point[0] for point in road_segment_info.segment_polygon] - ys = [point[1] for point in road_segment_info.segment_polygon] - segmenttype = road_segment_info.segment_type - axs.fill(xs, ys, alpha=0.5, fc=color, ec='none') - axs.text(np.mean(np.array(xs)), np.mean(np.array(ys)), - ','.join(segmenttype) if segmenttype and ('lane' in segmenttype or 'intersection' in segmenttype) else '') - current_plt = mplfig_to_npimage(fig) - i += 1 - - fov_lines = road_segment_info.fov_lines - axs.plot([p[0] for p in 
fov_lines[0]], [p[1] for p in fov_lines[0]], color='red', marker='o', markersize=2) - axs.plot([p[0] for p in fov_lines[1]], [p[1] for p in fov_lines[1]], color='red', marker='o', markersize=2) - - display_frame = frame.copy() - cv2.polylines(display_frame, [np.array(cam_segment, np.int32).reshape((-1, 1, 2))], True, (0, 255, 0), 2) - display_frame[:current_plt.shape[0], :current_plt.shape[1]] = current_plt - display_video.write(display_frame) - - display_video.release() - -if __name__ == '__main__': - test_img_path = os.path.join(data_path, test_img) - test_config = fetch_camera_config( - test_img, - database) - mapping = map_imgsegment_roadsegment(test_config) - visualization(test_img_path, test_config, mapping) \ No newline at end of file diff --git a/optimized_ingestion/__main__.py b/optimized_ingestion/__main__.py index d1738f27..2ae81122 100644 --- a/optimized_ingestion/__main__.py +++ b/optimized_ingestion/__main__.py @@ -6,7 +6,9 @@ from .payload import Payload from .pipeline import Pipeline from .stages.decode_frame import DecodeFrame +from .stages.detection_2d.yolo_detection import YoloDetection from .stages.filter_car_facing_sideway import FilterCarFacingSideway +from .stages.tracking_2d.from_detection import FromDetection from .stages.tracking_2d.tracking_2d import Tracking2D from .stages.tracking_3d.from_2d_and_road import From2DAndRoad from .stages.tracking_3d.tracking_3d import Tracking3DResult @@ -55,20 +57,27 @@ def default(self, o): if __name__ == "__main__": pipeline = Pipeline() - # pipeline \ - # .add_filter(filter=InView(distance=10, segment_type="intersection")) \ - # .add_filter(filter=Stopped(min_stopped_frames=2, stopped_threshold=1.0)) - pipeline \ - .add_filter(filter=DecodeFrame()) \ - .add_filter(filter=Tracking2D()) - pipeline \ - .add_filter(filter=From2DAndRoad()) - # # pipeline \ - # .add_filter(filter=DepthEstimation()) \ - # .add_filter(filter=From2DAndDepth()) - + # pipeline.add_filter(filter=InView(distance=10, segment_type="intersection")) + # pipeline.add_filter(filter=Stopped(min_stopped_frames=2, stopped_threshold=1.0)) + pipeline.add_filter(filter=DecodeFrame()) + # pipeline.add_filter(filter=Tracking2D()) + pipeline.add_filter(filter=YoloDetection()) + pipeline.add_filter(filter=FromDetection()) + pipeline.add_filter(filter=From2DAndRoad()) + # pipeline.add_filter(filter=DepthEstimation()) + # pipeline.add_filter(filter=From2DAndDepth()) pipeline.add_filter(filter=FilterCarFacingSideway()) + pbase = Pipeline() + pbase.add_filter(filter=DecodeFrame()) + + p1 = Pipeline() + p1.add_filter(filter=YoloDetection()) + p1.add_filter(filter=FromDetection()) + + p2 = Pipeline() + p2.add_filter(filter=Tracking2D()) + if NUSCENES_PROCESSED_DATA in os.environ: DATA_DIR = os.environ[NUSCENES_PROCESSED_DATA] else: @@ -77,8 +86,8 @@ def default(self, o): videos = pickle.load(f) for name, video in videos.items(): - # if name not in BOSTON_VIDEOS: - # continue + if name not in BOSTON_VIDEOS: + continue print(name) frames = Video( diff --git a/optimized_ingestion/camera_config.py b/optimized_ingestion/camera_config.py index a1e64035..4c29ac47 100644 --- a/optimized_ingestion/camera_config.py +++ b/optimized_ingestion/camera_config.py @@ -62,12 +62,11 @@ def frame_num(self) -> float: @property def camera_translation(self) -> Float3: - return self._data[1:4].tolist() + return tuple(self._data[1:4].tolist()) @property - def camera_rotation(self) -> Float4: - rot = Quaternion(self._data[4:8]).unit - return (rot[0], rot[1], rot[2], rot[3]) + def camera_rotation(self) -> 
"Quaternion": + return Quaternion(self._data[4:8]).unit @property def camera_intrinsic(self) -> Float33: @@ -75,12 +74,11 @@ def camera_intrinsic(self) -> Float33: @property def ego_translation(self) -> Float3: - return self._data[17:20].tolist() + return tuple(self._data[17:20].tolist()) @property - def ego_rotation(self) -> Float4: - rot = Quaternion(self._data[20:24]).unit - return (rot[0], rot[1], rot[2], rot[3]) + def ego_rotation(self) -> "Quaternion": + return Quaternion(self._data[20:24]).unit @property def camera_heading(self) -> float: @@ -95,19 +93,21 @@ def road_direction(self) -> float: return self._data[26].item() def __iter__(self): - yield self.camera_id - yield self.frame_id - yield self.frame_num - yield self.filename - yield self.camera_translation - yield self.camera_rotation - yield self.camera_intrinsic - yield self.ego_translation - yield self.ego_rotation - yield self.timestamp - yield self.camera_heading - yield self.ego_heading - yield self.road_direction + return iter([ + self.camera_id, + self.frame_id, + self.frame_num, + self.filename, + self.camera_translation, + self.camera_rotation, + self.camera_intrinsic, + self.ego_translation, + self.ego_rotation, + self.timestamp, + self.camera_heading, + self.ego_heading, + self.road_direction, + ]) def interpolate(f1: CameraConfig, f2: CameraConfig, timestamp: datetime): diff --git a/optimized_ingestion/depth_notebook.ipynb b/optimized_ingestion/depth_notebook.ipynb index df61a87f..159f3f10 100644 --- a/optimized_ingestion/depth_notebook.ipynb +++ b/optimized_ingestion/depth_notebook.ipynb @@ -148,7 +148,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.8.8 ('base')", + "display_name": "Python 3.10.6 ('apperception')", "language": "python", "name": "python3" }, @@ -162,12 +162,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.8" + "version": "3.10.6" }, "orig_nbformat": 4, "vscode": { "interpreter": { - "hash": "5c9f2372a2bfaf539cf701a38e7f23ab828911ee177c2e7bc9c32aa1f4b546df" + "hash": "5560535bf452f11618fb2fa3db6748d62f4c71741dba4b45c070403e5baa92cd" } } }, diff --git a/optimized_ingestion/payload.py b/optimized_ingestion/payload.py index 29bf7a17..8f579547 100644 --- a/optimized_ingestion/payload.py +++ b/optimized_ingestion/payload.py @@ -55,7 +55,7 @@ def filter(self, filter: "Stage"): metadata = {**self.metadata, **metadata} print(f" filtered frames: {sum(keep) * 100.0 / len(keep)}%") - print("\n".join(_split_keep(keep))) + print("\n".join(_split_keep(keep, 100))) return Payload(self.video, keep, metadata) diff --git a/optimized_ingestion/stages/decode_frame.py b/optimized_ingestion/stages/decode_frame/decode_frame.py similarity index 93% rename from optimized_ingestion/stages/decode_frame.py rename to optimized_ingestion/stages/decode_frame/decode_frame.py index 1e3903ec..54420127 100644 --- a/optimized_ingestion/stages/decode_frame.py +++ b/optimized_ingestion/stages/decode_frame/decode_frame.py @@ -3,12 +3,12 @@ from tqdm import tqdm from typing import TYPE_CHECKING, Dict, List, Optional, Tuple -from .stage import Stage +from ..stage import Stage if TYPE_CHECKING: import numpy.typing as npt - from ..payload import Payload + from ...payload import Payload class DecodeFrame(Stage): diff --git a/optimized_ingestion/stages/decode_frame/parallel_decode_frame.py b/optimized_ingestion/stages/decode_frame/parallel_decode_frame.py new file mode 100644 index 00000000..13dfcc7c --- /dev/null +++ 
b/optimized_ingestion/stages/decode_frame/parallel_decode_frame.py @@ -0,0 +1,58 @@ +import cv2 +import multiprocessing +from bitarray import bitarray +from functools import reduce +from multiprocessing import Pool +from tqdm import tqdm +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple + +from .decode_frame import DecodeFrame + +if TYPE_CHECKING: + import numpy.typing as npt + + from ...payload import Payload + + +def decode(args: "Tuple[str, int, int]"): + videofile, start, end = args + cap = cv2.VideoCapture(videofile) + cap.set(cv2.CAP_PROP_POS_FRAMES, start) + out: "List[npt.NDArray]" = [] + for _ in range(start, end): + ret, frame = cap.read() + if not ret: + break + out.append(frame) + cap.release() + assert len(out) == end - start + return out, start, end + + +class ParallelDecodeFrame(DecodeFrame): + def _run(self, payload: "Payload") -> "Tuple[Optional[bitarray], Optional[Dict[str, list]]]": + metadata: "List[npt.NDArray]" = [] + + n_cpus = multiprocessing.cpu_count() + n_frames = len(payload.video) + + q, mod = divmod(n_frames, n_cpus) + frames_per_cpu = [q + (i < mod) for i in range(n_cpus)] + + def _r(acc: "Tuple[int, List[Tuple[int, int]]]", frames: int): + start, arr = acc + end = start + frames + return (end, arr + [(start, end)]) + + frame_slices = reduce(_r, frames_per_cpu, (0, []))[1] + + with Pool(n_cpus) as pool: + inputs = ((payload.video.videofile, start, end) for start, end in frame_slices) + out = [*tqdm(pool.imap_unordered(decode, inputs), total=n_cpus)] + for o, _, _ in sorted(out, key=lambda x: x[1]): + metadata.extend(o) + cv2.destroyAllWindows() + + assert len(metadata) == len(payload.video), (len(metadata), len(payload.video), [(s, e, len(o)) for o, s, e in sorted(out, key=lambda x: x[1])]) + + return None, {self.classname(): metadata} diff --git a/optimized_ingestion/stages/detection_2d/detection_2d.py b/optimized_ingestion/stages/detection_2d/detection_2d.py new file mode 100644 index 00000000..f7c3df55 --- /dev/null +++ b/optimized_ingestion/stages/detection_2d/detection_2d.py @@ -0,0 +1,5 @@ +from ..stage import Stage + + +class Detection2D(Stage): + pass diff --git a/optimized_ingestion/stages/detection_2d/yolo_detection.py b/optimized_ingestion/stages/detection_2d/yolo_detection.py new file mode 100644 index 00000000..e7d0663c --- /dev/null +++ b/optimized_ingestion/stages/detection_2d/yolo_detection.py @@ -0,0 +1,175 @@ +import os +from pathlib import Path +from tqdm import tqdm +from typing import TYPE_CHECKING, Iterable, Iterator, List, NamedTuple + +# limit the number of cpus used by high performance libraries +os.environ["OMP_NUM_THREADS"] = "1" +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["VECLIB_MAXIMUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" + +import cv2 +import numpy as np +import numpy.typing as npt +import torch +from yolo_tracker.yolov5.utils.augmentations import letterbox +from yolo_tracker.yolov5.utils.general import (check_img_size, + non_max_suppression, + scale_boxes) +from yolo_tracker.yolov5.utils.torch_utils import select_device + +from ...stages.decode_frame.decode_frame import DecodeFrame +from .detection_2d import Detection2D + +if TYPE_CHECKING: + from yolo_tracker.yolov5.models.common import DetectMultiBackend + + from ...payload import Payload + from ...stages.stage import StageOutput + + +FILE = Path(__file__).resolve() +APPERCEPTION = FILE.parent.parent.parent.parent.parent +WEIGHTS = APPERCEPTION / "weights" +torch.hub.set_dir(str(WEIGHTS)) + + 
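+# Construction sketch (illustrative only; the argument values below are this
+# class's own defaults, and in practice the stage is registered on a
+# Pipeline, e.g. pipeline.add_filter(filter=YoloDetection())):
+#
+#   detector = YoloDetection(conf_thres=0.25, iou_thres=0.45, max_det=1000)
+#   _, metadata = detector._run(payload)
+#   # metadata[detector.classname()] has one entry per frame: a (det, names)
+#   # pair for kept frames, an empty array for dropped ones.
+
+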
+class YoloDetection(Detection2D): + def __init__( + self, + half: bool = False, + conf_thres: float = 0.25, + iou_thres: float = 0.45, + max_det: int = 1000, + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + ): + self.device = select_device("") + self.model: "DetectMultiBackend" = torch.hub.load('ultralytics/yolov5', 'yolov5s').model.to(self.device) + stride, self.pt = self.model.stride, self.model.pt + self.imgsz = check_img_size((640, 640), s=stride) + self.half = half + self.conf_thres = conf_thres + self.iou_thres = iou_thres + self.max_det = max_det + self.classes = classes + self.agnostic_nms = agnostic_nms + self.augment = augment + + def _run(self, payload: "Payload") -> "StageOutput": + with torch.no_grad(): + names: "List[str]" = self.model.names + dataset = LoadImages(payload, img_size=self.imgsz, auto=self.pt) + self.model.eval() + self.model.warmup(imgsz=(1, 3, *self.imgsz)) # warmup + metadata: "List[npt.NDArray]" = [] + for frame_idx, im, im0s in tqdm(dataset): + if not payload.keep[frame_idx]: + metadata.append(np.ndarray([])) + continue + # t1 = time_sync() + im = torch.from_numpy(im).to(self.device) + im = im.half() if self.half else im.float() + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + pred = self.model(im, augment=self.augment) + + # Apply NMS + pred = non_max_suppression( + pred, + self.conf_thres, + self.iou_thres, + self.classes, + self.agnostic_nms, + max_det=self.max_det + ) + + # Process detections + assert isinstance(pred, list), type(pred) + assert len(pred) == 1, len(pred) + det = pred[0] + assert isinstance(det, torch.Tensor), type(det) + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0s.shape).round() + metadata.append((det, names)) + return None, {self.classname(): metadata} + + +class ImageOutput(NamedTuple): + frame_idx: int + image: "npt.NDArray" + original_image: "npt.NDArray" + # path: str + + +class LoadImages(Iterator[ImageOutput], Iterable[ImageOutput]): + # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` + def __init__(self, payload: "Payload", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + + self.img_size = img_size + self.stride = stride + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + self._new_video(payload.video.videofile) # new video + self.keep = payload.keep + + images = DecodeFrame.get(payload) + assert images is not None + self.images = images + + def __iter__(self): + self.count = 0 + return self + + def __next__(self) -> "ImageOutput": + if self.frame >= self.frames: + raise StopIteration + + # Read video + self.mode = 'video' + im0 = self.images[self.frame] + assert isinstance(im0, np.ndarray) + + frame_idx = self.frame + self.frame += self.vid_stride + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + # s = f'video {self.count + 1}/{self.len} ({self.frame}/{self.frames}): ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + # return frame_idx, im, im0, s + return ImageOutput(frame_idx, im, im0) + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.len = int(self.frames / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.len diff --git a/optimized_ingestion/stages/detection_estimation/__init__.py b/optimized_ingestion/stages/detection_estimation/__init__.py new file mode 100644 index 00000000..ff2980a2 --- /dev/null +++ b/optimized_ingestion/stages/detection_estimation/__init__.py @@ -0,0 +1,154 @@ +import logging +import numpy.typing as npt +import time +from bitarray import bitarray +from shapely.geometry import Polygon +from tqdm import tqdm +from typing import List, Tuple + +from ...camera_config import CameraConfig +from ...payload import Payload +from ...video import Video +from ..detection_2d.detection_2d import Detection2D +from ..detection_2d.yolo_detection import YoloDetection +from ..stage import Stage, StageOutput +from .detection_estimation import (DetectionInfo, construct_all_detection_info, + detection_to_img_segment, + generate_sample_plan, obj_detection, + samplePlan) +from .segment_mapping import CameraSegmentMapping, map_imgsegment_roadsegment +from .utils import trajectory_3d + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.WARN) + + +class DetectionEstimation(Stage): + def _run(self, payload: "Payload") -> "StageOutput": + if Detection2D.get(payload) is None: + raise Exception() + + ego_trajectory = [trajectory_3d(f.ego_translation, f.timestamp) for f in payload.video] + return dry_run(payload, 0, ego_trajectory) + + +def generate_sample_plan_once( + video: "Video", + ego_config: 
"CameraConfig", + mapping: "List[CameraSegmentMapping]", + next_frame_num: "int", + car_loc3d=None, + target_car_detection=None, + all_detection_info: "List[DetectionInfo] | None" = None +) -> "Tuple[samplePlan, None]": + # if all_detection_info is None: + # assert target_car_detection and car_loc3d + # x,y,w,h = list(map(int, target_car_detection)) + # car_loc2d = (x, y+h//2) + # car_bbox2d = (x-w//2,y-h//2,x+w//2,y+h//2) + # car_bbox3d = None + # all_detections = [] + # all_detections.append(obj_detection('car_1', car_loc3d, car_loc2d, car_bbox3d, car_bbox2d)) + # all_detection_info = construct_all_detection_info(cam_segment_mapping, ego_trajectory, ego_config, all_detections) + assert all_detection_info is not None + if all_detection_info: + logger.info(all_detection_info[0].road_type) + next_sample_plan = generate_sample_plan(video, next_frame_num, all_detection_info, 50) + # next_frame = None + next_sample_frame_info = next_sample_plan.get_next_sample_frame_info() + if next_sample_frame_info: + next_sample_frame_name, next_sample_frame_num, _ = next_sample_frame_info + logger.info(f"next frame name {next_sample_frame_name}") + logger.info(f"next frame num {next_sample_frame_num}") + logger.info(f"Action {next_sample_plan.action}") + # TODO: should not read next frame -> get the next frame from frames.pickle + # next_frame = cv2.imread(test_img_base_dir+next_sample_frame_name) + # cv2.imshow("next_frame", next_frame) + # cv2.waitKey(0) + # cv2.destroyAllWindows() + return next_sample_plan, None + + +def construct_estimated_all_detection_info( + detections: "npt.NDArray", + cam_segment_mapping: "List[CameraSegmentMapping]", + ego_config: "CameraConfig", + ego_trajectory: "List[trajectory_3d]" +) -> "List[DetectionInfo]": + all_detections = [] + for det in detections: + bbox = det[:4] + obj_cls = det[5] + x, y, x2, y2 = list(map(int, bbox)) + w = x2 - x + h = y2 - y + car_loc2d = (x + w // 2, y + h // 2) + # print(car_loc2d) + car_bbox2d = ((x - w // 2, y - h // 2), (x + w // 2, y + h // 2)) + car_bbox3d = None + ### TODO: replace the following estimation of car_loc3d with the depth estimation + ### algorithm that converts 2d loc to 3d loc + estimate_3d = detection_to_img_segment(car_loc2d, cam_segment_mapping) + if estimate_3d and estimate_3d.road_segment_info.segment_type in ['lane', 'laneSection']: + car_loc3d = tuple(Polygon(estimate_3d.road_segment_info.segment_polygon).centroid.coords) + # logger.info(tuple(car_loc3d)) + all_detections.append(obj_detection('car_1', car_loc3d, car_loc2d, car_bbox3d, car_bbox2d)) + # logger.info("all_detections", all_detections) + all_detection_info = construct_all_detection_info(cam_segment_mapping, ego_config, ego_trajectory, all_detections) + return all_detection_info + + +def dry_run( + payload: "Payload", + start_frame_num: "int", + ego_trajectory: "List[trajectory_3d]", +) -> "Tuple[bitarray, None]": + skipped_frame_num = [] + next_frame_num = start_frame_num + action_type_counts = {} + start_time = time.time() + total_detection_time = 0 + total_sample_plan_time = 0 + for i in tqdm(range(len(payload.video) - 1)): + current_ego_config = payload.video[i] + if i != next_frame_num: + skipped_frame_num.append(i) + continue + next_frame_num = i + 1 + cam_segment_mapping = map_imgsegment_roadsegment(current_ego_config) + logger.info(f"mapping length {len(cam_segment_mapping)}") + start_detection_time = time.time() + dets = YoloDetection.get(payload) + assert dets is not None + det = dets[i] + all_detection_info = 
construct_estimated_all_detection_info(det[0], cam_segment_mapping, current_ego_config, ego_trajectory)
+        total_detection_time += time.time() - start_detection_time
+        start_generate_sample_plan = time.time()
+        next_sample_plan, _ = generate_sample_plan_once(payload.video, current_ego_config, cam_segment_mapping, next_frame_num, all_detection_info=all_detection_info)
+        total_sample_plan_time += time.time() - start_generate_sample_plan
+        next_action_type = next_sample_plan.get_action_type()
+        if next_action_type not in action_type_counts:
+            action_type_counts[next_action_type] = 1
+        else:
+            action_type_counts[next_action_type] += 1
+        next_frame_num = next_sample_plan.get_next_frame_num(next_frame_num)
+    logger.info(f"sorted_ego_config_length {len(payload.video)}")
+    logger.info(f"number of skipped {len(skipped_frame_num)}")
+    logger.info(skipped_frame_num)
+    logger.info(action_type_counts)
+    total_run_time = time.time() - start_time
+    num_runs = len(payload.video) - len(skipped_frame_num)
+    logger.info(f"total_run_time {total_run_time}")
+    logger.info(f"avg run time {total_run_time/num_runs}")
+    logger.info(f"total_detection_time {total_detection_time}")
+    logger.info(f"avg detection time {total_detection_time/num_runs}")
+    logger.info(f"total_generate_sample_plan_time {total_sample_plan_time}")
+    logger.info(f"avg generate_sample_plan time {total_sample_plan_time/num_runs}")
+
+    keep = bitarray(len(payload.video))
+    keep[:] = 1
+    for f in skipped_frame_num:
+        keep[f] = 0
+
+    return keep, None
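+
+
+# dry_run returns a bitarray over all frames with the skipped indices zeroed
+# out; the enclosing Payload/Stage machinery then keeps exactly the frames
+# that the successive sample plans chose to process.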
diff --git a/optimized_ingestion/stages/detection_estimation/detection_estimation.py b/optimized_ingestion/stages/detection_estimation/detection_estimation.py
new file mode 100644
index 00000000..2c01b1a9
--- /dev/null
+++ b/optimized_ingestion/stages/detection_estimation/detection_estimation.py
@@ -0,0 +1,280 @@
+"""Detection Estimation Module
+
+This module is responsible for estimating the object detections throughout the whole video.
+The sampling algorithm skips frames based on the current frame's geo information.
+We estimate objects' metadata based only on the sampled frames.
+
+Usage example:
+    from detection_estimation import detection_estimation
+    detection_estimation(sorted_ego_config, video, start_frame_num, view_distance=50, img_base_dir='')
+
+TODO:
+    1. incorporate yolo detection: either merge this module into the tracking pipeline
+       or call yolo detection in this module
+    2. save the detection and tracking result in the sample plan object
+
+"""
+
+import datetime
+import os
+import sys
+from dataclasses import dataclass, field
+from typing import Any, List, Literal, NamedTuple
+
+sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir)))
+
+import numpy as np
+import numpy.typing as npt
+
+from ...camera_config import CameraConfig
+from ...video import Video
+from .sample_plan_algorithms import Action, get_sample_action_alg
+from .segment_mapping import (CameraSegmentMapping, RoadSegmentInfo,
+                              map_imgsegment_roadsegment)
+from .utils import (Float2, Float3, Float22, compute_area, compute_distance,
+                    detection_to_img_segment, get_ego_trajectory,
+                    get_largest_segment, get_segment_line,
+                    relative_direction_to_ego, trajectory_3d)
+
+
+class obj_detection(NamedTuple):
+    id: str
+    car_loc3d: "Float3"
+    car_loc2d: "Float2"
+    car_bbox3d: Any
+    car_bbox2d: "Float22"
+
+
+@dataclass
+class DetectionInfo:
+    obj_id: str
+    frame_segment: "List[npt.NDArray[np.floating]]"
+    road_segment_info: "RoadSegmentInfo"
+    car_loc3d: "Float3"
+    car_loc2d: "Float2"
+    car_bbox3d: Any
+    car_bbox2d: "Float22"
+    ego_trajectory: "List[trajectory_3d]"
+    ego_config: "CameraConfig"
+    ego_road_segment_info: "RoadSegmentInfo"
+    timestamp: "datetime.datetime" = field(init=False)
+    road_type: str = field(init=False)
+    distance: float = field(init=False)
+    segment_area_2d: float = field(init=False)
+    relative_direction: "Literal['same_direction', 'opposite_direction']" = field(init=False)
+    priority: float = field(init=False)
+
+    def __post_init__(self):
+        timestamp = self.ego_config.timestamp
+        assert isinstance(timestamp, datetime.datetime)
+        self.timestamp = timestamp
+        self.road_type = self.road_segment_info.segment_type
+
+        self.compute_geo_info()
+        self.compute_priority()
+
+    def compute_geo_info(self):
+        self.distance = (compute_distance(self.car_loc3d,
+                                          self.ego_config.ego_translation)
+                         if self.ego_config is not None else 0)
+        self.segment_area_2d = (compute_area([*self.car_bbox2d[0],
+                                              *self.car_bbox2d[1]])
+                                if self.car_bbox2d is not None else 0)
+
+        ego_heading = self.ego_config.ego_heading if self.ego_config is not None else 0
+        assert isinstance(ego_heading, float)
+        self.segment_line, self.segment_heading = get_segment_line(self.road_segment_info,
+                                                                   self.car_loc3d)
+        if self.segment_heading is None:
+            self.relative_direction = None
+        else:
+            self.relative_direction = relative_direction_to_ego(
+                self.segment_heading, ego_heading)
+
+    def compute_priority(self):
+        self.priority = self.segment_area_2d / self.distance
+
+    def generate_single_sample_action(self, view_distance: float = 50.):
+        """Generate a sample plan for the given detection of a single car
+
+        Condition 1: detected car is driving towards ego, opposite direction
+        Condition 2: detected car is driving along the same direction as ego
+        Condition 3: detected car and ego are driving into each other,
+                     i.e. one to north, one from west to it
+        Condition 4: detected car and ego are driving away from each other,
+                     i.e. one to north, one from it to east
+        Return: the detection's priority and a single sample action
+                (None if no sampling algorithm applies)
+        """
+        sample_action_alg = get_sample_action_alg(self.relative_direction)
+        if sample_action_alg is not None:
+            return self.priority, sample_action_alg(self, view_distance)
+        return self.priority, None
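+
+
+# Worked example of the priority heuristic above (numbers are illustrative,
+# not from the source): a detection whose 2D box spans 58 x 46 px has
+# segment_area_2d = 2668; at 40 m from ego its priority is 2668 / 40 = 66.7,
+# while the same box at 80 m scores 33.35, so nearer and larger detections
+# dominate the choice of the next sample action.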
+
+
+# TODO
+@dataclass
+class samplePlan:
+    video: "Video"
+    next_frame_num: int
+    all_detection_info: "List[DetectionInfo]"
+    metadata: Any = None
+    current_priority: "float | None" = None
+    action: "Action | None" = None
+
+    def generate_sample_plan(self, view_distance: float = 50.0):
+        assert self.all_detection_info is not None
+        for detection_info in self.all_detection_info:
+            priority, sample_action = detection_info.generate_single_sample_action(view_distance)
+            if sample_action is not None:
+                self.add(priority, sample_action)
+
+    def add(self, priority: float, sample_action: "Action", time_threshold: float = 0.5):
+        assert sample_action is not None
+        if sample_action.invalid_action:
+            return
+        # assert not sample_action.invalid_action
+
+        assert (self.action is None) == (self.current_priority is None)
+        if self.action is None or self.current_priority is None:
+            self.current_priority = priority
+            self.action = sample_action
+        else:
+            if sample_action.estimated_time < self.action.estimated_time:
+                if (priority >= self.current_priority
+                        or sample_action.estimated_time / self.action.estimated_time
+                        < time_threshold):
+                    self.current_priority = priority
+                    self.action = sample_action
+
+    def get_action(self):
+        return self.action
+
+    def get_action_type(self):
+        if self.action is None:
+            return None
+        return self.action.action_type
+
+    def get_next_sample_frame_info(self):
+        if self.action is None:
+            return None
+
+        nearest_index = None
+        min_diff = None
+        for i, config in enumerate(self.video):
+            timestamp = config.timestamp
+            diff = self.action.finish_time - timestamp
+            if diff.total_seconds() < 0:
+                diff = -diff
+            if min_diff is None or min_diff > diff:
+                min_diff = diff
+                nearest_index = i
+
+        return None, nearest_index, None
+
+    def get_next_frame_num(self, next_frame_num: int):
+        next_sample_frame_info = self.get_next_sample_frame_info()
+        if next_sample_frame_info:
+            _, next_sample_frame_num, _ = next_sample_frame_info
+            self.next_frame_num = max(next_sample_frame_num, next_frame_num)
+        return self.next_frame_num
+
+
+def yolo_detect(current_frame: str) -> "List[obj_detection]":
+    # TODO: return a list of obj_detection
+    # obj_detection : namedtuple('id', 'car_loc3d', 'car_loc2d', 'car_bbox3d',
+    #                            'car_bbox2d')
+    return []
+
+
+def construct_all_detection_info(
+    cam_segment_mapping: "List[CameraSegmentMapping]",
+    ego_config: "CameraConfig",
+    ego_trajectory: "List[trajectory_3d]",
+    all_detections: "List[obj_detection]"
+):
+    all_detection_info: "List[DetectionInfo]" = []
+    if len(all_detections) == 0:
+        return all_detection_info
+    ego_mapping = get_largest_segment(cam_segment_mapping)
+    if ego_mapping is None:
+        # for mapping in cam_segment_mapping:
+        #     cam_segment, road_segment_info = mapping
+        raise ValueError('Ego segment not included')
+
+    _, ego_road_segment_info = ego_mapping
+
+    for detection in all_detections:
+        obj_id, car_loc3d, car_loc2d, car_bbox3d, car_bbox2d = detection
+        related_mapping = detection_to_img_segment(car_loc2d, cam_segment_mapping)
+        if related_mapping is None:
+            continue
+        cam_segment, road_segment_info = related_mapping
+
+        detection_info = DetectionInfo(obj_id,
+                                       cam_segment,
+                                       road_segment_info,
+                                       car_loc3d,
+                                       car_loc2d,
+                                       car_bbox3d,
+                                       car_bbox2d,
+                                       ego_trajectory,
+                                       ego_config,
+                                       ego_road_segment_info)
+        all_detection_info.append(detection_info)
+
+    return all_detection_info
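+
+
+# Minimal flow sketch (names are from this module; a simplified version of
+# what dry_run in __init__.py does, not a drop-in implementation):
+#
+#   mapping = map_imgsegment_roadsegment(ego_config)
+#   infos = construct_all_detection_info(mapping, ego_config, ego_trajectory,
+#                                        all_detections)
+#   plan = generate_sample_plan(video, frame_num + 1, infos, view_distance=50)
+#   frame_num = plan.get_next_frame_num(frame_num + 1)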
+
+
+def generate_sample_plan(
+    video: "Video",
+    next_frame_num: int,
+    all_detection_info: "List[DetectionInfo]",
+    view_distance: float,
+):
+    ### The object detection with higher priority doesn't necessarily get sampled first;
+    # it is also based on the sample plan.
+    sample_plan = samplePlan(video, next_frame_num, all_detection_info)
+    sample_plan.generate_sample_plan(view_distance)
+    return sample_plan
+
+
+def detection_estimation(
+    sorted_ego_config: "List[CameraConfig]",
+    video: str,
+    start_frame_num: int,
+    view_distance: float = 50,
+    img_base_dir: str = ''
+):
+    """Estimate detections throughout the whole video
+
+    Args:
+        sorted_ego_config: a sorted list of ego_configs of the given video
+        video: the video name
+        start_frame_num: the frame number to start the sample
+        view_distance: the maximum view distance from ego
+        img_base_dir: the base directory of the images,
+                      TODO: deprecate later
+
+    Return: TODO metadata of the video including all object trajectories
+            and other useful information
+
+    """
+    # TODO: use camera configuration from the frames.pickle
+    ego_trajectory = get_ego_trajectory(video, sorted_ego_config)
+    next_frame_num = start_frame_num
+    for i in range(len(sorted_ego_config) - 1):
+        current_ego_config = sorted_ego_config[i]
+        if i != next_frame_num:
+            continue
+        next_frame_num = i + 1
+        assert isinstance(next_frame_num, int)
+        cam_segment_mapping = map_imgsegment_roadsegment(current_ego_config)
+        current_frame: str = img_base_dir + current_ego_config.filename
+        # construct_all_detection_info also needs the frame's detections;
+        # yolo_detect is still a stub above, so it currently returns [].
+        all_detection_info = construct_all_detection_info(
+            cam_segment_mapping, current_ego_config, ego_trajectory,
+            yolo_detect(current_frame))
+        next_sample_plan = generate_sample_plan(
+            video,
+            next_frame_num,
+            all_detection_info=all_detection_info,
+            view_distance=view_distance
+        )
+        next_frame_num = next_sample_plan.get_next_frame_num(next_frame_num)
diff --git a/optimized_ingestion/stages/detection_estimation/sample_plan_algorithms.py b/optimized_ingestion/stages/detection_estimation/sample_plan_algorithms.py
new file mode 100644
index 00000000..50407083
--- /dev/null
+++ b/optimized_ingestion/stages/detection_estimation/sample_plan_algorithms.py
@@ -0,0 +1,182 @@
+import datetime
+import logging
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, List, Literal
+
+from .utils import (OPPOSITE_DIRECTION, SAME_DIRECTION, Float2, Float3,
+                    ego_departure, meetup, time_to_exit_current_segment,
+                    time_to_exit_view, trajectory_3d)
+
+if TYPE_CHECKING:
+    from ...camera_config import CameraConfig
+    from .detection_estimation import DetectionInfo
+
+
+logger = logging.getLogger(__name__)
+
+
+"""
+Action:
+    trajectory so far
+    next timestamp to sample
+    next frame to sample and its frame num
+    Heuristic to sample
+"""
+
+EGO_EXIT_SEGMENT = 'ego_exit_segment'
+CAR_EXIT_SEGMENT = 'car_exit_segment'
+EXIT_VIEW = 'exit_view'
+MEET_UP = 'meet_up'
+EGO_STOP = 'ego_stop'
+OBJ_BASED_ACTION = [CAR_EXIT_SEGMENT, EXIT_VIEW, MEET_UP]
+
+ActionType = Literal['ego_exit_segment', 'car_exit_segment', 'exit_view', 'meet_up', 'ego_stop']
+
+
+@dataclass
+class Action:
+    start_time: "datetime.datetime"
+    finish_time: "datetime.datetime"
+    start_loc: "Float2 | Float3"  # TODO: should either be Float2 or Float3
+    end_loc: "Float2 | Float3"  # TODO: should either be Float2 or Float3
+    action_type: "ActionType"
+    target_obj_id: "str | None" = None
+    invalid_action: bool = field(init=False)
+    estimated_time: "datetime.timedelta" = 
field(init=False) + + def __post_init__(self): + self.invalid_action = self.finish_time < self.start_time + self.estimated_time = self.finish_time - self.start_time + if self.action_type and self.action_type in OBJ_BASED_ACTION: + assert self.target_obj_id is not None + + def __repr__(self): + return self.__str__() + + def __str__(self): + return f'''action type: {self.action_type}, + start time: {self.start_time}, + finish time: {self.finish_time}, + start loc: {self.start_loc}, + end loc: {self.end_loc} + estimated time: {self.estimated_time}''' + + +def ego_stop(ego_trajectory: "List[trajectory_3d]", ego_config: "CameraConfig"): + current_time = ego_config.timestamp + ego_loc = ego_config.ego_translation[:2] + _ego_stop, ego_departure_time, ego_departure_loc = ego_departure(ego_trajectory, current_time) + action = None + if _ego_stop: + action = Action(current_time, ego_departure_time, ego_loc, ego_departure_loc, action_type=EGO_STOP) + return _ego_stop, action + + +def ego_exit_current_segment(detection_info: "DetectionInfo", ego_trajectory: "trajectory_3d", ego_config: "CameraConfig"): + current_segment_info = detection_info.ego_road_segment_info + current_time = detection_info.timestamp + ego_loc = ego_config.ego_translation[:2] + exit_time, exit_point = time_to_exit_current_segment( + current_segment_info, current_time, ego_loc, ego_trajectory) + exit_action = Action(current_time, exit_time, ego_loc, exit_point, + action_type=EGO_EXIT_SEGMENT) + return exit_action + + +def car_exit_current_segment(detection_info: "DetectionInfo"): + """ + Assumption: detected car drives at max speed + """ + current_segment_info = detection_info.road_segment_info + current_time = detection_info.timestamp + car_loc = detection_info.car_loc3d + exit_time, exit_point = time_to_exit_current_segment(current_segment_info, current_time, car_loc) + exit_action = Action(current_time, exit_time, start_loc=car_loc, + end_loc=exit_point, action_type=CAR_EXIT_SEGMENT, + target_obj_id=detection_info.obj_id) + return exit_action + + +def car_meet_up_with_ego(detection_info: "DetectionInfo", ego_trajectory: "trajectory_3d", ego_config: "CameraConfig"): + current_time = detection_info.timestamp + car2_loc = detection_info.car_loc3d + car1_heading = ego_config.ego_heading + car2_heading = detection_info.road_segment_info.segment_heading + road_type = detection_info.road_type + car1_trajectory = ego_trajectory + ego_loc = tuple(ego_config.ego_translation) + meet_up_time, meetup_point = meetup(ego_loc, car2_loc, car1_heading, + car2_heading, road_type, current_time, car1_trajectory) + if meet_up_time < current_time: + return None + meet_up_action = Action(current_time, meet_up_time, start_loc=car2_loc, + end_loc=meetup_point, action_type=MEET_UP, + target_obj_id=detection_info.obj_id) + return meet_up_action + + +def car_exit_view(detection_info: "DetectionInfo", ego_trajectory: "trajectory_3d", ego_config: "CameraConfig", view_distance: float): + current_time = detection_info.timestamp + road_type = detection_info.road_type + ego_loc = ego_config.ego_translation + car_loc = detection_info.car_loc3d + car_heading = detection_info.road_segment_info.segment_heading + exit_view_point, exit_view_time = time_to_exit_view( + ego_loc, car_loc, car_heading, ego_trajectory, current_time, road_type, view_distance) + exit_view_action = Action(current_time, exit_view_time, start_loc=car_loc, + end_loc=exit_view_point, action_type=EXIT_VIEW, + target_obj_id=detection_info.obj_id) + return exit_view_action + + +def 
+def ego_by_pass_car(detection_info: "DetectionInfo") -> "Action":
+    # TODO: not implemented yet (see the commented-out call sites below)
+    raise NotImplementedError()
+
+
+def combine_sample_actions(sample_plan: "List[Action]"):
+    return min(sample_plan, key=lambda x: x.finish_time)
+
+
+def same_direction_sample_action(detection_info: "DetectionInfo", view_distance: float):
+    ego_trajectory = detection_info.ego_trajectory
+    ego_config = detection_info.ego_config
+    _ego_stop, ego_stop_action = ego_stop(ego_trajectory, ego_config)
+    if _ego_stop:
+        return ego_stop_action
+    ego_exit_segment_action = ego_exit_current_segment(detection_info, ego_trajectory, ego_config)
+    # logger.info(f'ego_exit_segment_action {ego_exit_segment_action}')
+    car_exit_segment_action = car_exit_current_segment(detection_info)
+    # logger.info(f'car_exit_segment_action {car_exit_segment_action}')
+    car_go_beyond_view_action = car_exit_view(
+        detection_info, ego_trajectory, ego_config, view_distance)
+    # logger.info(f'car_go_beyond_view_action {car_go_beyond_view_action}')
+    # ego_by_pass_car_action = ego_by_pass_car(detection_info, ego_trajectory, ego_config)
+    return combine_sample_actions([ego_exit_segment_action,
+                                   car_exit_segment_action,
+                                   car_go_beyond_view_action, ])
+    # ego_by_pass_car_action])
+
+
+def opposite_direction_sample_action(detection_info: "DetectionInfo", view_distance: float):
+    ego_trajectory = detection_info.ego_trajectory
+    ego_config = detection_info.ego_config
+    _ego_stop, ego_stop_action = ego_stop(ego_trajectory, ego_config)
+    if _ego_stop:
+        return ego_stop_action
+    ego_exit_segment_action = ego_exit_current_segment(detection_info, ego_trajectory, ego_config)
+    # logger.info(f'ego_exit_segment_action {ego_exit_segment_action}')
+    car_exit_segment_action = car_exit_current_segment(detection_info)
+    # logger.info(f'car_exit_segment_action {car_exit_segment_action}')
+    meet_ego_action = car_meet_up_with_ego(detection_info, ego_trajectory, ego_config)
+    # logger.info(f'meet_ego_action {meet_ego_action}')
+    # return car_exit_segment_action
+    # car_meet_up_with_ego may return None (meetup already in the past)
+    return combine_sample_actions([action
+                                   for action in (ego_exit_segment_action,
+                                                  car_exit_segment_action,
+                                                  meet_ego_action)
+                                   if action is not None])
+
+
+def get_sample_action_alg(relative_direction: "Literal['same_direction', 'opposite_direction']"):
+    if relative_direction == SAME_DIRECTION:
+        return same_direction_sample_action
+    elif relative_direction == OPPOSITE_DIRECTION:
+        return opposite_direction_sample_action
+    raise ValueError(f'unsupported relative direction: {relative_direction}')
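+
+# A minimal dispatch sketch (illustrative only; `di` stands in for a real
+# DetectionInfo and `rel_dir` for a precomputed SAME_DIRECTION/OPPOSITE_DIRECTION):
+#
+#     alg = get_sample_action_alg(rel_dir)
+#     next_action = alg(di, view_distance=50.0)
+#     # next_action.finish_time is the earliest candidate event, which bounds
+#     # how long the current detections can be reused before resampling.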
diff --git a/optimized_ingestion/stages/detection_estimation/segment_mapping.py b/optimized_ingestion/stages/detection_estimation/segment_mapping.py
new file mode 100644
index 00000000..fe62bac5
--- /dev/null
+++ b/optimized_ingestion/stages/detection_estimation/segment_mapping.py
@@ -0,0 +1,452 @@
+"""Map each road segment to the frame segment it appears in.
+
+For now we only fetch segments of type lane and intersection,
+except for the segment that contains the ego camera.
+
+Usage example:
+    from optimization_playground.segment_mapping import map_imgsegment_roadsegment
+    from apperception.utils import fetch_camera_config
+
+    test_config = fetch_camera_config(test_img, database)
+    mapping = map_imgsegment_roadsegment(test_config)
+"""
+
+import array
+import logging
+import math
+import numpy as np
+import numpy.typing as npt
+import os
+import pandas as pd
+import postgis
+import psycopg2
+import psycopg2.sql
+import time
+from plpygis import Geometry
+from shapely.geometry import LineString, Polygon
+from typing import List, NamedTuple, Tuple, Union
+
+from ...camera_config import CameraConfig
+
+# from pyquaternion import Quaternion
+pd.get_option("display.max_columns")
+
+from apperception.database import database
+
+# from apperception.utils import fetch_camera_config
+from .utils import line_to_polygon_intersection
+
+logger = logging.getLogger(__name__)
+
+data_path = '/home/yongming/workspace/research/apperception/v1.0-mini/'
+input_video_dir = os.path.join(data_path, 'sample_videos/')
+input_video_name = 'CAM_FRONT_n008-2018-08-27.mp4'
+input_date = input_video_name.split('_')[-1][:-4]
+test_img = 'samples/CAM_FRONT/n008-2018-08-01-15-52-19-0400__CAM_FRONT__1533153253912404.jpg'
+
+# CAMERA_COLUMNS = [
+#     "cameraId",
+#     "frameId",
+#     "frameNum",
+#     "filename",
+#     "cameraTranslation",
+#     "cameraRotation",
+#     "cameraIntrinsic",
+#     "egoTranslation",
+#     "egoRotation",
+#     "timestamp",
+#     "cameraHeading",
+#     "egoHeading",]
+# #     "roadDirection"]
+# CAM_CONFIG_QUERY = """SELECT * FROM Cameras
+#                       WHERE filename like 'samples/CAM_FRONT/%{date}%'
+#                       ORDER BY frameNum"""
+
+# _camera_config = database.execute(CAM_CONFIG_QUERY.format(date=input_date))
+# camera_config_df = pd.DataFrame(_camera_config, columns=CAMERA_COLUMNS)
+# camera_config_df
+
+SegmentPolygonWithHeading = Tuple[
+    str,
+    postgis.polygon.Polygon,
+    postgis.linestring.LineString,
+    Union[List[str], None],
+    Union[float, None],
+]
+SEGMENT_CONTAIN_QUERY = """
+SELECT
+    segmentpolygon.elementid,
+    segmentpolygon.elementpolygon,
+    segment.segmentline,
+    segmentpolygon.segmenttypes,
+    segment.heading
+FROM segmentpolygon
+    LEFT OUTER JOIN segment
+        ON segmentpolygon.elementid = segment.elementid
+WHERE ST_Contains(
+    segmentpolygon.elementpolygon,
+    {ego_translation}::geometry
+);
+"""
+
+SEGMENT_DWITHIN_QUERY = """
+SELECT
+    segmentpolygon.elementid,
+    segmentpolygon.elementpolygon,
+    segment.segmentline,
+    segmentpolygon.segmenttypes,
+    segment.heading
+FROM segmentpolygon
+    LEFT OUTER JOIN segment
+        ON segmentpolygon.elementid = segment.elementid
+WHERE ST_DWithin(
+        elementpolygon,
+        {start_segment}::geometry,
+        {view_distance}
+    ) AND
+    segmentpolygon.segmenttypes in (
+        ARRAY[\'lane\'],
+        ARRAY[\'intersection\'],
+        ARRAY[\'laneSection\']
+    );"""
+
+
+Float2 = Tuple[float, float]
+Float3 = Tuple[float, float, float]
+Float22 = Tuple[Float2, Float2]
+Segment = Tuple[str, postgis.polygon.Polygon, postgis.linestring.LineString,
+                Union[str, None], Union[float, None]]
+# a Segment plus the contains-ego flag appended by annotate_contain()
+AnnotatedSegment = Tuple[str, postgis.polygon.Polygon, postgis.linestring.LineString,
+                         Union[str, None], Union[float, None], bool]
+
+
+class RoadSegmentInfo(NamedTuple):
+    """
+    segment_id: unique segment id
+    segment_polygon: tuple of (x, y) coordinates
+    segment_line: tuple of (x, y) coordinates
+    segment_type: road segment type
+    segment_heading: road segment heading in degrees
+    contains_ego: whether the segment contains the ego camera
+    ego_config: ego config for the frame we ask info for
+    fov_lines: field of view lines
+    """
+    segment_id: int
+    segment_polygon: Polygon
+    segment_line: LineString
+    segment_type: str
+    segment_heading: float
+    contains_ego: bool
+    ego_config: "CameraConfig"
+    fov_lines: "Tuple[Float22, Float22]"
+
+
+# CameraSegmentMapping = namedtuple('cam_segment_mapping', ['cam_segment', 'road_segment_info'])
+class CameraSegmentMapping(NamedTuple):
+    cam_segment: "List[npt.NDArray[np.floating]]"
+    road_segment_info: "RoadSegmentInfo"
+
+
+def road_segment_contains(ego_config: "CameraConfig")\
+        -> List[SegmentPolygonWithHeading]:
+    query = psycopg2.sql.SQL(SEGMENT_CONTAIN_QUERY).format(
+        ego_translation=psycopg2.sql.Literal(postgis.point.Point(*ego_config.ego_translation[:2]))
+    )
+
+    return database.execute(query)
+
+
+def find_segment_dwithin(start_segment: 
"AnnotatedSegment", + view_distance=50) -> "List[SegmentPolygonWithHeading]": + _, start_segment_polygon, _, _, _, _ = start_segment + query = psycopg2.sql.SQL(SEGMENT_DWITHIN_QUERY).format( + start_segment=psycopg2.sql.Literal(start_segment_polygon), + view_distance=psycopg2.sql.Literal(view_distance) + ) + + return database.execute(query) + + +def reformat_return_segment(segments: "List[SegmentPolygonWithHeading]") -> "List[Segment]": + def _(x: "SegmentPolygonWithHeading") -> Segment: + i, polygon, line, types, heading = x + return ( + i, + polygon, + line, + types[0] if types is not None else None, + math.degrees(heading) if heading is not None else None, + ) + return list(map(_, segments)) + + +def annotate_contain( + segments: "List[Segment]", + contain: bool = False +) -> "List[AnnotatedSegment]": + return [s + (contain,) for s in segments] + + +class HashableAnnotatedSegment: + val: "AnnotatedSegment" + + def __init__(self, val: "AnnotatedSegment"): + self.val = val + + def __hash__(self): + h1 = hash(self.val[0]) + h2 = hash(self.val[1].wkt_coords) + h3 = hash(self.val[2].wkt_coords) if self.val[2] else '' + h4 = hash(self.val[3:]) + return hash((h1, h2, h3, h4)) + + def __eq__(self, __o: object) -> bool: + if not isinstance(__o, HashableAnnotatedSegment): + return False + return self.val == __o.val + + +def construct_search_space( + ego_config: "CameraConfig", + view_distance: float = 50. +) -> "List[AnnotatedSegment]": + ''' + road segment: (elementid, elementpolygon, segmenttype, heading, contains_ego?) + view_distance: in meters, default 50 because scenic standard + return: set(road_segment) + ''' + all_contain_segment = reformat_return_segment(road_segment_contains(ego_config)) + all_contain_segment = annotate_contain(all_contain_segment, contain=True) + start_segment = all_contain_segment[0] + + segment_within_distance = reformat_return_segment(find_segment_dwithin(start_segment, view_distance)) + segment_within_distance = annotate_contain(segment_within_distance, contain=False) + + return [ + s.val + for s in { + # To remove duplicates + *map(HashableAnnotatedSegment, all_contain_segment), + *map(HashableAnnotatedSegment, segment_within_distance) + } + ] + + +def get_fov_lines(ego_config: "CameraConfig", ego_fov: float = 70.) 
-> Tuple[Float22, Float22]: + ''' + return: two lines representing fov in world coord + ((lx1, ly1), (lx2, ly2)), ((rx1, ry1), (rx2, ry2)) + ''' + + # TODO: accuracy improvement: find fov in 3d -> project down to z=0 plane + ego_heading = ego_config.ego_heading + x_ego, y_ego = ego_config.ego_translation[:2] + left_degree = math.radians(ego_heading + ego_fov / 2 + 90) + left_fov_line = ((x_ego, y_ego), + (x_ego + math.cos(left_degree) * 50, + y_ego + math.sin(left_degree) * 50)) + right_degree = math.radians(ego_heading - ego_fov / 2 + 90) + right_fov_line = ((x_ego, y_ego), + (x_ego + math.cos(right_degree) * 50, + y_ego + math.sin(right_degree) * 50)) + return left_fov_line, right_fov_line + + +def intersection(fov_line: Tuple[Float22, Float22], segmentpolygon: Polygon): + ''' + return: intersection point: tuple[tuple] + ''' + left_fov_line, right_fov_line = fov_line + left_intersection = line_to_polygon_intersection(segmentpolygon, left_fov_line) + right_intersection = line_to_polygon_intersection(segmentpolygon, right_fov_line) + return left_intersection + right_intersection + + +def in_frame(transformed_point: np.array, frame_size: Tuple[int, int]): + return transformed_point[0] > 0 and transformed_point[0] < frame_size[0] and \ + transformed_point[1] < frame_size[1] and transformed_point[1] > 0 + + +def in_view( + road_point: "Float2", + ego_translation: "Float3", + fov_lines: Tuple[Float22, Float22] +) -> bool: + ''' + return if the road_point is on the left of the left fov line and + on the right of the right fov line + ''' + left_fov_line, right_fov_line = fov_lines + Ax, Ay = ego_translation[:2] + Mx, My = road_point + left_fov_line_x, left_fov_line_y = left_fov_line[1] + right_fov_line_x, right_fov_line_y = right_fov_line[1] + return (left_fov_line_x - Ax) * (My - Ay) - (left_fov_line_y - Ay) * (Mx - Ax) <= 0 and \ + (right_fov_line_x - Ax) * (My - Ay) - (right_fov_line_y - Ay) * (Mx - Ax) >= 0 + + +def world2pixel_factory(config: "CameraConfig"): + def world2pixel(point3d: "Float2") -> "npt.NDArray[np.floating]": + point = np.copy((*point3d, 0)) + + point -= config.camera_translation + point = np.dot(config.camera_rotation.inverse.rotation_matrix, point) + + view = np.array(config.camera_intrinsic) + viewpad = np.eye(4) + viewpad[: view.shape[0], : view.shape[1]] = view + + point = point.reshape((3, 1)) + point = np.concatenate((point, np.ones((1, 1)))) + point = np.dot(viewpad, point) + point = point[:3, :] + + point = point / point[2:3, :].repeat(3, 0).reshape(3, 1) + return point[:2, :] + return world2pixel + + +def construct_mapping( + decoded_road_segment: "List[Float2]", + frame_size: Tuple[int, int], + fov_lines: Tuple[Float22, Float22], + segmentid: str, + segmentline: LineString, + segmenttype: str, + segmentheading: float, + contains_ego: bool, + ego_config: "CameraConfig" +) -> "Union[CameraSegmentMapping, None]": + """ + Given current road segment + determine whether add it to the mapping + - segment that contains the ego + - segment that is larger than 100 pixel x pixel + """ + ego_translation = ego_config.ego_translation[:2] + + deduced_cam_segment = list(map(world2pixel_factory(ego_config), decoded_road_segment)) + assert len(deduced_cam_segment) == len(decoded_road_segment) + if contains_ego: + keep_cam_segment_point = deduced_cam_segment + keep_road_segment_point = decoded_road_segment + else: + keep_cam_segment_point: "List[npt.NDArray[np.floating]]" = [] + keep_road_segment_point: "List[Float2]" = [] + for current_cam_point, current_road_point in 
zip(deduced_cam_segment, decoded_road_segment): + if in_frame(current_cam_point, frame_size) and \ + in_view(current_road_point, ego_translation, fov_lines): + keep_cam_segment_point.append(current_cam_point) + keep_road_segment_point.append(current_road_point) + if contains_ego or (len(keep_cam_segment_point) > 2 + and Polygon(tuple(keep_cam_segment_point)).area > 100): + return CameraSegmentMapping( + keep_cam_segment_point, + RoadSegmentInfo( + segmentid, + Polygon(keep_road_segment_point), + segmentline, + segmenttype, + segmentheading, + contains_ego, + ego_config, + fov_lines + ) + ) + + +def map_imgsegment_roadsegment( + ego_config: "CameraConfig", + frame_size: "Tuple[int, int]" = (1600, 900) +) -> List[CameraSegmentMapping]: + """Construct a mapping from frame segment to road segment + + Given an image, we know that different roads/lanes belong to different + road segment in the road network. We want to find a mapping + from the road/lane/intersection to the real world road segment so that + we know which part of the image belong to which part of the real world + + Return List[namedtuple(cam_segment_mapping)]: each tuple looks like this + (polygon in frame that represents a portion of lane/road/intersection, + roadSegmentInfo) + """ + fov_lines = get_fov_lines(ego_config) + start_time = time.time() + search_space = construct_search_space(ego_config, view_distance=100) + mapping = [] + + def not_in_view(point: "Float2"): + return not in_view(point, ego_config.ego_translation, fov_lines) + + for road_segment in search_space: + segmentid, segmentpolygon, segmentline, segmenttype, segmentheading, contains_ego = road_segment + segmentline = Geometry(segmentline.to_ewkb()).shapely if segmentline else None + XYs: "Tuple[array.array[float], array.array[float]]" = Geometry(segmentpolygon.to_ewkb()).exterior.shapely.xy + assert isinstance(XYs, tuple) + assert isinstance(XYs[0], array.array), type(XYs[0]) + assert isinstance(XYs[1], array.array), type(XYs[1]) + assert isinstance(XYs[0][0], float), type(XYs[0][0]) + assert isinstance(XYs[1][0], float), type(XYs[1][0]) + segmentpolygon_points = list(zip(*XYs)) + segmentpolygon = Polygon(segmentpolygon_points) + decoded_road_segment = segmentpolygon_points + if not contains_ego: + road_filter = all(map(not_in_view, segmentpolygon_points)) + if road_filter: + continue + + intersection_points = intersection(fov_lines, segmentpolygon) + decoded_road_segment += intersection_points + + current_mapping = construct_mapping( + decoded_road_segment, frame_size, fov_lines, segmentid, + segmentline, segmenttype, segmentheading, contains_ego, ego_config) + if current_mapping is not None: + mapping.append(current_mapping) + + logger.info(f'total mapping time: {time.time() - start_time}') + return mapping + +# def visualization(test_img_path: str, test_config: Dict[str, Any], mapping: Tuple): +# """ +# visualize the mapping from camera segment to road segment +# for testing only +# """ +# from moviepy.editor import VideoClip +# from moviepy.video.io.bindings import mplfig_to_npimage +# frame = cv2.imread(test_img_path) +# fig, axs = plt.subplots() +# axs.set_aspect('equal', 'datalim') +# x_ego, y_ego = test_config['egoTranslation'][:2] +# axs.plot(x_ego, y_ego, color='green', marker='o', markersize=5) +# colormap = plt.cm.get_cmap('hsv', len(mapping)) +# i = 0 +# fourcc = cv2.VideoWriter_fourcc(*'mp4v') +# display_video = cv2.VideoWriter('in_videw_test_display.avi',fourcc, 1, (1600, 900)) +# for cam_segment, road_segment_info in mapping: +# color = 
colormap(i) +# xs = [point[0] for point in road_segment_info.segment_polygon.exterior.coords] +# ys = [point[1] for point in road_segment_info.segment_polygon.exterior.coords] +# segmenttype = road_segment_info.segment_type +# axs.fill(xs, ys, alpha=0.5, fc=color, ec='none') +# axs.text(np.mean(np.array(xs)), np.mean(np.array(ys)), +# segmenttype if segmenttype else '') +# current_plt = mplfig_to_npimage(fig) +# i += 1 + +# fov_lines = road_segment_info.fov_lines +# axs.plot([p[0] for p in fov_lines[0]], [p[1] for p in fov_lines[0]], color='red', marker='o', markersize=2) +# axs.plot([p[0] for p in fov_lines[1]], [p[1] for p in fov_lines[1]], color='red', marker='o', markersize=2) + +# display_frame = frame.copy() +# cv2.polylines(display_frame, [np.array(cam_segment, np.int32).reshape((-1, 1, 2))], True, (0, 255, 0), 2) +# display_frame[:current_plt.shape[0], :current_plt.shape[1]] = current_plt +# display_video.write(display_frame) + +# display_video.release() + +# if __name__ == '__main__': +# test_img_path = os.path.join(data_path, test_img) +# test_config = fetch_camera_config( +# test_img, +# database) +# test_config = camera_config(**test_config) +# mapping = map_imgsegment_roadsegment(test_config) +# visualization(test_img_path, test_config, mapping) diff --git a/optimized_ingestion/stages/detection_estimation/utils.py b/optimized_ingestion/stages/detection_estimation/utils.py new file mode 100644 index 00000000..3c609eec --- /dev/null +++ b/optimized_ingestion/stages/detection_estimation/utils.py @@ -0,0 +1,467 @@ +from apperception.database import database +from apperception.utils import fetch_camera_trajectory + +import datetime +import logging +import math +import numpy as np +from shapely.geometry import LineString, MultiLineString, Point, Polygon, box +from typing import TYPE_CHECKING, List, NamedTuple, Tuple + +if TYPE_CHECKING: + from ...camera_config import CameraConfig + from .segment_mapping import CameraSegmentMapping + + +logger = logging.getLogger(__name__) + +Float2 = Tuple[float, float] +Float3 = Tuple[float, float, float] +Float22 = Tuple[Float2, Float2] + + +SAME_DIRECTION = 'same_direction' +OPPOSITE_DIRECTION = 'opposite_direction' + + +class trajectory_3d(NamedTuple): + coordinates: "Float3" + timestamp: "datetime.datetime" + + +class temporal_speed(NamedTuple): + speed: float + timestamp: "datetime.datetime" + + +def mph_to_mps(mph): + return mph * 0.44704 + + +MAX_CAR_SPEED = { + 'lane': 35, + 'road': 35, + 'laneSection': 35, + 'roadSection': 35, + 'intersection': 25, + 'highway': 55, + 'residential': 25, +} +MAX_CAR_SPEED.update({k: mph_to_mps(v) for k, v in MAX_CAR_SPEED.items()}) + + +def time_elapse(current_time, elapsed_time): + return current_time + datetime.timedelta(seconds=elapsed_time) + + +def compute_area(polygon) -> float: + return box(*polygon).area + + +def compute_distance(loc1, loc2) -> float: + return Point(loc1).distance(Point(loc2)) + + +def relative_direction(vec1, vec2): + return (vec1[0] * vec2[0] + vec1[1] * vec2[1]) / math.sqrt(vec1[0]**2 + vec1[1]**2) / math.sqrt(vec2[0]**2 + vec2[1]**2) > 0 + + +def project_point_onto_linestring( + point: "Point", + line: "LineString") -> "Point": + x = np.array(point.coords[0]) + + u = np.array(line.coords[0]) + v = np.array(line.coords[len(line.coords) - 1]) + + n = v - u + n /= np.linalg.norm(n, 2) + + P = u + n * np.dot(x - u, n) + return Point(P) + + +def _construct_extended_line(polygon: "Polygon", line: "Float22"): + """ + line: represented by 2 points + Find the line segment that can 
possibly intersect with the polygon
+    """
+    polygon = Polygon(polygon)
+    line = LineString(line)
+    minx, miny, maxx, maxy = polygon.bounds
+    bounding_box = box(minx, miny, maxx, maxy)
+    a, b = line.boundary
+    if a.x == b.x:  # vertical line
+        extended_line = LineString([(a.x, miny), (a.x, maxy)])
+    elif a.y == b.y:  # horizontal line
+        extended_line = LineString([(minx, a.y), (maxx, a.y)])
+    else:
+        # linear equation: y = k*x + m
+        slope = (b.y - a.y) / (b.x - a.x)
+        y_intercept = a.y - slope * a.x
+
+        y0 = slope * minx + y_intercept
+        y1 = slope * maxx + y_intercept
+        x0 = (miny - y_intercept) / slope
+        x1 = (maxy - y_intercept) / slope
+        points_on_boundary_lines = [Point(minx, y0), Point(maxx, y1),
+                                    Point(x0, miny), Point(x1, maxy)]
+        points_sorted_by_distance = sorted(points_on_boundary_lines, key=bounding_box.distance)
+        extended_line = LineString(points_sorted_by_distance[:2])
+    return extended_line
+
+
+def intersection_between_line_and_trajectory(line, trajectory):
+    """Find the intersection between a line and a trajectory."""
+    # trajectory_to_polygon = Polygon(trajectory)
+    extended_line = _construct_extended_line(trajectory, line)
+    if len(trajectory) == 1:
+        intersection = extended_line.intersection(Point(trajectory[0]))
+    else:
+        intersection = extended_line.intersection(LineString(trajectory))
+    if intersection.is_empty:
+        return tuple()
+    if isinstance(intersection, (Point, LineString)):
+        # a Point crossing yields a single coordinate;
+        # a LineString yields the overlapping coordinates
+        return tuple(intersection.coords)
+    return tuple()
+
+
+def line_to_polygon_intersection(polygon: "Polygon", line: "Float22") -> "List[Float2]":
+    """Find the intersection between a line and a polygon."""
+    try:
+        extended_line = _construct_extended_line(polygon, line)
+        intersection = extended_line.intersection(polygon)
+    except BaseException:
+        return []
+    if intersection.is_empty:
+        return []
+    elif isinstance(intersection, LineString):
+        return list(intersection.coords)
+    elif isinstance(intersection, MultiLineString):
+        all_intersections = []
+        for intersect in intersection:
+            all_intersections.extend(list(intersect.coords))
+        return list(all_intersections)
+    else:
+        raise ValueError('Unexpected intersection type')
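+
+# A quick illustrative check (made-up coordinates, shapely 1.x style), showing
+# that extending a short heading line across a polygon yields its crossing points:
+#
+#     square = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
+#     short_line = ((1.0, 2.0), (1.5, 2.0))  # a stub of a heading vector
+#     line_to_polygon_intersection(square, short_line)
+#     # -> [(0.0, 2.0), (4.0, 2.0)]: the extended horizontal line crosses both edges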
+
+
+### ASSUMPTIONS ###
+def max_car_speed(road_type):
+    """Maximum speed of a car on the given road type
+
+    For example, the maximum speed of a car on a highway is 65mph,
+    and 25mph on a residential road.
+    """
+    return MAX_CAR_SPEED[road_type]
+
+
+def min_car_speed(road_type):
+    return max_car_speed(road_type) / 2
+
+
+### HELPER FUNCTIONS ###
+def get_ego_trajectory(video: str, sorted_ego_config: "List[CameraConfig]"):
+    """Get the ego trajectory from the given camera configs."""
+    if sorted_ego_config is None:
+        # TODO: fetch_camera_trajectory returns plain dicts rather than
+        # CameraConfig objects, so the database fallback is disabled for now
+        # camera_trajectory_config = fetch_camera_trajectory(video, database)
+        raise Exception()
+    else:
+        camera_trajectory_config = sorted_ego_config
+    return [trajectory_3d(config.ego_translation, config.timestamp) for config in camera_trajectory_config]
+
+
+def get_ego_speed(ego_trajectory):
+    """Get the ego speed based on the ego trajectory."""
+    point_wise_temporal_speed = []
+    for i in range(len(ego_trajectory) - 1):
+        x, y, z = ego_trajectory[i].coordinates
+        timestamp = ego_trajectory[i].timestamp
+        x_next, y_next, z_next = ego_trajectory[i + 1].coordinates
+        timestamp_next = ego_trajectory[i + 1].timestamp
+        distance = compute_distance((x, y), (x_next, y_next))
+        point_wise_temporal_speed.append(
+            temporal_speed(distance / (timestamp_next - timestamp).total_seconds(),
+                           timestamp))
+    return point_wise_temporal_speed
+
+
+def get_ego_avg_speed(ego_trajectory):
+    """Get the ego average speed based on the ego trajectory."""
+    point_wise_ego_speed = get_ego_speed(ego_trajectory)
+    return sum([speed.speed for speed in point_wise_ego_speed]) / len(point_wise_ego_speed)
+
+
+def detection_to_img_segment(
+    car_loc2d: "Float2",
+    cam_segment_mapping: "List[CameraSegmentMapping]",
+):
+    """Get the image segment that contains the detected car."""
+    maximum_mapping: "CameraSegmentMapping | None" = None
+    maximum_mapping_area: float = 0.0
+    point = Point(car_loc2d)
+
+    for mapping in cam_segment_mapping:
+        cam_segment, road_segment_info = mapping
+        p_cam_segment = Polygon(cam_segment)
+        if p_cam_segment.contains(point) and road_segment_info.segment_type in ['lane', 'laneSection']:
+            area = p_cam_segment.area
+            if area > maximum_mapping_area:
+                maximum_mapping = mapping
+                maximum_mapping_area = area
+
+    return maximum_mapping
+
+
+def get_segment_line(road_segment_info, car_loc3d):
+    """Get the segment line the location is in."""
+    segment_lines = road_segment_info.segment_lines
+    segment_headings = road_segment_info.segment_headings
+    closest_segment_line = None
+    closest_segment_heading = None
+    for i in range(len(segment_lines)):
+        segment_line = segment_lines[i]
+        segment_heading = segment_headings[i]
+        if segment_line is not None:
+            projection = project_point_onto_linestring(
+                Point(car_loc3d[:2]), segment_line)
+            if projection.intersects(segment_line):
+                return segment_line, segment_heading
+            if closest_segment_line is None:
+                closest_segment_line = segment_line
+                closest_segment_heading = segment_heading
+            else:
+                if (projection.distance(closest_segment_line)
+                        > projection.distance(segment_line)):
+                    closest_segment_line = segment_line
+                    closest_segment_heading = segment_heading
+    return closest_segment_line, closest_segment_heading
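+
+# A small sanity sketch for the speed helpers above (made-up numbers): two
+# trajectory points 10 m apart and 2 s apart give an average speed of 5 m/s.
+#
+#     t0 = datetime.datetime(2018, 8, 1, 15, 52, 19)
+#     traj = [trajectory_3d((0.0, 0.0, 0.0), t0),
+#             trajectory_3d((10.0, 0.0, 0.0), t0 + datetime.timedelta(seconds=2))]
+#     get_ego_avg_speed(traj)  # -> 5.0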
+
+
+def location_calibration(
+        car_loc3d: "Float3",
+        road_segment_info: "RoadSegmentInfo") -> "Float3":
+    """Calibrate the 3d location of the car with the road segment
+       the car lies in.
+    """
+    segment_polygon = road_segment_info.segment_polygon
+    assert segment_polygon is not None
+    segment_line = road_segment_info.segment_line
+    if segment_line is None:
+        return car_loc3d
+    projection = project_point_onto_linestring(Point(car_loc3d[:2]), segment_line)
+    return projection.x, projection.y, car_loc3d[2]
+
+
+def get_largest_segment(cam_segment_mapping: "List[CameraSegmentMapping]"):
+    maximum_mapping: "CameraSegmentMapping | None" = None
+    maximum_mapping_area: float = 0.0
+
+    for mapping in cam_segment_mapping:
+        _, road_segment_info = mapping
+        area = Polygon(road_segment_info.segment_polygon).area
+        if road_segment_info.contains_ego and area > maximum_mapping_area:
+            maximum_mapping = mapping
+            maximum_mapping_area = area
+
+    return maximum_mapping
+
+
+def time_to_nearest_frame(video: str, timestamp: "datetime.datetime") -> "Tuple[str, int, datetime.datetime]":
+    """Return the frame that is closest to the timestamp
+    """
+    query = f"""
+    WITH Cameras_with_diff as (
+        SELECT *, abs(extract(epoch from timestamp-\'{timestamp}\')) as diff
+        FROM Cameras
+        WHERE fileName LIKE '%{video}%'
+    )
+    SELECT
+        fileName,
+        frameNum,
+        c1.timestamp
+    FROM Cameras_with_diff c1
+    WHERE c1.diff = (SELECT MIN(c2.diff) from Cameras_with_diff c2)
+    LIMIT 1
+    """
+    return database.execute(query)[0]
+
+
+def timestamp_to_nearest_trajectory(trajectory, timestamp):
+    """Return the trajectory point that is closest to the timestamp
+    """
+    return min(trajectory,
+               key=lambda x: abs((x.timestamp - timestamp).total_seconds()))
+
+
+def point_to_nearest_trajectory(point, trajectory):
+    """Return the trajectory point that is closest to the point
+    """
+    return min(trajectory,
+               key=lambda x: compute_distance(x.coordinates, point))
+
+
+def ego_departure(ego_trajectory: "List[trajectory_3d]", current_time: "datetime.datetime"):
+    for i in range(len(ego_trajectory)):
+        point = ego_trajectory[i]
+        if point.timestamp > current_time:
+            non_stop_point = point  # guard against the inner loop never breaking
+            for j in range(i, len(ego_trajectory)):
+                if compute_distance(ego_trajectory[j].coordinates,
+                                    point.coordinates) < 5:
+                    non_stop_point = ego_trajectory[j]
+                    break
+            if i == j:
+                return False, point.timestamp, point.coordinates
+            elif j == len(ego_trajectory) - 1:
+                return True, ego_trajectory[j].timestamp, ego_trajectory[j].coordinates
+            return True, non_stop_point.timestamp, non_stop_point.coordinates
+    return False, ego_trajectory[-1].timestamp, ego_trajectory[-1].coordinates
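+
+# The nearest-neighbor helpers above are simple min() scans; a made-up
+# illustration (`some_time` stands in for any datetime of interest):
+#
+#     traj = get_ego_trajectory('scene-0655-CAM_FRONT', sorted_ego_config)
+#     timestamp_to_nearest_trajectory(traj, some_time)      # nearest in time
+#     point_to_nearest_trajectory((10.0, 0.0, 0.0), traj)   # nearest in space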
+
+
+def time_to_exit_current_segment(current_segment_info,
+                                 current_time, car_loc, car_trajectory=None):
+    """Return the time that the car exits the current segment
+
+    Assumption:
+        car heading is the same as road heading
+        car drives at max speed if no trajectory is given
+    """
+    segmentpolygon = current_segment_info.segment_polygon
+    if car_trajectory:
+        for point in car_trajectory:
+            if (point.timestamp > current_time
+                    and not Polygon(segmentpolygon).contains(Point(point.coordinates))):
+                return point.timestamp, point.coordinates
+        return time_elapse(current_time, -1), None
+    segmentheading = current_segment_info.segment_heading + 90
+    car_loc = Point(car_loc)
+    car_vector = (car_loc.x + math.cos(math.radians(segmentheading)),
+                  car_loc.y + math.sin(math.radians(segmentheading)))
+    car_heading_line = LineString([car_loc, car_vector])
+    # logger.info(f'car_heading_vector {car_heading_line}')
+    intersection = line_to_polygon_intersection(segmentpolygon, car_heading_line)
+    # logger.info(f'mapped polygon {segmentpolygon}')
+    if len(intersection) == 2:
+        intersection_1_vector = (intersection[0][0] - car_loc.x,
+                                 intersection[0][1] - car_loc.y)
+        relative_direction_1 = relative_direction(car_vector, intersection_1_vector)
+        intersection_2_vector = (intersection[1][0] - car_loc.x,
+                                 intersection[1][1] - car_loc.y)
+        relative_direction_2 = relative_direction(car_vector, intersection_2_vector)
+        distance1 = compute_distance(car_loc, intersection[0])
+        distance2 = compute_distance(car_loc, intersection[1])
+        if relative_direction_1:
+            logger.info(f'relative_direction_1 {distance1} {current_time} {max_car_speed(current_segment_info.segment_type)}')
+            return time_elapse(current_time, distance1 / max_car_speed(current_segment_info.segment_type)), intersection[0]
+        elif relative_direction_2:
+            logger.info(f'relative_direction_2 {distance2} {current_time}')
+            return time_elapse(current_time, distance2 / max_car_speed(current_segment_info.segment_type)), intersection[1]
+        else:
+            logger.info('wrong car moving direction')
+            return time_elapse(current_time, -1), None
+    return time_elapse(current_time, -1), None
+
+
+def meetup(car1_loc,
+           car2_loc,
+           car1_heading,
+           car2_heading,
+           road_type,
+           current_time,
+           car1_trajectory=None,
+           car2_trajectory=None,
+           car1_speed=None,
+           car2_speed=None):
+    """Estimate the meetup point as the middle point between car1's loc and car2's loc
+
+    If both trajectories are given, the meetup point is the point where the two trajectories meet
+    If one trajectory is given, use that trajectory and the other car's speed and direction
+    If no trajectory is given, use both cars' speed (or max speed if no speed is given) and the cars' direction
+
+    The timestamp is an estimation based on the trajectory or speed
+    Return: (timestamp, meetup_point)
+
+    Assumptions:
+        car1 and car2 are driving towards each other, not necessarily in opposite directions
+        There shouldn't be a point where they intersect, otherwise it's a collision
+        If no trajectory or speed is given, the car drives at max speed
+    TODO: only the case of the ego car meeting another detected car is implemented so far
+    """
+    car1_loc = Point(car1_loc) if isinstance(car1_loc, tuple) else car1_loc
+    car2_loc = Point(car2_loc) if isinstance(car2_loc, tuple) else car2_loc
+    if car1_trajectory is not None and car2_trajectory is None:
+        car2_speed = max_car_speed(road_type) if car2_speed is None else car2_speed
+        car2_heading += 90
+        car2_vector = (car2_loc.x + math.cos(math.radians(car2_heading)),
+                       car2_loc.y + math.sin(math.radians(car2_heading)),)
+        car2_heading_line = (car2_loc, car2_vector)
+        car1_trajectory_points = [point.coordinates for point in car1_trajectory
+                                  if point.timestamp > current_time]
+        intersection = intersection_between_line_and_trajectory(
+            car2_heading_line, car1_trajectory_points)
+        if len(intersection) == 1:  # i.e. one car drives towards south, the other towards east
+            # logger.info(f"at intersection 1")
+            meetup_point = intersection[0]
+            time1 = point_to_nearest_trajectory(meetup_point, car1_trajectory).timestamp
+            distance2 = compute_distance(car2_loc, meetup_point)
+            time2 = time_elapse(current_time, distance2 / car2_speed)
+            return (min(time1, time2), meetup_point)
+        elif len(intersection) == 0:  # i.e. 
one car drives towards south, the other towards north + # logger.info(f"at intersection 0") + meetup_point = Point((car1_loc.x + car2_loc.x) / 2, (car1_loc.y + car2_loc.y) / 2) + time1 = point_to_nearest_trajectory(meetup_point, car1_trajectory).timestamp + if time1 < current_time: + time1 = current_time + distance2 = compute_distance(car2_loc, meetup_point) + time2 = time_elapse(current_time, distance2 / car2_speed) + if time2 < current_time: + time2 = current_time + return (min(time1, time2), meetup_point) + + +def catchup_time(car1_loc, + car2_loc, + road_type=None, + car1_trajectory=None, + car2_trajectory=None, + car1_speed=None, + car2_speed=None): + """Return the time that car1 catches up to car2 + + Assumption: + 1. car1 and car2 are driving towards the same direction + 2. car1 drives at max speed, car2 drives at min speed + if no trajectory or speed is given + 3. TODO: assume now ego is the slowest, it won't bypass another car + """ + + +def in_view(car_loc, ego_loc, view_distance): + """At this point, we only care about detect cars. So they are in the frame + in_view means whether the car is recognizable enough + """ + return compute_distance(car_loc, ego_loc) < view_distance + + +def time_to_exit_view(ego_loc, car_loc, car_heading, ego_trajectory, current_time, road_type, view_distance): + """Return the time, and location that the car goes beyond ego's view distance + + Assumption: car drives at max speed + """ + ego_speed = get_ego_avg_speed(ego_trajectory) + car_speed = max_car_speed(road_type) + exit_view_time = time_elapse(current_time, view_distance / (car_speed - ego_speed)) + return timestamp_to_nearest_trajectory(ego_trajectory, exit_view_time) + + +def relative_direction_to_ego(obj_heading: float, ego_heading: float): + """Return the relative direction to ego + Now only support opposite and same direction + TODO: add driving into and driving away from + """ + assert obj_heading is not None + + relative_heading = abs(obj_heading - ego_heading) % 360 + if math.cos(math.radians(relative_heading)) > 0: + return SAME_DIRECTION + else: + return OPPOSITE_DIRECTION diff --git a/optimized_ingestion/stages/segment_trajectory/construct_segment_trajectory.py b/optimized_ingestion/stages/segment_trajectory/construct_segment_trajectory.py new file mode 100644 index 00000000..14c62bdd --- /dev/null +++ b/optimized_ingestion/stages/segment_trajectory/construct_segment_trajectory.py @@ -0,0 +1,412 @@ +from apperception.database import database + +import datetime +import math +import postgis +import psycopg2 +from collections import namedtuple +from detection_estimation.detection_estimation import DetectionInfo +from detection_estimation.segment_mapping import RoadSegmentInfo +from detection_estimation.utils import (get_segment_line, + project_point_onto_linestring) +from plpygis import Geometry +from shapely.geometry import Point + +test_segment_query = """ +SELECT + segmentpolygon.elementid, + segmentpolygon.elementpolygon, + segment.segmentline, + segmentpolygon.segmenttypes, + segment.heading +FROM segmentpolygon + LEFT OUTER JOIN segment + ON segmentpolygon.elementid = segment.elementid +WHERE segmentpolygon.elementid = \'{segment_id}\'; +""" + + +segment_closest_query = """ +WITH min_distance AS ( +SELECT + MIN(ST_Distance(segmentpolygon.elementpolygon, {point}::geometry)) distance +FROM segmentpolygon + LEFT OUTER JOIN segment + ON segmentpolygon.elementid = segment.elementid +WHERE ST_Distance(segmentpolygon.elementpolygon, {point}::geometry) > 0 + AND cos(radians( + 
facingRelative({point_heading}::real,
+        degrees(segment.heading)::real))
+    ) > 0
+)
+SELECT
+    segmentpolygon.elementid,
+    segmentpolygon.elementpolygon,
+    segment.segmentline,
+    segmentpolygon.segmenttypes,
+    segment.heading
+FROM min_distance, segmentpolygon
+LEFT OUTER JOIN segment
+    ON segmentpolygon.elementid = segment.elementid
+WHERE ST_Distance(segmentpolygon.elementpolygon, {point}::geometry) = min_distance.distance;
+"""
+
+
+segment_contain_vector_query = """
+WITH min_contain AS (
+SELECT
+    MIN(ST_Area(segmentpolygon.elementpolygon)) min_segment_area
+FROM segmentpolygon
+    LEFT OUTER JOIN segment
+        ON segmentpolygon.elementid = segment.elementid
+WHERE ST_Contains(
+        segmentpolygon.elementpolygon,
+        {point}::geometry
+    )
+    AND cos(radians(
+        facingRelative({point_heading}::real,
+        degrees(segment.heading)::real))
+    ) > 0
+)
+SELECT
+    segmentpolygon.elementid,
+    segmentpolygon.elementpolygon,
+    segment.segmentline,
+    segmentpolygon.segmenttypes,
+    segment.heading
+FROM min_contain, segmentpolygon
+LEFT OUTER JOIN segment
+    ON segmentpolygon.elementid = segment.elementid
+WHERE ST_Area(segmentpolygon.elementpolygon) = min_contain.min_segment_area;
+"""
+
+
+segment_trajectory_point = namedtuple(
+    "segment_trajectory_point", ['car_loc3d', 'timestamp', 'segment_line',
+                                 'segment_heading', 'road_segment_info'])
+
+
+def get_test_trajectory(test_trajectory_points):
+    start_time = datetime.datetime.now()
+    trajectory = []
+    for i in range(len(test_trajectory_points)):
+        start_time += datetime.timedelta(seconds=5)
+        trajectory.append((test_trajectory_points[i], start_time))
+    return trajectory
+
+
+def get_test_detection_infos(test_trajectory, test_segments):
+    assert len(test_trajectory) == len(test_segments)
+    detection_infos = []
+    for i in range(len(test_trajectory)):
+        point, timestamp = test_trajectory[i]
+        test_segments_for_current_point = test_segments[i]
+        road_segment_info = None
+        for test_segment in test_segments_for_current_point:
+            segmentid, segmentpolygon, segmentline, segmenttype, segmentheading = test_segment
+            segmentheading = math.degrees(segmentheading) if segmentheading is not None else None
+            segmentline = Geometry(segmentline.to_ewkb()).shapely if segmentline else None
+            if road_segment_info is not None:
+                road_segment_info.segment_lines.append(segmentline)
+                road_segment_info.segment_headings.append(segmentheading)
+                continue
+            segmentpolygon = Geometry(segmentpolygon.to_ewkb()).shapely
+            road_segment_info = RoadSegmentInfo(
+                segmentid, segmentpolygon, [segmentline], segmenttype,
+                [segmentheading], None, False, None)
+        detection_info = DetectionInfo(obj_id=segmentid,
+                                       frame_segment=None,
+                                       road_segment_info=road_segment_info,
+                                       car_loc3d=point,
+                                       car_loc2d=None,
+                                       car_bbox3d=None,
+                                       car_bbox2d=None,
+                                       ego_trajectory=None,
+                                       ego_config=None,
+                                       ego_road_segment_info=None,
+                                       timestamp=timestamp)
+        detection_infos.append(detection_info)
+    return detection_infos
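+
+# The two queries above implement complementary fallbacks (assumed semantics,
+# since facingRelative() is a database-side UDF not shown here):
+# `segment_closest_query` picks the nearest polygon the point is *not* inside,
+# keeping only segments whose heading is roughly compatible with the point's;
+# `segment_contain_vector_query` picks the smallest polygon *containing* the
+# point with a compatible heading. calibrate() below uses the first for a cold
+# start and the second after projecting onto the previous segment line.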
+
+
+def update_current_road_segment_info(current_detection_info, result):
+    kept_segment = (None, None)
+    road_segment_info = None
+    for road_segment in result:
+        segmentid, segmentpolygon, segmentline, segmenttype, segmentheading = road_segment
+        # no trailing comma here: a trailing comma would turn segmenttype into a tuple
+        segmenttype = segmenttype[0] if segmenttype is not None else None
+        segmentheading = math.degrees(segmentheading) if segmentheading is not None else None
+        segmentid = segmentid.split('_')[0]
+        segmentline = Geometry(segmentline.to_ewkb()).shapely if segmentline else None
+        if segmentid == kept_segment[0]:
+            road_segment_info.segment_lines.append(segmentline)
+            road_segment_info.segment_headings.append(segmentheading)
+        else:
+            if kept_segment[0] is not None:
+                if kept_segment[1] is not None:
+                    continue
+            segmentpolygon = Geometry(segmentpolygon.to_ewkb()).shapely
+            road_segment_info = RoadSegmentInfo(
+                segmentid, segmentpolygon, [segmentline], segmenttype,
+                [segmentheading], None, False, None)
+            kept_segment = (segmentid, segmenttype)
+    return road_segment_info
+
+
+def calibrate(trajectory_3d, detection_infos):
+    """Calibrate the trajectory to the road segments."""
+    road_segment_trajectory = []
+    for i in range(len(trajectory_3d)):
+        current_point3d, timestamp = trajectory_3d[i]
+        current_point = current_point3d[:2]
+        detection_info = detection_infos[i]
+        current_road_segment_heading = detection_info.segment_heading
+        current_segment_line = detection_info.segment_line
+        current_road_segment_info = detection_info.road_segment_info
+        current_point_heading = None
+        relative_heading = None
+        if i != len(trajectory_3d) - 1 and current_road_segment_heading is not None:
+            next_point = trajectory_3d[i + 1][0][:2]
+            current_point_heading = math.atan2(next_point[1] - current_point[1],
+                                               next_point[0] - current_point[0])
+            current_point_heading = math.degrees(current_point_heading)
+
+            relative_heading = (abs(current_road_segment_heading + 90
+                                    - current_point_heading) % 360)
+
+        # keep the point as-is when we cannot compare headings (last point or
+        # missing segment heading) or when it agrees with the segment heading
+        if relative_heading is None or math.cos(math.radians(relative_heading)) > 0:
+            road_segment_trajectory.append(
+                segment_trajectory_point(current_point3d,
+                                         timestamp,
+                                         current_segment_line,
+                                         current_road_segment_heading,
+                                         current_road_segment_info))
+            continue
+
+        ### project current_point to the segment line of the previous point
+        ### and then find the segment that contains the projected point;
+        ### however, this requires querying for the road segment once for each point to be calibrated
+        if len(road_segment_trajectory) == 0:
+            query = psycopg2.sql.SQL(segment_closest_query).format(
+                point=psycopg2.sql.Literal(postgis.point.Point(current_point)),
+                point_heading=psycopg2.sql.Literal(current_point_heading - 90)
+            )
+        else:
+            prev_calibrated_point = road_segment_trajectory[-1]
+            prev_segment_line = prev_calibrated_point[2]
+            prev_segment_heading = prev_calibrated_point[3]
+            projection = project_point_onto_linestring(Point(current_point), prev_segment_line)
+            current_point3d = (projection.x, projection.y, 0.0)
+            query = psycopg2.sql.SQL(segment_contain_vector_query).format(
+                point=psycopg2.sql.Literal(postgis.point.Point((projection.x, projection.y))),
+                point_heading=psycopg2.sql.Literal(prev_segment_heading - 90)
+            )
+        result = database.execute(query)
+        current_road_segment_info = update_current_road_segment_info(
+            detection_info, result)
+        current_segment_line, current_heading = get_segment_line(current_road_segment_info, current_point3d)
+        road_segment_trajectory.append(
+            segment_trajectory_point(current_point3d,
+                                     timestamp,
+                                     current_segment_line,
+                                     current_heading,
+                                     current_road_segment_info))
+    return road_segment_trajectory
+
+
+def test_same_segment():
+    print("test same segment")
+    test_segment_ids = ['99c90907-e7a2-4b19-becc-afe2b7f013c7',
+                        'c67e592f-2e73-4165-b8cf-64165bb300a8',
+                        '99c90907-e7a2-4b19-becc-afe2b7f013c7',
+                        'c67e592f-2e73-4165-b8cf-64165bb300a8',]
+    test_segments = [database.execute(test_segment_query.format(segment_id=segment_id))
+                     for segment_id in test_segment_ids]
+    test_trajectory_points = [(1955, 870), (1960, 874), (1980, 872), (1990, 875)]
+    test_trajectory = get_test_trajectory(test_trajectory_points)
+    test_detection_infos = 
get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['99c90907-e7a2-4b19-becc-afe2b7f013c7', + '53f56897-4795-4d75-a721-3c969bb3206c', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7'] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +def test_wrong_start_same_segment(): + print("test wrong start same segment") + test_segment_ids = ['c67e592f-2e73-4165-b8cf-64165bb300a8', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + 'c67e592f-2e73-4165-b8cf-64165bb300a8',] + test_segments = [database.execute(test_segment_query.format(segment_id=segment_id)) + for segment_id in test_segment_ids] + test_trajectory_points = [(1955, 874), (1960, 870), (1980, 872), (1990, 875)] + test_trajectory = get_test_trajectory(test_trajectory_points) + test_detection_infos = get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7'] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +def test_connected_segments(): + print("test connected segment") + """Some trajectory points are in the wrong segments.""" + test_segment_ids = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'e39e4059-3a55-42f9-896f-475d89a70e86', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'aa22ee59-c9ef-4759-a69c-c295469f3e37_inter', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + 'c67e592f-2e73-4165-b8cf-64165bb300a8', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + 'c67e592f-2e73-4165-b8cf-64165bb300a8',] + test_segments = [database.execute(test_segment_query.format(segment_id=segment_id)) + for segment_id in test_segment_ids] + test_trajectory_points = [(1910, 869), (1915, 873), (1920, 871), (1940, 870), + (1955, 870), (1960, 874), (1980, 872), (1990, 875),] + test_trajectory = get_test_trajectory(test_trajectory_points) + test_detection_infos = get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + '2e6d0881-bb10-4145-a45f-28382c46e476', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'aa22ee59-c9ef-4759-a69c-c295469f3e37_inter', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '53f56897-4795-4d75-a721-3c969bb3206c', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7'] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +def test_complete_story1(): + """Simplest complete story case. + + The trajectories are all in the correct segments. 
+ """ + print("test complete story 1") + test_segment_ids = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7',] + test_segments = [database.execute(test_segment_query.format(segment_id=segment_id)) + for segment_id in test_segment_ids] + test_trajectory_points = [(1910, 869), (1920, 871), (1955, 870), (1960, 871), + (1980, 871), (1990, 871),] + test_trajectory = get_test_trajectory(test_trajectory_points) + test_detection_infos = get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'aa22ee59-c9ef-4759-a69c-c295469f3e37_inter', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7',] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +def test_complete_story2(): + """Some trajectory points are in the wrong segments.""" + print("test complete story 2") + test_segment_ids = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'e39e4059-3a55-42f9-896f-475d89a70e86', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + 'c67e592f-2e73-4165-b8cf-64165bb300a8', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + 'c67e592f-2e73-4165-b8cf-64165bb300a8',] + test_segments = [database.execute(test_segment_query.format(segment_id=segment_id)) + for segment_id in test_segment_ids] + test_trajectory_points = [(1910, 869), (1920, 874), (1955, 870), + (1960, 874), (1980, 872), (1990, 875),] + test_trajectory = get_test_trajectory(test_trajectory_points) + test_detection_infos = get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'aa22ee59-c9ef-4759-a69c-c295469f3e37_inter', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7', + '99c90907-e7a2-4b19-becc-afe2b7f013c7',] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +def test_complete_story3(): + """Some trajectory points are in the wrong segments.""" + print("test complete story 3") + test_segment_ids = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'e39e4059-3a55-42f9-896f-475d89a70e86', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '53c5901a-dad9-4f0d-bcb6-c127dda2be09', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '53c5901a-dad9-4f0d-bcb6-c127dda2be09',] + test_segments = [database.execute(test_segment_query.format(segment_id=segment_id)) + for segment_id in test_segment_ids] + test_trajectory_points = [(1910, 869), (1915, 873), (1920, 871), + (1937, 882), (1932, 885), (1937, 887), (1932, 892),] + test_trajectory = get_test_trajectory(test_trajectory_points) + test_detection_infos = get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + 
'34c01bd5-f649-42e2-be32-30f9a4d02b25', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'aa22ee59-c9ef-4759-a69c-c295469f3e37_inter', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf',] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +def test_complete_story4(): + """Most trajectory points are in the wrong segments.""" + print("test complete story 4") + test_segment_ids = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'e39e4059-3a55-42f9-896f-475d89a70e86', + '53c5901a-dad9-4f0d-bcb6-c127dda2be09', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '53c5901a-dad9-4f0d-bcb6-c127dda2be09',] + test_segments = [database.execute(test_segment_query.format(segment_id=segment_id)) + for segment_id in test_segment_ids] + test_trajectory_points = [(1910, 868), (1920, 873), (1932, 885), (1937, 887), (1932, 892),] + test_trajectory = get_test_trajectory(test_trajectory_points) + test_detection_infos = get_test_detection_infos(test_trajectory, test_segments) + segment_trajectory = calibrate(test_trajectory, test_detection_infos) + correct_result = ['34c01bd5-f649-42e2-be32-30f9a4d02b25', + '34c01bd5-f649-42e2-be32-30f9a4d02b25', + 'aa22ee59-c9ef-4759-a69c-c295469f3e37_inter', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf', + '9eef6c56-c5d9-46ed-a44e-9848676bdddf',] + print([segment_trajectory_point.road_segment_info.segment_id + for segment_trajectory_point in segment_trajectory]) + print("correct result", correct_result) + + +if __name__ == '__main__': + test_same_segment() + test_wrong_start_same_segment() + test_connected_segments() + test_complete_story1() + test_complete_story2() + test_complete_story3() + test_complete_story4() + print('All tests passed!') diff --git a/optimized_ingestion/stages/stage.py b/optimized_ingestion/stages/stage.py index a01494d0..4dd7f0ce 100644 --- a/optimized_ingestion/stages/stage.py +++ b/optimized_ingestion/stages/stage.py @@ -10,8 +10,12 @@ class Stage: - def __init__(self) -> None: - self.runtimes: "List[float]" = [] + runtimes: "List[float]" + + def __new__(cls): + obj = object.__new__(cls) + obj.runtimes = [] + return obj def _run(self, payload: "Payload") -> "StageOutput": return payload.keep, payload.metadata diff --git a/optimized_ingestion/stages/tracking_2d/strongsort.py b/optimized_ingestion/stages/tracking_2d/strongsort.py new file mode 100644 index 00000000..8bc96d6d --- /dev/null +++ b/optimized_ingestion/stages/tracking_2d/strongsort.py @@ -0,0 +1,98 @@ +import numpy.typing as npt +import torch +from bitarray import bitarray +from pathlib import Path +from tqdm import tqdm +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from yolo_tracker.trackers.multi_tracker_zoo import create_tracker +from yolo_tracker.yolov5.utils.torch_utils import select_device + +from ..decode_frame.decode_frame import DecodeFrame +from ..detection_2d.detection_2d import Detection2D +from ..detection_2d.yolo_detection import YoloDetection +from .tracking_2d import Tracking2D, Tracking2DResult + +if TYPE_CHECKING: + from yolo_tracker.trackers.multi_tracker_zoo import \ + StrongSORT as StrongSORTTracker + + from ...payload import Payload + + +FILE = Path(__file__).resolve() +APPERCEPTION = FILE.parent.parent.parent.parent +WEIGHTS = APPERCEPTION / "weights" +reid_weights = WEIGHTS / "osnet_x0_25_msmt17.pt" + + 
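+
+# Note: create_tracker('strongsort', reid_weights, device, half) follows the
+# vendored yolo_tracker zoo's signature as used elsewhere in this repo; a
+# minimal hedged sketch of the per-frame contract this stage relies on:
+#
+#     tracker = create_tracker('strongsort', reid_weights, select_device(''), False)
+#     for det, im0 in frames:  # detections + decoded frame (made-up loop)
+#         outputs = tracker.update(det.cpu(), im0)  # rows: [x1, y1, x2, y2, id, cls]
+#         # frames with no detections must still call tracker.increment_ages()
+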
+class StrongSORT(Tracking2D): + def _run(self, payload: "Payload") -> "Tuple[Optional[bitarray], Optional[Dict[str, list]]]": + if Detection2D.get(payload) is None: + raise Exception() + + detections: "List[Tuple[npt.NDArray, List[str]]]" = YoloDetection.get(payload) + images: "List[npt.NDArray]" = DecodeFrame.get(payload) + metadata: "List[Dict[int, Tracking2DResult]]" = [] + trajectories: "Dict[int, List[Tracking2DResult]]" = {} + device = select_device("") + strongsort: "StrongSORTTracker" = create_tracker('strongsort', reid_weights, device, False) + curr_frame, prev_frame = None, None + with torch.no_grad(): + if hasattr(strongsort, 'model'): + if hasattr(strongsort.model, 'warmup'): + strongsort.model.warmup() + + assert len(detections) == len(images) + for idx, ((det, names), im0s) in tqdm(enumerate(zip(detections, images)), total=len(images)): + if not payload.keep[idx] or len(det) == 0: + metadata.append({}) + strongsort.increment_ages() + prev_frame = im0s.copy() + continue + im0 = im0s.copy() + curr_frame = im0 + + if hasattr(strongsort, 'tracker') and hasattr(strongsort.tracker, 'camera_update'): + if prev_frame is not None and curr_frame is not None: + strongsort.tracker.camera_update(prev_frame, curr_frame) + + confs = det[:, 4] + output_ = strongsort.update(det.cpu(), im0) + + if len(output_) > 0: + labels: "Dict[int, Tracking2DResult]" = {} + for output, conf in zip(output_, confs): + obj_id = int(output[4]) + cls = int(output[5]) + + bbox_left = output[0] + bbox_top = output[1] + bbox_w = output[2] - output[0] + bbox_h = output[3] - output[1] + labels[obj_id] = Tracking2DResult( + idx, + obj_id, + bbox_left, + bbox_top, + bbox_w, + bbox_h, + names[cls], + conf.item(), + ) + if obj_id not in trajectories: + trajectories[obj_id] = [] + trajectories[obj_id].append(labels[obj_id]) + metadata.append(labels) + else: + metadata.append({}) + prev_frame = curr_frame + + for trajectory in trajectories.values(): + last = len(trajectory) - 1 + for i, t in enumerate(trajectory): + if i > 0: + t.prev = trajectory[i - 1] + if i < last: + t.next = trajectory[i + 1] + + return None, {self.classname(): metadata} diff --git a/optimized_ingestion/stages/tracking_2d/tracking_2d.py b/optimized_ingestion/stages/tracking_2d/tracking_2d.py index 3c89d3ee..268f25de 100644 --- a/optimized_ingestion/stages/tracking_2d/tracking_2d.py +++ b/optimized_ingestion/stages/tracking_2d/tracking_2d.py @@ -1,10 +1,35 @@ +import cv2 +import numpy as np +import numpy.typing as npt +import os +import torch from bitarray import bitarray +from collections.abc import Iterable, Iterator +from dataclasses import dataclass, field +from pathlib import Path +from tqdm import tqdm from typing import TYPE_CHECKING, Dict, List, Optional, Tuple -from ...trackers import yolov5_strongsort_osnet_tracker as tracker +# limit the number of cpus used by high performance libraries +os.environ["OMP_NUM_THREADS"] = "1" +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["VECLIB_MAXIMUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" + +from yolo_tracker.trackers.multi_tracker_zoo import create_tracker +from yolo_tracker.yolov5.utils.augmentations import letterbox +from yolo_tracker.yolov5.utils.general import (check_img_size, + non_max_suppression, + scale_boxes) +from yolo_tracker.yolov5.utils.torch_utils import select_device # , time_sync + +from ..decode_frame.decode_frame import DecodeFrame from ..stage import Stage if TYPE_CHECKING: + from 
yolo_tracker.trackers.strong_sort.strong_sort import StrongSORT + from ...payload import Payload @@ -14,10 +39,10 @@ def _run(self, payload: "Payload") -> "Tuple[Optional[bitarray], Optional[Dict[s # with open("./_Tracking2D.pickle", "rb") as f: # return None, {self.classname(): pickle.load(f)} - results = tracker.track(payload) + results = track(payload) results = sorted(results, key=lambda r: r.frame_idx) - metadata: "List[Dict[float, tracker.TrackingResult] | None]" = [] - trajectories: "Dict[float, List[tracker.TrackingResult]]" = {} + metadata: "List[Dict[float, Tracking2DResult] | None]" = [] + trajectories: "Dict[float, List[Tracking2DResult]]" = {} for k in payload.keep: if k: @@ -45,3 +70,231 @@ def _run(self, payload: "Payload") -> "Tuple[Optional[bitarray], Optional[Dict[s # pickle.dump(metadata, f) return None, {self.classname(): metadata} + + +@dataclass +class Tracking2DResult: + frame_idx: int + object_id: int + bbox_left: float + bbox_top: float + bbox_w: float + bbox_h: float + object_type: str + confidence: float + next: "Tracking2DResult | None" = field(default=None, compare=False, repr=False) + prev: "Tracking2DResult | None" = field(default=None, compare=False, repr=False) + + +FILE = Path(__file__).resolve() +APPERCEPTION = FILE.parent.parent.parent.parent +WEIGHTS = APPERCEPTION / "weights" +torch.hub.set_dir(str(WEIGHTS)) +reid_weights = WEIGHTS / "osnet_x0_25_msmt17.pt" # model.pt path + +# Load model +device = select_device("") +print("Using", device) +half = False +model = torch.hub.load('ultralytics/yolov5', 'yolov5s').model.to(device) +stride, names, pt = model.stride, model.names, model.pt +imgsz = check_img_size((640, 640), s=stride) # check image size + + +@torch.no_grad() +def track( + source: "Payload", # take in a list of frames, treat it as a video + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference +): + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + + nr_sources = 1 + labels: "List[Tracking2DResult]" = [] + + # Create a strong sort instances as there is an only one video source + strongsort: "StrongSORT" = create_tracker('strongsort', reid_weights, device, half) + if hasattr(strongsort, 'model'): + if hasattr(strongsort.model, 'warmup'): + strongsort.model.warmup() + + # Run tracking + model.eval() + model.warmup(imgsz=(1 if pt else nr_sources, 3, *imgsz)) # warmup + # dt, seen = [0.0, 0.0, 0.0, 0.0], 0 + curr_frame, prev_frame = None, None + for frame_idx, im, im0s in tqdm(dataset): + if not source.keep[frame_idx]: + strongsort.increment_ages() + prev_frame = im0s.copy() + continue + + # t1 = time_sync() + im = torch.from_numpy(im).to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + # t2 = time_sync() + # dt[0] += t2 - t1 + + # Inference + pred = model(im, augment=augment) + # t3 = time_sync() + # dt[1] += t3 - t2 + + # Apply NMS + pred = non_max_suppression( + pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det + ) + # dt[2] += time_sync() - t3 + + # Process detections + assert isinstance(pred, list) + assert len(pred) == 1 + det = pred[0] + # seen += 1 + im0, _ = im0s.copy(), getattr(dataset, "frame", 0) + curr_frame = im0 + + # s += "%gx%g " % im.shape[2:] # print string + 
+        if hasattr(strongsort, 'tracker') and hasattr(strongsort.tracker, 'camera_update'):
+            if prev_frame is not None and curr_frame is not None:  # camera motion compensation
+                strongsort.tracker.camera_update(prev_frame, curr_frame)
+        if det is not None and len(det):
+
+            # Rescale boxes from img_size to im0 size
+            det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # xyxy
+
+            # Print results
+            # for c in det[:, -1].unique():
+            #     n = (det[:, -1] == c).sum()  # detections per class
+            #     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+            confs = det[:, 4]
+
+            # pass detections to strongsort
+            # t4 = time_sync()
+            output_ = strongsort.update(det.cpu(), im0)
+            # t5 = time_sync()
+            # dt[3] += t5 - t4
+
+            # convert confirmed tracker outputs to Tracking2DResult records
+            if len(output_) > 0:
+                for output, conf in zip(output_, confs):
+
+                    id = output[4]
+                    cls = output[5]
+                    c = int(cls)
+
+                    # to MOT format
+                    bbox_left = output[0]
+                    bbox_top = output[1]
+                    bbox_w = output[2] - output[0]
+                    bbox_h = output[3] - output[1]
+                    labels.append(
+                        Tracking2DResult(
+                            frame_idx,
+                            int(id),
+                            bbox_left,
+                            bbox_top,
+                            bbox_w,
+                            bbox_h,
+                            f"{names[c]}",
+                            conf.item(),
+                        )
+                    )
+            # LOGGER.info(f"{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)")
+
+        else:
+            strongsort.increment_ages()
+            # LOGGER.info("No detections")
+
+        prev_frame = curr_frame
+
+    # Print results
+    # t = tuple(x / seen * 1e3 for x in dt)  # speeds per image
+    # LOGGER.info(
+    #     f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms strong sort update per image at shape {(1, 3, *imgsz)}"
+    #     % t
+    # )
+    # with open(f"strongsort_{source.video.videofile.split('/')[-1]}.json", "w") as f:
+    #     json.dump(strongsort.benchmark, f)
+    return labels
+
+
+# ImageOutput = Tuple[int, npt.NDArray, npt.NDArray, str]
+ImageOutput = Tuple[int, npt.NDArray, npt.NDArray]
+
+
+class LoadImages(Iterator, Iterable):
+    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
+    def __init__(self, payload: "Payload", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
+
+        self.img_size = img_size
+        self.stride = stride
+        self.mode = 'image'
+        self.auto = auto
+        self.transforms = transforms  # optional
+        self.vid_stride = vid_stride  # video frame-rate stride
+        self._new_video(payload.video.videofile)  # new video
+        self.keep = payload.keep
+
+        images = DecodeFrame.get(payload)
+        assert images is not None
+        self.images = images
+
+    def __iter__(self):
+        self.count = 0
+        return self
+
+    def __next__(self) -> "ImageOutput":
+        if self.frame >= self.frames:
+            raise StopIteration
+
+        # Read video
+        self.mode = 'video'
+        im0 = self.images[self.frame]
+        assert isinstance(im0, np.ndarray)
+
+        frame_idx = self.frame
+        self.frame += self.vid_stride
+        # im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False
+        # s = f'video {self.count + 1}/{self.len} ({self.frame}/{self.frames}): '
+
+        if self.transforms:
+            im = self.transforms(im0)  # transforms
+        else:
+            im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize
+            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
+            im = np.ascontiguousarray(im)  # contiguous
+
+        # return frame_idx, im, im0, s
+        return frame_idx, im, im0
+
+    def _new_video(self, path):
+        # Create a new video capture object
+        self.frame = 0
+        self.cap = cv2.VideoCapture(path)
+        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+        self.len = int(self.frames / self.vid_stride)
+        self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META))  # rotation degrees
+        # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0)  # disable https://github.com/ultralytics/yolov5/issues/8493
+
+    def _cv2_rotate(self, im):
+        # Rotate a cv2 video manually
+        if self.orientation == 0:
+            return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
+        elif self.orientation == 180:
+            return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
+        elif self.orientation == 90:
+            return cv2.rotate(im, cv2.ROTATE_180)
+        return im
+
+    def __len__(self):
+        return self.len
diff --git a/optimized_ingestion/stages/tracking_3d/from_2d_and_depth.py b/optimized_ingestion/stages/tracking_3d/from_2d_and_depth.py
index 7ead8ce5..e8b0d4b1 100644
--- a/optimized_ingestion/stages/tracking_3d/from_2d_and_depth.py
+++ b/optimized_ingestion/stages/tracking_3d/from_2d_and_depth.py
@@ -1,7 +1,6 @@
 import numpy as np
 import numpy.typing as npt
 from bitarray import bitarray
-from pyquaternion import Quaternion
 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
 
 from ...utils.depth_to_3d import depth_to_3d
@@ -45,7 +44,7 @@ def _run(self, payload: "Payload") -> "Tuple[Optional[bitarray], Optional[Dict[s
             intrinsic = camera.camera_intrinsic
             point_from_camera = depth_to_3d(x, y, d, intrinsic)
-            rotated_offset = Quaternion(camera.camera_rotation).rotate(
+            rotated_offset = camera.camera_rotation.rotate(
                 np.array(point_from_camera)
             )
             point = np.array(camera.camera_translation) + rotated_offset
diff --git a/optimized_ingestion/stages/tracking_3d/from_2d_and_road.py b/optimized_ingestion/stages/tracking_3d/from_2d_and_road.py
index f0024b81..bf95a4dd 100644
--- a/optimized_ingestion/stages/tracking_3d/from_2d_and_road.py
+++ b/optimized_ingestion/stages/tracking_3d/from_2d_and_road.py
@@ -35,7 +35,7 @@ def _run(self, payload: "Payload") -> "StageOutput":
         trackings3d: "Dict[float, Tracking3DResult]" = {}
         [[fx, _, x0], [_, fy, y0], [_, _, s]] = frame.camera_intrinsic
-        rotation = Quaternion(frame.camera_rotation).unit
+        rotation = frame.camera_rotation
         translation = np.array(frame.camera_translation)
 
         ids: "List[float]" = []
diff --git a/optimized_ingestion/stages/tracking_3d/from_2d_and_road_naive.py b/optimized_ingestion/stages/tracking_3d/from_2d_and_road_naive.py
index 50afb97a..8debce8b 100644
--- a/optimized_ingestion/stages/tracking_3d/from_2d_and_road_naive.py
+++ b/optimized_ingestion/stages/tracking_3d/from_2d_and_road_naive.py
@@ -35,7 +35,7 @@ def _run(self, payload: "Payload") -> "StageOutput":
         trackings3d: "Dict[float, Tracking3DResult]" = {}
         [[fx, _, x0], [_, fy, y0], [_, _, s]] = frame.camera_intrinsic
-        rotation = Quaternion(frame.camera_rotation).unit
+        rotation = frame.camera_rotation
         translation = np.array(frame.camera_translation)
 
         for oid, t in tracking.items():
diff --git a/optimized_ingestion/trackers/__init__.py b/optimized_ingestion/trackers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/optimized_ingestion/trackers/yolov5_strongsort_osnet_tracker.py b/optimized_ingestion/trackers/yolov5_strongsort_osnet_tracker.py
deleted file mode 100644
index 64be2046..00000000
--- a/optimized_ingestion/trackers/yolov5_strongsort_osnet_tracker.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import cv2
-import json
-import numpy as np
-import numpy.typing as npt
-import os
-import torch
-from collections.abc import Iterable, Iterator
-from dataclasses import dataclass
-from pathlib import Path
-from tqdm import tqdm
-from typing import TYPE_CHECKING, List, Tuple
-
-# limit the number of cpus used by high performance libraries
-os.environ["OMP_NUM_THREADS"] = "1"
-os.environ["OPENBLAS_NUM_THREADS"] = "1"
-os.environ["MKL_NUM_THREADS"] = "1"
-os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
-os.environ["NUMEXPR_NUM_THREADS"] = "1"
-
-
-if TYPE_CHECKING:
-    from ..payload import Payload
-
-from yolo_tracker.trackers.multi_tracker_zoo import create_tracker
-from yolo_tracker.yolov5.utils.augmentations import letterbox
-from yolo_tracker.yolov5.utils.general import (check_img_size,
-                                               non_max_suppression,
-                                               scale_boxes)
-from yolo_tracker.yolov5.utils.torch_utils import select_device  # , time_sync
-
-from ..stages.decode_frame import DecodeFrame
-
-FILE = Path(__file__).resolve()
-APPERCEPTION = FILE.parent.parent.parent
-WEIGHTS = APPERCEPTION / "weights"
-reid_weights = WEIGHTS / "osnet_x0_25_msmt17.pt"  # model.pt path
-
-
-# Load model
-device = select_device("")
-print("Using", device)
-half = False
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s').model.to(device)
-stride, names, pt = model.stride, model.names, model.pt
-imgsz = check_img_size((640, 640), s=stride)  # check image size
-
-
-@torch.no_grad()
-def track(
-    source: "Payload",  # take in a list of frames, treat it as a video
-    conf_thres=0.25,  # confidence threshold
-    iou_thres=0.45,  # NMS IOU threshold
-    max_det=1000,  # maximum detections per image
-    classes=None,  # filter by class: --class 0, or --class 0 2 3
-    agnostic_nms=False,  # class-agnostic NMS
-    augment=False,  # augmented inference
-):
-    dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
-
-    nr_sources = 1
-    labels: "List[TrackingResult]" = []
-
-    # Create a strong sort instances as there is an only one video source
-    strongsort = create_tracker('strongsort', reid_weights, device, half)
-    if hasattr(strongsort, 'model'):
-        if hasattr(strongsort.model, 'warmup'):
-            strongsort.model.warmup()
-
-    # Run tracking
-    model.eval()
-    model.warmup(imgsz=(1 if pt else nr_sources, 3, *imgsz))  # warmup
-    # dt, seen = [0.0, 0.0, 0.0, 0.0], 0
-    curr_frame, prev_frame = None, None
-    for frame_idx, im, im0s in tqdm(dataset):
-        if not source.keep[frame_idx]:
-            continue
-
-        # t1 = time_sync()
-        im = torch.from_numpy(im).to(device)
-        im = im.half() if half else im.float()  # uint8 to fp16/32
-        im /= 255.0  # 0 - 255 to 0.0 - 1.0
-        if len(im.shape) == 3:
-            im = im[None]  # expand for batch dim
-        # t2 = time_sync()
-        # dt[0] += t2 - t1
-
-        # Inference
-        pred = model(im, augment=augment, visualize=False)
-        # t3 = time_sync()
-        # dt[1] += t3 - t2
-
-        # Apply NMS
-        pred = non_max_suppression(
-            pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det
-        )
-        # dt[2] += time_sync() - t3
-
-        # Process detections
-        assert isinstance(pred, list)
-        assert len(pred) == 1
-        det = pred[0]
-        # seen += 1
-        im0, _ = im0s.copy(), getattr(dataset, "frame", 0)
-        curr_frame = im0
-
-        # s += "%gx%g " % im.shape[2:]  # print string
-
-        if hasattr(strongsort, 'tracker') and hasattr(strongsort.tracker, 'camera_update'):
-            if prev_frame is not None and curr_frame is not None:  # camera motion compensation
-                strongsort.tracker.camera_update(prev_frame, curr_frame)
-        if det is not None and len(det):
-
-            # Rescale boxes from img_size to im0 size
-            det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # xyxy
-
-            # Print results
-            # for c in det[:, -1].unique():
-            #     n = (det[:, -1] == c).sum()  # detections per class
-            #     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-            confs = det[:, 4]
-
-            # pass detections to strongsort
-            # t4 = time_sync()
-            output_ = strongsort.update(det.cpu(), im0)
-            # t5 = time_sync()
-            # dt[3] += t5 - t4
-
-            # draw boxes for visualization
-            if len(output_) > 0:
-                for output, conf in zip(output_, confs):
-
-                    id = output[4]
-                    cls = output[5]
-                    c = int(cls)
-
-                    # to MOT format
-                    bbox_left = output[0]
-                    bbox_top = output[1]
-                    bbox_w = output[2] - output[0]
-                    bbox_h = output[3] - output[1]
-                    labels.append(
-                        TrackingResult(
-                            frame_idx,
-                            int(id),
-                            bbox_left,
-                            bbox_top,
-                            bbox_w,
-                            bbox_h,
-                            0,  # TODO: remove
-                            f"{names[c]}",
-                            conf.item(),
-                        )
-                    )
-            # LOGGER.info(f"{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)")
-
-        else:
-            strongsort.increment_ages()
-            # LOGGER.info("No detections")
-
-        prev_frame = curr_frame
-
-    # Print results
-    # t = tuple(x / seen * 1e3 for x in dt)  # speeds per image
-    # LOGGER.info(
-    #     f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms strong sort update per image at shape {(1, 3, *imgsz)}"
-    #     % t
-    # )
-    with open(f"strongsort_{source.video.videofile.split('/')[-1]}.json", "w") as f:
-        json.dump(strongsort.benchmark, f)
-    return labels
-
-
-@dataclass
-class TrackingResult:
-    frame_idx: int
-    object_id: int
-    bbox_left: float
-    bbox_top: float
-    bbox_w: float
-    bbox_h: float
-    pred_idx: int
-    object_type: str
-    confidence: float
-    prev: "TrackingResult | None" = None
-    next: "TrackingResult | None" = None
-
-
-# ImageOutput = Tuple[int, npt.NDArray, npt.NDArray, str]
-ImageOutput = Tuple[int, npt.NDArray, npt.NDArray]
-
-
-class LoadImages(Iterator[ImageOutput], Iterable[ImageOutput]):
-    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
-    def __init__(self, payload: "Payload", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
-
-        self.img_size = img_size
-        self.stride = stride
-        self.mode = 'image'
-        self.auto = auto
-        self.transforms = transforms  # optional
-        self.vid_stride = vid_stride  # video frame-rate stride
-        self._new_video(payload.video.videofile)  # new video
-        self.keep = payload.keep
-
-        images = DecodeFrame.get(payload)
-        assert images is not None
-        self.images = images
-
-    def __iter__(self):
-        self.count = 0
-        return self
-
-    def __next__(self) -> "ImageOutput":
-        if self.frame >= self.frames:
-            raise StopIteration
-
-        # Read video
-        self.mode = 'video'
-        im0 = self.images[self.frame]
-        assert isinstance(im0, np.ndarray)
-
-        frame_idx = self.frame
-        self.frame += self.vid_stride
-        # im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False
-        # s = f'video {self.count + 1}/{self.len} ({self.frame}/{self.frames}): '
-
-        if self.transforms:
-            im = self.transforms(im0)  # transforms
-        else:
-            im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize
-            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
-            im = np.ascontiguousarray(im)  # contiguous
-
-        # return frame_idx, im, im0, s
-        return frame_idx, im, im0
-
-    def _new_video(self, path):
-        # Create a new video capture object
-        self.frame = 0
-        self.cap = cv2.VideoCapture(path)
-        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
-        self.len = int(self.frames / self.vid_stride)
-        self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META))  # rotation degrees
-        # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0)  # disable https://github.com/ultralytics/yolov5/issues/8493
-
-    def _cv2_rotate(self, im):
-        # Rotate a cv2 video manually
-        if self.orientation == 0:
-            return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
-        elif self.orientation == 180:
-            return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
-        elif self.orientation == 90:
-            return cv2.rotate(im, cv2.ROTATE_180)
-        return im
-
-    def __len__(self):
-        return self.len
diff --git a/optimized_ingestion/utils/iterate_video.py b/optimized_ingestion/utils/iterate_video.py
index 1f854854..5e4f0f92 100644
--- a/optimized_ingestion/utils/iterate_video.py
+++ b/optimized_ingestion/utils/iterate_video.py
@@ -7,7 +7,7 @@ def iterate_video(cap: "cv2.VideoCapture"):
     return VideoIterator(cap)
 
 
-class VideoIterator(collections.abc.Iterator["npt.NDArray"], collections.abc.Sized):
+class VideoIterator(collections.abc.Iterator, collections.abc.Sized):
    def __init__(self, cap: "cv2.VideoCapture"):
         self._n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
         self._count = 0
diff --git a/optimized_ingestion/utils/overlay_roads.py b/optimized_ingestion/utils/overlay_roads.py
index bf83cc42..3653402d 100644
--- a/optimized_ingestion/utils/overlay_roads.py
+++ b/optimized_ingestion/utils/overlay_roads.py
@@ -8,7 +8,6 @@
 from multiprocessing import Pool
 from os import environ
 from psycopg2 import sql
-from pyquaternion import Quaternion
 from tqdm import tqdm
 from typing import TYPE_CHECKING, List, Tuple
 
@@ -50,7 +49,7 @@ def overlay_to_frame(args: "Tuple[CameraConfig, npt.NDArray]") -> "npt.NDArray":
             np.zeros((1, len(p.coords)))
         ])
         coords = coords - np.array(frame.camera_translation)[:, np.newaxis]
-        coords = rotate(coords, Quaternion(frame.camera_rotation).inverse.unit)
+        coords = rotate(coords, frame.camera_rotation.inverse.unit)
         coords = intrinsic @ coords
         coords = coords / coords[2:3, :]
diff --git a/optimized_ingestion/video.py b/optimized_ingestion/video.py
index 2065880c..8d3aa4a6 100644
--- a/optimized_ingestion/video.py
+++ b/optimized_ingestion/video.py
@@ -2,12 +2,12 @@
 import collections.abc
 import cv2
 from datetime import datetime, timedelta
-from typing import List, Optional
+from typing import Iterable, List, Optional
 
 from .camera_config import CameraConfig, interpolate
 
 
-class Video(collections.abc.Iterable["CameraConfig"]):
+class Video(Iterable["CameraConfig"]):
     videofile: str
 
     def __init__(
@@ -52,7 +52,7 @@ def fps(self):
     def __getitem__(self, index):
         return self.interpolated_frames[index]
 
-    def __iter__(self) -> "collections.abc.Iterator[CameraConfig]":
+    def __iter__(self) -> "collections.abc.Iterator":
         return iter(self.interpolated_frames)
 
     def __len__(self):
@@ -64,6 +64,7 @@ def __len__(self):
     def __get_fps_and_num_frames(self):
         if self._num_frames is None or self._fps is None:
             cap = cv2.VideoCapture(self.videofile)
+            assert cap.isOpened(), self.videofile
             self._num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
             self._fps = float(cap.get(cv2.CAP_PROP_FPS))
             cap.release()
diff --git a/poetry.lock b/poetry.lock
index 42b9367d..41893bda 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -76,7 +76,7 @@ python-versions = ">=3.5"
 dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
 docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
 tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
-tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
+tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
 
 [[package]]
 name = "backcall"
@@ -366,7 +366,7 @@ notebook = ["ipywidgets", "notebook"]
 parallel = ["ipyparallel"]
 qtconsole = ["qtconsole"]
 test = ["pytest (<7.1)", "pytest-asyncio", "testpath"]
-test_extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.19)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]
+test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.19)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]
 
 [[package]]
 name = "ipython_genutils"
@@ -811,6 +811,17 @@ category = "main"
 optional = false
 python-versions = ">=3.6"
 
+[[package]]
+name = "plpygis"
+version = "0.2.0"
+description = "PostGIS Python tools"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.extras]
+shapely-support = ["Shapely (>=1.5.0)"]
+
 [[package]]
 name = "pluggy"
 version = "1.0.0"
@@ -1153,6 +1164,19 @@ typing-extensions = "*"
 test = ["pytest (>=6.2)", "virtualenv (>20)"]
 toml = ["setuptools (>=42)"]
 
+[[package]]
+name = "shapely"
+version = "1.8.5.post1"
+description = "Geometric objects, predicates, and operations"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+all = ["numpy", "pytest", "pytest-cov"]
+test = ["pytest", "pytest-cov"]
+vectorized = ["numpy"]
+
 [[package]]
 name = "six"
 version = "1.16.0"
@@ -1318,7 +1342,7 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=
 [metadata]
 lock-version = "1.1"
 python-versions = "^3.8"
-content-hash = "487a4cadddafa0d77632067d26cf179f9286536006972bd0edc753394103fc3a"
+content-hash = "b11c647b2ca51142087dca3e9cdc9707b2fed2ac87f493818d617a33e715450e"
 
 [metadata.files]
 appnope = [
@@ -2214,6 +2238,9 @@ pkgutil_resolve_name = [
     {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
     {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
 ]
+plpygis = [
+    {file = "plpygis-0.2.0.tar.gz", hash = "sha256:f9d1bb3913970b6c40c67188be3716f9fa490c1441e6c0d915221c8291826079"},
+]
 pluggy = [
     {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
     {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
@@ -2430,6 +2457,7 @@ pywin32 = [
 ]
 pywinpty = [
     {file = "pywinpty-2.0.8-cp310-none-win_amd64.whl", hash = "sha256:9cbf89834abc8d4d4c5f295f11e15dd93889a8069db876f2bc10cc64aa4060ac"},
+    {file = "pywinpty-2.0.8-cp37-none-win_amd64.whl", hash = "sha256:a2f9a95f3b74262ef73f1be5257c295c8caab1f79f081aa3400ca61c724f9bc6"},
     {file = "pywinpty-2.0.8-cp38-none-win_amd64.whl", hash = "sha256:23389d56258d6a1fbc4b41257bd65e5bdabaf6fde7f30a13806e557ea9ee6865"},
     {file = "pywinpty-2.0.8-cp39-none-win_amd64.whl", hash = "sha256:ea7c1da94eed5ef93e75026c67c60d4dca33ea9a1c212fa89221079a7b463c68"},
     {file = "pywinpty-2.0.8.tar.gz", hash = "sha256:a89b9021c63ef78b1e7d8e14f0fac4748c88a0c2e4f529c84f37f6e72b914280"},
@@ -2522,6 +2550,47 @@ setuptools-scm = [
    {file = "setuptools_scm-7.0.5-py3-none-any.whl", hash = "sha256:7930f720905e03ccd1e1d821db521bff7ec2ac9cf0ceb6552dd73d24a45d3b02"},
    {file = "setuptools_scm-7.0.5.tar.gz", hash = "sha256:031e13af771d6f892b941adb6ea04545bbf91ebc5ce68c78aaf3fff6e1fb4844"},
 ]
+shapely = [
+    {file = "Shapely-1.8.5.post1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d048f93e42ba578b82758c15d8ae037d08e69d91d9872bca5a1895b118f4e2b0"},
+    {file = "Shapely-1.8.5.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99ab0ddc05e44acabdbe657c599fdb9b2d82e86c5493bdae216c0c4018a82dee"},
+    {file = "Shapely-1.8.5.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:99a2f0da0109e81e0c101a2b4cd8412f73f5f299e7b5b2deaf64cd2a100ac118"},
+    {file = "Shapely-1.8.5.post1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6fe855e7d45685926b6ba00aaeb5eba5862611f7465775dacd527e081a8ced6d"},
+    {file = "Shapely-1.8.5.post1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec14ceca36f67cb48b34d02d7f65a9acae15cd72b48e303531893ba4a960f3ea"},
+    {file = "Shapely-1.8.5.post1-cp310-cp310-win32.whl", hash = "sha256:21776184516a16bf82a0c3d6d6a312b3cd15a4cabafc61ee01cf2714a82e8396"},
+    {file = "Shapely-1.8.5.post1-cp310-cp310-win_amd64.whl", hash = "sha256:a354199219c8d836f280b88f2c5102c81bb044ccea45bd361dc38a79f3873714"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:783bad5f48e2708a0e2f695a34ed382e4162c795cb2f0368b39528ac1d6db7ed"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a23ef3882d6aa203dd3623a3d55d698f59bfbd9f8a3bfed52c2da05a7f0f8640"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab38f7b5196ace05725e407cb8cab9ff66edb8e6f7bb36a398e8f73f52a7aaa2"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d086591f744be483b34628b391d741e46f2645fe37594319e0a673cc2c26bcf"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4728666fff8cccc65a07448cae72c75a8773fea061c3f4f139c44adc429b18c3"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-win32.whl", hash = "sha256:84010db15eb364a52b74ea8804ef92a6a930dfc1981d17a369444b6ddec66efd"},
+    {file = "Shapely-1.8.5.post1-cp311-cp311-win_amd64.whl", hash = "sha256:48dcfffb9e225c0481120f4bdf622131c8c95f342b00b158cdbe220edbbe20b6"},
+    {file = "Shapely-1.8.5.post1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2fd15397638df291c427a53d641d3e6fd60458128029c8c4f487190473a69a91"},
+    {file = "Shapely-1.8.5.post1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a74631e511153366c6dbe3229fa93f877e3c87ea8369cd00f1d38c76b0ed9ace"},
+    {file = "Shapely-1.8.5.post1-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:66bdac74fbd1d3458fa787191a90fa0ae610f09e2a5ec398c36f968cc0ed743f"},
+    {file = "Shapely-1.8.5.post1-cp36-cp36m-win32.whl", hash = "sha256:6d388c0c1bd878ed1af4583695690aa52234b02ed35f93a1c8486ff52a555838"},
+    {file = "Shapely-1.8.5.post1-cp36-cp36m-win_amd64.whl", hash = "sha256:be9423d5a3577ac2e92c7e758bd8a2b205f5e51a012177a590bc46fc51eb4834"},
+    {file = "Shapely-1.8.5.post1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5d7f85c2d35d39ff53c9216bc76b7641c52326f7e09aaad1789a3611a0f812f2"},
+    {file = "Shapely-1.8.5.post1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:adcf8a11b98af9375e32bff91de184f33a68dc48b9cb9becad4f132fa25cfa3c"},
+    {file = "Shapely-1.8.5.post1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:753ed0e21ab108bd4282405b9b659f2e985e8502b1a72b978eaa51d3496dee19"},
+    {file = "Shapely-1.8.5.post1-cp37-cp37m-win32.whl", hash = "sha256:65b21243d8f6bcd421210daf1fabb9de84de2c04353c5b026173b88d17c1a581"},
+    {file = "Shapely-1.8.5.post1-cp37-cp37m-win_amd64.whl", hash = "sha256:370b574c78dc5af3a198a6da5d9b3d7c04654bd2ef7e80e80a3a0992dfb2d9cd"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:532a55ee2a6c52d23d6f7d1567c8f0473635f3b270262c44e1b0c88096827e22"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3480657460e939f45a7d359ef0e172a081f249312557fe9aa78c4fd3a362d993"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b65f5d530ba91e49ffc7c589255e878d2506a8b96ffce69d3b7c4500a9a9eaf8"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:147066da0be41b147a61f8eb805dea3b13709dbc873a431ccd7306e24d712bc0"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c2822111ddc5bcfb116e6c663e403579d0fe3f147d2a97426011a191c43a7458"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-win32.whl", hash = "sha256:2e0a8c2e55f1be1312b51c92b06462ea89e6bb703fab4b114e7a846d941cfc40"},
+    {file = "Shapely-1.8.5.post1-cp38-cp38-win_amd64.whl", hash = "sha256:0d885cb0cf670c1c834df3f371de8726efdf711f18e2a75da5cfa82843a7ab65"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0b4ee3132ee90f07d63db3aea316c4c065ed7a26231458dda0874414a09d6ba3"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:02dd5d7dc6e46515d88874134dc8fcdc65826bca93c3eecee59d1910c42c1b17"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c6a9a4a31cd6e86d0fbe8473ceed83d4fe760b19d949fb557ef668defafea0f6"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:38f0fbbcb8ca20c16451c966c1f527cc43968e121c8a048af19ed3e339a921cd"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:78fb9d929b8ee15cfd424b6c10879ce1907f24e05fb83310fc47d2cd27088e40"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-win32.whl", hash = "sha256:8e59817b0fe63d34baedaabba8c393c0090f061917d18fc0bcc2f621937a8f73"},
+    {file = "Shapely-1.8.5.post1-cp39-cp39-win_amd64.whl", hash = "sha256:e9c30b311de2513555ab02464ebb76115d242842b29c412f5a9aa0cac57be9f6"},
+    {file = "Shapely-1.8.5.post1.tar.gz", hash = "sha256:ef3be705c3eac282a28058e6c6e5503419b250f482320df2172abcbea642c831"},
+]
 six = [
     {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
     {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
diff --git a/pyproject.toml b/pyproject.toml
index 3f46b6ec..8568b853 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,6 +18,8 @@ bitarray = "^2.6.0"
 tqdm = "^4.64.1"
 python-mobilitydb = "^0.1.2"
 postgis = "^1.0.4"
+shapely = "^1.8.5.post1"
+plpygis = "^0.2.0"
 
 [tool.poetry.group.dev.dependencies]
 mypy = "^0.961"