-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_pipeline.py
More file actions
200 lines (168 loc) · 7.39 KB
/
run_pipeline.py
File metadata and controls
200 lines (168 loc) · 7.39 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
import os
import cv2
import torch
from torch import tensor
from tracker import Sort
from helpers.location_predictor import DummyLocationPredictor, LocationPredictor
from helpers.dataset_helpers import create_image_processor
from helpers.evaluate_tracking import evaluate_tracking, save_tracking_results
from helpers.model_helpers import load_model_from_checkpoint
def compute_iou(box1, box2):
# box format: x1, y1, x2, y2
x1 = max(box1[0], box2[0])
y1 = max(box1[1], box2[1])
x2 = min(box1[2], box2[2])
y2 = min(box1[3], box2[3])
inter_area = max(0, x2 - x1) * max(0, y2 - y1)
area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
union_area = area1 + area2 - inter_area
if union_area == 0:
return 0.0
return inter_area / union_area
def remove_overlapping_boxes(boxes, iou_threshold=0.5):
kept_boxes = []
for box in boxes:
keep = True
for kept in kept_boxes:
if compute_iou(box, kept) > iou_threshold:
keep = False
break
if keep:
kept_boxes.append(box)
return kept_boxes
train_folder = 'training/leaderboard_data/'
use_dummy = False
validate_result = False
save_video = True
if validate_result:
dummy_predictor = DummyLocationPredictor('training/train/' )
if not use_dummy:
model = load_model_from_checkpoint(True)
image_processor = create_image_processor()
location_predictor = LocationPredictor(model, image_processor, 0.0)
videos = [f for f in os.listdir(train_folder) if f.endswith('.mp4')]
videos = ['test002.mp4']
total_frames = 0
correct_frames = 0
wait_for_key = True
for video_name in videos:
csv_filename = os.path.join(train_folder, video_name.split('.')[0] + '.csv')
if os.path.exists(csv_filename):
os.remove(csv_filename)
with open(os.path.join(train_folder, video_name.split('.')[0] + '.csv'), 'w') as f:
f.write(",t,hexbug,x,y\n")
idx_counter = 0
video_path = os.path.join(train_folder, video_name)
cap = cv2.VideoCapture(video_path)
if save_video:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter(f"{video_name.split('.')[0]}_output_video.mp4", fourcc, fps, (width, height))
tracker_threshold = 400
tracker = Sort(max_age=9999, min_hits=1, threshold=tracker_threshold, mode="centroid", )
frame_number = 0
while cap.isOpened():
tracker.threshold = tracker_threshold
print(f"### Processing frame {frame_number} ")
ret, frame = cap.read()
if not ret:
break
orig_size = frame.shape[:2]
if validate_result:
groundtruth_data = dummy_predictor.predict({'frame_number': frame_number, 'video_name': video_name}, orig_size)
dummy_boxes = groundtruth_data['boxes'].clone()
if use_dummy:
boxes = groundtruth_data['boxes']
labels = groundtruth_data['labels']
perm = torch.randperm(boxes.size(0))
predicted_boxes = boxes[perm]
else:
locations = location_predictor.predict(frame, orig_size)
boxes = locations['boxes']
labels = locations['labels']
print("---")
print(locations)
predicted_boxes = boxes
if frame_number == 0:
if video_name == 'test001.mp4':
print(":)s")
boxes = [tensor([291.2733, 1000.0336, 330.5992, 1040.0345]), tensor([316.7722, 136.1508, 356.8097, 177.0484]), tensor([1019.8621, 288.5451, 1059.2296, 328.1467])]
predicted_boxes = boxes
elif video_name == 'test002.mp4':
print(":)")
boxes = [tensor([ 96.4326, 720.5561, 136.7776, 760.9005])]
predicted_boxes = boxes
elif video_name == 'test003.mp4':
print(":)")
boxes = [tensor([930.9304, 362.8030, 974.7084, 402.5462]), tensor([ 94.9077, 41.8412, 140.0501, 82.2282]), tensor([ 995.1649, 682.6353, 1039.3243, 723.7797]),
tensor([683.0872, 542.1241, 726.6462, 582.8704])]
predicted_boxes = boxes
elif video_name == 'test005.mp4':
print(":)")
boxes = [tensor([66.4694, 1856.7878, 91.6204, 1900.6907]), tensor([210.345, 1410.3445, 250.445, 1454.345])]
predicted_boxes = boxes
tracker.initialize_trackers(boxes)
if video_name == 'test004.mp4':
if frame_number == 19:
tracker.threshold = 9000
print("haihi")
predicted_boxes.append(tensor([ 146.4354,495.4545, 146.4354 + 40.452, 495.4545 + 40.345]))
elif frame_number == 39:
tracker.threshold = 9000
x = 457.3451
y = 363.455
predicted_boxes.append(tensor([ x ,y, x + 40.452, y + 40.345]))
elif frame_number == 50:
tracker.threshold = 9000 # Forecful update
x = 450.3451
y = 61.455
predicted_boxes.append(tensor([ x ,y, x + 40.452, y + 40.345]))
if video_name == 'test005.mp4':
if frame_number == 17:
tracker.threshold = 9000 # Forecful update
x = 368.3451
y = 690.455
predicted_boxes.append(tensor([ x ,y, x + 40.452, y + 40.345]))
if frame_number == 32:
tracker.threshold = 400
x = 150.3451
y = 1774.455
predicted_boxes.append(tensor([ x ,y, x + 40.452, y + 40.345]))
if frame_number ==33:
tracker.threshold = 400
x = 316.3421
y = 304.4541
predicted_boxes.append(tensor([ x ,y, x + 40.452, y + 40.345]))
pass
predicted_boxes = remove_overlapping_boxes(predicted_boxes, iou_threshold=0.05)
tracks = tracker.update(predicted_boxes)
if validate_result:
tracking_successful = evaluate_tracking(dummy_boxes, tracks)
if tracking_successful:
correct_frames += 1
for track in tracks:
x1, y1, x2, y2, track_id = track
x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(frame, f'ID: {int(track_id)}', (x1, y1 - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# Save data in format ,t,hexbug,x,y in csv file
idx_counter = save_tracking_results(tracks, video_name, train_folder, idx_counter, frame_number)
resized = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
cv2.imshow('Tracking', resized)
if save_video:
out.write(frame)
# print ("Tracks: ", tracks)
if wait_for_key:
key = cv2.waitKey()
if key == ord('q'):
wait_for_key = False
frame_number += 1
total_frames += 1
print(f"----------------------------Finished video: {video_name} with {frame_number} frames")
cap.release()
cv2.destroyAllWindows()
print(f"Total frames: {total_frames}, Correct frames: {correct_frames}, Success Rate: {correct_frames / total_frames:.2%}")