-
Notifications
You must be signed in to change notification settings - Fork 393
Expand file tree
/
Copy pathmain.py
More file actions
120 lines (104 loc) · 3.93 KB
/
main.py
File metadata and controls
120 lines (104 loc) · 3.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import depthai as dai
from pathlib import Path
from depthai_nodes.node import ParsingNeuralNetwork
from utils.arguments import initialize_argparser
from utils.colorize_diff import ColorizeDiff
# Camera output resolutions: the larger stream feeds the frame-diff network,
# the smaller one is shared by the blur/edge/concat models.
NN_DIFF_SIZE = (720, 720)
NN_CONCAT_SIZE = (300, 300)
_, args = initialize_argparser()
# Connect to the device named on the command line, else the first one discovered.
device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device()
# Remote web visualizer; streams topic outputs to a browser on this port.
visualizer = dai.RemoteConnection(httpPort=8082)
with dai.Pipeline(device) as pipeline:
    platform = device.getPlatformAsString()

    # RVC4 expects interleaved BGR; other platforms take planar. Computed once
    # instead of repeating the same ternary for every camera output request.
    frame_type = (
        dai.ImgFrame.Type.BGR888i
        if platform == "RVC4"
        else dai.ImgFrame.Type.BGR888p
    )

    def _request_output(cam, size):
        """Request an output stream from `cam` at `size` using the platform frame type."""
        return cam.requestOutput(size=size, type=frame_type, fps=args.fps_limit)

    def _load_archive(name):
        """Load the platform-specific NN archive `models/<name>.<platform>.tar.xz`."""
        path = Path(__file__).parent / Path(
            f"models/{name}.{platform.lower()}.tar.xz"
        )
        return dai.NNArchive(archivePath=str(path))

    cam_rgb = pipeline.create(dai.node.Camera).build(
        boardSocket=dai.CameraBoardSocket.CAM_A
    )
    cam_left = pipeline.create(dai.node.Camera).build(
        boardSocket=dai.CameraBoardSocket.CAM_B
    )
    cam_right = pipeline.create(dai.node.Camera).build(
        boardSocket=dai.CameraBoardSocket.CAM_C
    )

    cam_left_out = _request_output(cam_left, NN_CONCAT_SIZE)
    cam_right_out = _request_output(cam_right, NN_CONCAT_SIZE)
    cam_rgb_out = _request_output(cam_rgb, NN_CONCAT_SIZE)
    # Separate, larger RGB stream dedicated to the frame-diff path.
    cam_rgb_diff_out = _request_output(cam_rgb, NN_DIFF_SIZE)

    # BLUR: single-input model fed by the RGB preview.
    nn_blur = pipeline.create(ParsingNeuralNetwork).build(
        cam_rgb_out, _load_archive("blur")
    )

    # EDGE: single-input model fed by the same RGB preview.
    nn_edge = pipeline.create(ParsingNeuralNetwork).build(
        cam_rgb_out, _load_archive("edge")
    )

    # CONCAT: three-input model combining RGB with the left/right cameras.
    nn_concat = pipeline.create(ParsingNeuralNetwork)
    nn_concat.setNNArchive(_load_archive("concat"))
    cam_rgb_out.link(nn_concat.inputs["img1"])
    cam_left_out.link(nn_concat.inputs["img2"])
    cam_right_out.link(nn_concat.inputs["img3"])

    # On-device script that emits each frame paired with the previous one so
    # the DIFF network can compare consecutive frames.
    script = pipeline.create(dai.node.Script)
    script.setScript("""
old = node.io['in'].get()
while True:
    frame = node.io['in'].get()
    node.io['img1'].send(old)
    node.io['img2'].send(frame)
    old = frame
""")
    cam_rgb_diff_out.link(script.inputs["in"])

    # DIFF: raw NeuralNetwork over the (previous, current) frame pair,
    # colorized by the project-local ColorizeDiff node for display.
    nn_diff = pipeline.create(dai.node.NeuralNetwork)
    nn_diff.setNNArchive(_load_archive("diff"))
    script.outputs["img1"].link(nn_diff.inputs["img1"])
    script.outputs["img2"].link(nn_diff.inputs["img2"])
    diff_color = pipeline.create(ColorizeDiff).build(nn=nn_diff.out)

    # Expose each network's output as an image topic in the remote visualizer.
    visualizer.addTopic("Blur", nn_blur.out, "images")
    visualizer.addTopic("Edge", nn_edge.out, "images")
    visualizer.addTopic("Concat", nn_concat.out, "images")
    visualizer.addTopic("Diff", diff_color.out, "images")

    print("Pipeline created.")
    pipeline.start()
    visualizer.registerPipeline(pipeline)

    # Poll until the pipeline stops or the remote viewer sends 'q'.
    while pipeline.isRunning():
        pipeline.processTasks()
        key = visualizer.waitKey(1)
        if key == ord("q"):
            print("Got q key from the remote connection!")
            break