Skip to content

Commit 7e557aa

Browse files
authored
Merge pull request #220 from zivid/2026-02-05-update-python-samples
Samples: Define settings used to capture. Concludes the modifications made to which settings are used to capture in the samples.
2 parents 64c6577 + 79df0e3 commit 7e557aa

4 files changed

Lines changed: 105 additions & 21 deletions

File tree

source/applications/advanced/multi_camera/multi_camera_calibration.py

Lines changed: 26 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,17 +16,36 @@ def _args() -> argparse.Namespace:
1616
1717
Returns:
1818
Arguments from the user
19+
1920
"""
2021
parser = argparse.ArgumentParser(description="Multi-camera calibration using Zivid cameras.")
22+
2123
parser.add_argument(
2224
"transformation_matrices_save_path",
25+
type=Path,
2326
help="Path where the transformation matrices YAML files will be saved",
27+
)
28+
29+
parser.add_argument(
30+
"--settings-path",
31+
required=False,
2432
type=Path,
33+
help="Path to the camera settings YML file",
2534
)
35+
2636
return parser.parse_args()
2737

2838

2939
def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
40+
"""get a list of available cameras and connect to them.
41+
42+
Args:
43+
cameras: List of Zivid cameras
44+
45+
Returns:
46+
List of connected Zivid cameras
47+
48+
"""
3049
connected_cameras = []
3150
for camera in cameras:
3251
if camera.state.status == zivid.CameraState.Status.available:
@@ -44,12 +63,16 @@ class Detection:
4463
detection_result: zivid.calibration.DetectionResult
4564

4665

47-
def get_detections(connected_cameras: List[zivid.Camera]) -> List[Detection]:
66+
def get_detections(connected_cameras: List[zivid.Camera], settings_path: Path) -> List[Detection]:
4867
detections_list = []
4968
for camera in connected_cameras:
5069
serial = camera.info.serial_number
5170
print(f"Capturing frame with camera: {serial}")
52-
frame = zivid.calibration.capture_calibration_board(camera)
71+
if settings_path is None:
72+
frame = zivid.calibration.capture_calibration_board(camera)
73+
else:
74+
settings = zivid.Settings.load(settings_path)
75+
frame = camera.capture_2d_3d(settings)
5376
print("Detecting checkerboard in point cloud")
5477
detection_result = zivid.calibration.detect_calibration_board(frame)
5578
if detection_result:
@@ -93,7 +116,7 @@ def main() -> None:
93116
raise RuntimeError("At least two cameras need to be connected")
94117
print(f"Number of connected cameras: {len(connected_cameras)}")
95118

96-
detections = get_detections(connected_cameras)
119+
detections = get_detections(connected_cameras, args.settings_path)
97120
run_multi_camera_calibration(detections, args.transformation_matrices_save_path)
98121

99122

source/applications/advanced/multi_camera/stitch_by_transformation.py

Lines changed: 68 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -25,19 +25,74 @@ def _user_arguments() -> argparse.Namespace:
2525
parser = argparse.ArgumentParser(
2626
description="Stitch point clouds from multiple Zivid cameras using transformation matrices."
2727
)
28+
2829
parser.add_argument(
2930
"yaml_files",
3031
type=Path,
3132
nargs="+",
3233
help="YAML files containing the corresponding transformation matrices (one per camera).",
3334
)
35+
36+
parser.add_argument(
37+
"-o",
38+
"--output-file",
39+
required=False,
40+
type=Path,
41+
help="Save the stitched point cloud to a file with this name (.ply)",
42+
)
43+
3444
parser.add_argument(
35-
"-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)"
45+
"--settings-path",
46+
required=False,
47+
type=Path,
48+
help="Path to the camera settings YML file",
3649
)
50+
3751
return parser.parse_args()
3852

3953

54+
def sanitized_model_name(camera: zivid.Camera) -> str:
55+
"""Get a string that represents the camera model name.
56+
57+
Args:
58+
camera: Zivid camera
59+
60+
Raises:
61+
RuntimeError: If unsupported camera model for this code sample
62+
63+
Returns:
64+
A string representing the camera model name
65+
66+
"""
67+
model = camera.info.model
68+
69+
model_map = {
70+
zivid.CameraInfo.Model.zividTwo: "Zivid_Two_M70",
71+
zivid.CameraInfo.Model.zividTwoL100: "Zivid_Two_L100",
72+
zivid.CameraInfo.Model.zivid2PlusM130: "Zivid_Two_Plus_M130",
73+
zivid.CameraInfo.Model.zivid2PlusM60: "Zivid_Two_Plus_M60",
74+
zivid.CameraInfo.Model.zivid2PlusL110: "Zivid_Two_Plus_L110",
75+
zivid.CameraInfo.Model.zivid2PlusMR130: "Zivid_Two_Plus_MR130",
76+
zivid.CameraInfo.Model.zivid2PlusMR60: "Zivid_Two_Plus_MR60",
77+
zivid.CameraInfo.Model.zivid2PlusLR110: "Zivid_Two_Plus_LR110",
78+
zivid.CameraInfo.Model.zivid3XL250: "Zivid_Three_XL250",
79+
}
80+
if model not in model_map:
81+
raise RuntimeError(f"Unhandled camera model: {camera.info().model().to_string()}")
82+
83+
return model_map[model]
84+
85+
4086
def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
87+
"""get a list of available cameras and connect to them.
88+
89+
Args:
90+
cameras: List of Zivid cameras
91+
92+
Returns:
93+
List of connected Zivid cameras
94+
95+
"""
4196
connected_cameras = []
4297
for camera in cameras:
4398
if camera.state.status == zivid.CameraState.Status.available:
@@ -60,10 +115,11 @@ def get_transformation_matrices_from_yaml(
60115
cameras: List of connected Zivid cameras
61116
62117
Returns:
63-
A dictionary mapping camera serial numbers to their corresponding transformation matrices
118+
transforms_mapped_to_cameras: A dictionary mapping camera serial numbers to their corresponding transformation matrices
64119
65120
Raises:
66121
RuntimeError: If a YAML file for a camera is missing
122+
67123
"""
68124
transforms_mapped_to_cameras = {}
69125
for camera in cameras:
@@ -100,11 +156,12 @@ def main() -> None:
100156

101157
# DOCTAG-START-CAPTURE-AND-STITCH-POINT-CLOUDS-PART1
102158
for camera in connected_cameras:
103-
settings_path = (
104-
get_sample_data_path()
105-
/ "Settings"
106-
/ f"{camera.info.model_name.replace('2+', 'Two_Plus').replace('2', 'Two').replace('3', 'Three').replace(' ', '_')}_ManufacturingSpecular.yml"
107-
)
159+
if args.settings_path is not None:
160+
settings_path = args.settings_path
161+
else:
162+
settings_path = (
163+
get_sample_data_path() / "Settings" / f"{sanitized_model_name(camera)}_ManufacturingSpecular.yml"
164+
)
108165
print(f"Imaging from camera: {camera.info.serial_number}")
109166
frame = camera.capture(zivid.Settings.load(settings_path))
110167
unorganized_point_cloud = frame.point_cloud().to_unorganized_point_cloud()
@@ -115,13 +172,13 @@ def main() -> None:
115172
print("Voxel-downsampling the stitched point cloud")
116173
final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)
117174

118-
print(f"Visualizing the stitched point cloud ({len(final_point_cloud.size())} data points)")
175+
print(f"Visualizing the stitched point cloud ({final_point_cloud.size} data points)")
119176
display_pointcloud(final_point_cloud)
120177

121-
if args.output_file:
122-
print(f"Saving {len(final_point_cloud.size())} data points to {args.output_file}")
178+
if args.output_file is not None:
179+
print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
123180
export_unorganized_point_cloud(
124-
final_point_cloud, PLY(args.output_file, layout=PLY.Layout.unordered, color_space=ColorSpace.srgb)
181+
final_point_cloud, PLY(str(args.output_file), layout=PLY.Layout.unordered, color_space=ColorSpace.srgb)
125182
)
126183

127184

source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,11 @@ def _user_arguments() -> argparse.Namespace:
3030
type=Path,
3131
)
3232
parser.add_argument(
33-
"-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)."
33+
"-o",
34+
"--output-file",
35+
required=False,
36+
type=Path,
37+
help="Save the stitched point cloud to a file with this name (.ply).",
3438
)
3539
return parser.parse_args()
3640

@@ -95,7 +99,7 @@ def main() -> None:
9599
print(f"Visualizing the stitched point cloud ({final_point_cloud.size}) data points)")
96100
display_point_cloud(final_point_cloud)
97101

98-
if args.output_file:
102+
if args.output_file is not None:
99103
print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
100104
export_unorganized_point_cloud(
101105
final_point_cloud,

source/applications/point_cloud_tutorial.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ point cloud. While doing so, all NaN values are removed, and the point
143143
cloud is flattened to a 1D array.
144144

145145
([go to
146-
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L109))
146+
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L166))
147147

148148
``` sourceCode python
149149
unorganized_point_cloud = frame.point_cloud().to_unorganized_point_cloud()
@@ -155,7 +155,7 @@ The unorganized point cloud can be extended with additional unorganized
155155
point clouds.
156156

157157
([go to
158-
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L76))
158+
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L80))
159159

160160
``` sourceCode python
161161
stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix))
@@ -227,7 +227,7 @@ that in this sample it is not necessary to create a new instance, as the
227227
untransformed point cloud is not used after the transformation.
228228

229229
([go to
230-
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L111))
230+
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L168))
231231

232232
``` sourceCode python
233233
transformed_unorganized_point_cloud = unorganized_point_cloud.transformed(transformation_matrix)
@@ -237,7 +237,7 @@ Even the in-place API returns the transformed point cloud, so you can
237237
use it directly, as in the example below.
238238

239239
([go to
240-
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L76))
240+
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L80))
241241

242242
``` sourceCode python
243243
stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix))
@@ -327,7 +327,7 @@ minPointsPerVoxel can be used to only fill voxels that both captures
327327
"agree" on.
328328

329329
([go to
330-
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L115))
330+
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L172))
331331

332332
``` sourceCode python
333333
final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)

0 commit comments

Comments
 (0)