385 changes: 98 additions & 287 deletions bop_toolkit_lib/dataset_params.py

Large diffs are not rendered by default.

93 changes: 33 additions & 60 deletions bop_toolkit_lib/inout.py
@@ -4,7 +4,6 @@
"""I/O functions."""

import os
import gzip
import struct
import numpy as np
import imageio
@@ -69,65 +68,51 @@ def save_depth(path, im):
def load_json(path, keys_to_int=False):
"""Loads content of a JSON file.

:param path: Path to the JSON file. If ".json.gz" extension, opens with gzip.
:param path: Path to the JSON file.
:return: Content of the loaded JSON file.
"""

# Keys to integers.
def convert_keys_to_int(x):
return {int(k) if k.lstrip("-").isdigit() else k: v for k, v in x.items()}

# Open+decompress with gzip if ".json.gz" file extension
if path.endswith('.json.gz'):
f = gzip.open(path, "rt", encoding="utf8")
else:
f = open(path, "r")
if keys_to_int:
content = json.load(f, object_hook=lambda x: convert_keys_to_int(x))
else:
content = json.load(f)

f.close()
with open(path, "r") as f:
if keys_to_int:
content = json.load(f, object_hook=lambda x: convert_keys_to_int(x))
else:
content = json.load(f)

return content
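
With gzip handling removed from load_json, callers that still consume ".json.gz" files need to decompress outside the toolkit. A minimal sketch of that pattern using only the standard library (the wrapper name load_json_gz is illustrative, not part of the toolkit API):

```python
import gzip
import json

def load_json_gz(path, keys_to_int=False):
    """Hypothetical wrapper: load a gzip-compressed JSON file."""
    def convert_keys_to_int(x):
        return {int(k) if k.lstrip("-").isdigit() else k: v for k, v in x.items()}

    with gzip.open(path, "rt", encoding="utf8") as f:
        hook = convert_keys_to_int if keys_to_int else None
        return json.load(f, object_hook=hook)
```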


def save_json(path, content, compress=False):
def save_json(path, content):
"""Saves the provided content to a JSON file.

:param path: Path to the output JSON file.
:param content: Dictionary/list to save.
:param compress: Saves as a gzip archive, appends ".gz" extension to filepath.
"""
if compress:
path += ".gz"
f = gzip.open(path, "wt", encoding="utf8")
else:
f = open(path, "w")

if isinstance(content, dict):
f.write("{\n")
content_sorted = sorted(content.items(), key=lambda x: x[0])
for elem_id, (k, v) in enumerate(content_sorted):
f.write(' "{}": {}'.format(k, json.dumps(v, sort_keys=True)))
if elem_id != len(content) - 1:
f.write(",")
f.write("\n")
f.write("}")

elif isinstance(content, list):
f.write("[\n")
for elem_id, elem in enumerate(content):
f.write(" {}".format(json.dumps(elem, sort_keys=True)))
if elem_id != len(content) - 1:
f.write(",")
f.write("\n")
f.write("]")
with open(path, "w") as f:
if isinstance(content, dict):
f.write("{\n")
content_sorted = sorted(content.items(), key=lambda x: x[0])
for elem_id, (k, v) in enumerate(content_sorted):
f.write(' "{}": {}'.format(k, json.dumps(v, sort_keys=True)))
if elem_id != len(content) - 1:
f.write(",")
f.write("\n")
f.write("}")

elif isinstance(content, list):
f.write("[\n")
for elem_id, elem in enumerate(content):
f.write(" {}".format(json.dumps(elem, sort_keys=True)))
if elem_id != len(content) - 1:
f.write(",")
f.write("\n")
f.write("]")

else:
json.dump(content, f, sort_keys=True)

f.close()
else:
json.dump(content, f, sort_keys=True)
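
Symmetrically, compressed output can be produced by wrapping the plain save_json after the fact; a sketch under the same assumption (save_json_gz is illustrative only):

```python
import gzip
import shutil
from bop_toolkit_lib import inout

def save_json_gz(path, content):
    """Hypothetical wrapper: save with the toolkit, then gzip the file."""
    inout.save_json(path, content)
    with open(path, "rb") as f_in, gzip.open(path + ".gz", "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
```

Compressing after the write keeps the toolkit's one-element-per-line formatting intact inside the archive, which the old compress=True path also preserved.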


def load_cam_params(path):
@@ -434,7 +419,7 @@ def check_bop_results(path, version="bop19"):
def check_coco_results(path, version="bop22", ann_type="segm", enforce_no_segm_if_bbox=False):
"""Checks if the format of extended COCO results is correct.

:param path: Path to a file with coco estimates. If ".json.gz" extension, opens with gzip.
:param path: Path to a file with coco estimates.
:param version: Version of the results.
:param ann_type: type of annotation expected in the file.
"bbox" -> bounding boxes
@@ -485,7 +470,7 @@ def check_coco_results(path, version="bop22", ann_type="segm", enforce_no_segm_if_bbox=False):
return check_passed, check_msg


def save_coco_results(path, results, version="bop22", compress=False):
def save_coco_results(path, results, version="bop22"):
"""Saves detections/instance segmentations for each scene in coco format.

"bbox" should be [x,y,w,h] in pixels
@@ -496,6 +481,7 @@ def save_coco_results(path, results, version="bop22"):
:param version: Version of the results.
"""

# See docs/bop_challenge_2022.md for details.
if version == "bop22":
coco_results = []
for res in results:
@@ -512,7 +498,7 @@
"time": res["run_time"] if "run_time" in res else -1,
}
)
save_json(path, coco_results, compress)
save_json(path, coco_results)
else:
raise ValueError("Unknown version of BOP detection results.")

@@ -621,7 +607,6 @@ def load_ply(path):
"float": ("f", 4),
"double": ("d", 8),
"int": ("i", 4),
"uint": ("I", 4),
"uchar": ("B", 1),
}

@@ -858,19 +843,7 @@ def save_ply2(


def get_im_targets(im_gt, im_gt_info, visib_gt_min, eval_mode="localization"):
"""
From an image gt and gt info, given a minimum visibility, get valid object evaluation targets.

Output format: dict[obj_id]
{
<obj_id1>: {'inst_count': <inst_count_1>},
<obj_id2>: {'inst_count': <inst_count_2>},
...
}
"""
im_targets = {}
# Objects gt detection are have gt and gt_info have same order.
# object id is retrieved from gt and visibility from gt info.
for gt_id, gt in enumerate(im_gt):
gt_info = im_gt_info[gt_id]
obj_id = gt["obj_id"]
@@ -883,4 +856,4 @@ def get_im_targets(im_gt, im_gt_info, visib_gt_min, eval_mode="localization"):
if obj_id not in im_targets:
im_targets[obj_id] = {"inst_count": 0}
im_targets[obj_id]["inst_count"] += 1
return im_targets
return im_targets
Copilot AI commented on Jan 15, 2026:

Removing the docstring from the get_im_targets function reduces code documentation. The previous docstring provided valuable information about the function's purpose and output format.
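
If that documentation is worth keeping, it can be restored verbatim on the current implementation; a sketch using the removed text:

```python
def get_im_targets(im_gt, im_gt_info, visib_gt_min, eval_mode="localization"):
    """From an image gt and gt info, given a minimum visibility, get valid
    object evaluation targets.

    Output format: dict[obj_id]
    {
        <obj_id1>: {'inst_count': <inst_count_1>},
        <obj_id2>: {'inst_count': <inst_count_2>},
        ...
    }
    """
```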
2 changes: 1 addition & 1 deletion bop_toolkit_lib/misc.py
@@ -131,6 +131,7 @@ def precompute_lazy(depth_im, K):
:return: hxw ndarray (Xs/depth_im, Ys/depth_im)
"""
if depth_im.shape != Precomputer.depth_im_shape:
Precomputer.depth_im_shape = depth_im.shape
Precomputer.xs, Precomputer.ys = np.meshgrid(
np.arange(depth_im.shape[1]), np.arange(depth_im.shape[0])
)
@@ -142,7 +143,6 @@
Precomputer.pre_Xs = (Precomputer.xs - K[0, 2]) / np.float64(K[0, 0])
Precomputer.pre_Ys = (Precomputer.ys - K[1, 2]) / np.float64(K[1, 1])

Precomputer.depth_im_shape = depth_im.shape
return Precomputer.pre_Xs, Precomputer.pre_Ys
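
Moving the depth_im_shape update into the shape-change branch keeps the cache bookkeeping next to the recomputation it guards, without changing behavior. A usage sketch of the lazy cache, assuming the documented return convention (pre_Xs = X/depth, pre_Ys = Y/depth) and the identity-reuse contract that the removed test_precomputer exercised; the values here are illustrative:

```python
import numpy as np
from bop_toolkit_lib import misc

depth_im = np.full((480, 640), 1.5)  # synthetic depth map
K = np.array([[572.4, 0.0, 325.3],
              [0.0, 573.6, 242.0],
              [0.0, 0.0, 1.0]])

# First call computes and caches the per-pixel ratios.
pre_Xs, pre_Ys = misc.Precomputer.precompute_lazy(depth_im, K)

# Back-project the depth map to an hxwx3 point cloud.
pts = np.dstack([pre_Xs * depth_im, pre_Ys * depth_im, depth_im])

# A second call with the same shape and K returns the cached arrays.
pre_Xs2, pre_Ys2 = misc.Precomputer.precompute_lazy(depth_im, K)
assert pre_Xs2 is pre_Xs and pre_Ys2 is pre_Ys
```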


2 changes: 1 addition & 1 deletion bop_toolkit_lib/renderer_batch.py
@@ -96,7 +96,7 @@ def run_vsd(self, all_im_errs):
for worker_id in range(num_workers_used):
cmd = [
"python",
"bop_toolkit_lib/call_vsd_worker.py",
"external/bop_toolkit/bop_toolkit_lib/call_vsd_worker.py",
Copilot AI commented on Jan 15, 2026:

The path to the worker script has been changed from bop_toolkit_lib/call_vsd_worker.py to external/bop_toolkit/bop_toolkit_lib/call_vsd_worker.py. This appears to be an environment-specific path that may not work for all users. The path should be relative to the repository root or use __file__ to construct a path relative to the current module.
f"--input_dir={self.tmp_dir}",
f"--worker_id={worker_id}",
]
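
A portable variant along the lines the review suggests is to resolve the worker script relative to the module rather than hard-coding either repository layout (a sketch, not the fix adopted here):

```python
import os

# call_vsd_worker.py sits in the same package as renderer_batch.py,
# so derive its path from this module's location.
_WORKER_SCRIPT = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "call_vsd_worker.py"
)

cmd = [
    "python",
    _WORKER_SCRIPT,
    f"--input_dir={self.tmp_dir}",  # as in run_vsd above
    f"--worker_id={worker_id}",
]
```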
35 changes: 17 additions & 18 deletions bop_toolkit_lib/tests/eval_bop22_coco_test.py
@@ -5,20 +5,20 @@
from tqdm import tqdm
from bop_toolkit_lib import inout

#
Copilot AI commented on Jan 15, 2026:

Empty comment line should be removed.

Suggested change: delete the stray "#" line above.
EPS_AP = 0.001

# Define path to directories
RESULT_PATH = "./bop_toolkit_lib/tests/data/"
EVAL_PATH = "./bop_toolkit_lib/tests/eval/"
LOGS_PATH = "./bop_toolkit_lib/tests/logs"
os.makedirs(EVAL_PATH, exist_ok=True)
os.makedirs(LOGS_PATH, exist_ok=True)
# Define the input directory
INPUT_DIR = "./bop_toolkit_lib/tests/data/"

# Define the output directory
OUTPUT_DIR = "./bop_toolkit_lib/tests/logs"
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Define the dataset dictionary
# tuples: (submission name, annotation type, compressed)
FILE_DICTIONARY = {
"ycbv_zebra_segm": ("zebraposesat-effnetb4_ycbv-test_5ed0eecc-96f8-498b-9438-d586d4d92528", "segm", False),
"ycbv_gdrnppdet_bbox": ("gdrnppdet-pbrreal_ycbv-test_abe6c5f1-cb26-4bbd-addc-bb76dd722a96", "bbox", True),
"ycbv_zebra_segm": ("zebraposesat-effnetb4_ycbv-test_5ed0eecc-96f8-498b-9438-d586d4d92528", "segm"),
"ycbv_gdrnppdet_bbox": ("gdrnppdet-pbrreal_ycbv-test_abe6c5f1-cb26-4bbd-addc-bb76dd722a96", "bbox"),
}

# From BOP website
@@ -57,24 +57,22 @@
}

# Loop through each entry in the dictionary and execute the command
for dataset_method_name, (sub_name, ann_type, compressed) in tqdm(
for dataset_method_name, (sub_name, ann_type) in tqdm(
FILE_DICTIONARY.items(), desc="Executing..."
):
ext = ".json.gz" if compressed else ".json"
result_filename = sub_name + ext
command = [
"python",
"scripts/eval_bop22_coco.py",
"--results_path", RESULT_PATH,
"--eval_path", EVAL_PATH,
"--result_filenames", result_filename,
"--results_path", INPUT_DIR,
"--eval_path", INPUT_DIR,
"--result_filenames", sub_name+".json",
"--bbox_type", "amodal",
"--ann_type", ann_type
]
command_ = " ".join(command)
print(f"Executing: {command_}")
start_time = time.time()
log_file_path = f"{LOGS_PATH}/eval_bop22_coco_test_{dataset_method_name}.txt"
log_file_path = f"{OUTPUT_DIR}/eval_bop22_coco_test_{dataset_method_name}.txt"
with open(log_file_path, "a") as output_file:
subprocess.run(command, stdout=output_file, stderr=subprocess.STDOUT)
end_time = time.time()
@@ -84,10 +82,11 @@


# Check scores for each dataset
for sub_short_name, (sub_name, ann_type, compressed) in tqdm(FILE_DICTIONARY.items(), desc="Verifying..."):
for sub_short_name, (sub_name, ann_type) in tqdm(FILE_DICTIONARY.items(), desc="Verifying..."):
if sub_short_name in EXPECTED_OUTPUT:
ann_type = FILE_DICTIONARY[sub_short_name][1]
eval_filename = f"scores_bop22_coco_{ann_type}.json"
eval_file_path = os.path.join(RESULT_PATH, sub_name, eval_filename)
eval_file_path = os.path.join(INPUT_DIR, sub_name, eval_filename)
eval_scores = inout.load_json(eval_file_path)
for key, expected_score in EXPECTED_OUTPUT[sub_short_name].items():
eval_score = eval_scores.get(key)
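
The tail of the verification loop is collapsed above; presumably it asserts each score matches the expected value within EPS_AP. A hedged sketch of such a check (the repository's exact assertion may differ):

```python
for key, expected_score in EXPECTED_OUTPUT[sub_short_name].items():
    eval_score = eval_scores.get(key)
    # Hypothetical completion: EPS_AP is the tolerance defined at the top.
    assert eval_score is not None, f"missing score for {key}"
    assert abs(eval_score - expected_score) < EPS_AP, (
        f"{sub_short_name}/{key}: got {eval_score}, expected {expected_score}"
    )
```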
48 changes: 0 additions & 48 deletions bop_toolkit_lib/tests/test_misc.py
@@ -91,54 +91,6 @@ def test_project_pts(self):
proj_htt[i] = pose_error_htt.project_pts_htt(self.pts, camera, R_np[i], t_np[i])
self.assertTrue(np.allclose(proj_htt, proj_np, atol=1e-4))

def test_precomputer(self):

# precomputer static class start with None attributes
self.assertTrue(misc.Precomputer.xs is None)
self.assertTrue(misc.Precomputer.ys is None)
self.assertTrue(misc.Precomputer.pre_Xs is None)
self.assertTrue(misc.Precomputer.pre_Ys is None)
self.assertTrue(misc.Precomputer.K is None)

Ka = np.eye(3)
depth_ima = np.ones((10,10))

pre_Xs1, pre_Ys1 = misc.Precomputer.precompute_lazy(depth_ima, Ka)
self.assertEqual(depth_ima.shape, pre_Xs1.shape)
self.assertEqual(depth_ima.shape, pre_Ys1.shape)

# same inputs should return the same internal objects
pre_Xs1_bis, pre_Ys1_bis = misc.Precomputer.precompute_lazy(depth_ima, Ka)
self.assertEqual(id(pre_Xs1), id(pre_Xs1_bis))
self.assertEqual(id(pre_Ys1), id(pre_Ys1_bis))
self.assertTrue(np.allclose(pre_Xs1, pre_Xs1_bis, atol=1e-9))
self.assertTrue(np.allclose(pre_Ys1, pre_Ys1_bis, atol=1e-9))

# different intrinsics should trigger recomputation
Kb = 2*np.eye(3)
pre_Xs2, pre_Ys2 = misc.Precomputer.precompute_lazy(depth_ima, Kb)
self.assertNotEqual(id(pre_Xs1), id(pre_Xs2))
self.assertNotEqual(id(pre_Ys1), id(pre_Ys2))
self.assertFalse(np.allclose(pre_Xs1, pre_Xs2, atol=1e-9))
self.assertFalse(np.allclose(pre_Ys1, pre_Ys2, atol=1e-9))

# different depth image should trigger recomputation
depth_imb = np.ones((20,20))
pre_Xs3, pre_Ys3 = misc.Precomputer.precompute_lazy(depth_imb, Kb)
self.assertNotEqual(id(pre_Xs2), id(pre_Xs3))
self.assertNotEqual(id(pre_Ys2), id(pre_Ys3))
self.assertNotEqual(pre_Xs2.shape, pre_Xs3.shape)
self.assertNotEqual(pre_Ys2.shape, pre_Ys3.shape)

# different intrinsics and depth image should trigger recomputation
Kc = 3*np.eye(3)
depth_imc = np.ones((30,30))
pre_Xs4, pre_Ys4 = misc.Precomputer.precompute_lazy(depth_imc, Kc)
self.assertNotEqual(id(pre_Xs3), id(pre_Xs4))
self.assertNotEqual(id(pre_Ys3), id(pre_Ys4))
self.assertNotEqual(pre_Xs3.shape, pre_Xs4.shape)
self.assertNotEqual(pre_Ys3.shape, pre_Ys4.shape)


if __name__ == "__main__":
unittest.main()
2 changes: 0 additions & 2 deletions bop_toolkit_lib/visualization.py
@@ -268,8 +268,6 @@ def vis_object_poses(
{"name": "min diff", "fmt": ":.3f", "val": np.min(depth_diff_valid)},
{"name": "max diff", "fmt": ":.3f", "val": np.max(depth_diff_valid)},
{"name": "mean diff", "fmt": ":.3f", "val": np.mean(depth_diff_valid)},
{"name": "median diff", "fmt": ":.3f", "val": np.median(np.abs(depth_diff_valid))},
{"name": "25 percentile", "fmt": ":.3f", "val": np.percentile(np.abs(depth_diff_valid), 25)},
]
depth_diff_vis = write_text_on_image(depth_diff_vis, depth_info)
inout.save_im(vis_depth_diff_path, depth_diff_vis)
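
The median and 25th-percentile diagnostics are dropped from the depth-diff overlay; if they are still wanted downstream, they can be recomputed from the same data (a sketch; depth_diff_valid stands in for the masked depth differences from the surrounding code):

```python
import numpy as np

# Hypothetical stand-in for the valid depth differences used above.
depth_diff_valid = np.random.randn(1000)

# Recompute the removed diagnostics.
abs_diff = np.abs(depth_diff_valid)
median_diff = np.median(abs_diff)       # was "median diff"
p25_diff = np.percentile(abs_diff, 25)  # was "25 percentile"
```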
1 change: 0 additions & 1 deletion docs/bop_datasets_format.md
@@ -22,7 +22,6 @@ DATASET_NAME
│ │ ├─ scene_camera.json
│ │ ├─ scene_gt.json
│ │ ├─ scene_gt_info.json
│ │ ├─ scene_gt_coco.json
│ │ ├─ depth
│ │ ├─ mask
│ │ ├─ mask_visib
4 changes: 2 additions & 2 deletions requirements.txt
@@ -3,12 +3,12 @@ kiwisolver==1.3.1
matplotlib==2.2.4
imageio==2.5.0
pypng==0.0.19
Cython>=0.29.24
Cython==0.29.24
PyOpenGL==3.1.0
triangle>=20190115.2
glumpy==1.1.0
opencv-python>=4.3.0.36
Pillow>=8.2.0,<=9.5.0
Pillow>=8.2.0
git+https://github.com/MartinSmeyer/cocoapi.git@v1.0#subdirectory=PythonAPI
vispy>=0.6.5
webdataset>=0.1.62