Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 8 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -301,7 +301,7 @@ pip install "evalai"
evalai set-token <your token>

# Step 3: Copy the command printed above and submit to the leaderboard
evalai challenge 2010 phase 4018 submit --file av2_submit.zip --large --private
# evalai challenge 2010 phase 4018 submit --file av2_submit.zip --large --private
evalai challenge 2210 phase 4396 submit --file av2_submit_v2.zip --large --private
```

Expand All @@ -318,29 +318,31 @@ python eval.py model=nsfp dataset_path=/home/kin/data/av2/h5py/demo/val
# The output of above command will be like:
Model: DeFlow, Checkpoint from: /home/kin/model_zoo/v2/seflow_best.ckpt
We already write the flow_est into the dataset, please run following commend to visualize the flow. Copy and paste it to your terminal:
python tools/visualization.py --res_name 'seflow_best' --data_dir /home/kin/data/av2/preprocess_v2/sensor/vis
python tools/visualization.py vis --res_name 'seflow_best' --data_dir /home/kin/data/av2/preprocess_v2/sensor/vis
Enjoy! ^v^ ------

# Then run the command in the terminal:
python tools/visualization.py --res_name 'seflow_best' --data_dir /home/kin/data/av2/preprocess_v2/sensor/vis
python tools/visualization.py vis --res_name 'seflow_best' --data_dir /home/kin/data/av2/preprocess_v2/sensor/vis
```

https://github.com/user-attachments/assets/f031d1a2-2d2f-4947-a01f-834ed1c146e6

For easy comparison with ground truth and other methods, we also provide a multi-visualization Open3D window:
```bash
python tools/visualization.py --mode mul --res_name "['flow', 'seflow_best']" --data_dir /home/kin/data/av2/preprocess_v2/sensor/vis
python tools/visualization.py vis --res_name "['flow', 'seflow_best']" --data_dir /home/kin/data/av2/preprocess_v2/sensor/vis
```

Or another way to interact with [rerun](https://github.com/rerun-io/rerun) but please only vis scene by scene, not all at once.
**Tips**: To quickly create qualitative results for all methods, you can use the multiple-results comparison mode, select a good viewpoint, and then save screenshots for all frames by pressing the `P` key. You will find all methods' results saved in the output folder (default is `logs/imgs`). Enjoy it!


_Rerun_: Another way to interact is through [rerun](https://github.com/rerun-io/rerun), but please only visualize scene by scene, not all at once.

```bash
python tools/visualization_rerun.py --data_dir /home/kin/data/av2/h5py/demo/train --res_name "['flow', 'deflow']"
```

https://github.com/user-attachments/assets/07e8d430-a867-42b7-900a-11755949de21


## Cite Us

[*OpenSceneFlow*](https://github.com/KTH-RPL/OpenSceneFlow) is originally designed by [Qingwen Zhang](https://kin-zhang.github.io/) from DeFlow and SeFlow.
Expand Down
7 changes: 4 additions & 3 deletions src/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ class HDF5Dataset(Dataset):
def __init__(self, directory, \
transform=None, n_frames=2, ssl_label=None, \
eval = False, leaderboard_version=1, \
vis_name=''):
vis_name='', index_flow=False):
'''
Args:
directory: the directory of the dataset, the folder should contain some .h5 file and index_total.pkl.
Expand All @@ -199,6 +199,7 @@ def __init__(self, directory, \
* eval: if True, use the eval index (only used it for leaderboard evaluation)
* leaderboard_version: 1st or 2nd, default is 1. If '2', we will use the index_eval_v2.pkl from assets/docs.
* vis_name: the data of the visualization, default is ''.
* index_flow: if True, use the flow index for training or visualization.
'''
super(HDF5Dataset, self).__init__()
self.directory = directory
Expand Down Expand Up @@ -247,7 +248,7 @@ def __init__(self, directory, \

# for some dataset that annotated HZ is different.... like truckscene and nuscene etc.
self.train_index = None
if not eval and ssl_label is None and transform is not None: # transform indicates whether we are in training mode.
if (not eval and ssl_label is None and transform is not None) or index_flow: # transform indicates whether we are in training mode.
# check if train seq all have gt.
one_scene_id = list(self.scene_id_bounds.keys())[0]
check_flow_exist = True
Expand Down Expand Up @@ -347,7 +348,7 @@ def __getitem__(self, index_):
data_dict[f'gmh{i+1}'] = past_gm
data_dict[f'poseh{i+1}'] = past_pose

for data_key in self.vis_name + ['ego_motion', 'lidar_dt',
for data_key in self.vis_name + ['ego_motion', 'lidar_dt', 'lidar_center',
# ground truth information:
'flow', 'flow_is_valid', 'flow_category_indices', 'flow_instance_id', 'dufo']:
if data_key in f[key]:
Expand Down
2 changes: 1 addition & 1 deletion src/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@ def on_validation_epoch_end(self):
with open(str(self.save_res_path)+'.pkl', 'wb') as f:
pickle.dump((self.metrics.epe_3way, self.metrics.bucketed, self.metrics.epe_ssf), f)
print(f"We already write the {self.res_name} into the dataset, please run following commend to visualize the flow. Copy and paste it to your terminal:")
print(f"python tools/visualization.py --res_name '{self.res_name}' --data_dir {self.dataset_path}")
print(f"python tools/visualization.py vis --res_name '{self.res_name}' --data_dir {self.dataset_path}")
print(f"Enjoy! ^v^ ------ \n")

self.metrics = OfficialMetrics()
Expand Down
72 changes: 72 additions & 0 deletions src/utils/mics.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,78 @@ def make_colorwheel(transitions: tuple=DEFAULT_TRANSITIONS) -> np.ndarray:
return colorwheel


def error_to_color(
    error_magnitude: np.ndarray,
    max_error: float,
    color_map: str = "jet"
) -> np.ndarray:
    """
    Convert flow error magnitudes to an RGB color visualization.

    Args:
        error_magnitude: Array of non-negative error values, any shape.
        max_error: Error value mapped to the top of the color scale; errors
            are clipped to [0, max_error]. If <= 0, everything maps to the
            low end of the scale.
        color_map: "jet" (blue -> cyan -> green -> yellow -> red, recommended
            for error visualization) or "hot" (black -> red -> yellow -> white).

    Returns:
        uint8 RGB array of shape (*error_magnitude.shape, 3).

    Raises:
        ValueError: If color_map is neither "jet" nor "hot".
    """
    if max_error > 0:
        t = np.clip(error_magnitude / max_error, 0.0, 1.0)
    else:
        # Degenerate scale: treat all errors as zero (low end of the map).
        t = np.zeros_like(error_magnitude, dtype=float)

    colors = np.zeros((*t.shape, 3), dtype=np.uint8)

    def _ramp(x: np.ndarray) -> np.ndarray:
        # Scale [0, 1] values to uint8, clipping first so values that stray
        # slightly above 1.0 saturate at 255 instead of wrapping around
        # under the uint8 cast.
        return np.clip(255.0 * x, 0, 255).astype(np.uint8)

    if color_map == "jet":
        # Blue -> cyan (low error)
        idx = t < 0.25
        colors[idx, 2] = 255
        colors[idx, 1] = _ramp(t[idx] * 4)

        # Cyan -> green
        idx = (t >= 0.25) & (t < 0.5)
        colors[idx, 1] = 255
        colors[idx, 2] = _ramp(1 - (t[idx] - 0.25) * 4)

        # Green -> yellow
        idx = (t >= 0.5) & (t < 0.75)
        colors[idx, 1] = 255
        colors[idx, 0] = _ramp((t[idx] - 0.5) * 4)

        # Yellow -> red (high error)
        idx = t >= 0.75
        colors[idx, 0] = 255
        colors[idx, 1] = _ramp(1 - (t[idx] - 0.75) * 4)

    elif color_map == "hot":
        # Segment boundaries are exact thirds; the previous 0.33/0.67
        # approximation let (t - 0.33) * 3 exceed 1.0 near t ~= 0.665,
        # which wrapped around after the uint8 cast and produced dark
        # pixels at the red->yellow boundary.

        # Black -> red
        idx = t < 1 / 3
        colors[idx, 0] = _ramp(t[idx] * 3)

        # Red -> yellow
        idx = (t >= 1 / 3) & (t < 2 / 3)
        colors[idx, 0] = 255
        colors[idx, 1] = _ramp((t[idx] - 1 / 3) * 3)

        # Yellow -> white
        idx = t >= 2 / 3
        colors[idx, 0] = 255
        colors[idx, 1] = 255
        colors[idx, 2] = _ramp((t[idx] - 2 / 3) * 3)

    else:
        raise ValueError(f"Unsupported color map: {color_map}. Use 'jet' or 'hot'.")

    return colors

def flow_to_rgb(
flow: np.ndarray,
flow_max_radius: Optional[float]=None,
Expand Down
Loading