Merged
Commits
26 commits
6c57581
set up initial test framework for scenic store
YongmingFrankGe Jan 1, 2022
3813549
Use mongoDB for quick query in nuScenes dataset
eeeeenchanted Jan 24, 2022
b6a6d56
Add box class
eeeeenchanted Feb 4, 2022
af523f6
Update scenic_recognize to return two tables as discussed
eeeeenchanted Feb 8, 2022
f270783
scenic_recognize pass in a list of filenames & add frame num
eeeeenchanted Feb 12, 2022
61889c1
Save object and camera to database
eeeeenchanted Feb 16, 2022
8fe4383
fix front end metadata view for scenic tables, query for trajectories…
YongmingFrankGe Feb 16, 2022
328c3e5
write skeleton for the logic of overlay, most importantly the helper …
YongmingFrankGe Feb 16, 2022
438c6c9
export direct export and import tables from postgres
YongmingFrankGe Feb 23, 2022
b780f8c
delete contaminated file
YongmingFrankGe Feb 23, 2022
377ab12
Remove frameId from trajectory table
eeeeenchanted Feb 17, 2022
edd43a4
transform 3d to 2d
eeeeenchanted Feb 23, 2022
a02bde1
fix a wrong tag and delete mongoclient
jimlinntu Feb 25, 2022
6c4c7b4
XinyiJi1: Doverlay, with bug of sql
chanwutk Feb 27, 2022
62d7efa
fix bug
chanwutk Mar 1, 2022
125111f
remove scenic_* prefix; prepare for combining the original and scenic…
chanwutk Mar 2, 2022
9bead62
replace world with scenic_world
chanwutk Mar 2, 2022
17e5744
clean up util
chanwutk Mar 2, 2022
e880a32
clean up video_util.py
chanwutk Mar 2, 2022
30a4071
clean up
chanwutk Mar 2, 2022
e83b44e
fix errors from rebase
chanwutk Mar 4, 2022
9ba83b8
clean up insert_general_trajectory
chanwutk Mar 4, 2022
96b1a5c
Automated lint and format fixes
Mar 4, 2022
14868f2
indent using spaces
chanwutk Mar 4, 2022
8ad68e7
clean up
chanwutk Mar 4, 2022
b3966a3
add support for directly import all mini dataset, have uploaded the d…
chanwutk Mar 4, 2022
3 changes: 2 additions & 1 deletion .gitignore
@@ -4,4 +4,5 @@ __pycache__
yolov4-deepsort
yolov5-deepsort
.mypy_cache
.idea
.idea
.ipynb_checkpoints
100 changes: 100 additions & 0 deletions apperception/box.py
@@ -0,0 +1,100 @@
import numpy as np
from pyquaternion import Quaternion


class Box:
"""Simple data class representing a 3d box including, label, score and velocity."""

def __init__(self, center, size, orientation: Quaternion):
"""
:param center: Center of box given as x, y, z.
:param size: Size of box in width, length, height.
:param orientation: Box orientation.
"""
assert not np.any(np.isnan(center))
assert not np.any(np.isnan(size))
assert len(center) == 3
assert len(size) == 3
assert isinstance(orientation, Quaternion)

self.center = np.array(center)
self.wlh = np.array(size)
self.orientation = orientation

@property
def rotation_matrix(self) -> np.ndarray:
"""
Return a rotation matrix.
:return: <np.float: 3, 3>. The box's rotation matrix.
"""
return self.orientation.rotation_matrix

def translate(self, x: np.ndarray) -> None:
"""
Applies a translation.
:param x: <np.float: 3, 1>. Translation in x, y, z direction.
"""
self.center += x

def rotate(self, quaternion: Quaternion) -> None:
"""
Rotates box.
:param quaternion: Rotation to apply.
"""
self.center = np.dot(quaternion.rotation_matrix, self.center)
self.orientation = quaternion * self.orientation

def corners(self, wlh_factor: float = 1.0) -> np.ndarray:
"""
Returns the bounding box corners.
:param wlh_factor: Multiply w, l, h by a factor to scale the box.
:return: <np.float: 3, 8>. First four corners are the ones facing forward.
The last four are the ones facing backwards.
"""
w, l, h = self.wlh * wlh_factor

# 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
corners = np.vstack((x_corners, y_corners, z_corners))

# Rotate
corners = np.dot(self.orientation.rotation_matrix, corners)

# Translate
x, y, z = self.center
corners[0, :] = corners[0, :] + x
corners[1, :] = corners[1, :] + y
corners[2, :] = corners[2, :] + z

return corners

def bottom_corners(self) -> np.ndarray:
"""
Returns the four bottom corners.
:return: <np.float: 3, 4>. Bottom corners. First two face forward, last two face backwards.
"""
return self.corners()[:, [2, 3, 7, 6]]

def view_points(self, points: np.ndarray, view: np.ndarray, normalize=True) -> np.ndarray:
viewpad = np.eye(4)
viewpad[: view.shape[0], : view.shape[1]] = view

nbr_points = points.shape[1]

# Do operation in homogenous coordinates.
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]

if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)

return points

def map_2d(self, view: np.ndarray = np.eye(3), normalize: bool = True) -> np.ndarray:

corners = self.view_points(self.corners(), view, normalize)[:2, :]

return corners
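
A minimal usage sketch of the new Box class (not part of the diff). The import path and the camera intrinsic values are hypothetical, and numpy plus pyquaternion are assumed to be installed; for simplicity the box is placed directly in the camera frame (z = depth) so the perspective divide in view_points is well defined.

import numpy as np
from pyquaternion import Quaternion

from box import Box  # hypothetical import path; box.py lives under apperception/

# A 2 m (w) x 4 m (l) x 1.5 m (h) box 20 m in front of the camera, rotated 90 degrees about z.
box = Box(
    center=[2.0, 0.0, 20.0],
    size=[2.0, 4.0, 1.5],
    orientation=Quaternion(axis=[0, 0, 1], angle=np.pi / 2),
)

corners_3d = box.corners()       # <3, 8> corner coordinates after rotation and translation
bottom = box.bottom_corners()    # <3, 4> bottom face only

# Project the corners into the image plane with a hypothetical 3x3 camera intrinsic.
intrinsic = np.array([
    [1266.4, 0.0, 816.3],
    [0.0, 1266.4, 491.5],
    [0.0, 0.0, 1.0],
])
corners_2d = box.map_2d(view=intrinsic)  # <2, 8> pixel coordinates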
14 changes: 7 additions & 7 deletions apperception/layers.py
@@ -226,8 +226,8 @@ def __init__(self):

self.refl = nn.ReflectionPad2d(1)

self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
self.C1 = 0.01**2
self.C2 = 0.03**2

def forward(self, x, y):
x = self.refl(x)
@@ -236,12 +236,12 @@ def forward(self, x, y):
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)

sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_x = self.sig_x_pool(x**2) - mu_x**2
sigma_y = self.sig_y_pool(y**2) - mu_y**2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y

SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
SSIM_d = (mu_x**2 + mu_y**2 + self.C1) * (sigma_x + sigma_y + self.C2)

return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)

@@ -250,8 +250,8 @@ def compute_depth_errors(gt, pred):
"""Computation of error metrics between predicted and ground truth depths"""
thresh = torch.max((gt / pred), (pred / gt))
a1 = (thresh < 1.25).float().mean()
a2 = (thresh < 1.25 ** 2).float().mean()
a3 = (thresh < 1.25 ** 3).float().mean()
a2 = (thresh < 1.25**2).float().mean()
a3 = (thresh < 1.25**3).float().mean()

rmse = (gt - pred) ** 2
rmse = torch.sqrt(rmse.mean())
5 changes: 3 additions & 2 deletions apperception/metadata_context.py
@@ -55,7 +55,8 @@ def aggregate(self, func_name: str, parameters: List[str] = [], special_args: Li
return self

def get_coordinates(self):
self.aggregate("asMFJSON", special_args=["coordinates"])
# self.aggregate("asMFJSON", special_args=["coordinates"])
self.aggregate("asMFJSON")

def interval(self, starttime, endtime):
self.aggregate("atPeriodSet", parameters=["'{[%s, %s)}'" % (starttime, endtime)])
@@ -68,7 +69,7 @@ def __init__(self, func_name: str, parameters: list = []):


class asMFJSON(Aggregate):
def __init__(self, func_name="asMFJSON", parameters: list = [], interesting_fields=[""]):
def __init__(self, func_name="asMFJSON", parameters: list = [], interesting_fields=[]):
super().__init__(func_name, parameters)
self.interesting_fields = interesting_fields

17 changes: 8 additions & 9 deletions apperception/metadata_context_executor.py
@@ -4,6 +4,7 @@
from metadata_context import (Aggregate, Column, Filter, MetadataContext,
Predicate, Project, Scan, asMFJSON)
from metadata_util import common_aggregation, metadata_view
from scenic_util import join


class MetadataContextExecutor:
@@ -117,14 +118,12 @@ def execute(self, create_view: bool = False, view_name: str = ""):


def translate_aggregation(aggr_node: Aggregate, aggregated: str):
aggregated = aggr_node.func_name + "(" + aggregated
for param in aggr_node.parameters:
aggregated = aggregated + "," + param
aggregated += ")"
if aggr_node.func_name in common_aggregation:
if isinstance(aggr_node, asMFJSON):
if len(aggr_node.interesting_fields) > 0:
interesting_field = aggr_node.interesting_fields[0]
aggregated = aggregated + "::json->" + "'" + interesting_field + "'"
aggregated = f"{aggr_node.func_name}({join([aggregated, *aggr_node.parameters])})"

if isinstance(aggr_node, asMFJSON) and aggr_node.func_name in common_aggregation:
if len(aggr_node.interesting_fields) > 0:
interesting_field = aggr_node.interesting_fields[0]
aggregated += f"::json->'{interesting_field}'"
else:
aggregated += "::json"
return aggregated
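
A sketch of the string the refactored translate_aggregation builds (not part of the diff). It assumes scenic_util.join comma-joins its arguments and that "asMFJSON" appears in common_aggregation; neither helper is shown in this diff, and the column name below is hypothetical.

node = asMFJSON(interesting_fields=["coordinates"])
print(translate_aggregation(node, "trajCentroids"))
# expected: asMFJSON(trajCentroids)::json->'coordinates'

node_plain = asMFJSON()  # no interesting fields, after the default change in metadata_context.py
print(translate_aggregation(node_plain, "trajCentroids"))
# expected: asMFJSON(trajCentroids)::json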
148 changes: 148 additions & 0 deletions apperception/scenic_generate_df.py
@@ -0,0 +1,148 @@
import json

import pandas as pd


def scenic_generate_df():
with open("v1.0-mini/v1.0-mini/attribute.json") as f:
attribute_json = json.load(f)

with open("v1.0-mini/v1.0-mini/calibrated_sensor.json") as f:
calibrated_sensor_json = json.load(f)

with open("v1.0-mini/v1.0-mini/category.json") as f:
category_json = json.load(f)

with open("v1.0-mini/v1.0-mini/sample.json") as f:
sample_json = json.load(f)

with open("v1.0-mini/v1.0-mini/sample_data.json") as f:
sample_data_json = json.load(f)

with open("v1.0-mini/v1.0-mini/sample_annotation.json") as f:
sample_annotation_json = json.load(f)

with open("v1.0-mini/v1.0-mini/instance.json") as f:
instance_json = json.load(f)

with open("v1.0-mini/v1.0-mini/scene.json") as f:
scene_json = json.load(f)

with open("v1.0-mini/v1.0-mini/ego_pose.json") as f:
ego_pose_json = json.load(f)

df_sample_data = pd.DataFrame(sample_data_json)
df_sample_data = df_sample_data[
[
"token",
"sample_token",
"calibrated_sensor_token",
"ego_pose_token",
"timestamp",
"fileformat",
"filename",
"prev",
"next",
]
]
df_sample_data = df_sample_data[df_sample_data["fileformat"] == "jpg"]
df_sample_data.index = range(len(df_sample_data))

df_sample = pd.DataFrame(sample_json)
df_sample.columns = ["sample_token", "timestamp", "prev_sample", "next_sample", "scene_token"]
df_sample = df_sample[["sample_token", "prev_sample", "next_sample", "scene_token"]]
df_sample_data = pd.merge(df_sample_data, df_sample, on="sample_token", how="left")

df_calibrated_sensor = pd.DataFrame(calibrated_sensor_json)
df_calibrated_sensor.columns = [
"calibrated_sensor_token",
"sensor_token",
"camera_translation",
"camera_rotation",
"camera_intrinsic",
]
df_calibrated_sensor = df_calibrated_sensor[
["calibrated_sensor_token", "camera_translation", "camera_rotation", "camera_intrinsic"]
]
df_sample_data = pd.merge(
df_sample_data, df_calibrated_sensor, on="calibrated_sensor_token", how="left"
)
df_sample_data = df_sample_data.drop(columns=["calibrated_sensor_token"])

df_ego_pose = pd.DataFrame(ego_pose_json)
df_ego_pose.columns = ["ego_pose_token", "timestamp", "ego_rotation", "ego_translation"]
df_ego_pose = df_ego_pose.drop(columns=["timestamp"])
df_sample_data = pd.merge(df_sample_data, df_ego_pose, on="ego_pose_token", how="left")
df_sample_data = df_sample_data.drop(columns=["ego_pose_token"])

df_scene = pd.DataFrame(scene_json)
df_scene.columns = [
"scene_token",
"log_token",
"nbr_samples",
"first_sample_token",
"last_sample_token",
"scene_name",
"description",
]
df_scene = df_scene[["scene_token", "first_sample_token", "last_sample_token", "scene_name"]]
df_sample_data = pd.merge(df_sample_data, df_scene, on="scene_token", how="left")
df_sample_data = df_sample_data.drop(columns=["scene_token"])

df_sample_data["frame_order"] = 0
for index, row in df_sample_data.iterrows():
if len(row["prev"]) == 0:
df_sample_data.loc[index, "frame_order"] = 1
i = 2
next_frame_token = row["next"]
while len(next_frame_token) != 0:
# print(next_frame_token)
cur_index = df_sample_data[
df_sample_data["token"] == next_frame_token
].index.tolist()[0]
# print(cur_index)
df_sample_data.loc[cur_index, "frame_order"] = i
i += 1
next_frame_token = list(
df_sample_data[df_sample_data["token"] == next_frame_token]["next"]
)[0]

df_sample_data = df_sample_data.drop(
columns=[
"fileformat",
"prev",
"next",
"prev_sample",
"next_sample",
"first_sample_token",
"last_sample_token",
]
)

df_sample_annotation = pd.DataFrame(sample_annotation_json)
df_sample_annotation = df_sample_annotation[
["token", "sample_token", "instance_token", "translation", "size", "rotation"]
]

df_instance = pd.DataFrame(instance_json)
df_category = pd.DataFrame(category_json)
df_category.rename(columns={"token": "cat_token"}, inplace=True)
df_instance = pd.merge(
df_instance, df_category, left_on="category_token", right_on="cat_token", how="left"
)
df_instance = df_instance.drop(
columns=[
"category_token",
"cat_token",
"nbr_annotations",
"first_annotation_token",
"last_annotation_token",
"description",
]
)
df_instance.columns = ["instance_token", "category"]
df_sample_annotation = pd.merge(
df_sample_annotation, df_instance, on="instance_token", how="left"
)

return df_sample_data, df_sample_annotation
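
A minimal usage sketch of the new helper (not part of the diff), assuming the nuScenes v1.0-mini JSON files have been extracted to v1.0-mini/v1.0-mini/ relative to the working directory, as the hard-coded paths above expect.

from scenic_generate_df import scenic_generate_df

df_sample_data, df_sample_annotation = scenic_generate_df()

# One row per camera image, with calibration, ego pose, scene name, and frame order.
print(df_sample_data[["filename", "scene_name", "frame_order"]].head())

# One row per annotated object per keyframe, joined with its category.
print(df_sample_annotation[["instance_token", "category", "translation", "size", "rotation"]].head())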