Datasets:
Update helvipad_utils.py
Browse files- helvipad_utils.py +64 -4
helvipad_utils.py
CHANGED
@@ -2,7 +2,7 @@ import torch
|
|
2 |
import cv2
|
3 |
import time
|
4 |
|
5 |
-
#
|
6 |
BASELINE = 0.191 # Baseline in meters
|
7 |
HEIGHT_ORIGINAL = 1920 # Original image height
|
8 |
HEIGHT_DOWNSCALED = 960 # Downscaled height for disparity
|
@@ -17,8 +17,8 @@ def compute_depth_from_disparity(disparity_map: torch.Tensor) -> torch.Tensor:
|
|
17 |
depth = B * (sin(theta) / tan(disparity_rad) + cos(theta))
|
18 |
|
19 |
where:
|
20 |
-
- B is the baseline
|
21 |
-
- theta is the vertical angle corresponding to each pixel.
|
22 |
- disparity_rad is the disparity map scaled to radians.
|
23 |
|
24 |
Parameters:
|
@@ -50,7 +50,7 @@ def compute_depth_from_disparity(disparity_map: torch.Tensor) -> torch.Tensor:
|
|
50 |
# Initialize depth map
|
51 |
depth_map = torch.zeros_like(disparity_map, dtype=torch.float32)
|
52 |
|
53 |
-
# Compute depth only where disparity is valid
|
54 |
non_zero_disparity = disparity_map != 0
|
55 |
depth_map[non_zero_disparity] = (
|
56 |
(torch.sin(theta_grid[non_zero_disparity]) / torch.tan(disparity_map_rad[non_zero_disparity]))
|
@@ -64,6 +64,66 @@ def compute_depth_from_disparity(disparity_map: torch.Tensor) -> torch.Tensor:
|
|
64 |
return depth_map
|
65 |
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
def disp_deg_to_disp_pix(disp_deg: float) -> float:
|
68 |
"""
|
69 |
Convert a disparity value from degrees to pixels.
|
|
|
2 |
import cv2
|
3 |
import time
|
4 |
|
5 |
+
# Dataset constants
BASELINE = 0.191  # Baseline in meters
HEIGHT_ORIGINAL = 1920  # Original image height in pixels (rows are mapped to polar angles via y * pi / HEIGHT_ORIGINAL)
HEIGHT_DOWNSCALED = 960  # Downscaled height in pixels used for disparity maps
|
|
|
17 |
depth = B * (sin(theta) / tan(disparity_rad) + cos(theta))
|
18 |
|
19 |
where:
|
20 |
+
- B is the baseline.
|
21 |
+
- theta is the vertical angle corresponding to each pixel in the y-grid.
|
22 |
- disparity_rad is the disparity map scaled to radians.
|
23 |
|
24 |
Parameters:
|
|
|
50 |
# Initialize depth map
|
51 |
depth_map = torch.zeros_like(disparity_map, dtype=torch.float32)
|
52 |
|
53 |
+
# Compute depth only where disparity is valid
|
54 |
non_zero_disparity = disparity_map != 0
|
55 |
depth_map[non_zero_disparity] = (
|
56 |
(torch.sin(theta_grid[non_zero_disparity]) / torch.tan(disparity_map_rad[non_zero_disparity]))
|
|
|
64 |
return depth_map
|
65 |
|
66 |
|
67 |
+
def compute_disparity_from_depth(depth_map: torch.Tensor) -> torch.Tensor:
    """
    Convert a depth map to a disparity map based on dataset-specific calibration.

    This function reverses the depth-to-disparity conversion, based on the relationship:

        tan(disparity_rad) = sin(theta) / (depth / B - cos(theta))

    The final disparity in pixel units is then:

        disparity = (H / pi) * atan(tan(disparity_rad))

    where:
        - B is the baseline (BASELINE).
        - theta is the vertical angle corresponding to each pixel in the y-grid.
        - disparity_rad is the angular disparity in radians.

    Pixels with zero depth are treated as invalid and keep a disparity of 0.

    Parameters:
        depth_map (torch.Tensor): Input tensor of shape (bs, 1, h, w) or (bs, h, w).

    Returns:
        torch.Tensor: Disparity map of shape (bs, 1, h, w) if the input had a
        channel dimension, otherwise (bs, h, w).
    """
    # Ensure input is 3D (batch, height, width); remember whether to restore
    # the channel dimension on output.
    has_channel_dim: bool = depth_map.dim() == 4 and depth_map.shape[1] == 1
    if has_channel_dim:
        depth_map = depth_map.squeeze(1)

    bs, height, width = depth_map.shape

    # Compute y-grid values: descending odd row indices in original-resolution
    # coordinates. The 512 offset is dataset-specific calibration — presumably
    # the vertical crop offset of the disparity region; verify against the
    # forward depth-from-disparity conversion.
    y_grid = (
        torch.arange(512 + 2 * height - 1, 512, step=-2, device=depth_map.device)
        .unsqueeze(0)
        .unsqueeze(-1)
        .expand(bs, -1, width)
    )

    # Compute theta (polar angle) for each row
    theta_grid = y_grid * torch.pi / HEIGHT_ORIGINAL

    # Initialize disparity map (zeros mark invalid pixels)
    disparity_map = torch.zeros_like(depth_map, dtype=torch.float32)

    # Compute disparity only where depth is valid (non-zero).
    # NOTE(review): if depth == B * cos(theta) the denominator is zero and the
    # result saturates at atan(inf) = pi/2; physical depths are assumed larger.
    non_zero_depth = depth_map != 0
    tan_disparity_rad = torch.sin(theta_grid[non_zero_depth]) / (
        (depth_map[non_zero_depth] / BASELINE) - torch.cos(theta_grid[non_zero_depth])
    )
    disparity_map_rad = torch.atan(tan_disparity_rad)
    disparity_map[non_zero_depth] = (HEIGHT_DOWNSCALED / torch.pi) * disparity_map_rad

    # Restore channel dimension if input had it
    if has_channel_dim:
        disparity_map = disparity_map.unsqueeze(1)

    return disparity_map
|
125 |
+
|
126 |
+
|
127 |
def disp_deg_to_disp_pix(disp_deg: float) -> float:
|
128 |
"""
|
129 |
Convert a disparity value from degrees to pixels.
|