import os
import re
import json
import argparse
import logging
from typing import Any, Dict
from utils.classes import CLASS_LIST, map_sub_class_to_primary_class, map_class_id_to_class_name
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%y/%m/%d %H:%M:%S", level=logging.INFO)

class ValidateFile(argparse.Action):
"""
Custom argparse action to validate file paths.
"""
def __call__(self, parser, namespace, values, option_string=None):
# Validate the file path format
file_path_pattern = r"^[a-zA-Z0-9_\-\/.#+]+$"
if not re.match(file_path_pattern, values):
parser.error(f"Invalid file path: {values}")
# Check if the file exists
if not os.path.exists(values):
parser.error(f"File {values} does NOT exist.")
# Check if the file is readable
if not os.access(values, os.R_OK):
parser.error(f"File {values} is NOT readable.")
# Set the validated file path in the namespace
setattr(namespace, self.dest, values)
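
# Illustrative usage (not part of the original module; the flag name below is
# hypothetical): attach ValidateFile to an argparse argument so the path is
# validated at parse time.
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--gt-file", action=ValidateFile, help="Path to the GT file")
#   args = parser.parse_args(["--gt-file", "data/gt.txt"])
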
def validate_file_path(input_string: str) -> str:
"""
Validates whether the input string matches a file path pattern
:param str input_string: input string
:return: validated file path
:rtype: str
::
file_path = validate_file_path(input_string)
"""
file_path_pattern = r"^[a-zA-Z0-9_\-\/.#+]+$"
if re.match(file_path_pattern, input_string):
return input_string
else:
raise ValueError(f"Invalid file path: {input_string}")

def sanitize_string(input_string: str) -> str:
"""
Sanitizes an input string
:param str input_string: input string
:return: sanitized string
:rtype: str
::
sanitized_string = sanitize_string(input_string)
"""
# Allow alphanumeric characters, dots, slashes, underscores, hashes, and dashes
return re.sub(r"[^a-zA-Z0-9\._/#-]", "_", input_string)
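
# Illustrative usage (values are hypothetical): validate_file_path() returns a path
# unchanged or raises, while sanitize_string() rewrites disallowed characters:
#
#   validate_file_path("results/scene_1/gt.txt")  # -> "results/scene_1/gt.txt"
#   sanitize_string("scene 1:cam?")               # -> "scene_1_cam_"
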
def make_dir(dir_path: str) -> None:
"""
Safely create a directory.
"""
valid_dir_path = validate_file_path(dir_path)
if os.path.islink(valid_dir_path):
raise ValueError(f"Directory path {dir_path} must not be a symbolic link.")
try:
if not os.path.isdir(valid_dir_path):
os.makedirs(valid_dir_path)
except OSError as e:
raise ValueError(f"Failed to create directory {dir_path}: {e}")

def load_json_from_file(file_path: str) -> Any:
"""
Safely loads JSON data from a file.
"""
valid_file_path = validate_file_path(file_path)
try:
with open(valid_file_path, "r") as f:
return json.load(f)
except json.JSONDecodeError as e:
raise ValueError(f"Invalid JSON format in file {file_path}: {e}")
except Exception as e:
raise ValueError(f"An error occurred while loading file {file_path}: {e}")
def split_files_per_scene(gt_path: str, pred_path: str, output_base_dir: str, scene_id_2_scene_name: Dict[str, str], num_frames_to_eval: int = 9000):
    """
    Splits GT and Pred files per scene, saving them into separate directories.
    :param gt_path: Path to the ground truth text file (space-separated values, one object per line).
    :param pred_path: Path to the predictions text file (same format as the ground truth).
    :param output_base_dir: Base directory to save split files.
    :param scene_id_2_scene_name: Mapping from scene id (string, as loaded from JSON) to scene name; defines the valid scene ids.
    :param num_frames_to_eval: Prediction entries with frame_id at or above this value are skipped.
    """
# Create output base directory
os.makedirs(output_base_dir, exist_ok=True)
gt_scenes = set()
pred_scenes = set()
    # JSON object keys are strings, so normalize the valid scene ids to int
    valid_scene_ids = {int(scene_id) for scene_id in scene_id_2_scene_name.keys()}
# Process GT data
scene_gt_writers = {}
with open(gt_path, "r") as gt_file:
for line in gt_file:
line_split = line.split(" ")
scene_id = int(line_split[0])
gt_scenes.add(scene_id)
if scene_id not in scene_gt_writers:
os.makedirs(os.path.join(output_base_dir, f"scene_{scene_id}"), exist_ok=True)
scene_gt_writers[scene_id] = open(os.path.join(output_base_dir, f"scene_{scene_id}", "gt.txt"), "w")
scene_gt_writers[scene_id].write(line)
# Close all GT writers
for writer in scene_gt_writers.values():
writer.close()
    gt_scenes = sorted(gt_scenes)
logging.info(f"Found scenes {gt_scenes} in ground truth.")
# Process Pred data
scene_pred_writers = {}
with open(pred_path, "r") as pred_file:
for line in pred_file:
line_split = line.split(" ")
# Validate line length
if len(line_split) != 11:
raise ValueError(f"Found incorrect entry in predictions. Each entry should have 11 elements: (scene_id class_id object_id frame_id x y z width length height yaw)")
# Validate scene id
scene_id = int(line_split[0])
if scene_id not in valid_scene_ids:
raise ValueError(f"Found incorrect scene id in predictions: {scene_id}. Valid scene ids are: {valid_scene_ids}, defined by the scene_id_2_scene_name json file")
# Validate class id
class_id = int(line_split[1])
if class_id not in map_class_id_to_class_name:
raise ValueError(f"Found incorrect class id in predictions: {class_id}. Valid class ids are: {map_class_id_to_class_name.keys()}")
# Validate object id
object_id = int(line_split[2])
if object_id < 0:
raise ValueError(f"Found incorrect object id in predictions: {object_id}. Object id should be positive.")
# Validate frame id
frame_id = int(line_split[3])
if frame_id < 0:
raise ValueError(f"Found incorrect frame id in predictions: {frame_id}. Frame id should be 0 or positive.")
            # Only keep prediction frames below the evaluation cutoff
            if frame_id >= num_frames_to_eval:
                continue
pred_scenes.add(scene_id)
if scene_id not in scene_pred_writers:
os.makedirs(os.path.join(output_base_dir, f"scene_{scene_id}"), exist_ok=True)
scene_pred_writers[scene_id] = open(os.path.join(output_base_dir, f"scene_{scene_id}", "pred.txt"), "w")
scene_pred_writers[scene_id].write(line)
# Close all Pred writers
for writer in scene_pred_writers.values():
writer.close()
    pred_scenes = sorted(pred_scenes)
logging.info(f"Found scenes {pred_scenes} in predictions.")
def split_files_per_class(gt_path: str, pred_path: str, output_base_dir: str, confidence_threshold: float = 0.0, num_frames_to_eval: int = 20000, ground_truth_frame_offset_secs: float = 0.0, fps: float = 30.0):
    """
    Splits GT and Pred files per class, saving them into separate directories.
    :param gt_path: Path to the ground truth text file (space-separated values, one object per line).
    :param pred_path: Path to the predictions text file (same format as the ground truth).
    :param output_base_dir: Base directory to save split files.
    Note: confidence_threshold, num_frames_to_eval, ground_truth_frame_offset_secs,
    and fps are currently unused in this implementation.
    """
# Create output base directory
os.makedirs(output_base_dir, exist_ok=True)
gt_classes = set()
pred_classes = set()
# Process GT data
class_gt_writers = {}
with open(gt_path, "r") as gt_file:
for line in gt_file:
line_split = line.split(" ")
class_id = int(line_split[1])
class_name = map_class_id_to_class_name[class_id]
gt_classes.add(class_name)
if class_name not in class_gt_writers:
os.makedirs(os.path.join(output_base_dir, class_name), exist_ok=True)
class_gt_writers[class_name] = open(os.path.join(output_base_dir, class_name, "gt.txt"), "w")
class_gt_writers[class_name].write(line)
# Close all GT writers
for writer in class_gt_writers.values():
writer.close()
    gt_classes = sorted(gt_classes)
logging.info(f"Found classes {gt_classes} in ground truth.")
# Process Pred data
class_pred_writers = {}
with open(pred_path, "r") as pred_file:
for line in pred_file:
line_split = line.split(" ")
class_id = int(line_split[1])
class_name = map_class_id_to_class_name[class_id]
pred_classes.add(class_name)
if class_name not in class_pred_writers:
os.makedirs(os.path.join(output_base_dir, class_name), exist_ok=True)
class_pred_writers[class_name] = open(os.path.join(output_base_dir, class_name, "pred.txt"), "w")
class_pred_writers[class_name].write(line)
# Close all Pred writers
for writer in class_pred_writers.values():
writer.close()
    pred_classes = sorted(pred_classes)
logging.info(f"Found classes {pred_classes} in predictions.")
def get_no_of_objects_per_scene(gt_path: str, scene_id_2_scene_name: Dict[str, str]):
    """
    Get the number of objects per scene in the ground truth file.
    Scene ids are compared as raw string tokens, matching the string keys of a
    mapping loaded from JSON.
    """
    no_of_objects_per_scene = {}
    with open(gt_path, "r") as gt_file:
        for line in gt_file:
            line_split = line.split(" ")
            scene_id = line_split[0]
            if scene_id not in scene_id_2_scene_name:
                continue
            scene_name = scene_id_2_scene_name[scene_id]
if scene_name not in no_of_objects_per_scene:
no_of_objects_per_scene[scene_name] = 0
no_of_objects_per_scene[scene_name] += 1
return no_of_objects_per_scene
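
# Illustrative usage (mapping is hypothetical): counting GT lines per scene. The
# mapping keys are strings, matching the raw scene_id tokens in the file:
#
#   counts = get_no_of_objects_per_scene("gt.txt", {"0": "Warehouse_000"})
#   # -> {"Warehouse_000": <number of GT lines for scene 0>}
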
def split_files_by_sensor(gt_path: str, pred_path: str, output_base_dir: str, map_camera_name_to_bev_name: Dict[str, Any], confidence_threshold: float, num_frames_to_eval: int):
    """
    Splits GT and Pred files by sensor and saves them into separate directories.
    :param gt_path: Path to the ground truth JSON Lines file (one JSON object per line).
    :param pred_path: Path to the predictions JSON Lines file (one JSON object per line).
    :param output_base_dir: Base directory to save split files.
    :param map_camera_name_to_bev_name: Mapping from camera sensor name to a list of BEV sensor names.
    :param confidence_threshold: Predicted objects whose bbox3d confidence is below this value are dropped.
    :param num_frames_to_eval: Frames with id at or above this value are skipped.
    """
# Create output base directory
os.makedirs(output_base_dir, exist_ok=True)
# Set to keep track of unique sensor IDs
gt_sensors = set()
pred_sensors = set()
# Create writers for GT data
sensor_gt_writers = {}
with open(gt_path, "r") as gt_file:
for line in gt_file:
if '"' not in line and "'" in line:
line = line.replace("'", '"')
data = json.loads(line)
# Only eval frames below num_frames_to_eval
if int(data['id']) >= num_frames_to_eval:
continue
cam_sensor_name = data['sensorId']
# Convert camera id to BEV sensor id
bev_sensor_names = map_camera_name_to_bev_name[cam_sensor_name]
for bev_sensor_name in bev_sensor_names:
gt_sensors.add(bev_sensor_name)
sensor_dir = os.path.join(output_base_dir, bev_sensor_name)
os.makedirs(sensor_dir, exist_ok=True)
gt_file_path = os.path.join(sensor_dir, "gt.json")
if bev_sensor_name not in sensor_gt_writers:
sensor_gt_writers[bev_sensor_name] = open(gt_file_path, "w")
sensor_gt_writers[bev_sensor_name].write(json.dumps(data) + "\n")
# Close all GT writers
for writer in sensor_gt_writers.values():
writer.close()
# Log found BEV sensors in GT
logging.info(f"Found BEV sensors: {', '.join(sorted(gt_sensors))} in ground truth file.")
# Create writers for Pred data
sensor_pred_writers = {}
with open(pred_path, "r") as pred_file:
for line in pred_file:
if '"' not in line and "'" in line:
line = line.replace("'", '"')
data = json.loads(line)
# Only eval frames below num_frames_to_eval
if int(data['id']) >= num_frames_to_eval:
continue
sensor_name = data['sensorId']
pred_sensors.add(sensor_name)
sensor_dir = os.path.join(output_base_dir, sensor_name)
os.makedirs(sensor_dir, exist_ok=True)
if sensor_name not in sensor_pred_writers:
pred_file_path = os.path.join(sensor_dir, "pred.json")
sensor_pred_writers[sensor_name] = open(pred_file_path, "w")
filtered_objects = []
for obj in data["objects"]:
# Get the confidence value from bbox3d.
confidence = obj["bbox3d"]["confidence"]
if confidence >= confidence_threshold:
filtered_objects.append(obj)
# Replace the "objects" list with the filtered version.
data["objects"] = filtered_objects
sensor_pred_writers[sensor_name].write(json.dumps(data) + "\n")
# Close all Pred writers
for writer in sensor_pred_writers.values():
writer.close()
# Log found BEV sensors in Prediction
logging.info(f"Found BEV sensors: {', '.join(sorted(pred_sensors))} in prediction file.")
print("") |