import os
import logging
import numpy as np
from tabulate import tabulate
import utils.trackeval as trackeval
from typing import List, Dict, Set, Tuple, Any
from utils.io_utils import make_dir, validate_file_path, load_json_from_file
from utils.classes import CLASS_LIST
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%y/%m/%d %H:%M:%S", level=logging.INFO)
def prepare_ground_truth_file(input_file_path: str, output_file_path: str, fps: int) -> None:
"""
Converts the ground truth file into a MOT (Multiple Object Tracking) format for evaluation.
:param str input_file_path: The path to the input ground truth file.
:param str output_file_path: The path where the output MOT file will be saved.
:param int fps: The frame rate (FPS) of the videos.
    :return: None
    :rtype: None
    ::
        prepare_ground_truth_file(input_file_path, output_file_path, fps)
"""
output_file = open(output_file_path, "w")
with open(input_file_path) as f:
for line_number, line in enumerate(f):
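            # Space-separated columns: [2] object ID, [3] 0-based frame ID, [4:7] x/y/z, [7:10] width/length/height, [10] yaw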
line = line.split(" ")
object_id = int(line[2])
frame_id = int(line[3]) + 1
x = float(line[4])
y = float(line[5])
z = float(line[6])
width = float(line[7])
length = float(line[8])
height = float(line[9])
yaw = float(line[10])
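            # Pitch and roll are not read from the input and are fixed to zero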
pitch = 0
roll = 0
result_str = (
f"{frame_id} {object_id} 1 "
f"{x:.5f} {y:.5f} {z:.5f} "
f"{width:.5f} {length:.5f} {height:.5f} {pitch:.5f} {roll:.5f} {yaw:.5f}\n"
)
output_file.write(result_str)
output_file.close()
def prepare_prediction_file(input_file_path: str, output_file_path: str, fps: float) -> None:
"""
Converts the prediction file into a MOT (Multiple Object Tracking) format for evaluation.
:param str input_file_path: The path to the input prediction file.
:param str output_file_path: The path where the output MOT file will be saved.
:param float fps: The frame rate (FPS) of the videos.
::
prepare_prediction_file(input_file_path, output_file_path, fps)
"""
output_file = open(output_file_path, "w")
with open(input_file_path) as f:
for line_number, line in enumerate(f):
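            # Prediction lines use the same space-separated layout as the ground truth above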
line = line.split(" ")
object_id = int(line[2])
frame_id = int(line[3]) + 1
x = float(line[4])
y = float(line[5])
z = float(line[6])
width = float(line[7])
length = float(line[8])
height = float(line[9])
yaw = float(line[10])
pitch = 0
roll = 0
result_str = (
f"{frame_id} {object_id} 1 "
f"{x:.5f} {y:.5f} {z:.5f} "
f"{width:.5f} {length:.5f} {height:.5f} {pitch:.5f} {roll:.5f} {yaw:.5f}\n"
)
output_file.write(result_str)
output_file.close()
def make_seq_maps_file(seq_maps_dir_path: str, sensor_ids: List[str], benchmark: str, split_to_eval: str) -> None:
"""
Creates a sequence-maps file used by the TrackEval library.
:param str seq_maps_dir_path: The directory path where the sequence-maps file will be saved.
:param List[str] sensor_ids: A list of sensor IDs to include in the sequence-maps file.
:param str benchmark: The name of the benchmark.
:param str split_to_eval: The name of the split for evaluation.
:return: None
:rtype: None
::
make_seq_maps_file(seq_maps_dir_path, sensor_ids, benchmark, split_to_eval)
"""
make_dir(seq_maps_dir_path)
seq_maps_file_name = benchmark + "-" + split_to_eval + ".txt"
seq_maps_file_path = os.path.join(seq_maps_dir_path, seq_maps_file_name)
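    # TrackEval expects a seqmaps text file with a "name" header followed by one sequence name per line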
f = open(seq_maps_file_path, "w")
f.write("name\n")
for sensor_id in sensor_ids:
f.write(sensor_id + "\n")
f.close()
def setup_evaluation_configs(results_dir_path: str, eval_type: str, num_cores: int) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Sets up the evaluation configurations for TrackEval.
:param str results_dir_path: The path to the folder that stores the results.
    :param str eval_type: The type of evaluation to perform ("bbox" or "location").
    :param int num_cores: The number of parallel cores used by TrackEval.
    :return: A tuple containing the dataset configuration and evaluation configuration.
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    ::
        dataset_config, eval_config = setup_evaluation_configs(results_dir_path, eval_type, num_cores)
"""
eval_config = trackeval.eval.Evaluator.get_default_eval_config()
eval_config["PRINT_CONFIG"] = False
eval_config["USE_PARALLEL"] = True
eval_config["NUM_PARALLEL_CORES"] = num_cores
# Create dataset configs for TrackEval library
    if eval_type == "bbox":
        dataset_config = trackeval.datasets.MTMCChallenge3DBBox.get_default_dataset_config()
    elif eval_type == "location":
        dataset_config = trackeval.datasets.MTMCChallenge3DLocation.get_default_dataset_config()
    else:
        raise ValueError(f"Unsupported eval_type '{eval_type}'; expected 'bbox' or 'location'.")
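    # Disable TrackEval's preprocessing and evaluate the "all" split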
dataset_config["DO_PREPROC"] = False
dataset_config["SPLIT_TO_EVAL"] = "all"
evaluation_dir_path = os.path.join(results_dir_path, "evaluation")
make_dir(evaluation_dir_path)
dataset_config["GT_FOLDER"] = os.path.join(evaluation_dir_path, "gt")
dataset_config["TRACKERS_FOLDER"] = os.path.join(evaluation_dir_path, "scores")
dataset_config["PRINT_CONFIG"] = False
return dataset_config, eval_config
def make_seq_ini_file(gt_dir: str, camera: str, seq_length: int) -> None:
"""
Creates a sequence-ini file used by the TrackEval library.
:param str gt_dir: The directory path where the sequence-ini file will be saved.
:param str camera: The name of a single sensor
:param int seq_length: The number of frames in the sequence.
:return: None
:rtype: None
::
make_seq_ini_file(gt_dir, camera, seq_length)
"""
ini_file_name = gt_dir + "/seqinfo.ini"
f = open(ini_file_name, "w")
f.write("[Sequence]\n")
name= "name=" +str(camera)+ "\n"
f.write(name)
f.write("imDir=img1\n")
f.write("frameRate=30\n")
seq = "seqLength=" + str(seq_length) + "\n"
f.write(seq)
f.write("imWidth=1920\n")
f.write("imHeight=1080\n")
f.write("imExt=.jpg\n")
f.close()
def prepare_evaluation_folder(dataset_config: Dict[str, Any], input_file_type: str) -> Tuple[str, str]:
"""
Prepares the evaluation folder structure required for TrackEval.
    :param Dict[str, Any] dataset_config: The dataset configuration dictionary.
    :param str input_file_type: The sequence name used for the ground truth and prediction files.
    :return: A tuple containing the prediction file path and ground truth file path.
    :rtype: Tuple[str, str]
    ::
        pred_file_path, gt_file_path = prepare_evaluation_folder(dataset_config, input_file_type)
"""
    # Collect sensor IDs for the TrackEval sequence maps file
sensor_ids: Set[str] = set()
sensor_ids.add(input_file_type)
sensor_ids = sorted(list(sensor_ids))
# Create sequence maps file for evaluation
seq_maps_dir_path = os.path.join(dataset_config["GT_FOLDER"], "seqmaps")
make_seq_maps_file(seq_maps_dir_path, sensor_ids, dataset_config["BENCHMARK"], dataset_config["SPLIT_TO_EVAL"])
# Create ground truth directory
mot_version = dataset_config["BENCHMARK"] + "-" + dataset_config["SPLIT_TO_EVAL"]
gt_root_dir_path = os.path.join(dataset_config["GT_FOLDER"], mot_version)
gt_dir_path = os.path.join(gt_root_dir_path, input_file_type)
make_dir(gt_dir_path)
gt_output_dir_path = os.path.join(gt_dir_path, "gt")
make_dir(gt_output_dir_path)
gt_file_path = os.path.join(gt_output_dir_path, "gt.txt")
# Generate sequence file required for TrackEval library
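    # seq_length is fixed to 20000 frames, presumably an upper bound on the sequence length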
make_seq_ini_file(gt_dir_path, camera=input_file_type, seq_length=20000)
# Create prediction directory
pred_dir_path = os.path.join(dataset_config["TRACKERS_FOLDER"], mot_version, "data", "data")
make_dir(pred_dir_path)
pred_file_path = os.path.join(pred_dir_path, f"{input_file_type}.txt")
return pred_file_path, gt_file_path
def run_evaluation(gt_file: str, prediction_file: str, fps: float, app_config: Any, dataset_config: Dict[str, Any], eval_config: Dict[str, Any], eval_type: str) -> Any:
"""
Executes the evaluation process using TrackEval based on the provided configurations.
:param str gt_file: The ground truth file path.
:param str prediction_file: The prediction file path.
:param float fps: The frames per second rate.
:param AppConfig app_config: The application configuration object.
:param Dict[str, Any] dataset_config: The dataset configuration dictionary.
:param Dict[str, Any] eval_config: The evaluation configuration dictionary.
:param str eval_type: The type of evaluation to perform ("bbox" or "location").
:return: The evaluation results.
:rtype: Any
::
results = run_evaluation(gt_file, prediction_file, fps, app_config, dataset_config, eval_config, eval_type)
"""
# Define the metrics to calculate
metrics_config = {"METRICS": ["HOTA"]}
metrics_config["PRINT_CONFIG"] = False
config = {**eval_config, **dataset_config, **metrics_config} # Merge configs
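    # Split the merged config back into the per-component dictionaries expected by TrackEval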
eval_config = {k: v for k, v in config.items() if k in eval_config.keys()}
dataset_config = {k: v for k, v in config.items() if k in dataset_config.keys()}
metrics_config = {k: v for k, v in config.items() if k in metrics_config.keys()}
# Run the Evaluator
evaluator = trackeval.eval.Evaluator(eval_config)
    if eval_type == "bbox":
        dataset_list = [trackeval.datasets.MTMCChallenge3DBBox(dataset_config)]
    elif eval_type == "location":
        dataset_list = [trackeval.datasets.MTMCChallenge3DLocation(dataset_config)]
    else:
        raise ValueError(f"Unsupported eval_type '{eval_type}'; expected 'bbox' or 'location'.")
    metrics_list: List[Any] = list()
for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity]:
if metric.get_name() in metrics_config["METRICS"]:
metrics_list.append(metric(metrics_config))
if len(metrics_list) == 0:
raise Exception("No metric selected for evaluation.")
results = evaluator.evaluate(dataset_list, metrics_list)
return results
def _evaluate_tracking_for_all_BEV_sensors(ground_truth_file: str, prediction_file: str, output_directory: str, num_cores: int, fps: float) -> Dict[str, Any]:
    """
    Evaluates tracking performance for all BEV sensors, one object class at a time.
    :param str ground_truth_file: The path to the ground truth file.
    :param str prediction_file: The path to the prediction file.
    :param str output_directory: The directory that contains the per-class folders and stores the output files.
    :param int num_cores: The number of parallel cores used by TrackEval.
    :param float fps: The frame rate (FPS) of the videos.
    :return: The evaluation results keyed by class name.
    :rtype: Dict[str, Any]
    ::
        results = _evaluate_tracking_for_all_BEV_sensors(ground_truth_file, prediction_file, output_directory, num_cores, fps)
    """
print("")
all_results = {}
for class_name in CLASS_LIST:
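        # Each class has its own sub-folder under output_directory containing gt.txt and pred.txt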
class_dir = os.path.join(output_directory, class_name)
if not os.path.isdir(class_dir):
logging.warning(f"Skipping class folder '{class_name}' as it was not found.")
print("--------------------------------")
continue
logging.info(f"Evaluating all BEV sensors on class {class_name}.")
ground_truth_file = os.path.join(class_dir, "gt.txt")
prediction_file = os.path.join(class_dir, "pred.txt")
output_dir = os.path.join(class_dir, "output")
        if not os.path.exists(ground_truth_file) or not os.path.exists(prediction_file):
            logging.warning(f"Skipping class '{class_name}' as its ground truth or prediction file was not found.")
print("--------------------------------")
continue
# Setup evaluation library & folders
dataset_config, eval_config = setup_evaluation_configs(output_directory, "bbox", num_cores)
output_pred_file_name, output_gt_file_name = prepare_evaluation_folder(dataset_config, "MTMC")
logging.info("Completed setup for evaluation library.")
# Prepare ground truth
prepare_ground_truth_file(ground_truth_file, output_gt_file_name, fps)
logging.info(f"Completed parsing ground-truth file {ground_truth_file}.")
# Prepare prediction results
prepare_prediction_file(prediction_file, output_pred_file_name, fps)
logging.info(f"Completed parsing prediction file {prediction_file}.")
# Run evaluation
results = run_evaluation(output_gt_file_name, output_pred_file_name, fps, None, dataset_config, eval_config, "bbox")
all_results[class_name] = results
print("--------------------------------------------------------------")
return all_results
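
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; not part of the original pipeline). It
# assumes an output directory that already contains one sub-folder per class
# with gt.txt and pred.txt inside; the paths, core count, and FPS below are
# placeholder assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    all_results = _evaluate_tracking_for_all_BEV_sensors(
        ground_truth_file="gt.txt",     # placeholder; per-class files are read from output_directory
        prediction_file="pred.txt",     # placeholder; per-class files are read from output_directory
        output_directory="results",     # expects results/<class_name>/gt.txt and pred.txt
        num_cores=4,
        fps=30,
    )
    for class_name in all_results:
        logging.info(f"Evaluation finished for class '{class_name}'.")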