import os
import logging
import numpy as np
from tabulate import tabulate
import utils.trackeval as trackeval
from typing import List, Dict, Set, Tuple, Any
from utils.io_utils import make_dir, validate_file_path, load_json_from_file
from utils.classes import CLASS_LIST
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%y/%m/%d %H:%M:%S", level=logging.INFO)
def prepare_ground_truth_file(input_file_path: str, output_file_path: str, fps: int) -> None:
    """
    Converts the ground truth file into a MOT (Multiple Object Tracking) format for evaluation.
    :param str input_file_path: The path to the input ground truth file.
    :param str output_file_path: The path where the output MOT file will be saved.
    :param int fps: The frame rate (FPS) of the videos.
    :return: None
    :rtype: None
    ::
        prepare_ground_truth_file(input_file_path, output_file_path, fps)
    """
    with open(input_file_path) as input_file, open(output_file_path, "w") as output_file:
        for line in input_file:
            fields = line.split(" ")
            object_id = int(fields[2])
            frame_id = int(fields[3]) + 1
            x = float(fields[4])
            y = float(fields[5])
            z = float(fields[6])
            width = float(fields[7])
            length = float(fields[8])
            height = float(fields[9])
            yaw = float(fields[10])
            pitch = 0
            roll = 0
            result_str = (
                f"{frame_id} {object_id} 1 "
                f"{x:.5f} {y:.5f} {z:.5f} "
                f"{width:.5f} {length:.5f} {height:.5f} {pitch:.5f} {roll:.5f} {yaw:.5f}\n"
            )
            output_file.write(result_str)
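# Illustrative sketch of the conversion above (made-up values; only the column layout
# implied by the indexing in prepare_ground_truth_file is assumed, and input columns
# 0-1 are not read by this parser):
#
#   input line : "<col0> <col1> 12 99 1.5 2.5 0.0 0.6 0.8 1.7 1.57"
#   output line: "100 12 1 1.50000 2.50000 0.00000 0.60000 0.80000 1.70000 0.00000 0.00000 1.57000"
#
# Frame IDs are shifted from 0-based to 1-based, pitch and roll are fixed to zero, and
# all floating-point values are written with five decimal places.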
def prepare_prediction_file(input_file_path: str, output_file_path: str, fps: float) -> None:
    """
    Converts the prediction file into a MOT (Multiple Object Tracking) format for evaluation.
    :param str input_file_path: The path to the input prediction file.
    :param str output_file_path: The path where the output MOT file will be saved.
    :param float fps: The frame rate (FPS) of the videos.
    :return: None
    :rtype: None
    ::
        prepare_prediction_file(input_file_path, output_file_path, fps)
    """
    with open(input_file_path) as input_file, open(output_file_path, "w") as output_file:
        for line in input_file:
            fields = line.split(" ")
            object_id = int(fields[2])
            frame_id = int(fields[3]) + 1
            x = float(fields[4])
            y = float(fields[5])
            z = float(fields[6])
            width = float(fields[7])
            length = float(fields[8])
            height = float(fields[9])
            yaw = float(fields[10])
            pitch = 0
            roll = 0
            result_str = (
                f"{frame_id} {object_id} 1 "
                f"{x:.5f} {y:.5f} {z:.5f} "
                f"{width:.5f} {length:.5f} {height:.5f} {pitch:.5f} {roll:.5f} {yaw:.5f}\n"
            )
            output_file.write(result_str)
def make_seq_maps_file(seq_maps_dir_path: str, sensor_ids: List[str], benchmark: str, split_to_eval: str) -> None:
    """
    Creates a sequence-maps file used by the TrackEval library.
    :param str seq_maps_dir_path: The directory path where the sequence-maps file will be saved.
    :param List[str] sensor_ids: A list of sensor IDs to include in the sequence-maps file.
    :param str benchmark: The name of the benchmark.
    :param str split_to_eval: The name of the split for evaluation.
    :return: None
    :rtype: None
    ::
        make_seq_maps_file(seq_maps_dir_path, sensor_ids, benchmark, split_to_eval)
    """
    make_dir(seq_maps_dir_path)
    seq_maps_file_name = benchmark + "-" + split_to_eval + ".txt"
    seq_maps_file_path = os.path.join(seq_maps_dir_path, seq_maps_file_name)
    with open(seq_maps_file_path, "w") as seq_maps_file:
        seq_maps_file.write("name\n")
        for sensor_id in sensor_ids:
            seq_maps_file.write(sensor_id + "\n")
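# Example of the sequence-maps file written above, assuming benchmark="MTMC",
# split_to_eval="all", and a single sensor ID "MTMC" (values chosen for illustration;
# the actual benchmark name comes from the TrackEval dataset config):
#
#   seqmaps/MTMC-all.txt:
#       name
#       MTMC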
def setup_evaluation_configs(results_dir_path: str, eval_type: str, num_cores: int) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Sets up the evaluation configurations for TrackEval.
    :param str results_dir_path: The path to the folder that stores the results.
    :param str eval_type: The type of evaluation to perform ("bbox" or "location").
    :param int num_cores: The number of CPU cores used for parallel evaluation.
    :return: A tuple containing the dataset configuration and evaluation configuration.
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    ::
        dataset_config, eval_config = setup_evaluation_configs(results_dir_path, eval_type, num_cores)
    """
    eval_config = trackeval.eval.Evaluator.get_default_eval_config()
    eval_config["PRINT_CONFIG"] = False
    eval_config["USE_PARALLEL"] = True
    eval_config["NUM_PARALLEL_CORES"] = num_cores
    # Create dataset configs for TrackEval library
    if eval_type == "bbox":
        dataset_config = trackeval.datasets.MTMCChallenge3DBBox.get_default_dataset_config()
    elif eval_type == "location":
        dataset_config = trackeval.datasets.MTMCChallenge3DLocation.get_default_dataset_config()
    else:
        raise ValueError(f"Unsupported evaluation type: {eval_type}")
    dataset_config["DO_PREPROC"] = False
    dataset_config["SPLIT_TO_EVAL"] = "all"
    evaluation_dir_path = os.path.join(results_dir_path, "evaluation")
    make_dir(evaluation_dir_path)
    dataset_config["GT_FOLDER"] = os.path.join(evaluation_dir_path, "gt")
    dataset_config["TRACKERS_FOLDER"] = os.path.join(evaluation_dir_path, "scores")
    dataset_config["PRINT_CONFIG"] = False
    return dataset_config, eval_config
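# Rough sketch of the layout created under results_dir_path by this setup (based only on
# the config keys assigned above; TrackEval may expect additional files inside):
#
#   <results_dir_path>/evaluation/gt/      -> dataset_config["GT_FOLDER"]
#   <results_dir_path>/evaluation/scores/  -> dataset_config["TRACKERS_FOLDER"]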
def make_seq_ini_file(gt_dir: str, camera: str, seq_length: int) -> None:
    """
    Creates a sequence-ini file used by the TrackEval library.
    :param str gt_dir: The directory path where the sequence-ini file will be saved.
    :param str camera: The name of a single sensor.
    :param int seq_length: The number of frames in the sequence.
    :return: None
    :rtype: None
    ::
        make_seq_ini_file(gt_dir, camera, seq_length)
    """
    ini_file_path = os.path.join(gt_dir, "seqinfo.ini")
    with open(ini_file_path, "w") as ini_file:
        ini_file.write("[Sequence]\n")
        ini_file.write("name=" + str(camera) + "\n")
        ini_file.write("imDir=img1\n")
        ini_file.write("frameRate=30\n")
        ini_file.write("seqLength=" + str(seq_length) + "\n")
        ini_file.write("imWidth=1920\n")
        ini_file.write("imHeight=1080\n")
        ini_file.write("imExt=.jpg\n")
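# Example seqinfo.ini written above for camera="MTMC" and seq_length=20000 (the image
# directory, frame rate, resolution, and extension are fixed placeholder values in this
# script):
#
#   [Sequence]
#   name=MTMC
#   imDir=img1
#   frameRate=30
#   seqLength=20000
#   imWidth=1920
#   imHeight=1080
#   imExt=.jpg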
def prepare_evaluation_folder(dataset_config: Dict[str, Any], input_file_type: str) -> Tuple[str, str]:
    """
    Prepares the evaluation folder structure required for TrackEval.
    :param Dict[str, Any] dataset_config: The dataset configuration dictionary.
    :param str input_file_type: The name used for the sequence and tracker files (e.g., "MTMC").
    :return: A tuple containing the prediction file path and ground truth file path.
    :rtype: Tuple[str, str]
    ::
        pred_file_path, gt_file_path = prepare_evaluation_folder(dataset_config, input_file_type)
    """
    # Create evaluation configs for TrackEval library
    sensor_ids = sorted({input_file_type})
    # Create sequence maps file for evaluation
    seq_maps_dir_path = os.path.join(dataset_config["GT_FOLDER"], "seqmaps")
    make_seq_maps_file(seq_maps_dir_path, sensor_ids, dataset_config["BENCHMARK"], dataset_config["SPLIT_TO_EVAL"])
    # Create ground truth directory
    mot_version = dataset_config["BENCHMARK"] + "-" + dataset_config["SPLIT_TO_EVAL"]
    gt_root_dir_path = os.path.join(dataset_config["GT_FOLDER"], mot_version)
    gt_dir_path = os.path.join(gt_root_dir_path, input_file_type)
    make_dir(gt_dir_path)
    gt_output_dir_path = os.path.join(gt_dir_path, "gt")
    make_dir(gt_output_dir_path)
    gt_file_path = os.path.join(gt_output_dir_path, "gt.txt")
    # Generate sequence file required for TrackEval library
    make_seq_ini_file(gt_dir_path, camera=input_file_type, seq_length=20000)
    # Create prediction directory
    pred_dir_path = os.path.join(dataset_config["TRACKERS_FOLDER"], mot_version, "data", "data")
    make_dir(pred_dir_path)
    pred_file_path = os.path.join(pred_dir_path, f"{input_file_type}.txt")
    return pred_file_path, gt_file_path
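# Resulting folder structure (sketch, assuming BENCHMARK="MTMC", SPLIT_TO_EVAL="all", and
# input_file_type="MTMC" as used further below):
#
#   <GT_FOLDER>/seqmaps/MTMC-all.txt
#   <GT_FOLDER>/MTMC-all/MTMC/seqinfo.ini
#   <GT_FOLDER>/MTMC-all/MTMC/gt/gt.txt            <- returned gt_file_path
#   <TRACKERS_FOLDER>/MTMC-all/data/data/MTMC.txt  <- returned pred_file_path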
def run_evaluation(
    gt_file: str,
    prediction_file: str,
    fps: float,
    app_config: Any,
    dataset_config: Dict[str, Any],
    eval_config: Dict[str, Any],
    eval_type: str,
) -> Any:
    """
    Executes the evaluation process using TrackEval based on the provided configurations.
    :param str gt_file: The ground truth file path.
    :param str prediction_file: The prediction file path.
    :param float fps: The frames per second rate.
    :param Any app_config: The application configuration object (unused; may be None).
    :param Dict[str, Any] dataset_config: The dataset configuration dictionary.
    :param Dict[str, Any] eval_config: The evaluation configuration dictionary.
    :param str eval_type: The type of evaluation to perform ("bbox" or "location").
    :return: The evaluation results.
    :rtype: Any
    ::
        results = run_evaluation(gt_file, prediction_file, fps, app_config, dataset_config, eval_config, eval_type)
    """
    # Define the metrics to calculate
    metrics_config = {"METRICS": ["HOTA"]}
    metrics_config["PRINT_CONFIG"] = False
    config = {**eval_config, **dataset_config, **metrics_config}  # Merge configs
    eval_config = {k: v for k, v in config.items() if k in eval_config.keys()}
    dataset_config = {k: v for k, v in config.items() if k in dataset_config.keys()}
    metrics_config = {k: v for k, v in config.items() if k in metrics_config.keys()}
    # Run the Evaluator
    evaluator = trackeval.eval.Evaluator(eval_config)
    if eval_type == "bbox":
        dataset_list = [trackeval.datasets.MTMCChallenge3DBBox(dataset_config)]
    elif eval_type == "location":
        dataset_list = [trackeval.datasets.MTMCChallenge3DLocation(dataset_config)]
    else:
        raise ValueError(f"Unsupported evaluation type: {eval_type}")
    metrics_list: List[Any] = list()
    for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity]:
        if metric.get_name() in metrics_config["METRICS"]:
            metrics_list.append(metric(metrics_config))
    if len(metrics_list) == 0:
        raise Exception("No metric selected for evaluation.")
    results = evaluator.evaluate(dataset_list, metrics_list)
    return results
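# Minimal usage sketch (mirrors the calls made in _evaluate_tracking_for_all_BEV_sensors
# below; the paths and core count are placeholders):
#
#   dataset_config, eval_config = setup_evaluation_configs("results", "bbox", num_cores=8)
#   pred_path, gt_path = prepare_evaluation_folder(dataset_config, "MTMC")
#   prepare_ground_truth_file("gt.txt", gt_path, fps=30)
#   prepare_prediction_file("pred.txt", pred_path, fps=30)
#   results = run_evaluation(gt_path, pred_path, 30, None, dataset_config, eval_config, "bbox")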
def _evaluate_tracking_for_all_BEV_sensors(ground_truth_file: str, prediction_file: str, output_directory: str, num_cores: int, fps: float):
    """
    Evaluates tracking performance for all BEV sensors, one class at a time.
    :param str ground_truth_file: The path to the ground truth file.
    :param str prediction_file: The path to the prediction file.
    :param str output_directory: The directory containing one sub-folder per class, each with gt.txt and pred.txt.
    :param int num_cores: The number of CPU cores used for parallel evaluation.
    :param float fps: The frame rate (FPS) of the videos.
    :return: The evaluation results per class.
    :rtype: Dict[str, Any]
    ::
        all_results = _evaluate_tracking_for_all_BEV_sensors(ground_truth_file, prediction_file, output_directory, num_cores, fps)
    """
print("")
all_results = {}
for class_name in CLASS_LIST:
class_dir = os.path.join(output_directory, class_name)
if not os.path.isdir(class_dir):
logging.warning(f"Skipping class folder '{class_name}' as it was not found.")
print("--------------------------------")
continue
logging.info(f"Evaluating all BEV sensors on class {class_name}.")
ground_truth_file = os.path.join(class_dir, "gt.txt")
prediction_file = os.path.join(class_dir, "pred.txt")
output_dir = os.path.join(class_dir, "output")
if not os.path.exists(ground_truth_file) or not os.path.exists(prediction_file):
logging.info(f"Skipping class folder '{class_name}' as it was not found.")
print("--------------------------------")
continue
# Setup evaluation library & folders
dataset_config, eval_config = setup_evaluation_configs(output_directory, "bbox", num_cores)
output_pred_file_name, output_gt_file_name = prepare_evaluation_folder(dataset_config, "MTMC")
logging.info("Completed setup for evaluation library.")
# Prepare ground truth
prepare_ground_truth_file(ground_truth_file, output_gt_file_name, fps)
logging.info(f"Completed parsing ground-truth file {ground_truth_file}.")
# Prepare prediction results
prepare_prediction_file(prediction_file, output_pred_file_name, fps)
logging.info(f"Completed parsing prediction file {prediction_file}.")
# Run evaluation
results = run_evaluation(output_gt_file_name, output_pred_file_name, fps, None, dataset_config, eval_config, "bbox")
all_results[class_name] = results
print("--------------------------------------------------------------")
return all_results
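# Example call (illustrative; the ground truth and prediction paths are resolved per class
# inside the loop above from <output_directory>/<class_name>/{gt.txt, pred.txt}, and the
# argument values below are placeholders):
#
#   all_results = _evaluate_tracking_for_all_BEV_sensors(
#       ground_truth_file="gt.txt",
#       prediction_file="pred.txt",
#       output_directory="results/per_class",
#       num_cores=8,
#       fps=30,
#   )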