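"""Scene-level multi-camera (BEV) tracking evaluation.

Splits ground truth and prediction files per scene and per class, runs the
tracking evaluation (via utils.trackeval.trackeval_utils) for every scene,
and reports HOTA / DetA / AssA / LocA scores weighted by the number of
ground-truth objects per scene.

Example invocation (script name and file paths are illustrative):

    python evaluate_tracking.py \
        --ground_truth_file gt.txt \
        --input_file pred.txt \
        --scene_id_2_scene_name_file scene_id_2_scene_name.json \
        --output_dir ./eval_output \
        --num_cores 8 \
        --num_frames_to_eval 9000
"""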
import os
import json
import logging
import argparse
from datetime import datetime
from typing import List, Dict, Set, Tuple, Any
import tempfile
import time

import numpy as np

from utils.io_utils import ValidateFile, validate_file_path, load_json_from_file, split_files_per_class, split_files_per_scene, get_no_of_objects_per_scene
from utils.trackeval.trackeval_utils import _evaluate_tracking_for_all_BEV_sensors

logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%y/%m/%d %H:%M:%S", level=logging.INFO)

def evaluate_tracking_for_all_BEV_sensors(ground_truth_file, prediction_file, output_root_dir, num_cores, scene_id, num_frames_to_eval):
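    """Run per-class BEV tracking evaluation for a single scene.

    Splits the scene-level ground truth and prediction files per class, then
    delegates to `_evaluate_tracking_for_all_BEV_sensors` from
    `utils.trackeval.trackeval_utils`. The returned mapping is assumed (based
    on how it is consumed in `run_evaluation` below) to be
    {class_name: scene_results}, where
    scene_results[0]["MTMCChallenge3DBBox"]["data"]["MTMC"]["class"]["HOTA"]
    holds per-threshold HOTA, DetA, AssA and LocA arrays.
    """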
logging.info(f"Computing tracking results for scene id: {scene_id}...") | |
output_directory = os.path.join(output_root_dir) | |
os.makedirs(output_directory, exist_ok=True) | |
split_files_per_class(ground_truth_file, prediction_file, output_directory, 0.0, num_frames_to_eval, 0.0, fps=30) | |
all_class_results = _evaluate_tracking_for_all_BEV_sensors(ground_truth_file, prediction_file, output_directory, num_cores, 30) | |
return all_class_results | |

def get_weighted_avg(weights, values):
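    """Weighted average of `values`, weighted by `weights`, over their shared keys.

    Keys present in only one of the two dicts are ignored; if no keys are
    shared (or all shared weights sum to zero), 0.0 is returned.

    Illustrative example (values are made up):
        weights = {"scene_a": 10, "scene_b": 30}    # e.g. objects per scene
        values  = {"scene_a": 0.5, "scene_b": 0.9}  # e.g. HOTA per scene
        get_weighted_avg(weights, values)  # (10*0.5 + 30*0.9) / 40 = 0.8
    """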
    common = weights.keys() & values.keys()
    numerator = sum(weights[k] * values[k] for k in common)
    denominator = sum(weights[k] for k in common)
    return numerator / denominator if denominator else 0.0

def run_evaluation(ground_truth_file, input_file, output_dir, num_cores, num_frames_to_eval, scene_id_2_scene_name_file):
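    """Evaluate tracking for every scene listed in the scene-id-to-name mapping.

    `scene_id_2_scene_name_file` is a JSON file mapping scene ids to scene
    names, e.g. {"1": "Warehouse_000"} (example values are illustrative).
    Per-scene HOTA / DetA / AssA / LocA are averaged over classes and then
    combined into final scores weighted by the number of ground-truth objects
    in each scene. If `output_dir` is None, intermediate files are written to
    a temporary directory.
    """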
    is_temp_dir = False
    if output_dir is None:
        temp_dir = tempfile.TemporaryDirectory()
        is_temp_dir = True
        output_dir = temp_dir.name
        logging.info(f"Temp files will be created here: {output_dir}")
    scene_id_2_scene_name = load_json_from_file(scene_id_2_scene_name_file)
    logging.info(f"Evaluating scenes: {list(scene_id_2_scene_name.keys())}")
    split_files_per_scene(ground_truth_file, input_file, output_dir, scene_id_2_scene_name, num_frames_to_eval)
    objects_per_scene = get_no_of_objects_per_scene(ground_truth_file, scene_id_2_scene_name)
    hota_per_scene = dict()
    detA_per_scene = dict()
    assA_per_scene = dict()
    locA_per_scene = dict()
    detailed_results = dict()
    for scene_id in scene_id_2_scene_name.keys():
        logging.info(f"Evaluating scene: {scene_id}")
        output_directory = os.path.join(output_dir, f"scene_{scene_id}")
        scene_ground_truth_file = os.path.join(output_directory, "gt.txt")
        scene_prediction_file = os.path.join(output_directory, "pred.txt")
        # Skip scenes for which the per-scene split did not produce both the ground truth and prediction files
        if not os.path.exists(scene_ground_truth_file) or not os.path.exists(scene_prediction_file):
            logging.info(f"Skipping scene {scene_id} because input or ground truth file does not exist")
            continue
        results = evaluate_tracking_for_all_BEV_sensors(scene_ground_truth_file, scene_prediction_file, output_directory, num_cores, scene_id, num_frames_to_eval)
        hota_per_class = []
        detA_per_class = []
        assA_per_class = []
        locA_per_class = []
        class_results = dict()
        for class_name, scene_results in results.items():
            result = scene_results[0]["MTMCChallenge3DBBox"]["data"]["MTMC"]["class"]["HOTA"]
            # Average results across all thresholds
            hota_per_class.append(np.mean(result["HOTA"]))
            detA_per_class.append(np.mean(result["DetA"]))
            assA_per_class.append(np.mean(result["AssA"]))
            locA_per_class.append(np.mean(result["LocA"]))
            # Single-class results for the detailed per-scene report
            class_results[class_name] = {
                "hota": np.mean(result["HOTA"]),
                "detA": np.mean(result["DetA"]),
                "assA": np.mean(result["AssA"]),
                "locA": np.mean(result["LocA"])
            }
        scene_name = scene_id_2_scene_name[scene_id]
        detailed_results[scene_name] = class_results
        avg_hota_all_classes = np.mean(hota_per_class)
        avg_detA_all_classes = np.mean(detA_per_class)
        avg_assA_all_classes = np.mean(assA_per_class)
        avg_locA_all_classes = np.mean(locA_per_class)
        hota_per_scene[scene_name] = avg_hota_all_classes
        detA_per_scene[scene_name] = avg_detA_all_classes
        assA_per_scene[scene_name] = avg_assA_all_classes
        locA_per_scene[scene_name] = avg_locA_all_classes
    # Weighted average across scenes, weighted by the number of ground-truth objects per scene
    final_hota = get_weighted_avg(objects_per_scene, hota_per_scene) * 100
    final_detA = get_weighted_avg(objects_per_scene, detA_per_scene) * 100
    final_assA = get_weighted_avg(objects_per_scene, assA_per_scene) * 100
    final_locA = get_weighted_avg(objects_per_scene, locA_per_scene) * 100
    logging.info(f"Final HOTA: {final_hota}")
    logging.info(f"Final DetA: {final_detA}")
    logging.info(f"Final AssA: {final_assA}")
    logging.info(f"Final LocA: {final_locA}")
    return final_hota, final_detA, final_assA, final_locA, detailed_results

if __name__ == "__main__":
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument("--ground_truth_file", type=validate_file_path,
                        action=ValidateFile, help="Input ground truth file", required=True)
    parser.add_argument("--input_file", type=validate_file_path,
                        action=ValidateFile, help="Input prediction file", required=True)
    parser.add_argument("--output_dir", type=str, help="Optional output directory; a temporary directory is used if omitted")
    parser.add_argument("--scene_id_2_scene_name_file", type=validate_file_path,
                        action=ValidateFile, help="Input scene id to scene name file in JSON format", required=True)
    parser.add_argument("--num_cores", type=int, help="Number of cores to use")
    parser.add_argument("--num_frames_to_eval", type=int, help="Number of frames to evaluate", default=9000)

    # Parse and validate arguments
    args = parser.parse_args()
    ground_truth_file = validate_file_path(args.ground_truth_file)
    input_file = validate_file_path(args.input_file)

    # Run evaluation
    run_evaluation(ground_truth_file, input_file, args.output_dir, args.num_cores, args.num_frames_to_eval, args.scene_id_2_scene_name_file)

    # Log processing time
    end_time = time.time()
    logging.info(f"Total time taken: {end_time - start_time:.2f} seconds")