Updated code
52d6f05
import os
import json
import numpy as np
from scipy.optimize import linear_sum_assignment
import logging
from collections import defaultdict
class PixelBBoxTracker:
def __init__(self, max_disappeared=50, max_distance=100, max_pigs=9):
        self.tracks = {}  # Active tracks: {id: {"centroid": ..., "bbox": ..., "disappeared": ...}}
self.next_id = 1
self.max_disappeared = max_disappeared
self.max_distance = max_distance
self.max_pigs = max_pigs
self.disappeared_tracks = {} # Temporarily lost tracks
self.track_history = defaultdict(list) # Store recent positions
        self.ambiguous_threshold = 0.1  # Cost difference threshold for ambiguity (reserved; not used yet)
self.iou_weight = 0.4 # Weight for IoU in cost calculation
self.centroid_weight = 0.4 # Weight for centroid distance
self.area_weight = 0.2 # Weight for area similarity
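        # Note: the three weights sum to 1.0, so the combined cost from _calculate_cost stays in [0, 1].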
def _get_centroid(self, bbox):
x, y, w, h = bbox
return np.array([x + w/2, y + h/2])
def _calculate_area(self, bbox):
_, _, w, h = bbox
return w * h
def _area_similarity(self, area1, area2):
"""Calculate normalized area similarity (1.0 = identical areas)"""
if area1 == 0 or area2 == 0:
return 0.0
min_area = min(area1, area2)
max_area = max(area1, area2)
return min_area / max_area
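    # Example: _area_similarity(4000, 5000) returns 4000 / 5000 = 0.8.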
def _bbox_iou(self, box1, box2):
"""Calculate Intersection over Union (IoU) of two bounding boxes"""
# Box format: [x, y, w, h]
x1, y1, w1, h1 = box1
x2, y2, w2, h2 = box2
# Calculate intersection coordinates
xi1 = max(x1, x2)
yi1 = max(y1, y2)
xi2 = min(x1 + w1, x2 + w2)
yi2 = min(y1 + h1, y2 + h2)
# Calculate intersection area
inter_width = max(0, xi2 - xi1)
inter_height = max(0, yi2 - yi1)
inter_area = inter_width * inter_height
# Calculate union area
box1_area = w1 * h1
box2_area = w2 * h2
union_area = box1_area + box2_area - inter_area
return inter_area / union_area if union_area > 0 else 0.0
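    # Example: _bbox_iou([0, 0, 10, 10], [5, 5, 10, 10]) intersects in a 5x5 patch,
    # so IoU = 25 / (100 + 100 - 25) ≈ 0.143.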
def _calculate_cost(self, track, detection_bbox):
"""Calculate combined cost using centroid distance, IoU, and area similarity"""
# Get track information
last_centroid = track["centroid"]
last_bbox = track["bbox"]
track_area = self._calculate_area(last_bbox)
# Detection information
detection_centroid = self._get_centroid(detection_bbox)
detection_area = self._calculate_area(detection_bbox)
# Calculate components
centroid_distance = np.linalg.norm(detection_centroid - last_centroid)
normalized_distance = min(centroid_distance / self.max_distance, 1.0)
iou = self._bbox_iou(last_bbox, detection_bbox)
iou_term = 1.0 - iou
area_sim = self._area_similarity(track_area, detection_area)
area_term = 1.0 - area_sim
# Combine with weights
cost = (self.centroid_weight * normalized_distance +
self.iou_weight * iou_term +
self.area_weight * area_term)
return cost
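    # Example: with the default 0.4/0.4/0.2 weights, a detection 50 px away
    # (max_distance=100) with IoU 0.5 and area similarity 0.8 costs
    # 0.4 * 0.5 + 0.4 * 0.5 + 0.2 * 0.2 = 0.44, below the 0.8 acceptance threshold.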
def update(self, detections):
        # Filter out small bounding boxes (areas below 100 px² are treated as detector noise)
        detections = [d for d in detections if self._calculate_area(d['bbox']) >= 100]
# Get current frame information
current_centroids = [self._get_centroid(d['bbox']) for d in detections]
detection_bboxes = [d['bbox'] for d in detections]
track_ids = [-1] * len(detections) # Initialize all as unmatched
# Stage 1: Match existing tracks to detections
if self.tracks and detections:
track_ids_list = list(self.tracks.keys())
            cost_matrix = np.full((len(track_ids_list), len(detections)), 10.0)  # Default cost, far above the 0.8 acceptance threshold
# Calculate cost matrix
for t_idx, track_id in enumerate(track_ids_list):
track = self.tracks[track_id]
for d_idx, bbox in enumerate(detection_bboxes):
cost = self._calculate_cost(track, bbox)
centroid_distance = np.linalg.norm(current_centroids[d_idx] - track["centroid"])
# Only consider if within max distance
if centroid_distance <= self.max_distance:
cost_matrix[t_idx, d_idx] = cost
            # Apply the Hungarian algorithm for optimal matching
            try:
                row_ind, col_ind = linear_sum_assignment(cost_matrix)
            except ValueError:
                # linear_sum_assignment rejects infeasible matrices; with the finite
                # default cost this should not happen, so fall back to no matches.
                row_ind, col_ind = [], []
            # Process matches
            for t_idx, d_idx in zip(row_ind, col_ind):
                if cost_matrix[t_idx, d_idx] < 0.8:  # Only accept good matches
                    track_id = track_ids_list[t_idx]
                    track = self.tracks[track_id]
                    bbox = detection_bboxes[d_idx]
                    # Update track information
                    track["centroid"] = current_centroids[d_idx]
                    track["bbox"] = bbox
                    track["disappeared"] = 0
                    self.track_history[track_id].append(current_centroids[d_idx])
                    # Assign track ID to detection
                    track_ids[d_idx] = track_id
# Stage 2: Handle unmatched detections
unmatched_detections = [d_idx for d_idx, tid in enumerate(track_ids) if tid == -1]
regained_ids = []
new_track_ids = []
for d_idx in unmatched_detections:
centroid = current_centroids[d_idx]
bbox = detection_bboxes[d_idx]
# Try to regain from disappeared tracks
best_match_id = None
min_cost = float('inf')
for track_id, track in self.disappeared_tracks.items():
cost = self._calculate_cost(track, bbox)
centroid_distance = np.linalg.norm(centroid - track["centroid"])
if cost < min_cost and centroid_distance <= self.max_distance:
min_cost = cost
best_match_id = track_id
# Regain track if found
            if best_match_id is not None and len(self.tracks) < self.max_pigs:
# Update track information
self.tracks[best_match_id] = {
"centroid": centroid,
"bbox": bbox,
"disappeared": 0
}
self.track_history[best_match_id].append(centroid)
track_ids[d_idx] = best_match_id
regained_ids.append(best_match_id)
del self.disappeared_tracks[best_match_id]
# Create new track if no match and under capacity
elif len(self.tracks) < self.max_pigs:
new_id = self.next_id
self.tracks[new_id] = {
"centroid": centroid,
"bbox": bbox,
"disappeared": 0
}
self.track_history[new_id].append(centroid)
track_ids[d_idx] = new_id
new_track_ids.append(new_id)
self.next_id += 1
# Stage 3: Update disappeared tracks
lost_track_ids = []
# Check all active tracks
for track_id in list(self.tracks.keys()):
# If track wasn't matched
if track_id not in track_ids:
self.tracks[track_id]["disappeared"] += 1
                # Move to disappeared_tracks once missing longer than max_disappeared
if self.tracks[track_id]["disappeared"] > self.max_disappeared:
self.disappeared_tracks[track_id] = self.tracks[track_id]
del self.tracks[track_id]
lost_track_ids.append(track_id)
# Keep history for potential regain
        # Stage 4: Cap at max pigs (defensive; new and regained tracks are only created below capacity)
        if len(self.tracks) > self.max_pigs:
# Remove oldest lost track (highest disappeared count)
oldest_id = None
max_disappeared = -1
for track_id, track in self.tracks.items():
if track["disappeared"] > max_disappeared:
max_disappeared = track["disappeared"]
oldest_id = track_id
            if oldest_id is not None:
self.disappeared_tracks[oldest_id] = self.tracks[oldest_id]
del self.tracks[oldest_id]
lost_track_ids.append(oldest_id)
# Return only current detections with track IDs
all_track_ids = []
all_bboxes = []
for i, tid in enumerate(track_ids):
if tid != -1:
all_track_ids.append(tid)
all_bboxes.append(detection_bboxes[i])
return all_track_ids, all_bboxes, regained_ids, lost_track_ids
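# A minimal smoke test for PixelBBoxTracker (not part of the original pipeline;
# the boxes and parameters below are hypothetical). It shows that IDs assigned
# in the first frame persist for slightly moved boxes in the next frame.
def _demo_tracker():
    tracker = PixelBBoxTracker(max_disappeared=5, max_distance=100, max_pigs=2)
    frame1 = [{'bbox': [10, 10, 50, 40]}, {'bbox': [200, 200, 60, 50]}]
    frame2 = [{'bbox': [15, 12, 50, 40]}, {'bbox': [205, 198, 60, 50]}]
    ids1, _, _, _ = tracker.update(frame1)  # first frame: fresh IDs, e.g. [1, 2]
    ids2, _, _, _ = tracker.update(frame2)  # same IDs survive the small motion
    assert ids1 == ids2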
# The rest of your code remains the same (read_json_file, save_json_file, setup_logger, and main)
def read_json_file(json_path):
with open(json_path, 'r') as f:
data = json.load(f)
frame_data = {}
for item in data:
frame_id = item['frame_id']
det = {
"bbox": item["bbox"],
"area": item.get("area", 0)
}
if frame_id not in frame_data:
frame_data[frame_id] = {
"frame_width": item.get("frame_width", 1280),
"frame_height": item.get("frame_height", 720),
"detections": []
}
frame_data[frame_id]["detections"].append(det)
return frame_data
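# The reader above expects a flat list of per-detection records; a minimal
# hypothetical input illustrating the required and optional fields:
# [
#   {"frame_id": 0, "bbox": [100, 200, 80, 60], "area": 4800,
#    "frame_width": 1280, "frame_height": 720},
#   {"frame_id": 0, "bbox": [400, 150, 90, 70]}
# ]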
def save_json_file(output_path, results):
coco_output = {
"images": [],
"annotations": [],
"categories": [{"id": 1, "name": "pig"}]
}
annotation_id = 1
for frame_id in sorted(results.keys()):
frame = results[frame_id]
width = frame["frame_width"]
height = frame["frame_height"]
file_name = f"{frame_id:08d}.jpg"
coco_output["images"].append({
"id": frame_id,
"file_name": file_name,
"width": width,
"height": height
})
for det in frame["detections"]:
x, y, w, h = det["bbox"]
area = det.get("area", w * h)
coco_output["annotations"].append({
"id": annotation_id,
"image_id": frame_id,
"category_id": 1,
"bbox": [x, y, w, h],
"track_id": det["track_id"],
"area": area,
"iscrowd": 0
})
annotation_id += 1
with open(output_path, 'w') as f:
json.dump(coco_output, f, indent=2)
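# A single annotation in the COCO-style output looks like (hypothetical values):
# {"id": 1, "image_id": 0, "category_id": 1, "bbox": [100, 200, 80, 60],
#  "track_id": 3, "area": 4800, "iscrowd": 0}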
def setup_logger(log_path):
logger = logging.getLogger('tracking_logger')
logger.setLevel(logging.INFO)
    # Close and remove handlers left over from a previous file to avoid leaking file handles
    for handler in logger.handlers[:]:
        handler.close()
        logger.removeHandler(handler)
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger
if __name__ == "__main__":
input_dir = "path/to/your/detected_json"
output_dir = "path/to/your/tracked_json"
log_dir = "path/to/your/tracking_log"
os.makedirs(output_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
for file_name in os.listdir(input_dir):
if not file_name.endswith(".json"):
continue
input_path = os.path.join(input_dir, file_name)
output_path = os.path.join(output_dir, file_name.replace("detection.json", "tracked.json"))
log_path = os.path.join(log_dir, file_name.replace(".json", ".log"))
logger = setup_logger(log_path)
logger.info(f"Starting processing for {file_name}")
frames = read_json_file(input_path)
tracker = PixelBBoxTracker(max_disappeared=180, max_distance=125, max_pigs=9)
results = {}
detection_counts = defaultdict(list)
for frame_id in sorted(frames.keys()):
frame = frames[frame_id]
detections = frame["detections"]
            # Drop any pre-existing track ID so the tracker assigns fresh ones
for det in detections:
det.pop("track_id", None)
# Process tracking
track_ids, bboxes, regained_ids, lost_track_ids = tracker.update(detections)
# Prepare detections for this frame
frame_detections = []
for track_id, bbox in zip(track_ids, bboxes):
frame_detections.append({
"bbox": bbox,
"track_id": track_id,
"area": bbox[2] * bbox[3] # w * h
})
# Store results
results[frame_id] = {
"frame_width": frame["frame_width"],
"frame_height": frame["frame_height"],
"detections": frame_detections
}
# Logging
detection_count = len(frame_detections)
detection_counts[detection_count].append(frame_id)
            if detection_count > 9:
                logger.warning(f"Frame {frame_id}: Unexpectedly high detection count ({detection_count})")
elif detection_count < 9:
logger.info(f"Frame {frame_id}: Only {detection_count} detections")
if lost_track_ids:
logger.info(f"Frame {frame_id}: Lost tracks - {', '.join(map(str, lost_track_ids))}")
if regained_ids:
logger.info(f"Frame {frame_id}: Regained tracks - {', '.join(map(str, regained_ids))}")
# Save detection count statistics
logger.info("\nDetection Count Statistics:")
        for count, frame_ids in sorted(detection_counts.items()):  # avoid shadowing the frames dict
            logger.info(f"{count} detections: {len(frame_ids)} frames")
# Save results
save_json_file(output_path, results)
logger.info(f"Tracking complete. Output saved to {output_path}\n")
print("All files processed successfully.")