from vitpose import VitPose
import requests
import os
from config import API_URL, API_KEY
from fastapi import UploadFile
import logging
import cv2
import numpy as np
import time
import json

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def process_video(file_name: str, vitpose: VitPose, user_id: str, player_id: str):
    # The input video is expected to already exist on disk at file_name.
    video_path = file_name
    logger.info(f"starting task {video_path}")

    new_file_name = os.path.join("static", video_path)
    logger.info(f"new file name {new_file_name}")
    vitpose.output_video_path = new_file_name

    annotated_frames = vitpose.run(video_path)
    vitpose.frames_to_video(annotated_frames)
    logger.info(f"Video processed {video_path}")

    with open(new_file_name, "rb") as f:
        contents = f.read()

    url = API_URL + "/excercises/webhooks/video-processed"
    logger.info(f"Sending video to {url}")
    files = {"file": (video_path, contents, "video/mp4")}
    logger.info(f"video_path: {video_path}")
    response = requests.post(
        url,
        files=files,
        data={
            "user_id": user_id,
            "typeMessage": "video_processed",
            "file_name": video_path,
            "player_id": player_id,
        },
        stream=True,
        headers={"token": API_KEY},
    )
    logger.info(f"Response: {response.status_code}")
    logger.info(f"Response: {response.text}")
    logger.info(f"Video sent to {url}")
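
# Illustrative usage sketch (the path and IDs below are placeholders, and the
# VitPose constructor arguments are not shown in this file, so they are assumed):
#
#   vitpose = VitPose(...)  # pose-estimation wrapper constructed elsewhere
#   process_video("jump_01.mp4", vitpose, user_id="user-123", player_id="player-456")
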
def process_salto_alto(file_name: str, vitpose: VitPose, player_data: dict, repetitions: int, exercise_id: str):
    """
    Process a high jump exercise video using VitPose for pose estimation.

    Args:
        file_name: Path to the input video
        vitpose: VitPose instance for pose estimation
        player_data: Dictionary containing player information
        repetitions: Expected number of repetitions
        exercise_id: ID of the exercise
    """
    # Use the provided VitPose instance
    model = vitpose.pipeline

    # Get player parameters from player_data or fall back to defaults
    reference_height = player_data.get('height', 1.68)  # Approximate height of the person in meters
    body_mass_kg = player_data.get('weight', 64)  # Body mass in kg

    # Generate output paths
    output_video = file_name.replace('.mp4', '_analyzed.mp4')
    output_json = output_video.replace('.mp4', '.json')

    # Process the video and get the jump metrics
    results_dict = analyze_jump_video(
        model=model,
        input_video=file_name,
        output_video=output_video,
        reference_height=reference_height,
        body_mass_kg=body_mass_kg
    )

    # Save results to JSON (json.dump writes to the file; json.dumps only returns a string)
    with open(output_json, 'w') as f:
        json.dump(results_dict, f, indent=4)

    # Print summary
    print("\nFinal results:")
    print(f"Maximum relative jump: {results_dict['jump_metrics']['max_relative_jump']:.2f} m")
    print(f"Maximum high jump: {results_dict['jump_metrics']['max_high_jump']:.2f} m")
    print(f"Estimated Sayers peak power: {results_dict['jump_metrics']['peak_power_sayer']:.2f} W")

    # Return results dictionary
    return {
        "output_video": output_video,
        "output_json": output_json,
        "metrics": results_dict
    }
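
# Illustrative usage sketch (player values are placeholders): the output paths are
# derived from the input file name, so "jump_01.mp4" produces "jump_01_analyzed.mp4"
# and "jump_01_analyzed.json" alongside it.
#
#   result = process_salto_alto(
#       "jump_01.mp4",
#       vitpose,  # a VitPose instance, as in the sketch above
#       player_data={"height": 1.75, "weight": 70},
#       repetitions=3,
#       exercise_id="salto-alto-01",
#   )
#   print(result["metrics"]["jump_metrics"]["max_relative_jump"])
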
def analyze_jump_video(model, input_video, output_video, reference_height=1.68, body_mass_kg=64):
    """
    Analyze a jump video to calculate various jump metrics.

    Args:
        model: VitPose model instance
        input_video: Path to input video
        output_video: Path to output video
        reference_height: Height of the person in meters
        body_mass_kg: Weight of the person in kg

    Returns:
        Dictionary containing jump metrics and video analysis data
    """
    # Configuration parameters
    JUMP_THRESHOLD_PERCENT = 0.05  # Relative change in ankle height that marks the start of a jump
    SMOOTHING_WINDOW = 5  # Window (in frames) used to smooth the ankle and head heights
    HORIZONTAL_OFFSET_FACTOR = 0.75  # Factor used to place the box between the shoulder and the frame edge
    VELOCITY_WINDOW = 3  # Number of frames used to estimate the vertical velocity
    METRICS_BELOW_FEET_OFFSET = 20  # Offset in pixels to place the metric boxes below the feet

    # Color palette (BGR)
    BLUE = (255, 0, 0)
    GREEN = (0, 255, 0)
    YELLOW = (0, 255, 255)
    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
    GRAY = (128, 128, 128)
    LIGHT_GRAY = (200, 200, 200)

    repetition_data = []

    # Open the video
    cap = cv2.VideoCapture(input_video)
    if not cap.isOpened():
        print("Error opening the video")
        return {}
    # Read the first frame to calibrate the scale and get the initial shoulder positions
    ret, frame = cap.read()
    if not ret:
        print("Error reading the video")
        return {}

    # Initialize calibration variables
    PX_PER_METER = None
    initial_person_height_px = None
    initial_left_shoulder_x = None
    initial_right_shoulder_x = None

    # Process the first frame to calibrate
    results_first_frame = model(frame)  # Detect the pose in the first frame
    if results_first_frame and results_first_frame[0].keypoints and len(results_first_frame[0].keypoints.xy[0]) > 0:
        kpts_first = results_first_frame[0].keypoints.xy[0].cpu().numpy()
        if kpts_first[0][1] > 0 and kpts_first[15][1] > 0 and kpts_first[16][1] > 0:  # Nose (0) and ankles (15, 16)
            initial_person_height_px = min(kpts_first[15][1], kpts_first[16][1]) - kpts_first[0][1]
            PX_PER_METER = initial_person_height_px / reference_height
            print(f"Computed scale: {PX_PER_METER:.2f} px/m")
        if kpts_first[5][0] > 0 and kpts_first[6][0] > 0:  # Left (5) and right (6) shoulders
            initial_left_shoulder_x = int(kpts_first[5][0])
            initial_right_shoulder_x = int(kpts_first[6][0])

    if PX_PER_METER is None or initial_left_shoulder_x is None or initial_right_shoulder_x is None:
        print("Could not calibrate the scale or detect the shoulders in the first frame.")
        cap.release()
        return {}

    # Reopen the video so processing starts again from the first frame
    cap.release()
    cap = cv2.VideoCapture(input_video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    # Variables for metrics and visualization
    ground_level = None
    takeoff_head_y = None
    max_jump_height = 0  # Maximum relative jump
    max_head_height_px = None  # Maximum head height in pixels (lowest y coordinate)
    jump_started = False
    head_y_history = []
    ankle_y_history = []
    last_detected_ankles_y = None
    head_y_buffer = []
    velocity_vertical = 0.0
    peak_power_sayer = 0.0  # Initialize Sayers power
    person_detected = False  # Flag indicating whether a person was detected in any frame
    current_power = 0.0
    repetition_count = 0
    jump_in_air = False

    # Process each frame
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        annotated_frame = frame.copy()
        results = model(annotated_frame)

        if results and results[0].keypoints and len(results[0].keypoints.xy[0]) > 0:
            person_detected = True
            kpts = results[0].keypoints.xy[0].cpu().numpy()
            nose = kpts[0]
            ankles = [kpts[15], kpts[16]]
            left_shoulder = kpts[5]
            right_shoulder = kpts[6]

            if nose[1] > 0 and all(a[1] > 0 for a in ankles) and left_shoulder[0] > 0 and right_shoulder[0] > 0:
                current_ankle_y = min(a[1] for a in ankles)
                last_detected_ankles_y = current_ankle_y  # Save the current ankle position
                current_head_y = nose[1]
                current_left_shoulder_x = int(left_shoulder[0])
                current_right_shoulder_x = int(right_shoulder[0])

                # Smooth the ankle and head positions
                ankle_y_history.append(current_ankle_y)
                if len(ankle_y_history) > SMOOTHING_WINDOW:
                    ankle_y_history.pop(0)
                smoothed_ankle_y = np.mean(ankle_y_history)

                head_y_history.append(current_head_y)
                if len(head_y_history) > SMOOTHING_WINDOW:
                    head_y_history.pop(0)
                smoothed_head_y = np.mean(head_y_history)

                # Calculate vertical velocity (using the head position)
                head_y_buffer.append(smoothed_head_y)
                if len(head_y_buffer) > VELOCITY_WINDOW:
                    head_y_buffer.pop(0)
                if PX_PER_METER is not None and fps > 0:
                    delta_y_pixels = head_y_buffer[0] - head_y_buffer[-1]
                    delta_y_meters = delta_y_pixels / PX_PER_METER
                    delta_t = VELOCITY_WINDOW / fps
                    velocity_vertical = delta_y_meters / delta_t

                # Set the ground level in the first frame where the ankles are detected
                if ground_level is None:
                    ground_level = smoothed_ankle_y
                    takeoff_head_y = smoothed_head_y

                relative_ankle_change = (ground_level - smoothed_ankle_y) / ground_level if ground_level > 0 else 0

                # Detect jump start
                if not jump_started and relative_ankle_change > JUMP_THRESHOLD_PERCENT:
                    jump_started = True
                    takeoff_head_y = smoothed_head_y
                    max_jump_height = 0
                    max_head_height_px = smoothed_head_y

                # Detect jump end
                if jump_started and relative_ankle_change <= JUMP_THRESHOLD_PERCENT:
                    # Record the completed repetition
                    salto_alto = calculate_absolute_jump_height(reference_height, max_jump_height)
                    repetition_data.append({
                        "repetition": repetition_count + 1,
                        "relative_jump_m": round(max_jump_height, 3),
                        "absolute_jump_m": round(salto_alto, 3),
                        "peak_power_watts": round(current_power, 1)
                    })
                    repetition_count += 1
                    jump_started = False

                # Update the jump metrics while in the air
                if jump_started:
                    relative_jump = (takeoff_head_y - smoothed_head_y) / PX_PER_METER
                    if relative_jump > max_jump_height:
                        max_jump_height = relative_jump
                    if smoothed_head_y < max_head_height_px:
                        max_head_height_px = smoothed_head_y
                    if relative_jump > 0:
                        current_power = calculate_peak_power_sayer(relative_jump, body_mass_kg)
                        if current_power > peak_power_sayer:
                            peak_power_sayer = current_power
            else:
                last_detected_ankles_y = None  # Reset the position if the ankles are not detected
                velocity_vertical = 0.0  # Reset the velocity if there is no reliable detection
        # Calculate the absolute jump height
        salto_alto = calculate_absolute_jump_height(reference_height, max_jump_height)

        # Draw the floating metric boxes
        annotated_frame = draw_metrics_overlay(
            frame=annotated_frame,
            max_jump_height=max_jump_height,
            salto_alto=salto_alto,
            velocity_vertical=velocity_vertical,
            peak_power_sayer=peak_power_sayer,
            repetition_count=repetition_count,
            last_detected_ankles_y=last_detected_ankles_y,
            initial_left_shoulder_x=initial_left_shoulder_x,
            initial_right_shoulder_x=initial_right_shoulder_x,
            width=width,
            height=height,
            colors={
                "blue": BLUE,
                "green": GREEN,
                "yellow": YELLOW,
                "white": WHITE,
                "black": BLACK,
                "gray": GRAY,
                "light_gray": LIGHT_GRAY
            },
            metrics_below_feet_offset=METRICS_BELOW_FEET_OFFSET,
            horizontal_offset_factor=HORIZONTAL_OFFSET_FACTOR
        )

        out.write(annotated_frame)
    # Prepare the results dictionary
    results_dict = {
        "jump_metrics": {
            "max_relative_jump": float(max(0, max_jump_height)),
            "max_high_jump": float(max(0, salto_alto)),
            "peak_power_sayer": float(peak_power_sayer),
            "repetitions": int(repetition_count),
            "reference_height": float(reference_height),
            "body_mass_kg": float(body_mass_kg),
            "px_per_meter": float(PX_PER_METER) if PX_PER_METER is not None else 0.0
        },
        "video_analysis": {
            "input_video": str(input_video),
            "output_video": str(output_video),
            "fps": float(fps),
            "resolution": f"{int(width)}x{int(height)}"
        },
        "repetition_data": [
            {
                "repetition": int(rep["repetition"]),
                "relative_jump_m": float(rep["relative_jump_m"]),
                "absolute_jump_m": float(rep["absolute_jump_m"]),
                "peak_power_watts": float(rep["peak_power_watts"])
            } for rep in repetition_data
        ]
    }

    cap.release()
    out.release()
    return results_dict
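
# Worked example of the scale and velocity math above (illustrative numbers): if the
# nose-to-ankle span in the first frame is 420 px and reference_height is 1.68 m,
# PX_PER_METER = 420 / 1.68 = 250 px/m. With VELOCITY_WINDOW = 3 and a 30 fps video,
# a 25 px upward head displacement over that window gives
#   delta_y_meters = 25 / 250 = 0.10 m
#   delta_t = 3 / 30 = 0.10 s
#   velocity_vertical = 0.10 / 0.10 = 1.0 m/s
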
def calculate_peak_power_sayer(jump_height_m, body_mass_kg):
    """
    Estimate peak anaerobic power using the Sayers equation.

    Args:
        jump_height_m: Jump height in meters
        body_mass_kg: Body mass in kg

    Returns:
        Estimated peak power in watts
    """
    jump_height_cm = jump_height_m * 100
    return (60.7 * jump_height_cm) + (45.3 * body_mass_kg) - 2055
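
# Worked example of the Sayers estimate (illustrative numbers): a 0.40 m jump by a
# 64 kg athlete gives 60.7 * 40 + 45.3 * 64 - 2055 = 2428 + 2899.2 - 2055 = 3272.2 W.
#
#   calculate_peak_power_sayer(0.40, 64)  # -> 3272.2
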
def calculate_absolute_jump_height(reference_height, relative_jump):
    """
    Calculate the absolute jump height based on the reference height and the relative jump.

    Args:
        reference_height: Reference height in meters
        relative_jump: Relative jump height in meters

    Returns:
        Absolute jump height in meters
    """
    absolute_jump = reference_height + relative_jump
    # Validation rule: results at or below 1.72 m are treated as invalid and reported as 0
    if absolute_jump > 1.72:
        return absolute_jump
    else:
        return 0
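
# Worked example (illustrative numbers): with a 1.68 m reference height, a 0.40 m
# relative jump yields 1.68 + 0.40 = 2.08 m (> 1.72, so it is returned), while a
# 0.02 m relative jump yields 1.70 m and is reported as 0 by the validation rule.
#
#   calculate_absolute_jump_height(1.68, 0.40)  # -> 2.08
#   calculate_absolute_jump_height(1.68, 0.02)  # -> 0
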
def draw_metrics_overlay(frame, max_jump_height, salto_alto, velocity_vertical, peak_power_sayer,
                         repetition_count, last_detected_ankles_y, initial_left_shoulder_x,
                         initial_right_shoulder_x, width, height, colors, metrics_below_feet_offset=20,
                         horizontal_offset_factor=0.75):
    """
    Draw the metrics overlay on the frame.

    Args:
        frame: Input frame
        max_jump_height: Maximum jump height in meters
        salto_alto: Absolute jump height in meters
        velocity_vertical: Vertical velocity in m/s
        peak_power_sayer: Peak power in watts
        repetition_count: Number of repetitions
        last_detected_ankles_y: Y-coordinate of the last detected ankles
        initial_left_shoulder_x: X-coordinate of the left shoulder
        initial_right_shoulder_x: X-coordinate of the right shoulder
        width: Frame width
        height: Frame height
        colors: Dictionary with color values
        metrics_below_feet_offset: Offset for the metrics placed below the feet
        horizontal_offset_factor: Factor for the horizontal offset

    Returns:
        Frame with the metrics overlay
    """
    overlay = frame.copy()
    alpha = 0.7
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale_title_metric = 0.5
    font_scale_value = 0.7
    font_scale_title_main = 1.2  # Scale for the main title (larger)
    font_thickness_metric = 1
    font_thickness_title_main = 1  # Thickness for the main title
    line_height_title_metric = int(20 * 1.2)
    line_height_value = int(25 * 1.2)
    padding_vertical = int(15 * 1.2)
    padding_horizontal = int(15 * 1.2)
    text_color_title = colors["light_gray"]
    text_color_value = colors["white"]
    text_color_title_main = colors["white"]
    bg_color = colors["gray"]
    border_color = colors["white"]
    border_thickness = 1
    corner_radius = 10
    spacing_horizontal = 30
    title_y_offset = 50  # Vertical position of the title
    metrics_y_offset_alto = 80  # Position of the "Salto Alto" box, leaving space below the title
    metrics_y_offset_relativo = None  # Calculated dynamically
    metrics_y_offset_velocidad = None  # Calculated dynamically
    metrics_y_offset_potencia = None  # Calculated dynamically
    # Helper function to draw rounded rectangles
    def draw_rounded_rect(img, pt1, pt2, color, thickness=-1, lineType=cv2.LINE_AA, radius=10):
        x1, y1 = pt1
        x2, y2 = pt2
        w = x2 - x1
        h = y2 - y1
        if radius > 0:
            img = cv2.ellipse(img, (x1 + radius, y1 + radius), (radius, radius), 0, 0, 90, color, thickness, lineType)
            img = cv2.ellipse(img, (x2 - radius, y1 + radius), (radius, radius), 0, 90, 180, color, thickness, lineType)
            img = cv2.ellipse(img, (x2 - radius, y2 - radius), (radius, radius), 0, 180, 270, color, thickness, lineType)
            img = cv2.ellipse(img, (x1 + radius, y2 - radius), (radius, radius), 0, 270, 360, color, thickness, lineType)
            img = cv2.rectangle(img, (x1, y1 + radius), (x2, y2 - radius), color, thickness, lineType)
            img = cv2.rectangle(img, (x1 + radius, y1), (x2 - radius, y2), color, thickness, lineType)
        else:
            img = cv2.rectangle(img, pt1, pt2, color, thickness, lineType)
        return img
    # --- Main title ---
    title_text = "Ejercicio de Salto"
    title_text_size = cv2.getTextSize(title_text, font, font_scale_title_main, font_thickness_title_main)[0]
    title_x = (width - title_text_size[0]) // 2
    title_y = title_y_offset
    cv2.putText(overlay, title_text, (title_x, title_y), font, font_scale_title_main, text_color_title_main, font_thickness_title_main, cv2.LINE_AA)

    # --- Relative jump box (dynamically positioned) ---
    relativo_text = "SALTO RELATIVO"
    relativo_value = f"{max(0, max_jump_height):.2f} m"
    relativo_text_size = cv2.getTextSize(relativo_text, font, font_scale_title_metric, font_thickness_metric)[0]
    relativo_value_size = cv2.getTextSize(relativo_value, font, font_scale_value, font_thickness_metric)[0]
    bg_width_relativo = max(relativo_text_size[0], relativo_value_size[0]) + 2 * padding_horizontal
    bg_height_relativo = line_height_title_metric + line_height_value + 2 * padding_vertical
    x_relativo = 20

    if last_detected_ankles_y is not None and bg_height_relativo is not None:
        metrics_y_offset_relativo = int(last_detected_ankles_y - bg_height_relativo - 10)  # 10 pixels above the ankle
        # Make sure the box does not go off the top of the frame
        if metrics_y_offset_relativo < title_y_offset + 50:
            metrics_y_offset_relativo = int(last_detected_ankles_y + metrics_below_feet_offset)  # Show it below instead
    else:
        metrics_y_offset_relativo = height - 150  # Default position if the ankles are not detected

    if metrics_y_offset_relativo is not None:
        y_relativo = metrics_y_offset_relativo
        pt1_relativo = (x_relativo, y_relativo)
        pt2_relativo = (x_relativo + bg_width_relativo, y_relativo + bg_height_relativo)
        overlay = draw_rounded_rect(overlay, pt1_relativo, pt2_relativo, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
        cv2.rectangle(overlay, pt1_relativo, pt2_relativo, border_color, border_thickness, cv2.LINE_AA)
        cv2.putText(overlay, relativo_text, (x_relativo + (bg_width_relativo - relativo_text_size[0]) // 2, y_relativo + padding_vertical + line_height_title_metric // 2 + 2), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
        cv2.putText(overlay, relativo_value, (x_relativo + (bg_width_relativo - relativo_value_size[0]) // 2, y_relativo + padding_vertical + line_height_title_metric + line_height_value // 2 + 5), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)
    # --- High jump box (stays in the top right) ---
    alto_text = "SALTO ALTO"
    alto_value = f"{max(0, salto_alto):.2f} m"
    alto_text_size = cv2.getTextSize(alto_text, font, font_scale_title_metric, font_thickness_metric)[0]
    alto_value_size = cv2.getTextSize(alto_value, font, font_scale_value, font_thickness_metric)[0]
    bg_width_alto = max(alto_text_size[0], alto_value_size[0]) + 2 * padding_horizontal
    bg_height_alto = line_height_title_metric + line_height_value + 2 * padding_vertical
    x_alto = width - bg_width_alto - 20  # Default position near the right edge

    if initial_right_shoulder_x is not None:
        available_space = width - initial_right_shoulder_x
        x_alto_calculated = initial_right_shoulder_x + int(available_space * (1 - horizontal_offset_factor)) - bg_width_alto
        # Make sure the box does not overlap the first box or run off the right edge
        if x_alto_calculated > x_relativo + bg_width_relativo + spacing_horizontal + 10 and x_alto_calculated + bg_width_alto < width - 10:
            x_alto = x_alto_calculated

    y_alto = metrics_y_offset_alto
    pt1_alto = (x_alto, y_alto)
    pt2_alto = (x_alto + bg_width_alto, y_alto + bg_height_alto)
    overlay = draw_rounded_rect(overlay, pt1_alto, pt2_alto, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
    cv2.rectangle(overlay, pt1_alto, pt2_alto, border_color, border_thickness, cv2.LINE_AA)
    cv2.putText(overlay, alto_text, (x_alto + (bg_width_alto - alto_text_size[0]) // 2, y_alto + padding_vertical + line_height_title_metric // 2 + 2), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
    cv2.putText(overlay, alto_value, (x_alto + (bg_width_alto - alto_value_size[0]) // 2, y_alto + padding_vertical + line_height_title_metric + line_height_value // 2 + 5), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)
    # --- Repetitions box ---
    reps_text = "REPETICIONES"
    reps_value = f"{repetition_count}"
    reps_text_size = cv2.getTextSize(reps_text, font, font_scale_title_metric, font_thickness_metric)[0]
    reps_value_size = cv2.getTextSize(reps_value, font, font_scale_value, font_thickness_metric)[0]
    bg_width_reps = max(reps_text_size[0], reps_value_size[0]) + 2 * padding_horizontal
    bg_height_reps = line_height_title_metric + line_height_value + 2 * padding_vertical
    x_reps = x_relativo
    y_reps = y_relativo + bg_height_relativo + 10
    pt1_reps = (x_reps, y_reps)
    pt2_reps = (x_reps + bg_width_reps, y_reps + bg_height_reps)
    overlay = draw_rounded_rect(overlay, pt1_reps, pt2_reps, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
    cv2.rectangle(overlay, pt1_reps, pt2_reps, border_color, border_thickness, cv2.LINE_AA)
    cv2.putText(overlay, reps_text, (x_reps + (bg_width_reps - reps_text_size[0]) // 2, y_reps + padding_vertical + line_height_title_metric // 2 + 2), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
    cv2.putText(overlay, reps_value, (x_reps + (bg_width_reps - reps_value_size[0]) // 2, y_reps + padding_vertical + line_height_title_metric + line_height_value // 2 + 5), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)
    # --- Vertical velocity box (below the feet) ---
    if last_detected_ankles_y is not None:
        velocidad_text = "VELOCIDAD VERTICAL"
        velocidad_value = f"{abs(velocity_vertical):.2f} m/s"  # Show the absolute value
        velocidad_text_size = cv2.getTextSize(velocidad_text, font, font_scale_title_metric, font_thickness_metric)[0]
        velocidad_value_size = cv2.getTextSize(velocidad_value, font, font_scale_value, font_thickness_metric)[0]
        bg_width_velocidad = max(velocidad_text_size[0], velocidad_value_size[0]) + 2 * padding_horizontal
        bg_height_velocidad = line_height_title_metric + line_height_value + 2 * padding_vertical
        x_velocidad = int(width / 2 - bg_width_velocidad / 2)  # Horizontally centered
        y_velocidad = int(last_detected_ankles_y + metrics_below_feet_offset + bg_height_velocidad)
        pt1_velocidad = (int(x_velocidad), int(y_velocidad - bg_height_velocidad))
        pt2_velocidad = (int(x_velocidad + bg_width_velocidad), int(y_velocidad))
        overlay = draw_rounded_rect(overlay, pt1_velocidad, pt2_velocidad, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
        cv2.rectangle(overlay, pt1_velocidad, pt2_velocidad, border_color, border_thickness, cv2.LINE_AA)
        cv2.putText(overlay, velocidad_text, (int(x_velocidad + (bg_width_velocidad - velocidad_text_size[0]) // 2), int(y_velocidad - bg_height_velocidad + padding_vertical + line_height_title_metric // 2 + 2)), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
        cv2.putText(overlay, velocidad_value, (int(x_velocidad + (bg_width_velocidad - velocidad_value_size[0]) // 2), int(y_velocidad - bg_height_velocidad + padding_vertical + line_height_title_metric + line_height_value // 2 + 5)), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)

        # --- Sayers power box (below the velocity box) ---
        potencia_text = "POTENCIA SAYER"
        potencia_value = f"{peak_power_sayer:.2f} W"
        potencia_text_size = cv2.getTextSize(potencia_text, font, font_scale_title_metric, font_thickness_metric)[0]
        potencia_value_size = cv2.getTextSize(potencia_value, font, font_scale_value, font_thickness_metric)[0]
        bg_width_potencia = max(potencia_text_size[0], potencia_value_size[0]) + 2 * padding_horizontal
        bg_height_potencia = line_height_title_metric + line_height_value + 2 * padding_vertical
        x_potencia = x_velocidad  # Same horizontal position as the velocity box
        y_potencia = y_velocidad + 5  # Just below the velocity box
        pt1_potencia = (int(x_potencia), int(y_potencia))
        pt2_potencia = (int(x_potencia + bg_width_potencia), int(y_potencia + bg_height_potencia))
        overlay = draw_rounded_rect(overlay, pt1_potencia, pt2_potencia, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
        cv2.rectangle(overlay, pt1_potencia, pt2_potencia, border_color, border_thickness, cv2.LINE_AA)
        cv2.putText(overlay, potencia_text, (int(x_potencia + (bg_width_potencia - potencia_text_size[0]) // 2), int(y_potencia + padding_vertical + line_height_title_metric // 2 + 2)), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
        cv2.putText(overlay, potencia_value, (int(x_potencia + (bg_width_potencia - potencia_value_size[0]) // 2), int(y_potencia + padding_vertical + line_height_title_metric + line_height_value // 2 + 5)), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)
    # Blend the overlay with the original frame
    result = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
    return result
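
# Note on the blend above: with alpha = 0.7, cv2.addWeighted computes
#   result = 0.7 * overlay + 0.3 * frame
# per pixel, so the metric boxes and text are drawn at roughly 70% opacity over the
# original frame.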