from vitpose import VitPose
import requests
import os
from config import API_URL, API_KEY
from fastapi import UploadFile
import logging
import cv2
import numpy as np
import time
import json

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def process_video(file_name: str, vitpose: VitPose, user_id: str, player_id: str):
    """Annotate a video with VitPose and POST the result to the processing webhook."""
    video_path = file_name
    # Re-write the incoming file in place (no-op copy kept from the original upload flow).
    with open(video_path, "rb") as f:
        contents = f.read()
    with open(video_path, "wb") as f:
        f.write(contents)
    logger.info(f"file saved {video_path}")
    logger.info(f"starting task {video_path}")
    new_file_name = os.path.join("static", video_path)
    logger.info(f"new file name {new_file_name}")
    vitpose.output_video_path = new_file_name
    annotated_frames = vitpose.run(video_path)
    vitpose.frames_to_video(annotated_frames)
    logger.info(f"Video processed {video_path}")
    with open(new_file_name, "rb") as f:
        contents = f.read()
    url = API_URL + "/excercises/webhooks/video-processed"
    logger.info(f"Sending video to {url}")
    files = {"file": (video_path, contents, "video/mp4")}
    logger.info(f"video_path: {video_path}")
    response = requests.post(
        url,
        files=files,
        data={
            "user_id": user_id,
            "typeMessage": "video_processed",
            "file_name": video_path,
            "player_id": player_id,
        },
        stream=True,
        headers={"token": API_KEY},
    )
    logger.info(f"Response: {response.status_code}")
    logger.info(f"Response: {response.text}")
    logger.info(f"Video sent to {url}")


def process_salto_alto(file_name: str, vitpose: VitPose, player_data: dict, repetitions: int, exercise_id: str):
    """
    Process a high jump exercise video using VitPose for pose estimation.

    Args:
        file_name: Path to the input video
        vitpose: VitPose instance for pose estimation
        player_data: Dictionary containing player information
        repetitions: Expected number of repetitions
        exercise_id: ID of the exercise
    """
    # Use the provided VitPose instance
    model = vitpose.pipeline
    # Get player parameters from player_data or use defaults
    reference_height = player_data.get('height', 1.68)  # Approximate height of the person in meters
    body_mass_kg = player_data.get('weight', 64)  # Body weight in kg
    # Generate output paths
    output_video = file_name.replace('.mp4', '_analyzed.mp4')
    output_json = output_video.replace('.mp4', '.json')
    # Process the video and get the jump metrics
    results_dict = analyze_jump_video(
        model=model,
        input_video=file_name,
        output_video=output_video,
        reference_height=reference_height,
        body_mass_kg=body_mass_kg
    )
    # Save results to JSON (json.dump writes into the open file handle)
    with open(output_json, 'w') as f:
        json.dump(results_dict, f, indent=4)
    # Print summary
    print("\nFinal results:")
    print(f"Maximum relative jump: {results_dict['jump_metrics']['max_relative_jump']:.2f} m")
    print(f"Maximum high jump: {results_dict['jump_metrics']['max_high_jump']:.2f} m")
    print(f"Sayer peak power (estimated): {results_dict['jump_metrics']['peak_power_sayer']:.2f} W")
    # Return results dictionary
    return {
        "output_video": output_video,
        "output_json": output_json,
        "metrics": results_dict
    }
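

# Illustrative usage sketch (not part of the original pipeline): the player_data
# dictionary only needs the 'height' (m) and 'weight' (kg) keys read above; all
# other values here are hypothetical placeholders.
def _example_process_salto_alto(vitpose: VitPose) -> dict:
    return process_salto_alto(
        file_name="uploads/sample_jump.mp4",         # hypothetical path
        vitpose=vitpose,
        player_data={"height": 1.75, "weight": 70},  # meters / kilograms
        repetitions=3,                               # expected repetitions (hypothetical)
        exercise_id="exercise-001",                  # hypothetical ID
    )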


def analyze_jump_video(model, input_video, output_video, reference_height=1.68, body_mass_kg=64):
    """
    Analyze a jump video to calculate various jump metrics.

    Args:
        model: VitPose model instance
        input_video: Path to input video
        output_video: Path to output video
        reference_height: Height of the person in meters
        body_mass_kg: Weight of the person in kg

    Returns:
        Dictionary containing jump metrics and video analysis data
    """
    # Configuration parameters
    JUMP_THRESHOLD_PERCENT = 0.05  # Relative change in ankle height used to detect the start of the jump
    SMOOTHING_WINDOW = 5  # Window (in frames) for smoothing the ankle height
    HORIZONTAL_OFFSET_FACTOR = 0.75  # Factor to place the box between the shoulder and the frame edge
    VELOCITY_WINDOW = 3  # Number of frames used to compute the velocity
    METRICS_BELOW_FEET_OFFSET = 20  # Offset in pixels to place the boxes below the feet

    # Color palette (BGR)
    BLUE = (255, 0, 0)
    GREEN = (0, 255, 0)
    YELLOW = (0, 255, 255)
    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
    GRAY = (128, 128, 128)
    LIGHT_GRAY = (200, 200, 200)

    repetition_data = []

    # Open the video
    cap = cv2.VideoCapture(input_video)
    if not cap.isOpened():
        print("Error opening the video")
        return {}

    # Get first frame to calibrate and get initial shoulder positions
    ret, frame = cap.read()
    if not ret:
        print("Error reading the video")
        return {}

    # Initialize calibration variables
    PX_PER_METER = None
    initial_person_height_px = None
    initial_left_shoulder_x = None
    initial_right_shoulder_x = None

    # Process first frame to calibrate
    results_first_frame = model(frame)  # Detect pose in first frame
    if results_first_frame and results_first_frame[0].keypoints and len(results_first_frame[0].keypoints.xy[0]) > 0:
        kpts_first = results_first_frame[0].keypoints.xy[0].cpu().numpy()
        if kpts_first[0][1] > 0 and kpts_first[15][1] > 0 and kpts_first[16][1] > 0:  # Nose and ankles
            initial_person_height_px = min(kpts_first[15][1], kpts_first[16][1]) - kpts_first[0][1]
            PX_PER_METER = initial_person_height_px / reference_height
            print(f"Computed scale: {PX_PER_METER:.2f} px/m")
        if kpts_first[5][0] > 0 and kpts_first[6][0] > 0:  # Left (5) and right (6) shoulders
            initial_left_shoulder_x = int(kpts_first[5][0])
            initial_right_shoulder_x = int(kpts_first[6][0])
    if PX_PER_METER is None or initial_left_shoulder_x is None or initial_right_shoulder_x is None:
        print("Could not calibrate the scale or detect the shoulders in the first frame.")
        cap.release()
        return {}

    # Reset video for processing
    cap.release()
    cap = cv2.VideoCapture(input_video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    # Variables for metrics and visualization
    ground_level = None
    takeoff_head_y = None
    max_jump_height = 0  # Maximum relative jump
    max_head_height_px = None  # Maximum head height in pixels (lowest in y coordinates)
    salto_alto = 0  # Last computed absolute jump (initialized so the results dict is always valid)
    jump_started = False
    head_y_history = []
    ankle_y_history = []
    last_detected_ankles_y = None
    head_y_buffer = []
    velocity_vertical = 0.0
    peak_power_sayer = 0.0  # Initialize Sayer power
    person_detected = False  # Flag to indicate if person was detected in any frame
    current_power = 0.0
    repetition_count = 0
    jump_in_air = False

    # Process each frame
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        annotated_frame = frame.copy()
        results = model(annotated_frame)
        if results and results[0].keypoints and len(results[0].keypoints.xy[0]) > 0:
            person_detected = True
            kpts = results[0].keypoints.xy[0].cpu().numpy()
            nose = kpts[0]
            ankles = [kpts[15], kpts[16]]
            left_shoulder = kpts[5]
            right_shoulder = kpts[6]
            if nose[1] > 0 and all(a[1] > 0 for a in ankles) and left_shoulder[0] > 0 and right_shoulder[0] > 0:
                current_ankle_y = min(a[1] for a in ankles)
                last_detected_ankles_y = current_ankle_y  # Save current ankle position
                current_head_y = nose[1]
                current_left_shoulder_x = int(left_shoulder[0])
                current_right_shoulder_x = int(right_shoulder[0])
                # Smooth ankle and head positions with a short moving average
                ankle_y_history.append(current_ankle_y)
                if len(ankle_y_history) > SMOOTHING_WINDOW:
                    ankle_y_history.pop(0)
                smoothed_ankle_y = np.mean(ankle_y_history)
                head_y_history.append(current_head_y)
                if len(head_y_history) > SMOOTHING_WINDOW:
                    head_y_history.pop(0)
                smoothed_head_y = np.mean(head_y_history)
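                # Note (illustrative numbers, not from the source): with
                # SMOOTHING_WINDOW = 5 the smoothed value is the mean of the last
                # up-to-5 raw samples, e.g. ankle readings [801, 799, 803, 800, 802]
                # px smooth to 801.0 px, damping single-frame detection jitter.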
                # Calculate vertical velocity (using head position)
                head_y_buffer.append(smoothed_head_y)
                if len(head_y_buffer) > VELOCITY_WINDOW:
                    head_y_buffer.pop(0)
                if PX_PER_METER is not None and fps > 0:
                    delta_y_pixels = head_y_buffer[0] - head_y_buffer[-1]
                    delta_y_meters = delta_y_pixels / PX_PER_METER
                    delta_t = VELOCITY_WINDOW / fps
                    velocity_vertical = delta_y_meters / delta_t
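                    # Worked example (illustrative numbers, not from the source):
                    # at 30 fps with VELOCITY_WINDOW = 3, delta_t = 3 / 30 = 0.1 s;
                    # a 25 px upward head shift at 250 px/m is 0.10 m, giving
                    # roughly 1.0 m/s.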
                # Set ground level in first frame where ankles are detected
                if ground_level is None:
                    ground_level = smoothed_ankle_y
                    takeoff_head_y = smoothed_head_y
                relative_ankle_change = (ground_level - smoothed_ankle_y) / ground_level if ground_level > 0 else 0
                # Detect jump start
                if not jump_started and relative_ankle_change > JUMP_THRESHOLD_PERCENT:
                    jump_started = True
                    takeoff_head_y = smoothed_head_y
                    max_jump_height = 0
                    max_head_height_px = smoothed_head_y
                # Detect jump end
                if jump_started and relative_ankle_change <= JUMP_THRESHOLD_PERCENT:
                    # Add to repetition data
                    salto_alto = calculate_absolute_jump_height(reference_height, max_jump_height)
                    repetition_data.append({
                        "repetition": repetition_count + 1,
                        "relative_jump_m": round(max_jump_height, 3),
                        "absolute_jump_m": round(salto_alto, 3),
                        "peak_power_watts": round(current_power, 1)
                    })
                    repetition_count += 1
                    jump_started = False
                # Update jump metrics while in air
                if jump_started:
                    relative_jump = (takeoff_head_y - smoothed_head_y) / PX_PER_METER
                    if relative_jump > max_jump_height:
                        max_jump_height = relative_jump
                    if smoothed_head_y < max_head_height_px:
                        max_head_height_px = smoothed_head_y
                    if relative_jump:
                        current_power = calculate_peak_power_sayer(relative_jump, body_mass_kg)
                        if current_power > peak_power_sayer:
                            peak_power_sayer = current_power
            else:
                last_detected_ankles_y = None  # Reset position if ankles not detected
                velocity_vertical = 0.0  # Reset velocity if no reliable detection
        # Calculate absolute jump height
        salto_alto = calculate_absolute_jump_height(reference_height, max_jump_height)
        # Draw floating metric boxes
        annotated_frame = draw_metrics_overlay(
            frame=annotated_frame,
            max_jump_height=max_jump_height,
            salto_alto=salto_alto,
            velocity_vertical=velocity_vertical,
            peak_power_sayer=peak_power_sayer,
            repetition_count=repetition_count,
            last_detected_ankles_y=last_detected_ankles_y,
            initial_left_shoulder_x=initial_left_shoulder_x,
            initial_right_shoulder_x=initial_right_shoulder_x,
            width=width,
            height=height,
            colors={
                "blue": BLUE,
                "green": GREEN,
                "yellow": YELLOW,
                "white": WHITE,
                "black": BLACK,
                "gray": GRAY,
                "light_gray": LIGHT_GRAY
            },
            metrics_below_feet_offset=METRICS_BELOW_FEET_OFFSET,
            horizontal_offset_factor=HORIZONTAL_OFFSET_FACTOR
        )
        out.write(annotated_frame)

    # Prepare results dictionary
    results_dict = {
        "jump_metrics": {
            "max_relative_jump": float(max(0, max_jump_height)),
            "max_high_jump": float(max(0, salto_alto)),
            "peak_power_sayer": float(peak_power_sayer),
            "repetitions": int(repetition_count),
            "reference_height": float(reference_height),
            "body_mass_kg": float(body_mass_kg),
            "px_per_meter": float(PX_PER_METER) if PX_PER_METER is not None else 0.0
        },
        "video_analysis": {
            "input_video": str(input_video),
            "output_video": str(output_video),
            "fps": float(fps),
            "resolution": f"{int(width)}x{int(height)}"
        },
        "repetition_data": [
            {
                "repetition": int(rep["repetition"]),
                "relative_jump_m": float(rep["relative_jump_m"]),
                "absolute_jump_m": float(rep["absolute_jump_m"]),
                "peak_power_watts": float(rep["peak_power_watts"])
            } for rep in repetition_data
        ]
    }
    cap.release()
    out.release()
    return results_dict


def calculate_peak_power_sayer(jump_height_m, body_mass_kg):
    """
    Estimates peak anaerobic power using the Sayers equation.

    Args:
        jump_height_m: Jump height in meters
        body_mass_kg: Body mass in kg

    Returns:
        Estimated peak power in watts
    """
    jump_height_cm = jump_height_m * 100
    return (60.7 * jump_height_cm) + (45.3 * body_mass_kg) - 2055
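

# Worked example (illustrative numbers, not from the source): a 0.40 m jump by a
# 64 kg athlete gives 60.7 * 40 + 45.3 * 64 - 2055 = 2428 + 2899.2 - 2055 = 3272.2 W,
# i.e. calculate_peak_power_sayer(0.40, 64) is approximately 3272.2 (up to
# floating-point rounding).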


def calculate_absolute_jump_height(reference_height, relative_jump):
    """
    Calculate absolute jump height based on reference height and relative jump.

    Args:
        reference_height: Reference height in meters
        relative_jump: Relative jump height in meters

    Returns:
        Absolute jump height in meters, or 0 if it does not exceed the 1.72 m validation threshold
    """
    absolute_jump = reference_height + relative_jump
    # Apply validation rule: discard values at or below 1.72 m
    if absolute_jump > 1.72:
        return absolute_jump
    else:
        return 0
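

# Worked example (illustrative numbers, not from the source): with a 1.68 m
# reference height, a 0.10 m relative jump gives 1.68 + 0.10 = 1.78 m (> 1.72,
# so it is kept), while a 0.03 m relative jump gives 1.71 m and is reported as 0.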


def draw_metrics_overlay(frame, max_jump_height, salto_alto, velocity_vertical, peak_power_sayer,
                         repetition_count, last_detected_ankles_y, initial_left_shoulder_x,
                         initial_right_shoulder_x, width, height, colors, metrics_below_feet_offset=20,
                         horizontal_offset_factor=0.75):
    """
    Draw metrics overlay on the frame.

    Args:
        frame: Input frame
        max_jump_height: Maximum jump height in meters
        salto_alto: Absolute jump height in meters
        velocity_vertical: Vertical velocity in m/s
        peak_power_sayer: Peak power in watts
        repetition_count: Number of repetitions
        last_detected_ankles_y: Y-coordinate of last detected ankles
        initial_left_shoulder_x: X-coordinate of left shoulder
        initial_right_shoulder_x: X-coordinate of right shoulder
        width: Frame width
        height: Frame height
        colors: Dictionary with color values
        metrics_below_feet_offset: Offset for metrics below feet
        horizontal_offset_factor: Factor for horizontal offset

    Returns:
        Frame with metrics overlay
    """
    overlay = frame.copy()
    alpha = 0.7
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale_title_metric = 0.5
    font_scale_value = 0.7
    font_scale_title_main = 1.2  # Scale for main title (larger)
    font_thickness_metric = 1
    font_thickness_title_main = 1  # Thickness for main title
    line_height_title_metric = int(20 * 1.2)
    line_height_value = int(25 * 1.2)
    padding_vertical = int(15 * 1.2)
    padding_horizontal = int(15 * 1.2)
    text_color_title = colors["light_gray"]
    text_color_value = colors["white"]
    text_color_title_main = colors["white"]
    bg_color = colors["gray"]
    border_color = colors["white"]
    border_thickness = 1
    corner_radius = 10
    spacing_horizontal = 30
    title_y_offset = 50  # Lower vertical position of title
    metrics_y_offset_alto = 80  # Adjust "Salto Alto" position to leave space below
    metrics_y_offset_relativo = None  # Will be calculated dynamically
    metrics_y_offset_velocidad = None  # Will be calculated dynamically
    metrics_y_offset_potencia = None  # Will be calculated dynamically

    # Helper function to draw rounded rectangles
    def draw_rounded_rect(img, pt1, pt2, color, thickness=-1, lineType=cv2.LINE_AA, radius=10):
        x1, y1 = pt1
        x2, y2 = pt2
        w = x2 - x1
        h = y2 - y1
        if radius > 0:
            # Four corner arcs plus two overlapping rectangles approximate a rounded rectangle
            img = cv2.ellipse(img, (x1 + radius, y1 + radius), (radius, radius), 0, 0, 90, color, thickness, lineType)
            img = cv2.ellipse(img, (x2 - radius, y1 + radius), (radius, radius), 0, 90, 180, color, thickness, lineType)
            img = cv2.ellipse(img, (x2 - radius, y2 - radius), (radius, radius), 0, 180, 270, color, thickness, lineType)
            img = cv2.ellipse(img, (x1 + radius, y2 - radius), (radius, radius), 0, 270, 360, color, thickness, lineType)
            img = cv2.rectangle(img, (x1, y1 + radius), (x2, y2 - radius), color, thickness, lineType)
            img = cv2.rectangle(img, (x1 + radius, y1), (x2 - radius, y2), color, thickness, lineType)
        else:
            img = cv2.rectangle(img, pt1, pt2, color, thickness, lineType)
        return img

    # --- Main Title ---
    title_text = "Ejercicio de Salto"
    title_text_size = cv2.getTextSize(title_text, font, font_scale_title_main, font_thickness_title_main)[0]
    title_x = (width - title_text_size[0]) // 2
    title_y = title_y_offset
    cv2.putText(overlay, title_text, (title_x, title_y), font, font_scale_title_main, text_color_title_main, font_thickness_title_main, cv2.LINE_AA)

    # --- Relative Jump Box (dynamically positioned) ---
    relativo_text = "SALTO RELATIVO"
    relativo_value = f"{max(0, max_jump_height):.2f} m"
    relativo_text_size = cv2.getTextSize(relativo_text, font, font_scale_title_metric, font_thickness_metric)[0]
    relativo_value_size = cv2.getTextSize(relativo_value, font, font_scale_value, font_thickness_metric)[0]
    bg_width_relativo = max(relativo_text_size[0], relativo_value_size[0]) + 2 * padding_horizontal
    bg_height_relativo = line_height_title_metric + line_height_value + 2 * padding_vertical
    x_relativo = 20
    if last_detected_ankles_y is not None and bg_height_relativo is not None:
        metrics_y_offset_relativo = int(last_detected_ankles_y - bg_height_relativo - 10)  # 10 pixels above the ankle
        # Make sure the box doesn't go off the top
        if metrics_y_offset_relativo < title_y_offset + 50:
            metrics_y_offset_relativo = int(last_detected_ankles_y + metrics_below_feet_offset)  # Show below instead
    else:
        metrics_y_offset_relativo = height - 150  # Default position if ankles not detected
    if metrics_y_offset_relativo is not None:
        y_relativo = metrics_y_offset_relativo
        pt1_relativo = (x_relativo, y_relativo)
        pt2_relativo = (x_relativo + bg_width_relativo, y_relativo + bg_height_relativo)
        overlay = draw_rounded_rect(overlay, pt1_relativo, pt2_relativo, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
        cv2.rectangle(overlay, pt1_relativo, pt2_relativo, border_color, border_thickness, cv2.LINE_AA)
        cv2.putText(overlay, relativo_text, (x_relativo + (bg_width_relativo - relativo_text_size[0]) // 2, y_relativo + padding_vertical + line_height_title_metric // 2 + 2), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
        cv2.putText(overlay, relativo_value, (x_relativo + (bg_width_relativo - relativo_value_size[0]) // 2, y_relativo + padding_vertical + line_height_title_metric + line_height_value // 2 + 5), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)

    # --- High Jump Box (stays in top right) ---
    alto_text = "SALTO ALTO"
    alto_value = f"{max(0, salto_alto):.2f} m"
    alto_text_size = cv2.getTextSize(alto_text, font, font_scale_title_metric, font_thickness_metric)[0]
    alto_value_size = cv2.getTextSize(alto_value, font, font_scale_value, font_thickness_metric)[0]
    bg_width_alto = max(alto_text_size[0], alto_value_size[0]) + 2 * padding_horizontal
    bg_height_alto = line_height_title_metric + line_height_value + 2 * padding_vertical
    x_alto = width - bg_width_alto - 20  # Default position near right edge
    if initial_right_shoulder_x is not None:
        available_space = width - initial_right_shoulder_x
        x_alto_calculated = initial_right_shoulder_x + int(available_space * (1 - horizontal_offset_factor)) - bg_width_alto
        # Make sure it doesn't go off the left edge and leaves space from the first box
        if x_alto_calculated > x_relativo + bg_width_relativo + spacing_horizontal + 10 and x_alto_calculated + bg_width_alto < width - 10:
            x_alto = x_alto_calculated
    y_alto = metrics_y_offset_alto
    pt1_alto = (x_alto, y_alto)
    pt2_alto = (x_alto + bg_width_alto, y_alto + bg_height_alto)
    overlay = draw_rounded_rect(overlay, pt1_alto, pt2_alto, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
    cv2.rectangle(overlay, pt1_alto, pt2_alto, border_color, border_thickness, cv2.LINE_AA)
    cv2.putText(overlay, alto_text, (x_alto + (bg_width_alto - alto_text_size[0]) // 2, y_alto + padding_vertical + line_height_title_metric // 2 + 2), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
    cv2.putText(overlay, alto_value, (x_alto + (bg_width_alto - alto_value_size[0]) // 2, y_alto + padding_vertical + line_height_title_metric + line_height_value // 2 + 5), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)

    # --- Repetitions Box ---
    reps_text = "REPETICIONES"
    reps_value = f"{repetition_count}"
    reps_text_size = cv2.getTextSize(reps_text, font, font_scale_title_metric, font_thickness_metric)[0]
    reps_value_size = cv2.getTextSize(reps_value, font, font_scale_value, font_thickness_metric)[0]
    bg_width_reps = max(reps_text_size[0], reps_value_size[0]) + 2 * padding_horizontal
    bg_height_reps = line_height_title_metric + line_height_value + 2 * padding_vertical
    x_reps = x_relativo
    y_reps = y_relativo + bg_height_relativo + 10
    pt1_reps = (x_reps, y_reps)
    pt2_reps = (x_reps + bg_width_reps, y_reps + bg_height_reps)
    overlay = draw_rounded_rect(overlay, pt1_reps, pt2_reps, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
    cv2.rectangle(overlay, pt1_reps, pt2_reps, border_color, border_thickness, cv2.LINE_AA)
    cv2.putText(overlay, reps_text, (x_reps + (bg_width_reps - reps_text_size[0]) // 2, y_reps + padding_vertical + line_height_title_metric // 2 + 2), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
    cv2.putText(overlay, reps_value, (x_reps + (bg_width_reps - reps_value_size[0]) // 2, y_reps + padding_vertical + line_height_title_metric + line_height_value // 2 + 5), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)

    # --- Vertical Velocity Box (below feet) ---
    if last_detected_ankles_y is not None:
        velocidad_text = "VELOCIDAD VERTICAL"
        velocidad_value = f"{abs(velocity_vertical):.2f} m/s"  # Show absolute value
        velocidad_text_size = cv2.getTextSize(velocidad_text, font, font_scale_title_metric, font_thickness_metric)[0]
        velocidad_value_size = cv2.getTextSize(velocidad_value, font, font_scale_value, font_thickness_metric)[0]
        bg_width_velocidad = max(velocidad_text_size[0], velocidad_value_size[0]) + 2 * padding_horizontal
        bg_height_velocidad = line_height_title_metric + line_height_value + 2 * padding_vertical
        x_velocidad = int(width / 2 - bg_width_velocidad / 2)  # Horizontally centered
        y_velocidad = int(last_detected_ankles_y + metrics_below_feet_offset + bg_height_velocidad)
        pt1_velocidad = (int(x_velocidad), int(y_velocidad - bg_height_velocidad))
        pt2_velocidad = (int(x_velocidad + bg_width_velocidad), int(y_velocidad))
        overlay = draw_rounded_rect(overlay, pt1_velocidad, pt2_velocidad, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
        cv2.rectangle(overlay, pt1_velocidad, pt2_velocidad, border_color, border_thickness, cv2.LINE_AA)
        cv2.putText(overlay, velocidad_text, (int(x_velocidad + (bg_width_velocidad - velocidad_text_size[0]) // 2), int(y_velocidad - bg_height_velocidad + padding_vertical + line_height_title_metric // 2 + 2)), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
        cv2.putText(overlay, velocidad_value, (int(x_velocidad + (bg_width_velocidad - velocidad_value_size[0]) // 2), int(y_velocidad - bg_height_velocidad + padding_vertical + line_height_title_metric + line_height_value // 2 + 5)), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)

        # --- Sayer Power Box (below velocity box) ---
        potencia_text = "POTENCIA SAYER"
        potencia_value = f"{peak_power_sayer:.2f} W"
        potencia_text_size = cv2.getTextSize(potencia_text, font, font_scale_title_metric, font_thickness_metric)[0]
        potencia_value_size = cv2.getTextSize(potencia_value, font, font_scale_value, font_thickness_metric)[0]
        bg_width_potencia = max(potencia_text_size[0], potencia_value_size[0]) + 2 * padding_horizontal
        bg_height_potencia = line_height_title_metric + line_height_value + 2 * padding_vertical
        x_potencia = x_velocidad  # Same horizontal position as velocity
        y_potencia = y_velocidad + 5  # Below velocity box
        pt1_potencia = (int(x_potencia), int(y_potencia))
        pt2_potencia = (int(x_potencia + bg_width_potencia), int(y_potencia + bg_height_potencia))
        overlay = draw_rounded_rect(overlay, pt1_potencia, pt2_potencia, bg_color, cv2.FILLED, cv2.LINE_AA, corner_radius)
        cv2.rectangle(overlay, pt1_potencia, pt2_potencia, border_color, border_thickness, cv2.LINE_AA)
        cv2.putText(overlay, potencia_text, (int(x_potencia + (bg_width_potencia - potencia_text_size[0]) // 2), int(y_potencia + padding_vertical + line_height_title_metric // 2 + 2)), font, font_scale_title_metric, text_color_title, font_thickness_metric, cv2.LINE_AA)
        cv2.putText(overlay, potencia_value, (int(x_potencia + (bg_width_potencia - potencia_value_size[0]) // 2), int(y_potencia + padding_vertical + line_height_title_metric + line_height_value // 2 + 5)), font, font_scale_value, text_color_value, font_thickness_metric, cv2.LINE_AA)

    # Blend overlay with original frame
    result = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
    return result
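

# Minimal end-to-end sketch (illustrative, not part of the original module): runs
# the jump analysis directly on a local file. "sample_jump.mp4" is a hypothetical
# path, and constructing VitPose with no arguments is an assumption about its API.
if __name__ == "__main__":
    _vitpose = VitPose()  # assumption: default construction is supported
    _results = analyze_jump_video(
        model=_vitpose.pipeline,
        input_video="sample_jump.mp4",
        output_video="sample_jump_analyzed.mp4",
        reference_height=1.68,
        body_mass_kg=64,
    )
    print(json.dumps(_results, indent=4))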