cesar committed
Commit c66d0d6 · verified · 1 Parent(s): e651f8c

Update app.py

Files changed (1)
  1. app.py +98 -1
app.py CHANGED
@@ -1 +1,98 @@
- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b22e8eb38ed4c779d540fc/rkXD8FcVjo6w33hleoJvs.png)
+ import cv2
+ import gradio as gr
+ from ultralytics import YOLO
+
+ # ── Config ─────────────────────────────────────────────
+ MODEL_PATH = "yolov8n.pt"  # pre-trained model ("person" class)
+ CONF_THRES = 0.3           # minimum detection confidence
+ LINE_RATIO = 0.5           # virtual line position (50 % of frame height)
+ # ───────────────────────────────────────────────────────
+
+ model = YOLO(MODEL_PATH)
+
+ # global state
+ memory = {}  # {track_id: previous_cy}
+ in_count = 0
+ out_count = 0
+
+
+ def count_people(frame):
+     """
+     Receives an RGB frame (numpy) -> processes it -> returns the annotated RGB frame and a label string.
+     Called continuously because the input has `streaming=True`.
+     """
+     global memory, in_count, out_count
+
+     if frame is None:
+         return None, ""
+
+     # ── step 1: RGB ➜ BGR for OpenCV/YOLO ─────────────
+     frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+     h, w = frame_bgr.shape[:2]
+     line_y = int(h * LINE_RATIO)
+
+     # ── detection + tracking ───────────────────────────
+     results = model.track(
+         frame_bgr,
+         classes=[0],  # "person" only
+         conf=CONF_THRES,
+         persist=True,
+         verbose=False
+     )
+
+     annotated = frame_bgr.copy()
+     cv2.line(annotated, (0, line_y), (w, line_y), (0, 255, 255), 2)
+
+     if results:
+         for box in results[0].boxes:
+             x1, y1, x2, y2 = map(int, box.xyxy[0])
+             cx, cy = int((x1 + x2) / 2), int((y1 + y2) / 2)
+             tid = int(box.id[0]) if box.id is not None else -1
+
+             # entry / exit crossings
+             prev_cy = memory.get(tid, cy)
+             if prev_cy < line_y <= cy:    # crossed top → bottom (IN)
+                 in_count += 1
+             elif prev_cy > line_y >= cy:  # bottom → top (OUT)
+                 out_count += 1
+             memory[tid] = cy
+
+             # annotations
+             cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 1)
+             cv2.circle(annotated, (cx, cy), 3, (0, 0, 255), -1)
+             cv2.putText(annotated, str(tid), (x1, y1 - 5),
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
+
+     total = in_count - out_count
+     label = f"In: {in_count} | Out: {out_count} | Occupancy: {total}"
+
+     # ── step 2: BGR ➜ RGB for display in Gradio ───────
+     annotated_rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
+     return annotated_rgb, label
+
+
+ def reset_counts():
+     """Callback for the 'Clear' button."""
+     global memory, in_count, out_count
+     memory = {}
+     in_count = 0
+     out_count = 0
+     return None, ""
+
+
+ with gr.Blocks(title="People counter (single entrance)") as demo:
+     gr.Markdown("# People counter (single entrance)")
+
+     with gr.Row():
+         cam = gr.Image(sources=["webcam"], streaming=True, label="frame")
+         out_img = gr.Image(label="Video")
+         out_lbl = gr.Text(label="Counter")
+
+     btn_clear = gr.Button("Clear")
+
+     # wire-up: stream webcam frames into count_people, reset state on click
+     cam.stream(fn=count_people, inputs=cam, outputs=[out_img, out_lbl])
+     btn_clear.click(fn=reset_counts, outputs=[out_img, out_lbl])
+
+ if __name__ == "__main__":
+     demo.launch()
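
The in/out counting in `count_people` boils down to comparing each track's previous centroid `y` with its current one against the virtual line. A minimal standalone sketch of that rule (with made-up track IDs and centroid positions, no YOLO or Gradio required) behaves like this:

```python
# Standalone sketch of the line-crossing rule used in count_people.
# Track IDs and centroid y-coordinates below are made up for illustration.
memory = {}            # {track_id: previous_cy}
in_count = out_count = 0
LINE_Y = 240           # virtual line at y = 240 px

def update(track_id, cy):
    """Update the counters for one track given its new centroid y."""
    global in_count, out_count
    prev_cy = memory.get(track_id, cy)   # first sighting: no crossing possible
    if prev_cy < LINE_Y <= cy:           # moved from above the line to below -> IN
        in_count += 1
    elif prev_cy > LINE_Y >= cy:         # moved from below the line to above -> OUT
        out_count += 1
    memory[track_id] = cy

# track 7 walks downward across the line over four frames
for y in (200, 220, 250, 300):
    update(7, y)

print(in_count, out_count)   # -> 1 0
```

Because the previous position defaults to the current one on first sighting, a person who first appears below the line is not counted until they actually cross it.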
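
For a quick offline sanity check of the detection and annotation path (no webcam needed), the function can also be called on a single image. This is only a sketch, assuming the committed file is saved as `app.py` and that a local `test.jpg` exists; `ultralytics` downloads `yolov8n.pt` automatically on first use:

```python
import cv2
from app import count_people  # assumes the committed file is saved as app.py

bgr = cv2.imread("test.jpg")                 # hypothetical test image containing people
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # count_people expects an RGB frame

annotated_rgb, label = count_people(rgb)
print(label)                                 # e.g. "In: 0 | Out: 0 | Occupancy: 0" on a single frame
cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))
```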