#!/usr/bin/env python

import pathlib
import tempfile

import cv2
import gradio as gr
import numpy as np
import PIL.Image
import spaces
import supervision as sv
import torch
import tqdm.auto
from transformers import AutoProcessor, RTDetrForObjectDetection, VitPoseForPoseEstimation

DESCRIPTION = "# ViTPose"

MAX_NUM_FRAMES = 300

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

person_detector_name = "PekingU/rtdetr_r50vd_coco_o365"
person_image_processor = AutoProcessor.from_pretrained(person_detector_name)
person_model = RTDetrForObjectDetection.from_pretrained(person_detector_name, device_map=device)

pose_model_name = "usyd-community/vitpose-base-simple"
pose_image_processor = AutoProcessor.from_pretrained(pose_model_name)
pose_model = VitPoseForPoseEstimation.from_pretrained(pose_model_name, device_map=device)


@spaces.GPU(duration=5)
@torch.inference_mode()
def detect_pose_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
    """Detects persons and estimates their poses in a single image.

    Args:
        image (PIL.Image.Image): Input image in which to detect persons and estimate poses.

    Returns:
        tuple[PIL.Image.Image, list[dict]]:
            - Annotated image with bounding boxes and pose keypoints drawn.
            - List of dictionaries containing human-readable pose estimation results for each detected person.
    """
    inputs = person_image_processor(images=image, return_tensors="pt").to(device)
    outputs = person_model(**inputs)
    results = person_image_processor.post_process_object_detection(
        outputs, target_sizes=torch.tensor([(image.height, image.width)]), threshold=0.3
    )
    result = results[0]  # take first image results

    # Label index 0 corresponds to the "person" class in the COCO dataset
    person_boxes_xyxy = result["boxes"][result["labels"] == 0]
    person_boxes_xyxy = person_boxes_xyxy.cpu().numpy()

    # Convert boxes from VOC (x1, y1, x2, y2) to COCO (x1, y1, w, h) format
    person_boxes = person_boxes_xyxy.copy()
    person_boxes[:, 2] = person_boxes[:, 2] - person_boxes[:, 0]
    person_boxes[:, 3] = person_boxes[:, 3] - person_boxes[:, 1]

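    # Stage 2: estimate keypoints for each detected person with ViTPose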
    inputs = pose_image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)

    # For MoE checkpoints such as vitpose-plus-base, a dataset_index must also be
    # provided to specify which experts to use for inference
    if pose_model.config.backbone_config.num_experts > 1:
        dataset_index = torch.tensor([0] * len(inputs["pixel_values"]))
        dataset_index = dataset_index.to(inputs["pixel_values"].device)
        inputs["dataset_index"] = dataset_index

    outputs = pose_model(**inputs)

    pose_results = pose_image_processor.post_process_pose_estimation(outputs, boxes=[person_boxes])
    image_pose_result = pose_results[0]  # results for first image

    # make results more human-readable
    human_readable_results = []
    for i, person_pose in enumerate(image_pose_result):
        data = {
            "person_id": i,
            "bbox": person_pose["bbox"].numpy().tolist(),
            "keypoints": [],
        }
        for keypoint, label, score in zip(
            person_pose["keypoints"], person_pose["labels"], person_pose["scores"], strict=True
        ):
            keypoint_name = pose_model.config.id2label[label.item()]
            x, y = keypoint
            data["keypoints"].append({"name": keypoint_name, "x": x.item(), "y": y.item(), "score": score.item()})
        human_readable_results.append(data)

    # stack keypoints into a numpy array of shape (n_objects, n_keypoints, 2)
    xy = [pose_result["keypoints"] for pose_result in image_pose_result]
    xy = torch.stack(xy).cpu().numpy()

    scores = [pose_result["scores"] for pose_result in image_pose_result]
    scores = torch.stack(scores).cpu().numpy()

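    # wrap detections and keypoints in supervision objects so the annotators can draw them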
    keypoints = sv.KeyPoints(xy=xy, confidence=scores)
    detections = sv.Detections(xyxy=person_boxes_xyxy)

    edge_annotator = sv.EdgeAnnotator(color=sv.Color.GREEN, thickness=1)
    vertex_annotator = sv.VertexAnnotator(color=sv.Color.RED, radius=2)
    bounding_box_annotator = sv.BoxAnnotator(color=sv.Color.WHITE, color_lookup=sv.ColorLookup.INDEX, thickness=1)

    # annotate bounding boxes
    annotated_frame = bounding_box_annotator.annotate(scene=image.copy(), detections=detections)

    # annotate edges and vertices
    annotated_frame = edge_annotator.annotate(scene=annotated_frame, key_points=keypoints)
    return vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints), human_readable_results


@spaces.GPU(duration=90)
def detect_pose_video(
    video_path: str,
    progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
) -> str:
    """Detects persons and estimates their poses for each frame in a video, saving the annotated video.

    Args:
        video_path (str): Path to the input video file.
        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).

    Returns:
        str: Path to the output video file with annotated bounding boxes and pose keypoints.
    """
    cap = cv2.VideoCapture(video_path)

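    # read source video properties so the output matches the input resolution and frame rate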
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = cap.get(cv2.CAP_PROP_FPS)
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

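    # write annotated frames to a temporary MP4 file, processing at most MAX_NUM_FRAMES frames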
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as out_file:
        writer = cv2.VideoWriter(out_file.name, fourcc, fps, (width, height))
        for _ in tqdm.auto.tqdm(range(min(MAX_NUM_FRAMES, num_frames))):
            ok, frame = cap.read()
            if not ok:
                break
            rgb_frame = frame[:, :, ::-1]  # OpenCV decodes frames as BGR; convert to RGB for PIL
            annotated_frame, _ = detect_pose_image(PIL.Image.fromarray(rgb_frame))
            writer.write(np.asarray(annotated_frame)[:, :, ::-1])  # convert back to BGR for the writer
        writer.release()
    cap.release()
    return out_file.name


with gr.Blocks(css_paths="style.css") as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Tabs():
        with gr.Tab("Image"):
            with gr.Row():
                with gr.Column():
                    input_image = gr.Image(label="Input Image", type="pil")
                    run_button_image = gr.Button()
                with gr.Column():
                    output_image = gr.Image(label="Output Image")
                    output_json = gr.JSON(label="Output JSON")
            gr.Examples(
                examples=sorted(pathlib.Path("images").glob("*.jpg")),
                inputs=input_image,
                outputs=[output_image, output_json],
                fn=detect_pose_image,
            )

            run_button_image.click(
                fn=detect_pose_image,
                inputs=input_image,
                outputs=[output_image, output_json],
            )

        with gr.Tab("Video"):
            gr.Markdown(f"The input video will be truncated to {MAX_NUM_FRAMES} frames.")

            with gr.Row():
                with gr.Column():
                    input_video = gr.Video(label="Input Video")
                    run_button_video = gr.Button()
                with gr.Column():
                    output_video = gr.Video(label="Output Video")

            gr.Examples(
                examples=sorted(pathlib.Path("videos").glob("*.mp4")),
                inputs=input_video,
                outputs=output_video,
                fn=detect_pose_video,
                cache_examples=False,
            )
            run_button_video.click(
                fn=detect_pose_video,
                inputs=input_video,
                outputs=output_video,
            )


if __name__ == "__main__":
    demo.launch(mcp_server=True)