# UVIS - Gradio App with Upload, URL & Video Support
"""
This script launches the UVIS (Unified Visual Intelligence System) as a Gradio Web App.
Supports image, video, and URL-based media inputs for detection, segmentation, and depth estimation.
Outputs include scene blueprint, structured JSON, and downloadable results.
"""


import logging
import os
import shutil
import tempfile
import time
import traceback

import cv2
import gradio as gr
import spaces
import timeout_decorator
from huggingface_hub import hf_hub_download
from PIL import Image

from registry import get_model
from core.describe_scene import describe_scene
from core.process import process_image, process_video  # process_video assumed to live alongside process_image
from core.input_handler import resolve_input, validate_video, validate_image
from utils.helpers import format_error, generate_session_id

try:
    # Clear any stale Hugging Face cache on startup so model weights are re-downloaded fresh.
    shutil.rmtree(os.path.expanduser("~/.cache/huggingface"), ignore_errors=True)
    shutil.rmtree("/home/user/.cache/huggingface", ignore_errors=True)
    print("💥 Nuked HF model cache from runtime.")
except Exception as e:
    print("🚫 Failed to nuke cache:", e)

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
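
# `log_runtime` is called by handle() below but was not defined anywhere in this
# file; a minimal sketch of the assumed helper.
def log_runtime(start_time: float) -> None:
    """Log elapsed wall-clock time since `start_time` (a time.time() timestamp)."""
    logger.info(f"Total runtime: {time.time() - start_time:.2f} seconds")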

# Model mappings
DETECTION_MODEL_MAP = {
    "YOLOv8-Nano": "yolov8n",
    "YOLOv8-Small": "yolov8s",
    "YOLOv8-Large": "yolov8l",
    "YOLOv11-Beta": "yolov11b"
}


SEGMENTATION_MODEL_MAP = {
    "SegFormer-B0": "segformer_b0",
    "SegFormer-B5": "segformer_b5",
    "DeepLabV3-ResNet50": "deeplabv3_resnet50"
}

DEPTH_MODEL_MAP = {
    "MiDaS v21 Small 256": "midas_v21_small_256",
    "MiDaS v21 384": "midas_v21_384",
    "DPT Hybrid 384": "dpt_hybrid_384",
    "DPT Swin2 Large 384": "dpt_swin2_large_384",
    "DPT Beit Large 512": "dpt_beit_large_512"
}


# # Resource Limits
# MAX_IMAGE_MB = 15
# MAX_IMAGE_RES = (1920, 1080)
# MAX_VIDEO_MB = 50
# MAX_VIDEO_DURATION = 15  # seconds
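
# For reference, a minimal sketch of the kind of gate the disabled limits above
# imply. Actual enforcement is assumed to live in validate_image / validate_video
# (core.input_handler); the defaults here mirror the commented-out values and are
# illustrative only.
def _image_within_limits(img: Image.Image, max_mb: float = 15, max_res=(1920, 1080)) -> bool:
    """Rough resolution/size gate: checks dimensions and an uncompressed-RGB size estimate."""
    w, h = img.size
    approx_mb = (w * h * 3) / (1024 * 1024)  # decoded RGB estimate, not file size on disk
    return approx_mb <= max_mb and w <= max_res[0] and h <= max_res[1]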


# def preload_models():
#     """
#     Optional ZeroGPU warm-up hook: load models into memory ahead of time.
#     """
#     from registry import get_model
#     print("Warming up models for ZeroGPU...")
#     get_model("detection", "yolov8n", device="cpu")
#     get_model("segmentation", "deeplabv3_resnet50", device="cpu")
#     get_model("depth", "midas_v21_small_256", device="cpu")

# ZeroGPU requires at least one @spaces.GPU-decorated function. With preload_models
# commented out, the decorator below already applied to handle() (comments and blank
# lines do not detach a decorator); making that explicit here.
@spaces.GPU
def handle(mode, media_upload, url,
           run_det, det_model, det_confidence,
           run_seg, seg_model,
           run_depth, depth_model,
           blend):
    """
    Master handler for resolving input and processing.
    Returns: (img_out, vid_out, json_out, zip_out)
    """
    session_id = generate_session_id()
    logger.info(f"Session ID: {session_id} | Handler activated with mode: {mode}")
    start_time = time.time()

    media = resolve_input(mode, media_upload, url)
    if not media:
        return (
            gr.update(visible=False),
            gr.update(visible=False),
            format_error("No valid input provided. Please check your upload or URL."),
            None
        )

    first_input = media[0]

    # 🔧 Resolve dropdown labels to registry model keys
    resolved_det_model = DETECTION_MODEL_MAP.get(det_model, det_model)
    resolved_seg_model = SEGMENTATION_MODEL_MAP.get(seg_model, seg_model)
    resolved_depth_model = DEPTH_MODEL_MAP.get(depth_model, depth_model)

    # --- VIDEO PATH ---
    if isinstance(first_input, str) and first_input.lower().endswith((".mp4", ".mov", ".avi")):
        valid, err = validate_video(first_input)
        if not valid:
            return (
                gr.update(visible=False),
                gr.update(visible=False),
                format_error(err),
                None
            )
        try:
            _, msg, output_video_path = process_video(
                video_path=first_input,
                run_det=run_det,
                det_model=resolved_det_model,
                det_confidence=det_confidence,
                run_seg=run_seg,
                seg_model=resolved_seg_model,
                run_depth=run_depth,
                depth_model=resolved_depth_model,
                blend=blend
            )
            return (
                gr.update(visible=False),  # hide image
                gr.update(value=output_video_path, visible=True),  # show video
                msg,
                output_video_path  # for download
            )
        except Exception as e:
            logger.error(f"Video processing failed: {e}")
            return (
                gr.update(visible=False),
                gr.update(visible=False),
                format_error(str(e)),
                None
            )

    # --- IMAGE PATH ---
    elif isinstance(first_input, Image.Image):
        valid, err = validate_image(first_input)
        if not valid:
            return (
                gr.update(visible=False),
                gr.update(visible=False),
                format_error(err),
                None
            )
        try:
            result_img, msg, output_zip = process_image(
                image=first_input,
                run_det=run_det,
                det_model=resolved_det_model,
                det_confidence=det_confidence,
                run_seg=run_seg,
                seg_model=resolved_seg_model,
                run_depth=run_depth,
                depth_model=resolved_depth_model,
                blend=blend
            )
            return (
                gr.update(value=result_img, visible=True),  # show image
                gr.update(visible=False),  # hide video
                msg,
                output_zip
            )
        except timeout_decorator.TimeoutError:
            logger.error("Image processing timed out.")
            return (
                gr.update(visible=False),
                gr.update(visible=False),
                format_error("Processing timed out. Try a smaller image or simpler model."),
                None
            )
        except Exception as e:
            traceback.print_exc()
            logger.error(f"Image processing failed: {e}")
            return (
                gr.update(visible=False),
                gr.update(visible=False),
                format_error(str(e)),
                None
            )

    logger.warning("Unsupported media type resolved.")
    log_runtime(start_time)
    return (
        gr.update(visible=False),
        gr.update(visible=False),
        format_error("Unsupported input type."),
        None
    )



def show_preview_from_upload(files):
    if not files:
        return gr.update(visible=False), gr.update(visible=False)
    
    file = files[0]
    filename = file.name.lower()

    if filename.endswith((".png", ".jpg", ".jpeg", ".webp")):
        # Open via the underlying temp-file path; the upload handle itself may already be closed.
        img = Image.open(file.name).convert("RGB")
        return gr.update(value=img, visible=True), gr.update(visible=False)

    elif filename.endswith((".mp4", ".mov", ".avi")):
        # Copy the uploaded video to a known temp location so the player gets a stable path
        temp_dir = tempfile.mkdtemp()
        ext = os.path.splitext(filename)[-1]
        safe_path = os.path.join(temp_dir, f"uploaded_video{ext}")
        shutil.copy(file.name, safe_path)

        return gr.update(visible=False), gr.update(value=safe_path, visible=True)

    return gr.update(visible=False), gr.update(visible=False)

def show_preview_from_url(url_input):
    if not url_input:
        return gr.update(visible=False), gr.update(visible=False)
    path = url_input.strip().lower()
    if path.endswith((".png", ".jpg", ".jpeg", ".webp")):
        return gr.update(value=url_input, visible=True), gr.update(visible=False)
    elif path.endswith((".mp4", ".mov", ".avi")):
        return gr.update(visible=False), gr.update(value=url_input, visible=True)
    return gr.update(visible=False), gr.update(visible=False)


def clear_model_cache():
    """
    Deletes all model weight folders so they are redownloaded fresh.
    """
    folders = [
        "models/detection/weights",
        "models/segmentation/weights",
        "models/depth/weights"
    ]
    for folder in folders:
        shutil.rmtree(folder, ignore_errors=True)
        logger.info(f"Cleared: {folder}")
    return "Model cache cleared. Models will be reloaded on next run."


# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## Unified Visual Intelligence System (UVIS)")
    with gr.Row():
        # Left panel
        with gr.Column(scale=2):
            # Input mode toggle
            mode = gr.Radio(["Upload", "URL"], value="Upload", label="Input Mode")

            # File upload: accepts multiple images or one video (user chooses wisely)
            media_upload = gr.File(
                label="Upload Images (1–5) or 1 Video",
                file_types=["image", ".mp4", ".mov", ".avi"],
                file_count="multiple",
                visible=True
            )
            
            # URL input
            url = gr.Textbox(label="URL (Image/Video)", visible=False)
            
            # Toggle input visibility; wired up below, after the preview components
            # (img_out / vid_out) exist, since it also hides any stale previews.
            def toggle_inputs(selected_mode):
                return [
                    gr.update(visible=(selected_mode == "Upload")),  # media_upload
                    gr.update(visible=(selected_mode == "URL")),     # url
                    gr.update(visible=False),                        # img_out preview
                    gr.update(visible=False)                         # vid_out preview
                ]
           
            # Visibility logic function
            def toggle_visibility(checked):
                return gr.update(visible=checked)
            
            # def toggle_det_visibility(checked):
            #     return [gr.update(visible=checked), gr.update(visible=checked)]
            
            run_det = gr.Checkbox(label="Object Detection")
            run_seg = gr.Checkbox(label="Semantic Segmentation")
            run_depth = gr.Checkbox(label="Depth Estimation")
            
            with gr.Row():
                with gr.Column(visible=False) as OD_Settings:
                    with gr.Accordion("Object Detection Settings", open=True):
                        # Default keeps handle() from receiving None if the panel is never touched
                        det_model = gr.Dropdown(choices=list(DETECTION_MODEL_MAP), value="YOLOv8-Nano", label="Detection Model")
                        det_confidence = gr.Slider(0.1, 1.0, 0.5, label="Detection Confidence Threshold")
                        nms_thresh = gr.Slider(0.1, 1.0, 0.45, label="NMS Threshold")
                        max_det = gr.Slider(1, 100, 20, step=1, label="Max Detections")
                        iou_thresh = gr.Slider(0.1, 1.0, 0.5, label="IoU Threshold")
                        class_filter = gr.CheckboxGroup(["Person", "Car", "Dog"], label="Class Filter")
            
                with gr.Column(visible=False) as SS_Settings:
                    with gr.Accordion("Semantic Segmentation Settings", open=True):
                        seg_model = gr.Dropdown(choices=list(SEGMENTATION_MODEL_MAP), value="SegFormer-B0", label="Segmentation Model")
                        resize_strategy = gr.Dropdown(["Crop", "Pad", "Scale"], label="Resize Strategy")
                        overlay_alpha = gr.Slider(0.0, 1.0, 0.5, label="Overlay Opacity")
                        seg_classes = gr.CheckboxGroup(["Road", "Sky", "Building"], label="Target Classes")
                        enable_crf = gr.Checkbox(label="Postprocessing (CRF)")
            
                with gr.Column(visible=False) as DE_Settings:
                    with gr.Accordion("Depth Estimation Settings", open=True):
                        depth_model = gr.Dropdown(choices=list(DEPTH_MODEL_MAP), value="MiDaS v21 Small 256", label="Depth Model")
                        output_type = gr.Dropdown(["Raw", "Disparity", "Scaled"], label="Output Type")
                        colormap = gr.Dropdown(["Jet", "Viridis", "Plasma"], label="Colormap")
                        # Renamed to avoid shadowing the global blend slider below, which is the one wired into handle()
                        depth_blend = gr.Slider(0.0, 1.0, 0.5, label="Overlay Blend")
                        normalize = gr.Checkbox(label="Normalize Depth")
                        max_depth = gr.Slider(0.1, 10.0, 5.0, label="Max Depth (meters)")

            # Attach Visibility Logic
            run_det.change(fn=toggle_visibility, inputs=[run_det], outputs=[OD_Settings])
            run_seg.change(fn=toggle_visibility, inputs=[run_seg], outputs=[SS_Settings])
            run_depth.change(fn=toggle_visibility, inputs=[run_depth], outputs=[DE_Settings])

                    
            # Global overlay blend; this is the value passed to handle()
            blend = gr.Slider(0.0, 1.0, 0.5, label="Overlay Blend")

            # Run Button
            run = gr.Button("Run Analysis")

        # Right panel
        with gr.Column(scale=1):
            # single_img_preview = gr.Image(label="Preview (Image)", visible=False)
            # gallery_preview = gr.Gallery(label="Preview (Gallery)", columns=3, height="auto", visible=False)
            # video_preview = gr.Video(label="Preview (Video)", visible=False)
            # Only one is shown at a time: image or video
            img_out = gr.Image(label="Preview / Processed Output", visible=False)
            vid_out = gr.Video(label="Preview / Processed Video", visible=False, streaming=True, autoplay=True)
            json_out = gr.JSON(label="Scene JSON")
            zip_out = gr.File(label="Download Results")
            clear_button = gr.Button("🧹 Clear Model Cache")
            status_box = gr.Textbox(label="Status", interactive=False)
        
        clear_button.click(fn=clear_model_cache, inputs=[], outputs=[status_box])


    mode.change(toggle_inputs, inputs=mode, outputs=[media_upload, url, img_out, vid_out])
    media_upload.change(show_preview_from_upload, inputs=media_upload, outputs=[img_out, vid_out])
    url.submit(show_preview_from_url, inputs=url, outputs=[img_out, vid_out])
        
    # Unified run click → switch visibility based on image or video output.
    # (Currently unused: handle() already returns gr.update objects that do the
    # toggling; kept for reference.)
    def route_output(image_output, json_output, zip_file):
        # Show img_out if image was returned, else show video
        if isinstance(image_output, Image.Image):
            return gr.update(value=image_output, visible=True), gr.update(visible=False), json_output, zip_file
        elif isinstance(zip_file, str) and zip_file.endswith(".mp4"):
            return gr.update(visible=False), gr.update(value=zip_file, visible=True), json_output, zip_file
        else:
            return gr.update(visible=False), gr.update(visible=False), json_output, zip_file

    
    # # Output Tabs
    # with gr.Tab("Scene JSON"):
    #     json_out = gr.JSON()
    # with gr.Tab("Scene Blueprint"):
    #     img_out = gr.Image()
    # with gr.Tab("Download"):
    #     zip_out = gr.File()


    # Button Click Event
    run.click(
        fn=handle,
        inputs=[
            mode, media_upload, url,
            run_det, det_model, det_confidence,
            run_seg, seg_model,
            run_depth, depth_model,
            blend
        ],
        outputs=[
            img_out,   # visible only when the result is an image
            vid_out,   # visible only when the result is a video
            json_out,
            zip_out
        ]
    )


    # Footer Section
    gr.Markdown("---")
    gr.Markdown(
        """
        <div style='text-align: center; font-size: 14px;'>
            Built by <b>Durga Deepak Valluri</b><br>
            <a href="https://github.com/DurgaDeepakValluri" target="_blank">GitHub</a> |
            <a href="https://deecoded.io" target="_blank">Website</a> |
            <a href="https://www.linkedin.com/in/durga-deepak-valluri" target="_blank">LinkedIn</a>
        </div>
        """,
    )

# Launch the Gradio App
demo.launch(share=True)