File size: 4,728 Bytes
03efa53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
import streamlit as st
from PIL import Image
import cv2
import numpy as np
from ultralytics import YOLO
import tempfile
import os
from streamlit.web.bootstrap import run


# Environment configuration for the Streamlit runtime and TensorFlow logging.
# NOTE(review): these STREAMLIT_* variables are assigned AFTER `import streamlit`
# above — Streamlit typically reads its config at import/startup, so these may
# have no effect unless also set before launching the process. TODO confirm.
# Disable file watcher
os.environ["STREAMLIT_SERVER_ENABLE_FILE_WATCHER"] = "false"  # Disable watcher
os.environ["STREAMLIT_SERVER_ENABLE_XSRF_PROTECTION"] = "false"
# Configure environment to suppress warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # 3 = errors only from TensorFlow C++ backend
os.environ["STREAMLIT_WATCHER_TYPE"] = "none"

# Cache the detector so repeated Streamlit script reruns reuse one loaded model.
@st.cache_resource
def load_model():
    """Load and return the YOLO detector from the bundled ``best50.pt`` weights."""
    weights_path = "best50.pt"
    return YOLO(weights_path)

# Initialize session state for webcam; persists the on/off flag across
# Streamlit's script reruns so the capture loop can be stopped.
if 'webcam_active' not in st.session_state:
    st.session_state.webcam_active = False

# App title and layout
st.title("Object Detection App")
st.write("Upload an image/video or use your webcam")

# Load model (cached via st.cache_resource, so this is cheap after the first run)
model = load_model()

# Create tabs for different input sources
tab_upload, tab_webcam = st.tabs(["Upload Media", "Webcam"])

def process_image(img, conf=0.5, iou=0.4):
    """Run YOLO detection on a PIL image and return an annotated copy.

    Args:
        img: PIL.Image (RGB channel order, as produced by ``Image.open``).
        conf: Minimum detection confidence threshold (default 0.5, the value
            previously hard-coded here).
        iou: IoU threshold for non-max suppression (default 0.4).

    Returns:
        Tuple of (annotated image as an RGB numpy array, raw ultralytics
        results list).
    """
    img_array = np.array(img)
    # OpenCV / YOLO expect BGR channel order; PIL supplies RGB.
    img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
    results = model(img_bgr, conf=conf, iou=iou)
    # plot() draws boxes and returns a BGR array; convert back to RGB
    # so Streamlit's st.image renders the colors correctly.
    annotated_img = results[0].plot()
    return cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB), results

with tab_upload:
    uploaded_file = st.file_uploader(
        "Choose an image or video",
        type=["jpg", "jpeg", "png", "mp4", "mov"],
        label_visibility="collapsed"
    )

    if uploaded_file:
        if uploaded_file.type.startswith('image'):
            # --- Image path: detect once, show original and annotated side by side.
            image = Image.open(uploaded_file)
            annotated_img, results = process_image(image)

            col1, col2 = st.columns(2)
            with col1:
                st.image(image, caption="Original Image", use_container_width=True)
            with col2:
                st.image(annotated_img, caption="Detected Objects", use_container_width=True)

            # List every detection. The model was already run with conf=0.5,
            # so all returned boxes meet the threshold.
            st.subheader("Detected Objects:")
            for box in results[0].boxes:
                class_name = model.names[int(box.cls)]
                confidence = float(box.conf)
                st.write(f"- {class_name} (confidence: {confidence:.2f})")

        elif uploaded_file.type.startswith('video'):
            # --- Video path: buffer upload to disk so OpenCV can seek it.
            with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tfile:
                tfile.write(uploaded_file.read())
                video_path = tfile.name

            st.video(video_path)

            # Annotate frame-by-frame into a second temp file, then display it.
            with st.spinner('Processing video...'):
                output_path = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
                cap = cv2.VideoCapture(video_path)
                try:
                    # Some containers report 0 fps; fall back to a sane default
                    # so VideoWriter does not produce an unplayable file.
                    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
                    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

                    out = cv2.VideoWriter(
                        output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)
                    )
                    try:
                        while cap.isOpened():
                            ret, frame = cap.read()
                            if not ret:
                                break
                            # Use the same thresholds as the image path for
                            # consistent detections across media types.
                            results = model(frame, conf=0.5, iou=0.4)
                            annotated_frame = results[0].plot()
                            out.write(annotated_frame)
                    finally:
                        out.release()
                finally:
                    cap.release()

                st.video(output_path)
                # Temp files are delete=False; remove them explicitly.
                os.unlink(video_path)
                os.unlink(output_path)

with tab_webcam:
    if st.checkbox("Start Webcam", key="webcam_toggle"):
        st.session_state.webcam_active = True
        st.write("Click below to stop the webcam")

        cap = cv2.VideoCapture(0)
        frame_placeholder = st.empty()
        # Clicking the button triggers a full Streamlit rerun; on that rerun
        # `stop_button` is True, so the loop below exits immediately. It is
        # evaluated once per run — checking it repeatedly mid-loop is redundant.
        stop_button = st.button("Stop Webcam")

        try:
            while cap.isOpened() and st.session_state.webcam_active:
                if stop_button:
                    st.session_state.webcam_active = False
                    break
                ret, frame = cap.read()
                if not ret:
                    # Camera disconnected or frame grab failed; stop cleanly.
                    st.session_state.webcam_active = False
                    break

                # Same thresholds as the upload paths for consistent output.
                results = model(frame, conf=0.5, iou=0.4)
                annotated_frame = results[0].plot()
                frame_placeholder.image(
                    cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB),
                    channels="RGB",
                    use_container_width=True
                )
        finally:
            # Always release the camera, even if detection/display raises.
            cap.release()

        if stop_button:
            st.success("Webcam stopped")