"""Streamlit object-detection app.

Runs a YOLO model (weights: ``best50.pt``) on an uploaded image, an uploaded
video, or a live webcam feed, and displays annotated results.
"""

# Configure the environment BEFORE importing streamlit: Streamlit reads these
# settings at import time, so setting them after the import has no effect.
import os

os.environ["STREAMLIT_SERVER_ENABLE_FILE_WATCHER"] = "false"  # disable file watcher
os.environ["STREAMLIT_SERVER_ENABLE_XSRF_PROTECTION"] = "false"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress TensorFlow warnings
os.environ["STREAMLIT_WATCHER_TYPE"] = "none"

import tempfile

import cv2
import numpy as np
import streamlit as st
from PIL import Image
from streamlit.web.bootstrap import run  # NOTE(review): unused — kept for compatibility
from ultralytics import YOLO

# Detection thresholds, shared by every inference path so image, video and
# webcam results are filtered identically.
CONF_THRESHOLD = 0.5
IOU_THRESHOLD = 0.4


@st.cache_resource
def load_model():
    """Load the YOLO model once and cache it across Streamlit reruns."""
    return YOLO("best50.pt")


# Initialize session state for webcam
if "webcam_active" not in st.session_state:
    st.session_state.webcam_active = False

# App title and layout
st.title("Object Detection App")
st.write("Upload an image/video or use your webcam")

# Load model
model = load_model()

# Create tabs for different input sources
tab_upload, tab_webcam = st.tabs(["Upload Media", "Webcam"])


def process_image(img):
    """Run detection on a PIL image.

    Returns a tuple ``(annotated_rgb, results)`` where ``annotated_rgb`` is an
    RGB numpy array with boxes drawn and ``results`` is the raw YOLO output.
    """
    img_array = np.array(img)
    # PIL gives RGB; OpenCV (and YOLO's plot()) work in BGR.
    img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
    results = model(img_bgr, conf=CONF_THRESHOLD, iou=IOU_THRESHOLD)
    annotated_img = results[0].plot()  # BGR
    return cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB), results


with tab_upload:
    uploaded_file = st.file_uploader(
        "Choose an image or video",
        type=["jpg", "jpeg", "png", "mp4", "mov"],
        label_visibility="collapsed",
    )

    if uploaded_file:
        if uploaded_file.type.startswith("image"):
            # Process image
            image = Image.open(uploaded_file)
            annotated_img, results = process_image(image)

            # Display side by side
            col1, col2 = st.columns(2)
            with col1:
                st.image(image, caption="Original Image", use_container_width=True)
            with col2:
                st.image(annotated_img, caption="Detected Objects", use_container_width=True)

            # Show detection results
            st.subheader("Detected Objects:")
            for box in results[0].boxes:
                class_name = model.names[int(box.cls)]
                confidence = float(box.conf)
                if confidence >= CONF_THRESHOLD:
                    st.write(f"- {class_name} (confidence: {confidence:.2f})")

        elif uploaded_file.type.startswith("video"):
            # Persist the upload to disk so OpenCV can open it by path.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tfile:
                tfile.write(uploaded_file.read())
                video_path = tfile.name

            st.video(video_path)

            # Process and show output video
            with st.spinner("Processing video..."):
                cap = cv2.VideoCapture(video_path)
                # Some containers report 0 fps; fall back to a sane default so
                # VideoWriter does not produce a broken file.
                fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

                # Close the handle immediately: VideoWriter reopens the path
                # itself, and an open handle blocks reopening on Windows.
                out_tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
                output_path = out_tmp.name
                out_tmp.close()

                out = cv2.VideoWriter(
                    output_path,
                    cv2.VideoWriter_fourcc(*"mp4v"),
                    fps,
                    (width, height),
                )

                try:
                    while cap.isOpened():
                        ret, frame = cap.read()
                        if not ret:
                            break
                        results = model(frame, conf=CONF_THRESHOLD, iou=IOU_THRESHOLD)
                        annotated_frame = results[0].plot()
                        out.write(annotated_frame)
                finally:
                    cap.release()
                    out.release()

                # Hand Streamlit the bytes rather than the path: the temp files
                # are deleted below, and st.video may re-read a path on rerun.
                with open(output_path, "rb") as f:
                    st.video(f.read())

                os.unlink(video_path)
                os.unlink(output_path)

with tab_webcam:
    if st.checkbox("Start Webcam", key="webcam_toggle"):
        st.session_state.webcam_active = True
        st.write("Click below to stop the webcam")

        cap = cv2.VideoCapture(0)
        frame_placeholder = st.empty()
        stop_button = st.button("Stop Webcam")

        try:
            while cap.isOpened() and st.session_state.webcam_active:
                ret, frame = cap.read()
                if not ret or stop_button:
                    st.session_state.webcam_active = False
                    break

                results = model(frame, conf=CONF_THRESHOLD, iou=IOU_THRESHOLD)
                annotated_frame = results[0].plot()
                frame_placeholder.image(
                    cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB),
                    channels="RGB",
                    use_container_width=True,
                )

                if stop_button:
                    st.session_state.webcam_active = False
                    break
        finally:
            # Always release the camera, even if inference raises.
            cap.release()

        if stop_button:
            st.success("Webcam stopped")