import gradio as gr import torch from PIL import Image, ImageDraw, ImageFont from transformers import DetrImageProcessor, DetrForObjectDetection from pathlib import Path import transformers import warnings import traceback import datetime warnings.filterwarnings("ignore", message=".*copying from a non-meta parameter.*") # Global variables to cache models current_model = None current_processor = None current_model_name = None # Global debug state debug_info = {"last_error": "", "step": "", "language": "", "timestamp": ""} # Available models with better selection available_models = { "DETR ResNet-50": "facebook/detr-resnet-50", "DETR ResNet-101": "facebook/detr-resnet-101", "DETR DC5": "facebook/detr-resnet-50-dc5", "DETR ResNet-50 Face Only": "esraakh/detr_fine_tune_face_detection_final" } def load_model(model_key): """Load model and processor based on selected model key""" global current_model, current_processor, current_model_name, debug_info model_name = available_models[model_key] # Only load if it's a different model if current_model_name != model_name: debug_info["step"] = f"Loading model: {model_name}" print(f"Loading model: {model_name}") current_processor = DetrImageProcessor.from_pretrained(model_name) current_model = DetrForObjectDetection.from_pretrained(model_name) current_model_name = model_name print(f"Model loaded: {model_name}") print(f"Available labels: {list(current_model.config.id2label.values())}") debug_info["step"] = f"Model loaded successfully: {model_name}" return current_model, current_processor # Load font font_path = Path("assets/fonts/arial.ttf") if not font_path.exists(): print(f"Font file {font_path} not found. Using default font.") font = ImageFont.load_default() else: font = ImageFont.truetype(str(font_path), size=100) # Set up translations for the app translations = { "English": { "title": "## Enhanced Object Detection App\nUpload an image to detect objects using various DETR models.", "input_label": "Input Image", "output_label": "Detected Objects", "dropdown_label": "Label Language", "dropdown_detection_model_label": "Detection Model", "threshold_label": "Detection Threshold", "button": "Detect Objects", "info_label": "Detection Info", "error_label": "Error Messages", "debug_label": "Debug Status", "debug_button": "Show Debug Status", "model_fast": "General Objects (fast)", "model_precision": "General Objects (high precision)", "model_small": "Small Objects/Details (slow)", "model_faces": "Face Detection (people only)" }, "Spanish": { "title": "## Aplicación Mejorada de Detección de Objetos\nSube una imagen para detectar objetos usando varios modelos DETR.", "input_label": "Imagen de entrada", "output_label": "Objetos detectados", "dropdown_label": "Idioma de las etiquetas", "dropdown_detection_model_label": "Modelo de detección", "threshold_label": "Umbral de detección", "button": "Detectar objetos", "info_label": "Información de detección", "error_label": "Mensajes de error", "debug_label": "Estado de depuración", "debug_button": "Mostrar estado de depuración", "model_fast": "Objetos generales (rápido)", "model_precision": "Objetos generales (precisión alta)", "model_small": "Objetos pequeños/detalles (lento)", "model_faces": "Detección de caras (solo personas)" }, "French": { "title": "## Application Améliorée de Détection d'Objets\nTéléchargez une image pour détecter des objets avec divers modèles DETR.", "input_label": "Image d'entrée", "output_label": "Objets détectés", "dropdown_label": "Langue des étiquettes", "dropdown_detection_model_label": "Modèle de détection", "threshold_label": "Seuil de détection", "button": "Détecter les objets", "info_label": "Information de détection", "error_label": "Messages d'erreur", "debug_label": "État de débogage", "debug_button": "Afficher l'état de débogage", "model_fast": "Objets généraux (rapide)", "model_precision": "Objets généraux (haute précision)", "model_small": "Petits objets/détails (lent)", "model_faces": "Détection de visages (personnes uniquement)" } } def t(language, key): return translations.get(language, translations["English"]).get(key, key) def get_translated_model_choices(language): """Get model choices translated to the selected language""" global debug_info debug_info["step"] = f"Translating model choices for {language}" model_mapping = { "DETR ResNet-50": "model_fast", "DETR ResNet-101": "model_precision", "DETR DC5": "model_small", "DETR ResNet-50 Face Only": "model_faces" } translated_choices = [] for model_key in available_models.keys(): if model_key in model_mapping: translation_key = model_mapping[model_key] translated_name = t(language, translation_key) else: translated_name = model_key translated_choices.append(translated_name) debug_info["step"] = f"Model choices translated: {translated_choices}" return translated_choices def get_model_key_from_translation(translated_name, language): """Get the original model key from translated name""" model_mapping = { "DETR ResNet-50": "model_fast", "DETR ResNet-101": "model_precision", "DETR DC5": "model_small", "DETR ResNet-50 Face Only": "model_faces" } # Reverse lookup for model_key, translation_key in model_mapping.items(): if t(language, translation_key) == translated_name: return model_key # If not found, try direct match if translated_name in available_models: return translated_name # Default fallback return "DETR ResNet-50" def get_helsinki_model(language_label): """Returns the Helsinki-NLP model name for translating from English to the selected language.""" lang_map = { "Spanish": "es", "French": "fr", "English": "en" } target = lang_map.get(language_label) if not target or target == "en": return None return f"Helsinki-NLP/opus-mt-en-{target}" # Translation cache translation_cache = {} def translate_label(language_label, label): """Translates the given label to the target language.""" # Check cache first cache_key = f"{language_label}_{label}" if cache_key in translation_cache: return translation_cache[cache_key] model_name = get_helsinki_model(language_label) if not model_name: return label try: translator = transformers.pipeline("translation", model=model_name) result = translator(label, max_length=40) translated = result[0]['translation_text'] # Cache the result translation_cache[cache_key] = translated return translated except Exception as e: print(f"Translation error (429 or other): {e}") return label # Return original if translation fails def detect_objects(image, language_selector, translated_model_selector, threshold): """Enhanced object detection with adjustable threshold and better info""" global debug_info try: debug_info["step"] = "Starting object detection" debug_info["timestamp"] = str(datetime.datetime.now()) # Get the actual model key from the translated name model_selector = get_model_key_from_translation(translated_model_selector, language_selector) debug_info["step"] = f"Model key resolved: {model_selector}" print(f"Processing image. Language: {language_selector}, Model: {model_selector}, Threshold: {threshold}") # Load the selected model debug_info["step"] = "Loading model" model, processor = load_model(model_selector) # Process the image debug_info["step"] = "Processing image with model" inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) # Convert model output to usable detection results with custom threshold debug_info["step"] = "Post-processing results" target_sizes = torch.tensor([image.size[::-1]]) results = processor.post_process_object_detection( outputs, threshold=threshold, target_sizes=target_sizes )[0] # Create a copy of the image for drawing debug_info["step"] = "Drawing bounding boxes" image_with_boxes = image.copy() draw = ImageDraw.Draw(image_with_boxes) # Detection info detection_info = f"Detected {len(results['scores'])} objects with threshold {threshold}\n" detection_info += f"Model: {translated_model_selector} ({model_selector})\n\n" # Colors for different confidence levels colors = { 'high': 'red', # > 0.8 'medium': 'orange', # 0.5-0.8 'low': 'yellow' # < 0.5 } detected_objects = [] for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): confidence = score.item() box = [round(x, 2) for x in box.tolist()] # Choose color based on confidence if confidence > 0.8: color = colors['high'] elif confidence > 0.5: color = colors['medium'] else: color = colors['low'] # Draw bounding box draw.rectangle(box, outline=color, width=3) # Prepare label text label_text = model.config.id2label[label.item()] translated_label = translate_label(language_selector, label_text) display_text = f"{translated_label}: {round(confidence, 3)}" # Store detection info detected_objects.append({ 'label': label_text, 'translated': translated_label, 'confidence': confidence, 'box': box }) # Calculate text position and size try: text_bbox = draw.textbbox((0, 0), display_text, font=font) text_width = text_bbox[2] - text_bbox[0] text_height = text_bbox[3] - text_bbox[1] except: # Fallback for older PIL versions text_width, text_height = draw.textsize(display_text, font=font) # Draw text background text_bg = [ box[0], box[1] - text_height - 4, box[0] + text_width + 4, box[1] ] draw.rectangle(text_bg, fill="black") draw.text((box[0] + 2, box[1] - text_height - 2), display_text, fill="white", font=font) # Create detailed detection info if detected_objects: detection_info += "Objects found:\n" for obj in sorted(detected_objects, key=lambda x: x['confidence'], reverse=True): detection_info += f"- {obj['translated']} ({obj['label']}): {obj['confidence']:.3f}\n" else: detection_info += "No objects detected. Try lowering the threshold." debug_info["step"] = "Detection completed successfully" debug_info["last_error"] = "" return image_with_boxes, detection_info, "" except Exception as e: error_message = f"Error in object detection:\n{str(e)}\n\nStack trace:\n{traceback.format_exc()}" debug_info["last_error"] = error_message debug_info["step"] = f"ERROR in detection: {str(e)}" print(error_message) return image if image else None, "Detection failed. See error panel below.", error_message def update_interface(selected_language): global debug_info debug_info["language"] = selected_language debug_info["timestamp"] = str(datetime.datetime.now()) debug_info["step"] = "Starting language interface update" try: translated_choices = get_translated_model_choices(selected_language) default_model = t(selected_language, "model_fast") updates = [ gr.update(value=t(selected_language, "title")), # gr.update(label=t(selected_language, "dropdown_label")), # <-- ELIMINADA ESTA LÍNEA gr.update( choices=translated_choices, value=default_model, label=t(selected_language, "dropdown_detection_model_label") ), gr.update(label=t(selected_language, "threshold_label")), gr.update(label=t(selected_language, "input_label")), gr.update(value=t(selected_language, "button")), gr.update(label=t(selected_language, "output_label")), gr.update(label=t(selected_language, "info_label")), gr.update(label=t(selected_language, "error_label"), value="", visible=False), gr.update(label=t(selected_language, "debug_label")), gr.update(value=t(selected_language, "debug_button")) ] debug_info["step"] = "Interface update completed successfully" debug_info["last_error"] = "" return updates except Exception as e: error_msg = f"ERROR in interface update at step '{debug_info['step']}':\n{str(e)}\n\nTraceback:\n{traceback.format_exc()}" debug_info["last_error"] = error_msg debug_info["step"] = f"FAILED: {str(e)}" # Safe fallback safe_updates = [gr.update() for _ in range(10)] return safe_updates def get_debug_status(): """Get current debug status for display""" global debug_info status = f"""🔍 DEBUG STATUS: Current Language: {debug_info.get('language', 'N/A')} Last Timestamp: {debug_info.get('timestamp', 'N/A')} Current Step: {debug_info.get('step', 'N/A')} Last Error: {debug_info.get('last_error', 'None')} Available Models: {list(available_models.keys())} Current Model: {current_model_name or 'None loaded'} Translation Cache Size: {len(translation_cache)} """ return status def safe_detect_objects(image, language_selector, translated_model_selector, threshold): """Safe wrapper for object detection with error handling""" global debug_info if image is None: debug_info["step"] = "No image provided" return None, "Please upload an image first.", "" try: result_image, info, error = detect_objects(image, language_selector, translated_model_selector, threshold) # Update error panel visibility based on whether there's an error error_visible = bool(error.strip()) return ( result_image, info, gr.update(value=error, visible=error_visible) ) except Exception as e: error_message = f"Unexpected error in detection:\n{str(e)}\n\nStack trace:\n{traceback.format_exc()}" debug_info["last_error"] = error_message debug_info["step"] = f"UNEXPECTED ERROR: {str(e)}" print(error_message) return ( image, "Detection failed due to unexpected error. See error panel below.", gr.update(value=error_message, visible=True) ) def build_app(): with gr.Blocks(theme=gr.themes.Soft()) as app: with gr.Row(): title = gr.Markdown(t("English", "title")) with gr.Row(): with gr.Column(scale=1): language_selector = gr.Dropdown( choices=["English", "Spanish", "French"], value="English", label=t("English", "dropdown_label") ) with gr.Column(scale=1): model_selector = gr.Dropdown( choices=get_translated_model_choices("English"), value=t("English", "model_fast"), label=t("English", "dropdown_detection_model_label") ) with gr.Column(scale=1): threshold_slider = gr.Slider( minimum=0.1, maximum=0.95, value=0.5, step=0.05, label=t("English", "threshold_label") ) with gr.Row(): with gr.Column(scale=1): input_image = gr.Image(type="pil", label=t("English", "input_label")) button = gr.Button(t("English", "button"), variant="primary") with gr.Column(scale=1): output_image = gr.Image(label=t("English", "output_label")) detection_info = gr.Textbox( label=t("English", "info_label"), lines=10, max_lines=15 ) # Error panel - only visible when there are errors with gr.Row(): error_panel = gr.Textbox( label=t("English", "error_label"), lines=8, max_lines=20, visible=False, elem_classes=["error-panel"] ) # Debug panel - always visible for debugging in HF with gr.Row(): debug_panel = gr.Textbox( label=t("English", "debug_label"), lines=10, max_lines=20, value="Application started - ready for debugging", visible=True ) with gr.Row(): debug_button = gr.Button(t("English", "debug_button"), size="sm") # Connect language change event language_selector.change( fn=update_interface, inputs=language_selector, outputs=[ title, # language_selector, # <-- esta línea también debes eliminarla model_selector, threshold_slider, input_image, button, output_image, detection_info, error_panel, debug_panel, debug_button ], queue=True ) # Connect detection button click event button.click( fn=safe_detect_objects, inputs=[input_image, language_selector, model_selector, threshold_slider], outputs=[output_image, detection_info, error_panel] ) # Connect debug button click event debug_button.click( fn=get_debug_status, outputs=debug_panel ) return app # Initialize with default model and debug info debug_info["step"] = "Initializing default model" debug_info["timestamp"] = str(datetime.datetime.now()) load_model("DETR ResNet-50") debug_info["step"] = "Application ready" # Launch the application if __name__ == "__main__": app = build_app() app.launch()