# app.py
"""
Streamlit Frontend App:
Uploads a flowchart image, sends it to the FastAPI backend,
and displays the structured JSON and English summary.
Supports multiple OCR engines.
"""

import streamlit as st
from PIL import Image
import requests
import base64
import io
import os

# Set up Streamlit UI layout
st.set_page_config(page_title="Flowchart to English", layout="wide")
st.title("📄 Flowchart to Plain English")

# Enable debug mode toggle
debug_mode = st.toggle("🔧 Show Debug Info", value=False)

# OCR engine selection dropdown
ocr_engine = st.selectbox(
    "Select OCR Engine",
    ["easyocr", "doctr"],
    index=0,
    help="Choose between EasyOCR (lightweight) and Doctr (transformer-based)",
)

# Flowchart image uploader
uploaded_file = st.file_uploader("Upload a flowchart image", type=["png", "jpg", "jpeg"])

# Backend API URL handling
# Hugging Face Spaces injects the SPACE_ID environment variable
IS_SPACE = "SPACE_ID" in os.environ

if IS_SPACE:
    # On Hugging Face Spaces, call the Space's public endpoint directly,
    # so the request is routed through the Spaces proxy
    API_URL = "https://venkatviswa-flowchart-to-text.hf.space/process-image"
else:
    # In local dev, use the full localhost URL with port
    API_URL = "http://localhost:7860/process-image"

# Allow override through environment variable if needed
API_URL = os.getenv("API_URL", API_URL)

if uploaded_file:
    # Load and resize uploaded image for preview
    image = Image.open(uploaded_file).convert("RGB")
    max_width = 600
    ratio = max_width / float(image.size[0])
    resized_image = image.resize((max_width, int(image.size[1] * ratio)))
    st.image(resized_image, caption="📤 Uploaded Image", use_container_width=False)

    if st.button("🔍 Analyze Flowchart"):
        progress = st.progress(0, text="Sending image to backend...")

        try:
            # Send request to FastAPI backend
            response = requests.post(
                API_URL,
                files={"file": uploaded_file.getvalue()},
                data={
                    "debug": str(debug_mode).lower(),
                    "ocr_engine": ocr_engine,
                },
            )
            progress.progress(40, text="Processing detection and OCR...")

            if response.status_code == 200:
                result = response.json()

                # Show debug info if enabled
                if debug_mode:
                    st.markdown("### 🧪 Debug Info")
                    st.code(result.get("debug", ""), language="markdown")

                # Show YOLO visual if available
                if debug_mode and result.get("yolo_vis"):
                    st.markdown("### 🖼️ YOLO Detected Bounding Boxes")
                    yolo_bytes = base64.b64decode(result["yolo_vis"])
                    yolo_img = Image.open(io.BytesIO(yolo_bytes))
                    st.image(yolo_img, caption="YOLO Boxes", use_container_width=True)

                progress.progress(80, text="Finalizing output...")

                # Show flowchart JSON and generated English summary
                col1, col2 = st.columns(2)
                with col1:
                    st.subheader("🧠 Flowchart JSON")
                    st.json(result["flowchart"])
                with col2:
                    st.subheader("📝 English Summary")
                    st.markdown(result["summary"])

                progress.progress(100, text="Done!")
            else:
                st.error(f"❌ Backend Error: {response.status_code} - {response.text}")
        except Exception as e:
            st.error(f"⚠️ Request Failed: {e}")
else:
    st.info("Upload a flowchart image to begin.")
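
# ---------------------------------------------------------------------------
# Expected backend response (a sketch, not a verified contract): the UI above
# only relies on the keys "flowchart", "summary", "debug", and "yolo_vis".
# The nested "nodes"/"edges" layout shown here is an illustrative assumption,
# not taken from the backend code.
#
#   {
#       "flowchart": {"nodes": [...], "edges": [...]},  # structured graph, shown via st.json
#       "summary": "Plain-English walkthrough...",      # markdown, rendered via st.markdown
#       "debug": "...",                                 # included when debug=true
#       "yolo_vis": "<base64-encoded image>"            # optional detection overlay
#   }
# ---------------------------------------------------------------------------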