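"""Streamlit app: transcribe handwritten doctor's prescriptions with PaliGemma.

Expects an HF_Token secret (a Hugging Face access token) to be set in the Space
so the gated google/paligemma-3b-mix-224 checkpoint can be downloaded.
"""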
import os

import streamlit as st
import torch
from huggingface_hub import login
from PIL import Image
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

# Get the token from environment variables (set as a secret in the Hugging Face Space)
token = os.getenv("HF_Token")
if token:
    login(token=token)

# Initialize the PaliGemma processor and model (the checkpoint is gated, so the login above is required)
processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-mix-224")
model.eval()
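# Note: on a Space, wrapping the two loads above in a function decorated with
# @st.cache_resource keeps the model in memory across Streamlit script reruns
# instead of reloading it on every interaction.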
# Function to transcribe handwritten notes using the PaliGemma model
def transcribe_handwriting(image_file):
    # Open the uploaded file and normalize it to an RGB PIL image
    image = Image.open(image_file).convert("RGB")
    # PaliGemma expects an image plus a task prompt; "ocr" asks the mix checkpoint to read text
    prompt = "ocr"
    inputs = processor(text=prompt, images=image, return_tensors="pt")
    # Generate output and strip the prompt tokens before decoding
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=256)
    generated_ids = outputs[0][inputs["input_ids"].shape[-1]:]
    transcription = processor.decode(generated_ids, skip_special_tokens=True)
    return transcription
# Set Streamlit page configuration
st.set_page_config(
    page_title="Prescription Reader",
    page_icon="πŸ’Š",
    layout="centered",
)
# Header
st.title("Doctor's Prescription Reader πŸ’Š")
# Upload prescription image
uploaded_file = st.file_uploader("Upload Prescription Image", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    # Display uploaded image
    st.image(uploaded_file, caption="Uploaded Prescription", use_column_width=True)
    with st.spinner("Transcribing handwriting..."):
        # Transcribe handwritten notes
        extracted_text = transcribe_handwriting(uploaded_file)
    st.subheader("Transcribed Text from Prescription:")
    st.text(extracted_text)