Update app.py
app.py (CHANGED)
@@ -1,31 +1,36 @@
 import os
 import streamlit as st
-
-import pytesseract
-import cv2
-import numpy as np
+import requests
 from PIL import Image
+import numpy as np
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 from huggingface_hub import login

 # Get the token from environment variables (set in Hugging Face Space)
 token = os.getenv("HF_Token")
 login(token)

-# … [15 removed lines; their contents are truncated in this diff view]
+# Initialize the Hugging Face model
+model_name = "google/paligemma-3b-mix-224"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
+# Function to transcribe handwritten notes using Hugging Face model
+def transcribe_handwriting(image):
+    # Convert image to array and preprocess
+    image = image.convert("RGB")
+    image = np.array(image)
+
+    # Prepare input for the model
+    inputs = tokenizer(image, return_tensors="pt")
+
+    # Generate output
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_length=512)
+
+    transcription = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return transcription

 # Set Streamlit page configuration
 st.set_page_config(
@@ -44,15 +49,8 @@ if uploaded_file is not None:
     # Display uploaded image
     st.image(uploaded_file, caption="Uploaded Prescription", use_column_width=True)

-    with st.spinner("
-    #
-    extracted_text =
-    st.subheader("
-    st.text(extracted_text)
-
-    if extracted_text:
-        # Interpret extracted text using the model
-        with st.spinner("Interpreting the prescription..."):
-            ai_response = interpret_prescription(extracted_text)
-            st.subheader("AI Interpretation:")
-            st.text(ai_response)
+    with st.spinner("Transcribing handwriting..."):
+        # Transcribe handwritten notes
+        extracted_text = transcribe_handwriting(uploaded_file)
+        st.subheader("Transcribed Text from Prescription:")
+        st.text(extracted_text)
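Review note on the model change: the committed code loads google/paligemma-3b-mix-224 through AutoTokenizer/AutoModelForCausalLM and then feeds a NumPy image array to the tokenizer, but a text tokenizer cannot consume pixel data, so transcribe_handwriting as committed will most likely error at inference time. The snippet below is a minimal sketch of how this checkpoint is usually driven for image-to-text; it is not part of the commit, and it assumes a transformers release with PaliGemma support, access to the gated checkpoint via the HF_Token secret, plus an illustrative "ocr" task prompt and max_new_tokens value.

import torch
from PIL import Image
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

# Sketch only (assumptions noted above); an alternative to the committed
# AutoTokenizer/AutoModelForCausalLM + NumPy-array path.
model_id = "google/paligemma-3b-mix-224"
processor = AutoProcessor.from_pretrained(model_id)
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)

def transcribe_handwriting(image: Image.Image) -> str:
    # Pair the image with a task prompt; "ocr" asks the mix checkpoint to read text.
    inputs = processor(text="ocr", images=image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=256)
    # generate() echoes the prompt tokens back, so strip them before decoding.
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return processor.decode(generated, skip_special_tokens=True)

Loading a ~3B-parameter model at import time may also make startup slow on a CPU-only Space.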
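A second note on the Streamlit block: st.file_uploader returns an UploadedFile (a file-like object), not a PIL image, yet transcribe_handwriting(uploaded_file) immediately calls .convert("RGB"), which only PIL images provide. A sketch of the glue that would be needed is below; the uploader label and accepted types are placeholders, since that call sits in the unchanged part of app.py and is not visible in this diff.

import streamlit as st
from PIL import Image

# Sketch only: the uploader label/types are assumptions; the real call is outside this diff.
uploaded_file = st.file_uploader("Upload a prescription image", type=["png", "jpg", "jpeg"])

if uploaded_file is not None:
    st.image(uploaded_file, caption="Uploaded Prescription", use_column_width=True)

    # UploadedFile is file-like, so open it with PIL before handing it to the model.
    image = Image.open(uploaded_file)

    with st.spinner("Transcribing handwriting..."):
        extracted_text = transcribe_handwriting(image)  # helper defined earlier in app.py

    st.subheader("Transcribed Text from Prescription:")
    st.text(extracted_text)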
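Finally, a small robustness note on the unchanged token handling at the top of the file: if the HF_Token secret is not configured in the Space settings, os.getenv("HF_Token") returns None and the unconditional login(token) call cannot authenticate. A guarded variant is sketched below; only the secret name HF_Token comes from the diff.

import os
from huggingface_hub import login

# Sketch only: skip login gracefully when the Space secret is missing.
token = os.getenv("HF_Token")
if token:
    login(token=token)
else:
    # Public models still load without authentication; gated ones will not.
    print("HF_Token secret not set; continuing without Hugging Face login.")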