Yashvj123 committed
Commit 49ac78b · verified · 1 Parent(s): 64942d2

Update app.py

Files changed (1)
  1. app.py +234 -63
app.py CHANGED
@@ -4,45 +4,54 @@ import numpy as np
import tempfile
import os
import easyocr
+ from PIL import Image, ImageDraw, ImageFont
+ from translate import Translator

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
+ from transformers import pipeline

- # Set Hugging Face API keys
+ # Set API keys
os.environ["HUGGINGFACEHUB_API_KEY"] = os.getenv("HF")
os.environ["HF_TOKEN"] = os.getenv("HF")

- # Streamlit page setup
- st.set_page_config(
-     page_title="MediAssist - Prescription Analyzer",
-     layout="wide",
-     page_icon="💊"
- )
-
- st.sidebar.title("💊 MediAssist")
- st.sidebar.markdown("Analyze prescriptions with ease using AI")
- st.sidebar.markdown("---")
- st.sidebar.markdown("🔗 **Connect with me:**")
- st.sidebar.markdown("""
- <div style='display: flex; gap: 10px;'>
-     <a href="https://github.com/Yashvj22" target="_blank">
-         <img src="https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white" style="height:30px;">
-     </a>
-     <a href="https://www.linkedin.com/in/yash-jadhav-454b0a237/" target="_blank">
-         <img src="https://img.shields.io/badge/LinkedIn-0A66C2?style=for-the-badge&logo=linkedin&logoColor=white" style="height:30px;">
-     </a>
- </div>
- """, unsafe_allow_html=True)
- st.sidebar.markdown("---")
+ # Function to save text as an image
+ def save_text_as_image(text, file_path):
+     font = ImageFont.load_default()
+     lines = text.split('\n')
+     max_width = max([font.getbbox(line)[2] for line in lines]) + 20
+     line_height = font.getbbox(text)[3] + 10
+     img_height = line_height * len(lines) + 20
+
+     img = Image.new("RGB", (max_width, img_height), "white")
+     draw = ImageDraw.Draw(img)
+     y = 10
+     for line in lines:
+         draw.text((10, y), line, font=font, fill="black")
+         y += line_height
+
+     img.save(file_path)
+     return file_path
+
+ # Setup
+ st.set_page_config(page_title="MediAssist 💊", layout="wide")

st.markdown("""
- <h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>
- <h3 style='text-align: center;'>Prescription Analyzer using AI and OCR</h3>
- <p style='text-align: center;'>Upload a doctor's prescription image, and MediAssist will extract, translate, and explain it for you.</p>
- <br>
+ <style>
+     .stButton>button {
+         background-color: #4CAF50;
+         color: white;
+         font-weight: bold;
+         padding: 8px 20px;
+         border-radius: 8px;
+     }
+ </style>
""", unsafe_allow_html=True)

+ st.title("💊 MediAssist - Prescription Analyzer")
+ st.markdown("##### Upload your prescription, get AI-based medicine insights, translate and download!")
+
uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])

if uploaded_file:
@@ -50,39 +59,42 @@ if uploaded_file:
        temp_file.write(uploaded_file.read())
        orig_path = temp_file.name

-     # Preprocessing
+     # Preprocess Image
    image = cv2.imread(orig_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(binary_inv, kernel, iterations=1)

-     # Save preprocessed image for future reference/removal
    dilated_path = orig_path.replace(".png", "_dilated.png")
    cv2.imwrite(dilated_path, dilated)

-     # OCR using EasyOCR
+     # OCR
    reader = easyocr.Reader(['en'])
    text_list = reader.readtext(dilated, detail=0)
    text = "\n".join(text_list)

-     # Prompt Template
-     template = """
-     You are a helpful medical assistant.
-
-     Here is a prescription text extracted from an image:
-
-     {prescription_text}
-
-     Please do the following:
-
-     1. Extract only the medicine names mentioned in the prescription (ignore any other text).
-     2. For each medicine, provide:
-     - When to take it (timing and dosage)
-     - Possible side effects
-     - Any special instructions
+     # Display
+     col1, col2 = st.columns([1, 2])
+     with col1:
+         st.image(dilated, caption="Preprocessed Prescription", channels="GRAY", use_container_width=True)
+     with col2:
+         st.success("✅ Image Uploaded and Preprocessed")
+         st.markdown("#### 📝 Extracted Text from Image")
+         st.code(text)

-     Format your answer as bullet points, listing only medicines and their details.
+     # Prompt
+     template = """
+     You are a helpful medical assistant.
+     Here is a prescription text extracted from an image:
+     {prescription_text}
+     Please do the following:
+     1. Extract only the medicine names.
+     2. For each, give:
+     - Dosage and Timing
+     - Possible Side Effects
+     - Special Instructions
+     Format in bullet points, medicine-wise.
    """
    prompt = PromptTemplate(input_variables=["prescription_text"], template=template)

@@ -93,7 +105,7 @@ if uploaded_file:
        max_new_tokens=300,
        task="conversational"
    )
-
+
    llm = ChatHuggingFace(
        llm=llm_model,
        repo_id="aaditya/Llama3-OpenBioLLM-70B",
@@ -102,30 +114,189 @@ if uploaded_file:
        max_new_tokens=300,
        task="conversational"
    )
-
+
    chain = LLMChain(llm=llm, prompt=prompt)

-     col1, col2 = st.columns([1, 2])
-
-     with col1:
-         st.image(dilated, caption="Preprocessed Prescription", channels="GRAY", use_container_width=True)
+     if st.button("🔍 Analyze Extracted Text"):
+         with st.spinner("Analyzing with LLM..."):
+             response = chain.run(prescription_text=text)
+             st.markdown("#### 💡 Analyzed Medicine Info")
+             st.text_area("Output", response, height=300)

-     with col2:
-         st.success("✅ Prescription Uploaded & Preprocessed Successfully")
-         st.markdown("### 📜 Extracted Text")
-         st.code(text)
+             # Save txt and image
+             txt_path = "prescription_output.txt"
+             with open(txt_path, "w") as f:
+                 f.write(response)

-     if st.button("🔍 Analyze Text"):
-         with st.spinner("Analyzing..."):
-             response = chain.run(prescription_text=text)
-             st.success(response)
+             img_path = "prescription_output.png"
+             save_text_as_image(response, img_path)

-     # Cleanup temp files
+             # Target language code (like 'hi' for Hindi, 'mr' for Marathi, 'gu' for Gujarati)
+             target_lang = "hi"
+
+             translator = Translator(to_lang=target_lang)
+             hindi_text = translator.translate(response)
+
+             # # Translation to Hindi
+             # translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-hi")
+             # hindi_text = translator(response, max_length=400)[0]['translation_text']
+
+             st.markdown("#### 🌐 Translate to Hindi")
+             st.text_area("Translated (Hindi)", hindi_text, height=300)
+
+             st.markdown("#### 📥 Download Options")
+             colA, colB, colC, colD = st.columns(4)
+             with colA:
+                 st.download_button("⬇️ Download TXT", data=response, file_name="medicine_analysis.txt")
+             with colB:
+                 with open(img_path, "rb") as img_file:
+                     st.download_button("🖼️ Download Image", data=img_file, file_name="medicine_analysis.png", mime="image/png")
+             with colC:
+                 st.download_button("⬇️ Hindi TXT", data=hindi_text, file_name="hindi_medicine_analysis.txt")
+             with colD:
+                 hindi_img_path = "hindi_output.png"
+                 save_text_as_image(hindi_text, hindi_img_path)
+                 with open(hindi_img_path, "rb") as hindi_img_file:
+                     st.download_button("🖼️ Hindi Image", data=hindi_img_file, file_name="hindi_output.png", mime="image/png")
+
+     # Cleanup
    os.remove(orig_path)
    os.remove(dilated_path)
-
else:
-     st.markdown("<center><i>Upload a prescription image to begin analysis.</i></center>", unsafe_allow_html=True)
+     st.markdown("<center><i>📸 Upload a prescription image to get started</i></center>", unsafe_allow_html=True)
+
+
+
+
+
+ # import streamlit as st
+ # import cv2
+ # import numpy as np
+ # import tempfile
+ # import os
+ # import easyocr
+
+ # from langchain.prompts import PromptTemplate
+ # from langchain.chains import LLMChain
+ # from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
+
+ # # Set Hugging Face API keys
+ # os.environ["HUGGINGFACEHUB_API_KEY"] = os.getenv("HF")
+ # os.environ["HF_TOKEN"] = os.getenv("HF")
+
+ # # Streamlit page setup
+ # st.set_page_config(
+ #     page_title="MediAssist - Prescription Analyzer",
+ #     layout="wide",
+ #     page_icon="💊"
+ # )
+
+ # st.sidebar.title("💊 MediAssist")
+ # st.sidebar.markdown("Analyze prescriptions with ease using AI")
+ # st.sidebar.markdown("---")
+ # st.sidebar.markdown("🔗 **Connect with me:**")
+ # st.sidebar.markdown("""
+ # <div style='display: flex; gap: 10px;'>
+ #     <a href="https://github.com/Yashvj22" target="_blank">
+ #         <img src="https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white" style="height:30px;">
+ #     </a>
+ #     <a href="https://www.linkedin.com/in/yash-jadhav-454b0a237/" target="_blank">
+ #         <img src="https://img.shields.io/badge/LinkedIn-0A66C2?style=for-the-badge&logo=linkedin&logoColor=white" style="height:30px;">
+ #     </a>
+ # </div>
+ # """, unsafe_allow_html=True)
+ # st.sidebar.markdown("---")
+
+ # st.markdown("""
+ # <h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>
+ # <h3 style='text-align: center;'>Prescription Analyzer using AI and OCR</h3>
+ # <p style='text-align: center;'>Upload a doctor's prescription image, and MediAssist will extract, translate, and explain it for you.</p>
+ # <br>
+ # """, unsafe_allow_html=True)
+
+ # uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
+
+ # if uploaded_file:
+ #     with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
+ #         temp_file.write(uploaded_file.read())
+ #         orig_path = temp_file.name
+
+ #     # Preprocessing
+ #     image = cv2.imread(orig_path)
+ #     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ #     _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
+ #     kernel = np.ones((3, 3), np.uint8)
+ #     dilated = cv2.dilate(binary_inv, kernel, iterations=1)
+
+ #     # Save preprocessed image for future reference/removal
+ #     dilated_path = orig_path.replace(".png", "_dilated.png")
+ #     cv2.imwrite(dilated_path, dilated)
+
+ #     # OCR using EasyOCR
+ #     reader = easyocr.Reader(['en'])
+ #     text_list = reader.readtext(dilated, detail=0)
+ #     text = "\n".join(text_list)
+
+ #     # Prompt Template
+ #     template = """
+ #     You are a helpful medical assistant.
+
+ #     Here is a prescription text extracted from an image:
+
+ #     {prescription_text}
+
+ #     Please do the following:
+
+ #     1. Extract only the medicine names mentioned in the prescription (ignore any other text).
+ #     2. For each medicine, provide:
+ #     - When to take it (timing and dosage)
+ #     - Possible side effects
+ #     - Any special instructions
+
+ #     Format your answer as bullet points, listing only medicines and their details.
+ #     """
+ #     prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
+
+ #     llm_model = HuggingFaceEndpoint(
+ #         repo_id="aaditya/Llama3-OpenBioLLM-70B",
+ #         provider="nebius",
+ #         temperature=0.6,
+ #         max_new_tokens=300,
+ #         task="conversational"
+ #     )
+
+ #     llm = ChatHuggingFace(
+ #         llm=llm_model,
+ #         repo_id="aaditya/Llama3-OpenBioLLM-70B",
+ #         provider="nebius",
+ #         temperature=0.6,
+ #         max_new_tokens=300,
+ #         task="conversational"
+ #     )
+
+ #     chain = LLMChain(llm=llm, prompt=prompt)
+
+ #     col1, col2 = st.columns([1, 2])
+
+ #     with col1:
+ #         st.image(dilated, caption="Preprocessed Prescription", channels="GRAY", use_container_width=True)
+
+ #     with col2:
+ #         st.success("✅ Prescription Uploaded & Preprocessed Successfully")
+ #         st.markdown("### 📜 Extracted Text")
+ #         st.code(text)
+
+ #     if st.button("🔍 Analyze Text"):
+ #         with st.spinner("Analyzing..."):
+ #             response = chain.run(prescription_text=text)
+ #             st.success(response)
+
+ #     # Cleanup temp files
+ #     os.remove(orig_path)
+ #     os.remove(dilated_path)
+
+ # else:
+ #     st.markdown("<center><i>Upload a prescription image to begin analysis.</i></center>", unsafe_allow_html=True)


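For reference, the two pieces this commit introduces (the PIL-based save_text_as_image helper and the translate-based Hindi translation) can also be exercised outside Streamlit. The sketch below is illustrative only and is not part of the commit; the sample prescription text is hypothetical, and it assumes the same Pillow and translate packages that the new imports pull in.

# Standalone sketch (not part of the commit): exercise the new helpers directly.
from PIL import Image, ImageDraw, ImageFont
from translate import Translator

def save_text_as_image(text, file_path):
    # Same approach as the commit's helper: default PIL font, one draw.text call per line.
    font = ImageFont.load_default()
    lines = text.split("\n")
    max_width = max(font.getbbox(line)[2] for line in lines) + 20
    line_height = font.getbbox("Ag")[3] + 10  # per-line height taken from a sample string
    img = Image.new("RGB", (max_width, line_height * len(lines) + 20), "white")
    draw = ImageDraw.Draw(img)
    y = 10
    for line in lines:
        draw.text((10, y), line, font=font, fill="black")
        y += line_height
    img.save(file_path)
    return file_path

sample = "- Paracetamol 500 mg\n  - Dosage: 1 tablet after meals"  # hypothetical LLM output
save_text_as_image(sample, "medicine_analysis.png")

translator = Translator(to_lang="hi")   # same 'translate' package API the commit uses
print(translator.translate(sample))     # prints the Hindi translation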