Yashvj123 committed on
Commit d26cb25 · verified · 1 Parent(s): 922b928

Update app.py

Files changed (1)
  1. app.py +47 -36
app.py CHANGED
@@ -48,24 +48,29 @@ def save_text_as_image(text, file_path):
     img.save(file_path)
     return file_path
 
-# Setup UI
-st.set_page_config(page_title="MediAssist 💊", layout="wide")
+st.sidebar.title("💊 MediAssist")
+st.sidebar.markdown("Analyze prescriptions with ease using AI")
+st.sidebar.markdown("---")
+st.sidebar.markdown("🔗 **Connect with me:**")
+st.sidebar.markdown("""
+<div style='display: flex; gap: 10px;'>
+<a href="https://github.com/Yashvj22" target="_blank">
+<img src="https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white" style="height:30px;">
+</a>
+<a href="https://www.linkedin.com/in/yash-jadhav-454b0a237/" target="_blank">
+<img src="https://img.shields.io/badge/LinkedIn-0A66C2?style=for-the-badge&logo=linkedin&logoColor=white" style="height:30px;">
+</a>
+</div>
+""", unsafe_allow_html=True)
+st.sidebar.markdown("---")
 
 st.markdown("""
-<style>
-.stButton>button {
-    background-color: #4CAF50;
-    color: white;
-    font-weight: bold;
-    padding: 8px 20px;
-    border-radius: 8px;
-}
-</style>
+<h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>
+<h3 style='text-align: center;'>Prescription Analyzer using AI and OCR</h3>
+<p style='text-align: center;'>Upload a doctor's prescription image, and MediAssist will extract, translate, and explain it for you.</p>
+<br>
 """, unsafe_allow_html=True)
 
-st.title("💊 MediAssist - Prescription Analyzer")
-st.markdown("##### Upload your prescription, get AI-based medicine insights, translate and download!")
-
 uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
 
 if uploaded_file:
@@ -98,35 +103,41 @@ if uploaded_file:
 
     # Prompt LLM
     template = """
-    You are a helpful medical assistant.
-    Here is a prescription text extracted from an image:
-    {prescription_text}
-    Please do the following:
-    1. Extract only the medicine names.
-    2. For each, give:
-    - Dosage and Timing
-    - Possible Side Effects
-    - Special Instructions
-    Format in bullet points, medicine-wise.
+    You are a helpful medical assistant.
+
+    Here is a prescription text extracted from an image:
+
+    {prescription_text}
+
+    Please do the following:
+
+    1. Extract only the medicine names mentioned in the prescription (ignore any other text).
+    2. For each medicine, provide:
+    - When to take it (timing and dosage)
+    - Possible side effects
+    - Any special instructions
+
+    Format your answer as bullet points, listing only medicines and their details.
     """
+
     prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
 
-    llm_model = HuggingFaceEndpoint(
+    llm = HuggingFaceEndpoint(
         repo_id="aaditya/Llama3-OpenBioLLM-70B",
         provider="nebius",
         temperature=0.6,
         max_new_tokens=300,
-        task="conversational"
+        task="text-generation"
     )
 
-    llm = ChatHuggingFace(
-        llm=llm_model,
-        repo_id="aaditya/Llama3-OpenBioLLM-70B",
-        provider="nebius",
-        temperature=0.6,
-        max_new_tokens=300,
-        task="conversational"
-    )
+    # llm = ChatHuggingFace(
+    #     llm=llm_model,
+    #     repo_id="aaditya/Llama3-OpenBioLLM-70B",
+    #     provider="nebius",
+    #     temperature=0.6,
+    #     max_new_tokens=300,
+    #     task="conversational"
+    # )
 
     chain = LLMChain(llm=llm, prompt=prompt)
 
@@ -137,7 +148,7 @@ if uploaded_file:
     with st.spinner("Analyzing with LLM..."):
         response = chain.run(prescription_text=text)
     st.markdown("#### 💡 AI-based Medicine Analysis")
-    st.text_area("LLM Output", response, height=300)
+    st.success(response)
 
     # Save txt and image
     txt_path = "medicine_analysis.txt"
@@ -189,7 +200,7 @@ if uploaded_file:
     try:
        os.remove(orig_path)
        os.remove(dilated_path)
-       llm.clear()
+
    except:
        pass
 
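Note on the LLM wiring above: the commit drops the ChatHuggingFace wrapper and calls HuggingFaceEndpoint directly with task="text-generation". The sketch below isolates that prompt → endpoint → chain path outside the Streamlit app. It is a minimal sketch, not the app's actual code: the import locations (langchain.prompts, langchain.chains, langchain_huggingface), the HF_TOKEN environment variable, and the sample prescription text are assumptions for illustration only.

import os

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint

# Same prompt idea as the commit: list each medicine with its details.
template = """
You are a helpful medical assistant.

Here is a prescription text extracted from an image:

{prescription_text}

List each medicine with its timing and dosage, possible side effects, and any special instructions.
"""

prompt = PromptTemplate(input_variables=["prescription_text"], template=template)

# Plain text-generation endpoint, as in the commit (no ChatHuggingFace wrapper).
llm = HuggingFaceEndpoint(
    repo_id="aaditya/Llama3-OpenBioLLM-70B",
    provider="nebius",
    temperature=0.6,
    max_new_tokens=300,
    task="text-generation",
    huggingfacehub_api_token=os.environ.get("HF_TOKEN"),  # assumption: token supplied via env var
)

chain = LLMChain(llm=llm, prompt=prompt)

if __name__ == "__main__":
    sample_text = "Tab Paracetamol 500mg 1-0-1 after food x 5 days"  # made-up example input
    print(chain.run(prescription_text=sample_text))

Used this way the chain stays string-in/string-out, which matches switching the task from "conversational" to "text-generation" in the diff.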