santhoshraghu committed
Commit a6a4e7b · verified · 1 parent: f46044d

Update app.py

Files changed (1):
  1. app.py (+62 -10)
app.py CHANGED
```diff
@@ -24,6 +24,7 @@ from langchain_openai import OpenAIEmbeddings, ChatOpenAI
 from langchain_community.vectorstores import Qdrant
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.embeddings import SentenceTransformerEmbeddings
+from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM
 import nest_asyncio
 
 torch.cuda.empty_cache()
```
```diff
@@ -35,7 +36,7 @@ st.set_page_config(page_title="DermBOT", page_icon="🧬", layout="centered")
 
 
 # === Model Selection ===
-available_models = ["OpenAI GPT-4o", "LLaMA 3", "Gemini Pro"]
+available_models = ["GPT-4o", "LLaMA 4 Maverick", "Gemini 2.5 Pro", "All"]
 st.session_state["selected_model"] = st.sidebar.selectbox("Select LLM Model", available_models)
 
 
```
```diff
@@ -130,6 +131,37 @@ elif "Gemini" in selected_model:
         return response.text
 
     llm = get_gemini_response
+
+elif "All" in selected_model:
+
+    from groq import Groq
+    import google.generativeai as genai
+    genai.configure(api_key=st.secrets["GEMINI_API_KEY"])
+    pair_ranker = pipeline("text-classification", model="llm-blender/PairRM")
+    gen_fuser = pipeline("text-generation", model="llm-blender/gen_fuser_3b", max_length=2048, do_sample=False)
+
+    def get_all_model_responses(prompt):
+        openai_resp = ChatOpenAI(model="gpt-4o", temperature=0.2, api_key=st.secrets["OPENAI_API_KEY"]).invoke(
+            [{"role": "system", "content": prompt}]).content
+
+        gemini = genai.GenerativeModel("gemini-2.5-pro-exp-03-25")
+        gemini_resp = gemini.generate_content(prompt).text
+
+        llama = Groq(api_key=st.secrets["GROQ_API_KEY"])
+        llama_resp = llama.chat.completions.create(
+            model="meta-llama/llama-4-maverick-17b-128e-instruct",
+            messages=[{"role": "user", "content": prompt}],
+            temperature=1, max_completion_tokens=1024, top_p=1, stream=False
+        ).choices[0].message.content
+
+        return [openai_resp, gemini_resp, llama_resp]
+
+    def rank_and_fuse(prompt, responses):
+        ranked = [(resp, pair_ranker(f"{prompt}\n\n{resp}")[0][1]['score']) for resp in responses]
+        ranked.sort(key=lambda x: x[1], reverse=True)
+        fusion_input = "\n\n".join([f"[Answer {i+1}]: {ans}" for i, (ans, _) in enumerate(ranked)])
+        return gen_fuser(f"Fuse these responses:\n{fusion_input}", return_full_text=False)[0]['generated_text']
+
 
 else:
     st.error("Unsupported model selected.")
```
```diff
@@ -293,11 +325,29 @@ def export_chat_to_pdf(messages):
 
 
 #Reranker utility
-def rerank_with_cohere(query: str, documents: list, top_n: int = 5) -> list:
+def rerank_with_cohere(query, documents, top_n=5):
     if not documents:
         return []
-
-    raw_texts = [doc.page_content if hasattr(doc, "page_content") else str(doc) for doc in documents]
+    raw_texts = [doc.page_content for doc in documents]
     results = co.rerank(query=query, documents=raw_texts, top_n=min(top_n, len(raw_texts)), model="rerank-v3.5")
-    reranked_docs = [documents[result.index] for result in results]
-    return reranked_docs
+    return [documents[result.index] for result in results]
+
+
+
+
+
+pair_ranker = pipeline(
+    "text-classification",
+    model="llm-blender/PairRM",
+    tokenizer="llm-blender/PairRM",
+    return_all_scores=True
+)
+
+gen_fuser = pipeline(
+    "text-generation",
+    model="llm-blender/gen_fuser_3b",
+    tokenizer="llm-blender/gen_fuser_3b",
+    max_length=2048,
+    do_sample=False
+)
+
```
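A hedged usage sketch for the Cohere call inside `rerank_with_cohere`. One caveat worth flagging: in recent versions of the `cohere` SDK, `rerank()` returns a response object whose hits live under `.results`, so iterating the return value directly (as the function above does) relies on older SDK behaviour. The client construction and API key below are illustrative, not taken from app.py.

```python
import cohere

co = cohere.Client(api_key="YOUR_API_KEY")  # illustrative; app.py builds its client elsewhere

docs = [
    "Psoriasis is a chronic autoimmune skin condition.",
    "Eczema commonly appears in skin folds.",
]
response = co.rerank(
    query="What is psoriasis?",
    documents=docs,
    top_n=2,
    model="rerank-v3.5",
)
# Each hit carries the index of the original document plus a relevance score.
for hit in response.results:
    print(hit.index, hit.relevance_score, docs[hit.index])
```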
```diff
@@ -304,19 +354,21 @@
 
 # Final answer generation using reranked context
-def get_reranked_response(query: str):
+def get_reranked_response(query):
     docs = retriever.get_relevant_documents(query)
     reranked_docs = rerank_with_cohere(query, docs)
     context = "\n\n".join([doc.page_content for doc in reranked_docs])
     prompt = AI_PROMPT_TEMPLATE.format(question=query, context=context)
 
+    if selected_model == "All":
+        responses = get_all_model_responses(prompt)
+        fused = rank_and_fuse(prompt, responses)
+        return type("Obj", (), {"content": fused})
+
     if callable(llm):
-        # Gemini or LLaMA
         return type("Obj", (), {"content": llm(prompt)})
     else:
-        # OpenAI LangChain interface
         return llm.invoke([{"role": "system", "content": prompt}])
 
-
 # === App UI ===
 
 st.title("🧬 DermBOT — Skin AI Assistant")
```
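A short note on the `type("Obj", (), {"content": ...})` idiom used in `get_reranked_response`: it builds a throwaway class whose class attribute `content` holds the generated text, so callers can read `.content` uniformly whether the reply came from LangChain's `invoke` (a message object exposing `.content`) or from a plain-string backend. A minimal illustration:

```python
# The dynamic type() call builds a disposable class; content is a class
# attribute, so no instantiation is needed before reading it.
reply = type("Obj", (), {"content": "fused answer text"})
print(reply.content)  # -> fused answer text

# types.SimpleNamespace expresses the same intent more conventionally:
from types import SimpleNamespace
reply2 = SimpleNamespace(content="fused answer text")
print(reply2.content)
```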
 