Anshini's picture
Update app.py
b02d824 verified
raw
history blame
6.59 kB
import streamlit as st
import os
from langchain_huggingface import HuggingFaceEndpoint, HuggingFacePipeline, ChatHuggingFace
from langchain_core.messages import HumanMessage,SystemMessage,AIMessage
# Hugging Face API token, read from the environment (None when unset;
# the chatbot pages decide how to react to a missing token).
hf_token = os.getenv("HF_TOKEN")

# Default to the landing page on first load of the session.
st.session_state.setdefault("page", "home")


def switch_page(page_name):
    """Route the app to *page_name* by updating the session-state router key."""
    st.session_state.page = page_name
# Landing page: one button per domain navigates to that domain's chat page.
if st.session_state.page == "home":
    st.title("🤖 Innomatics ChatGenius Hub")
    st.markdown("Choose a domain to chat with an expert model:")

    col1, col2, col3 = st.columns(3)

    with col1:
        if st.button("Python 🐍"):
            switch_page("python")
        if st.button("Statistics 📈"):
            switch_page("statistics")

    # SQL, ML and GenAI all live in the middle column (the original opened
    # this column twice; a single context renders them in the same order).
    with col2:
        if st.button("SQL 🛢️"):
            switch_page("sql")
        if st.button("Machine Learning 🤖"):
            switch_page("ml")
        if st.button("GenAI🔮🤖"):
            switch_page("genai")

    with col3:
        if st.button("Power BI 📊"):
            switch_page("powerbi")
        if st.button("Deep Learning 🧠"):
            switch_page("deeplearning")
# Example domain-specific chatbot page
elif st.session_state.page == "python":
st.title("Python Chatbot 🐍")
# hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN") or os.getenv("HF_TOKEN")
# if not hf_token:
# st.error("Please add your Hugging Face API token to Secrets (HUGGINGFACEHUB_API_TOKEN or HF_TOKEN).")
# st.stop()
# # Setup the LangChain HuggingFaceEndpoint and ChatHuggingFace LLM
# deep_seek_model = HuggingFaceEndpoint(
# repo_id="deepseek-ai/DeepSeek-R1",
# # provider = 'nebius'
# temperature=0.7,
# max_new_tokens=100,
# task="conversational",
# huggingfacehub_api_token=hf_token,
# )
# deepseek = ChatHuggingFace(
# llm=deep_seek_model,
# repo_id="deepseek-ai/DeepSeek-R1",
# # provider="nebius",
# temperature=0.7,
# max_new_tokens=100,
# task="conversational"
# )
gemma_model = HuggingFaceEndpoint(
repo_id="google/gemma-3-27b-it",
temperature=0.7,
max_new_tokens=512,
task="conversational",
huggingfacehub_api_token=hf_token,
)
chat_gemma = ChatHuggingFace(
llm=gemma_model,
repo_id="google/gemma-3-27b-it",
temperature=0.7,
max_new_tokens=512,
task="conversational",
)
# Initialize session state for chat history
if "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content="Answer like a 10 year experinced Python developer")
]
def generate_response(user_input):
# Append user message
st.session_state.messages.append(HumanMessage(content=user_input))
# Invoke the model
response = deepseek.invoke(st.session_state.messages)
# Append AI response
st.session_state.messages.append(AIMessage(content=response))
return response
# User input
user_input = st.text_input("Ask a question about Python:")
if user_input:
with st.spinner("Getting answer..."):
answer = generate_response(user_input)
st.markdown(f"**Answer:** {answer}")
# Display chat history
if st.session_state.messages:
for msg in st.session_state.messages[1:]: # skip initial SystemMessage
if isinstance(msg, HumanMessage):
st.markdown(f"**You:** {msg.content}")
elif isinstance(msg, AIMessage):
st.markdown(f"**Bot:** {msg.content}")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))
# Here you can load your Python LLM and chat interface
elif st.session_state.page == "sql":
st.title("SQL Chatbot 🛢️")
if not hf_token:
st.error("Please add your Hugging Face API token as an environment variable.")
st.stop()
# Initialize the LLaMA model from HuggingFace (via Nebius provider)
llama_model = HuggingFaceEndpoint(
repo_id="meta-llama/Llama-3.1-8B-Instruct",
temperature=0.7,
max_new_tokens=512,
task="conversational",
huggingfacehub_api_token=hf_token,
)
llama = ChatHuggingFace(
llm=llama_model,
repo_id="meta-llama/Llama-3.1-8B-Instruct",
# provider="nebius",
temperature=0.7,
max_new_tokens=512,
task="conversational"
)
# Streamlit A
st.markdown("Ask anything related to SQL interviews!")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = [SystemMessage(content="Answer clearly like a technical 10 year experienced person in SQL .")]
# User input
user_input = st.text_input("💡 Ask your SQL interview question:", placeholder="e.g., give me 10 SQL interview questions with answers")
def generate_response(user_input):
st.session_state.messages.append(HumanMessage(content=user_input))
response = llama.invoke(st.session_state.messages)
st.session_state.messages.append(AIMessage(content=response))
return response
# Display response
if user_input:
with st.spinner("Thinking..."):
answer = generate_response(user_input)
st.markdown(f"**Answer:** {answer}")
# Show chat history
st.markdown("### 📜 Chat History")
for msg in st.session_state.messages[1:]: # Skip SystemMessage
if isinstance(msg, HumanMessage):
st.markdown(f"**You:** {msg.content}")
elif isinstance(msg, AIMessage):
st.markdown(f"**Bot:** {msg.content}")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))
# Load SQL chatbot here
elif st.session_state.page == "powerbi":
st.title("Power BI Chatbot 📊")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))
elif st.session_state.page == "ml":
st.title("Machine Learning Chatbot 🤖")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))
elif st.session_state.page == "deeplearning":
st.title("Deep Learning Chatbot 🧠")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))
elif st.session_state.page == "statistics":
st.title("Statistics Chatbot 📈")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))
elif st.session_state.page == "genai":
st.title("GenAI Chatbot 📈")
st.button("⬅️ Back to Home", on_click=lambda: switch_page("home"))