import os

import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Load .env (if you're using one)
load_dotenv()

# Instantiate the client once per script run
client = InferenceClient(
    provider="auto",
    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],
)

# Page config (must be the first Streamlit command in the script)
st.set_page_config(page_title="Educational Chatbot", layout="wide")
st.title("🎓 Educational Chatbot")

# Initialize chat history
if "history" not in st.session_state:
    st.session_state.history = []

# Render existing messages
for sender, message in st.session_state.history:
    if sender == "You":
        st.chat_message("user").write(message)
    else:
        st.chat_message("assistant").write(message)

# Input box
user_input = st.chat_input("Ask me anything…")

if user_input:
    # Display the user message immediately
    st.chat_message("user").write(user_input)
    st.session_state.history.append(("You", user_input))

    # Assistant placeholder; st.empty() lets us overwrite "Thinking..." in place
    # instead of appending a second message to the same bubble
    with st.chat_message("assistant"):
        placeholder = st.empty()
        placeholder.write("⏳ Thinking...")

    try:
        # Call the HF Inference API (chat completion)
        completion = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-R1",
            messages=[{"role": "user", "content": user_input}],
        )
        reply = completion.choices[0].message.content
    except Exception as e:
        reply = f"❌ API Error: {e}"

    # Replace the placeholder with the real response
    placeholder.write(reply)
    st.session_state.history.append(("Bot", reply))
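
# --- Optional: give the model conversational memory ---
# The request above only forwards the latest user message, so the model sees
# none of the earlier turns. Below is a minimal sketch of a history-aware
# request; it assumes the same ("You"/"Bot", text) tuples stored in
# st.session_state.history, and the helper name build_messages is hypothetical
# (not part of the original script).
def build_messages(history, latest_user_input):
    """Convert ("You"/"Bot", text) tuples into chat-completion message dicts."""
    messages = []
    for sender, text in history:
        role = "user" if sender == "You" else "assistant"
        messages.append({"role": role, "content": text})
    messages.append({"role": "user", "content": latest_user_input})
    return messages

# Usage sketch: in the API call above, replace the messages argument with
#   messages=build_messages(st.session_state.history[:-1], user_input)
# (history[:-1] skips the just-appended ("You", user_input) entry so the
# latest question is not sent twice).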