"""Streamlit chat app for interview preparation, backed by a Hugging Face
Inference endpoint (Mistral-7B-Instruct).

Run with: streamlit run <this_file>.py
Requires HUGGINGFACEHUB_API_TOKEN in the environment (or a local .env file).
"""

import os

import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Load environment variables from .env (for local development).
load_dotenv()

# Initialize the inference client. `token` may be None here; we warn the
# user below rather than crashing at import time.
token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
client = InferenceClient(provider="auto", api_key=token)

st.set_page_config(page_title="Interview Prep Bot", layout="wide")
st.title("🎓 Interview Preparation Chatbot")

# Surface a missing token early — otherwise every request fails with an
# opaque authentication error inside the chat bubble.
if not token:
    st.warning("⚠️ HUGGINGFACEHUB_API_TOKEN is not set — API calls will fail.")

# Conversation history: list of (sender, message) tuples, where sender is
# "You" for the user and "Bot" for the assistant.
if "history" not in st.session_state:
    st.session_state.history = []

# Re-render prior turns on every rerun (Streamlit re-executes the script).
for sender, msg in st.session_state.history:
    role = "user" if sender == "You" else "assistant"
    st.chat_message(role).write(msg)

# Prompt input
text = st.chat_input("Ask me anything about interview prep…")
if text:
    # Record and echo the user's message.
    st.session_state.history.append(("You", text))
    st.chat_message("user").write(text)

    # Use st.empty() INSIDE the assistant bubble so the "Thinking..."
    # indicator is replaced by the reply. Calling .write() twice on the
    # same st.chat_message container appends a second line instead of
    # replacing the first.
    with st.chat_message("assistant"):
        placeholder = st.empty()
        placeholder.write("⏳ Thinking...")

        # Rebuild the full conversation in the chat-completions message
        # format so the model has context for follow-up questions.
        messages = [
            {"role": "user" if s == "You" else "assistant", "content": m}
            for s, m in st.session_state.history
        ]

        try:
            completion = client.chat.completions.create(
                model="mistralai/Mistral-7B-Instruct-v0.1",
                messages=messages,
            )
            # Documented accessor is attribute-style; content may be None
            # (e.g. empty completion), so guard before .strip().
            content = completion.choices[0].message.content
            reply = content.strip() if content else ""
        except Exception as e:
            # Best-effort: show the failure in the chat instead of crashing
            # the whole app rerun.
            reply = f"❌ API Error: {e}"

        # Replace the indicator with the final reply and persist it.
        placeholder.write(reply)

    st.session_state.history.append(("Bot", reply))