import os
import json

import streamlit as st
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Load environment variables for local development
load_dotenv()

# Initialize the Hugging Face Inference client
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
client = InferenceClient(provider="auto", api_key=hf_token)

# Streamlit configuration
st.set_page_config(
    page_title="Interview Prep Bot",
    page_icon="🧠",
    layout="centered"
)
st.title("🎓 Interview Preparation Chatbot")

# Session state initialization
if "questions" not in st.session_state:
    st.session_state.questions = []
if "topic" not in st.session_state:
    st.session_state.topic = "Machine Learning"
if "score" not in st.session_state:
    st.session_state.score = 0
if "correct_count" not in st.session_state:
    st.session_state.correct_count = 0
if "incorrect_count" not in st.session_state:
    st.session_state.incorrect_count = 0

# Sidebar: Topic and Score display
TOPICS = [
    "Machine Learning",
    "Data Structures",
    "Python",
    "Generative AI",
    "Computer Vision",
    "Deep Learning",
]
st.sidebar.header("Practice Topic")
st.session_state.topic = st.sidebar.selectbox(
    "Select a topic:",
    TOPICS,
    index=TOPICS.index(st.session_state.topic)
)
st.sidebar.markdown("---")
st.sidebar.header("Your Score")
st.sidebar.markdown(f"**Total:** {len(st.session_state.questions)}")
st.sidebar.markdown(f"**Correct:** {st.session_state.correct_count}")
st.sidebar.markdown(f"**Incorrect:** {st.session_state.incorrect_count}")
st.sidebar.markdown(f"**Points:** {st.session_state.score}")

# Function to fetch a new MCQ question via the HF Inference API
def fetch_question(topic):
    # Double braces escape the literal JSON template so str.format only substitutes {topic}
    prompt = {
        "role": "system",
        "content": (
            "You are an expert interviewer. "
            "Generate a multiple-choice question on the topic of {topic}. "
            "Respond with a JSON object: "
            '{{"question": str, "options": [str, ...], "correct_index": int}}'
        ).format(topic=topic)
    }
    response = client.chat.completions.create(
        model="mistralai/Mistral-7B-Instruct-v0.1",
        messages=[prompt]
    )
    content = response.choices[0].message.content.strip()
    try:
        return json.loads(content)
    except json.JSONDecodeError:
        st.error("Failed to parse question. Please try again.")
        return None

# Button for initial and subsequent questions
button_label = "Get Question" if not st.session_state.questions else "Next Question"
if st.button(button_label):
    q = fetch_question(st.session_state.topic)
    if q:
        st.session_state.questions.append({**q, "selected": None, "submitted": False})

# Display questions and capture answers
for idx, q in enumerate(st.session_state.questions):
    st.markdown(f"### Question {idx + 1}")
    st.write(q["question"])

    # Radio options (bind opts as a default argument to avoid late-binding in the loop)
    opts = q["options"]
    sel = st.radio(
        "Choose an answer:",
        options=list(range(len(opts))),
        format_func=lambda i, opts=opts: opts[i],
        index=q["selected"] if q["selected"] is not None else 0,
        key=f"radio_{idx}",
        disabled=q["submitted"]
    )
    st.session_state.questions[idx]["selected"] = sel

    # Submit button
    if not q["submitted"]:
        if st.button("Submit Answer", key=f"submit_{idx}"):
            st.session_state.questions[idx]["submitted"] = True
            if sel == q["correct_index"]:
                st.success("Correct!")
                st.session_state.score += 10
                st.session_state.correct_count += 1
            else:
                st.error(f"Incorrect! Correct answer: {opts[q['correct_index']]}.")
                st.session_state.score -= 10
                st.session_state.incorrect_count += 1

# Footer info
st.markdown("---")
st.markdown("*Points: +10 for correct, -10 for incorrect.*")