"""Streamlit chatbot that serves LLM-generated multiple-choice interview questions.

Questions are produced on demand by a hosted model (Hugging Face Inference API)
for a user-selected topic; each answer is graded immediately at +10/-10 points.
"""

import json
import os

import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Load environment variables (expects HUGGINGFACEHUB_API_TOKEN in a .env file).
load_dotenv()
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
client = InferenceClient(provider="auto", api_key=hf_token)

# Streamlit page configuration
st.set_page_config(
    page_title="Interview Prep Bot",
    page_icon="🧠",
    layout="centered",
)
st.title("🎓 Interview Preparation Chatbot")

# Initialize session state (idempotent across Streamlit reruns).
# "questions" holds dicts: question / options / correct_index / selected / submitted.
_DEFAULTS = {
    "questions": [],
    "topic": "Machine Learning",
    "score": 0,
    "correct_count": 0,
    "incorrect_count": 0,
}
for _key, _default in _DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Sidebar: topic selection and running score.
TOPICS = [
    "Machine Learning",
    "Data Structures",
    "Python",
    "Generative AI",
    "Computer Vision",
    "Deep Learning",
]
st.sidebar.header("Practice Topic")
st.session_state.topic = st.sidebar.selectbox(
    "Select a topic:",
    TOPICS,
    index=TOPICS.index(st.session_state.topic),
)
st.sidebar.markdown("---")
st.sidebar.header("Your Score")
st.sidebar.markdown(f"**Total:** {len(st.session_state.questions)}")
st.sidebar.markdown(f"**Correct:** {st.session_state.correct_count}")
st.sidebar.markdown(f"**Incorrect:** {st.session_state.incorrect_count}")
st.sidebar.markdown(f"**Points:** {st.session_state.score}")


def _extract_json(text):
    """Best-effort extraction of a JSON object from an LLM reply.

    Models often wrap JSON in markdown fences or surrounding prose, so parse
    the span from the first '{' to the last '}' rather than the raw reply.

    Raises:
        ValueError: no braces found in *text*.
        json.JSONDecodeError: the extracted span is not valid JSON.
    """
    start = text.find("{")
    end = text.rfind("}")
    if start == -1 or end <= start:
        raise ValueError("no JSON object found in model response")
    return json.loads(text[start : end + 1])


def fetch_question(topic):
    """Ask the LLM for one multiple-choice question on *topic*.

    Returns a dict {"question": str, "options": list[str], "correct_index": int}
    with correct_index validated to be an in-range integer, or None on any
    failure (an error is surfaced in the UI). The raw model output is echoed
    to the page for debugging.
    """
    prompt = {
        "role": "system",
        "content": (
            f"You are an expert interviewer. Generate a multiple-choice question on the topic of {topic}. "
            "Respond with a JSON object: {\"question\": str, \"options\": [str, ...], \"correct_index\": int}."
        ),
    }
    content = None  # defined up front so the except-path can show it safely
    try:
        response = client.chat.completions.create(
            model="mistralai/Mistral-7B-Instruct-v0.1",
            messages=[prompt],
        )
        # Attribute access is the documented huggingface_hub API; the original
        # message["content"] relied on the dataclass's dict-style fallback.
        content = response.choices[0].message.content.strip()
        # Debug: show raw response
        st.write("**[DEBUG] Raw response:**")
        st.code(content)
        data = _extract_json(content)
    except Exception as e:
        st.error(f"Failed to fetch or parse question: {e}")
        if content is not None:
            st.write("**[DEBUG] Last content before error:**")
            st.code(content)
        return None

    # Validate structure before trusting model output: a string index like "2"
    # or an out-of-range index would otherwise mis-grade or raise IndexError
    # later at opts[q['correct_index']].
    question = data.get("question")
    options = data.get("options")
    try:
        correct_index = int(data.get("correct_index"))
    except (TypeError, ValueError):
        correct_index = None
    if (
        not question
        or not isinstance(options, list)
        or not options
        or correct_index is None
        or not 0 <= correct_index < len(options)
    ):
        st.error("Invalid question structure.")
        st.write("**[DEBUG] Parsed JSON:**")
        st.json(data)
        return None
    return {"question": question, "options": options, "correct_index": correct_index}


# One button fetches the first/next question; only the label differs, so the
# duplicated fetch-and-append branches are merged.
_label = "Get Question" if not st.session_state.questions else "Next Question"
if st.button(_label):
    q = fetch_question(st.session_state.topic)
    if q:
        st.session_state.questions.append({**q, "selected": None, "submitted": False})

# Display every question asked so far; capture and grade answers.
for idx, q in enumerate(st.session_state.questions):
    st.markdown(f"### Question {idx + 1}")
    st.write(q["question"])
    opts = q["options"]
    sel = st.radio(
        "Choose an answer:",
        options=list(range(len(opts))),
        # Bind the current options via a default arg (late-binding closure fix).
        format_func=lambda i, _opts=opts: _opts[i],
        index=q["selected"] if q["selected"] is not None else 0,
        key=f"radio_{idx}",
        disabled=q["submitted"],
    )
    st.session_state.questions[idx]["selected"] = sel
    if not q["submitted"]:
        if st.button("Submit Answer", key=f"submit_{idx}"):
            st.session_state.questions[idx]["submitted"] = True
            if sel == q["correct_index"]:
                st.success("Correct! +10 points")
                st.session_state.score += 10
                st.session_state.correct_count += 1
            else:
                st.error(f"Incorrect! Correct: {opts[q['correct_index']]} (-10 points)")
                st.session_state.score -= 10
                st.session_state.incorrect_count += 1

# Footer
st.markdown("---")
st.markdown("*Correct: +10 pts | Incorrect: -10 pts*")