import os
import json
import traceback
from dataclasses import asdict  # used to serialize the client's dataclass response for debug output
import streamlit as st
from huggingface_hub import InferenceClient
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
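# provider="auto" lets the client route the request to an available inference provider for the chosen model;
# the token is read from HUGGINGFACEHUB_API_TOKEN in the environment (or the .env file loaded above).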
client = InferenceClient(provider="auto", api_key=hf_token)
# Streamlit configuration
st.set_page_config(
    page_title="Interview Prep Bot",
    page_icon="🧠",
    layout="centered"
)
st.title("🎓 Interview Preparation Chatbot")
# Initialize session state
if "questions" not in st.session_state:
st.session_state.questions = []
if "topic" not in st.session_state:
st.session_state.topic = "Machine Learning"
if "score" not in st.session_state:
st.session_state.score = 0
if "correct_count" not in st.session_state:
st.session_state.correct_count = 0
if "incorrect_count" not in st.session_state:
st.session_state.incorrect_count = 0
# Sidebar: Topic selection and scoring
st.sidebar.header("Practice Topic")
TOPICS = ["Machine Learning", "Data Structures", "Python", "Generative AI", "Computer Vision", "Deep Learning"]
st.session_state.topic = st.sidebar.selectbox(
    "Select a topic:",
    TOPICS,
    index=TOPICS.index(st.session_state.topic)
)
st.sidebar.markdown("---")
st.sidebar.header("Your Score")
st.sidebar.markdown(f"**Total:** {len(st.session_state.questions)}")
st.sidebar.markdown(f"**Correct:** {st.session_state.correct_count}")
st.sidebar.markdown(f"**Incorrect:** {st.session_state.incorrect_count}")
st.sidebar.markdown(f"**Points:** {st.session_state.score}")
# Function to fetch an MCQ question with enhanced debug logging
def fetch_question(topic):
    prompt = {
        "role": "system",
        "content": (
            f"You are an expert interviewer. Generate a multiple-choice question on the topic of {topic}. "
            "Respond ONLY with a valid JSON object: {\"question\": str, \"options\": [str,...], \"correct_index\": int}."
        )
    }
    try:
        response = client.chat.completions.create(
            model="mistralai/Mistral-7B-Instruct-v0.1",
            messages=[prompt]
        )
        # Debug: show full response for tracing (the client returns a dataclass, so serialize it with asdict)
        st.write("**[DEBUG] Full response object:**")
        st.json(asdict(response))
        content = (response.choices[0].message.content or "").strip()
        st.write("**[DEBUG] Raw content:**")
        st.code(content)
    except Exception as e:
        st.error(f"Error during API call: {e}")
        st.text(traceback.format_exc())
        return None
    # Attempt JSON parsing
    try:
        data = json.loads(content)
    except json.JSONDecodeError as jde:
        st.error(f"JSON decode error: {jde}")
        st.write("**[DEBUG] Content that failed JSON parsing:**")
        st.code(content)
        return None
    except Exception as e:
        st.error(f"Unexpected parsing error: {e}")
        st.text(traceback.format_exc())
        return None
    # Validate structure
    question = data.get("question")
    options = data.get("options")
    correct_index = data.get("correct_index")
    if not question or not isinstance(options, list) or not isinstance(correct_index, int):
        st.error("Invalid question structure: missing keys or wrong types.")
        st.write("**[DEBUG] Parsed JSON:**")
        st.json(data)
        return None
    return {"question": question, "options": options, "correct_index": correct_index}
# Buttons to get or advance questions
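# Each appended entry carries the generated question payload plus per-question UI state:
# "selected" (the chosen option index) and "submitted" (whether the answer has been graded).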
if not st.session_state.questions:
    if st.button("Get Question"):
        q = fetch_question(st.session_state.topic)
        if q:
            st.session_state.questions.append({**q, "selected": None, "submitted": False})
else:
    if st.button("Next Question"):
        q = fetch_question(st.session_state.topic)
        if q:
            st.session_state.questions.append({**q, "selected": None, "submitted": False})
# Display questions and capture answers
for idx, q in enumerate(st.session_state.questions):
    st.markdown(f"### Question {idx+1}")
    st.write(q["question"])
    opts = q["options"]
    # Radio options are indices into opts; format_func renders the option text, and the
    # widget is disabled once this question has been submitted.
    sel = st.radio(
        "Choose an answer:",
        options=list(range(len(opts))),
        format_func=lambda i: opts[i],
        index=q["selected"] if q["selected"] is not None else 0,
        key=f"radio_{idx}",
        disabled=q["submitted"]
    )
    st.session_state.questions[idx]["selected"] = sel
    if not q["submitted"]:
        if st.button("Submit Answer", key=f"submit_{idx}"):
            st.session_state.questions[idx]["submitted"] = True
            if sel == q["correct_index"]:
                st.success("Correct! +10 points")
                st.session_state.score += 10
                st.session_state.correct_count += 1
            else:
                st.error(f"Incorrect! Correct: {opts[q['correct_index']]} (-10 points)")
                st.session_state.score -= 10
                st.session_state.incorrect_count += 1
# Footer
st.markdown("---")
st.markdown("*Correct: +10 pts | Incorrect: -10 pts*")