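"""Interview Preparation Chatbot.

Streamlit app that generates multiple-choice interview questions on a chosen
topic through the Hugging Face Inference API and keeps a running score in
st.session_state. Scoring: +10 points per correct answer, -10 per incorrect.
"""
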
import os
import json
import streamlit as st
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
client = InferenceClient(provider="auto", api_key=hf_token)
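# provider="auto" lets huggingface_hub route the request to an available
# inference provider for the requested model; HUGGINGFACEHUB_API_TOKEN must be
# set in the environment or a local .env file for authenticated calls.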

# Streamlit configuration
st.set_page_config(
    page_title="Interview Prep Bot",
    page_icon="🧠",
    layout="centered"
)
st.title("🎓 Interview Preparation Chatbot")

# Initialize session state
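# "questions" accumulates the fetched question dicts; "score", "correct_count",
# and "incorrect_count" drive the sidebar scoreboard.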
if "questions" not in st.session_state:
    st.session_state.questions = []
if "topic" not in st.session_state:
    st.session_state.topic = "Machine Learning"
if "score" not in st.session_state:
    st.session_state.score = 0
if "correct_count" not in st.session_state:
    st.session_state.correct_count = 0
if "incorrect_count" not in st.session_state:
    st.session_state.incorrect_count = 0

# Sidebar: Topic selection and scoring
st.sidebar.header("Practice Topic")
TOPICS = [
    "Machine Learning",
    "Data Structures",
    "Python",
    "Generative AI",
    "Computer Vision",
    "Deep Learning",
]
st.session_state.topic = st.sidebar.selectbox(
    "Select a topic:",
    TOPICS,
    index=TOPICS.index(st.session_state.topic),
)

st.sidebar.markdown("---")
st.sidebar.header("Your Score")
st.sidebar.markdown(f"**Total:** {len(st.session_state.questions)}")
st.sidebar.markdown(f"**Correct:** {st.session_state.correct_count}")
st.sidebar.markdown(f"**Incorrect:** {st.session_state.incorrect_count}")
st.sidebar.markdown(f"**Points:** {st.session_state.score}")

# Function to fetch an MCQ question

def fetch_question(topic):
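    """Ask the model for one multiple-choice question on `topic`.

    The prompt requests a bare JSON object of the shape
    {"question": str, "options": [str, ...], "correct_index": int}.
    Returns the parsed dict, or None if the request fails, the reply is not
    valid JSON, or the structure does not match.
    """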
    prompt = {
        "role": "system",
        "content": (
            f"You are an expert interviewer. Generate a multiple-choice question on the topic of {topic}. "
            "Respond with a JSON object: {\"question\": str, \"options\": [str, ...], \"correct_index\": int}."
        )
    }
    try:
        response = client.chat.completions.create(
            model="mistralai/Mistral-7B-Instruct-v0.1",
            messages=[prompt]
        )
        content = response.choices[0].message.content.strip()
        data = json.loads(content)
    except Exception as e:
        st.error(f"Failed to fetch or parse question: {e}")
        return None
    # Validate structure
    question = data.get("question")
    options = data.get("options")
    correct_index = data.get("correct_index")
    if (
        not question
        or not isinstance(options, list)
        or not isinstance(correct_index, int)
        or not 0 <= correct_index < len(options)
    ):
        st.error(f"Invalid question structure. Response: {content}")
        return None
    return {"question": question, "options": options, "correct_index": correct_index}

# Buttons to get or advance questions
label = "Get Question" if not st.session_state.questions else "Next Question"
if st.button(label):
    q = fetch_question(st.session_state.topic)
    if q:
        st.session_state.questions.append({**q, "selected": None, "submitted": False})

# Display questions and capture answers
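# Each stored question carries two UI flags alongside the generated content:
# "selected" (index of the chosen option) and "submitted" (answer locked in).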
for idx, q in enumerate(st.session_state.questions):
    st.markdown(f"### Question {idx+1}")
    st.write(q["question"])

    # Radio options
    opts = q["options"]
    sel = st.radio(
        "Choose an answer:",
        options=list(range(len(opts))),
        format_func=lambda i: opts[i],
        index=q["selected"] if q["selected"] is not None else 0,
        key=f"radio_{idx}",
        disabled=q["submitted"]
    )
    st.session_state.questions[idx]["selected"] = sel

    # Submit answer: score it once, then keep showing the result on later reruns
    if not q["submitted"]:
        if st.button("Submit Answer", key=f"submit_{idx}"):
            st.session_state.questions[idx]["submitted"] = True
            if sel == q["correct_index"]:
                st.session_state.score += 10
                st.session_state.correct_count += 1
            else:
                st.session_state.score -= 10
                st.session_state.incorrect_count += 1
            st.rerun()  # rerun so the sidebar score and the feedback below update
    else:
        if q["selected"] == q["correct_index"]:
            st.success("Correct! +10 points")
        else:
            st.error(f"Incorrect! Correct: {opts[q['correct_index']]} (-10 points)")

# Footer
st.markdown("---")
st.markdown("*Correct: +10 pts | Incorrect: -10 pts*")