KASOTI / app.py
iisadia's picture
Update app.py
f34b1bf verified
raw
history blame
19.1 kB
import streamlit as st
import time
import requests
from streamlit.components.v1 import html
import os
# Import transformers and cache the help agent for performance
@st.cache_resource
def get_help_agent():
    """Return a transformers "conversational" pipeline (BlenderBot-400M-distill).

    Wrapped in st.cache_resource so the model is downloaded/loaded at most once
    per server process; the import is deferred so transformers is only pulled
    in if this helper is actually used.

    NOTE(review): this function is not called anywhere in this file (the help
    sidebar uses ask_help_agent instead), and the "conversational" pipeline
    task has been removed in recent transformers releases — confirm before
    relying on it.
    """
    from transformers import pipeline
    return pipeline("conversational", model="facebook/blenderbot-400M-distill")
# Enhanced Custom CSS with modern design
def inject_custom_css():
    """Inject the app-wide stylesheet into the Streamlit page.

    Defines the Inter font, gradient header, game/result cards, answer-button
    styling, progress bar, confetti canvas overlay and help-sidebar classes
    that the HTML emitted elsewhere in this file references by class name.
    Must be called once per page render (main() calls it first).
    """
    st.markdown("""
    <style>
    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
    * {
        font-family: 'Inter', sans-serif;
        margin: 0;
        padding: 0;
        box-sizing: border-box;
    }
    .container {
        max-width: 800px;
        margin: 0 auto;
        padding: 2rem;
    }
    .gradient-header {
        background: linear-gradient(135deg, #6366f1 0%, #a855f7 50%, #ec4899 100%);
        color: white;
        padding: 4rem 1rem;
        border-radius: 0 0 2rem 2rem;
        margin-bottom: 2rem;
        box-shadow: 0 4px 6px rgba(0,0,0,0.1);
        text-align: center;
    }
    .title {
        font-size: 2.5rem;
        font-weight: 700;
        letter-spacing: -0.05em;
        margin-bottom: 0.5rem;
    }
    .subtitle {
        font-size: 1.1rem;
        opacity: 0.9;
        font-weight: 400;
    }
    .game-card {
        background: rgba(255, 255, 255, 0.95);
        backdrop-filter: blur(10px);
        border-radius: 1.5rem;
        padding: 2rem;
        margin: 1rem 0;
        box-shadow: 0 4px 20px rgba(0,0,0,0.08);
        border: 1px solid rgba(255,255,255,0.2);
    }
    .question-number {
        font-size: 0.9rem;
        color: #6b7280;
        margin-bottom: 0.5rem;
    }
    .question-text {
        font-size: 1.2rem;
        color: #1f2937;
        margin: 1rem 0;
        line-height: 1.5;
        font-weight: 500;
    }
    .answer-buttons {
        display: flex;
        gap: 1rem;
        margin-top: 1.5rem;
        justify-content: center;
    }
    .stButton>button {
        transition: all 0.2s ease;
        border-radius: 12px;
        padding: 0.75rem 2rem;
        font-weight: 600;
        border: none;
    }
    .stButton>button:hover {
        transform: translateY(-2px);
        box-shadow: 0 4px 12px rgba(0,0,0,0.15);
    }
    .yes-btn {
        background: #10b981 !important;
        color: white !important;
    }
    .no-btn {
        background: #ef4444 !important;
        color: white !important;
    }
    .both-btn {
        background: #f59e0b !important;
        color: white !important;
    }
    .result-card {
        background: linear-gradient(135deg, #f0fdfa 0%, #f8fafc 100%);
        border-radius: 1.5rem;
        padding: 3rem 2rem;
        text-align: center;
        animation: fadeIn 1s ease;
    }
    .result-text {
        font-size: 2rem;
        font-weight: 700;
        color: #1f2937;
        margin: 1rem 0;
        background: linear-gradient(135deg, #6366f1 0%, #ec4899 100%);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
    }
    .confetti {
        position: fixed;
        top: 0;
        left: 0;
        width: 100%;
        height: 100%;
        pointer-events: none;
        z-index: 1000;
    }
    @keyframes fadeIn {
        from { opacity: 0; transform: translateY(20px); }
        to { opacity: 1; transform: translateY(0); }
    }
    .progress-bar {
        height: 8px;
        background: #e5e7eb;
        border-radius: 4px;
        overflow: hidden;
        margin: 1.5rem 0;
    }
    .progress-fill {
        height: 100%;
        background: linear-gradient(90deg, #6366f1 0%, #a855f7 100%);
        transition: width 0.3s ease;
    }
    .help-sidebar {
        background: rgba(255,255,255,0.9);
        backdrop-filter: blur(10px);
        padding: 1.5rem;
        border-radius: 1rem;
        border: 1px solid rgba(0,0,0,0.1);
    }
    </style>
    """, unsafe_allow_html=True)
# Confetti animation
def show_confetti():
    """Render a celebratory confetti burst over the whole page.

    Injects a full-screen <canvas> (styled by the .confetti CSS class) plus
    the canvas-confetti CDN script, fires several overlapping bursts, and
    removes the canvas after 5 seconds.
    """
    html("""
    <canvas id="confetti-canvas" class="confetti"></canvas>
    <script src="https://cdn.jsdelivr.net/npm/canvas-confetti@1.5.1/dist/confetti.browser.min.js"></script>
    <script>
    function randomInRange(min, max) {
        return Math.random() * (max - min) + min;
    }
    const canvas = document.getElementById('confetti-canvas');
    // BUGFIX: the original `const confetti = confetti.create(...)` shadowed
    // the global `confetti` loaded from the CDN script, so the right-hand
    // side hit the temporal dead zone and threw
    // "ReferenceError: Cannot access 'confetti' before initialization" —
    // no confetti ever rendered. Bind the canvas-scoped instance to a
    // distinct name instead.
    const confettiCannon = confetti.create(canvas, { resize: true });
    const count = 200;
    const defaults = {
        origin: { y: 0.7 }
    };
    function fire(particleRatio, opts) {
        confettiCannon(Object.assign({}, defaults, opts, {
            particleCount: Math.floor(count * particleRatio)
        }));
    }
    fire(0.25, { spread: 26, startVelocity: 55 });
    fire(0.2, { spread: 60 });
    fire(0.35, { spread: 100, decay: 0.91, scalar: 0.8 });
    fire(0.1, { spread: 120, startVelocity: 25, decay: 0.92, scalar: 1.2 });
    fire(0.1, { spread: 120, startVelocity: 45 });
    setTimeout(() => { canvas.remove(); }, 5000);
    </script>
    """)
# Enhanced AI question generation for guessing game using Llama model
def ask_llama(conversation_history, category, is_final_guess=False):
    """Query the Groq-hosted Llama model for the next question or final guess.

    Args:
        conversation_history: list of OpenAI-style chat messages
            ({"role": ..., "content": ...}) exchanged so far.
        category: the chosen category ("person", "place" or "object"),
            interpolated into the system prompt.
        is_final_guess: when True, ask the model for its single final guess
            instead of another yes/no question.

    Returns:
        The model's reply text, or the fallback string
        "Could not generate question" if the API call fails (the error is
        surfaced to the UI via st.error).
    """
    api_url = "https://api.groq.com/openai/v1/chat/completions"
    # SECURITY FIX: the Groq API key was hard-coded in source (a leaked,
    # now-revocable secret). Read it from the environment instead; `os` is
    # already imported at module level. Set GROQ_API_KEY in the deployment.
    headers = {
        "Authorization": f"Bearer {os.environ.get('GROQ_API_KEY', '')}",
        "Content-Type": "application/json"
    }
    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
2. Consider all previous answers carefully before asking next question
3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
4. For places: ask about continent, climate, famous landmarks, country, city or population
5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
6. For objects: ask about size, color, usage, material, or where it's found
7. Never repeat questions and always make progress toward guessing"""
    if is_final_guess:
        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
{conversation_history}"""
    else:
        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
    messages = [
        {"role": "system", "content": system_prompt},
        *conversation_history,
        {"role": "user", "content": prompt}
    ]
    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        # Slightly lower temperature for the final guess to favour the
        # single most likely answer.
        "temperature": 0.7 if is_final_guess else 0.8,
        "max_tokens": 100
    }
    try:
        # ROBUSTNESS FIX: the original call had no timeout, so a stalled
        # connection would hang the Streamlit script forever.
        response = requests.post(api_url, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Error calling Llama API: {str(e)}")
        return "Could not generate question"
# New function for the help AI assistant using the Hugging Face InferenceClient
def ask_help_agent(query):
    """Answer a player's help question via a Zephyr-7B chat model on HF Inference.

    Rebuilds the chat history from st.session_state.help_conversation (a list
    of {"query": ..., "response": ...} dicts), appends the new query, streams
    the completion and returns the concatenated text. Any failure is returned
    as an "Error in help agent: ..." string rather than raised, so the sidebar
    UI never crashes.
    """
    try:
        from huggingface_hub import InferenceClient
        # Initialize the client with the provided model
        client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))
        system_message = "You are a friendly Chatbot."
        # Build history from session state (if any)
        history = []
        if "help_conversation" in st.session_state:
            for msg in st.session_state.help_conversation:
                # Each history entry is a tuple: (user query, assistant response)
                history.append((msg.get("query", ""), msg.get("response", "")))
        messages = [{"role": "system", "content": system_message}]
        for user_msg, bot_msg in history:
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": query})
        response_text = ""
        # Using streaming to collect the entire response from the model
        for message in client.chat_completion(
            messages,
            max_tokens=150,
            stream=True,
            temperature=0.7,
            top_p=0.95,
        ):
            token = message.choices[0].delta.content
            # BUGFIX: streamed deltas can carry content=None (e.g. role-only
            # or final chunks); `response_text += None` raised TypeError and
            # turned every reply into an error message. Skip empty deltas.
            response_text += token or ""
        return response_text
    except Exception as e:
        return f"Error in help agent: {str(e)}"
def handle_answer(answer):
    """Record the player's reply and advance the game state machine one step.

    Args:
        answer: the player's reply to the current question — "yes", "no"
            or "both" (appended verbatim to the chat transcript).

    Side effects on st.session_state: extends conversation_history, and
    either (a) captures a "Final Guess:" from the model and switches
    game_state to "confirm_guess", or (b) appends the next question,
    bumps current_q, and switches to "result" once 20 questions have been
    asked. Always triggers a rerun so the UI reflects the new state.
    """
    st.session_state.conversation_history.append(
        {"role": "user", "content": answer}
    )
    # Generate next response
    next_response = ask_llama(
        st.session_state.conversation_history,
        st.session_state.category
    )
    # Check if AI made a guess
    if "Final Guess:" in next_response:
        # Keep only the text after the marker as the guess itself.
        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
        st.session_state.game_state = "confirm_guess"
    else:
        st.session_state.questions.append(next_response)
        st.session_state.conversation_history.append(
            {"role": "assistant", "content": next_response}
        )
        st.session_state.current_q += 1
        # Stop after 20 questions max
        if st.session_state.current_q >= 20:
            st.session_state.game_state = "result"
    st.experimental_rerun()
def main():
    """Top-level Streamlit entry point: render the screen for the current state.

    Drives a four-state machine kept in st.session_state.game_state:
    "start" (category selection) -> "gameplay" (yes/no questions) ->
    "confirm_guess" (model proposed an early guess) -> "result" (final answer,
    confetti, play-again). Each button press mutates session state and reruns.
    """
    inject_custom_css()
    # Static gradient page header (classes defined in inject_custom_css).
    st.markdown("""
    <div class="gradient-header">
        <div class="container">
            <h1 class="title">KASOTI</h1>
            <p class="subtitle">AI-Powered Guessing Game</p>
        </div>
    </div>
    """, unsafe_allow_html=True)
    # One-time session initialisation on first page load.
    if 'game_state' not in st.session_state:
        st.session_state.game_state = "start"
        st.session_state.questions = []
        st.session_state.current_q = 0
        st.session_state.answers = []
        st.session_state.conversation_history = []
        st.session_state.category = None
        st.session_state.final_guess = None
        st.session_state.help_conversation = []
    # Start screen
    if st.session_state.game_state == "start":
        with st.container():
            st.markdown("""
            <div class="container">
                <div class="game-card">
                    <h3>๐ŸŽฎ How to Play</h3>
                    <p style="margin: 1rem 0; color: #4b5563;">
                        1. Choose a category<br>
                        2. Think of a specific item<br>
                        3. Answer AI's questions<br>
                        4. Try to stump the AI!
                    </p>
                    <div style="margin: 2rem 0;">
                        <div class="progress-bar">
                            <div class="progress-fill" style="width: 0%"></div>
                        </div>
                    </div>
            """, unsafe_allow_html=True)
            with st.form("start_form"):
                col1, col2, col3 = st.columns(3)
                with col2:
                    category = st.selectbox(
                        "Choose Category",
                        ["Person", "Place", "Object"],
                        index=None,
                        placeholder="Select category"
                    )
                if st.form_submit_button("Start Game", use_container_width=True):
                    if not category:
                        st.error("Please select a category!")
                    else:
                        # Initialize game state
                        st.session_state.category = category.lower()
                        # Seed the conversation with the model's opening question.
                        first_question = ask_llama([
                            {"role": "user", "content": "Ask your first strategic yes/no question."}
                        ], st.session_state.category)
                        st.session_state.questions = [first_question]
                        st.session_state.conversation_history = [
                            {"role": "assistant", "content": first_question}
                        ]
                        st.session_state.game_state = "gameplay"
                        st.experimental_rerun()
    # Gameplay screen
    elif st.session_state.game_state == "gameplay":
        with st.container():
            # Current question card (1-based display of current_q).
            st.markdown(f"""
            <div class="game-card">
                <div class="question-number">
                    Question {st.session_state.current_q + 1}/20
                </div>
                <div class="question-text">
                    {st.session_state.questions[st.session_state.current_q]}
                </div>
            </div>
            """, unsafe_allow_html=True)
            # Progress bar
            progress = (st.session_state.current_q + 1) / 20
            st.markdown(f"""
            <div class="progress-bar">
                <div class="progress-fill" style="width: {progress * 100}%"></div>
            </div>
            """, unsafe_allow_html=True)
            # Answer buttons — each records the answer then delegates to
            # handle_answer, which advances the state machine and reruns.
            col1, col2, col3 = st.columns([1,1,1])
            with col1:
                if st.button("Yes โœ…", key="yes_btn", use_container_width=True, type="primary"):
                    st.session_state.answers.append("yes")
                    handle_answer("yes")
            with col2:
                if st.button("No โŒ", key="no_btn", use_container_width=True, type="primary"):
                    st.session_state.answers.append("no")
                    handle_answer("no")
            with col3:
                if st.button("Both ๐Ÿค”", key="both_btn", use_container_width=True, type="primary"):
                    st.session_state.answers.append("both")
                    handle_answer("both")
        # Help sidebar
        with st.sidebar:
            st.markdown("""
            <div class="help-sidebar">
                <h3>๐Ÿค– AI Assistant</h3>
                <p>Need help? Chat with our AI helper:</p>
            """, unsafe_allow_html=True)
            help_query = st.text_input("Type your question...", key="help_query")
            if st.button("Send", use_container_width=True):
                if help_query:
                    help_response = ask_help_agent(help_query)
                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
            if st.session_state.help_conversation:
                st.markdown("---")
                for msg in st.session_state.help_conversation[-3:]:  # Show last 3 messages
                    st.markdown(f"**You:** {msg['query']}")
                    st.markdown(f"**Assistant:** {msg['response']}")
            st.markdown("</div>", unsafe_allow_html=True)
    # Confirm guess screen
    elif st.session_state.game_state == "confirm_guess":
        with st.container():
            st.markdown(f"""
            <div class="game-card">
                <h3>๐Ÿค– My Final Guess</h3>
                <div class="question-text">
                    Is it {st.session_state.final_guess}?
                </div>
            </div>
            """, unsafe_allow_html=True)
            # Answer buttons
            col1, col2, col3 = st.columns([1,1,1])
            with col1:
                # Guess confirmed — jump straight to the result screen.
                if st.button("Yes โœ…", key="confirm_yes", use_container_width=True, type="primary"):
                    st.session_state.game_state = "result"
                    st.experimental_rerun()
            with col2:
                # Guess rejected — feed "no" back and resume questioning.
                if st.button("No โŒ", key="confirm_no", use_container_width=True, type="primary"):
                    st.session_state.conversation_history.append(
                        {"role": "user", "content": "no"}
                    )
                    st.session_state.game_state = "gameplay"
                    next_response = ask_llama(
                        st.session_state.conversation_history,
                        st.session_state.category
                    )
                    st.session_state.questions.append(next_response)
                    st.session_state.conversation_history.append(
                        {"role": "assistant", "content": next_response}
                    )
                    st.session_state.current_q += 1
                    st.experimental_rerun()
            with col3:
                # Ambiguous answer — same flow as "no" but records "both".
                if st.button("Both ๐Ÿค”", key="confirm_both", use_container_width=True, type="primary"):
                    st.session_state.conversation_history.append(
                        {"role": "user", "content": "both"}
                    )
                    st.session_state.game_state = "gameplay"
                    next_response = ask_llama(
                        st.session_state.conversation_history,
                        st.session_state.category
                    )
                    st.session_state.questions.append(next_response)
                    st.session_state.conversation_history.append(
                        {"role": "assistant", "content": next_response}
                    )
                    st.session_state.current_q += 1
                    st.experimental_rerun()
    # Result screen
    elif st.session_state.game_state == "result":
        if not st.session_state.final_guess:
            # Generate final guess if not already made (reached 20 questions
            # without the model volunteering a "Final Guess:").
            qa_history = "\n".join(
                [f"Q{i+1}: {q}\nA: {a}"
                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
            )
            final_guess = ask_llama(
                [{"role": "user", "content": qa_history}],
                st.session_state.category,
                is_final_guess=True
            )
            # Tolerate the model echoing the "Final Guess:" marker anyway.
            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
        with st.container():
            show_confetti()
            st.markdown(f"""
            <div class="result-card">
                <h3>๐ŸŽ‰ Game Over!</h3>
                <div class="result-text">
                    {st.session_state.final_guess}
                </div>
                <p style="color: #6b7280;">
                    Guessed in {len(st.session_state.questions)} questions
                </p>
            </div>
            """, unsafe_allow_html=True)
            # Wipe all session state so the next run starts from "start".
            if st.button("Play Again ๐Ÿ”„", use_container_width=True, type="primary"):
                st.session_state.clear()
                st.experimental_rerun()
# Run the app only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()