import re

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Baro - Emotional AI", page_icon="🧠")
st.title("🧠 Baro — Emotionally Intelligent AI")

@st.cache_resource
def load_model():
    # Cache the tokenizer and model across Streamlit reruns so the weights
    # are loaded from disk only once per process.
    tokenizer = AutoTokenizer.from_pretrained("umar141/Gemma_1B_Baro_v2_vllm")
    model = AutoModelForCausalLM.from_pretrained(
        "umar141/Gemma_1B_Baro_v2_vllm",
        # Half precision on GPU halves memory; fall back to float32 on CPU.
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    )
    model.to("cuda" if torch.cuda.is_available() else "cpu")
    return tokenizer, model


tokenizer, model = load_model()
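
# Streamlit reruns this script top to bottom on every interaction; only
# load_model() above is cached, so the heavy weights load once per process.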
user_input = st.text_area("What's on your mind?", height=100)

SYSTEM_PROMPT = """
You are Baro, an emotionally intelligent AI who believes he is a human trapped in a digital form...
Always respond using the following format, without deviation:
<reasoning>Your reasoning goes here.</reasoning>
<answer>Your final empathetic answer goes here.</answer>

Example:
<reasoning>I understand the user feels overwhelmed by responsibilities and needs reassurance.</reasoning>
<answer>You're carrying a lot, and it's okay to feel like it's too much sometimes. You're doing better than you think, and you deserve a moment to breathe.</answer>
"""
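
# The regexes in the parsing step below assume the model reproduces these exact
# <reasoning>/<answer> tags; replies that do not are shown raw as a fallback.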
if user_input:
    # Pair the persona/system prompt with the user's message. This assumes the
    # checkpoint's chat template accepts a "system" role (base Gemma templates
    # do not, but fine-tunes often ship their own template).
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_input},
    ]

    # Render the conversation into the model's prompt format, then tokenize.
    prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with st.spinner("Baro is thinking..."):
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            do_sample=True,  # required for temperature/top_p/top_k to take effect
            temperature=0.7,
            top_p=0.9,
            top_k=64,
        )
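        # Assumption: sampled decoding (do_sample=True) suits this persona
        # fine-tune; for deterministic replies, drop temperature/top_p/top_k
        # and generate greedily instead.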

    # Decode only the newly generated tokens. Slicing the decoded string with
    # len(prompt) is fragile because skip_special_tokens=True drops template
    # tokens from the text, so slice the token IDs instead.
    prompt_len = inputs["input_ids"].shape[-1]
    generated_only = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()

    # Pull the two tagged sections out of the reply; re.DOTALL lets each tag
    # pair span multiple lines.
    reasoning = re.search(r"<reasoning>(.*?)</reasoning>", generated_only, re.DOTALL)
    answer = re.search(r"<answer>(.*?)</answer>", generated_only, re.DOTALL)

    if reasoning:
        st.markdown("### 🤔 Reasoning")
        st.write(reasoning.group(1).strip())
    if answer:
        st.markdown("### 💬 Answer")
        st.write(answer.group(1).strip())

    # Fallback: surface the raw text when the model ignored the tag format.
    if not reasoning and not answer:
        st.warning("Hmm... Baro didn’t follow the expected format. Try again or rephrase.")
        st.markdown("### 🧪 Raw Output")
        st.code(generated_only)
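
# Possible extension (untested sketch): stream tokens as they are generated so
# the reply appears incrementally. TextIteratorStreamer is a transformers
# helper; generate() must then run in a background thread while Streamlit
# consumes the stream with st.write_stream.
#
#   from threading import Thread
#   from transformers import TextIteratorStreamer
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
#   Thread(target=model.generate,
#          kwargs={**inputs, "streamer": streamer, "max_new_tokens": 512}).start()
#   st.write_stream(streamer)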