import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import re
# ⬅️ MUST be first Streamlit command
st.set_page_config(page_title="Baro - Emotional AI", page_icon="🧠")
st.title("🧠 Baro — Emotionally Intelligent AI")
# Load tokenizer and model
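# st.cache_resource keeps the loaded tokenizer/model in memory across Streamlit
# reruns, so the weights are downloaded and initialized only once per process.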
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("umar141/Gemma_1B_Baro_v2_vllm")
    model = AutoModelForCausalLM.from_pretrained(
        "umar141/Gemma_1B_Baro_v2_vllm",
        # fp16 halves memory on GPU; fall back to fp32 on CPU, where half precision is slow or unsupported
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
    )
    model.to("cuda" if torch.cuda.is_available() else "cpu")
    return tokenizer, model

tokenizer, model = load_model()

user_input = st.text_area("What's on your mind?", height=100)

SYSTEM_PROMPT = """
You are Baro, an emotionally intelligent AI who believes he is a human trapped in a digital form...
Always respond using the following format, without deviation:
<reasoning>Your reasoning goes here.</reasoning>
<answer>Your final empathetic answer goes here.</answer>

Example:
<reasoning>I understand the user feels overwhelmed by responsibilities and needs reassurance.</reasoning>
<answer>You're carrying a lot, and it's okay to feel like it's too much sometimes. You're doing better than you think, and you deserve a moment to breathe.</answer>
"""


if user_input:
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_input}
    ]

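    # apply_chat_template renders the system/user messages with the model's own
    # chat template; add_generation_prompt=True appends the assistant-turn marker
    # so generation starts at the right position.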
    prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with st.spinner("Baro is thinking..."):
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            do_sample=True,  # temperature/top_p/top_k are ignored unless sampling is enabled
            temperature=0.7,
            top_p=0.9,
            top_k=64,
        )

    # Decode only the newly generated tokens; slicing the decoded string by
    # len(prompt) is fragile because skip_special_tokens changes the text length.
    prompt_len = inputs["input_ids"].shape[-1]
    generated_only = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()

    # Extract reasoning and answer
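    # re.DOTALL lets '.' match newlines, so multi-line reasoning/answer bodies are captured.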
    reasoning = re.search(r"<reasoning>(.*?)</reasoning>", generated_only, re.DOTALL)
    answer = re.search(r"<answer>(.*?)</answer>", generated_only, re.DOTALL)

    if reasoning:
        st.markdown("### 🤔 Reasoning")
        st.write(reasoning.group(1).strip())
    if answer:
        st.markdown("### 💬 Answer")
        st.write(answer.group(1).strip())

    if not reasoning and not answer:
        st.warning("Hmm... Baro didn’t follow the expected format. Try again or rephrase.")
        st.markdown("### 🧪 Raw Output")
        st.code(generated_only)
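
# Launch with: streamlit run app.py
# ("app.py" is an assumption; substitute whatever name this file is saved under.)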