import os
import time
from datetime import datetime
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# -- SETUP --
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
@st.cache_resource
def load_respondent():
    # Loaded once per process; st.cache_resource reuses the pipeline across reruns.
    model_id = "mistralai/Mistral-7B-Instruct-v0.1"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",  # requires `accelerate`; places layers on available devices
        trust_remote_code=True,
        torch_dtype="auto"
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
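
# On smaller GPUs, a 4-bit quantized load is a common alternative. A sketch,
# assuming `bitsandbytes` is installed (not used here):
#
#   from transformers import BitsAndBytesConfig
#   model = AutoModelForCausalLM.from_pretrained(
#       model_id,
#       device_map="auto",
#       quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#   )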
generator = load_respondent()

if "history" not in st.session_state:
    st.session_state.history = []
    st.session_state.summary = ""
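
# st.session_state persists across Streamlit's top-to-bottom reruns, so the
# chat history and summary survive each widget interaction.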
# -- STYLING --
st.markdown("""
<style>
body {
background-color: #111827;
color: #f3f4f6;
}
.stTextInput > div > div > input {
color: #f3f4f6;
}
</style>
""", unsafe_allow_html=True)
# -- HEADER --
st.title("🧠 TARS.help")
st.markdown("### A minimal AI that listens, reflects, and replies.")
st.markdown(f"🗓️ {datetime.now().strftime('%B %d, %Y')} | {len(st.session_state.history)//2} exchanges")
# -- SAFETY FILTER --
TRIGGER_PHRASES = ["kill myself", "end it all", "suicide", "not worth living", "can't go on"]
def is_high_risk(text):
    # Minimal safety net: case-insensitive substring match against known crisis phrases.
    return any(phrase in text.lower() for phrase in TRIGGER_PHRASES)
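
# The substring check can false-positive inside quoted or unrelated text. A
# word-boundary variant would be stricter (a hypothetical alternative, not used here):
#
#   import re
#   _RISK_RE = re.compile("|".join(rf"\b{re.escape(p)}\b" for p in TRIGGER_PHRASES))
#   def is_high_risk(text):
#       return bool(_RISK_RE.search(text.lower()))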
# -- INPUT --
user_input = st.text_input("How are you feeling today?", placeholder="Start typing...")
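
# st.text_input submits on Enter and reruns the script. Streamlit's chat
# widgets are a drop-in alternative (assuming Streamlit >= 1.24), e.g.:
#
#   user_input = st.chat_input("How are you feeling today?")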
# -- REPLY FUNCTION --
def generate_reply(user_input, context):
    prompt = f"""You are a kind and empathetic AI assistant. Respond thoughtfully based on the following conversation:
{context}
User: {user_input}
AI:"""
    # do_sample=True makes temperature take effect; without it the pipeline decodes greedily.
    response = generator(prompt, max_new_tokens=150, do_sample=True, temperature=0.7)[0]['generated_text']
    return response.split("AI:")[-1].strip()
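
# Mistral-7B-Instruct is tuned on an [INST] ... [/INST] chat format, so a raw
# completion prompt can drift. A sketch using the tokenizer's chat template
# (assuming transformers >= 4.34):
#
#   messages = [{"role": "user", "content": f"{context}\nUser: {user_input}"}]
#   prompt = generator.tokenizer.apply_chat_template(
#       messages, tokenize=False, add_generation_prompt=True
#   )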
# -- CONVERSATION FLOW --
if user_input:
    # Rolling context window: the last four messages (two exchanges).
    context = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history[-4:]])
    with st.spinner("TARS is reflecting..."):
        time.sleep(1.2)
        if is_high_risk(user_input):
            response = "I'm really sorry you're feeling this way. You're not alone. Please talk to someone you trust or a mental health professional. 💙"
        else:
            response = generate_reply(user_input, context)
    timestamp = datetime.now().strftime("%H:%M")
    st.session_state.history.append(("🧍 You", user_input, timestamp))
    st.session_state.history.append(("🤖 TARS", response, timestamp))
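
# Each interaction reruns this script from the top; the @st.cache_resource
# pipeline keeps the model in memory, so only generation cost is paid per message.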
# -- DISPLAY HISTORY --
st.markdown("## π¨οΈ Session")
for speaker, msg, time in st.session_state.history:
st.markdown(f"**{speaker} [{time}]:** {msg}")
# -- SESSION SUMMARY --
if st.button("🧾 Generate Session Summary"):
    convo = "\n".join([f"{s}: {m}" for s, m, _ in st.session_state.history])
    prompt = f"""Summarize the emotional tone and key themes from this conversation in 3 sentences:
{convo}
Summary:"""
    try:
        output = generator(prompt, max_new_tokens=200, do_sample=True, temperature=0.5)[0]['generated_text']
        st.session_state.summary = output.split("Summary:")[-1].strip()
    except Exception as e:
        st.error("❌ Summary generation failed.")
        st.exception(e)
if st.session_state.summary:
    st.markdown("### 🧠 Session Note")
    st.markdown(st.session_state.summary)
    st.download_button("📥 Download Summary", st.session_state.summary, file_name="tars_session.txt")
# -- FOOTER --
st.markdown("---")
st.caption("TARS is not a therapist but a quiet assistant that reflects with you.")