# firstapp/src/streamlit_app.py
import os
# Disable Streamlit telemetry and cache writing
os.environ["STREAMLIT_HOME"] = "/tmp" # safe writable dir in Hugging Face
os.environ["STREAMLIT_DISABLE_LOGGING"] = "1"
os.environ["STREAMLIT_TELEMETRY_ENABLED"] = "0"
import streamlit as st
from huggingface_hub import InferenceClient
# Initialize Hugging Face client
client = InferenceClient(
    provider="cohere",
    # api_key=st.secrets["hf_token"]
    api_key=os.getenv("hf_token"),
)
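# NOTE: this assumes an `hf_token` secret is configured for the Space and exposed
# as an environment variable; if it is missing, the API call further below is
# likely to fail with an authentication error.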
# Streamlit page config
st.set_page_config(page_title="تمّام – مساعدك الذكي", page_icon="🤖")
st.title("مرحبًا!")
# Arabic system prompt
SYSTEM_PROMPT = """
عمر انس 10 و عمر احمد 12، العب معهم العاب. اليك هذه المعلومات عن انس و احمد: هما اخوان.
Play with them truth or dare and all games when they want.
"""
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display previous messages
for msg in st.session_state.messages:
with st.chat_message(msg["role"]):
st.markdown(msg["content"])
# Chat input
prompt = st.chat_input("اكتب رسالتك هنا...")
if prompt:
    # Save and display the user message
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare messages, including the system prompt
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    messages += st.session_state.messages

    # Call the Hugging Face Inference API
    try:
        completion = client.chat.completions.create(
            model="CohereLabs/c4ai-command-r7b-arabic-02-2025",
            messages=messages,
            max_tokens=512,
        )
        model_reply = completion.choices[0].message.content
    except Exception as e:
        model_reply = f"❌ فشل الاتصال بالنموذج: {e}"

    # Save and display the assistant reply
    st.session_state.messages.append({"role": "assistant", "content": model_reply})
    with st.chat_message("assistant"):
        st.markdown(model_reply)
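
# To try the app locally (assuming Streamlit and huggingface_hub are installed and
# an `hf_token` environment variable is set):
#   streamlit run src/streamlit_app.py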