File size: 1,261 Bytes
5a3befd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import os
import subprocess
import tempfile
import urllib.request

import streamlit as st

# Page configuration
st.set_page_config(page_title="Offline AI Chatbot", layout="wide")

# Model setup
MODEL_FOLDER = "models"
MODEL_FILE = "mistral-3b-q4.gguf"
MODEL_PATH = os.path.join(MODEL_FOLDER, MODEL_FILE)

# Direct download link for the 3B model
MODEL_URL = "https://huggingface.co/Tech-Meld/HX-Mistral-3B_v0.1-Q4_K_M-GGUF/resolve/main/hx-mistral-3b_v0.1.Q4_K_M.gguf"

# Ensure models folder exists
os.makedirs(MODEL_FOLDER, exist_ok=True)


def _download_model() -> None:
    """Stream MODEL_URL into MODEL_PATH via a temp file.

    Downloading to a temp file and atomically renaming on success means an
    interrupted download can never leave a truncated file at MODEL_PATH —
    important because the caller only checks for the file's *existence*
    before skipping the download on the next run.

    Raises:
        urllib.error.URLError / OSError: on network or filesystem failure
        (the partial temp file is removed first, so a retry starts clean).
    """
    # Temp file lives in the same folder so os.replace stays on one
    # filesystem and is atomic.
    fd, tmp_path = tempfile.mkstemp(dir=MODEL_FOLDER, suffix=".part")
    try:
        with os.fdopen(fd, "wb") as out, urllib.request.urlopen(MODEL_URL) as resp:
            # 1 MiB chunks keep memory flat for the ~2 GB file.
            while chunk := resp.read(1024 * 1024):
                out.write(chunk)
        os.replace(tmp_path, MODEL_PATH)
    except Exception:
        # Remove the partial file so the next run retries from scratch.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        raise


# Download model if not already present.
# NOTE(review): stdlib urllib replaces the previous gdown call — gdown is a
# Google-Drive downloader and is not needed for a direct HTTPS URL.
if not os.path.exists(MODEL_PATH):
    with st.spinner("Downloading Mistral 3B model (~2GB), please wait..."):
        _download_model()
    st.success("Model downloaded!")

# Streamlit UI
st.title("🟢 Offline AI Chatbot")

user_input = st.text_input("You:", "")

if st.button("Send") and user_input.strip() != "":
    with st.spinner("Generating response..."):
        # Run llama-run.exe. List argv with shell=False (the default): the
        # user's text is passed as a single argument and is never
        # interpreted by a shell.
        result = subprocess.run(
            ["./llama-run.exe", MODEL_PATH, user_input],
            capture_output=True,
            text=True,
        )
    if result.returncode != 0:
        # Surface the failure instead of silently rendering an empty reply.
        st.error(
            f"llama-run failed (exit {result.returncode}):\n"
            f"{result.stderr.strip()}"
        )
    else:
        st.text_area("AI:", value=result.stdout.strip(), height=200)