LifeHelperAI commited on
Commit
5a3befd
·
verified ·
1 Parent(s): 99eeeaa

Upload 4 files

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. .gitignore +1 -0
  3. app.py +40 -0
  4. llama-run.exe +3 -0
  5. requirements.txt +2 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ llama-run.exe filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ *.gguf
app.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import subprocess

import gdown
import streamlit as st

# Page configuration (must be the first Streamlit call in the script).
st.set_page_config(page_title="Offline AI Chatbot", layout="wide")

# Model setup: GGUF weights live locally under ./models.
MODEL_FOLDER = "models"
MODEL_FILE = "mistral-3b-q4.gguf"
MODEL_PATH = os.path.join(MODEL_FOLDER, MODEL_FILE)

# Direct download link for the 3B model (Hugging Face "resolve" URL).
MODEL_URL = "https://huggingface.co/Tech-Meld/HX-Mistral-3B_v0.1-Q4_K_M-GGUF/resolve/main/hx-mistral-3b_v0.1.Q4_K_M.gguf"

# Ensure models folder exists
os.makedirs(MODEL_FOLDER, exist_ok=True)

# Download model if not already present.
# Download to a temporary ".part" name and rename only on success, so a
# failed or interrupted download cannot leave a partial file at MODEL_PATH
# that the existence check would mistake for the full model on the next run.
if not os.path.exists(MODEL_PATH):
    tmp_path = MODEL_PATH + ".part"
    with st.spinner("Downloading Mistral 3B model (~2GB), please wait..."):
        # gdown.download returns the output path on success, None on failure.
        downloaded = gdown.download(MODEL_URL, tmp_path, quiet=False)
    if downloaded is None or not os.path.exists(tmp_path):
        st.error("Model download failed — check the URL / network and reload the page.")
        st.stop()
    os.replace(tmp_path, MODEL_PATH)
    st.success("Model downloaded!")

# Streamlit UI
st.title("🟢 Offline AI Chatbot")

user_input = st.text_input("You:", "")

if st.button("Send") and user_input.strip() != "":
    with st.spinner("Generating response..."):
        # Run llama-run.exe with an argv list (shell=False): the user's text
        # is passed as a plain argument, never through a shell.
        result = subprocess.run(
            ["./llama-run.exe", MODEL_PATH, user_input],
            capture_output=True,
            text=True,
        )
    if result.returncode != 0:
        # Surface the failure instead of silently rendering an empty reply.
        st.error(
            f"llama-run.exe failed (exit {result.returncode}):\n"
            f"{result.stderr.strip()}"
        )
    else:
        st.text_area("AI:", value=result.stdout.strip(), height=200)
llama-run.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:841448441872bdc2089bd324531c7497fecf7675aa72bce31106a227b94b6a96
3
+ size 2073088
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ streamlit
2
+ gdown