Update app.py
app.py
CHANGED
@@ -1,4 +1,4 @@
-import streamlit as st
+import gradio as gr
 from llama_cpp import Llama
 import os
 import requests
@@ -15,14 +15,14 @@ def download_model(url, save_path):
                 file.write(chunk)
         return "Model downloaded successfully."
     except Exception as e:
-        return f"Error
+        return f"Error: {e}"

 def load_model(model_path):
     try:
-        llm = Llama(model_path, n_threads=2, n_gpu_layers=0)
+        llm = Llama(model_path, n_threads=2, n_gpu_layers=0) #force cpu, and set threads.
         return llm
     except Exception as e:
-        return f"Error
+        return f"Error: {e}"

 def apply_chat_template(model_name, messages, system_prompt):
     model_name_lower = model_name.lower()
@@ -53,19 +53,17 @@ def generate_response(prompt, model):
         output = model(prompt, max_tokens=256)
         return output["choices"][0]["text"].strip()
     except Exception as e:
-        return f"Error
+        return f"Error: {e}"

 def inference(message, history, model_url, system_prompt):
     if model_url and not os.path.exists(MODEL_PATH):
         download_result = download_model(model_url, MODEL_PATH)
         if "Error" in download_result:
-            …
-            return
+            return history + [{"role": "assistant", "content": download_result}], history

     llm = load_model(MODEL_PATH)
     if isinstance(llm, str):
-        …
-        return
+        return history + [{"role": "assistant", "content": llm}], history

     messages = [{"role": "system", "content": system_prompt}]
     for item in history:
@@ -74,25 +72,18 @@ def inference(message, history, model_url, system_prompt):

     prompt = apply_chat_template(llm.model_path, messages, system_prompt)
     response = generate_response(prompt, llm)
-    …
+    history.append({"role": "assistant", "content": response})
+    return history, history

-…
+with gr.Blocks() as iface:
+    model_url_input = gr.Textbox(label="Model URL (GGUF)", placeholder="Enter GGUF model URL...")
+    system_prompt_input = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=3)
+    chatbot = gr.Chatbot(type="messages")
+    message = gr.Textbox(label="Message")
+    send_button = gr.Button("Send")
+    state = gr.State([])

-…
-…
+    send_button.click(inference, inputs=[message, state, model_url_input, system_prompt_input], outputs=[chatbot, state])
+    message.submit(inference, inputs=[message, state, model_url_input, system_prompt_input], outputs=[chatbot, state])

-…
-system_prompt = st.text_area("System Prompt", value=DEFAULT_SYSTEM_PROMPT)
-
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])
-
-if prompt := st.chat_input("Message"):
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    with st.chat_message("user"):
-        st.markdown(prompt)
-    history = [m for m in st.session_state.messages if m["role"] != "system"]
-    inference(prompt, history, model_url, system_prompt)
-    with st.chat_message("assistant"):
-        st.markdown(st.session_state.messages[-1]["content"])
+iface.launch()
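Note: the hunks above only show the tail of download_model. A minimal sketch of the full function, assuming a streamed requests download (the raise_for_status call, timeout, and chunk size are illustrative assumptions, not code from this commit):

import requests

def download_model(url, save_path):
    # Sketch of the elided head; the tail (file.write onward) matches the diff context.
    try:
        response = requests.get(url, stream=True, timeout=60)  # timeout is an assumed value
        response.raise_for_status()  # assumed; surfaces HTTP errors as exceptions
        with open(save_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):  # chunk size is an assumed value
                file.write(chunk)
        return "Model downloaded successfully."
    except Exception as e:
        return f"Error: {e}"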
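Only the signature and first line of apply_chat_template survive in the diff context. A sketch of what a filename-based template dispatch like this typically looks like; both branches and their template strings are assumptions for illustration, not the commit's actual templates:

def apply_chat_template(model_name, messages, system_prompt):
    # Illustrative sketch: the real template logic is elided from the diff.
    model_name_lower = model_name.lower()
    if "llama" in model_name_lower:  # assumed branch: Llama-2 style [INST] template
        prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
        for m in messages:
            if m["role"] == "user":
                prompt += f"{m['content']} [/INST] "
            elif m["role"] == "assistant":
                prompt += f"{m['content']} </s><s>[INST] "
        return prompt
    # Assumed fallback: ChatML-style template.
    prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
    for m in messages:
        if m["role"] != "system":
            prompt += f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
    return prompt + "<|im_start|>assistant\n"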
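The hunk header confirms the generate_response(prompt, model) signature, but its opening lines are elided. Completing it from the shown tail (only the def and try lines are assumed):

def generate_response(prompt, model):
    try:
        output = model(prompt, max_tokens=256)  # calling a Llama instance returns an OpenAI-style completion dict
        return output["choices"][0]["text"].strip()
    except Exception as e:
        return f"Error: {e}"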
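One design note on the new UI wiring: gr.Chatbot(type="messages") renders OpenAI-style role/content dicts, which is why inference can return the same history list to both the chatbot and the state (outputs=[chatbot, state]) without converting to (user, bot) tuples. A two-turn history in this format looks like:

history = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi, how can I help?"},
]
# inference(...) returns (history, history): one copy renders in the Chatbot,
# the other persists in gr.State for the next turn.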