Dread2Poor committed
Commit e023594 · verified · 1 Parent(s): 7d612eb

Update app.py

Files changed (1):
  app.py  +25 -17
app.py CHANGED
@@ -1,10 +1,9 @@
-
-import gradio as gr
+import streamlit as st
 from llama_cpp import Llama
 import os
 import requests
 
-MODEL_PATH = "irixium-12b-model_stock-q4_k_m.gguf"
+MODEL_PATH = "model.gguf"
 DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."
 
 def download_model(url, save_path):
@@ -60,11 +59,13 @@ def inference(message, history, model_url, system_prompt):
     if model_url and not os.path.exists(MODEL_PATH):
         download_result = download_model(model_url, MODEL_PATH)
         if "Error" in download_result:
-            return history + [{"role": "assistant", "content": download_result}], history
+            st.session_state.messages.append({"role": "assistant", "content": download_result})
+            return
 
     llm = load_model(MODEL_PATH)
     if isinstance(llm, str):
-        return history + [{"role": "assistant", "content": llm}], history
+        st.session_state.messages.append({"role": "assistant", "content": llm})
+        return
 
     messages = [{"role": "system", "content": system_prompt}]
     for item in history:
@@ -73,18 +74,25 @@ def inference(message, history, model_url, system_prompt):
 
     prompt = apply_chat_template(llm.model_path, messages, system_prompt)
     response = generate_response(prompt, llm)
-    history.append({"role": "assistant", "content": response})
-    return history, history
+    st.session_state.messages.append({"role": "assistant", "content": response})
+
+st.title("llama.cpp Chat")
+
+if "messages" not in st.session_state:
+    st.session_state.messages = []
 
-with gr.Blocks() as iface:
-    model_url_input = gr.Textbox(label="Model URL (GGUF)", placeholder="Enter GGUF model URL...")
-    system_prompt_input = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=3)
-    chatbot = gr.Chatbot(type="messages")
-    message = gr.Textbox(label="Message")
-    send_button = gr.Button("Send")
-    state = gr.State([]) #This line is very important.
+model_url = st.text_input("Model URL (GGUF)", placeholder="Enter GGUF model URL...")
+system_prompt = st.text_area("System Prompt", value=DEFAULT_SYSTEM_PROMPT)
 
-    send_button.click(inference, inputs=[message, state, model_url_input, system_prompt_input], outputs=[chatbot, state])
-    message.submit(inference, inputs=[message, state, model_url_input, system_prompt_input], outputs=[chatbot, state])
 
-iface.launch()
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+if prompt := st.chat_input("Message"):
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+    history = [m for m in st.session_state.messages if m["role"] != "system"]
+    inference(prompt, history, model_url, system_prompt)
+    with st.chat_message("assistant"):
+        st.markdown(st.session_state.messages[-1]["content"])
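Note on the helpers referenced above: the inference path calls download_model, load_model, apply_chat_template, and generate_response, which live elsewhere in app.py and are untouched by this commit. The sketch below is only a plausible reading of their contracts, inferred from the call sites in the diff (failures reported as strings containing "Error", load_model returning either a model handle or an error string, apply_chat_template producing a flat prompt string); every body, parameter, and default here is an assumption, not the repository's actual code.

import os
import requests
from llama_cpp import Llama

def download_model(url, save_path):
    # Assumed implementation: stream the GGUF file to disk and report
    # failures as a string containing "Error", which is what the caller checks.
    try:
        with requests.get(url, stream=True, timeout=60) as resp:
            resp.raise_for_status()
            with open(save_path, "wb") as f:
                for chunk in resp.iter_content(chunk_size=8192):
                    f.write(chunk)
        return "Model downloaded."
    except Exception as exc:
        return f"Error downloading model: {exc}"

def load_model(model_path):
    # Assumed implementation: return a Llama handle, or an error string
    # (the diff checks isinstance(llm, str) to detect failure).
    try:
        return Llama(model_path=model_path, n_ctx=4096)
    except Exception as exc:
        return f"Error loading model: {exc}"

def apply_chat_template(model_path, messages, system_prompt):
    # Placeholder formatting: flatten the message list into a plain prompt.
    # The real helper may pick a template based on model_path; that logic is not shown here.
    lines = [f"{m['role']}: {m['content']}" for m in messages]
    lines.append("assistant:")
    return "\n".join(lines)

def generate_response(prompt, llm):
    # Assumed implementation: a single llama-cpp-python completion call.
    output = llm(prompt, max_tokens=512, stop=["user:"])
    return output["choices"][0]["text"].strip()

Unlike the Gradio version, which ended with iface.launch(), the Streamlit script has no launch call: it is started with "streamlit run app.py", and because Streamlit re-executes the whole script on every interaction, the chat history is kept in st.session_state rather than in a gr.State object.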