Futuresony committed on
Commit
0351b15
·
verified ·
1 Parent(s): e908972

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -7,7 +7,7 @@ client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
7
def format_alpaca_prompt(history, user_input, system_prompt):
    """Build an Alpaca/LLaMA-style prompt embedding the prior conversation.

    history: sequence of (user_message, assistant_reply) pairs.
    Returns the full prompt string ending with the "Assistant:" cue so the
    model continues as the assistant.
    """
    # Render each past exchange as a User/Assistant pair, one per line.
    rendered_turns = "\n".join(
        f"User: {turn[0]}\nAssistant: {turn[1]}" for turn in history
    )
    return f"{system_prompt}\n{rendered_turns}\nUser: {user_input}\nAssistant:"
12
 
13
  def respond(message, history, system_message, max_tokens, temperature, top_p):
@@ -26,7 +26,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
26
  # Update history
27
  history.append((message, cleaned_response))
28
 
29
- yield cleaned_response # Output only the answer
30
 
31
  demo = gr.ChatInterface(
32
  respond,
 
7
def format_alpaca_prompt(history, user_input, system_prompt):
    """Build an Alpaca/LLaMA-style prompt embedding the prior conversation.

    history: sequence of (user_message, assistant_reply) pairs.
    Returns the full prompt string, ending with the new user turn followed
    by a trailing newline (no explicit assistant cue).
    """
    # Render each past exchange as a User/Assistant pair, one per line.
    rendered_turns = "\n".join(
        f"User: {turn[0]}\nAssistant: {turn[1]}" for turn in history
    )
    return f"{system_prompt}\n{rendered_turns}\nUser: {user_input}\n"
12
 
13
  def respond(message, history, system_message, max_tokens, temperature, top_p):
 
26
  # Update history
27
  history.append((message, cleaned_response))
28
 
29
+ return cleaned_response # Output only the answer
30
 
31
  demo = gr.ChatInterface(
32
  respond,