Futuresony committed
Commit 7561eb5 · verified · 1 Parent(s): 74e3d53

Update app.py

Files changed (1):
  1. app.py +8 -11
app.py CHANGED
@@ -3,19 +3,15 @@ from huggingface_hub import InferenceClient
 
 client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
 
-def format_alpaca_prompt(user_input, system_prompt):
-    """Formats input in Alpaca/LLaMA style"""
-    prompt = f"""{system_prompt}
-
-### Instruction:
-{user_input}
-
-### Response:
-"""
+# Store chat history
+def format_alpaca_prompt(user_input, history, system_prompt):
+    """Formats input in Alpaca/LLaMA style with history"""
+    history_text = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history])
+    prompt = f"""{system_prompt}\n{history_text}\nUser: {user_input}\nAssistant:"""
     return prompt
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    formatted_prompt = format_alpaca_prompt(message, system_message)
+    formatted_prompt = format_alpaca_prompt(message, history, system_message)
 
     response = client.text_generation(
         formatted_prompt,
@@ -25,8 +21,9 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     )
 
     # ✅ Extract only the response
-    cleaned_response = response.split("### Response:")[-1].strip()
+    cleaned_response = response.strip()
 
+    history.append((message, cleaned_response))  # ✅ Store conversation history
     yield cleaned_response  # ✅ Output only the answer
 
 demo = gr.ChatInterface(
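
For reference, the new prompt builder as a runnable standalone sketch, fed a made-up two-turn history; the sample strings and system prompt are illustrative, not from the Space. The (user, assistant) pair shape matches how this respond() indexes history entries:

def format_alpaca_prompt(user_input, history, system_prompt):
    """Formats input in Alpaca/LLaMA style with history"""
    history_text = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history])
    prompt = f"""{system_prompt}\n{history_text}\nUser: {user_input}\nAssistant:"""
    return prompt

# Illustrative history: a list of (user, assistant) pairs
history = [("Hi", "Hello! How can I help?")]
print(format_alpaca_prompt("What can you do?", history, "You are a helpful assistant."))
# You are a helpful assistant.
# User: Hi
# Assistant: Hello! How can I help?
# User: What can you do?
# Assistant:

Note that with an empty history, history_text is an empty string, so the prompt carries a stray blank line between the system prompt and the first User: turn; harmless, but easy to avoid by joining only non-empty parts.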
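The arguments of the client.text_generation call fall between the two hunks, so they are not visible in this diff. A plausible shape for the elided call, assuming the standard huggingface_hub InferenceClient.text_generation keyword arguments and that respond()'s parameters map onto them one-to-one (an assumption, not something this diff confirms):

    response = client.text_generation(
        formatted_prompt,
        max_new_tokens=max_tokens,  # assumed mapping of respond()'s max_tokens
        temperature=temperature,
        top_p=top_p,
    )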