Futuresony committed on
Commit 52c4d17 · verified · 1 Parent(s): 740b145

Update app.py

Files changed (1)
  1. app.py +21 -27
app.py CHANGED
@@ -1,19 +1,9 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# Alpaca-style prompt format
-ALPACA_PROMPT = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-
-### Instruction:
-{instruction}
-
-### Input:
-{input}
-
-### Response:
 """
-
-# Initialize Hugging Face Inference Client
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
 client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
 
 
@@ -25,22 +15,18 @@ def respond(
     temperature,
     top_p,
 ):
-    """Formats messages into Alpaca-style prompts and sends them to the model."""
     messages = [{"role": "system", "content": system_message}]
 
-    for user_msg, bot_msg in history:
-        if user_msg:
-            messages.append({"role": "user", "content": user_msg})
-        if bot_msg:
-            messages.append({"role": "assistant", "content": bot_msg})
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
 
-    # Apply Alpaca-style formatting
-    formatted_message = ALPACA_PROMPT.format(instruction=system_message, input=message)
-    messages.append({"role": "user", "content": formatted_message})
+    messages.append({"role": "user", "content": message})
 
     response = ""
-
-    # Send request to the inference API
+
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -49,22 +35,30 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
+
         response += token
         yield response
 
 
-# Gradio ChatInterface
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a helpful AI assistant.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max tokens"),
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
-            minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
         ),
     ],
 )
 
+
 if __name__ == "__main__":
     demo.launch()
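
Aside from the cosmetic renaming in the history loop, the functional change in respond() is the final user turn: the previous version wrapped system_message and the incoming message in an Alpaca-style template, while the updated version appends the message unchanged and leaves chat formatting to the chat_completion backend. Below is a minimal sketch of that difference; the example text is hypothetical and the ALPACA_PROMPT template is copied from the removed lines, so treat it as an illustration rather than part of app.py.

# Sketch only, not part of app.py: the last user turn under each version of respond().
ALPACA_PROMPT = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:
"""

system_message = "You are a friendly Chatbot."
message = "What can you do?"  # hypothetical user input

# Before this commit: the user turn was one Alpaca-formatted string that
# duplicated the system message inside the prompt body.
old_user_turn = {
    "role": "user",
    "content": ALPACA_PROMPT.format(instruction=system_message, input=message),
}

# After this commit: the user turn is the raw message; prompt construction is
# left to the serving side's own chat template.
new_user_turn = {"role": "user", "content": message}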