Futuresony committed on
Commit e0b749f · verified · 1 Parent(s): 0e0aaae

Update app.py

Files changed (1)
  1. app.py +51 -39
app.py CHANGED
@@ -1,53 +1,65 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# HuggingFace API Client
-client = InferenceClient(model="Futuresony/future_ai_12_10_2024.gguf")
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
 
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    """
-    Function to process user messages and return AI responses.
-    """
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
     messages = [{"role": "system", "content": system_message}]
-
-    # Append conversation history
-    for user, assistant in history:
-        if user:
-            messages.append({"role": "user", "content": user})
-        if assistant:
-            messages.append({"role": "assistant", "content": assistant})
-
-    # Add user message
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
     messages.append({"role": "user", "content": message})
 
     response = ""
-    try:
-        # Stream AI-generated responses
-        for chunk in client.chat_completion(
-            messages=messages,
-            max_tokens=max_tokens,
-            stream=True,
-            temperature=temperature,
-            top_p=top_p,
-        ):
-            token = chunk.choices[0].delta.content
-            response += token
-            yield response
-    except Exception as e:
-        yield f"Error: {str(e)}"
-
-# Gradio Chat Interface
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
+
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
 demo = gr.ChatInterface(
-    fn=respond,
-    inputs=[
-        gr.Textbox(value="You are a helpful assistant.", label="System Prompt"),
-        gr.Slider(1, 2048, value=512, step=1, label="Max Tokens"),
-        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P"),
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
     ],
 )
 
-# Run Gradio App
+
 if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+    demo.launch()
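A note on the new streaming loop: with `stream=True`, `client.chat_completion` yields OpenAI-style chunks, and the loop concatenates each `delta.content` into the growing reply. Two things worth flagging: the loop variable `message` shadows the `message` argument (harmless here, since the argument was already appended to `messages`, but easy to trip over), and the rewrite drops the old try/except, so a chunk whose `delta.content` is `None` (streams can open with a role-only chunk) would crash `response += token`. A minimal defensive sketch, not part of this commit; `stream_reply` is a hypothetical helper:

from huggingface_hub import InferenceClient

client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")

def stream_reply(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    # Hypothetical helper, sketch only: the same streaming loop with a
    # None-guard and a loop variable that does not shadow the user message.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # skip None/empty deltas instead of concatenating them
            response += token
            yield response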
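On the interface side, `gr.ChatInterface` passes each component in `additional_inputs` positionally to `respond` after `(message, history)`: the Textbox feeds `system_message` and the three sliders feed `max_tokens`, `temperature`, and `top_p`. The old call used `inputs=`, which is not how ChatInterface accepts extra controls, and the bare `demo.launch()` now leaves host and port to Gradio's defaults (on Spaces these come from the GRADIO_SERVER_NAME/GRADIO_SERVER_PORT environment variables) instead of hard-coding 0.0.0.0:7860. For reference, a sketch of the payload the history loop assembles, with hypothetical example values:

# Sketch only: what respond() builds from Gradio's tuple-style history
# before calling chat_completion. All values below are made-up examples.
system_message = "You are a friendly Chatbot."
history = [("Hi!", "Hello! How can I help?")]  # one (user, assistant) pair
message = "Summarize our chat."

messages = [{"role": "system", "content": system_message}]
for user_turn, bot_turn in history:
    if user_turn:
        messages.append({"role": "user", "content": user_turn})
    if bot_turn:
        messages.append({"role": "assistant", "content": bot_turn})
messages.append({"role": "user", "content": message})

print(messages)
# [{'role': 'system', 'content': 'You are a friendly Chatbot.'},
#  {'role': 'user', 'content': 'Hi!'},
#  {'role': 'assistant', 'content': 'Hello! How can I help?'},
#  {'role': 'user', 'content': 'Summarize our chat.'}]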