ninooo96 committed
Commit f516f8a · verified · 1 Parent(s): fbb49bf

Update app.py

Files changed (1)
  1. app.py +82 -16
app.py CHANGED
@@ -1,17 +1,83 @@
+ # import gradio as gr
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # # LLM model
+ # model_name = "swap-uniba/LLaMAntino-2-7b-hf-ITA"  # Replace with the desired model
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
+ # model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
+
+ # # Inference function
+ # def chat(input_text):
+ #     inputs = tokenizer(input_text, return_tensors="pt")
+ #     outputs = model.generate(**inputs, max_new_tokens=150)
+ #     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # # Gradio interface
+ # iface = gr.Interface(fn=chat, inputs="text", outputs="text")
+ # iface.launch()
+
+
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
-
- # LLM model
- model_name = "swap-uniba/LLaMAntino-2-7b-hf-ITA"  # Replace with the desired model
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
-
- # Inference function
- def chat(input_text):
-     inputs = tokenizer(input_text, return_tensors="pt")
-     outputs = model.generate(**inputs, max_new_tokens=150)
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- # Gradio interface
- iface = gr.Interface(fn=chat, inputs="text", outputs="text")
- iface.launch()
+ from huggingface_hub import InferenceClient
+
+ """
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ """
+ client = InferenceClient("swap-uniba/LLaMAntino-2-7b-hf-ITA")
+
+
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     # Rebuild the conversation in the OpenAI-style messages format.
+     messages = [{"role": "system", "content": system_message}]
+
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     # Stream tokens from the Inference API, yielding the partial response.
+     for chunk in client.chat_completion(
+         messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = chunk.choices[0].delta.content
+
+         if token:  # delta.content can be None on some stream chunks
+             response += token
+         yield response
+
+
+ """
+ For information on how to customize the ChatInterface, see the Gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
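
The new streaming path can also be smoke-tested from a plain script, without launching the Gradio UI. The following is a minimal sketch mirroring respond(), assuming the Inference API actually serves swap-uniba/LLaMAntino-2-7b-hf-ITA and that an access token is available (e.g. via the HF_TOKEN environment variable); the prompt text is only an illustrative example:

# Hypothetical standalone check of the streaming chat_completion call.
from huggingface_hub import InferenceClient

client = InferenceClient("swap-uniba/LLaMAntino-2-7b-hf-ITA")

messages = [
    {"role": "system", "content": "You are a friendly Chatbot."},
    {"role": "user", "content": "Ciao! Come stai?"},
]

response = ""
for chunk in client.chat_completion(messages, max_tokens=128, stream=True):
    token = chunk.choices[0].delta.content
    if token:  # delta.content can be None on some stream chunks
        response += token

print(response)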