Hodely committed on
Commit 28eb343 · verified · 1 parent: fdc83ac

Update app.py

Files changed (1)
  1. app.py +32 -10
app.py CHANGED
@@ -1,14 +1,36 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+import gradio as gr
 
-model_id = "mistralai/Mistral-7B-Instruct-v0.2"
-tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
-model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
+model_id = "teknium/OpenHermes-2.5-Mistral-7B"
 
-def generate_response(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True, temperature=0.7)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+# Load the model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    device_map="auto",
+    torch_dtype=torch.float16,
+)
+
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=256,
+    do_sample=True,
+    temperature=0.7,
+)
+
+# Gradio interface
+def generate_text(prompt):
+    output = pipe(prompt)[0]["generated_text"]
+    return output
+
+gr.Interface(
+    fn=generate_text,
+    inputs=gr.Textbox(label="prompt"),
+    outputs=gr.Textbox(label="output"),
+    title="Amside AI",
+    theme="soft",
+).launch()
 
-import gradio as gr
-gr.Interface(fn=generate_response, inputs="text", outputs="text").launch()
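Not part of the commit, but for reviewers: a minimal sketch of exercising the new generation stack without launching Gradio. It assumes transformers >= 4.34 (for apply_chat_template) and that the OpenHermes-2.5 repo ships its ChatML chat template; the example prompt is hypothetical.

    # Sketch: smoke-test the updated pipeline outside the Gradio UI.
    # Assumes transformers >= 4.34 and a GPU for fp16 via device_map="auto".
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

    model_id = "teknium/OpenHermes-2.5-Mistral-7B"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", torch_dtype=torch.float16
    )
    pipe = pipeline(
        "text-generation", model=model, tokenizer=tokenizer,
        max_new_tokens=256, do_sample=True, temperature=0.7,
    )

    # OpenHermes-2.5 is ChatML-tuned, so wrapping the user text with the
    # tokenizer's chat template usually yields cleaner completions than
    # the bare string the app passes today (hypothetical example prompt).
    messages = [{"role": "user", "content": "What does a tokenizer do?"}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    print(pipe(prompt, return_full_text=False)[0]["generated_text"])

The same apply_chat_template call could be folded into generate_text itself if the Space is meant to behave like a chat assistant rather than a raw text completer.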