Update app.py
app.py
CHANGED
@@ -1,35 +0,0 @@
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-import gradio as gr
-
-model_id = "teknium/OpenHermes-2.5-Mistral-7B"
-
-# Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    device_map="auto",
-    torch_dtype=torch.float16,
-)
-
-pipe = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    max_new_tokens=256,
-    do_sample=True,
-    temperature=0.7,
-)
-
-# Gradio interface
-def generate_text(prompt):
-    output = pipe(prompt)[0]["generated_text"]
-    return output
-
-gr.Interface(
-    fn=generate_text,
-    inputs=gr.Textbox(label="prompt"),
-    outputs=gr.Textbox(label="output"),
-    title="Amside AI",
-    theme="soft",
-).launch()
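
A note on the deleted code: OpenHermes-2.5-Mistral-7B is trained on the ChatML format, so passing the raw textbox prompt to the pipeline works but does not match the model's expected chat layout, and the pipeline echoes the prompt back in its output. A minimal sketch of both fixes, assuming the `tokenizer` and `pipe` objects defined above and that the model repo ships a chat template (the example message is illustrative):

# Sketch: format a user turn with the model's chat template (ChatML for
# OpenHermes-2.5) before handing it to the pipeline defined in app.py.
# Assumes `tokenizer` and `pipe` from above; the message text is made up.
messages = [{"role": "user", "content": "Hello, who are you?"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# return_full_text=False returns only the completion, not the echoed prompt.
print(pipe(prompt, return_full_text=False)[0]["generated_text"])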