from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

model_id = "teknium/OpenHermes-2-Mistral-7B"

# Load the tokenizer and model; device_map="auto" (which requires the
# accelerate package) places the fp16 weights on the available GPU(s).
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat(prompt):
    # Sample up to 256 new tokens; note that "generated_text" echoes the
    # prompt followed by the model's completion.
    response = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    return response[0]["generated_text"]

gr.Interface(fn=chat, inputs="text", outputs="text").launch()
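
# Optional variant (a sketch, not part of the original Space): OpenHermes
# models are trained on ChatML-style prompts, so wrapping the user input with
# the tokenizer's chat template (if one ships with this checkpoint) and a
# system message may give better replies than passing raw text. Assumes a
# transformers version that provides apply_chat_template; the function name
# and system message below are illustrative.
#
# def chat_templated(prompt):
#     messages = [
#         {"role": "system", "content": "You are a helpful assistant."},
#         {"role": "user", "content": prompt},
#     ]
#     text = tokenizer.apply_chat_template(
#         messages, tokenize=False, add_generation_prompt=True
#     )
#     out = pipe(text, max_new_tokens=256, do_sample=True, temperature=0.7)
#     # Strip the echoed prompt so only the assistant's reply is returned.
#     return out[0]["generated_text"][len(text):]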