from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

model_id = "teknium/OpenHermes-2-Mistral-7B"

# Load the tokenizer, and the model in half precision with automatic device placement (requires accelerate).
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat(prompt):
    # Sample up to 256 new tokens; the pipeline's output includes the original prompt text.
    response = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    return response[0]["generated_text"]
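
# Optional sketch (not part of the original app): OpenHermes-2-Mistral-7B is trained on the
# ChatML prompt format, so wrapping the user message with the tokenizer's chat template
# usually gives better replies than passing raw text. This assumes apply_chat_template is
# available (transformers >= 4.34) and that the repo ships a ChatML chat_template; otherwise
# the <|im_start|>/<|im_end|> wrapping would have to be built by hand.
def chat_chatml(prompt):
    messages = [{"role": "user", "content": prompt}]
    formatted = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    response = pipe(formatted, max_new_tokens=256, do_sample=True,
                    temperature=0.7, return_full_text=False)
    return response[0]["generated_text"]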

gr.Interface(fn=chat, inputs="text", outputs="text").launch()