# LLaMA-E / app.py
import torch
from peft import PeftModel
import transformers
import gradio as gr
import os
assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA support (LlamaTokenizer) was not found in this transformers installation.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
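# Read the Hugging Face access token from the environment; the meta-llama/Llama-2-7b-hf repository is gated.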
access_token = os.environ.get("HF_TOKEN")
BASE_MODEL = "meta-llama/Llama-2-7b-hf"
LORA_WEIGHTS = "DSMI/LLaMA-E"
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL, token=access_token)
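# Pick the best available device: CUDA if present, then Apple MPS, otherwise CPU.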
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:
    pass
print("Device: " + str(device))
if device == "cuda":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        load_in_8bit=False,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    model = PeftModel.from_pretrained(
        model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
    )
elif device == "mps":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        low_cpu_mem_usage=True,
        load_in_8bit=False,
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
    )
print("Model: " + str(model))
def generate_prompt(instruction, input=None):
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Response:"""
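# Cast to half precision on accelerators, switch to inference mode, and compile on PyTorch 2.x for faster generation.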
if device != "cpu":
    model.half()
model.eval()
if torch.__version__ >= "2":
    model = torch.compile(model)
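# Run one generation: build the prompt, tokenize it, generate with the requested decoding settings,
# and return only the text after the "### Response:" marker.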
def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=2,
    max_new_tokens=64,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    return output.split("### Response:")[1].strip().split("</s>")[0]
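# Expose the generator through a Gradio interface; the component order matches the evaluate() signature.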
g = gr.Interface(
    fn=evaluate,
    inputs=[
        gr.components.Textbox(
            lines=2, label="Instruction", placeholder="Generate an attractive advertisement for this product."
        ),
        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
        gr.components.Slider(minimum=1, maximum=4, step=1, value=1, label="Beams"),
        gr.components.Slider(
            minimum=1, maximum=512, step=1, value=128, label="Max tokens"
        ),
    ],
    outputs=[
        gr.Textbox(
            lines=5,
            label="Output",
        )
    ],
    title="🦙🛍️ LLaMA-E",
    description="LLaMA-E is crafted for e-commerce authoring tasks and specializes in generating product descriptions, advertisements, and other related content, as outlined in https://arxiv.org/abs/2308.04913#/. The model can be found at https://huggingface.co/DSMI/LLaMA-E#/. This demo runs on the CPU; we strongly recommend running the model locally on a GPU.",
)
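# Queue requests so that only one generation runs at a time, then launch the app.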
g.queue(concurrency_count=1)
g.launch()