import os
from collections.abc import Iterator
from threading import Thread

# Imported for its side effect: transformers needs the compressed-tensors
# package available in order to load the W8A8-INT8 quantized checkpoint.
import compressed_tensors  # noqa: F401
import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

DESCRIPTION = """\
# shisa-v2-unphi-14b-W8A8-INT8

This Space demonstrates the [shisa-v2-unphi-14b-W8A8-INT8](https://huggingface.co/shisa-ai/shisa-v2-unphi-14b-W8A8-INT8) bilingual (JA/EN) chat model.
"""

LICENSE = """
"""

if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

if torch.cuda.is_available():
    model_id = "shisa-ai/shisa-v2-unphi-14b-W8A8-INT8"
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
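
# On Hugging Face ZeroGPU Spaces, the @spaces.GPU decorator attaches a GPU to
# the process for the duration of each call to the decorated function.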
@spaces.GPU
def generate(
    message: str,
    chat_history: list[dict],
    system_prompt: str = "",
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.1,
) -> Iterator[str]:
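    """Stream a chat completion for `message` given the running history.

    Yields the accumulated response text after each new chunk so the
    Gradio UI can render it incrementally.
    """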
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    conversation += chat_history
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)
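
    # model.generate() blocks until generation finishes, so it runs on a
    # background thread while TextIteratorStreamer yields decoded text
    # chunks here as soon as they are produced.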
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        {"input_ids": input_ids},
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    try:
        for text in streamer:
            outputs.append(text)
            yield "".join(outputs)
    except Exception as e:
        yield f"An error occurred during generation: {e}"
    finally:
        if not outputs:
            yield ""
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.1,
        ),
    ],
    stop_btn=None,
    examples=[
        ["How much wood would a woodchuck chuck if a woodchuck could chuck wood?"],
        ["Can you briefly explain what the Python programming language is?"],
        ["日本の桜の季節について教えてください。"],
        ["あなたの AI について俳句を書いていただけますか?"],
    ],
    cache_examples=False,
    type="messages",
)
with gr.Blocks(css_paths="style.css", fill_height=True) as demo:
    gr.Markdown(DESCRIPTION)
    chat_interface.render()
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
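
# Local usage sketch (an assumption, not part of the Space config): with a
# CUDA GPU available, something like
#   pip install torch transformers accelerate compressed-tensors gradio spaces
#   python app.py
# should serve the demo at http://localhost:7860. Outside a ZeroGPU Space,
# the @spaces.GPU decorator is a no-op.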