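# Gradio demo for Typhoon 2 Vision (scb10x/typhoon2-qwen2vl-7b-vision-instruct),
# a Qwen2-VL-based vision-language model, with token-by-token streamed replies.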
from transformers import (
    Qwen2VLForConditionalGeneration,
    AutoProcessor,
    TextIteratorStreamer,
)
from PIL import Image
from threading import Thread
import gradio as gr
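
# Load the model weights once at startup. device_map="auto" places them on the
# available GPU(s); torch_dtype="auto" keeps the checkpoint's native dtype.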
model_name = "scb10x/typhoon2-qwen2vl-7b-vision-instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)
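
# Bound how the Qwen2-VL processor resizes images: each image is mapped to
# between 256 and 1280 patches of 28x28 pixels, trading visual detail against
# prompt length.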
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28
processor = AutoProcessor.from_pretrained(
    model_name, min_pixels=min_pixels, max_pixels=max_pixels
)
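
# Soft theme with a custom purple primary palette.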
theme = gr.themes.Soft(
    primary_hue=gr.themes.Color(
        c50="#f7f7fd",
        c100="#dfdef8",
        c200="#c4c1f2",
        c300="#a29eea",
        c400="#8f8ae6",
        c500="#756fe0",
        c600="#635cc1",
        c700="#4f4a9b",
        c800="#433f83",
        c900="#302d5e",
        c950="#302d5e",
    ),
    secondary_hue="rose",
    neutral_hue="stone",
)
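
# Stream a response for the latest message. `message` is a MultimodalTextbox
# payload ({"text": ..., "files": [...]}); `history` uses Gradio's tuples
# format, where an uploaded image appears as its own (filepath,) turn followed
# by the text turn it belongs to.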
def bot_streaming(message, history, max_new_tokens=512):
    txt = message["text"]
    messages = []
    images = []
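
    # Replay the prior turns into the chat-template message format, collecting
    # any previously uploaded images alongside the text.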
    for i, msg in enumerate(history):
        if isinstance(msg[0], tuple):
            # Image turn: pair the image with the text of the following turn.
            messages.append(
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": history[i + 1][0]},
                        {"type": "image"},
                    ],
                }
            )
            messages.append(
                {
                    "role": "assistant",
                    "content": [{"type": "text", "text": history[i + 1][1]}],
                }
            )
            images.append(Image.open(msg[0][0]).convert("RGB"))
        elif isinstance(history[i - 1][0], tuple) and isinstance(msg[0], str):
            # Text turn already consumed by the image turn above; skip it.
            pass
        elif isinstance(history[i - 1][0], str) and isinstance(msg[0], str):
            # Text-only exchange.
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": msg[0]}]}
            )
            messages.append(
                {"role": "assistant", "content": [{"type": "text", "text": msg[1]}]}
            )
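
    # Attach the current turn. Depending on the Gradio version, an uploaded
    # file arrives either as a bare path string or as a dict with a "path" key.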
if len(message["files"]) == 1:
if isinstance(message["files"][0], str):
image = Image.open(message["files"][0]).convert("RGB")
else:
image = Image.open(message["files"][0]["path"]).convert("RGB")
images.append(image)
messages.append(
{
"role": "user",
"content": [{"type": "text", "text": txt}, {"type": "image"}],
}
)
else:
messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
    texts = processor.apply_chat_template(messages, add_generation_prompt=True)
    if not images:
        inputs = processor(text=texts, return_tensors="pt").to("cuda")
    else:
        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
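
    # The Qwen2-VL processor forwards decode() to its tokenizer, so it can act
    # as the streamer's tokenizer directly.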
    streamer = TextIteratorStreamer(
        processor, skip_special_tokens=True, skip_prompt=True
    )
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
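
    # Run generation on a background thread so this generator can yield
    # partial output as soon as the streamer produces it.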
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer
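
# Multimodal chat UI; the slider (under "additional inputs") caps the number
# of tokens generated per reply.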
demo = gr.ChatInterface(
    fn=bot_streaming,
    title="Typhoon 2 Vision",
    textbox=gr.MultimodalTextbox(
        placeholder="Type a message or drag and drop an image",
    ),
    additional_inputs=[
        gr.Slider(
            minimum=512,
            maximum=1024,
            value=512,
            step=1,
            label="Maximum number of new tokens to generate",
        )
    ],
    cache_examples=False,
    stop_btn="Stop Generation",
    fill_height=True,
    multimodal=True,
    theme=theme,
)
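
# ssr_mode=False disables Gradio's server-side rendering.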
demo.launch(ssr_mode=False)