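# Typhoon 2 Vision demo: a Gradio multimodal chat Space for
# scb10x/typhoon2-qwen2vl-7b-vision-instruct (Qwen2-VL based), streaming the
# model's reply token by token.
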
from transformers import (
    Qwen2VLForConditionalGeneration,
    AutoProcessor,
    TextIteratorStreamer,
)
from PIL import Image
from threading import Thread
import gradio as gr

# Load the vision-language model; device_map="auto" places it on the available GPU(s).
model_name = "scb10x/typhoon2-qwen2vl-7b-vision-instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)

# Bound the visual token budget: the processor rescales each image so its pixel
# count falls within [min_pixels, max_pixels]; in Qwen2-VL one visual token
# covers a 28x28 pixel patch.
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28
processor = AutoProcessor.from_pretrained(
    model_name, min_pixels=min_pixels, max_pixels=max_pixels
)
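
# Chat callback for gr.ChatInterface (tuples-format history): each entry is a
# (user, bot) pair, and an image upload appears as a turn whose user part is a
# (filepath,) tuple, e.g. roughly:
#   [(("/tmp/cat.png",), None), ("What is this?", "A cat.")]
# (illustrative; the exact empty-slot placeholder varies by Gradio version).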
def bot_streaming(message, history, max_new_tokens=512):
    txt = message["text"]
    messages = []
    images = []

    # Replay the conversation so far into the chat-template message format.
    for i, msg in enumerate(history):
        if isinstance(msg[0], tuple):
            # Image turn: fold in the text of the following entry as the user
            # prompt, then record the assistant reply from that same entry.
            messages.append(
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": history[i + 1][0]},
                        {"type": "image"},
                    ],
                }
            )
            messages.append(
                {
                    "role": "assistant",
                    "content": [{"type": "text", "text": history[i + 1][1]}],
                }
            )
            images.append(Image.open(msg[0][0]).convert("RGB"))
        elif isinstance(history[i - 1], tuple) and isinstance(msg[0], str):
            # Text entry already folded into the preceding image turn; skip it.
            pass
        elif isinstance(history[i - 1][0], str) and isinstance(msg[0], str):
            # Plain text exchange with no image attached.
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": msg[0]}]}
            )
            messages.append(
                {"role": "assistant", "content": [{"type": "text", "text": msg[1]}]}
            )
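
    # Build the current turn, attaching the newly submitted image if any.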
    if len(message["files"]) == 1:
        if isinstance(message["files"][0], str):  # example inputs: a plain path
            image = Image.open(message["files"][0]).convert("RGB")
        else:  # user uploads: a dict with a "path" key
            image = Image.open(message["files"][0]["path"]).convert("RGB")
        images.append(image)
        messages.append(
            {
                "role": "user",
                "content": [{"type": "text", "text": txt}, {"type": "image"}],
            }
        )
    else:
        messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
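
    # Render the prompt with the model's chat template, then tensorize the text
    # (and images, when present) and move everything to the GPU.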
    texts = processor.apply_chat_template(messages, add_generation_prompt=True)
    if not images:
        inputs = processor(text=texts, return_tensors="pt").to("cuda")
    else:
        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
    streamer = TextIteratorStreamer(
        processor, skip_special_tokens=True, skip_prompt=True
    )
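
    # Run generation on a background thread so decoded tokens can be yielded
    # to the UI as they arrive from the streamer.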
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer
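
# Wire the callback into a multimodal ChatInterface; the slider value is passed
# to bot_streaming as its max_new_tokens argument.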
demo = gr.ChatInterface(
    fn=bot_streaming,
    title="Typhoon 2 Vision",
    textbox=gr.MultimodalTextbox(),
    additional_inputs=[
        gr.Slider(
            minimum=512,
            maximum=1024,
            value=512,
            step=1,
            label="Maximum number of new tokens to generate",
        )
    ],
    cache_examples=False,
    stop_btn="Stop Generation",
    fill_height=True,
    multimodal=True,
)
demo.launch(debug=True)