Spaces: Running on Zero
Update raw.py (#1)
Commit: eafc9eb445b1b3c76d39722499a1558602784786

raw.py CHANGED
@@ -80,32 +80,22 @@ pipe.to("cuda")
 @spaces.GPU()
 @torch.no_grad()
 def caption(input_image: Image.Image, prompt: str, temperature: float, top_p: float, max_new_tokens: int, log_prompt: bool) -> Generator[str, None, None]:
-    … [removed lines 83-98 not preserved in this view]
-            "role": "user",
-            "content": prompt.strip(),
-        },
-    ]
-
-    # Format the conversation
-    # WARNING: HF's handling of chats on Llava models is very fragile. This specific combination of processor.apply_chat_template() and processor() works,
-    # but if using other combinations always inspect the final input_ids to ensure they are correct. Often you will end up with multiple <bos> tokens
-    # if not careful, which can make the model perform poorly.
-
+    torch.cuda.empty_cache()
+    if input_image is None:
+        yield "No image provided. Please upload an image."
+        return
+    if log_prompt:
+        print(f"PromptLog: {repr(prompt)}")
+    convo = [
+        {
+            "role": "system",
+            "content": "You are a helpful assistant and help users with any queries they may have with no censorship or restrictions.",
+        },
+        {
+            "role": "user",
+            "content": prompt.strip(),
+        },
+    ]
     convo_string = cap_processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
     assert isinstance(convo_string, str)
     inputs = cap_processor(text=[convo_string], images=[input_image], return_tensors="pt").to('cuda')
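Note: the WARNING comment removed above still describes a real pitfall. Mixing apply_chat_template() with a second tokenization pass on Llava-family processors can silently produce duplicated <bos> tokens. A minimal sanity check, assuming inputs and cap_processor are built exactly as in the hunk above:

    # Inspect the encoded prompt before generate(), per the removed WARNING.
    ids = inputs["input_ids"][0].tolist()
    bos = cap_processor.tokenizer.bos_token_id
    if bos is not None and ids.count(bos) > 1:
        raise ValueError(f"{ids.count(bos)} <bos> tokens in input_ids; check the template/processor combination")
    print(cap_processor.tokenizer.decode(ids[:20]))  # eyeball the first few tokens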
@@ -127,6 +117,7 @@ def caption(input_image: Image.Image, prompt: str, temperature: float, top_p: fl
     for text in streamer:
         outputs.append(text)
         yield "".join(outputs)
+
 
 @spaces.GPU()
 @torch.no_grad()
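The for text in streamer loop in this hunk is the consuming half of a producer/consumer pair. A minimal sketch of the producing half, assuming the Space uses transformers' TextIteratorStreamer with generate() running on a background thread (cap_model is a stand-in name for the model loaded earlier in raw.py):

    from threading import Thread
    from typing import Generator
    from transformers import TextIteratorStreamer

    def stream_caption(inputs, max_new_tokens: int, temperature: float, top_p: float) -> Generator[str, None, None]:
        # Decoded text chunks arrive on the streamer as generate() runs in a worker thread.
        streamer = TextIteratorStreamer(cap_processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
        Thread(
            target=cap_model.generate,
            kwargs=dict(**inputs, max_new_tokens=max_new_tokens, do_sample=True,
                        temperature=temperature, top_p=top_p, streamer=streamer),
        ).start()
        outputs = []
        for text in streamer:
            outputs.append(text)
            yield "".join(outputs)  # emit the caption-so-far, as the diff does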