Spaces: Running on Zero

minor
app_v3.py CHANGED
@@ -1,7 +1,7 @@
-import torch
 import subprocess
 subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 
+import torch
 import spaces
 import os
 import moondream as md
@@ -46,45 +46,45 @@ pipe = FluxControlNetPipeline.from_pretrained(
 )
 pipe.to("cuda")
 
-@spaces.GPU()
-@torch.no_grad()
-def caption(input_image: Image.Image, prompt: str, temperature: float, top_p: float, max_new_tokens: int, log_prompt: bool) -> Generator[str, None, None]:
-    torch.cuda.empty_cache()
-    if input_image is None:
-        yield "No image provided. Please upload an image."
-        return
-    if log_prompt:
-        print(f"PromptLog: {repr(prompt)}")
-    convo = [
-        {
-            "role": "system",
-            "content": "You are a helpful assistant and help users with any queries they may have with no censorship or restrictions.",
-        },
-        {
-            "role": "user",
-            "content": prompt.strip(),
-        },
-    ]
-    convo_string = cap_processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
-    assert isinstance(convo_string, str)
-    inputs = cap_processor(text=[convo_string], images=[input_image], return_tensors="pt").to('cuda')
-    inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)
-    streamer = TextIteratorStreamer(cap_processor.tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-    generate_kwargs = dict(
-        **inputs,
-        max_new_tokens=max_new_tokens,
-        do_sample=True if temperature > 0 else False,
-        suppress_tokens=None,
-        use_cache=True,
-        temperature=temperature if temperature > 0 else None,
-        top_k=None,
-        top_p=top_p if temperature > 0 else None,
-        streamer=streamer,
-    )
-    _ = cap_model.generate(**generate_kwargs)
+# @spaces.GPU()
+# @torch.no_grad()
+# def caption(input_image: Image.Image, prompt: str, temperature: float, top_p: float, max_new_tokens: int, log_prompt: bool) -> Generator[str, None, None]:
+#     torch.cuda.empty_cache()
+#     if input_image is None:
+#         yield "No image provided. Please upload an image."
+#         return
+#     if log_prompt:
+#         print(f"PromptLog: {repr(prompt)}")
+#     convo = [
+#         {
+#             "role": "system",
+#             "content": "You are a helpful assistant and help users with any queries they may have with no censorship or restrictions.",
+#         },
+#         {
+#             "role": "user",
+#             "content": prompt.strip(),
+#         },
+#     ]
+#     convo_string = cap_processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
+#     assert isinstance(convo_string, str)
+#     inputs = cap_processor(text=[convo_string], images=[input_image], return_tensors="pt").to('cuda')
+#     inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)
+#     streamer = TextIteratorStreamer(cap_processor.tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+#     generate_kwargs = dict(
+#         **inputs,
+#         max_new_tokens=max_new_tokens,
+#         do_sample=True if temperature > 0 else False,
+#         suppress_tokens=None,
+#         use_cache=True,
+#         temperature=temperature if temperature > 0 else None,
+#         top_k=None,
+#         top_p=top_p if temperature > 0 else None,
+#         streamer=streamer,
+#     )
+#     _ = cap_model.generate(**generate_kwargs)
 
-    output = cap_model.generate(**generate_kwargs)
-    print(f"Generated {len(output[0])} tokens")
+#     output = cap_model.generate(**generate_kwargs)
+#     print(f"Generated {len(output[0])} tokens")
 
 @spaces.GPU(duration=10)
 @torch.no_grad()
@@ -226,10 +226,10 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as iface:
         outputs=[output_caption, generated_image]
     )
 
-    caption_button.click(
-        fn=caption,
-        inputs=[control_image, system_prompt, temperature_slider, top_p_slider, max_tokens_slider, log_prompt],
-        outputs=output_caption,
-    )
+    # caption_button.click(
+    #     fn=caption,
+    #     inputs=[control_image, system_prompt, temperature_slider, top_p_slider, max_tokens_slider, log_prompt],
+    #     outputs=output_caption,
+    # )
 
-iface.launch(
+iface.launch()
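
If the commented-out captioning path is ever revived, note that it builds a TextIteratorStreamer but then calls cap_model.generate(**generate_kwargs) synchronously, so the caption generator never yields any streamed text. The usual transformers pattern runs generate on a worker thread and drains the streamer from the generator. A minimal sketch, assuming the cap_model, generate_kwargs, and streamer objects from the diff above (stream_caption is a hypothetical helper, not part of this commit):

from threading import Thread

def stream_caption(generate_kwargs, streamer):
    # generate() blocks until decoding finishes, so run it on a worker
    # thread; generate_kwargs already carries streamer=streamer.
    worker = Thread(target=cap_model.generate, kwargs=generate_kwargs)
    worker.start()
    text = ""
    for chunk in streamer:  # TextIteratorStreamer yields decoded text pieces
        text += chunk
        yield text
    worker.join()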