LPX55 committed on
Commit
5ccb60d
·
1 Parent(s): 7bd8d03

feat: debugger implementation test

Browse files
Files changed (3) hide show
  1. app_v3.py +16 -7
  2. debug.py +58 -0
  3. requirements.txt +3 -1
app_v3.py CHANGED
@@ -4,7 +4,10 @@ import subprocess
4
  import torch
5
  import spaces
6
  import os
 
 
7
  import moondream as md
 
8
  from diffusers.utils import load_image
9
  from diffusers.hooks import apply_group_offloading
10
  from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
@@ -18,6 +21,8 @@ from threading import Thread
18
  from typing import Generator
19
  # from peft import PeftModel, PeftConfig
20
  import gradio as gr
 
 
21
 
22
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
23
  MAX_SEED = 1000000
@@ -65,6 +70,7 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
65
  control_guidance_start=0.0,
66
  control_guidance_end=guidance_end,
67
  ).images[0]
 
68
  return image
69
 
70
  def combine_caption_focus(caption, focus):
@@ -128,16 +134,19 @@ def process_image(control_image, user_prompt, system_prompt, scale, steps,
128
  seed=seed,
129
  guidance_end=guidance_end
130
  )
 
 
131
  yield f"Completed! Used prompt: {final_prompt}", image, final_prompt
132
  except Exception as e:
133
  yield f"Error: {str(e)}", None
134
- raise
135
-
136
  def handle_outputs(outputs):
137
  if isinstance(outputs, dict) and outputs.get("__type__") == "update_caption":
138
  return outputs["caption"], None
139
  return outputs
140
 
 
 
141
  with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
142
  gr.Markdown("⚠️ WIP SPACE - UNFINISHED & BUGGY")
143
  with gr.Row():
@@ -158,23 +167,23 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
158
  guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
159
  guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
160
  with gr.Row():
161
- with gr.Accordion("Generation settings", open=False):
162
  system_prompt = gr.Textbox(
163
  lines=4,
164
  value="Write a straightforward caption for this image. Begin with the main subject and medium. Mention pivotal elements—people, objects, scenery—using confident, definite language. Focus on concrete details like color, shape, texture, and spatial relationships. Show how elements interact. Omit mood and speculative wording. If text is present, quote it exactly. Note any watermarks, signatures, or compression artifacts. Never mention what's absent, resolution, or unobservable details. Vary your sentence structure and keep the description concise, without starting with 'This image is…' or similar phrasing.",
165
  label="System Prompt for Captioning",
166
- visible=True # Changed to visible
167
  )
168
  temperature_slider = gr.Slider(
169
  minimum=0.0, maximum=2.0, value=0.6, step=0.05,
170
  label="Temperature",
171
  info="Higher values make the output more random, lower values make it more deterministic.",
172
- visible=True # Changed to visible
173
  )
174
  top_p_slider = gr.Slider(
175
  minimum=0.0, maximum=1.0, value=0.9, step=0.01,
176
  label="Top-p",
177
- visible=True # Changed to visible
178
  )
179
  max_tokens_slider = gr.Slider(
180
  minimum=1, maximum=2048, value=368, step=1,
@@ -184,7 +193,7 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
184
  )
185
  log_prompt = gr.Checkbox(value=True, label="Log", visible=False) # Changed to visible
186
 
187
- gr.Markdown("**Tips:** 8 steps is all you need!")
188
 
189
  caption_state = gr.State()
190
  focus_state = gr.State()
 
4
  import torch
5
  import spaces
6
  import os
7
+ import datetime
8
+ import io
9
  import moondream as md
10
+ from datasets import load_dataset, Dataset, DatasetDict, Image as HFImage
11
  from diffusers.utils import load_image
12
  from diffusers.hooks import apply_group_offloading
13
  from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
 
21
  from typing import Generator
22
  # from peft import PeftModel, PeftConfig
23
  import gradio as gr
24
+ from huggingface_hub import CommitScheduler, HfApi
25
+ from debug import log_params, scheduler
26
 
27
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
28
  MAX_SEED = 1000000
 
70
  control_guidance_start=0.0,
71
  control_guidance_end=guidance_end,
72
  ).images[0]
73
+
74
  return image
75
 
76
  def combine_caption_focus(caption, focus):
 
134
  seed=seed,
135
  guidance_end=guidance_end
136
  )
137
+ with scheduler.lock():
138
+ log_params(final_prompt, scale, steps, controlnet_conditioning_scale, guidance_scale, seed, guidance_end, control_image, image)
139
  yield f"Completed! Used prompt: {final_prompt}", image, final_prompt
140
  except Exception as e:
141
  yield f"Error: {str(e)}", None
142
+
 
143
  def handle_outputs(outputs):
144
  if isinstance(outputs, dict) and outputs.get("__type__") == "update_caption":
145
  return outputs["caption"], None
146
  return outputs
147
 
148
+
149
+
150
  with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
151
  gr.Markdown("⚠️ WIP SPACE - UNFINISHED & BUGGY")
152
  with gr.Row():
 
167
  guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
168
  guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
169
  with gr.Row():
170
+ with gr.Accordion("Auto-Caption settings", open=False, visible=False):
171
  system_prompt = gr.Textbox(
172
  lines=4,
173
  value="Write a straightforward caption for this image. Begin with the main subject and medium. Mention pivotal elements—people, objects, scenery—using confident, definite language. Focus on concrete details like color, shape, texture, and spatial relationships. Show how elements interact. Omit mood and speculative wording. If text is present, quote it exactly. Note any watermarks, signatures, or compression artifacts. Never mention what's absent, resolution, or unobservable details. Vary your sentence structure and keep the description concise, without starting with 'This image is…' or similar phrasing.",
174
  label="System Prompt for Captioning",
175
+ visible=False # Changed to visible
176
  )
177
  temperature_slider = gr.Slider(
178
  minimum=0.0, maximum=2.0, value=0.6, step=0.05,
179
  label="Temperature",
180
  info="Higher values make the output more random, lower values make it more deterministic.",
181
+ visible=False # Changed to visible
182
  )
183
  top_p_slider = gr.Slider(
184
  minimum=0.0, maximum=1.0, value=0.9, step=0.01,
185
  label="Top-p",
186
+ visible=False # Changed to visible
187
  )
188
  max_tokens_slider = gr.Slider(
189
  minimum=1, maximum=2048, value=368, step=1,
 
193
  )
194
  log_prompt = gr.Checkbox(value=True, label="Log", visible=False) # Changed to visible
195
 
196
+ gr.Markdown("**Tips:** 8 steps is all you need! Incredibly powerful tool, usage instructions coming soon.")
197
 
198
  caption_state = gr.State()
199
  focus_state = gr.State()
debug.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # logging.py
2
+
3
+ import os
4
+ import uuid
5
+ import time
6
+ from huggingface_hub import CommitScheduler, HfApi
7
+ from PIL import Image
8
+
9
+ APP_VERSION = "0_3"
10
+ HF_DATASET_REPO = "LPX55/upscaler_logs" # Change to your dataset repo
11
+ HF_TOKEN = os.environ.get("HUGGINGFACE_TOKEN") # Make sure this is set in your environment
12
+
13
+ LOG_DIR = "logs_" + APP_VERSION
14
+ IMAGE_DIR = os.path.join(LOG_DIR, "upscaler")
15
+
16
+ LOG_FILE = os.path.join(LOG_DIR, f"{int(time.time())}-logs.csv")
17
+ os.makedirs(IMAGE_DIR, exist_ok=True)
18
+
19
+
20
+ scheduler = CommitScheduler(
21
+ repo_id=HF_DATASET_REPO,
22
+ repo_type="dataset",
23
+ folder_path=LOG_DIR,
24
+ every=5,
25
+ private=True,
26
+ token=HF_TOKEN,
27
+ allow_patterns=["*.csv", "images/*.png", "images/*.webp", "images/*.jpg", "images/*.jpeg"],
28
+ commit_message="DEBUG_MODE=1",
29
+ path_in_repo="v" + APP_VERSION
30
+
31
+ )
32
+
def log_params(
    prompt, scale, steps, controlnet_conditioning_scale, guidance_scale, seed, guidance_end,
    before_image, after_image, user=None
):
    """Persist one generation run: save before/after images and append a CSV row.

    Args:
        prompt: Final text prompt used for generation (logged verbatim).
        scale, steps, controlnet_conditioning_scale, guidance_scale, seed,
        guidance_end: Generation parameters, logged verbatim.
        before_image: Input image; assumed PIL-compatible (must support
            ``.save(path)``) — TODO confirm against caller.
        after_image: Upscaled output image, same assumption.
        user: Optional identifier; recorded as "anonymous" when omitted.

    The caller is expected to hold the CommitScheduler lock while calling this
    so the background commit never uploads a half-written file.
    """
    # Hoisted out of the `with` block: no reason to re-run the import while
    # the log file is held open.
    import csv

    # Unique filenames so successive/concurrent runs never collide.
    before_id = str(uuid.uuid4()) + "_before.png"
    after_id = str(uuid.uuid4()) + "_after.png"
    before_path = os.path.join(IMAGE_DIR, before_id)
    after_path = os.path.join(IMAGE_DIR, after_id)
    before_image.save(before_path)
    after_image.save(after_path)

    # Write the header row only on first creation of this process's CSV.
    is_new = not os.path.exists(LOG_FILE)
    with open(LOG_FILE, "a", newline='') as f:
        writer = csv.writer(f)
        if is_new:
            writer.writerow([
                "timestamp", "user", "prompt", "scale", "steps", "controlnet_conditioning_scale",
                "guidance_scale", "seed", "guidance_end", "before_image", "after_image"
            ])
        writer.writerow([
            time.strftime("%Y-%m-%dT%H:%M:%S"),
            user or "anonymous",
            prompt, scale, steps, controlnet_conditioning_scale,
            guidance_scale, seed, guidance_end, before_path, after_path
        ])
requirements.txt CHANGED
@@ -16,4 +16,6 @@ bitsandbytes
16
  pydantic==2.10.6
17
  attention_map_diffusers
18
  moondream
19
- peft
 
 
 
16
  pydantic==2.10.6
17
  attention_map_diffusers
18
  moondream
19
+ peft
20
+ datasets[vision]
21
+ huggingface_hub