LPX55 committed on
Commit
1a3d2c9
·
verified ·
1 Parent(s): f951c1f

Update app_v4.py

Browse files
Files changed (1) hide show
  1. app_v4.py +31 -44
app_v4.py CHANGED
@@ -59,7 +59,11 @@ try:
59
  dump_environment_info()
60
  except Exception as e:
61
  print(f"Failed to dump env info: {e}")
62
-
 
 
 
 
63
  def resize_image_to_max_side(image: Image, max_side_length=1024) -> Image:
64
  width, height = image.size
65
  ratio = min(max_side_length / width, max_side_length / height)
@@ -81,7 +85,10 @@ def combine_caption_focus(caption, focus):
81
  def generate_caption(control_image):
82
  try:
83
  if control_image is None:
84
- return "Waiting for control image..."
 
 
 
85
 
86
  # Resize the image to a maximum longest side of 1024 pixels
87
  control_image = resize_image_to_max_side(control_image, max_side_length=1024)
@@ -91,17 +98,20 @@ def generate_caption(control_image):
91
  detailed_caption = mcaption["caption"]
92
  print(f"Detailed caption: {detailed_caption}")
93
 
94
- return detailed_caption
95
  except Exception as e:
96
  print(f"Error generating caption: {e}")
97
- return "A detailed photograph"
98
 
99
  def generate_focus(control_image, focus_list):
100
  try:
101
  if control_image is None:
102
- return None
103
  if focus_list is None:
104
- return ""
 
 
 
105
 
106
  # Resize the image to a maximum longest side of 1024 pixels
107
  control_image = resize_image_to_max_side(control_image, max_side_length=1024)
@@ -110,10 +120,10 @@ def generate_focus(control_image, focus_list):
110
  focus_query = model.query(control_image, "Please provide a concise but illustrative description of the following area(s) of focus: " + focus_list)
111
  focus_description = focus_query["answer"]
112
  print(f"Areas of focus: {focus_description}")
113
- return focus_description
114
  except Exception as e:
115
  print(f"Error generating focus: {e}")
116
- return "highly detailed photo, raw photography."
117
 
118
  @spaces.GPU(duration=6, progress=gr.Progress(track_tqdm=True))
119
  @torch.no_grad()
@@ -149,7 +159,7 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
149
 
150
  def process_image(control_image, user_prompt, system_prompt, scale, steps,
151
  controlnet_conditioning_scale, guidance_scale, seed,
152
- guidance_end, temperature, top_p, max_new_tokens, log_prompt):
153
  # Initialize with empty caption
154
  final_prompt = user_prompt.strip()
155
  # If no user prompt provided, generate a caption first
@@ -210,6 +220,8 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
210
  controlnet_conditioning_scale = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")
211
  guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
212
  guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
 
 
213
  with gr.Row():
214
  with gr.Accordion("Auto-Caption settings", open=False, visible=False):
215
  system_prompt = gr.Textbox(
@@ -224,11 +236,6 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
224
  info="Higher values make the output more random, lower values make it more deterministic.",
225
  visible=False # Changed to visible
226
  )
227
- top_p_slider = gr.Slider(
228
- minimum=0.0, maximum=1.0, value=0.9, step=0.01,
229
- label="Top-p",
230
- visible=False # Changed to visible
231
- )
232
  max_tokens_slider = gr.Slider(
233
  minimum=1, maximum=2048, value=368, step=1,
234
  label="Max New Tokens",
@@ -238,39 +245,19 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
238
  log_prompt = gr.Checkbox(value=True, label="Log", visible=False) # Changed to visible
239
 
240
  gr.Markdown("**Tips:** 8 steps is all you need! Incredibly powerful tool, usage instructions coming soon.")
241
- with gr.Accordion("Help,I keep getting ZeroGPU errors.", open=False, elem_id="zgpu"):
242
  msg1 = gr.Markdown()
243
  try_btn = gr.LoginButton()
244
- try:
245
- x_ip_token = request.headers['x-ip-token']
246
- client = Client("LPX55/zerogpu-experiments", hf_token=huggingface_token, headers={"x-ip-token": x_ip_token})
247
- cresult = client.predict(
248
- n=3,
249
- api_name="/predict"
250
- )
251
- print(f"X TOKEN: {x_ip_token}")
252
- print(cresult)
253
- except:
254
- print("Guess we're just going to have to pretend that Spaces have been broken for almost a year now..")
255
 
256
- # result = client.predict(
257
- # image=handle_file('https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png'),
258
- # width=1024,
259
- # height=1024,
260
- # overlap_percentage=10,
261
- # num_inference_steps=8,
262
- # resize_option="Full",
263
- # custom_resize_percentage=50,
264
- # prompt_input="Hello!!",
265
- # alignment="Middle",
266
- # overlap_left=True,
267
- # overlap_right=True,
268
- # overlap_top=True,
269
- # overlap_bottom=True,
270
- # x_offset=0,
271
- # y_offset=0,
272
- # api_name="/infer"
273
  # )
 
274
  caption_state = gr.State()
275
  focus_state = gr.State()
276
  log_state = gr.State()
@@ -279,7 +266,7 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
279
  inputs=[
280
  control_image, prompt, system_prompt, scale, steps,
281
  controlnet_conditioning_scale, guidance_scale, seed,
282
- guidance_end, temperature_slider, top_p_slider, max_tokens_slider, log_prompt
283
  ],
284
  outputs=[log_state, generated_image, prompt]
285
  )
 
59
  dump_environment_info()
60
  except Exception as e:
61
  print(f"Failed to dump env info: {e}")
62
+
63
+ def get_image_dimensions(image: Image) -> str:
64
+ width, height = image.size
65
+ return f"Original Image Dimensions: {width}x{height}"
66
+
67
  def resize_image_to_max_side(image: Image, max_side_length=1024) -> Image:
68
  width, height = image.size
69
  ratio = min(max_side_length / width, max_side_length / height)
 
85
  def generate_caption(control_image):
86
  try:
87
  if control_image is None:
88
+ return "Waiting for control image...", "Original Image Dimensions: N/A"
89
+
90
+ # Get original dimensions
91
+ original_dimensions = get_image_dimensions(control_image)
92
 
93
  # Resize the image to a maximum longest side of 1024 pixels
94
  control_image = resize_image_to_max_side(control_image, max_side_length=1024)
 
98
  detailed_caption = mcaption["caption"]
99
  print(f"Detailed caption: {detailed_caption}")
100
 
101
+ return detailed_caption, original_dimensions
102
  except Exception as e:
103
  print(f"Error generating caption: {e}")
104
+ return "A detailed photograph", "Original Image Dimensions: N/A"
105
 
106
  def generate_focus(control_image, focus_list):
107
  try:
108
  if control_image is None:
109
+ return None, "Original Image Dimensions: N/A"
110
  if focus_list is None:
111
+ return "", "Original Image Dimensions: N/A"
112
+
113
+ # Get original dimensions
114
+ original_dimensions = get_image_dimensions(control_image)
115
 
116
  # Resize the image to a maximum longest side of 1024 pixels
117
  control_image = resize_image_to_max_side(control_image, max_side_length=1024)
 
120
  focus_query = model.query(control_image, "Please provide a concise but illustrative description of the following area(s) of focus: " + focus_list)
121
  focus_description = focus_query["answer"]
122
  print(f"Areas of focus: {focus_description}")
123
+ return focus_description, original_dimensions
124
  except Exception as e:
125
  print(f"Error generating focus: {e}")
126
+ return "highly detailed photo, raw photography.", "Original Image Dimensions: N/A"
127
 
128
  @spaces.GPU(duration=6, progress=gr.Progress(track_tqdm=True))
129
  @torch.no_grad()
 
159
 
160
  def process_image(control_image, user_prompt, system_prompt, scale, steps,
161
  controlnet_conditioning_scale, guidance_scale, seed,
162
+ guidance_end, temperature, max_new_tokens, log_prompt):
163
  # Initialize with empty caption
164
  final_prompt = user_prompt.strip()
165
  # If no user prompt provided, generate a caption first
 
220
  controlnet_conditioning_scale = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")
221
  guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
222
  guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
223
+ original_dimensions = gr.Markdown(value="Original Image Dimensions: N/A") # New output for dimensions
224
+
225
  with gr.Row():
226
  with gr.Accordion("Auto-Caption settings", open=False, visible=False):
227
  system_prompt = gr.Textbox(
 
236
  info="Higher values make the output more random, lower values make it more deterministic.",
237
  visible=False # Changed to visible
238
  )
 
 
 
 
 
239
  max_tokens_slider = gr.Slider(
240
  minimum=1, maximum=2048, value=368, step=1,
241
  label="Max New Tokens",
 
245
  log_prompt = gr.Checkbox(value=True, label="Log", visible=False) # Changed to visible
246
 
247
  gr.Markdown("**Tips:** 8 steps is all you need! Incredibly powerful tool, usage instructions coming soon.")
248
+ with gr.Accordion("Auth for those Getting ZeroGPU errors.", open=False, elem_id="zgpu"):
249
  msg1 = gr.Markdown()
250
  try_btn = gr.LoginButton()
 
 
 
 
 
 
 
 
 
 
 
251
 
252
+ # sus = ['x-zerogpu-token', 'x-zerogpu-uuid', 'x-proxied-host', 'x-proxied-path', 'x-proxied-replica', 'x-request-id', 'x-ip-token']
253
+ # x_ip_token = request.headers['X-IP-TOKEN']
254
+ # print(str(x_ip_token))
255
+ # client = Client("LPX55/zerogpu-experiments", hf_token=huggingface_token, headers={"x-ip-token": x_ip_token})
256
+ # cresult = client.predict(
257
+ # n=3,
258
+ # api_name="/predict"
 
 
 
 
 
 
 
 
 
 
259
  # )
260
+
261
  caption_state = gr.State()
262
  focus_state = gr.State()
263
  log_state = gr.State()
 
266
  inputs=[
267
  control_image, prompt, system_prompt, scale, steps,
268
  controlnet_conditioning_scale, guidance_scale, seed,
269
+ guidance_end, temperature_slider, max_tokens_slider, log_prompt
270
  ],
271
  outputs=[log_state, generated_image, prompt]
272
  )