LPX55 committed
Commit ab738b4 · verified · 1 Parent(s): 3f9b082

Update app_v2.py

Files changed (1):
  1. app_v2.py +10 -12
app_v2.py CHANGED
@@ -106,13 +106,14 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
     return image
 
 def process_image(control_image, user_prompt, system_prompt, scale, steps,
-                  controlnet_conditioning_scale, guidance_scale, seed,
-                  guidance_end, temperature, top_p, max_new_tokens, log_prompt):
+                  controlnet_conditioning_scale, guidance_scale, seed,
+                  guidance_end, temperature, top_p, max_new_tokens, log_prompt):
     # Initialize with empty caption
     final_prompt = user_prompt.strip()
 
     # If no user prompt provided, generate a caption first
     if not final_prompt:
+        # Generate caption
         caption_gen = caption(
             input_image=control_image,
             prompt=system_prompt,
@@ -121,17 +122,18 @@ def process_image(control_image, user_prompt, system_prompt, scale, steps,
             max_new_tokens=max_new_tokens,
             log_prompt=log_prompt
         )
+
         # Get the full caption by exhausting the generator
         generated_caption = ""
         for chunk in caption_gen:
             generated_caption += chunk
-            yield {"__type__": "update_caption", "caption": generated_caption}, None
+            yield generated_caption, None  # Update caption in real-time
 
         final_prompt = generated_caption
-        yield {"__type__": "update_caption", "caption": f"Using caption: {final_prompt}"}, None
+        yield f"Using caption: {final_prompt}", None
 
     # Show the final prompt being used
-    yield {"__type__": "update_caption", "caption": f"Generating with: {final_prompt}"}, None
+    yield f"Generating with: {final_prompt}", None
 
     # Generate the image
     try:
@@ -145,9 +147,9 @@ def process_image(control_image, user_prompt, system_prompt, scale, steps,
             seed=seed,
             guidance_end=guidance_end
         )
-        yield {"__type__": "update_caption", "caption": f"Completed! Used prompt: {final_prompt}"}, image
+        yield f"Completed! Used prompt: {final_prompt}", image
     except Exception as e:
-        yield {"__type__": "update_caption", "caption": f"Error: {str(e)}"}, None
+        yield f"Error: {str(e)}", None
         raise
 
 def handle_outputs(outputs):
@@ -207,13 +209,9 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as iface:
         inputs=[
             control_image, prompt, system_prompt, scale, steps,
             controlnet_conditioning_scale, guidance_scale, seed,
-            guidance_end, temperature_slider, top_p_slider,
-            max_tokens_slider, log_prompt
+            guidance_end, temperature, top_p, max_new_tokens, log_prompt
         ],
         outputs=[output_caption, generated_image]
-    ).then(
-        handle_outputs,
-        outputs=[output_caption, generated_image]
     )
 
     caption_button.click(
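
Note: the handler now yields plain (caption, image) tuples instead of {"__type__": "update_caption", ...} dicts, so Gradio can map each yield positionally onto outputs=[output_caption, generated_image] and stream intermediate updates directly, without the removed .then(handle_outputs, ...) step. A minimal, self-contained sketch of that generator-streaming pattern follows; the component names and the word-by-word loop are illustrative stand-ins, not code from app_v2.py.

import time

import gradio as gr
import numpy as np


def stream_status(prompt):
    # Generator event handler: every yield is a (textbox_value, image_value)
    # tuple that Gradio assigns positionally to the outputs listed in .click().
    caption = ""
    for word in prompt.split():
        caption += word + " "
        time.sleep(0.1)          # stand-in for incremental caption generation
        yield caption, None      # live-update the textbox, leave the image empty
    # Final yield delivers both the status text and a placeholder image.
    placeholder = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
    yield f"Completed! Used prompt: {caption.strip()}", placeholder


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    status = gr.Textbox(label="Status")  # plays the role of output_caption
    image = gr.Image(label="Result")     # plays the role of generated_image
    run = gr.Button("Run")
    run.click(stream_status, inputs=[prompt], outputs=[status, image])

if __name__ == "__main__":
    demo.launch()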