Spaces: mashroo · Running on Zero

YoussefAnso committed · Commit 1f1e8f3 · 1 Parent(s): e92b0ed

Streamline Gradio interface by chaining event actions directly, improving error handling for image uploads, and maintaining API endpoint only for the final image generation step.
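As a rough illustration of the pattern this commit message describes, here is a minimal, hypothetical Gradio sketch; the component and function names (img_in, img_out, run_btn, check_upload, generate) are placeholders and not the identifiers used in app.py. Steps are chained directly off the button's click event, and only the final step is exposed as a named API endpoint.

import gradio as gr

def check_upload(img):
    # Raising gr.Error surfaces the message in the UI
    if img is None:
        raise gr.Error("No image uploaded!")
    return img

def generate(img):
    return img  # placeholder for the real generation step

with gr.Blocks() as demo:
    img_in = gr.Image(label="Input")
    img_out = gr.Image(label="Result")
    run_btn = gr.Button("Run")

    # Chain the steps directly; only the final step gets a named API endpoint
    run_btn.click(
        fn=check_upload, inputs=[img_in], outputs=[img_in], api_name=False
    ).then(
        fn=generate, inputs=[img_in], outputs=[img_out], api_name="generate"
    )

demo.launch()

Here api_name=False hides a step from the auto-generated API, while api_name="generate" publishes the final step under that name.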

Files changed (1)
  1. app.py +9 -25
app.py CHANGED
@@ -256,40 +256,24 @@ with gr.Blocks() as demo:
     )
     gr.Markdown("Note: Ensure that the input image is correctly pre-processed into a grey background, otherwise the results will be unpredictable.")
 
-    inputs = [
-        processed_image,
-        seed,
-        guidance_scale,
-        step,
-    ]
-    outputs = [
-        image_output,
-        xyz_output,
-        output_model,
-    ]
-
+    # Chain the events directly
     text_button.click(
-        fn=check_input_image,
+        fn=lambda x: gr.Error("No image uploaded!") if x is None else x,
         inputs=[image_input],
-        concurrency_limit=1,
-        api_name=None # Disable API endpoint for this step
-    ).success(
+        outputs=[image_input]
+    ).then(
         fn=preprocess_image,
         inputs=[image_input, background_choice, foreground_ratio, back_groud_color],
-        outputs=[processed_image],
-        concurrency_limit=1,
-        api_name=None # Disable API endpoint for this step
-    ).success(
+        outputs=[processed_image]
+    ).then(
         fn=gen_image,
-        inputs=inputs,
-        outputs=outputs,
-        concurrency_limit=1,
-        api_name="generate" # Only enable API for the final generation step
+        inputs=[processed_image, seed, guidance_scale, step],
+        outputs=[image_output, xyz_output, output_model]
     )
 
     # Launch the interface
     demo.launch(
         show_error=True,
         max_threads=1,
-        share=True # Required for Hugging Face Spaces
+        share=True
     )
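For reference, the old chain used .success() while the new one uses .then(): in Gradio, .success() triggers the next step only when the previous one finished without raising, whereas .then() triggers it regardless of the outcome. A small, hypothetical sketch of the two behaviors (all names are illustrative, not taken from app.py):

import gradio as gr

def require_image(img):
    if img is None:
        raise gr.Error("No image uploaded!")  # message is shown in the UI
    return img

with gr.Blocks() as demo:
    img = gr.Image()
    btn_strict = gr.Button("Strict chain")
    btn_loose = gr.Button("Loose chain")

    # .success(): the second step runs only if require_image did not raise
    btn_strict.click(fn=require_image, inputs=[img], outputs=[img]).success(
        fn=lambda x: x, inputs=[img], outputs=[img]
    )

    # .then(): the second step runs whether or not require_image raised
    btn_loose.click(fn=require_image, inputs=[img], outputs=[img]).then(
        fn=lambda x: x, inputs=[img], outputs=[img]
    )

demo.launch()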