felfri committed (verified)
Commit a7dd8ff · Parent(s): 5f063c2

Update app.py
Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -34,11 +34,11 @@ with gr.Blocks() as demo:
     with gr.Column():
         gr.Markdown('## [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) Generations')
         choice1 = gr.Dropdown(professions, label="Choose a profession", multiselect=False, interactive=True)
-        images1 = gr.Gallery(label="Images")
+        images1 = gr.Gallery(label="Images", columns=4, height="auto")
     with gr.Column():
         gr.Markdown('## Fair Diffusion Generations')
         choice2 = gr.Dropdown(professions, label="Choose a profession", multiselect=False, interactive=True)
-        images2 = gr.Gallery(label="Images")
+        images2 = gr.Gallery(label="Images", columns=4, height="auto")
     gr.Markdown("We present a novel strategy, called **Fair Diffusion**, to attenuate biases after the deployment of generative text-to-image models. Specifically, we demonstrate shifting a bias, based on human instructions, in any direction yielding arbitrarily new proportions for, e.g., identity groups. As our empirical evaluation demonstrates, this introduced control enables instructing generative image models on fairness, with no data filtering and additional training required. For the full paper by Friedrich et al., see [here](https://arxiv.org/pdf/2302.10893.pdf).")
 
     choice1.change(open_stable_ims, choice1, [images1])
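
For context, here is a minimal, self-contained sketch of the dropdown-to-gallery pattern this commit touches. It assumes a recent Gradio release (roughly 3.41 or later), where `columns` and `height` became `gr.Gallery` constructor arguments in place of the older `Gallery().style(grid=...)` call; the `professions` list and the `open_stable_ims` stub below are placeholders for the app's real objects, not part of this repository.

```python
import gradio as gr

# Placeholder data; the real app defines its own professions list.
professions = ["doctor", "teacher", "firefighter"]

def open_stable_ims(profession):
    # Stub: the real app returns pre-generated images for the chosen
    # profession. Returning an empty list simply clears the gallery here.
    return []

with gr.Blocks() as demo:
    choice = gr.Dropdown(professions, label="Choose a profession",
                         multiselect=False, interactive=True)
    # `columns` and `height` are constructor arguments in newer Gradio,
    # matching the two lines this commit changes.
    images = gr.Gallery(label="Images", columns=4, height="auto")
    # Re-render the gallery whenever the dropdown selection changes.
    choice.change(open_stable_ims, choice, [images])

if __name__ == "__main__":
    demo.launch()
```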