Update app.py
Browse files
app.py
CHANGED
@@ -1,370 +1,297 @@
|
|
|
|
1 |
import os
|
|
|
|
|
2 |
import gradio as gr
|
3 |
-
import
|
4 |
-
import
|
5 |
import torch
|
|
|
6 |
from PIL import Image
|
7 |
-
import spaces
|
8 |
-
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
|
9 |
-
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
|
10 |
-
from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
|
11 |
-
import copy
|
12 |
-
import random
|
13 |
-
import time
|
14 |
-
from transformers import pipeline
|
15 |
-
import sqlite3
|
16 |
-
from datetime import datetime
|
17 |
-
|
18 |
-
# ๋ฐ์ดํฐ๋ฒ ์ด์ค ์ด๊ธฐํ
|
19 |
-
def init_db():
|
20 |
-
conn = sqlite3.connect('gallery.db')
|
21 |
-
c = conn.cursor()
|
22 |
-
c.execute('''CREATE TABLE IF NOT EXISTS images
|
23 |
-
(id INTEGER PRIMARY KEY AUTOINCREMENT,
|
24 |
-
model_name TEXT,
|
25 |
-
prompt TEXT,
|
26 |
-
image_path TEXT,
|
27 |
-
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)''')
|
28 |
-
conn.commit()
|
29 |
-
conn.close()
|
30 |
-
|
31 |
-
# ์ด๋ฏธ์ง ์ ์ฅ ํจ์
|
32 |
-
def save_image(image, model_name, prompt):
|
33 |
-
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
34 |
-
filename = f"gallery_{timestamp}.png"
|
35 |
-
image.save(os.path.join("gallery", filename))
|
36 |
-
|
37 |
-
conn = sqlite3.connect('gallery.db')
|
38 |
-
c = conn.cursor()
|
39 |
-
c.execute("INSERT INTO images (model_name, prompt, image_path) VALUES (?, ?, ?)",
|
40 |
-
(model_name, prompt, filename))
|
41 |
-
conn.commit()
|
42 |
-
conn.close()
|
43 |
-
|
44 |
-
# ๊ฐค๋ฌ๋ฆฌ ์ด๋ฏธ์ง ๋ก๋ ํจ์
|
45 |
-
def load_gallery_images():
|
46 |
-
conn = sqlite3.connect('gallery.db')
|
47 |
-
c = conn.cursor()
|
48 |
-
c.execute("SELECT model_name, prompt, image_path FROM images ORDER BY created_at DESC")
|
49 |
-
rows = c.fetchall()
|
50 |
-
conn.close()
|
51 |
-
|
52 |
-
return [(os.path.join("gallery", row[2]), f"{row[0]}: {row[1]}") for row in rows]
|
53 |
-
|
54 |
-
# CPU์์ ์คํ๋๋ ๋ฒ์ญ๊ธฐ ์ด๊ธฐํ
|
55 |
-
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device=-1)
|
56 |
-
|
57 |
-
# ํ๋กฌํํธ ์ฒ๋ฆฌ ํจ์
|
58 |
-
def process_prompt(prompt):
|
59 |
-
if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in prompt):
|
60 |
-
translated = translator(prompt)[0]['translation_text']
|
61 |
-
return prompt, translated
|
62 |
-
return prompt, prompt
|
63 |
-
|
64 |
|
65 |
-
|
66 |
-
|
67 |
-
|
|
|
68 |
|
69 |
-
# ๊ธฐ๋ณธ ๋ชจ๋ธ ์ด๊ธฐํ
|
70 |
-
dtype = torch.bfloat16
|
71 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
72 |
-
|
|
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
|
78 |
-
MAX_SEED =
|
|
|
79 |
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
def __enter__(self):
|
87 |
-
self.start_time = time.time()
|
88 |
-
return self
|
89 |
|
90 |
-
|
91 |
-
|
92 |
-
self.elapsed_time = self.end_time - self.start_time
|
93 |
-
if self.activity_name:
|
94 |
-
print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
|
95 |
-
else:
|
96 |
-
print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
|
97 |
-
|
98 |
-
def update_selection(evt: gr.SelectData, width, height):
|
99 |
-
selected_lora = loras[evt.index]
|
100 |
-
new_placeholder = f"{selected_lora['title']}๋ฅผ ์ํ ํ๋กฌํํธ๋ฅผ ์
๋ ฅํ์ธ์"
|
101 |
-
lora_repo = selected_lora["repo"]
|
102 |
-
updated_text = f"### ์ ํ๋จ: [{lora_repo}](https://huggingface.co/{lora_repo}) โจ"
|
103 |
-
if "aspect" in selected_lora:
|
104 |
-
if selected_lora["aspect"] == "portrait":
|
105 |
-
width = 768
|
106 |
-
height = 1024
|
107 |
-
elif selected_lora["aspect"] == "landscape":
|
108 |
-
width = 1024
|
109 |
-
height = 768
|
110 |
-
else:
|
111 |
-
width = 1024
|
112 |
-
height = 1024
|
113 |
-
return (
|
114 |
-
gr.update(placeholder=new_placeholder),
|
115 |
-
updated_text,
|
116 |
-
evt.index,
|
117 |
-
width,
|
118 |
-
height,
|
119 |
-
)
|
120 |
-
|
121 |
-
@spaces.GPU(duration=70)
|
122 |
-
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
|
123 |
-
pipe.to("cuda")
|
124 |
-
generator = torch.Generator(device="cuda").manual_seed(seed)
|
125 |
-
with calculateDuration("์ด๋ฏธ์ง ์์ฑ"):
|
126 |
-
# Generate image
|
127 |
-
for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
|
128 |
-
prompt=prompt_mash,
|
129 |
-
num_inference_steps=steps,
|
130 |
-
guidance_scale=cfg_scale,
|
131 |
-
width=width,
|
132 |
-
height=height,
|
133 |
-
generator=generator,
|
134 |
-
joint_attention_kwargs={"scale": lora_scale},
|
135 |
-
output_type="pil",
|
136 |
-
good_vae=good_vae,
|
137 |
-
):
|
138 |
-
yield img
|
139 |
-
|
140 |
-
@spaces.GPU(duration=70)
|
141 |
-
def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
|
142 |
-
if selected_index is None:
|
143 |
-
raise gr.Error("์งํํ๊ธฐ ์ ์ LoRA๋ฅผ ์ ํํด์ผ ํฉ๋๋ค.")
|
144 |
|
145 |
-
|
|
|
|
|
|
|
146 |
|
147 |
-
|
148 |
-
lora_path = selected_lora["repo"]
|
149 |
-
trigger_word = selected_lora["trigger_word"]
|
150 |
-
if(trigger_word):
|
151 |
-
if "trigger_position" in selected_lora:
|
152 |
-
if selected_lora["trigger_position"] == "prepend":
|
153 |
-
prompt_mash = f"{trigger_word} {english_prompt}"
|
154 |
-
else:
|
155 |
-
prompt_mash = f"{english_prompt} {trigger_word}"
|
156 |
-
else:
|
157 |
-
prompt_mash = f"{trigger_word} {english_prompt}"
|
158 |
-
else:
|
159 |
-
prompt_mash = english_prompt
|
160 |
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
# LoRA ๊ฐ์ค์น ๋ก๋
|
165 |
-
with calculateDuration(f"{selected_lora['title']}์ LoRA ๊ฐ์ค์น ๋ก๋"):
|
166 |
-
if "weights" in selected_lora:
|
167 |
-
pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
|
168 |
-
else:
|
169 |
-
pipe.load_lora_weights(lora_path)
|
170 |
-
|
171 |
-
# ์ฌํ์ฑ์ ์ํ ์๋ ์ค์
|
172 |
-
with calculateDuration("์๋ ๋ฌด์์ํ"):
|
173 |
-
if randomize_seed:
|
174 |
-
seed = random.randint(0, MAX_SEED)
|
175 |
-
|
176 |
-
image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
|
177 |
|
178 |
-
#
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
186 |
|
187 |
-
|
188 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
189 |
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
|
212 |
-
except Exception as e:
|
213 |
-
print(e)
|
214 |
-
gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
|
215 |
-
raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
|
216 |
-
return split_link[1], link, safetensors_name, trigger_word, image_url
|
217 |
-
|
218 |
-
def check_custom_model(link):
|
219 |
-
if(link.startswith("https://")):
|
220 |
-
if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
|
221 |
-
link_split = link.split("huggingface.co/")
|
222 |
-
return get_huggingface_safetensors(link_split[1])
|
223 |
-
else:
|
224 |
-
return get_huggingface_safetensors(link)
|
225 |
-
|
226 |
-
def add_custom_lora(custom_lora):
|
227 |
-
global loras
|
228 |
-
if(custom_lora):
|
229 |
-
try:
|
230 |
-
title, repo, path, trigger_word, image = check_custom_model(custom_lora)
|
231 |
-
print(f"Loaded custom LoRA: {repo}")
|
232 |
-
card = f'''
|
233 |
-
<div class="custom_lora_card">
|
234 |
-
<span>Loaded custom LoRA:</span>
|
235 |
-
<div class="card_internal">
|
236 |
-
<img src="{image}" />
|
237 |
-
<div>
|
238 |
-
<h3>{title}</h3>
|
239 |
-
<small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
|
240 |
-
</div>
|
241 |
-
</div>
|
242 |
-
</div>
|
243 |
-
'''
|
244 |
-
existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
|
245 |
-
if(not existing_item_index):
|
246 |
-
new_item = {
|
247 |
-
"image": image,
|
248 |
-
"title": title,
|
249 |
-
"repo": repo,
|
250 |
-
"weights": path,
|
251 |
-
"trigger_word": trigger_word
|
252 |
-
}
|
253 |
-
print(new_item)
|
254 |
-
existing_item_index = len(loras)
|
255 |
-
loras.append(new_item)
|
256 |
-
|
257 |
-
return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
|
258 |
-
except Exception as e:
|
259 |
-
gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
|
260 |
-
return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=True), gr.update(), "", None, ""
|
261 |
-
else:
|
262 |
-
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
|
263 |
-
|
264 |
-
def remove_custom_lora():
|
265 |
-
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
|
266 |
-
|
267 |
-
run_lora.zerogpu = True
|
268 |
-
|
269 |
-
css = """
|
270 |
footer {
|
271 |
visibility: hidden;
|
272 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
273 |
"""
|
274 |
|
275 |
-
|
276 |
-
|
277 |
-
os.makedirs('gallery')
|
278 |
-
|
279 |
-
# ๋ฐ์ดํฐ๋ฒ ์ด์ค ์ด๊ธฐํ
|
280 |
-
init_db()
|
281 |
-
|
282 |
-
with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
|
283 |
-
selected_index = gr.State(None)
|
284 |
|
285 |
-
with
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
|
292 |
-
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
|
300 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
301 |
)
|
302 |
-
|
303 |
-
custom_lora = gr.Textbox(label="์ปค์คํ
LoRA", info="LoRA Hugging Face ๊ฒฝ๋ก", placeholder="multimodalart/vintage-ads-flux")
|
304 |
-
gr.Markdown("[FLUX LoRA ๋ชฉ๋ก ํ์ธ](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
|
305 |
-
custom_lora_info = gr.HTML(visible=False)
|
306 |
-
custom_lora_button = gr.Button("์ปค์คํ
LoRA ์ ๊ฑฐ", visible=False)
|
307 |
-
with gr.Column():
|
308 |
-
progress_bar = gr.Markdown(elem_id="progress",visible=False)
|
309 |
-
result = gr.Image(label="์์ฑ๋ ์ด๋ฏธ์ง")
|
310 |
-
original_prompt_display = gr.Textbox(label="์๋ณธ ํ๋กฌํํธ")
|
311 |
-
english_prompt_display = gr.Textbox(label="์์ด ํ๋กฌํํธ")
|
312 |
|
313 |
-
|
314 |
-
with gr.Accordion("๊ณ ๊ธ ์ค์ ", open=False):
|
315 |
-
with gr.Column():
|
316 |
-
with gr.Row():
|
317 |
-
cfg_scale = gr.Slider(label="CFG ์ค์ผ์ผ", minimum=1, maximum=20, step=0.5, value=3.5)
|
318 |
-
steps = gr.Slider(label="์คํ
", minimum=1, maximum=50, step=1, value=28)
|
319 |
-
|
320 |
-
with gr.Row():
|
321 |
-
width = gr.Slider(label="๋๋น", minimum=256, maximum=1536, step=64, value=1024)
|
322 |
-
height = gr.Slider(label="๋์ด", minimum=256, maximum=1536, step=64, value=1024)
|
323 |
-
|
324 |
-
with gr.Row():
|
325 |
-
randomize_seed = gr.Checkbox(True, label="์๋ ๋ฌด์์ํ")
|
326 |
-
seed = gr.Slider(label="์๋", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
|
327 |
-
lora_scale = gr.Slider(label="LoRA ์ค์ผ์ผ", minimum=0, maximum=3, step=0.01, value=0.95)
|
328 |
|
329 |
-
|
330 |
-
|
331 |
-
|
332 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
333 |
columns=3,
|
334 |
-
|
|
|
|
|
|
|
335 |
height="auto"
|
336 |
)
|
337 |
-
|
338 |
-
|
339 |
-
gallery
|
340 |
-
|
341 |
-
|
342 |
-
|
343 |
-
|
344 |
-
|
345 |
-
|
346 |
-
|
347 |
-
|
348 |
-
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
|
349 |
-
)
|
350 |
-
|
351 |
-
custom_lora_button.click(
|
352 |
-
remove_custom_lora,
|
353 |
-
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
|
354 |
)
|
355 |
|
356 |
-
|
357 |
-
|
358 |
-
|
359 |
-
|
360 |
-
|
361 |
-
|
|
|
|
|
362 |
)
|
363 |
|
364 |
-
|
365 |
-
|
366 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
367 |
)
|
368 |
|
369 |
-
|
370 |
-
|
|
|
1 |
+
import random
|
2 |
import os
|
3 |
+
import uuid
|
4 |
+
from datetime import datetime
|
5 |
import gradio as gr
|
6 |
+
import numpy as np
|
7 |
+
import spaces
|
8 |
import torch
|
9 |
+
from diffusers import DiffusionPipeline
|
10 |
from PIL import Image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
+
# Create permanent storage directory
SAVE_DIR = "saved_images"  # Gradio will handle the persistence
if not os.path.exists(SAVE_DIR):
    os.makedirs(SAVE_DIR, exist_ok=True)

# Pick GPU when available; the pipeline is moved to this device below.
device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "openfree/pierre-auguste-renoir"  # Changed to Renoir model

# Load the FLUX base model once at import time and attach the Renoir LoRA.
# NOTE(review): this downloads/loads multi-GB weights at module import —
# expected for a Spaces app, but confirm if reused as a library.
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipeline.load_lora_weights(adapter_id)
pipeline = pipeline.to(device)

# Upper bound for the seed slider / randomized seeds (fits in int32).
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
28 |
+
def save_generated_image(image, prompt):
    """Persist *image* under SAVE_DIR with a unique name and log its prompt.

    The file name combines a second-resolution timestamp with a short UUID
    fragment so concurrent saves cannot collide. A pipe-separated record is
    appended to SAVE_DIR/metadata.txt for each saved image.

    Returns the full path of the saved PNG.
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    token = str(uuid.uuid4())[:8]
    target = os.path.join(SAVE_DIR, f"{stamp}_{token}.png")

    image.save(target)

    # Append one metadata record per image.
    # NOTE(review): the first field is the literal string "(unknown)" —
    # presumably a model-name placeholder; confirm the intended record format.
    log_path = os.path.join(SAVE_DIR, "metadata.txt")
    with open(log_path, "a", encoding="utf-8") as log:
        log.write(f"(unknown)|{prompt}|{stamp}\n")

    return target
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
+
def load_generated_images():
    """Return paths of all saved images in SAVE_DIR, newest first.

    Returns an empty list when the directory does not exist yet.
    """
    if not os.path.exists(SAVE_DIR):
        return []

    image_suffixes = ('.png', '.jpg', '.jpeg', '.webp')
    paths = []
    for entry in os.listdir(SAVE_DIR):
        if entry.endswith(image_suffixes):
            paths.append(os.path.join(SAVE_DIR, entry))

    # Newest first, ordered by filesystem creation time.
    return sorted(paths, key=os.path.getctime, reverse=True)
|
55 |
+
|
56 |
+
def load_predefined_images():
    """Return the bundled Renoir sample gallery images (assets/r1..r6.webp)."""
    return [f"assets/r{index}.webp" for index in range(1, 7)]
|
66 |
+
|
67 |
+
@spaces.GPU(duration=120)
def inference(
    prompt: str,
    seed: int,
    randomize_seed: bool,
    width: int,
    height: int,
    guidance_scale: float,
    num_inference_steps: int,
    lora_scale: float,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
):
    """Run one FLUX+Renoir-LoRA generation.

    Returns a 3-tuple: (generated PIL image, seed actually used, refreshed
    list of gallery image paths) — matching the Gradio outputs wiring.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    rng = torch.Generator(device=device).manual_seed(seed)

    outputs = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=rng,
        joint_attention_kwargs={"scale": lora_scale},
    )
    picture = outputs.images[0]

    # Persist the result so it appears in the Gallery tab.
    save_generated_image(picture, prompt)

    return picture, seed, load_generated_images()
|
98 |
+
|
99 |
+
# Prompt presets surfaced via gr.Examples below; every prompt ends with
# "[trigger]", the LoRA activation token described in the UI's model notes.
examples = [
    "Renoir's painting of a lively outdoor dance scene at Moulin de la Galette, with dappled sunlight filtering through trees, illuminating well-dressed Parisians enjoying a summer afternoon. Couples dance while others socialize at tables, capturing the joie de vivre of 1870s Montmartre. [trigger]",
    "Renoir's intimate portrait of a young woman with rosy cheeks and lips, soft blonde hair, and a gentle smile. She wears a vibrant blue dress against a background of lush flowers and greenery, showcasing his mastery of depicting feminine beauty with warm, luminous skin tones. [trigger]",
    "Renoir's painting of two young girls seated at a piano, captured in his distinctive soft focus style. The scene shows one girl playing while the other stands beside her, both wearing delicate white dresses. The interior setting features warm colors and loose brushwork typical of his mature period. [trigger]",
    "Renoir's painting of an elegant boating party, with fashionably dressed men and women relaxing on a restaurant terrace overlooking the Seine. The scene captures the leisurely atmosphere of 1880s French society, with sparkling water reflections and a bright, airy palette of blues, whites, and warm flesh tones. [trigger]",
    "Renoir's painting of a sun-dappled garden scene with children playing. The composition features vibrant flowers in full bloom, lush greenery, and Renoir's characteristic luminous treatment of sunlight filtering through foliage, creating patches of brilliant color across the canvas. [trigger]",
    "Renoir's depiction of bathers by a riverbank, with several female figures arranged in a harmonious composition. The painting showcases his later style with fuller figures rendered in pearlescent flesh tones against a backdrop of shimmering water and verdant landscape, demonstrating his unique approach to the nude figure in nature. [trigger]"
]
|
107 |
+
|
108 |
+
# Brighter custom CSS with vibrant colors
# Injected into the app via gr.Blocks(css=custom_css) below; also hides the
# default Gradio footer.
custom_css = """
:root {
    --color-primary: #FF9E6C;
    --color-secondary: #FFD8A9;
}
footer {
    visibility: hidden;
}
.gradio-container {
    background: linear-gradient(to right, #FFF4E0, #FFEDDB);
}
.title {
    color: #E25822 !important;
    font-size: 2.5rem !important;
    font-weight: 700 !important;
    text-align: center;
    margin: 1rem 0;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.1);
}
.subtitle {
    color: #2B3A67 !important;
    font-size: 1.2rem !important;
    text-align: center;
    margin-bottom: 2rem;
}
.model-description {
    background-color: rgba(255, 255, 255, 0.7);
    border-radius: 10px;
    padding: 20px;
    margin: 20px 0;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    border-left: 5px solid #E25822;
}
button.primary {
    background-color: #E25822 !important;
}
button:hover {
    transform: translateY(-2px);
    box-shadow: 0 5px 15px rgba(0,0,0,0.1);
}
.tabs {
    margin-top: 20px;
}
.gallery {
    background-color: rgba(255, 255, 255, 0.5);
    border-radius: 10px;
    padding: 10px;
}
"""
|
158 |
|
159 |
+
# Build the Gradio UI: a Generation tab (prompt + advanced sliders) and a
# Gallery tab (previously generated images plus bundled Renoir samples).
with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
    gr.HTML('<div class="title">Pierre-Auguste Renoir STUDIO</div>')

    # Model description with the requested content
    with gr.Group(elem_classes="model-description"):
        gr.Markdown("""
    # About This Model

    This studio features the **Pierre-Auguste Renoir** artistic style model from [openfree/pierre-auguste-renoir](https://huggingface.co/openfree/pierre-auguste-renoir).

    Pierre-Auguste Renoir (1841-1919) was a leading painter in the development of the Impressionist style. His paintings are notable for their vibrant light and saturated color, focusing on people in intimate and candid compositions. The warmth of his palette often emphasized the sensual beauty of his subjects, particularly women.

    I developed a flux-based learning model trained on a curated collection of high-resolution masterpieces from renowned global artists. This LoRA fine-tuning process leveraged the exceptional quality of open-access imagery released by prestigious institutions including the Art Institute of Chicago. The resulting model demonstrates remarkable capability in capturing the nuanced artistic techniques and stylistic elements across diverse historical art movements.

    **How to use**: Simply enter a prompt describing a scene in Renoir's style and add [trigger] at the end.
    """)

    with gr.Tabs(elem_classes="tabs") as tabs:
        with gr.Tab("Generation"):
            with gr.Column(elem_id="col-container"):
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt (add [trigger] at the end)",
                        container=False,
                    )
                    run_button = gr.Button("Generate", variant="primary", scale=0)

                result = gr.Image(label="Result", show_label=False)

                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

                    with gr.Row():
                        width = gr.Slider(
                            label="Width",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
                            value=1024,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
                            value=768,
                        )

                    with gr.Row():
                        guidance_scale = gr.Slider(
                            label="Guidance scale",
                            minimum=0.0,
                            maximum=10.0,
                            step=0.1,
                            value=3.5,
                        )
                        num_inference_steps = gr.Slider(
                            label="Number of inference steps",
                            minimum=1,
                            maximum=50,
                            step=1,
                            value=30,
                        )
                        lora_scale = gr.Slider(
                            label="LoRA scale",
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            value=1.0,
                        )

                # NOTE(review): outputs=[result, seed] here only pre-fills the
                # prompt; examples are not cached/run eagerly in this wiring.
                gr.Examples(
                    examples=examples,
                    inputs=[prompt],
                    outputs=[result, seed],
                )

        with gr.Tab("Gallery"):
            gallery_header = gr.Markdown("### Your Generated Images")
            generated_gallery = gr.Gallery(
                label="Generated Images",
                columns=3,
                show_label=False,
                value=load_generated_images(),
                elem_id="generated_gallery",
                elem_classes="gallery",
                height="auto"
            )
            refresh_btn = gr.Button("🔄 Refresh Gallery", variant="primary")

            # Add sample gallery section at the bottom
            gr.Markdown("### Pierre-Auguste Renoir Style Examples")
            predefined_gallery = gr.Gallery(
                label="Sample Images",
                columns=3,
                rows=2,
                show_label=False,
                value=load_predefined_images(),
                elem_classes="gallery"
            )

    # Event handlers
    def refresh_gallery():
        # Re-scan SAVE_DIR so newly generated images appear in the gallery.
        return load_generated_images()

    refresh_btn.click(
        fn=refresh_gallery,
        inputs=None,
        outputs=generated_gallery,
    )

    # Run inference on button click or Enter in the prompt box.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=inference,
        inputs=[
            prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        ],
        outputs=[result, seed, generated_gallery],
    )

demo.queue()
demo.launch()
|