Tasks: Image Classification
Formats: imagefolder
Sub-tasks: multi-class-classification
Languages: English
Size: 1K - 10K
License: not specified
File size: 2,799 Bytes
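Since the card lists the imagefolder format, the published images can be loaded with the datasets library's imagefolder builder. A minimal loading sketch, assuming the images sit under a local data/ directory (the path and layout are assumptions, not part of the script below):

from datasets import load_dataset

# Assumed layout: data/<label>/<image files>, e.g. data/real and data/nitrosocke.
# The imagefolder builder infers a "label" column from sub-directory names
# when no metadata file is present.
ds = load_dataset("imagefolder", data_dir="data", split="train")
print(ds)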
import os
from PIL.Image import Image
from diffusers import StableDiffusionPipeline as SDP
from diffusers import AutoPipelineForText2Image as AP
from diffusers import DiffusionPipeline as DP
import torch
import json
import random
# Prefer Apple Silicon (MPS), then CUDA, otherwise fall back to CPU.
DEVICE = "mps" if torch.backends.mps.is_available() else \
    "cuda" if torch.cuda.is_available() else "cpu"
# Model 1: Ghibli-style Stable Diffusion fine-tune
pipe = SDP.from_pretrained("nitrosocke/Ghibli-Diffusion",
                           torch_dtype=torch.float16).to(DEVICE)
ID_PREFIX = "nitrosocke"
# Model 2: FLUX.1-dev base with a Ghibli-style LoRA
# pipe = AP.from_pretrained("black-forest-labs/FLUX.1-dev",
#                           torch_dtype=torch.bfloat16).to(DEVICE)
# pipe.load_lora_weights('openfree/flux-chatgpt-ghibli-lora',
#                        weight_name='flux-chatgpt-ghibli-lora.safetensors')
# ID_PREFIX = "openfree"
# Model 3: SDXL base with a Studio Ghibli style LoRA
# pipe = DP.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0",
#                           torch_dtype=torch.float16, variant="fp16",
#                           use_safetensors=True).to(DEVICE)
# pipe.load_lora_weights("KappaNeuro/studio-ghibli-style")
# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
# pipe.enable_model_cpu_offload()
# ID_PREFIX = "KappaNeuro"
NUM_IMAGES = 3  # generated variants per real photo
out_dir = f"data/{ID_PREFIX}"  # one output folder per generator model
os.makedirs(out_dir, exist_ok=True)
# Read the real-photo metadata, write one entry per generated image,
# and append real/AI pairings (with their seeds) for later evaluation.
with open("metadata.jsonl", "r", encoding="utf-8") as fin, \
        open("ai_entries.jsonl", "w", encoding="utf-8") as fout, \
        open("pairs.jsonl", "a", encoding="utf-8") as pairs:
    for line in fin:
        sample = json.loads(line)
        # Only real photographs serve as sources for AI generations.
        if sample["label"] != "real":
            continue
        description = sample["description"]
        real_id: str = sample["id"]
        aigen_id = real_id.replace("real", ID_PREFIX)
        prompt = f"ghibli style, {description}"
        # One random seed (and generator) per image so every variant is reproducible.
        seeds = [random.randrange(2**32) for _ in range(NUM_IMAGES)]
        gens = [torch.Generator(device=DEVICE).manual_seed(s) for s in seeds]
        images: list[Image] = pipe(prompt, num_images_per_prompt=NUM_IMAGES,
                                   generator=gens).images
        src_path = sample["image"]
        src_file: str = os.path.basename(src_path)
        file_noext = os.path.splitext(src_file)[0]
        for j, image in enumerate(images):
            img_id = f"{aigen_id}-{j}"
            dst_path = os.path.join(out_dir, f"{file_noext}_{j}.jpg")
            fout.write(json.dumps({
                "id": img_id,
                "image": dst_path,
                "label": ID_PREFIX,
                "description": description,
            }) + "\n")
            pairs.write(json.dumps({
                "real_image": src_path,
                "ai_image": dst_path,
                "description": description,
                "seed": seeds[j],
            }) + "\n")
            image.save(dst_path)
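The script appends one record per generated image to pairs.jsonl, tying each real photo to its AI-generated counterparts and the seed used. A small sketch for reading those pairs back for spot-checking; it relies only on the fields written above:

import json

with open("pairs.jsonl", encoding="utf-8") as f:
    for line in f:
        pair = json.loads(line)
        # Each record: source photo path, generated image path, shared caption, seed.
        print(f'{pair["real_image"]} -> {pair["ai_image"]} (seed {pair["seed"]})')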