Tasks: Image Classification
Formats: imagefolder
Sub-tasks: multi-class-classification
Languages: English
Size: 1K - 10K
License:
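
Since the card lists the imagefolder format, the published dataset can presumably be loaded directly with the `datasets` library. A minimal sketch, assuming a hypothetical Hub repo id (substitute the actual repository):

from datasets import load_dataset

# "user/ghibli-real-vs-ai" is a hypothetical repo id; replace it with the
# actual dataset repository on the Hub.
ds = load_dataset("user/ghibli-real-vs-ai", split="train")
print(ds)     # available columns depend on the metadata pushed with the images
print(ds[0])  # one example: the PIL image plus its metadata fields

The script below generates the AI half of the dataset: for every entry in metadata.jsonl labelled "real", it renders NUM_IMAGES Ghibli-style counterparts with one of three diffusion pipelines and records the new files in ai_entries.jsonl and pairs.jsonl.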
import os
import json
import random

import torch
from PIL.Image import Image
from diffusers import StableDiffusionPipeline as SDP
from diffusers import AutoPipelineForText2Image as AP
from diffusers import DiffusionPipeline as DP

# Pick the best available device: Apple MPS, then CUDA, then CPU.
DEVICE = "mps" if torch.backends.mps.is_available() else \
    "cuda" if torch.cuda.is_available() else "cpu"

# Model 1: Ghibli-Diffusion (Stable Diffusion fine-tune)
pipe = SDP.from_pretrained("nitrosocke/Ghibli-Diffusion",
                           torch_dtype=torch.float16).to(DEVICE)
ID_PREFIX = "nitrosocke"

# Model 2: FLUX.1-dev with a Ghibli LoRA
# pipe = AP.from_pretrained("black-forest-labs/FLUX.1-dev",
#                           torch_dtype=torch.bfloat16).to(DEVICE)
# pipe.load_lora_weights('openfree/flux-chatgpt-ghibli-lora',
#                        weight_name='flux-chatgpt-ghibli-lora.safetensors')
# ID_PREFIX = "openfree"

# Model 3: SDXL base with a Studio Ghibli LoRA
# pipe = DP.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0",
#                           torch_dtype=torch.float16, variant="fp16",
#                           use_safetensors=True).to(DEVICE)
# pipe.load_lora_weights("KappaNeuro/studio-ghibli-style")
# pipe.to(DEVICE)
# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
# pipe.enable_model_cpu_offload()
# ID_PREFIX = "KappaNeuro"

NUM_IMAGES = 3  # generated images per real source image

out_dir = f"data/{ID_PREFIX}"
os.makedirs(out_dir, exist_ok=True)

with open("metadata.jsonl", "r", encoding="utf-8") as fin, \
     open("ai_entries.jsonl", "w", encoding="utf-8") as fout, \
     open("pairs.jsonl", "a", encoding="utf-8") as pairs:
    for i, line in enumerate(fin):
        sample = json.loads(line)
        # Only real images serve as sources for AI-generated counterparts.
        if sample["label"] != "real":
            continue

        description = sample["description"]
        real_id: str = sample["id"]
        aigen_id = real_id.replace("real", ID_PREFIX)
        prompt = f"ghibli style, {description}"

        # One seeded generator per output image so each generation is reproducible.
        seeds = [random.randrange(2**32) for _ in range(NUM_IMAGES)]
        gens = [torch.Generator(device=DEVICE).manual_seed(s) for s in seeds]
        images: list[Image] = pipe(prompt, num_images_per_prompt=NUM_IMAGES,
                                   generator=gens).images

        src_path = sample["image"]
        src_file: str = os.path.basename(src_path)
        file_noext = src_file.split(".")[0]

        for j, image in enumerate(images):
            img_id = f"{aigen_id}-{j}"
            dst_path = os.path.join(out_dir, f"{file_noext}_{j}.jpg")
            # Metadata entry for the generated image.
            fout.write(json.dumps({
                "id": img_id,
                "image": dst_path,
                "label": ID_PREFIX,
                "description": description,
            }) + "\n")
            # Real/AI pairing, including the generation seed.
            pairs.write(json.dumps({
                "real_image": src_path,
                "ai_image": dst_path,
                "description": description,
                "seed": seeds[j],
            }) + "\n")
            image.save(dst_path)
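
The pairs.jsonl file written above links each real image to its generated counterparts along with the seed used, so individual generations can be inspected or reproduced later. A small sketch for reading it back:

import json

with open("pairs.jsonl", "r", encoding="utf-8") as f:
    recorded_pairs = [json.loads(line) for line in f]

print(len(recorded_pairs))  # total real/AI pairs recorded
print(recorded_pairs[0]["real_image"], "->",
      recorded_pairs[0]["ai_image"], recorded_pairs[0]["seed"])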