import logging
import os
import random
import warnings

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import FluxControlNetModel, FluxPipeline, AutoencoderTiny, AutoencoderKL
from transformers import T5Tokenizer
from diffusers.pipelines import FluxControlNetPipeline
from diffusers.utils import load_image
from gradio_imageslider import ImageSlider
from PIL import Image
from huggingface_hub import snapshot_download, hf_hub_download

css = """
#col-container {
    margin: 0 auto;
    max-width: 512px;
}
"""

if torch.cuda.is_available():
    power_device = "GPU"
    device = "cuda"
else:
    power_device = "CPU"
    device = "cpu"

huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

model_path = snapshot_download(
    repo_id="LPX55/FLUX.1-merged_uncensored",
    repo_type="model",
    ignore_patterns=["*.md", "*.gitattributes"],
    local_dir="FLUX.1-merged_uncensored",
    token=huggingface_token,  # gated/private repos require a valid HF access token
)

# tokenizer_2 = T5Tokenizer.from_pretrained("LPX55/FLUX.1-merged_uncensored", subfolder="tokenizer_2", token=huggingface_token)
# taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device)
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token
).to(device)

# Load the upscaling ControlNet and attach it to the merged base model.
controlnet = FluxControlNetModel.from_pretrained(
    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
).to(device)
pipe = FluxControlNetPipeline.from_pretrained(
    model_path,  # reuse the snapshot downloaded above instead of fetching the repo a second time
    controlnet=controlnet,
    torch_dtype=torch.bfloat16,
    vae=good_vae,
    token=huggingface_token,
)
# pipe.load_lora_weights(
#     hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd"
# )
# pipe.set_adapters(["hyper-sd"], adapter_weights=[0.125])
pipe.to(device)

MAX_SEED = 1_000_000
MAX_PIXEL_BUDGET = 1536 * 1536

torch.cuda.empty_cache()


def process_input(input_image, upscale_factor, **kwargs):
    w, h = input_image.size
    w_original, h_original = w, h
    aspect_ratio = w / h

    was_resized = False

    if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
        warnings.warn(
            f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
        )
        gr.Info(
            f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels to stay within the budget."
        )
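        # Each target side below is sqrt(MAX_PIXEL_BUDGET) scaled by the aspect
        # ratio and divided by the upscale factor, so after the upscale_factor-times
        # enlargement the output area equals the budget. Note that the width is
        # multiplied and the height divided by aspect_ratio, so strongly non-square
        # inputs are skewed toward an aspect ratio of (w/h)**2.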
        input_image = input_image.resize(
            (
                int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor),
                int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor),
            )
        )
        was_resized = True

    # round sides down to a multiple of 8, as required by the pipeline
    w, h = input_image.size
    w = w - w % 8
    h = h - h % 8

    return input_image.resize((w, h)), w_original, h_original, was_resized


@spaces.GPU  # (duration=42)
def infer(
    seed,
    randomize_seed,
    input_image,
    num_inference_steps,
    upscale_factor,
    controlnet_conditioning_scale,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # keep an untouched copy of the input for the before/after slider
    true_input_image = load_image(input_image)
    input_image, w_original, h_original, was_resized = process_input(
        input_image, upscale_factor
    )

    # upscale the conditioning image by the requested factor
    w, h = input_image.size
    control_image = input_image.resize((w * upscale_factor, h * upscale_factor))

    generator = torch.Generator().manual_seed(seed)

    gr.Info("Upscaling image...")
    image = pipe(
        prompt="",
        control_image=control_image,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        num_inference_steps=num_inference_steps,
        guidance_scale=3.5,
        height=control_image.size[1],
        width=control_image.size[0],
        generator=generator,
    ).images[0]

    if was_resized:
        gr.Info(
            f"Resizing output image to the targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
        )

    # resize to the target size and keep a copy on disk
    image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
    image.save("output.jpg")

    return [true_input_image, image, seed]


with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    # with gr.Column(elem_id="col-container"):
    gr.HTML("