LPX55 committed on
Commit
b45de7e
·
verified ·
1 Parent(s): a7eb958

Update mini.py

Browse files
Files changed (1) hide show
  1. mini.py +4 -11
mini.py CHANGED
@@ -3,12 +3,10 @@ import torch
3
  import spaces
4
  from PIL import Image
5
  import os
6
- from transformers import CLIPTokenizer, CLIPTextModel, AutoProcessor, T5EncoderModel, T5TokenizerFast, BitsAndBytesConfig
7
  from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler
8
  from flux.transformer_flux_simple import FluxTransformer2DModel
9
  from flux.pipeline_flux_chameleon_og import FluxPipeline
10
- from flux.pipeline_flux_img2img import FluxImg2ImgPipeline
11
-
12
  import torch.nn as nn
13
  import math
14
  import logging
@@ -31,9 +29,6 @@ MODEL_CACHE_DIR = "model_cache"
31
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
32
  DTYPE = torch.bfloat16
33
 
34
- quant_config = BitsAndBytesConfig(load_in_8bit=True,)
35
-
36
-
37
  # Aspect ratio options
38
  ASPECT_RATIOS = {
39
  "1:1": (1024, 1024),
@@ -86,12 +81,11 @@ tokenizer_two = T5TokenizerFast.from_pretrained(
86
 
87
  # Load larger models to CPU
88
  vae = AutoencoderKL.from_pretrained(
89
- os.path.join(MODEL_CACHE_DIR, "flux/vae"),
90
  ).to(DTYPE).cpu()
91
 
92
  transformer = FluxTransformer2DModel.from_pretrained(
93
- os.path.join(MODEL_CACHE_DIR, "flux/transformer"),
94
- quantization_config=quant_config,
95
  ).to(DTYPE).cpu()
96
 
97
  scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
@@ -101,8 +95,7 @@ scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
101
 
102
  # Load Qwen2VL to CPU
103
  qwen2vl = Qwen2VLSimplifiedModel.from_pretrained(
104
- os.path.join(MODEL_CACHE_DIR, "qwen2-vl"),
105
- quantization_config=quant_config,
106
  ).to(DTYPE).cpu()
107
 
108
  # Load connector and embedder
 
3
  import spaces
4
  from PIL import Image
5
  import os
6
+ from transformers import CLIPTokenizer, CLIPTextModel, AutoProcessor, T5EncoderModel, T5TokenizerFast
7
  from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler
8
  from flux.transformer_flux_simple import FluxTransformer2DModel
9
  from flux.pipeline_flux_chameleon_og import FluxPipeline
 
 
10
  import torch.nn as nn
11
  import math
12
  import logging
 
29
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
30
  DTYPE = torch.bfloat16
31
 
 
 
 
32
  # Aspect ratio options
33
  ASPECT_RATIOS = {
34
  "1:1": (1024, 1024),
 
81
 
82
  # Load larger models to CPU
83
  vae = AutoencoderKL.from_pretrained(
84
+ os.path.join(MODEL_CACHE_DIR, "flux/vae")
85
  ).to(DTYPE).cpu()
86
 
87
  transformer = FluxTransformer2DModel.from_pretrained(
88
+ os.path.join(MODEL_CACHE_DIR, "flux/transformer")
 
89
  ).to(DTYPE).cpu()
90
 
91
  scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
 
95
 
96
  # Load Qwen2VL to CPU
97
  qwen2vl = Qwen2VLSimplifiedModel.from_pretrained(
98
+ os.path.join(MODEL_CACHE_DIR, "qwen2-vl")
 
99
  ).to(DTYPE).cpu()
100
 
101
  # Load connector and embedder