LPX55 committed on
Commit
b0fa53c
·
verified ·
1 Parent(s): 360fae1

Update app_v3.py

Browse files
Files changed (1) hide show
  1. app_v3.py +5 -6
app_v3.py CHANGED
@@ -23,7 +23,6 @@ from typing import Generator
23
  import gradio as gr
24
  from huggingface_hub import CommitScheduler, HfApi, logging
25
  from debug import log_params, scheduler
26
- import torch._dynamo
27
  logging.set_verbosity_debug()
28
 
29
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
@@ -47,13 +46,13 @@ pipe = FluxControlNetPipeline.from_pretrained(
47
  )
48
  pipe.to("cuda")
49
 
50
- torch._dynamo.config.suppress_errors = True
51
  # For FLUX models, compiling VAE decode can also be beneficial if needed, though UNet is primary.
52
  # pipe.vae.decode = torch.compile(pipe.vae.decode, mode="reduce-overhead", fullgraph=True) # Uncomment if VAE compile helps
53
- try:
54
- pipe.vae.decode = torch.compile(pipe.vae.decode, mode="default")
55
- except Exception as e:
56
- print(f"Compile failed: {e}")
57
 
58
  # 2. Memory Efficient Attention (xFormers): Reduces memory usage and improves speed
59
  # Requires xformers library installation. Beneficial even with high VRAM.
 
23
  import gradio as gr
24
  from huggingface_hub import CommitScheduler, HfApi, logging
25
  from debug import log_params, scheduler
 
26
  logging.set_verbosity_debug()
27
 
28
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
 
46
  )
47
  pipe.to("cuda")
48
 
49
+ # torch._dynamo.config.suppress_errors = True
50
  # For FLUX models, compiling VAE decode can also be beneficial if needed, though UNet is primary.
51
  # pipe.vae.decode = torch.compile(pipe.vae.decode, mode="reduce-overhead", fullgraph=True) # Uncomment if VAE compile helps
52
+ # try:
53
+ # pipe.vae.decode = torch.compile(pipe.vae.decode, mode="default")
54
+ # except Exception as e:
55
+ # print(f"Compile failed: {e}")
56
 
57
  # 2. Memory Efficient Attention (xFormers): Reduces memory usage and improves speed
58
  # Requires xformers library installation. Beneficial even with high VRAM.