disable speed up
app.py CHANGED
@@ -40,25 +40,25 @@ pipeline_t2i = StableGarmentPipeline.from_pretrained(base_model_path, vae=vae, t
 # pipeline = StableDiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", vae=vae, torch_dtype=torch_dtype).to(device=device)
 pipeline_t2i.scheduler = scheduler
 
-
-# speed up for cpu
-# to channels last
-pipeline_t2i.unet = pipeline_t2i.unet.to(memory_format=torch.channels_last)
-pipeline_t2i.vae = pipeline_t2i.vae.to(memory_format=torch.channels_last)
-pipeline_t2i.text_encoder = pipeline_t2i.text_encoder.to(memory_format=torch.channels_last)
-# pipeline_t2i.safety_checker = pipeline_t2i.safety_checker.to(memory_format=torch.channels_last)
-
-# Create random input to enable JIT compilation
-sample = torch.randn(2,4,64,64).type(torch_dtype)
-timestep = torch.rand(1)*999
-encoder_hidden_status = torch.randn(2,77,768).type(torch_dtype)
-input_example = (sample, timestep, encoder_hidden_status)
-
-# optimize with IPEX
-pipeline_t2i.unet = ipex.optimize(pipeline_t2i.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
-pipeline_t2i.vae = ipex.optimize(pipeline_t2i.vae.eval(), dtype=torch.bfloat16, inplace=True)
-pipeline_t2i.text_encoder = ipex.optimize(pipeline_t2i.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
-# pipeline_t2i.safety_checker = ipex.optimize(pipeline_t2i.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
+if False: #device=="cpu":
+    # speed up for cpu
+    # to channels last
+    pipeline_t2i.unet = pipeline_t2i.unet.to(memory_format=torch.channels_last)
+    pipeline_t2i.vae = pipeline_t2i.vae.to(memory_format=torch.channels_last)
+    pipeline_t2i.text_encoder = pipeline_t2i.text_encoder.to(memory_format=torch.channels_last)
+    # pipeline_t2i.safety_checker = pipeline_t2i.safety_checker.to(memory_format=torch.channels_last)
+
+    # Create random input to enable JIT compilation
+    sample = torch.randn(2,4,64,64).type(torch_dtype)
+    timestep = torch.rand(1)*999
+    encoder_hidden_status = torch.randn(2,77,768).type(torch_dtype)
+    input_example = (sample, timestep, encoder_hidden_status)
+
+    # optimize with IPEX
+    pipeline_t2i.unet = ipex.optimize(pipeline_t2i.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
+    pipeline_t2i.vae = ipex.optimize(pipeline_t2i.vae.eval(), dtype=torch.bfloat16, inplace=True)
+    pipeline_t2i.text_encoder = ipex.optimize(pipeline_t2i.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
+    # pipeline_t2i.safety_checker = ipex.optimize(pipeline_t2i.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
 
 
 pipeline_tryon = None
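
For context, the block this commit wraps in a hard-coded if False: is the CPU speed-up path (channels-last conversion, a sample input for JIT compilation, and IPEX optimization of the UNet, VAE, and text encoder). Below is a minimal sketch of how that same optimization could instead be gated on the runtime device; the helper name maybe_speed_up_cpu is hypothetical, and it assumes intel_extension_for_pytorch is installed and that bfloat16 inference is wanted, as in the original code.

import torch
import intel_extension_for_pytorch as ipex  # assumption: available in the Space's environment

def maybe_speed_up_cpu(pipeline, device, torch_dtype=torch.bfloat16):
    """Apply channels-last + IPEX optimizations only when running on CPU (hypothetical helper)."""
    if device != "cpu":
        return pipeline

    # Convert the heavy submodules to channels-last memory format for CPU inference.
    pipeline.unet = pipeline.unet.to(memory_format=torch.channels_last)
    pipeline.vae = pipeline.vae.to(memory_format=torch.channels_last)
    pipeline.text_encoder = pipeline.text_encoder.to(memory_format=torch.channels_last)

    # Random UNet inputs so IPEX can trace and compile the graph up front
    # (same shapes as in the original code: latent sample, timestep, text embeddings).
    sample = torch.randn(2, 4, 64, 64).to(torch_dtype)
    timestep = torch.rand(1) * 999
    encoder_hidden_states = torch.randn(2, 77, 768).to(torch_dtype)
    input_example = (sample, timestep, encoder_hidden_states)

    # Optimize each submodule with IPEX for bfloat16 inference.
    pipeline.unet = ipex.optimize(pipeline.unet.eval(), dtype=torch_dtype,
                                  inplace=True, sample_input=input_example)
    pipeline.vae = ipex.optimize(pipeline.vae.eval(), dtype=torch_dtype, inplace=True)
    pipeline.text_encoder = ipex.optimize(pipeline.text_encoder.eval(),
                                          dtype=torch_dtype, inplace=True)
    return pipeline

In app.py this could be called once after the pipeline is built, e.g. pipeline_t2i = maybe_speed_up_cpu(pipeline_t2i, device), so the speed-up is toggled by the device check rather than by editing an if False: guard.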