Spaces:
Build error
Update app.py
app.py CHANGED
@@ -187,7 +187,7 @@ class LLMService:

        model_id_or_path = "stablediffusionapi/realistic-vision-v51"
        self.vae_pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, safety_checker=None, torch_dtype=torch.float16)
-       self.vae_pipe = self.vae_pipe.to(
+       self.vae_pipe = self.vae_pipe.to(self.vit_sd_device)

        self.boi_token_id = self.tokenizer.encode(BOI_TOKEN, add_special_tokens=False)[0]
        self.eoi_token_id = self.tokenizer.encode(EOI_TOKEN, add_special_tokens=False)[0]

@@ -361,8 +361,10 @@ def generate(text_list, image_list, max_new_tokens, force_boi, force_bbox, force

    generated_image = service.sd_adapter.generate(image_embeds=img_feat, num_inference_steps=50)[0]

    if force_polish:
-       service.sd_adapter = service.sd_adapter.cpu()
-       service.vae_pipe = service.vae_pipe.to(service.vit_sd_device, dtype=service.dtype)
+       #service.sd_adapter = service.sd_adapter.cpu()
+       #service.vae_pipe = service.vae_pipe.to(service.vit_sd_device, dtype=service.dtype)
+
+       torch.cuda.empty_cache()

        init_image = generated_image.resize((1024, 1024))
        prompt = ""

@@ -373,8 +375,10 @@ def generate(text_list, image_list, max_new_tokens, force_boi, force_bbox, force

    image_base64 = encode_image(generated_image)
    gen_imgs_base64_list.append(image_base64)

-
-
+   torch.cuda.empty_cache()
+
+   # service.vae_pipe = service.vae_pipe.to("cpu")
+   # service.sd_adapter = service.sd_adapter.to(service.vit_sd_device, dtype=service.dtype)


    # print('loading visual encoder and llm to GPU, and sd to CPU')