Spaces: Running on Zero
Commit message: modify
Browse files
Changed files:
- misc_utils/train_utils.py (+1 / -1)
- modules/openclip/modules.py (+2 / -2)
misc_utils/train_utils.py — CHANGED

@@ -44,7 +44,7 @@ def get_vae(args):
 44     if args.get('diffusion'):
 45         if args.diffusion.params.get('base_path'):
 46             base_path = args.diffusion.params.base_path
 47 -           vae = AutoencoderKL.from_pretrained(  [removed line truncated in extraction]
 47 +           vae = AutoencoderKL.from_pretrained(base_path, subfolder="vae")
 48             return vae
 49     return None
modules/openclip/modules.py — CHANGED

@@ -100,8 +100,8 @@ class FrozenCLIPEmbedder(AbstractEncoder):
 100     assert layer in self.LAYERS
 101
 102     if base_path:
 103 -       self.tokenizer = CLIPTokenizer.from_pretrained(  [removed line truncated in extraction]
 104 -       self.transformer = CLIPTextModel.from_pretrained(  [removed line truncated in extraction]
 103 +       self.tokenizer = CLIPTokenizer.from_pretrained(base_path, subfolder='tokenizer')
 104 +       self.transformer = CLIPTextModel.from_pretrained(base_path, subfolder='text_encoder')
 105     else:
 106         self.tokenizer = CLIPTokenizer.from_pretrained(version)
 107         self.transformer = CLIPTextModel.from_pretrained(version)