aleafy committed
Commit 2c33647 · 1 Parent(s): 32f018b
misc_utils/train_utils.py CHANGED
@@ -44,7 +44,7 @@ def get_vae(args):
     if args.get('diffusion'):
         if args.diffusion.params.get('base_path'):
             base_path = args.diffusion.params.base_path
-            vae = AutoencoderKL.from_pretrained(os.path.join(base_path, "vae"))
+            vae = AutoencoderKL.from_pretrained(base_path, subfolder="vae")
             return vae
     return None
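Note: passing subfolder= lets from_pretrained resolve the component whether base_path is a local directory or a Hub repo id, whereas os.path.join(base_path, "vae") only resolves for a local checkout. A minimal sketch of loading after this change; the repo id "runwayml/stable-diffusion-v1-5" is a stand-in, not something this commit references:

from diffusers import AutoencoderKL

# base_path may now be a local folder or a Hub repo id;
# subfolder="vae" picks the vae/ component inside it
vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")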
modules/openclip/modules.py CHANGED
@@ -100,8 +100,8 @@ class FrozenCLIPEmbedder(AbstractEncoder):
         assert layer in self.LAYERS

         if base_path:
-            self.tokenizer = CLIPTokenizer.from_pretrained(os.path.join(base_path, 'tokenizer'))
-            self.transformer = CLIPTextModel.from_pretrained(os.path.join(base_path, 'text_encoder'))
+            self.tokenizer = CLIPTokenizer.from_pretrained(base_path, subfolder='tokenizer')
+            self.transformer = CLIPTextModel.from_pretrained(base_path, subfolder='text_encoder')
         else:
             self.tokenizer = CLIPTokenizer.from_pretrained(version)
             self.transformer = CLIPTextModel.from_pretrained(version)
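The same pattern applies to the CLIP components. A hedged sketch, again using "runwayml/stable-diffusion-v1-5" as a placeholder for base_path:

from transformers import CLIPTokenizer, CLIPTextModel

# subfolder= resolves the tokenizer/ and text_encoder/ components
# inside the base checkpoint, whether local or on the Hub
tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")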