stzhao committed on
Commit
fd8fe35
·
verified ·
1 Parent(s): af4705c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -36,14 +36,14 @@ def prompt_enhance(client, image_caption, text_caption):
36
 
37
  pipe = load_models()
38
 
39
- def truncate_caption_by_tokens(caption, max_tokens=256):
40
- """Truncate the caption to fit within the max token limit"""
41
- tokens = tokenizer.encode(caption)
42
- if len(tokens) > max_tokens:
43
- truncated_tokens = tokens[:max_tokens]
44
- caption = tokenizer.decode(truncated_tokens, skip_special_tokens=True)
45
- print(f"Caption was truncated from {len(tokens)} tokens to {max_tokens} tokens")
46
- return caption
47
 
48
 
49
  @spaces.GPU(duration=60)
@@ -52,7 +52,7 @@ def generate_image(enhanced_caption, seed, num_inference_steps, guidance_scale):
52
  pipe.enable_model_cpu_offload()
53
  """Generate image using LeX-Lumina"""
54
  # Truncate the caption if it's too long
55
- enhanced_caption = truncate_caption_by_tokens(enhanced_caption, max_tokens=256)
56
 
57
  print(f"enhanced caption:\n{enhanced_caption}")
58
 
 
36
 
37
  pipe = load_models()
38
 
39
+ # def truncate_caption_by_tokens(caption, max_tokens=256):
40
+ # """Truncate the caption to fit within the max token limit"""
41
+ # tokens = tokenizer.encode(caption)
42
+ # if len(tokens) > max_tokens:
43
+ # truncated_tokens = tokens[:max_tokens]
44
+ # caption = tokenizer.decode(truncated_tokens, skip_special_tokens=True)
45
+ # print(f"Caption was truncated from {len(tokens)} tokens to {max_tokens} tokens")
46
+ # return caption
47
 
48
 
49
  @spaces.GPU(duration=60)
 
52
  pipe.enable_model_cpu_offload()
53
  """Generate image using LeX-Lumina"""
54
  # Truncate the caption if it's too long
55
+ # enhanced_caption = truncate_caption_by_tokens(enhanced_caption, max_tokens=256)
56
 
57
  print(f"enhanced caption:\n{enhanced_caption}")
58