Staticaliza committed on
Commit
542f90d
·
verified ·
1 Parent(s): 5e7d02a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
5
 
6
  from PIL import Image
7
  from decord import VideoReader, cpu
8
- from transformers import AutoModel, AutoTokenizer
9
 
10
  # Pre-Initialize
11
  DEVICE = "auto"
@@ -18,8 +18,12 @@ DEFAULT_INPUT = "Describe in one paragraph."
18
  MAX_FRAMES = 64
19
 
20
  repo_name = "openbmb/MiniCPM-o-2_6" # "openbmb/MiniCPM-V-2_6-int4" # "openbmb/MiniCPM-V-2_6"
21
- repo = AutoModel.from_pretrained(repo_name, trust_remote_code=True, attn_implementation="sdpa", torch_dtype=torch.bfloat16)
 
 
 
22
  tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
 
23
 
24
  css = '''
25
  .gradio-container{max-width: 560px !important}
@@ -72,6 +76,8 @@ def generate(image, video, audio, instruction=DEFAULT_INPUT, sampling=False, tem
72
  return "No input provided."
73
 
74
  parameters = {
 
 
75
  "sampling": sampling,
76
  "temperature": temperature,
77
  "top_p": top_p,
@@ -80,7 +86,7 @@ def generate(image, video, audio, instruction=DEFAULT_INPUT, sampling=False, tem
80
  "max_new_tokens": max_tokens,
81
  }
82
 
83
- output = repo.chat(image=None, msgs=inputs, tokenizer=tokenizer, **parameters)
84
 
85
  print(output)
86
 
 
5
 
6
  from PIL import Image
7
  from decord import VideoReader, cpu
8
+ from transformers import AutoModel, AutoTokenizer, AutoProcessor
9
 
10
  # Pre-Initialize
11
  DEVICE = "auto"
 
18
  MAX_FRAMES = 64
19
 
20
  repo_name = "openbmb/MiniCPM-o-2_6" # "openbmb/MiniCPM-V-2_6-int4" # "openbmb/MiniCPM-V-2_6"
21
+ repo = AutoModel.from_pretrained(repo_name,
22
+ init_vision=True,
23
+ init_audio=False,
24
+ init_tts=False, trust_remote_code=True, attn_implementation="sdpa", torch_dtype=torch.bfloat16)
25
  tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
26
+ processor = AutoProcessor.from_pretrained(repo_name, trust_remote_code=True)
27
 
28
  css = '''
29
  .gradio-container{max-width: 560px !important}
 
76
  return "No input provided."
77
 
78
  parameters = {
79
+ "tokenizer": tokenizer,
80
+ "processor": processor,
81
  "sampling": sampling,
82
  "temperature": temperature,
83
  "top_p": top_p,
 
86
  "max_new_tokens": max_tokens,
87
  }
88
 
89
+ output = repo.chat(msgs=inputs, tokenizer=tokenizer, **parameters)
90
 
91
  print(output)
92