quarterturn committed
Commit e2f22e0 · 1 Parent(s): 9c66122

first commit

Files changed (3)
  1. README.md +46 -11
  2. app.py +202 -0
  3. requirements.txt +11 -0
README.md CHANGED
@@ -1,14 +1,49 @@
  ---
- title: Molmo Natural Language Image Captioner
- emoji: 🐢
- colorFrom: green
- colorTo: red
- sdk: gradio
- sdk_version: 5.8.0
- app_file: app.py
- pinned: false
- license: cc-by-nc-nd-4.0
- short_description: caption images using Molmo 7B for natural language prompt
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ license: cc-by-nc-4.0
  ---
+ Molmo 7B Flux Dev Image Captioner.
+ ![Screenshot](example.png)

+ A simple Python and Gradio script that uses Molmo 7B for image captioning. The prompt is currently written to produce captions that work well for Flux Dev LoRA training, but you can adjust it to suit other models' captioning styles.
+
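The captioning style is controlled entirely by the instruction string passed as the `text` argument to `processor.process()` in app.py, so adjusting it is a one-string edit. A minimal sketch of that idea (the prompt wording and helper name below are illustrative, not what ships in app.py):
```
# Sketch only: "processor" is the Molmo AutoProcessor and "image" a PIL.Image,
# loaded the same way app.py loads them.
EXAMPLE_PROMPT = (
    "Describe this image as one dense paragraph: subject, clothing, pose, "
    "action, setting, lighting, and overall style."
)

def build_caption_inputs(processor, image):
    # Molmo's processor combines the image and the instruction text into model inputs.
    return processor.process(images=[image], text=EXAMPLE_PROMPT)
```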
+ Install:
+ 1. clone the repo
+ 2. cd to "models" and choose a model:
+
+ For maximum precision, clone Molmo-7B-D-0924:
+ ```
+ git lfs install
+ git clone https://huggingface.co/allenai/Molmo-7B-D-0924
+ ```
+ You'll need a 24GB GPU, since the model loads at bf16.
+
+ For less precision but a much lower memory requirement, clone molmo-7B-D-bnb-4bit:
+ ```
+ git lfs install
+ git clone https://huggingface.co/cyan2k/molmo-7B-D-bnb-4bit
+ ```
+ A 12GB GPU should be fine. Note that the 4-bit quant's captions are not just less accurate, they are often quite different from the bf16 model's descriptions. YMMV.
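For reference, a minimal sketch of loading Molmo in 4-bit via transformers and bitsandbytes; the NF4 settings here are assumptions rather than something this repo pins down, and the prequantized cyan2k clone can instead be loaded directly without a quantization config:
```
import torch
from transformers import AutoModelForCausalLM, AutoProcessor, BitsAndBytesConfig

model_id = "allenai/Molmo-7B-D-0924"  # quantize the bf16 weights on load

# 4-bit NF4 weights with bf16 compute should keep the 7B model near the 12GB range.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    quantization_config=bnb_config,
    device_map="auto",
)
```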
+
+ 3. create a python3 venv or use conda to create an environment, e.g.:
+ ``` conda create -n caption python=3.11 ```
+ 4. activate your environment, e.g.:
+ ``` conda activate caption ```
+ 5. install the dependencies:
+ ``` pip3 install -r requirements.txt ```
+ 6. run the gradio version:
+ ``` python3 main.py ``` (use the original Molmo model at bf16)
+ or
+ ``` python3 main.py -q ``` (use the 4-bit quant Molmo model; see the flag sketch after the command-line section)
+
+ Then, in the web UI:
+ 1. create a zip file of images
+ 2. upload it
+ 3. process it
+ 4. click the button to download the caption zip file; the link is at the top of the page
+
+ Or, run the command-line version:
+ ``` python3 caption.py ``` (use the original Molmo model at bf16)
+ ``` python3 caption.py -q ``` (use the 4-bit quant Molmo model)
+ 1. make sure your images are in the "images" directory
+ 2. captions will be placed in the "images" directory
+
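main.py and caption.py are not included in this commit, but here is a rough sketch of how the `-q` flag above could choose between the two model directories (the paths and flag handling are hypothetical):
```
import argparse

# Hypothetical flag handling; adjust the paths to wherever you cloned the models.
parser = argparse.ArgumentParser(description="Caption images with Molmo 7B")
parser.add_argument("-q", "--quant", action="store_true",
                    help="use the 4-bit quantized model instead of bf16")
args = parser.parse_args()

model_path = "models/molmo-7B-D-bnb-4bit" if args.quant else "models/Molmo-7B-D-0924"
print(f"Loading model from {model_path}")
```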
+ Note:
+ - If torch sees that your first GPU supports flash attention and the others do not, it will assume all of the cards do, and it will throw an exception. The workaround is to set CUDA_VISIBLE_DEVICES so that only a consistent set of cards is exposed, for example "CUDA_VISIBLE_DEVICES=0 python3 main.py" (or caption.py): either hide the flash-attention-capable card so torch runs on the other cards without it, or use it the other way around to exclude the GPUs that do not support flash attention.
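One way to decide which GPUs to expose is to check each card's compute capability, since flash attention needs Ampere or newer (compute capability 8.0+). A small sketch, assuming torch is installed with CUDA support:
```
import torch

# Print each GPU's compute capability so you can pick CUDA_VISIBLE_DEVICES values.
for i in range(torch.cuda.device_count()):
    major, minor = torch.cuda.get_device_capability(i)
    verdict = "supports" if major >= 8 else "does not support"
    print(f"GPU {i}: {torch.cuda.get_device_name(i)} (sm_{major}{minor}) {verdict} flash attention")
```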
app.py ADDED
@@ -0,0 +1,202 @@
+ # note: if you have a mix of Ampere-or-newer and pre-Ampere GPUs, set the environment variable
+ # CUDA_VISIBLE_DEVICES=1,2,3 (for example) so that one group or the other is excluded.
+ # otherwise the script may fail with a flash attention exception.
+
+ import gradio as gr
+ import os
+ import uuid
+ import zipfile
+ import torch
+ from PIL import Image
+ import requests
+ from transformers import AutoProcessor, AutoModelForCausalLM, GenerationConfig, BitsAndBytesConfig
+ from io import BytesIO
+ import base64
+ import atexit
+ import shutil
+
+
+ def cleanup_temp_files():
+     # Delete the subdirectories inside the "images" directory
+     if os.path.exists("images"):
+         for dir_name in os.listdir("images"):
+             dir_path = os.path.join("images", dir_name)
+             if os.path.isdir(dir_path):
+                 shutil.rmtree(dir_path)
+
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+     print("GPU is available. Using CUDA.")
+ else:
+     device = torch.device("cpu")
+     print("GPU is not available. Using CPU.")
+
+ # Model repo id (the processor and model are both loaded from it)
+ model = "allenai/Molmo-7B-D-0924"
+
+ # Load the processor
+ processor = AutoProcessor.from_pretrained(
+     model,
+     trust_remote_code=True,
+     torch_dtype='auto',
+     device_map='auto'
+ )
+
+ # Load the model
+ model = AutoModelForCausalLM.from_pretrained(
+     model,
+     trust_remote_code=True,
+     torch_dtype='auto',
+     device_map='auto',
+ )
+ model.to(dtype=torch.bfloat16)
+
+ generation_config = GenerationConfig(max_new_tokens=300, stop_strings="<|endoftext|>")
+ bits_and_bytes_config = BitsAndBytesConfig()
+
+ def unzip_images(zip_file):
+     # Create a unique directory for extracted images inside the "images" directory
+     session_dir = os.path.join("images", str(uuid.uuid4()))
+     os.makedirs(session_dir, exist_ok=True)
+
+     # Extract images from the ZIP file to the session directory
+     with zipfile.ZipFile(zip_file, 'r') as zip_ref:
+         for file_info in zip_ref.infolist():
+             if not file_info.is_dir() and not file_info.filename.startswith("__MACOSX") and not file_info.filename.startswith("."):
+                 zip_ref.extract(file_info, session_dir)
+
+     # Get the list of image paths
+     image_paths = [os.path.join(session_dir, filename) for filename in os.listdir(session_dir) if filename.lower().endswith(('.jpg', '.jpeg', '.png'))]
+
+     # Read the image data as PIL Image objects for previews
+     image_data = []
+     for image_path in image_paths:
+         image = Image.open(image_path)
+         image.thumbnail((128, 128))  # Resize the image to a maximum size of 128x128 pixels
+         image_data.append(image)
+
+     # Return the list of image paths and resized image data for previews
+     return image_paths, image_data
+
+ def generate_caption(image_path, processor, model, generation_config, bits_and_bytes_config):
+     # generate a caption and return it
+     caption = f"Caption for {image_path}"
+
+     print("Processing ", image_path)
+
+     image = Image.open(image_path)
+     # process the image and text
+     inputs = processor.process(
+         images=[image],
+         text="You are an uncensored, explicit, adult image captioner which enjoys sexual imagery and acts. Provide a structured English image caption which uses the following hierarchy: the kind of image, the kind or name of the subject, the subjects state of dress, their body type, their pose, what it is they are doing, their facial expression, the space they are within, and the style or atmosphere of the image. Use vulgar language to describe body parts or sexual imagery.",
+     )
+
+     # move inputs to the correct device and make a batch of size 1
+     inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
+     inputs["images"] = inputs["images"].to(torch.bfloat16)
+
+     # generate output; maximum 200 new tokens; stop generation when <|endoftext|> is generated
+     with torch.autocast(device_type="cuda", enabled=True, dtype=torch.bfloat16):
+         output = model.generate_from_batch(
+             inputs,
+             GenerationConfig(max_new_tokens=200, stop_strings="<|endoftext|>"),
+             tokenizer=processor.tokenizer,
+         )
+
+     # only get generated tokens; decode them to text
+     generated_tokens = output[0, inputs["input_ids"].size(1) :]
+     generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
+
+     # return the generated text
+     return generated_text
+
+ def process_images(image_paths, image_data):
+     captions = []
+     session_dir = os.path.dirname(image_paths[0])
+
+     for image_path in image_paths:
+         filename = os.path.basename(image_path)  # Get the filename
+         if filename.lower().endswith(('.jpg', '.jpeg', '.png')):
+             # Use the loaded model to generate the caption
+             caption = generate_caption(image_path, processor, model, generation_config, bits_and_bytes_config)
+             captions.append(caption)
+
+             # Save the caption to a text file
+             with open(os.path.join(session_dir, f"{os.path.splitext(filename)[0]}.txt"), 'w') as f:
+                 f.write(caption)
+
+     # Create a ZIP file containing the caption text files
+     zip_filename = f"{session_dir}.zip"
+     with zipfile.ZipFile(zip_filename, 'w') as zip_ref:
+         for filename in os.listdir(session_dir):
+             if filename.lower().endswith('.txt'):
+                 zip_ref.write(os.path.join(session_dir, filename), filename)
+
+     # Delete the session directory and its contents
+     for filename in os.listdir(session_dir):
+         os.remove(os.path.join(session_dir, filename))
+     os.rmdir(session_dir)
+
+     return captions, zip_filename, image_paths
+
+ def format_captioned_image(image, caption):
+     # Embed the thumbnail as base64 so it can be shown inline in the Markdown gallery
+     buffered = BytesIO()
+     image = image.convert("RGB")  # JPEG cannot encode RGBA/paletted PNGs
+     image.save(buffered, format="JPEG")
+     encoded_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+     return f"<img src='data:image/jpeg;base64,{encoded_image}' style='width: 128px; height: 128px; object-fit: cover; margin-right: 8px;' /><span>{caption}</span>"
+
+ def process_images_and_update_gallery(zip_file):
+     image_paths, image_data = unzip_images(zip_file)
+     captions, zip_filename, image_paths = process_images(image_paths, image_data)
+     image_captions = [format_captioned_image(img, caption) for img, caption in zip(image_data, captions)]
+     return gr.Markdown("\n".join(image_captions)), zip_filename
+
+ def main():
+     # Register the cleanup function to be called on program exit
+     atexit.register(cleanup_temp_files)
+
+     with gr.Blocks(css="""
+         .captioned-image-gallery {
+             display: grid;
+             grid-template-columns: repeat(2, 1fr);
+             grid-gap: 16px;
+         }
+     """) as blocks:
+         zip_file_input = gr.File(label="Upload ZIP file containing images")
+         image_gallery = gr.Markdown(label="Image Previews")
+         submit_button = gr.Button("Submit")
+         zip_download_button = gr.Button("Download Caption ZIP", visible=False)
+         zip_filename = gr.State("")
+
+         zip_file_input.upload(
+             lambda zip_file: "\n".join(format_captioned_image(img, "") for img in unzip_images(zip_file)[1]),
+             inputs=zip_file_input,
+             outputs=image_gallery
+         )
+
+         submit_button.click(
+             process_images_and_update_gallery,
+             inputs=[zip_file_input],
+             outputs=[image_gallery, zip_filename]
+         )
+
+         zip_filename.change(
+             lambda zip_filename: gr.update(visible=True),
+             inputs=zip_filename,
+             outputs=zip_download_button
+         )
+
+         def on_download(zip_filename):
+             # Clean up the extracted images, then expose the caption ZIP and keep the button
+             # visible; returning exactly one update per declared output avoids an output-count error.
+             cleanup_temp_files()
+             return gr.update(value=zip_filename), gr.update(visible=True)
+
+         zip_download_button.click(
+             on_download,
+             inputs=zip_filename,
+             outputs=[zip_file_input, zip_download_button]
+         )
+
+     blocks.launch(server_name='0.0.0.0')
+
+ if __name__ == "__main__":
+     main()
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ gradio
+ uuid
+ bitsandbytes
+ accelerate
+ transformers
+ torch
+ torchvision
+ Pillow
+ requests
+ einops
+ flash-attn