Rishi Desai committed · fb6c5e9 · Parent: e3a461e
more smol edits
Files changed:
- caption.py +3 -3
- prompt.py +2 -2
caption.py CHANGED

@@ -3,7 +3,7 @@ import io
 import os
 from together import Together
 
-
+MODEL_ID = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
 TRIGGER_WORD = "tr1gger"
 
 def get_system_prompt():
@@ -106,7 +106,7 @@ def caption_single_image(client, img_str):
 
     # Request caption for the image using Llama 4 Maverick
     response = client.chat.completions.create(
-        model=
+        model=MODEL_ID,
         messages=messages
     )
 
@@ -141,7 +141,7 @@ def caption_image_batch(client, image_strings, category):
         {"role": "user", "content": content}
     ]
     response = client.chat.completions.create(
-        model=
+        model=MODEL_ID,
         messages=messages
     )
     return process_batch_response(response, image_strings)
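For context, a minimal self-contained sketch of how the new MODEL_ID constant feeds the Together chat completions call after this commit. The client setup and the example messages are assumptions for illustration (the diff only shows that a configured Together client is obtained via get_together_client); the create() call itself follows the Together Python SDK.

import os
from together import Together

MODEL_ID = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"

# Assumption: the API key is read from the TOGETHER_API_KEY environment
# variable; the repo's get_together_client helper is not shown in the diff.
client = Together(api_key=os.environ["TOGETHER_API_KEY"])

# Illustrative messages; caption.py builds its own from the system prompt
# and base64-encoded images.
messages = [
    {"role": "system", "content": "You caption training images."},
    {"role": "user", "content": "Describe the image in one detailed sentence."},
]

response = client.chat.completions.create(
    model=MODEL_ID,  # single place to change the model after this commit
    messages=messages,
)
print(response.choices[0].message.content)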
prompt.py CHANGED

@@ -3,7 +3,7 @@ import argparse
 from pathlib import Path
 from caption import get_system_prompt, get_together_client, extract_captions
 
-
+MODEL_ID = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
 
 def optimize_prompt(user_prompt, captions_dir=None, captions_list=None):
     """Optimize a user prompt to follow the same format as training captions.
@@ -44,7 +44,7 @@ def optimize_prompt(user_prompt, captions_dir=None, captions_list=None):
     ]
 
     response = client.chat.completions.create(
-        model=
+        model=MODEL_ID,
         messages=messages
     )
 
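A hedged usage sketch of prompt.py's entry point after the change, based only on the signature visible above. The captions directory path and the example prompt (reusing the tr1gger trigger word from caption.py) are illustrative, and the return value is assumed to be the rewritten prompt string.

from prompt import optimize_prompt

# Hypothetical inputs: only the signature
# optimize_prompt(user_prompt, captions_dir=None, captions_list=None)
# is visible in the diff.
optimized = optimize_prompt(
    "tr1gger riding a bike through a neon-lit city at night",
    captions_dir="captions/",
)
print(optimized)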