Rishi Desai committed on
Commit fb6c5e9 · 1 Parent(s): e3a461e

more smol edits'

Files changed (2)
  1. caption.py +3 -3
  2. prompt.py +2 -2
caption.py CHANGED
@@ -3,7 +3,7 @@ import io
 import os
 from together import Together
 
-MODEL = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
+MODEL_ID = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
 TRIGGER_WORD = "tr1gger"
 
 def get_system_prompt():
@@ -106,7 +106,7 @@ def caption_single_image(client, img_str):
 
     # Request caption for the image using Llama 4 Maverick
     response = client.chat.completions.create(
-        model=MODEL,
+        model=MODEL_ID,
         messages=messages
     )
 
@@ -141,7 +141,7 @@ def caption_image_batch(client, image_strings, category):
         {"role": "user", "content": content}
     ]
     response = client.chat.completions.create(
-        model=MODEL,
+        model=MODEL_ID,
         messages=messages
     )
     return process_batch_response(response, image_strings)
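For readers skimming the diff, a minimal sketch of the pattern caption.py now follows: a module-level MODEL_ID constant passed into the Together chat-completions call. The message layout (base64 image sent as a data URL) and the helper name caption_one are assumptions for illustration; only the create() call itself appears in the hunks above.

# Minimal sketch, not the repository's full code: MODEL_ID drives the
# Together vision call. The image-message shape below is an assumption.
import base64
from together import Together

MODEL_ID = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"

def caption_one(client, img_str):
    # img_str: base64-encoded image, mirroring caption_single_image's argument
    messages = [{
        "role": "user",
        "content": [
            {"type": "text", "text": "Write one training caption for this image."},
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{img_str}"}},
        ],
    }]
    response = client.chat.completions.create(model=MODEL_ID, messages=messages)
    return response.choices[0].message.content

if __name__ == "__main__":
    client = Together()  # reads TOGETHER_API_KEY from the environment
    with open("example.jpg", "rb") as f:
        img_str = base64.b64encode(f.read()).decode("utf-8")
    print(caption_one(client, img_str))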
prompt.py CHANGED
@@ -3,7 +3,7 @@ import argparse
 from pathlib import Path
 from caption import get_system_prompt, get_together_client, extract_captions
 
-MODEL = "deepseek-ai/DeepSeek-V3"
+MODEL_ID = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
 
 def optimize_prompt(user_prompt, captions_dir=None, captions_list=None):
     """Optimize a user prompt to follow the same format as training captions.
@@ -44,7 +44,7 @@ def optimize_prompt(user_prompt, captions_dir=None, captions_list=None):
     ]
 
     response = client.chat.completions.create(
-        model=MODEL,
+        model=MODEL_ID,
         messages=messages
     )
 
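And for prompt.py, a hedged usage sketch of optimize_prompt after this change, which points the prompt optimizer at the same Llama 4 Maverick model instead of DeepSeek-V3. The return value and the captions_dir path are assumptions; only the constant and the create() call are touched in the hunks above.

# Usage sketch (assumption: optimize_prompt returns the rewritten prompt string;
# only its signature and the create() call are visible in this diff).
from prompt import optimize_prompt

rewritten = optimize_prompt(
    "tr1gger standing on a beach at sunset",  # raw user prompt
    captions_dir="captions/",                 # hypothetical folder of training captions
)
print(rewritten)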