prithivMLmods committed (verified)
Commit a56276a · 1 Parent(s): 8299481

Update app.py

Files changed (1): app.py +1 -1
app.py CHANGED
@@ -302,7 +302,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     gr.Markdown("> [SpaceThinker-Qwen2.5VL-3B](https://huggingface.co/remyxai/SpaceThinker-Qwen2.5VL-3B): thinking/reasoning multimodal/vision-language model (VLM) trained to enhance spatial reasoning.")
     gr.Markdown("> [coreOCR-7B-050325-preview](https://huggingface.co/prithivMLmods/coreOCR-7B-050325-preview): model is a fine-tuned version of qwen/qwen2-vl-7b, optimized for document-level optical character recognition (ocr), long-context vision-language understanding.")
     gr.Markdown("> [SpaceOm](https://huggingface.co/remyxai/SpaceOm): SpaceOm, the reasoning traces in the spacethinker dataset average ~200 thinking tokens, so now included longer reasoning traces in the training data to help the model use more tokens in reasoning.")
-    gr.Markdown("> [DeepCaption-VLA-7B](https://huggingface.co/prithivMLmods/Behemoth-3B-070225-post0.1): DeepCaption-VLA-7B model is a fine-tuned version of Qwen2.5-VL-7B-Instruct, tailored for Image Captioning and VLA. This variant is designed to generate precise, highly descriptive captions.")
+    gr.Markdown("> [DeepCaption-VLA-7B](https://huggingface.co/prithivMLmods/DeepCaption-VLA-7B): DeepCaption-VLA-7B model is a fine-tuned version of Qwen2.5-VL-7B-Instruct, tailored for Image Captioning and VLA. This variant is designed to generate precise, highly descriptive captions.")
     gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
 
     image_submit.click(
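
For context, a minimal Gradio sketch of the pattern this one-line fix touches (not the actual app.py; model descriptions abbreviated): each model note is a blockquote-style gr.Markdown inside the gr.Blocks layout, and the commit simply repoints the DeepCaption-VLA-7B label at its matching repository URL.

import gradio as gr

# Minimal sketch, assuming the same gr.Blocks layout as the real app.py.
with gr.Blocks(theme="bethecloud/storj_theme") as demo:
    gr.Markdown("> [SpaceOm](https://huggingface.co/remyxai/SpaceOm): spatial-reasoning VLM.")
    # Fixed in this commit: the label and the link target now name the same model.
    gr.Markdown(
        "> [DeepCaption-VLA-7B](https://huggingface.co/prithivMLmods/DeepCaption-VLA-7B): "
        "fine-tuned from Qwen2.5-VL-7B-Instruct for image captioning and VLA."
    )

if __name__ == "__main__":
    demo.launch()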