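"""Image captioning with MiniCPM-Llama3-V-2_5, wrapped for a Hugging Face Space (ZeroGPU)."""
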
import torch
from transformers import AutoModel, AutoTokenizer
import os
import spaces

def download_model_and_tokenizer():
    """Download model and tokenizer to the specified directory."""
    print("Downloading model and tokenizer...")
    model = AutoModel.from_pretrained(
        'openbmb/MiniCPM-Llama3-V-2_5',
        trust_remote_code=True,
        torch_dtype=torch.float16,
        cache_dir='models/MiniCPM'
    )
    tokenizer = AutoTokenizer.from_pretrained(
        'openbmb/MiniCPM-Llama3-V-2_5',
        trust_remote_code=True,
        cache_dir='models/MiniCPM'
    )
    print("Download complete.")
    return model, tokenizer

def load_model_and_tokenizer():
    """Load the model and tokenizer, downloading them if necessary."""
    model_dir = 'models/MiniCPM'

    # Check whether the cache directory exists and contains files
    if not os.path.exists(model_dir) or not os.listdir(model_dir):
        # If the folder doesn't exist or is empty, download the model and tokenizer
        os.makedirs(model_dir, exist_ok=True)
        model, tokenizer = download_model_and_tokenizer()
    else:
        # cache_dir stores files in the Hub cache layout (models--openbmb--...),
        # not as a flat checkpoint, so load by repo id with the same cache_dir
        # instead of pointing from_pretrained at the directory itself.
        print("Loading model and tokenizer from local cache...")
        model = AutoModel.from_pretrained(
            'openbmb/MiniCPM-Llama3-V-2_5',
            trust_remote_code=True,
            torch_dtype=torch.float16,
            cache_dir=model_dir,
            local_files_only=True
        )
        tokenizer = AutoTokenizer.from_pretrained(
            'openbmb/MiniCPM-Llama3-V-2_5',
            trust_remote_code=True,
            cache_dir=model_dir,
            local_files_only=True
        )
    return model, tokenizer

@spaces.GPU
def get_caption(image):
    """Generate a caption for a PIL image with MiniCPM-Llama3-V-2_5."""
    # Load (or download) the model and tokenizer on each invocation
    model, tokenizer = load_model_and_tokenizer()
    # Move the model to the GPU and switch to inference mode
    model = model.to(device='cuda')
    model.eval()

    question = "Describe the image."
    msgs = [{'role': 'user', 'content': question}]

    # stream=True makes model.chat return a generator of text chunks
    res = model.chat(
        image=image,
        msgs=msgs,
        tokenizer=tokenizer,
        sampling=True,
        temperature=0.7,
        stream=True
    )
    # Concatenate the streamed chunks into the final caption
    generated_text = ""
    for new_text in res:
        generated_text += new_text

    # Move the model off the GPU and free cached memory before returning
    model.cpu()
    del model
    torch.cuda.empty_cache()
    return generated_text
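
# Minimal usage sketch (an assumption, not part of the original script):
# model.chat expects a PIL image, so load one with Pillow and print the caption.
# The path 'example.jpg' is a hypothetical placeholder.
if __name__ == "__main__":
    from PIL import Image

    image = Image.open("example.jpg").convert("RGB")
    print(get_caption(image))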