from transformers import pipeline
import gradio as gr
import base64
import io

# Load an image-to-text (captioning) pipeline. Other captioning models to try:
# "nlpconnect/vit-gpt2-image-captioning", "sashakunitsyn/vlrm-blip2-opt-2.7b",
# "Salesforce/blip2-opt-6.7b"
get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")

# Thin wrapper around the pipeline that returns only the generated caption text
# (not wired into the Gradio interface below).
def summarize(input):
    output = get_completion(input)
    return output[0]['generated_text']

# Encode a PIL image as a base64 PNG string (handy when sending images to an
# API that expects base64 input; not used by the local pipeline below).
def image_to_base64_str(pil_image):
    byte_arr = io.BytesIO()
    pil_image.save(byte_arr, format='PNG')
    byte_arr = byte_arr.getvalue()
    return base64.b64encode(byte_arr).decode('utf-8')

# Gradio callback: run the captioning pipeline on the uploaded PIL image and
# return the generated caption.
def captioner(image):
    result = get_completion(image)
    return result[0]['generated_text']

# Close any demos left running from previous launches, then build the UI.
gr.close_all()
demo = gr.Interface(fn=captioner,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning Application",
                    description="Caption the image you'd like to upload",
                    allow_flagging="never")

demo.launch()
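
# Quick local sanity check, if you want to test the captioner without the UI
# (assumes a sample image at "example.jpg" next to this script; adjust the path):
#   from PIL import Image
#   print(captioner(Image.open("example.jpg")))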