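"""Gradio chat demo for Llava-Phi2: accepts text, image, and audio inputs."""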
import gradio as gr
from PIL import Image
from inference.main import MultiModalPhi2


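# Load the Llava-Phi2 checkpoint once at startup; inference runs on CPU.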
multimodal_phi2 = MultiModalPhi2(
    modelname_or_path="Navyabhat/Llava-Phi2",
    temperature=0.2,
    max_new_tokens=1024,
    device="cpu",
)

def add_content(chatbot, text, image, audio_upload, audio_mic) -> gr.Chatbot:
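    """Echo the user's text, image, and audio inputs into the chat history.

    Microphone audio takes precedence over an uploaded audio file; at least
    one of the three inputs must be provided.
    """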
    textflag, imageflag, audioflag = False, False, False
    if text not in ["", None]:
        chatbot.append((text, None))
        textflag = True
    if image is not None:
        chatbot.append(((image,), None))
        imageflag = True
    if audio_mic is not None:
        chatbot.append(((audio_mic,), None))
        audioflag = True
    elif audio_upload is not None:
        chatbot.append(((audio_upload,), None))
        audioflag = True
    if not any([textflag, imageflag, audioflag]):
        # Raise an error when no text, image, or audio was provided
        raise gr.Error("Enter text, an image, or audio")
    return chatbot


def clear_data():
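    """Reset all input widgets and empty the chat history."""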
    return {prompt: None, image: None, audio_upload: None, audio_mic: None, chatbot: []}


def run(history, text, image, audio_upload, audio_mic):
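    """Send the collected text, image, and audio to the model and append its reply."""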
    if text in [None, ""]:
        text = None

    if audio_upload is not None:
        audio = audio_upload
    elif audio_mic is not None:
        audio = audio_mic
    else:
        audio = None

    print("text", text)
    print("image", image)
    print("audio", audio)

    if image is not None:
        image = Image.open(image)
    outputs = multimodal_phi2(text, audio, image)

    history.append((None, outputs.title()))
    return history, None, None, None, None


with gr.Blocks() as demo:
    gr.Markdown("## MultiModal Phi2 Model Pretraining and Finetuning from Scratch")

    with gr.Row():
        chatbot = gr.Chatbot(
            avatar_images=("πŸ§‘", "πŸ€–"),
            height=550,
        )

    with gr.Row():
        # Text query input
        prompt = gr.Textbox(
            placeholder="Enter text, or upload an image or audio",
            lines=2,
            label="Query",
            value=None,
        )
        # Separate upload buttons so add_content() and run() receive the
        # image and the audio file as distinct inputs
        image = gr.UploadButton("πŸ“ Image", file_types=["image"])
        audio_upload = gr.UploadButton("πŸ“ Audio", file_types=["audio"])
        # Microphone recording, delivered as an mp3 file path
        audio_mic = gr.Audio(source="microphone", type="filepath", format="mp3")
    with gr.Row():
        submit = gr.Button(value="Submit")
        clear = gr.Button(value="Clear")

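    # Two-step pipeline: add_content() echoes the inputs into the chat window,
    # then run() queries the model and clears the input widgets on success.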
    submit.click(
        add_content,
        inputs=[chatbot, prompt, image, audio_upload, audio_mic],
        outputs=[chatbot],
    ).success(
        run,
        inputs=[chatbot, prompt, image, audio_upload, audio_mic],
        outputs=[chatbot, prompt, image, audio_upload, audio_mic],
    )

    clear.click(
        clear_data,
        outputs=[prompt, image, audio_upload, audio_mic, chatbot],
    )

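# Pass share=True to launch() to also expose a temporary public URL.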
demo.launch()