lighthouse-emnlp2024 committed
Commit d4004bc · Parent: 734acc2

Add app.py

Files changed (3)
  1. app.py +164 -0
  2. packages.txt +1 -0
  3. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,164 @@
+ """
+ Copyright 2024 LY Corporation
+
+ LY Corporation licenses this file to you under the Apache License,
+ version 2.0 (the "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at:
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+ """
+ import os
+ import torch
+ import subprocess
+ import gradio as gr
+ import librosa
+ from tqdm import tqdm
+ from lighthouse.models import QDDETRPredictor
+
+ # use GPU if available
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ MODEL_NAMES = ['qd_detr']
+ FEATURES = ['clap']
+ TOPK_MOMENT = 5
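+ # MODEL_NAMES x FEATURES enumerates the available checkpoints (one today);
+ # TOPK_MOMENT is how many top-scoring moments are shown as result buttons.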
+
+ """
+ Helper functions
+ """
+ def load_pretrained_weights():
+     file_urls = []
+     for model_name in MODEL_NAMES:
+         for feature in FEATURES:
+             file_urls.append(
+                 "https://zenodo.org/records/13961029/files/{}_{}_clotho-moment.ckpt".format(feature, model_name)
+             )
+     for file_url in tqdm(file_urls):
+         if not os.path.exists('gradio_demo/weights/' + os.path.basename(file_url)):
+             command = 'wget -P gradio_demo/weights/ {}'.format(file_url)
+             subprocess.run(command, shell=True)
+
+     return file_urls
+
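+ # Flatten a list of lists into one list (used to build the radio choices).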
+ def flatten(array2d):
+     list1d = []
+     for elem in array2d:
+         list1d += elem
+     return list1d
+
+ """
+ Model initialization
+ """
+ load_pretrained_weights()
+ model = QDDETRPredictor('gradio_demo/weights/clap_qd_detr_clotho-moment.ckpt', device=device, feature_name='clap')
+ loaded_audio = None
+
+ """
+ Gradio functions
+ """
+ def audio_upload(audio):
+     global loaded_audio
+     if audio is None:
+         loaded_audio = None
+         yield gr.update(value="Removed the audio", visible=True)
+     else:
+         yield gr.update(value="Processing the audio. Wait for a minute...", visible=True)
+         audio_feats = model.encode_audio(audio)
+         loaded_audio = audio_feats
+         yield gr.update(value="Finished audio processing!", visible=True)
+
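+ # Swap the global predictor when a different "feature + model" radio entry is
+ # selected; the label is parsed back into its feature and model components.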
+ def model_load(radio):
+     if radio is not None:
+         yield gr.update(value="Loading new model. Wait for a minute...", visible=True)
+         global model
+         feature, model_name = radio.split('+')
+         feature, model_name = feature.strip(), model_name.strip()
+
+         if model_name == 'qd_detr':
+             model_class = QDDETRPredictor
+         else:
+             raise gr.Error("Select from the models")
+
+         model = model_class('gradio_demo/weights/{}_{}_clotho-moment.ckpt'.format(feature, model_name),
+                             device=device, feature_name='{}'.format(feature))
+         yield gr.update(value="Model loaded: {}".format(radio), visible=True)
+
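+ # Run moment retrieval for the text query against the cached audio features
+ # and surface the top-K [start, end] windows (plus scores) as buttons.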
+ def predict(textbox):
+     global loaded_audio
+     if loaded_audio is None:
+         raise gr.Error('Upload the audio before pushing the `Retrieve moment` button.')
+     else:
+         prediction = model.predict(textbox, loaded_audio)
+         mr_results = prediction['pred_relevant_windows']
+
+         buttons = []
+         for i, pred in enumerate(mr_results[:TOPK_MOMENT]):
+             buttons.append(gr.Button(value='moment {}: [{}, {}] Score: {}'.format(i+1, pred[0], pred[1], pred[2]), visible=True))
+         # Gradio expects as many return values as declared outputs, so pad with
+         # hidden buttons if fewer than TOPK_MOMENT moments were predicted.
+         while len(buttons) < TOPK_MOMENT:
+             buttons.append(gr.Button(visible=False))
+
+         return buttons
+
+
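+ # Recover the [start, end] seconds from the clicked button's label, slice the
+ # raw waveform with librosa, and return it as a playable gr.Audio clip.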
+ def show_trimmed_audio(audio, button):
+     s, sr = librosa.load(audio, sr=None)
+     _seconds = button.split(': [')[1].split(']')[0].split(', ')
+     start_sec = float(_seconds[0])
+     end_sec = float(_seconds[1])
+     start_frame = int(start_sec * sr)
+     end_frame = int(end_sec * sr)
+
+     return gr.Audio((sr, s[start_frame:end_frame]), interactive=False, visible=True)
+
+
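+ # Assemble the two-column UI: model selection plus audio/query input on the
+ # left, the retrieved-moment buttons and trimmed-audio player on the right.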
+ def main():
+     title = """# Audio Moment Retrieval Demo"""
+
+     with gr.Blocks(theme=gr.themes.Soft()) as demo:
+         gr.Markdown(title)
+
+         with gr.Row():
+             with gr.Column():
+                 with gr.Group():
+                     gr.Markdown("## Model selection")
+                     radio_list = flatten([["{} + {}".format(feature, model_name) for model_name in MODEL_NAMES] for feature in FEATURES])
+                     radio = gr.Radio(radio_list, label="models", value="clap + qd_detr", info="Which model do you want to use?")
+                     load_status_text = gr.Textbox(label='Model load status', value='Model loaded: clap + qd_detr')
+
+                 with gr.Group():
+                     gr.Markdown("## Audio and query")
+                     audio_input = gr.Audio(type='filepath')
+                     output = gr.Textbox(label='Audio processing progress')
+                     query_input = gr.Textbox(label='query')
+                     button = gr.Button("Retrieve moment", variant="primary")
+
+             with gr.Column():
+                 with gr.Group():
+                     gr.Markdown("## Retrieved moments")
+
+                     button_1 = gr.Button(value='moment 1', visible=False, elem_id='result_0')
+                     button_2 = gr.Button(value='moment 2', visible=False, elem_id='result_1')
+                     button_3 = gr.Button(value='moment 3', visible=False, elem_id='result_2')
+                     button_4 = gr.Button(value='moment 4', visible=False, elem_id='result_3')
+                     button_5 = gr.Button(value='moment 5', visible=False, elem_id='result_4')
+                     result = gr.Audio(None, label='Trimmed audio', interactive=False, visible=False)
+
+                     button_1.click(show_trimmed_audio, inputs=[audio_input, button_1], outputs=[result])
+                     button_2.click(show_trimmed_audio, inputs=[audio_input, button_2], outputs=[result])
+                     button_3.click(show_trimmed_audio, inputs=[audio_input, button_3], outputs=[result])
+                     button_4.click(show_trimmed_audio, inputs=[audio_input, button_4], outputs=[result])
+                     button_5.click(show_trimmed_audio, inputs=[audio_input, button_5], outputs=[result])
+
+         audio_input.change(audio_upload, inputs=[audio_input], outputs=output)
+         radio.select(model_load, inputs=[radio], outputs=load_status_text)
+
+         button.click(predict,
+                      inputs=[query_input],
+                      outputs=[button_1, button_2, button_3, button_4, button_5])
+
+     demo.launch()
+
+ if __name__ == "__main__":
+     main()
packages.txt ADDED
@@ -0,0 +1 @@
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ git+https://github.com/line/lighthouse.git
+ torch==2.1.0
+ torchvision==0.16.0
+ torchaudio==2.1.0
+ torchtext==0.16.0