Commit 6e1c8e61 (parent: d4004bc) — "Add samples"
Files changed:
- .gitattributes (+1 -0)
- app.py (+6 -3)
- sample_data/1a-ODBWMUAE.wav (+3 -0)
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -26,6 +26,8 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 MODEL_NAMES = ['qd_detr']
 FEATURES = ['clap']
 TOPK_MOMENT = 5
+sample_path = "sample_data/1a-ODBWMUAE.wav"
+sample_query = "Water cascades down from a waterfall."
 
 """
 Helper functions
@@ -129,15 +131,15 @@ def main():
 
         with gr.Group():
             gr.Markdown("## Audio and query")
-            audio_input = gr.Audio(type='filepath')
+            audio_input = gr.Audio(sample_path, type='filepath')
             output = gr.Textbox(label='Audio processing progress')
-            query_input = gr.Textbox(label='query')
+            query_input = gr.Textbox(sample_query, label='query')
             button = gr.Button("Retrieve moment", variant="primary")
 
         with gr.Column():
             with gr.Group():
                 gr.Markdown("## Retrieved moments")
-
+                gr.Markdown("Click on the moment button to listen to the trimmed audio.")
                 button_1 = gr.Button(value='moment 1', visible=False, elem_id='result_0')
                 button_2 = gr.Button(value='moment 2', visible=False, elem_id='result_1')
                 button_3 = gr.Button(value='moment 3', visible=False, elem_id='result_2')
@@ -157,6 +159,7 @@ def main():
     button.click(predict,
                  inputs=[query_input],
                  outputs=[button_1, button_2, button_3, button_4, button_5])
+    demo.load(audio_upload, inputs=[audio_input], outputs=output)
 
     demo.launch()
sample_data/1a-ODBWMUAE.wav ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a3175fc225f4e220c1e9604123ad1cc2f2d6d56330e65a4df01157b6672a0e2
+size 3201056