# NOTE(review): the six lines below are Hugging Face Spaces page chrome captured
# by a web scrape, not program text; commented out so the file parses.
# Spaces:
# Runtime error
# Runtime error
# File size: 1,274 Bytes
# c4a4b60 4e909b7 72fadc4 cbb2414 72fadc4 ac84af9 72fadc4 cbb2414 72fadc4 cbb2414 72fadc4 cbb2414 0d5f435 cbb2414
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
import os
import numpy as np
import gradio as gr
from gradio.mix import Series
from transformers import pipeline
# Configuration from the environment: the classifier model repo/path and a
# Hugging Face read token for private-model access. os.environ values are
# already str, so the previous str() wrappers were redundant; a KeyError here
# means the Space's secrets are not configured.
path_to_L_model = os.environ['path_to_L_model']
read_token = os.environ['read_token']

# UI copy for the demo.
description = "Talk to Breud!"
title = "Breud (BERT + Freud)"

# Speech-to-text: Whisper base checkpoint via the transformers pipeline API.
asr = pipeline("automatic-speech-recognition", "openai/whisper-base")

# Text classifier loaded from the configured model path.
# Fix: `api_token` is not a transformers pipeline keyword (it belonged to the
# old gradio `Interface.load` API); the supported name is `token`, which is
# forwarded for authenticated (private) model downloads.
classifier = pipeline("text-classification", path_to_L_model, token=read_token)
def speech_to_text(speech):
    """Transcribe an audio input using the module-level Whisper ASR pipeline.

    Args:
        speech: Audio input in a form the ASR pipeline accepts
            (presumably what the Gradio Audio component emits — TODO confirm).

    Returns:
        The transcribed text string.
    """
    return asr(speech)["text"]
def text_to_sentiment(text):
    """Classify *text* and return the top prediction's label.

    Args:
        text: The string to classify.

    Returns:
        The ``"label"`` field of the first result from the module-level
        text-classification pipeline.
    """
    top_prediction = classifier(text)[0]
    return top_prediction["label"]
# Two-step Gradio UI: record audio -> transcribe to text -> classify the text.
demo = gr.Blocks()
with demo:
    # The microphone recording feeds the ASR step; the resulting textbox feeds
    # the classifier, whose output lands in the Label component.
    audio_file = gr.Audio(source="microphone")
    text = gr.Textbox()
    label = gr.Label()
    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)

# Fix: the original line ended with a stray " |" (scrape artifact), which made
# the whole file a SyntaxError.
demo.launch()