Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -36,39 +36,103 @@ def respond(
         yield response


- [previous inline speech-recognition block removed here; its lines are not shown in this view]
+# Custom HTML for mic button and input box
+mic_html = """
+<div class="mic-container">
+    <button id="mic_button" title="Click to speak">
+        <span class="material-symbols-outlined">mic</span>
+    </button>
+    <input id="user_input" type="text" placeholder="Type your message here..." />
+</div>
+<div id="status" class="status"></div>
+<style>
+.mic-container {
+    display: flex;
+    align-items: center;
+    max-width: 500px;
+    margin: 20px auto;
+    padding: 10px;
+    background: #f5f5f5;
+    border-radius: 8px;
+}
+#mic_button {
+    font-family: 'Material Symbols Outlined';
+    font-size: 24px;
+    color: #555;
+    border: none;
+    background: none;
+    cursor: pointer;
+    padding: 4px;
+    margin-right: 8px;
+}
+#user_input {
+    width: 100%;
+    padding: 8px;
+    font-size: 16px;
+    border: 1px solid #ccc;
+    border-radius: 4px;
+}
+.status {
+    margin-top: 20px;
+    text-align: center;
+    color: #666;
+}
+</style>
 <script>
+let micButton = document.getElementById('mic_button');
+let userInputBox = document.getElementById('user_input');
+let statusDiv = document.getElementById('status');
+let recognition = null;

+if ('webkitSpeechRecognition' in window) {
+    recognition = new webkitSpeechRecognition();
+    recognition.continuous = false;
+    recognition.interimResults = false;
+    recognition.lang = 'en-US';

+    recognition.onstart = function() {
+        statusDiv.textContent = "Listening...";
+        micButton.style.color = '#4CAF50';
+    };

+    recognition.onresult = function(event) {
+        const transcript = event.results[0][0].transcript;
+        userInputBox.value = transcript;
+        statusDiv.textContent = "Done listening.";
+        micButton.style.color = '#555';
+    };
+
+    recognition.onerror = function(event) {
+        console.error('Speech recognition error:', event.error);
+        statusDiv.textContent = "Error: " + event.error;
+        micButton.style.color = '#555';
+    };
+
+    recognition.onspeechend = function() {
+        recognition.stop();
+        statusDiv.textContent = "Stopped listening.";
+        micButton.style.color = '#555';
+    };

+    micButton.onclick = function() {
+        try {
             recognition.start();
+        } catch(err) {
+            console.error('Recognition already started:', err);
+            recognition.stop();
+        }
+    };
+} else {
+    micButton.onclick = function() {
+        alert("Speech recognition is not supported in this browser.");
+        statusDiv.textContent = "Speech recognition not supported";
+    };
+    statusDiv.textContent = "Speech recognition not available";
+}
 </script>
 """

-# Chat Interface with
+# Gradio Chat Interface with Mic Button
 demo = gr.ChatInterface(
     fn=respond,
     additional_inputs=[

@@ -84,35 +148,7 @@ demo = gr.ChatInterface(
         ),
     ],
 )
+demo.load(_js=mic_html)

-
-with gr.Blocks() as ui:
-    with gr.Row():
-        gr.HTML(
-            """
-            <style>
-            #mic_button {
-                font-family: 'Material Symbols Outlined';
-                font-size: 24px;
-                color: #555;
-                border: none;
-                background: none;
-                cursor: pointer;
-                padding: 4px;
-                margin-right: 8px;
-            }
-            #user_input {
-                width: 100%;
-            }
-            </style>
-            <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined">
-            <div style="display: flex; align-items: center;">
-                <button id="mic_button" title="Click to speak">🎤</button>
-                <input id="user_input" type="text" placeholder="Type your message here..." />
-            </div>
-            """
-        )
-        gr.HTML(speech_recognition_js)  # Include JS for mic button functionality
-
+if __name__ == "__main__":
 demo.launch()
-
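For context, a minimal sketch of another way the same markup could be mounted, rendering it through gr.HTML inside a gr.Blocks layout rather than passing it to demo.load(_js=...). The name demo_ui and the trimmed mic_html stand-in below are illustrative, not from the commit, and whether inline script content attached this way actually executes depends on the Gradio version.

import gradio as gr

# Hypothetical wiring sketch: render the mic button / input markup with gr.HTML.
# In the real app this would be the full HTML/CSS/JS string built in app.py above;
# a trimmed stand-in keeps the sketch self-contained.
mic_html = """
<div class="mic-container">
    <button id="mic_button" title="Click to speak">mic</button>
    <input id="user_input" type="text" placeholder="Type your message here..." />
</div>
"""

with gr.Blocks() as demo_ui:
    gr.HTML(mic_html)  # static markup; <script> tags placed here may be stripped or inert

if __name__ == "__main__":
    demo_ui.launch()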