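"""Indic Language Virtual Assistant.

A Gradio app for Hugging Face Spaces (ZeroGPU): transcribes speech with
openai/whisper-small, generates a reply with sarvamai/sarvam-2b-v0.5, and
speaks the reply back with gTTS.
"""
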
import torch
import librosa
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration
from gtts import gTTS
import gradio as gr
import spaces
from langdetect import detect
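
# Optional: langdetect is non-deterministic by default; seeding the detector
# factory makes results reproducible across runs. Uncomment to enable:
# from langdetect import DetectorFactory
# DetectorFactory.seed = 0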

print("Using GPU for operations when available")

# Function to safely load pipeline within a GPU-decorated function
@spaces.GPU
def load_pipeline(model_name, **kwargs):
    try:
        device = 0 if torch.cuda.is_available() else "cpu"
        return pipeline(model=model_name, device=device, **kwargs)
    except Exception as e:
        print(f"Error loading {model_name} pipeline: {e}")
        return None

# Load Whisper model for speech recognition within a GPU-decorated function
@spaces.GPU
def load_whisper():
    try:
        device = 0 if torch.cuda.is_available() else "cpu"
        processor = WhisperProcessor.from_pretrained("openai/whisper-small")
        model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
        return processor, model
    except Exception as e:
        print(f"Error loading Whisper model: {e}")
        return None, None

# Load sarvam-2b for text generation within a GPU-decorated function
@spaces.GPU
def load_sarvam():
    return load_pipeline('sarvamai/sarvam-2b-v0.5')

# Process audio input within a GPU-decorated function
@spaces.GPU
def process_audio_input(audio, whisper_processor, whisper_model):
    if whisper_processor is None or whisper_model is None:
        return "Error: Speech recognition model is not available. Please type your message instead."
    
    try:
        waveform, sr = librosa.load(audio, sr=16000)
        input_features = whisper_processor(waveform, sampling_rate=sr, return_tensors="pt").input_features.to(whisper_model.device)
        predicted_ids = whisper_model.generate(input_features)
        transcription = whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
        return transcription
    except Exception as e:
        return f"Error processing audio: {str(e)}. Please type your message instead."

# Generate response within a GPU-decorated function
@spaces.GPU
def generate_response(transcription, sarvam_pipe):
    if sarvam_pipe is None:
        return "Error: Text generation model is not available."
    
    try:
        # Prepare the prompt
        prompt = f"Human: {transcription}\n\nAssistant:"
        
        # Generate response using the sarvam-2b model
        # (max_new_tokens bounds only the generated text, so a long prompt
        # does not eat into the output budget the way max_length would)
        response = sarvam_pipe(prompt, max_new_tokens=200, num_return_sequences=1, do_sample=True, temperature=0.7)[0]['generated_text']
        
        # Extract the assistant's response
        assistant_response = response.split("Assistant:")[-1].strip()
        
        return assistant_response
    except Exception as e:
        return f"Error generating response: {str(e)}"

# Text-to-speech function
def text_to_speech(text, lang='hi'):
    try:
        # Use gTTS's Indian endpoint (tld='co.in') for Indic languages
        if lang in ['hi', 'bn', 'gu', 'kn', 'ml', 'mr', 'or', 'pa', 'ta', 'te']:
            tts = gTTS(text=text, lang=lang, tld='co.in')  # Use Indian TLD
        else:
            tts = gTTS(text=text, lang=lang)
        
        tts.save("response.mp3")
        return "response.mp3"
    except Exception as e:
        print(f"Error in text-to-speech: {str(e)}")
        return None
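
# Example usage (illustrative): text_to_speech("नमस्ते", lang='hi') writes
# "response.mp3" to the working directory and returns its path, or None on failure.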

# Language detection function
def detect_language(text):
    lang_codes = {
        'bn': 'Bengali', 'gu': 'Gujarati', 'hi': 'Hindi', 'kn': 'Kannada',
        'ml': 'Malayalam', 'mr': 'Marathi', 'or': 'Oriya', 'pa': 'Punjabi',
        'ta': 'Tamil', 'te': 'Telugu', 'en': 'English'
    }
    
    try:
        detected_lang = detect(text)
        return detected_lang if detected_lang in lang_codes else 'en'
    except Exception:
        # Fallback to simple script-based detection on the Devanagari block
        if any(0x0900 <= ord(char) <= 0x097F for char in text):  # Devanagari script
            return 'hi'
        return 'en'  # Default to English if no Devanagari script is detected
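
# Illustrative examples (langdetect can be noisy on short strings, so these
# are expected rather than guaranteed results):
#   detect_language("यह हिंदी में लिखा गया एक वाक्य है।")   # typically 'hi'
#   detect_language("This sentence is written in English.")  # typically 'en'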

@spaces.GPU
def indic_language_assistant(input_type, audio_input, text_input):
    try:
        # Load models inside this GPU-decorated function: ZeroGPU Spaces only
        # attach a GPU within @spaces.GPU calls, so models are loaded per request
        whisper_processor, whisper_model = load_whisper()
        sarvam_pipe = load_sarvam()

        if input_type == "audio" and audio_input is not None:
            transcription = process_audio_input(audio_input, whisper_processor, whisper_model)
        elif input_type == "text" and text_input:
            transcription = text_input
        else:
            return "Please provide either audio or text input.", "No input provided.", None

        response = generate_response(transcription, sarvam_pipe)
        lang = detect_language(response)
        audio_response = text_to_speech(response, lang)
        
        return transcription, response, audio_response
    except Exception as e:
        error_message = f"An error occurred: {str(e)}"
        return error_message, error_message, None

# Custom CSS
custom_css = """
body {
    background-color: #1a1a1a;
    color: #ffffff;
    font-family: Arial, sans-serif;
}

.container {
    max-width: 800px;
    margin: 0 auto;
    padding: 20px;
}

h1 {
    font-size: 2.5em;
    background: linear-gradient(45deg, #4a90e2, #f48fb1);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    margin-bottom: 10px;
}

h2 {
    color: #a0a0a0;
    font-weight: normal;
}

.task-container {
    display: flex;
    justify-content: space-between;
    flex-wrap: wrap;
    margin-top: 30px;
}

.task-card {
    background-color: #2a2a2a;
    border-radius: 10px;
    padding: 15px;
    margin: 10px 0;
    width: calc(50% - 10px);
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    transition: transform 0.3s ease;
}

.task-card:hover {
    transform: translateY(-5px);
}

.task-icon {
    font-size: 24px;
    margin-bottom: 10px;
}

.input-box {
    width: 100%;
    padding: 10px;
    border-radius: 20px;
    border: none;
    background-color: #333;
    color: #fff;
    margin-top: 20px;
}

.submit-btn {
    background-color: #4a90e2;
    color: white;
    border: none;
    padding: 10px 20px;
    border-radius: 20px;
    cursor: pointer;
    margin-top: 10px;
    transition: background-color 0.3s ease;
}

.submit-btn:hover {
    background-color: #3a7bd5;
}
"""

# Custom HTML
custom_html = """
<div class="container">
    <h1>Hello, User</h1>
    <h2>How can I help you today?</h2>
    <div class="task-container">
        <div class="task-card">
            <div class="task-icon">🎤</div>
            <p>Speak in any Indic language</p>
        </div>
        <div class="task-card">
            <div class="task-icon">⌨️</div>
            <p>Type in any Indic language</p>
        </div>
    </div>
</div>
"""

# Create Gradio interface
iface = gr.Interface(
    fn=indic_language_assistant,
    inputs=[
        gr.Radio(["audio", "text"], label="Input Type", value="audio"),
        gr.Audio(type="filepath", label="Speak (if audio input selected)"),
        gr.Textbox(label="Type your message (if text input selected)", elem_classes="input-box")
    ],
    outputs=[
        gr.Textbox(label="Transcription/Input"),
        gr.Textbox(label="Generated Response"),
        gr.Audio(label="Audio Response")
    ],
    title="Indic Language Virtual Assistant",
    description="Speak or type in any supported Indic language or English. The assistant will respond in text and audio.",
    css=custom_css,
    elem_id="indic-assistant",
    theme="dark"
)

# Launch the app
iface.launch()