File size: 4,486 Bytes
649867e
5c15933
649867e
1bcb93f
 
68b4319
6b8e248
649867e
68b4319
 
 
6b8e248
68b4319
 
 
1bcb93f
649867e
 
038b3e7
649867e
6b8e248
649867e
038b3e7
649867e
 
1bcb93f
 
68b4319
649867e
68b4319
038b3e7
649867e
038b3e7
1bcb93f
 
649867e
 
68b4319
038b3e7
 
6b8e248
649867e
 
 
038b3e7
 
1bcb93f
 
68b4319
038b3e7
68b4319
038b3e7
 
 
 
 
1bcb93f
 
038b3e7
 
 
 
 
 
 
1bcb93f
 
 
 
 
 
 
 
 
 
 
 
 
038b3e7
68b4319
038b3e7
 
 
 
 
 
 
68b4319
 
 
 
 
 
 
 
5c15933
1bcb93f
416ddd3
1bcb93f
038b3e7
 
 
 
 
1bcb93f
038b3e7
1bcb93f
 
416ddd3
1bcb93f
038b3e7
416ddd3
5c15933
416ddd3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import torch
import librosa
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration
from gtts import gTTS
import gradio as gr

# Every model below is created with device="cpu"; this script has no GPU path.
print("Using CPU for all operations")

def load_pipeline(model_name, **kwargs):
    """Build a CPU-bound transformers pipeline for *model_name*.

    Any extra keyword arguments are forwarded to ``pipeline``. Returns the
    pipeline object, or None if construction fails for any reason (the error
    is printed so the app can keep starting up).
    """
    try:
        pipe = pipeline(model=model_name, device="cpu", **kwargs)
    except Exception as e:
        print(f"Error loading {model_name} pipeline: {e}")
        return None
    return pipe

def load_whisper():
    """Load the Whisper-small processor/model pair for speech recognition.

    Returns (processor, model) on success, or (None, None) if either
    download/initialization step fails (the error is printed).
    """
    model_id = "openai/whisper-small"
    try:
        return (
            WhisperProcessor.from_pretrained(model_id),
            WhisperForConditionalGeneration.from_pretrained(model_id),
        )
    except Exception as e:
        print(f"Error loading Whisper model: {e}")
        return None, None

# Load sarvam-2b for text generation
def load_sarvam():
    """Return a text-generation pipeline for 'sarvamai/sarvam-2b-v0.5'.

    Delegates to load_pipeline, so None is returned when loading fails.
    """
    return load_pipeline('sarvamai/sarvam-2b-v0.5')

# Attempt to load models at import time. Each loader swallows its own
# errors and returns None, so a failed load does not stop the app from
# starting — the per-request handlers below check for None and report.
whisper_processor, whisper_model = load_whisper()
sarvam_pipe = load_sarvam()

def process_audio_input(audio):
    """Transcribe the audio file at path *audio* with Whisper.

    Returns the transcription string, or a human-readable error message if
    the model is unavailable or decoding fails.
    """
    # Models may have failed to load at startup; degrade gracefully.
    if whisper_processor is None or whisper_model is None:
        return "Error: Speech recognition model is not available. Please type your message instead."

    try:
        # Resample to 16 kHz, the rate Whisper was trained on.
        waveform, sample_rate = librosa.load(audio, sr=16000)
        features = whisper_processor(
            waveform, sampling_rate=sample_rate, return_tensors="pt"
        ).input_features
        token_ids = whisper_model.generate(features)
        return whisper_processor.batch_decode(token_ids, skip_special_tokens=True)[0]
    except Exception as e:
        return f"Error processing audio: {str(e)}. Please type your message instead."

def generate_response(text_input):
    """Generate a reply to *text_input* with the sarvam-2b pipeline.

    Returns the generated text, or a human-readable error message if the
    model is unavailable or generation fails.
    """
    if sarvam_pipe is None:
        return "Error: sarvam-2b model is not available. The assistant cannot generate responses at this time."

    try:
        outputs = sarvam_pipe(
            text_input,
            max_new_tokens=100,
            temperature=0.7,
            repetition_penalty=1.2,
        )
        return outputs[0]['generated_text']
    except Exception as e:
        return f"Error generating response: {str(e)}"

def text_to_speech(text, lang='hi'):
    """Synthesize *text* with gTTS and return the path to an MP3 file.

    Returns None if synthesis fails (the error is printed).

    Fix: the original wrote a fixed "response.mp3" in the working directory,
    so concurrent Gradio requests clobbered each other's audio (and the CWD
    may not be writable). Each call now gets its own temporary file.
    """
    import tempfile  # local import: keeps the module's import block unchanged

    try:
        tts = gTTS(text=text, lang=lang)
        # Reserve a unique, OS-appropriate temp path; delete=False because
        # Gradio reads the file after this function returns.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
            out_path = fp.name
        tts.save(out_path)
        return out_path
    except Exception as e:
        print(f"Error in text-to-speech: {str(e)}")
        return None

def detect_language(text):
    """Detect the dominant supported Indic language of *text* by Unicode script.

    Counts characters per Indic script block and returns the gTTS language
    code of the most frequent one; returns 'en' when no Indic characters are
    present.

    Fix: the original looped over a lang_codes dict without ever using the
    loop variables — the condition was loop-invariant and returned 'hi' for
    ANY listed greeting, so e.g. Tamil text was spoken with Hindi TTS.
    """
    # Unicode block ranges (inclusive) for each supported script.
    script_ranges = {
        'hi': (0x0900, 0x097F),  # Devanagari (Hindi; Marathi shares this block)
        'bn': (0x0980, 0x09FF),  # Bengali
        'pa': (0x0A00, 0x0A7F),  # Gurmukhi (Punjabi)
        'gu': (0x0A80, 0x0AFF),  # Gujarati
        'or': (0x0B00, 0x0B7F),  # Oriya
        'ta': (0x0B80, 0x0BFF),  # Tamil
        'te': (0x0C00, 0x0C7F),  # Telugu
        'kn': (0x0C80, 0x0CFF),  # Kannada
        'ml': (0x0D00, 0x0D7F),  # Malayalam
    }
    counts = {code: 0 for code in script_ranges}
    for ch in text:
        cp = ord(ch)
        for code, (lo, hi) in script_ranges.items():
            if lo <= cp <= hi:
                counts[code] += 1
                break
    best = max(counts, key=counts.get)
    # Fall back to English when no Indic script characters were seen.
    return best if counts[best] > 0 else 'en'

def indic_language_assistant(input_type, audio_input, text_input):
    """End-to-end handler: resolve user text, generate a reply, synthesize speech.

    Returns a 3-tuple (transcription_or_input, response_text, audio_path_or_None),
    matching the three Gradio output components.
    """
    try:
        # Pick the user's message according to the selected input mode.
        if input_type == "audio" and audio_input is not None:
            user_text = process_audio_input(audio_input)
        elif input_type == "text" and text_input:
            user_text = text_input
        else:
            return "Please provide either audio or text input.", "No input provided.", None

        reply = generate_response(user_text)
        # Speak the reply in whatever language it appears to be written in.
        audio_path = text_to_speech(reply, detect_language(reply))
        return user_text, reply, audio_path
    except Exception as e:
        error_message = f"An error occurred: {str(e)}"
        return error_message, error_message, None

# Create Gradio interface
# NOTE(review): gr.Audio(source=...) was renamed to sources=[...] in Gradio 4.x —
# confirm the pinned Gradio version still accepts this keyword.
iface = gr.Interface(
    fn=indic_language_assistant,
    inputs=[
        # The radio chooses which of the two inputs below is consumed.
        gr.Radio(["audio", "text"], label="Input Type", value="audio"),
        gr.Audio(source="microphone", type="filepath", label="Speak (if audio input selected)"),
        gr.Textbox(label="Type your message (if text input selected)")
    ],
    outputs=[
        # Mirrors the 3-tuple returned by indic_language_assistant.
        gr.Textbox(label="Transcription/Input"),
        gr.Textbox(label="Generated Response"),
        gr.Audio(label="Audio Response")
    ],
    title="Indic Language Virtual Assistant",
    description="Speak or type in any supported Indic language or English. The assistant will respond in text and audio."
)

# Launch the app
iface.launch()