#app.py
import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
from IndicTransToolkit.processor import IndicProcessor
import requests
from datetime import datetime
import tempfile
from gtts import gTTS
import os
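# Dependencies implied by the imports above (a sketch, not a pinned list): gradio, torch,
# transformers, requests, gTTS, and IndicTransToolkit (which provides IndicProcessor).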
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Load models
model_en_to_indic = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/indictrans2-en-indic-1B", trust_remote_code=True).to(DEVICE)
tokenizer_en_to_indic = AutoTokenizer.from_pretrained("ai4bharat/indictrans2-en-indic-1B", trust_remote_code=True)
model_indic_to_en = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/indictrans2-indic-en-1B", trust_remote_code=True).to(DEVICE)
tokenizer_indic_to_en = AutoTokenizer.from_pretrained("ai4bharat/indictrans2-indic-en-1B", trust_remote_code=True)
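# IndicProcessor handles IndicTrans2's pre-/post-processing: preprocess_batch tags the
# source/target languages before tokenization, postprocess_batch detokenizes the generated text.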
ip = IndicProcessor(inference=True)
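# Whisper (small) provides the speech-to-text used by the English-audio input flow below.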
asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")
# --- Supabase settings ---
SUPABASE_URL = "https://gptmdbhzblfybdnohqnh.supabase.co"
SUPABASE_API_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..."
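# NOTE: in a deployed Space the key would typically be read from a secret
# (e.g. os.environ) rather than hard-coded.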
# --- Supabase utilities ---
def save_to_supabase(input_text, output_text, direction):
    if not input_text.strip() or not output_text.strip():
        return "Nothing to save."
    table = "translations" if direction == "en_to_ks" else "ks_to_en_translations"
    payload = {
        "timestamp": datetime.utcnow().isoformat(),
        "input_text": input_text,
        "output_text": output_text
    }
    headers = {
        "apikey": SUPABASE_API_KEY,
        "Authorization": f"Bearer {SUPABASE_API_KEY}",
        "Content-Type": "application/json"
    }
    try:
        # Supabase's REST API returns 201 Created when the row is inserted.
        response = requests.post(f"{SUPABASE_URL}/rest/v1/{table}", json=payload, headers=headers)
        return "✅ Saved successfully!" if response.status_code == 201 else "❌ Failed to save."
    except Exception as e:
        print("Save error:", e)
        return "❌ Save error."
def get_translation_history(direction):
    table = "translations" if direction == "en_to_ks" else "ks_to_en_translations"
    headers = {
        "apikey": SUPABASE_API_KEY,
        "Authorization": f"Bearer {SUPABASE_API_KEY}"
    }
    try:
        # Fetch the ten most recent rows, newest first, via Supabase's query parameters.
        res = requests.get(f"{SUPABASE_URL}/rest/v1/{table}?order=timestamp.desc&limit=10", headers=headers)
        if res.status_code == 200:
            data = res.json()
            return "\n\n".join([f"Input: {r['input_text']} → Output: {r['output_text']}" for r in data])
        return "Failed to load history."
    except Exception as e:
        print("History error:", e)
        return "Error loading history."
# --- Translation ---
def translate(text, direction):
    if not text.strip():
        return "Enter some text.", None
    if direction == "en_to_ks":
        src_lang, tgt_lang = "eng_Latn", "kas_Arab"
        model, tokenizer = model_en_to_indic, tokenizer_en_to_indic
    else:
        src_lang, tgt_lang = "kas_Arab", "eng_Latn"
        model, tokenizer = model_indic_to_en, tokenizer_indic_to_en
    try:
        # Preprocess (adds language tags), tokenize, generate with beam search, then detokenize.
        batch = ip.preprocess_batch([text], src_lang=src_lang, tgt_lang=tgt_lang)
        tokens = tokenizer(batch, return_tensors="pt", padding=True).to(DEVICE)
        with torch.no_grad():
            output = model.generate(**tokens, max_length=256, num_beams=5)
        result = tokenizer.batch_decode(output, skip_special_tokens=True)
        final = ip.postprocess_batch(result, lang=tgt_lang)[0]
        return final, None
    except Exception as e:
        print("Translation error:", e)
        return "⚠️ Translation failed.", None
# --- TTS for English output ---
def synthesize_tts(text):
    try:
        # gTTS calls Google's TTS service, so this requires outbound network access.
        tts = gTTS(text=text, lang="en")
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
            tts.save(f.name)
        return f.name
    except Exception as e:
        print("TTS error:", e)
        return None
# --- STT only for en_to_ks ---
def generate_stt_for_input(audio_path, direction):
    if direction != "en_to_ks":
        return "⚠️ Audio input is only supported for English to Kashmiri.", "", None
    try:
        transcription = asr(audio_path)["text"]
    except Exception as e:
        print("STT error:", e)
        return "⚠️ Transcription failed.", "", None
    translated, _ = translate(transcription, direction)
    return transcription, translated, None
# --- Generate TTS for English output ---
def generate_tts_for_output(output_text, direction):
    # TTS only applies when the output is English (Kashmiri → English direction).
    if direction == "ks_to_en" and output_text.strip():
        return synthesize_tts(output_text)
    return None
# --- Switch UI direction ---
def switch_direction(direction, input_text_val, output_text_val):
    new_direction = "ks_to_en" if direction == "en_to_ks" else "en_to_ks"
    input_label = "Kashmiri Text" if new_direction == "ks_to_en" else "English Text"
    output_label = "English Translation" if new_direction == "ks_to_en" else "Kashmiri Translation"
    # Swap the two text boxes, relabel them, and clear the audio output.
    return new_direction, gr.update(value=output_text_val, label=input_label), gr.update(value=input_text_val, label=output_label), None
# === Gradio Interface ===
with gr.Blocks() as interface:
    gr.HTML("<h2 style='text-align:center;'>English ↔ Kashmiri Translator</h2>")
    translation_direction = gr.State(value="en_to_ks")
    with gr.Row():
        input_text = gr.Textbox(label="English Text", placeholder="Enter text here...", lines=2)
        output_text = gr.Textbox(label="Kashmiri Translation", placeholder="Translated text...", lines=2)
    with gr.Row():
        translate_button = gr.Button("Translate")
        save_button = gr.Button("Save Translation")
        switch_button = gr.Button("Switch Direction")
    save_status = gr.Textbox(label="Save Status", interactive=False)
    history = gr.Textbox(label="Translation History", lines=8, interactive=False)
    with gr.Row():
        audio_input = gr.Audio(type="filepath", label="🎙️ Record English audio")
        audio_output = gr.Audio(label="🔊 English TTS", interactive=False)
    stt_button = gr.Button("🎤 Transcribe & Translate (EN → KS Only)")
    tts_button = gr.Button("🔊 Generate English Speech (KS → EN Only)")

    # Events
    translate_button.click(
        fn=translate,
        inputs=[input_text, translation_direction],
        outputs=[output_text, audio_output]
    )
    tts_button.click(
        fn=generate_tts_for_output,
        inputs=[output_text, translation_direction],
        outputs=audio_output
    )
    # Saving chains into a history refresh once the insert finishes.
    save_button.click(
        fn=save_to_supabase,
        inputs=[input_text, output_text, translation_direction],
        outputs=save_status
    ).then(
        fn=get_translation_history,
        inputs=translation_direction,
        outputs=history
    )
    switch_button.click(
        fn=switch_direction,
        inputs=[translation_direction, input_text, output_text],
        outputs=[translation_direction, input_text, output_text, audio_output]
    )
    stt_button.click(
        fn=generate_stt_for_input,
        inputs=[audio_input, translation_direction],
        outputs=[input_text, output_text, audio_output]
    )
if __name__ == "__main__":
    interface.queue().launch(share=True)