import streamlit as st
from transformers import MarianTokenizer, MarianMTModel

# Display name -> ISO 639-1 code for every supported language; shared by
# translate() and the UI.
LANGUAGES = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
             "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}

@st.cache_resource
def _load_default_model():
    # Fallback pair (English -> French), cached so the weights load only once.
    model_name = "Helsinki-NLP/opus-mt-en-fr"
    return MarianTokenizer.from_pretrained(model_name), MarianMTModel.from_pretrained(model_name)

@st.cache_resource
def load_model(src_lang, tgt_lang):
    # Try the requested Helsinki-NLP OPUS-MT pair; fall back to en-fr when no
    # such checkpoint exists on the Hub.
    try:
        model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
        return MarianTokenizer.from_pretrained(model_name), MarianMTModel.from_pretrained(model_name)
    except Exception:  # a bare except would also swallow KeyboardInterrupt/SystemExit
        st.warning(f"No model for {src_lang} to {tgt_lang}. Using en-fr.")
        return _load_default_model()

# Warm the en-fr fallback at startup; load_model() reuses the cached weights.
DEFAULT_TOKENIZER, DEFAULT_MODEL = _load_default_model()

def translate(text, source_lang, target_lang):
    if not text:
        return "No text provided."
    # Map display names to ISO codes, defaulting to the en-fr fallback pair.
    src_code = LANGUAGES.get(source_lang, "en")
    tgt_code = LANGUAGES.get(target_lang, "fr")
    tokenizer, model = load_model(src_code, tgt_code)
    # Truncate long inputs so they fit the model's fixed context window.
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=400)
    translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)
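
# Hypothetical usage sketch (an assumption, not from the original file): a
# minimal Streamlit front end showing how translate() and LANGUAGES might be
# wired together. Widget labels and layout here are illustrative only.
st.title("MarianMT Translator")
source = st.selectbox("Source language", list(LANGUAGES), index=0)
target = st.selectbox("Target language", list(LANGUAGES), index=1)
text = st.text_area("Text to translate")
if st.button("Translate"):
    st.write(translate(text, source, target))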
