# NOTE(review): removed non-Python scrape artifacts (file-size banner, commit
# hashes, and viewer line numbers) that preceded the code and made this file
# unparseable as Python.
import streamlit as st
from transformers import MarianTokenizer, MarianMTModel

# Preload default model for English to French
@st.cache_resource
def _load_default_model():
    """Return (tokenizer, model) for the default en-fr MarianMT checkpoint.

    Cached by Streamlit so the weights are only loaded once per process.
    """
    default_name = "Helsinki-NLP/opus-mt-en-fr"
    return (
        MarianTokenizer.from_pretrained(default_name),
        MarianMTModel.from_pretrained(default_name),
    )

# Cache other models dynamically
@st.cache_resource
def load_model(src_lang, tgt_lang):
    """Load the MarianMT tokenizer/model for a language pair.

    Args:
        src_lang: Source language code (e.g. "en").
        tgt_lang: Target language code (e.g. "fr").

    Returns:
        A (tokenizer, model) tuple. Falls back to the preloaded en-fr pair
        (with a visible Streamlit warning) when no Helsinki-NLP checkpoint
        exists for the requested pair or the download fails.
    """
    # Reuse the already-cached default instead of caching a second copy of
    # the same en-fr weights under this function's cache key.
    if (src_lang, tgt_lang) == ("en", "fr"):
        return _load_default_model()

    model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
    except Exception:
        # Missing checkpoint or network failure: warn and degrade gracefully
        # rather than crashing the app.
        st.warning(f"Model for {src_lang} to {tgt_lang} not available. Falling back to en-fr.")
        return _load_default_model()  # Fallback to preloaded en-fr model
    return tokenizer, model

# Preload default model globally
# Eagerly materialize the en-fr pair at import time so the first translation
# request does not pay the model download/initialization cost.
DEFAULT_TOKENIZER, DEFAULT_MODEL = _load_default_model()

def translate(text, source_lang, target_lang):
    """Translate text from source to target language.

    Args:
        text: Input string; empty or whitespace-only input returns a prompt
            message instead of being sent to the model.
        source_lang: Human-readable language name, looked up in LANGUAGES
            (unknown names default to English).
        target_lang: Human-readable language name (unknown names default to
            French).

    Returns:
        The translated string, or the input unchanged when source and target
        resolve to the same language code.
    """
    # Treat whitespace-only input the same as empty input.
    if not text or not text.strip():
        return "Please provide text to translate."

    src_code = LANGUAGES.get(source_lang, "en")
    tgt_code = LANGUAGES.get(target_lang, "fr")

    # Bug fix: identical source/target previously tried to load a nonexistent
    # "opus-mt-xx-xx" checkpoint, fell back to en-fr, and mistranslated the
    # input. The identity translation is just the input itself.
    if src_code == tgt_code:
        return text

    # Attempt to load the specific model, fall back to en-fr if it fails
    tokenizer, model = load_model(src_code, tgt_code)

    # Truncate long inputs so they fit the model's context window.
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=400)
    translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)

# Dictionary of supported languages with MarianMT codes
# (display name -> ISO-style code used in Helsinki-NLP checkpoint names)
LANGUAGES = dict(
    English="en",
    French="fr",
    Spanish="es",
    German="de",
    Chinese="zh",
    Arabic="ar",
    Russian="ru",
    Hindi="hi",
    Japanese="ja",
)

# NOTE: language pairs are not validated up front; an unavailable pair falls
# back to the en-fr model inside load_model().