Update translation.py
Browse files- translation.py +5 -3
translation.py
CHANGED
@@ -4,7 +4,7 @@ import torch
|
|
4 |
|
5 |
@st.cache_resource
|
6 |
def _load_default_model():
|
7 |
-
model_name = "Helsinki-NLP/opus-mt-en-fr"
|
8 |
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
9 |
model = MarianMTModel.from_pretrained(model_name)
|
10 |
return tokenizer, model
|
@@ -12,12 +12,14 @@ def _load_default_model():
|
|
12 |
@st.cache_resource
|
13 |
def load_model(src_lang, tgt_lang):
|
14 |
try:
|
|
|
|
|
15 |
model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
|
16 |
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
17 |
model = MarianMTModel.from_pretrained(model_name)
|
18 |
return tokenizer, model
|
19 |
except Exception as e:
|
20 |
-
st.warning(f"No direct model for {src_lang} to {tgt_lang}. Using cached en-fr. Error
|
21 |
return _load_default_model()
|
22 |
|
23 |
@st.cache_data
|
@@ -26,7 +28,7 @@ def translate_cached(text, source_lang, target_lang):
|
|
26 |
"Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
|
27 |
tgt_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
|
28 |
"Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(target_lang, "fr")
|
29 |
-
tokenizer, model = load_model(src_code,
|
30 |
inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
|
31 |
with torch.no_grad():
|
32 |
translated = model.generate(**inputs, max_length=500)
|
|
|
4 |
|
5 |
@st.cache_resource
def _load_default_model():
    """Return a cached (tokenizer, model) pair for the fallback checkpoint.

    Used whenever no dedicated model exists for a requested language pair.
    Cached by Streamlit so the download/load happens only once per session.
    """
    checkpoint = "Helsinki-NLP/opus-mt-en-fr"  # Default model
    return (
        MarianTokenizer.from_pretrained(checkpoint),
        MarianMTModel.from_pretrained(checkpoint),
    )
|
|
|
12 |
@st.cache_resource
def load_model(src_lang, tgt_lang):
    """Load the MarianMT (tokenizer, model) pair for a language-code pair.

    Args:
        src_lang: source language ISO code, e.g. "en".
        tgt_lang: target language ISO code, e.g. "fr".

    Returns:
        (tokenizer, model) for "Helsinki-NLP/opus-mt-{src}-{tgt}" when that
        checkpoint loads; otherwise the cached en-fr fallback pair.
    """
    try:
        if src_lang == tgt_lang:  # Handle same language case
            return _load_default_model()
        model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return tokenizer, model
    # Broad on purpose: from_pretrained raises many error types (missing
    # repo, network, cache corruption); any of them means "fall back".
    # Fix: the original bound the exception as `e` but never used it.
    except Exception:
        st.warning(f"No direct model for {src_lang} to {tgt_lang}. Using cached en-fr. Error suppressed.")
        return _load_default_model()
|
24 |
|
25 |
@st.cache_data
|
|
|
28 |
"Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
|
29 |
tgt_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
|
30 |
"Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(target_lang, "fr")
|
31 |
+
tokenizer, model = load_model(src_code, tgt_lang if src_lang != tgt_lang else "fr") # Avoid en-en error
|
32 |
inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
|
33 |
with torch.no_grad():
|
34 |
translated = model.generate(**inputs, max_length=500)
|