Update translation.py
translation.py +2 -2
@@ -22,7 +22,7 @@ def load_model(source_lang, target_lang):
         st.warning(f"No direct model for {source_lang} to {target_lang}. Using cached en-fr.")
         return _load_default_model()
 
-@st.cache_data(ttl=3600)
+@st.cache_data(ttl=3600)
 def translate_cached(text, source_lang, target_lang):
     src_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
                 "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
@@ -31,7 +31,7 @@ def translate_cached(text, source_lang, target_lang):
     tokenizer, model = load_model(src_code, tgt_code)
     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
     with torch.no_grad():
-        translated = model.generate(**inputs, max_length=500, num_beams=
+        translated = model.generate(**inputs, max_length=500, num_beams=4, early_stopping=True)  # Increased beams for quality; early_stopping limits latency
     return tokenizer.decode(translated[0], skip_special_tokens=True)
 
 def translate(text, source_lang, target_lang):
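
Neither load_model's happy path nor _load_default_model appears in this commit. Judging by the two-letter language codes and the en-fr fallback, they most likely wrap Helsinki-NLP opus-mt MarianMT checkpoints. The sketch below is a guess at that shape, not the repository's actual code; the @st.cache_resource decorator and the model names are assumptions:

import streamlit as st
from transformers import MarianMTModel, MarianTokenizer

@st.cache_resource  # assumption: models held as long-lived resources, not hashed data
def load_model(src_code, tgt_code):
    # Hypothetical reconstruction: try the Helsinki-NLP pair model, fall back to en-fr.
    model_name = f"Helsinki-NLP/opus-mt-{src_code}-{tgt_code}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return tokenizer, model
    except OSError:  # transformers raises OSError for an unknown model id
        st.warning(f"No direct model for {src_code} to {tgt_code}. Using cached en-fr.")
        return _load_default_model()

@st.cache_resource
def _load_default_model():
    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
    return tokenizer, MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-fr")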
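
One design note on the changed decorator: @st.cache_data hashes the arguments and memoizes the returned string per (text, source_lang, target_lang) for the one-hour ttl, which suits translate_cached's string output; the model and tokenizer objects themselves are not hash-friendly, which is why Streamlit's docs steer those toward st.cache_resource. On line 34, num_beams=4 widens beam search (better quality at higher latency, not a speed win), and early_stopping=True ends the search as soon as num_beams complete candidates exist. A hypothetical driver, assuming translation.py exposes translate_cached as shown (this UI code is illustrative, not part of the diff):

import streamlit as st
from translation import translate_cached  # assumes this module layout

st.title("Quick translator")
text = st.text_area("Text to translate", "Hello, world!")
source = st.selectbox("From", ["English", "French", "Spanish", "German", "Hindi"])
target = st.selectbox("To", ["French", "English", "Spanish", "German", "Hindi"])

if st.button("Translate"):
    # A repeat call with identical arguments inside the 3600 s TTL
    # returns the memoized string without running the model again.
    st.write(translate_cached(text, source, target))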