Krishna086 committed on
Commit
288ca30
·
verified ·
1 Parent(s): f3041d7

Update translation.py

Browse files
Files changed (1) hide show
  1. translation.py +6 -10
translation.py CHANGED
@@ -45,22 +45,18 @@ def load_model(source_lang, target_lang):
45
  tokenizer_model_pair = all_models.get(model_key)
46
  if tokenizer_model_pair and tokenizer_model_pair[0] and tokenizer_model_pair[1]:
47
  return tokenizer_model_pair
48
- # Use direct English pivot with defined combined_translate
49
- if source_lang != "en" and target_lang != "en":
50
- en_pivot_pair = all_models.get((source_lang, "en")) or _load_model_pair(source_lang, "en")
51
- if en_pivot_pair[0] and en_pivot_pair[1]:
52
- return en_pivot_pair
53
- default_tokenizer, _ = _load_default_model()
54
  def combined_translate(text):
55
  with torch.no_grad():
56
  if source_lang != "en":
57
- src_to_en_tokenizer, src_to_en_model = all_models.get((source_lang, "en"), _load_default_model())
58
  en_text = src_to_en_tokenizer.decode(src_to_en_model.generate(**src_to_en_tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500))[0], skip_special_tokens=True)
59
  else:
60
  en_text = text
61
  if target_lang != "en":
62
- en_to_tgt_tokenizer, en_to_tgt_model = all_models.get(("en", target_lang), _load_default_model())
63
- return en_to_tgt_tokenizer.decode(en_to_tgt_model.generate(**en_to_tgt_tokenizer(en_text, return_tensors="pt", padding=True, truncation=True, max_length=1000))[0], skip_special_tokens=True) # Increased max_length
64
  return en_text
65
  return default_tokenizer, CombinedModel()
66
 
@@ -81,7 +77,7 @@ def translate(text, source_lang, target_lang):
81
  tokenizer, model = load_model(source_lang, target_lang)
82
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
83
  with torch.no_grad():
84
- translated = model.generate(**inputs, max_length=1000 if target_lang == "hi" else 500, num_beams=6 if target_lang == "hi" else 4, early_stopping=True) # Adjusted for Hindi
85
  result = tokenizer.decode(translated[0], skip_special_tokens=True)
86
  return result if result.strip() else text
87
  except Exception as e:
 
45
  tokenizer_model_pair = all_models.get(model_key)
46
  if tokenizer_model_pair and tokenizer_model_pair[0] and tokenizer_model_pair[1]:
47
  return tokenizer_model_pair
48
+ # Simplified pivot through English
49
+ default_tokenizer, default_model = _load_default_model()
 
 
 
 
50
  def combined_translate(text):
51
  # NOTE(review): this span is a diff-rendered fragment — the bare integers
  # (51, 52, ...) and leading "+" markers are artifacts of the diff view, not
  # part of the program. Closure variables (source_lang, target_lang,
  # all_models, default_tokenizer, default_model) are bound in the enclosing
  # load_model, which is outside this hunk.
  # Pivot translation: source -> English -> target, falling back to the
  # default tokenizer/model pair when a direct pair is not cached.
  with torch.no_grad():
52
  if source_lang != "en":
53
+ src_to_en_tokenizer, src_to_en_model = all_models.get((source_lang, "en"), (default_tokenizer, default_model))
54
  # First hop: translate the input text into English (max_length=500 on input).
  # presumably a MarianMT-style tokenizer/model pair — TODO confirm against _load_default_model.
  en_text = src_to_en_tokenizer.decode(src_to_en_model.generate(**src_to_en_tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500))[0], skip_special_tokens=True)
55
  else:
56
  # Source already English: skip the first hop.
  en_text = text
57
  if target_lang != "en":
58
+ en_to_tgt_tokenizer, en_to_tgt_model = all_models.get(("en", target_lang), (default_tokenizer, default_model))
59
  # Second hop: English -> target; larger max_length (1000) than the first hop.
+ return en_to_tgt_tokenizer.decode(en_to_tgt_model.generate(**en_to_tgt_tokenizer(en_text, return_tensors="pt", padding=True, truncation=True, max_length=1000))[0], skip_special_tokens=True)
60
  # Target is English: the intermediate (or original) English text is the result.
  return en_text
61
  return default_tokenizer, CombinedModel()
62
 
 
77
  tokenizer, model = load_model(source_lang, target_lang)
78
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
79
  with torch.no_grad():
80
+ translated = model.generate(**inputs, max_length=1000 if target_lang == "hi" else 500, num_beams=6 if target_lang == "hi" else 4, early_stopping=True)
81
  result = tokenizer.decode(translated[0], skip_special_tokens=True)
82
  return result if result.strip() else text
83
  except Exception as e: