24Sureshkumar commited on
Commit
c8086b7
·
verified ·
1 Parent(s): f9ff107

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -8
app.py CHANGED
@@ -4,7 +4,7 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
  from diffusers import StableDiffusionPipeline
5
  import torch
6
 
7
- # Load NLLB translation model
8
  @st.cache_resource
9
  def load_translation_model():
10
  model_name = "facebook/nllb-200-distilled-600M"
@@ -12,20 +12,20 @@ def load_translation_model():
12
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
13
  return tokenizer, model
14
 
15
- # Load Stable Diffusion model for CPU
16
  @st.cache_resource
17
  def load_diffusion_model():
18
  pipe = StableDiffusionPipeline.from_pretrained(
19
  "CompVis/stable-diffusion-v1-4",
20
- torch_dtype=torch.float32 # Important for CPU
21
  )
22
- pipe = pipe.to("cpu") # ✅ Force to CPU
23
  return pipe
24
 
25
- # Translate Tamil to English using NLLB
26
  def translate_text(text, tokenizer, model):
27
- inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
28
- inputs["forced_bos_token_id"] = tokenizer.lang_code_to_id["eng_Latn"]
29
  with torch.no_grad():
30
  translated_tokens = model.generate(**inputs, max_length=512)
31
  return tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
@@ -47,7 +47,6 @@ def main():
47
 
48
  with st.spinner("மொழிபெயர்ப்பு மற்றும் பட உருவாக்கம் நடைபெறுகிறது..."):
49
  tokenizer, model = load_translation_model()
50
- tokenizer.src_lang = "tam_Taml" # Tamil
51
  translated_text = translate_text(user_input, tokenizer, model)
52
  st.success(f"மொழிபெயர்ப்பு (ஆங்கிலம்): {translated_text}")
53
 
 
4
  from diffusers import StableDiffusionPipeline
5
  import torch
6
 
7
+ # Load NLLB-200 translation model
8
  @st.cache_resource
9
  def load_translation_model():
10
  model_name = "facebook/nllb-200-distilled-600M"
 
12
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
13
  return tokenizer, model
14
 
15
# Load the Stable Diffusion image-generation pipeline (CPU-only host).
@st.cache_resource
def load_diffusion_model():
    """Build and cache the Stable Diffusion v1.4 pipeline, pinned to CPU."""
    # float32 is mandatory here: half-precision kernels are GPU-only.
    pipeline = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        torch_dtype=torch.float32,  # CPU only
    )
    return pipeline.to("cpu")
24
 
25
# Translate Tamil to English with NLLB-200 (explicit language codes).
def translate_text(text, tokenizer, model):
    """Translate Tamil *text* to English using an NLLB-200 seq2seq model.

    Args:
        text: Tamil input string.
        tokenizer: NLLB tokenizer (supports ``src_lang`` and FLORES codes).
        model: the matching AutoModelForSeq2SeqLM.

    Returns:
        The English translation as a plain string.
    """
    # NLLB is a supervised MT model, not an instruction follower: a
    # natural-language prompt like "Translate this from Tamil..." is
    # translated as content, not obeyed. The source language must be set
    # on the tokenizer and the target language forced as the first
    # generated token.
    tokenizer.src_lang = "tam_Taml"  # Tamil (FLORES-200 code)
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    # convert_tokens_to_ids is the forward-compatible way to get the
    # target-language token id (lang_code_to_id was removed in newer
    # transformers releases).
    target_lang_id = tokenizer.convert_tokens_to_ids("eng_Latn")
    with torch.no_grad():
        translated_tokens = model.generate(
            **inputs,
            forced_bos_token_id=target_lang_id,
            max_length=512,
        )
    return tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
 
47
 
48
  with st.spinner("மொழிபெயர்ப்பு மற்றும் பட உருவாக்கம் நடைபெறுகிறது..."):
49
  tokenizer, model = load_translation_model()
 
50
  translated_text = translate_text(user_input, tokenizer, model)
51
  st.success(f"மொழிபெயர்ப்பு (ஆங்கிலம்): {translated_text}")
52