24Sureshkumar committed on
Commit 391969d · verified · 1 Parent(s): 67241c5

Update app.py

Files changed (1)
  1. app.py +18 -18
app.py CHANGED
@@ -1,16 +1,20 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+from transformers import pipeline
 from diffusers import StableDiffusionPipeline
 import torch
 
 @st.cache_resource
 def load_all_models():
-    # Load translation model
-    model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, trust_remote_code=True)
+    # Load Tamil ➝ English translation model
+    translation_pipeline = pipeline(
+        "translation",
+        model="ai4bharat/indictrans2-indic-en-dist-200M",
+        tokenizer="ai4bharat/indictrans2-indic-en-dist-200M",
+        src_lang="ta", tgt_lang="en",
+        device=0 if torch.cuda.is_available() else -1,
+    )
 
-    # Load Stable Diffusion image generator
+    # Load image generation model
     img_pipe = StableDiffusionPipeline.from_pretrained(
         "stabilityai/stable-diffusion-2-1",
         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
@@ -18,33 +22,29 @@ def load_all_models():
     )
     img_pipe = img_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
 
-    return tokenizer, model, img_pipe
+    return translation_pipeline, img_pipe
 
 def main():
-    st.set_page_config(page_title="Tamil to English to Image", layout="centered")
-    st.title("📸 Tamil ➝ English ➝ AI Image Generator")
+    st.set_page_config(page_title="Tamil ➝ English ➝ Image", layout="centered")
+    st.title("🌐 Tamil to English to Image Generator")
 
     tamil_text = st.text_area("Enter Tamil text:", height=150)
 
     if st.button("Generate Image"):
         if not tamil_text.strip():
-            st.warning("Please enter some Tamil text.")
+            st.warning("Please enter Tamil text.")
             return
 
        with st.spinner("Loading models..."):
-            tokenizer, model, img_pipe = load_all_models()
+            translation_pipeline, img_pipe = load_all_models()
 
        with st.spinner("Translating Tamil to English..."):
-            # Prepare special format: "<2en> <tamil sentence>"
-            formatted_input = f"<2en> {tamil_text.strip()}"
-            inputs = tokenizer(formatted_input, return_tensors="pt")
-            output_ids = model.generate(**inputs)
-            translated = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
-            st.success(f"🔀 English Translation: `{translated}`")
+            translated = translation_pipeline(tamil_text)[0]["translation_text"]
+            st.success(f"🔀 Translated English: `{translated}`")
 
        with st.spinner("Generating image..."):
            image = img_pipe(prompt=translated).images[0]
-            st.image(image, caption="🖼️ AI-generated Image", use_column_width=True)
+            st.image(image, caption="🎨 AI-generated Image", use_column_width=True)
 
 if __name__ == "__main__":
     main()
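
For reference, the flow the updated app.py follows can be exercised outside Streamlit. The sketch below is a minimal, unverified illustration of that flow, not part of the commit: it assumes the IndicTrans2 distilled checkpoint loads through the generic transformers "translation" pipeline with the "ta"/"en" codes used in the commit (IndicTrans2 models normally ship custom code, so trust_remote_code=True and the model's own language tags may be needed in practice); the Tamil sample string and the output.png path are illustrative only.

# Standalone sketch of the updated flow (assumptions noted above; not from the commit itself).
import torch
from transformers import pipeline
from diffusers import StableDiffusionPipeline

# Tamil -> English translation via the generic translation pipeline,
# mirroring the arguments used in the commit.
translator = pipeline(
    "translation",
    model="ai4bharat/indictrans2-indic-en-dist-200M",
    tokenizer="ai4bharat/indictrans2-indic-en-dist-200M",
    src_lang="ta",
    tgt_lang="en",
    device=0 if torch.cuda.is_available() else -1,
)

# A translation pipeline returns a list of dicts keyed by "translation_text".
sample = "வணக்கம் உலகம்"  # Tamil for "Hello, world" (illustrative input)
prompt = translator(sample)[0]["translation_text"]
print("Prompt:", prompt)

# Image generation, same dtype/device choice as the unchanged part of app.py:
# float16 on GPU, float32 on CPU.
img_pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
img_pipe = img_pipe.to("cuda" if torch.cuda.is_available() else "cpu")

image = img_pipe(prompt=prompt).images[0]  # PIL.Image
image.save("output.png")                   # hypothetical output path

In the app itself both models are created inside load_all_models() under @st.cache_resource, so Streamlit keeps them in memory across reruns instead of reloading the weights on every button click.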