24Sureshkumar committed
Commit 770a398 · verified
1 Parent(s): b4aa913

Update app.py

Files changed (1)
  1. app.py +26 -21
app.py CHANGED
@@ -3,23 +3,20 @@ from transformers import pipeline
 from diffusers import StableDiffusionPipeline
 import torch
 
+# Cache and load models only once
 @st.cache_resource
 def load_all_models():
-    # Load Tamil ➝ English translation model (with remote code)
     translation_pipeline = pipeline(
-        "translation",
+        "text2text-generation",
         model="ai4bharat/indictrans2-indic-en-dist-200M",
         tokenizer="ai4bharat/indictrans2-indic-en-dist-200M",
-        src_lang="ta", tgt_lang="en",
-        trust_remote_code=True,  # 🔥 Must add this
         device=0 if torch.cuda.is_available() else -1,
+        trust_remote_code=True  # Required for custom tokenizer
     )
 
-    # Load image generation model
     img_pipe = StableDiffusionPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-2-1",
-        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-        revision="fp16" if torch.cuda.is_available() else None,
+        "runwayml/stable-diffusion-v1-5",
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
     )
     img_pipe = img_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -27,26 +24,34 @@ def load_all_models():
 
 
 def main():
-    st.set_page_config(page_title="Tamil ➝ English ➝ Image", layout="centered")
-    st.title("🌐 Tamil to English to Image Generator")
+    st.title("🧠 Tamil to English Image Generator")
 
-    tamil_text = st.text_area("Enter Tamil text:", height=150)
+    tamil_text = st.text_area("✍️ Enter Tamil word or sentence:")
 
-    if st.button("Generate Image"):
-        if not tamil_text.strip():
-            st.warning("Please enter Tamil text.")
+    if st.button("Translate & Generate Image"):
+        if tamil_text.strip() == "":
+            st.warning("⚠️ Please enter some Tamil text.")
             return
 
-        with st.spinner("Loading models..."):
+        with st.spinner("⏳ Loading models..."):
             translation_pipeline, img_pipe = load_all_models()
 
-        with st.spinner("Translating Tamil to English..."):
-            translated = translation_pipeline(tamil_text)[0]["translation_text"]
-            st.success(f"🔤 Translated English: `{translated}`")
+        # Format input for IndicTrans2
+        formatted_input = f"<2ta> <2en> {tamil_text.strip()}"
+
+        try:
+            translated = translation_pipeline(formatted_input)[0]["translation_text"]
+        except Exception as e:
+            st.error(f"❌ Translation failed: {e}")
+            return
+
+        st.success("✅ Translation Successful!")
+        st.markdown(f"**🗣️ English Translation:** {translated}")
+
+        with st.spinner("🎨 Generating image..."):
+            image = img_pipe(translated).images[0]
+            st.image(image, caption="🖼️ AI-Generated Image", use_column_width=True)
 
-        with st.spinner("Generating image..."):
-            image = img_pipe(prompt=translated).images[0]
-            st.image(image, caption="🎨 AI-generated Image", use_column_width=True)
 
 if __name__ == "__main__":
     main()
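
For reference, the translation step introduced by this commit can be exercised outside Streamlit with a few lines. This is only a sketch grounded in the committed code, not part of the commit: the "<2ta> <2en>" tag prefix is copied from app.py, and a stock text2text-generation pipeline normally returns a "generated_text" key rather than the "translation_text" key that app.py indexes, so the output shape should be checked against the model's remote code.

# Standalone sketch: mirrors the pipeline configuration that app.py now uses,
# so the translation call can be sanity-checked without the Streamlit UI.
from transformers import pipeline
import torch

translator = pipeline(
    "text2text-generation",
    model="ai4bharat/indictrans2-indic-en-dist-200M",
    tokenizer="ai4bharat/indictrans2-indic-en-dist-200M",
    device=0 if torch.cuda.is_available() else -1,
    trust_remote_code=True,  # the model ships custom code/tokenizer
)

sample = "வணக்கம்"  # Tamil for "hello"
result = translator(f"<2ta> <2en> {sample}")  # tag format copied from app.py
print(result)  # inspect the returned keys before indexing "translation_text"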