24Sureshkumar's picture
Update app.py
67241c5 verified
raw
history blame
2.02 kB
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from diffusers import StableDiffusionPipeline
import torch
@st.cache_resource
def load_all_models():
    """Load and cache the translation model and the image pipeline.

    Cached once per Streamlit session via ``st.cache_resource`` so the
    heavyweight downloads happen only on first use.

    Returns:
        tuple: ``(tokenizer, model, img_pipe)`` — the IndicTrans2 tokenizer
        and seq2seq model for Indic→English translation, and a Stable
        Diffusion pipeline moved to GPU when one is available.
    """
    # Indic → English translation model; the checkpoint ships custom code
    # on the Hub, hence trust_remote_code=True.
    model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, trust_remote_code=True)

    # Hoist the CUDA probe — the original queried it three times.
    use_cuda = torch.cuda.is_available()

    # Stable Diffusion image generator. Half precision only on GPU.
    # NOTE: fp16 weight files are selected with `variant="fp16"`; the old
    # `revision="fp16"` branch selector is deprecated in diffusers.
    img_pipe = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1",
        torch_dtype=torch.float16 if use_cuda else torch.float32,
        variant="fp16" if use_cuda else None,
    )
    img_pipe = img_pipe.to("cuda" if use_cuda else "cpu")
    return tokenizer, model, img_pipe
def main():
    """Streamlit entry point: Tamil text → English translation → AI image.

    Renders a text area, translates the submitted Tamil text to English
    with IndicTrans2, then feeds the translation to Stable Diffusion as
    an image prompt.
    """
    st.set_page_config(page_title="Tamil to English to Image", layout="centered")
    st.title("📸 Tamil ➝ English ➝ AI Image Generator")

    tamil_text = st.text_area("Enter Tamil text:", height=150)

    if st.button("Generate Image"):
        # Guard clause: reject empty / whitespace-only input.
        if not tamil_text.strip():
            st.warning("Please enter some Tamil text.")
            return

        with st.spinner("Loading models..."):
            tokenizer, model, img_pipe = load_all_models()

        with st.spinner("Translating Tamil to English..."):
            # Prepare special format: "<2en> <tamil sentence>"
            # NOTE(review): IndicTrans2 checkpoints normally expect
            # "src_lang tgt_lang" tags (e.g. "tam_Taml eng_Latn") applied by
            # the IndicTransToolkit preprocessor — confirm the "<2en>" tag is
            # actually honored by this tokenizer's remote code.
            formatted_input = f"<2en> {tamil_text.strip()}"
            inputs = tokenizer(formatted_input, return_tensors="pt")
            # Give generation an explicit length budget; the library default
            # max length can silently truncate longer translations.
            output_ids = model.generate(**inputs, max_new_tokens=256)
            translated = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]

        st.success(f"🔤 English Translation: `{translated}`")

        with st.spinner("Generating image..."):
            image = img_pipe(prompt=translated).images[0]

        # `use_column_width` is deprecated in Streamlit;
        # `use_container_width` is the supported equivalent.
        st.image(image, caption="🖼️ AI-generated Image", use_container_width=True)
# Standard script entry guard: run the Streamlit UI when executed directly.
if __name__ == "__main__":
    main()