# app.py — Tamil-to-English translation + Stable Diffusion image generation (Streamlit).
import streamlit as st
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch
# Heavy model construction is cached so reruns of the Streamlit script reuse it.
@st.cache_resource
def load_all_models():
    """Build and cache the two inference pipelines used by the app.

    Returns:
        tuple: ``(translation_pipeline, img_pipe)`` — an IndicTrans2
        Tamil→English text2text pipeline and a Stable Diffusion v1.5
        image-generation pipeline, placed on GPU when available.
    """
    use_gpu = torch.cuda.is_available()

    translation_model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
    translation_pipeline = pipeline(
        "text2text-generation",
        model=translation_model_id,
        tokenizer=translation_model_id,
        device=0 if use_gpu else -1,
        trust_remote_code=True,  # Required for custom tokenizer
    )

    # fp16 only makes sense on GPU; CPU inference needs full precision.
    sd_dtype = torch.float16 if use_gpu else torch.float32
    img_pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=sd_dtype,
    ).to("cuda" if use_gpu else "cpu")

    return translation_pipeline, img_pipe
def main():
    """Streamlit UI: translate Tamil input to English, then generate an image."""
    st.title("🧠 Tamil to English Image Generator")

    tamil_text = st.text_area("✍️ Enter Tamil word or sentence:")

    if st.button("Translate & Generate Image"):
        # Guard clause: reject empty/whitespace-only input before loading models.
        if not tamil_text.strip():
            st.warning("⚠️ Please enter some Tamil text.")
            return

        with st.spinner("⏳ Loading models..."):
            translation_pipeline, img_pipe = load_all_models()

        # NOTE(review): the "<2ta> <2en>" prefix looks like IndicTrans v1 tag
        # syntax; IndicTrans2 models normally take "tam_Taml eng_Latn"-style
        # tags via IndicProcessor preprocessing — verify against the model card.
        formatted_input = f"<2ta> <2en> {tamil_text.strip()}"

        try:
            # BUG FIX: a "text2text-generation" pipeline returns dicts keyed
            # by "generated_text"; "translation_text" is only produced by
            # "translation" task pipelines, so the original lookup always
            # raised KeyError. Fall back to the old key for safety.
            result = translation_pipeline(formatted_input)[0]
            translated = result.get("generated_text", result.get("translation_text"))
        except Exception as e:
            st.error(f"❌ Translation failed: {e}")
            return

        st.success("✅ Translation Successful!")
        st.markdown(f"**🗣️ English Translation:** {translated}")

        with st.spinner("🎨 Generating image..."):
            image = img_pipe(translated).images[0]

        st.image(image, caption="🖼️ AI-Generated Image", use_column_width=True)
# Script entry point: run the Streamlit app when executed directly.
if __name__ == "__main__":
    main()