import streamlit as st
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch

# Hugging Face token from secrets
HF_TOKEN = st.secrets["HF_TOKEN"]

# Streamlit page settings
st.set_page_config(page_title="Tamil to Image Generator", layout="centered")
st.title("🧠 Tamil to Image Generator 🎨")
st.markdown("Enter Tamil text → Translate to English → Generate a creative story → Create an AI Image")

# Input Tamil text
tamil_input = st.text_area("Enter Tamil text", placeholder="உலகின் அழகான கடற்கரை பற்றி சொல்...")

if st.button("Generate Image"):
    if not tamil_input.strip():
        st.warning("Please enter some Tamil text.")
    else:
        # Step 1: Translate the Tamil input to English
        with st.spinner("🔁 Translating Tamil to English..."):
            translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ta-en")
            translated = translator(tamil_input, max_length=100)[0]["translation_text"]
        st.success("✅ Translation done")
        st.markdown(f"**📝 Translated Text:** `{translated}`")

        # Step 2: Expand the translation into a creative English description
        with st.spinner("🧠 Generating creative English text..."):
            generator = pipeline("text-generation", model="gpt2")
            prompt_text = f"Describe this beautifully: {translated}"
            generated = generator(prompt_text, max_length=80, do_sample=True, top_k=50)[0]["generated_text"]
        st.success("✅ Text generation done")
        st.markdown(f"**🎨 Creative Description:** `{generated}`")

        # Step 3: Generate an image from the description with Stable Diffusion
        with st.spinner("🖼️ Generating AI Image... (may take 20–30 seconds)"):
            device = "cuda" if torch.cuda.is_available() else "cpu"
            pipe = StableDiffusionPipeline.from_pretrained(
                "runwayml/stable-diffusion-v1-5",
                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
                use_auth_token=HF_TOKEN,
            ).to(device)
            image = pipe(prompt=generated).images[0]
        st.success("✅ Image generated!")
        st.image(image, caption="🖼️ AI Generated Image", use_column_width=True)
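
# --- Usage note (a minimal sketch, not part of the original script; assumes the
# file is saved as app.py) ---
# 1. Put the Hugging Face token in .streamlit/secrets.toml so st.secrets can read it:
#        HF_TOKEN = "hf_..."
# 2. Install the dependencies used above: streamlit, transformers, diffusers, torch
#    (plus sentencepiece for the Helsinki-NLP translation model).
# 3. Launch the app:
#        streamlit run app.py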