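# Tamil to Image Generator: a Streamlit app that translates Tamil input to English
# (Helsinki-NLP/opus-mt-ta-en), expands it into a short creative description with GPT-2,
# and renders an image from that description with Stable Diffusion v1.5.
#
# Assumed setup (not stated in the original file): install streamlit, transformers,
# diffusers and torch, store HF_TOKEN in .streamlit/secrets.toml, then launch the app
# with `streamlit run <this_file>.py`.
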
import streamlit as st
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch

# Hugging Face token from secrets
HF_TOKEN = st.secrets["HF_TOKEN"]

# Streamlit page settings
st.set_page_config(page_title="Tamil to Image Generator", layout="centered")

st.title("🧠 Tamil to Image Generator 🎨")
st.markdown("Enter Tamil text → Translate to English → Generate a creative story → Create an AI Image")

# Input Tamil text
tamil_input = st.text_area("Enter Tamil text", placeholder="உலகின் அழகான கடற்கரை பற்றி சொல்...")

if st.button("Generate Image"):
    if not tamil_input.strip():
        st.warning("Please enter some Tamil text.")
    else:
        with st.spinner("🔁 Translating Tamil to English..."):
            translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ta-en")
            translated = translator(tamil_input, max_length=100)[0]['translation_text']

        st.success("✅ Translation done")
        st.markdown(f"**📝 Translated Text:** `{translated}`")

        with st.spinner("🧠 Generating creative English text..."):
            generator = pipeline("text-generation", model="gpt2")
            prompt_text = f"Describe this beautifully: {translated}"
            generated = generator(prompt_text, max_length=80, do_sample=True, top_k=50)[0]['generated_text']

        st.success("✅ Text generation done")
        st.markdown(f"**🎨 Creative Description:** `{generated}`")

        with st.spinner("🖼️ Generating AI Image... (may take 20–30 seconds)"):
            pipe = StableDiffusionPipeline.from_pretrained(
                "runwayml/stable-diffusion-v1-5",
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                use_auth_token=HF_TOKEN
            ).to("cuda" if torch.cuda.is_available() else "cpu")

            image = pipe(prompt=generated).images[0]

        st.success("✅ Image generated!")
        st.image(image, caption="🖼️ AI Generated Image", use_column_width=True)
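
# --- Optional refinement (a sketch, not part of the original app) ---
# Each button click above re-creates all three pipelines from scratch, which is slow.
# Assuming a Streamlit version that provides st.cache_resource (>= 1.18), the heavy
# models could instead be loaded once per process and reused across reruns, e.g.:

@st.cache_resource
def load_translator():
    # Cached Tamil -> English translation pipeline (same model as above).
    return pipeline("translation", model="Helsinki-NLP/opus-mt-ta-en")

@st.cache_resource
def load_generator():
    # Cached GPT-2 text-generation pipeline.
    return pipeline("text-generation", model="gpt2")

@st.cache_resource
def load_sd_pipeline():
    # Cached Stable Diffusion pipeline, moved to GPU when available.
    dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    return StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=dtype,
        use_auth_token=HF_TOKEN,
    ).to("cuda" if torch.cuda.is_available() else "cpu")

# These helpers are illustrative only and are not wired into the handler above; to use
# them, replace the in-handler pipeline(...) / from_pretrained(...) calls with
# translator = load_translator(), generator = load_generator(), pipe = load_sd_pipeline().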