|
import streamlit as st |
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM |
|
from diffusers import StableDiffusionPipeline |
|
import torch |
|
|
|
@st.cache_resource
def load_all_models():
    """Load and cache the translation model, tokenizer, and image pipeline.

    Wrapped in ``st.cache_resource`` so the heavyweight models are loaded
    once per Streamlit server process instead of on every script rerun.

    Returns:
        tuple: ``(tokenizer, model, img_pipe)`` — the IndicTrans2 tokenizer
        and seq2seq model for Indic→English translation, and a Stable
        Diffusion pipeline for text-to-image generation.
    """
    # IndicTrans2 distilled Indic→English model. trust_remote_code is needed
    # because the model ships custom tokenizer/model code on the Hub.
    model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, trust_remote_code=True)

    use_cuda = torch.cuda.is_available()

    # On GPU, request the half-precision weight *variant*. The old
    # `revision="fp16"` mechanism is deprecated in diffusers and there is no
    # `fp16` branch for stabilityai/stable-diffusion-2-1, so that call would
    # fail; `variant="fp16"` is the supported way to get fp16 checkpoints.
    img_pipe = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1",
        torch_dtype=torch.float16 if use_cuda else torch.float32,
        variant="fp16" if use_cuda else None,
    )
    img_pipe = img_pipe.to("cuda" if use_cuda else "cpu")

    return tokenizer, model, img_pipe
|
|
|
def main():
    """Streamlit UI: translate Tamil input to English, then generate an image.

    Flow: read Tamil text from a text area; on button press, load the cached
    models, translate the text with IndicTrans2, and feed the English
    translation to Stable Diffusion as the image prompt.
    """
    st.set_page_config(page_title="Tamil to English to Image", layout="centered")

    st.title("πΈ Tamil β English β AI Image Generator")

    tamil_text = st.text_area("Enter Tamil text:", height=150)

    if st.button("Generate Image"):
        # Guard clause: nothing to do on empty/whitespace-only input.
        if not tamil_text.strip():
            st.warning("Please enter some Tamil text.")
            return

        with st.spinner("Loading models..."):
            tokenizer, model, img_pipe = load_all_models()

        with st.spinner("Translating Tamil to English..."):
            # IndicTrans2 expects "<src_lang> <tgt_lang> <sentence>" language
            # tags (Tamil source, English target), not the older "<2en>" marker.
            # NOTE(review): the official pipeline additionally uses
            # IndicProcessor for pre/post-processing — confirm translation
            # quality without it.
            formatted_input = f"tam_Taml eng_Latn {tamil_text.strip()}"
            inputs = tokenizer(formatted_input, return_tensors="pt")
            # The generate() default max length (~20 tokens) truncates most
            # sentences; allow a generous number of new tokens instead.
            output_ids = model.generate(**inputs, max_new_tokens=256)
            translated = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]

            st.success(f"π€ English Translation: `{translated}`")

        with st.spinner("Generating image..."):
            image = img_pipe(prompt=translated).images[0]
            # use_container_width replaces the deprecated use_column_width.
            st.image(image, caption="πΌοΈ AI-generated Image", use_container_width=True)
|
|
|
# Entry point when the script is executed directly (e.g. `streamlit run app.py`).
if __name__ == "__main__":

    main()
|
|