# Tamil Creative Studio — Streamlit app (Tamil → English translation + text/image generation)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from diffusers import StableDiffusionPipeline
import torch
# Configure the page; must run before any other Streamlit command.
st.set_page_config(
    page_title="Tamil Creative Studio",
    page_icon="🇮🇳",
    layout="centered",
)
def load_css():
    """Inject the app's header styling (CSS) into the page via raw HTML."""
    header_css = """<style>
    .header {
        text-align: center;
        padding: 20px;
        background: #f9f9f9;
        border-radius: 10px;
        margin-bottom: 20px;
    }
    .header h1 { color: #cc0000; }
    .header p { color: #333; font-style: italic; }
    </style>"""
    # unsafe_allow_html is required for Streamlit to render the <style> tag.
    st.markdown(header_css, unsafe_allow_html=True)
@st.cache_resource
def load_all_models():
    """Load and cache every model the app uses (runs once per session).

    Returns:
        tuple: (tokenizer, model, text_gen, img_pipe) where tokenizer/model
        are the IndicTrans2 Tamil->English pair, text_gen is a GPT-2
        text-generation pipeline, and img_pipe is a Stable Diffusion
        pipeline moved to the best available device.
    """
    # Translation model (distilled IndicTrans2, Indic -> English).
    model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, trust_remote_code=True)

    # Text generation model (simple GPT-2).
    text_gen = pipeline("text-generation", model="gpt2")

    # Image generation model (Stable Diffusion).
    # Fix 1: don't hard-code "cuda" — fall back to CPU/float32 so the app
    # doesn't crash on CPU-only hosts.
    # Fix 2: revision="fp16" is deprecated in diffusers; variant="fp16"
    # is the supported way to request the fp16 weights.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    img_pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        variant="fp16" if device == "cuda" else None,
        torch_dtype=dtype,
    ).to(device)

    return tokenizer, model, text_gen, img_pipe
def translate_tamil(text, tokenizer, model):
    """Translate Tamil *text* to English using the given seq2seq pair.

    NOTE(review): IndicTrans2 checkpoints normally expect language-tag
    preprocessing (e.g. via IndicProcessor) — confirm raw text works
    acceptably with this tokenizer/model combination.
    """
    encoded = tokenizer(
        text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=128,
    )
    generated = model.generate(
        **encoded,
        max_length=150,
        num_beams=5,
        early_stopping=True,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def main():
    """Render the UI: translate Tamil input, then generate a description and an image."""
    load_css()
    st.markdown(
        '<div class="header"><h1>🌐 தமிழ் → English → Creative Studio</h1>'
        '<p>Translate Tamil text and generate creative content</p></div>',
        unsafe_allow_html=True,
    )
    tokenizer, model, text_gen, img_pipe = load_all_models()

    tamil_text = st.text_area(
        "**தமிழ் உரை:**",
        height=150,
        placeholder="உங்கள் உரையை இங்கே உள்ளிடவும்...",
    )

    if st.button("உருவாக்கு"):
        # Guard clause: skip the expensive pipelines on empty/whitespace input.
        if not tamil_text.strip():
            st.warning("உரையை உள்ளிடவும்.")
            return

        with st.spinner("மொழிபெயர்க்கிறது..."):
            eng = translate_tamil(tamil_text, tokenizer, model)
        st.success(eng)

        with st.spinner("உரை உருவாக்குதல்..."):
            creative = text_gen(
                f"Create a creative description about: {eng}",
                max_length=80,
                num_return_sequences=1,
            )[0]["generated_text"]
        st.info(creative)

        with st.spinner("படத்தை உருவாக்குதல்..."):
            img = img_pipe(eng, num_inference_steps=40, guidance_scale=8.5).images[0]
        # Fix: use_column_width is deprecated in Streamlit; use_container_width
        # is the supported replacement with the same visual effect.
        st.image(img, caption="Generated Image", use_container_width=True)
# Script entry point: launch the app when run directly (not on import).
if __name__ == "__main__":
    main()