Update app.py
app.py CHANGED
@@ -1,58 +1,61 @@
(Previous version, 58 lines — same overall structure: imports, an @st.cache_resource model loader built around M2M100, a Tamil→English translate helper, a text-generation helper, and main(); the content of the removed lines is truncated in this capture.)
# app.py
import streamlit as st
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, pipeline
from diffusers import DiffusionPipeline
import torch

@st.cache_resource(show_spinner=False)
def load_all_models():
    # Load translation model
    model_name = "facebook/m2m100_418M"
    tokenizer = M2M100Tokenizer.from_pretrained(model_name)
    model = M2M100ForConditionalGeneration.from_pretrained(model_name)

    # Load creative text model (smaller GPT-2)
    textgen = pipeline("text-generation", model="gpt2", device=-1)

    # Load lightweight image generation pipeline
    img_pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/sdxl-lite", torch_dtype=torch.float32
    ).to("cpu")

    return tokenizer, model, textgen, img_pipe

def translate(text, tokenizer, model):
    # Tamil ("ta") source, English ("en") target
    tokenizer.src_lang = "ta"
    inputs = tokenizer(text, return_tensors="pt")
    output = model.generate(inputs["input_ids"], forced_bos_token_id=tokenizer.get_lang_id("en"), max_length=100)
    return tokenizer.decode(output[0], skip_special_tokens=True)

def generate_text(prompt, pipe):
    # Continue the English prompt with sampled GPT-2 output
    output = pipe(prompt, max_length=60, do_sample=True)[0]
    return output["generated_text"]

def main():
    st.set_page_config(page_title="Tamil to English → Creative → Image", layout="centered")
    st.title("🌐 தமிழ் ➝ English ➝ Creative Text + Image")

    tokenizer, model, textgen, img_pipe = load_all_models()

    tamil_text = st.text_area("தமிழ் உரையை உள்ளிடவும்:", height=130)  # "Enter the Tamil text:"

    if st.button("உருவாக்கு"):  # "Generate"
        if not tamil_text.strip():
            st.warning("தயவுசெய்து உரையை உள்ளிடவும்.")  # "Please enter some text."
            return

        with st.spinner("மொழிபெயர்ப்பு..."):  # "Translating..."
            english_text = translate(tamil_text, tokenizer, model)
        st.success(f"🔁 Translated: {english_text}")

        with st.spinner("உரையாக்கம்..."):  # "Generating text..."
            creative_text = generate_text(english_text, textgen)
        st.info("📝 Creative Output:")
        st.write(creative_text)

        with st.spinner("படம் உருவாக்கப்படுகிறது..."):  # "Generating the image..."
            image = img_pipe(english_text).images[0]
        st.image(image, caption="🎨 Generated Image", use_column_width=True)

if __name__ == "__main__":
    main()
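To try the updated app locally (assuming streamlit, transformers, diffusers, and torch are installed; the checkpoints listed in the file are fetched from the Hugging Face Hub on first run):

streamlit run app.py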