Update app.py
app.py
CHANGED
@@ -1,53 +1,58 @@
-# app.py
 import streamlit as st
-from transformers import
+from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, pipeline
 from diffusers import StableDiffusionPipeline
 import torch

-# Load translation model
 @st.cache_resource
-def
-        torch_dtype=torch.float32
+def load_models():
+    # Translation model
+    model_name = "alirezamsh/small100"
+    tokenizer = M2M100Tokenizer.from_pretrained(model_name)
+    model = M2M100ForConditionalGeneration.from_pretrained(model_name)
+
+    # Creative text generation (GPT-2)
+    text_generator = pipeline("text-generation", model="gpt2-medium")
+
+    # Stable diffusion image generation (CPU-friendly)
+    image_pipe = StableDiffusionPipeline.from_pretrained(
+        "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float32
     ).to("cpu")
-    return pipe
+
+    return tokenizer, model, text_generator, image_pipe

-def translate_tamil_to_english(text, tokenizer, model):
-    prompt = f"translate Tamil to English: {text}"
-    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
-    with torch.no_grad():
-        output = model.generate(**inputs, max_length=128)
-    return tokenizer.decode(output[0], skip_special_tokens=True)
+def translate_tamil_to_english(text, tokenizer, model):
+    tokenizer.src_lang = "ta"
+    inputs = tokenizer(text, return_tensors="pt", truncation=True)
+    generated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
+    return tokenizer.decode(generated_tokens[0], skip_special_tokens=True)

+def generate_creative_text(prompt, generator):
+    response = generator(f"Describe creatively: {prompt}", max_length=60, num_return_sequences=1)
+    return response[0]["generated_text"]
+
+def main():
+    st.set_page_config(page_title="Tamil to English → Image & Text", layout="centered")
+    st.title("🪔 தமிழ் உரையை படமாக மாற்றவும்")
+
+    user_input = st.text_area("தமிழ் உரை உள்ளிடவும்:", height=150)
+    if st.button("உருவாக்கவும்"):
         if not user_input.strip():
-            st.warning("உரை
+            st.warning("தயவுசெய்து ஒரு உரை உள்ளிடவும்.")
             return
+
+        with st.spinner("மொழிபெயர்ப்பு நடக்கிறது..."):
+            tokenizer, model, textgen, pipe = load_models()
+            english_text = translate_tamil_to_english(user_input, tokenizer, model)
+            st.success(f"Translated to English: {english_text}")
+
+        with st.spinner("சிறந்த உரையை உருவாக்குகிறது..."):
+            creative = generate_creative_text(english_text, textgen)
+            st.info("✍️ Creative Text:")
+            st.write(creative)
+
+        with st.spinner("படம் உருவாக்கப்படுகிறது..."):
+            image = pipe(english_text, num_inference_steps=25).images[0]
+            st.image(image, caption="🎨 Generated Image", use_column_width=True)

 if __name__ == "__main__":
     main()
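A few standalone smoke tests, kept outside the Streamlit UI, can confirm the three model stages before running the app. This first sketch mirrors load_models() and translate_tamil_to_english() as committed; the sample Tamil sentence is only an illustration, and it assumes the alirezamsh/small100 checkpoint works with the stock M2M-100 classes, exactly as the app's code does.

# Standalone check of the translation path (no Streamlit). Mirrors the commit's
# load_models() / translate_tamil_to_english(); the input sentence is just an example.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model_name = "alirezamsh/small100"
tokenizer = M2M100Tokenizer.from_pretrained(model_name)
model = M2M100ForConditionalGeneration.from_pretrained(model_name)

tokenizer.src_lang = "ta"  # source language: Tamil
text = "வணக்கம் உலகம்"  # "Hello, world" (sample input)
inputs = tokenizer(text, return_tensors="pt", truncation=True)
tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.decode(tokens[0], skip_special_tokens=True))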
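The creative-text stage is a plain transformers text-generation pipeline, so it can be checked the same way; the English prompt below is hypothetical, and set_seed is used only to make the sample repeatable.

# Standalone check of the creative-text step, mirroring generate_creative_text().
from transformers import pipeline, set_seed

set_seed(0)  # repeatable sample for testing
generator = pipeline("text-generation", model="gpt2-medium")
out = generator("Describe creatively: a beach at sunset", max_length=60, num_return_sequences=1)
print(out[0]["generated_text"])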
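The image stage runs Stable Diffusion in float32 on the CPU, which is the slowest part of the app. A standalone sketch with the same checkpoint and device settings gives a feel for that cost; the prompt is again just an example, and using fewer inference steps than the app's 25 trades image quality for a quicker check.

# Standalone check of the image step with the same checkpoint and CPU/float32
# settings as load_models(). The prompt is an example; the reduced step count
# keeps the smoke test short at the cost of image quality.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float32
).to("cpu")

image = pipe("a beach at sunset", num_inference_steps=10).images[0]
image.save("preview.png")

If all three stages behave as expected, the app itself starts with: streamlit run app.py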