24Sureshkumar committed
Commit d16c1e4 · verified · 1 Parent(s): 09de243

Update app.py

Files changed (1)
app.py +38 -24
app.py CHANGED
@@ -1,8 +1,13 @@
 import streamlit as st
+import os
+from dotenv import load_dotenv
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 from diffusers import StableDiffusionPipeline
 import torch
-import base64
+
+# Load environment variables
+load_dotenv()
+HF_TOKEN = os.getenv("HF_TOKEN")
 
 # Set page config
 st.set_page_config(
@@ -20,19 +25,29 @@ def load_css(file_name):
 @st.cache_resource(show_spinner=False)
 def load_all_models():
     # Load translation model
-    trans_tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indictrans2-ta-en-dist-200M")
-    trans_model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/indictrans2-ta-en-dist-200M")
-
+    trans_tokenizer = AutoTokenizer.from_pretrained(
+        "ai4bharat/indictrans2-ta-en-dist-200M", token=HF_TOKEN
+    )
+    trans_model = AutoModelForSeq2SeqLM.from_pretrained(
+        "ai4bharat/indictrans2-ta-en-dist-200M", token=HF_TOKEN
+    )
+
     # Load text generation model
-    text_gen = pipeline("text-generation", model="gpt2", device=-1)
-
+    text_gen = pipeline(
+        "text-generation",
+        model="gpt2",
+        device=-1,
+        token=HF_TOKEN
+    )
+
     # Load image generation model
     img_pipe = StableDiffusionPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-2-base",
+        "stabilityai/stable-diffusion-2-base",
         torch_dtype=torch.float32,
-        safety_checker=None
+        safety_checker=None,
+        token=HF_TOKEN
     ).to("cpu")
-
+
     return trans_tokenizer, trans_model, text_gen, img_pipe
 
 def translate_tamil(text, tokenizer, model):
@@ -43,24 +58,23 @@ def translate_tamil(text, tokenizer, model):
         truncation=True,
         max_length=128
     )
-
+
     generated = model.generate(
         **inputs,
         max_length=150,
         num_beams=5,
         early_stopping=True
     )
-
+
     return tokenizer.batch_decode(
-        generated,
+        generated,
         skip_special_tokens=True,
         clean_up_tokenization_spaces=True
    )[0]
 
 def main():
     load_css("style.css")
-
-    # Header with background
+
     st.markdown(
         """
         <div class="header">
@@ -70,37 +84,37 @@ def main():
         """,
         unsafe_allow_html=True
     )
-
+
     tokenizer, model, text_gen, img_pipe = load_all_models()
-
+
     with st.container():
         tamil_text = st.text_area(
-            "**தமிழ் உரை:**",
+            "**தமிழ் உரை:**",
            height=150,
            placeholder="உங்கள் உரையை இங்கே உள்ளிடவும்...",
            key="tamil_input"
        )
-
+
        col1, col2 = st.columns([1, 3])
        with col1:
            if st.button("**உருவாக்கு**", type="primary", use_container_width=True):
                if not tamil_text.strip():
                    st.warning("தயவு செய்து உரையை உள்ளிடவும்.")
                    st.stop()
-
+
                with st.spinner("மொழிபெயர்க்கிறது..."):
                    eng = translate_tamil(tamil_text, tokenizer, model)
-
+
                with st.expander("**🔤 Translation**", expanded=True):
                    st.success(eng)
-
+
                with st.spinner("உரை உருவாக்குதல்..."):
                    creative = text_gen(
                        f"Create a creative description about: {eng}",
                        max_length=80,
                        num_return_sequences=1
                    )[0]["generated_text"]
-
+
                st.info("**📝 Creative Text:**")
                st.write(creative)
 
@@ -110,8 +124,8 @@ def main():
                    num_inference_steps=40,
                    guidance_scale=8.5
                ).images[0]
-
+
                st.image(img, caption="**🎨 Generated Image**", use_column_width=True)
 
 if __name__ == "__main__":
-    main()
+    main()
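
The added lines read HF_TOKEN from the process environment via python-dotenv, so the app expects either a .env file next to app.py or the variable set some other way (for example as a Space secret). A minimal sketch of that setup, with a placeholder token value that is not part of this commit:

    # .env  (keep this file out of version control; the value is a placeholder)
    HF_TOKEN=hf_xxxxxxxxxxxxxxxx

    # token lookup, mirroring the lines added to app.py
    import os
    from dotenv import load_dotenv

    load_dotenv()                      # loads .env from the working directory, if present
    HF_TOKEN = os.getenv("HF_TOKEN")   # None when the variable is not set

The `from dotenv import load_dotenv` import comes from the python-dotenv package, so that dependency presumably needs to be listed in requirements.txt alongside the others.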
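
An alternative to passing token=HF_TOKEN into each from_pretrained and pipeline call would be to authenticate once per process with huggingface_hub, which ships as a dependency of both transformers and diffusers. A sketch of that pattern, not what this commit does:

    from huggingface_hub import login

    if HF_TOKEN:                # only attempt login when a token is configured
        login(token=HF_TOKEN)   # later from_pretrained calls reuse the stored credential

Either approach serves the same purpose; on the Hub a token is only strictly required for gated or private checkpoints.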