24Sureshkumar committed on
Commit 9bdb949 · verified · 1 Parent(s): d0a988c

Update app.py

Files changed (1):
  1. app.py +13 -8
app.py CHANGED
@@ -25,19 +25,24 @@ def load_css():
         unsafe_allow_html=True,
     )
 
-@st.cache_resource(show_spinner=False)
 def load_all_models():
+    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+
     model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
-    text_gen = pipeline("text-generation", model="gpt2", device=-1)
+
+    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, trust_remote_code=True)
+
+    text_gen = pipeline("text-generation", model="gpt2")
     img_pipe = StableDiffusionPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-2-base",
-        torch_dtype=torch.float32,
-        safety_checker=None
-    ).to("cpu")
+        "CompVis/stable-diffusion-v1-4",
+        revision="fp16",
+        torch_dtype=torch.float16
+    ).to("cuda")
+
     return tokenizer, model, text_gen, img_pipe
 
+
 def translate_tamil(text, tokenizer, model):
     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
     outs = model.generate(**inputs, max_length=150, num_beams=5, early_stopping=True)
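For orientation, a minimal sketch of how the patched load_all_models() and translate_tamil() might be wired into the Streamlit UI elsewhere in app.py. The widget labels, control flow, and the assumption that translate_tamil() returns the decoded English string are illustrations only; they are not part of this commit.

import streamlit as st

# Hypothetical wiring for illustration; the actual UI code in app.py lies
# outside this hunk, and the widget labels below are assumed.
tokenizer, model, text_gen, img_pipe = load_all_models()

tamil_text = st.text_area("Enter Tamil text")
if st.button("Translate") and tamil_text.strip():
    # Assumes translate_tamil() decodes and returns the generated English text.
    english = translate_tamil(tamil_text, tokenizer, model)
    st.write(english)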