24Sureshkumar committed
Commit 45e16e5 · verified · 1 Parent(s): 060cc15

Update app.py

Files changed (1): app.py +9 -25
app.py CHANGED
@@ -4,25 +4,21 @@ from diffusers import StableDiffusionPipeline
 import torch
 import os
 
-# 1. Check for HF_TOKEN
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
-    raise ValueError("Please set the HF_TOKEN environment variable in Hugging Face repository secrets.")
+    raise ValueError("Set HF_TOKEN in env variables.")
 
-# 2. Set device
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# 3. Load translator with token
+# Use multilingual model that supports Tamil→English
 translator = pipeline(
     "translation",
-    model="Helsinki-NLP/opus-mt-ta-en",
+    model="Helsinki-NLP/opus-mt-mul-en",
     use_auth_token=HF_TOKEN
 )
 
-# 4. Load text generator (GPT-2) — public, no token needed
 generator = pipeline("text-generation", model="gpt2")
 
-# 5. Load image generator (Stable Diffusion) with token
 image_pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     use_auth_token=HF_TOKEN,
@@ -30,33 +26,21 @@ image_pipe = StableDiffusionPipeline.from_pretrained(
 )
 image_pipe = image_pipe.to(device)
 
-# 6. Main function
 def generate_image_from_tamil(tamil_input):
-    # Translate Tamil to English
     translated = translator(tamil_input, max_length=100)[0]['translation_text']
-
-    # Generate a prompt using GPT-2
-    generated = generator(translated, max_length=50, num_return_sequences=1)[0]['generated_text']
-    generated = generated.strip()
-
-    # Generate image using Stable Diffusion
+    generated = generator(translated, max_length=50, num_return_sequences=1)[0]['generated_text'].strip()
     image = image_pipe(generated).images[0]
-
     return translated, generated, image
 
-# 7. Gradio Interface
 iface = gr.Interface(
     fn=generate_image_from_tamil,
     inputs=gr.Textbox(lines=2, label="Enter Tamil Text"),
-    outputs=[
-        gr.Textbox(label="Translated English Text"),
-        gr.Textbox(label="Generated English Prompt"),
-        gr.Image(label="Generated Image")
-    ],
-    title="Tamil to Image Generator",
-    description="This app translates Tamil text to English, generates creative English prompts, and visualizes them using Stable Diffusion.",
+    outputs=[gr.Textbox(label="Translated English Text"),
+             gr.Textbox(label="Generated English Prompt"),
+             gr.Image(label="Generated Image")],
+    title="Tamil→Image Generator",
+    description="Translate Tamil → English, generate prompt → create image.",
     allow_flagging="never"
 )
 
-# 8. Launch app
 iface.launch()
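
The substantive change is the translation model swap from Helsinki-NLP/opus-mt-ta-en to the multilingual Helsinki-NLP/opus-mt-mul-en. A minimal sketch for checking that swap outside the Space, assuming a standard local transformers install; the OPUS-MT checkpoint is public, so no token should be needed here, and the Tamil sample sentence is only illustrative:

from transformers import pipeline

# Same multilingual Tamil→English model the commit switches to.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")

# Illustrative Tamil input (roughly "a cat sitting on a red chair").
sample = "சிவப்பு நாற்காலியில் அமர்ந்திருக்கும் பூனை"
print(translator(sample, max_length=100)[0]["translation_text"])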