24Sureshkumar committed on
Commit 2c77ef8 · verified · 1 Parent(s): 5cf5d2f

Update app.py

Files changed (1):
  1. app.py +23 -31
app.py CHANGED
@@ -7,72 +7,64 @@ import io
  import os
  from typing import Tuple

- # Load Hugging Face API key securely
- HF_API_KEY = os.getenv("HF_API_KEY")  # You must set this as an environment variable
+ # Load HF token
+ HF_API_KEY = os.getenv("HF_API_KEY") or "your_hf_token_here"  # Replace this with your token if local
  if not HF_API_KEY:
-     raise ValueError("HF_API_KEY is not set. Add it in Hugging Face 'Variables and Secrets' or local environment.")
+     raise ValueError("HF_API_KEY is not set.")

- # API Endpoint for Image Generation
+ # Hugging Face image model
  IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
  HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}

- # Check if GPU is available
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # Load Tamil-to-English Translation Model
+ # Translation model (Tamil to English)
  translator_model = "Helsinki-NLP/opus-mt-mul-en"
  translator = MarianMTModel.from_pretrained(translator_model).to(device)
  translator_tokenizer = MarianTokenizer.from_pretrained(translator_model)

- # Load Text Generation Model
+ # Text generation model
  generator_model = "EleutherAI/gpt-neo-1.3B"
  generator = AutoModelForCausalLM.from_pretrained(generator_model).to(device)
  generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)
- if generator_tokenizer.pad_token is None:
-     generator_tokenizer.pad_token = generator_tokenizer.eos_token
+ generator_tokenizer.pad_token = generator_tokenizer.eos_token

  def translate_tamil_to_english(text: str) -> str:
-     """Translates Tamil text to English."""
      inputs = translator_tokenizer(text, return_tensors="pt", padding=True, truncation=True).to(device)
      output = translator.generate(**inputs)
      return translator_tokenizer.decode(output[0], skip_special_tokens=True)

  def generate_text(prompt: str) -> str:
-     """Generates a creative text based on English input."""
      inputs = generator_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(device)
-     output = generator.generate(**inputs, max_length=100)
+     output = generator.generate(**inputs, max_length=100, num_return_sequences=1)
      return generator_tokenizer.decode(output[0], skip_special_tokens=True)

  def generate_image(prompt: str) -> Image.Image:
-     """Sends request to API for image generation."""
      response = requests.post(IMAGE_GEN_URL, headers=HEADERS, json={"inputs": prompt})
      try:
          if response.status_code == 200 and response.headers["content-type"].startswith("image"):
              return Image.open(io.BytesIO(response.content))
      except Exception as e:
-         print("Image generation error:", e)
-     return Image.new("RGB", (300, 300), "red")  # Fallback placeholder image
+         print("Image generation failed:", e)
+     return Image.new("RGB", (300, 300), color="gray")

  def process_input(tamil_text: str) -> Tuple[str, str, Image.Image]:
-     """Complete pipeline: Translation, Text Generation, and Image Generation."""
      english_text = translate_tamil_to_english(tamil_text)
      creative_text = generate_text(english_text)
      image = generate_image(english_text)
      return english_text, creative_text, image

- # Gradio Interface
- interface = gr.Interface(
-     fn=process_input,
-     inputs=gr.Textbox(label="Enter Tamil Text"),
-     outputs=[
-         gr.Textbox(label="Translated English Text"),
-         gr.Textbox(label="Creative Text"),
-         gr.Image(label="Generated Image")
-     ],
-     title="Tamil to English Translator & Image Generator",
-     description="Enter Tamil text, and this app will translate it, generate a creative description, and create an image based on the text.",
-     allow_flagging="never"  # Avoids schema-related error in Spaces
- )
-
- # Launch the app
- interface.launch()
+ # Gradio app
+ with gr.Blocks() as demo:
+     gr.Markdown("## Tamil to English Translator with Text and Image Generator")
+
+     tamil_input = gr.Textbox(label="Enter Tamil Text")
+     translate_btn = gr.Button("Translate & Generate")
+
+     english_output = gr.Textbox(label="Translated English")
+     creative_output = gr.Textbox(label="Creative Text")
+     image_output = gr.Image(label="Generated Image")
+
+     translate_btn.click(fn=process_input, inputs=tamil_input, outputs=[english_output, creative_output, image_output])
+
+ demo.launch()
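For quick troubleshooting of the token and endpoint this commit touches, here is a minimal standalone sketch that calls the same FLUX.1-schnell inference URL and header scheme outside the Gradio app. It assumes HF_API_KEY is exported in the environment; the prompt text and output filename are arbitrary placeholders, not part of the committed app.

```python
# Standalone sanity check for the image endpoint used in app.py.
# Assumes HF_API_KEY is exported in the environment; the prompt and
# output filename below are arbitrary examples.
import io
import os

import requests
from PIL import Image

IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
HEADERS = {"Authorization": f"Bearer {os.environ['HF_API_KEY']}"}

response = requests.post(
    IMAGE_GEN_URL,
    headers=HEADERS,
    json={"inputs": "a lotus pond at sunrise"},  # placeholder prompt
    timeout=120,
)

content_type = response.headers.get("content-type", "")
if response.status_code == 200 and content_type.startswith("image"):
    Image.open(io.BytesIO(response.content)).save("test_output.png")
    print("Saved test_output.png")
else:
    # On failure the API typically returns a JSON body (e.g. a model-loading message).
    print("Request failed:", response.status_code, response.text[:200])
```

If the model is still warming up, the endpoint usually returns JSON rather than image bytes, which is why the content-type check here (and in generate_image) matters.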