File size: 2,949 Bytes
bf7e1be
 
 
 
 
 
5a6cf02
52b96f9
87884fb
2c77ef8
 
87884fb
2c77ef8
87884fb
2c77ef8
87884fb
 
 
bf7e1be
 
2c77ef8
bf7e1be
 
 
 
2c77ef8
bf7e1be
 
 
2c77ef8
bf7e1be
52b96f9
bf7e1be
 
 
 
52b96f9
bf7e1be
2c77ef8
bf7e1be
 
52b96f9
bf7e1be
52b96f9
 
 
 
2c77ef8
 
bf7e1be
52b96f9
bf7e1be
 
af9f29e
bf7e1be
 
2c77ef8
 
 
52b96f9
2c77ef8
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import gradio as gr
import requests
from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import torch
import io
import os
from typing import Tuple

# --- Hugging Face API configuration ---------------------------------------
# Read the token from the environment; a real token may be pasted over the
# placeholder for quick local runs.
HF_API_KEY = os.getenv("HF_API_KEY") or "your_hf_token_here"  # Replace this with your token if local
# BUGFIX: the original guard `if not HF_API_KEY:` could never fire, because
# the `or "your_hf_token_here"` fallback above makes the value always truthy.
# Also reject the untouched placeholder so a missing token fails fast here
# instead of surfacing later as a cryptic 401 from the inference API.
if not HF_API_KEY or HF_API_KEY == "your_hf_token_here":
    raise ValueError("HF_API_KEY is not set.")

# Hugging Face image model (serverless inference endpoint).
IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}

# Run models on GPU when available, otherwise CPU. All three models below are
# moved to / tokenized onto this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Translation model (Tamil to English).
# NOTE(review): opus-mt-mul-en is a *multilingual*-to-English model, not a
# Tamil-specific one — it accepts Tamil among many source languages.
# Loading downloads weights from the HF Hub on first run.
translator_model = "Helsinki-NLP/opus-mt-mul-en"
translator = MarianMTModel.from_pretrained(translator_model).to(device)
translator_tokenizer = MarianTokenizer.from_pretrained(translator_model)

# Text generation model (causal LM used to continue the translated prompt).
generator_model = "EleutherAI/gpt-neo-1.3B"
generator = AutoModelForCausalLM.from_pretrained(generator_model).to(device)
generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)
# GPT-Neo has no native pad token; reuse EOS so padding in batched
# tokenization works.
generator_tokenizer.pad_token = generator_tokenizer.eos_token

def translate_tamil_to_english(text: str) -> str:
    """Translate *text* into English with the multilingual MarianMT model.

    Tokenizes on the module-level ``device``, runs greedy generation, and
    decodes the first (only) returned sequence.
    """
    encoded = translator_tokenizer(
        text, return_tensors="pt", padding=True, truncation=True
    ).to(device)
    generated = translator.generate(**encoded)
    english = translator_tokenizer.decode(generated[0], skip_special_tokens=True)
    return english

def generate_text(prompt: str) -> str:
    """Continue *prompt* with GPT-Neo and return the full decoded text
    (prompt + continuation).

    Fixes over the original:
    - ``max_new_tokens=100`` replaces ``max_length=100``: ``max_length``
      caps prompt *plus* continuation, so a prompt near/over 100 tokens
      left no room to generate anything; ``max_new_tokens`` always allows
      fresh tokens regardless of prompt length.
    - explicit ``pad_token_id`` silences the per-call warning emitted
      because GPT-Neo has no native pad token.
    """
    inputs = generator_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(device)
    output = generator.generate(
        **inputs,
        max_new_tokens=100,
        num_return_sequences=1,
        pad_token_id=generator_tokenizer.eos_token_id,
    )
    return generator_tokenizer.decode(output[0], skip_special_tokens=True)

def generate_image(prompt: str) -> Image.Image:
    """Request an image for *prompt* from the HF serverless inference API.

    Returns the decoded image on success, or a gray 300x300 placeholder on
    any failure. Fixes over the original:
    - ``requests.post`` now sits *inside* the ``try``, so network errors
      (connection refused, DNS failure, timeout) return the placeholder
      instead of propagating and crashing the Gradio callback;
    - an explicit ``timeout`` — requests has no default and would hang
      forever on a stalled connection;
    - non-200 responses (e.g. 503 while the model cold-starts) are logged
      with status and body excerpt instead of being silently swallowed;
    - ``headers.get(...)`` so a missing Content-Type header cannot raise.
    """
    try:
        response = requests.post(
            IMAGE_GEN_URL, headers=HEADERS, json={"inputs": prompt}, timeout=120
        )
        if response.status_code == 200 and response.headers.get("content-type", "").startswith("image"):
            return Image.open(io.BytesIO(response.content))
        print(f"Image generation failed: HTTP {response.status_code}: {response.text[:200]}")
    except Exception as e:
        print("Image generation failed:", e)
    return Image.new("RGB", (300, 300), color="gray")

def process_input(tamil_text: str) -> Tuple[str, str, Image.Image]:
    """Full pipeline for one Gradio click: translate the Tamil input, then
    feed the English translation to both the text generator and the image
    generator.

    Returns ``(english_translation, creative_text, image)`` in the order
    the UI outputs expect.
    """
    translated = translate_tamil_to_english(tamil_text)
    return (
        translated,
        generate_text(translated),
        generate_image(translated),
    )

# Gradio app: one input box, one button, three outputs wired to the pipeline.
with gr.Blocks() as demo:
    gr.Markdown("## Tamil to English Translator with Text and Image Generator")

    # Input widgets.
    source_box = gr.Textbox(label="Enter Tamil Text")
    run_button = gr.Button("Translate & Generate")

    # Output widgets, filled by process_input in this exact order.
    translation_box = gr.Textbox(label="Translated English")
    story_box = gr.Textbox(label="Creative Text")
    picture = gr.Image(label="Generated Image")

    run_button.click(
        fn=process_input,
        inputs=source_box,
        outputs=[translation_box, story_box, picture],
    )

demo.launch()