File size: 2,435 Bytes
bf7e1be
b6ca69f
bf7e1be
 
87884fb
b6ca69f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2c77ef8
b6ca69f
 
 
 
 
 
 
52b96f9
2bd9593
b6ca69f
 
2c77ef8
b6ca69f
2c77ef8
b6ca69f
 
 
 
 
2c77ef8
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import gradio as gr
from transformers import MarianMTModel, MarianTokenizer, BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import torch

# Load the Tamil-to-English translation model
# NOTE(review): from_pretrained downloads weights on first run — needs network
# access and noticeable startup time; presumably acceptable for this demo.
model_name = "Helsinki-NLP/opus-mt-ta-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
translation_model = MarianMTModel.from_pretrained(model_name)

# Load the BLIP model for image captioning
# (processor handles image preprocessing + token decoding; model generates captions)
caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

def translate_tamil_to_english(tamil_text):
    """Translate Tamil text to English with the MarianMT model.

    Args:
        tamil_text: Tamil source string.

    Returns:
        The English translation, with special tokens stripped.
    """
    # Tokenize to PyTorch tensors, generate, then decode the first sequence.
    encoded = tokenizer(tamil_text, return_tensors="pt", padding=True)
    output_ids = translation_model.generate(**encoded)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Generate image using text (stub – replace with actual model if needed)
def generate_image_from_text(text_prompt):
    """Return a placeholder image; *text_prompt* is currently ignored.

    Stand-in for a real text-to-image model (e.g. Stable Diffusion):
    always yields a solid 512x512 light-blue RGB image.
    """
    # Instead of using Stable Diffusion, just show a sample image
    placeholder = Image.new('RGB', (512, 512), color='lightblue')
    return placeholder

def describe_image(image):
    """Generate an English caption for *image* using the BLIP model.

    Args:
        image: A PIL image (anything the BLIP processor accepts).

    Returns:
        The decoded caption string, special tokens removed.
    """
    batch = caption_processor(images=image, return_tensors="pt")
    token_ids = caption_model.generate(**batch)
    return caption_processor.decode(token_ids[0], skip_special_tokens=True)

def full_pipeline(tamil_text):
    """Run the complete demo pipeline: translate, render, caption.

    Args:
        tamil_text: Tamil input string.

    Returns:
        Tuple of (english_text, generated_image, description) in the order
        the Gradio outputs expect.
    """
    english = translate_tamil_to_english(tamil_text)
    image = generate_image_from_text(english)
    caption = describe_image(image)
    return english, image, caption

# Gradio interface
# NOTE: component creation order inside gr.Blocks() determines the on-page
# layout, so the rows below must stay in this sequence.
with gr.Blocks() as demo:
    gr.Markdown("## Tamil to English → Image → Description")

    with gr.Row():
        # Tamil source text entered by the user
        tamil_input = gr.Textbox(label="Enter Tamil Text", lines=2, placeholder="உதாரணம்: ஒரு பூந்தோட்டத்தில் செருப்புகள் இருக்கின்றன")
    
    with gr.Row():
        translate_btn = gr.Button("Translate and Generate")

    with gr.Row():
        english_output = gr.Textbox(label="Translated English Text")
        description_output = gr.Textbox(label="Image Description")

    image_output = gr.Image(label="Generated Image")

    # Wire the button to the pipeline; the outputs list maps positionally to
    # full_pipeline's (english_text, generated_image, description) tuple.
    translate_btn.click(
        fn=full_pipeline,
        inputs=tamil_input,
        outputs=[english_output, image_output, description_output]
    )

demo.launch()