import gradio as gr
import torch
import numpy as np
from PIL import Image
import os
import json
import base64
from io import BytesIO
import requests
from typing import Dict, List, Any, Optional
from transformers.pipelines import pipeline

# Initialize the embedding model
model = pipeline(
    "image-feature-extraction",
    model="nomic-ai/nomic-embed-vision-v1.5",
    trust_remote_code=True,
)


# Function to generate embeddings from an image
def generate_embedding(image):
    if image is None:
        return None

    # Convert to PIL Image if needed
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)

    try:
        # Generate embedding using the transformers pipeline
        result = model(image)

        # Normalize the result into a flat list of floats,
        # handling the different output types the pipeline may return
        embedding_list = None
        if isinstance(result, torch.Tensor):
            embedding_list = result.detach().cpu().numpy().flatten().tolist()
        elif isinstance(result, np.ndarray):
            embedding_list = result.flatten().tolist()
        elif isinstance(result, list):
            # The pipeline usually returns (possibly nested) lists;
            # flatten them so the output is a single vector
            if result and isinstance(result[0], (torch.Tensor, np.ndarray)):
                embedding_list = result[0].flatten().tolist()
            else:
                embedding_list = np.asarray(result, dtype=np.float32).flatten().tolist()
        else:
            # Try to convert to a list as a last resort
            if result is None:
                print("Result is None")
                return None
            try:
                embedding_list = list(result)
            except TypeError:
                print(f"Couldn't convert result of type {type(result)} to list")
                return None

        # Ensure we have a valid embedding list
        if embedding_list is None:
            return None

        # Calculate embedding dimension
        embedding_dim = len(embedding_list)

        return {
            "embedding": embedding_list,
            "dimension": embedding_dim,
        }
    except Exception as e:
        print(f"Error generating embedding: {str(e)}")
        return None


# Wrapper for the Gradio interface: splits the result into the two declared outputs
def embed_image(image):
    result = generate_embedding(image)
    if result is None:
        return None, "Embedding could not be generated"
    return result, str(result["dimension"])


# Function to generate embeddings from an image URL
def embed_image_from_url(image_url):
    try:
        # Download the image
        response = requests.get(image_url, timeout=30)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))
        # Generate embedding
        return generate_embedding(image)
    except Exception as e:
        return {"error": str(e)}


# Function to generate embeddings from base64 image data
def embed_image_from_base64(image_data):
    try:
        # Decode the base64 image
        decoded_data = base64.b64decode(image_data)
        image = Image.open(BytesIO(decoded_data))
        # Generate embedding
        return generate_embedding(image)
    except Exception as e:
        return {"error": str(e)}


# Create a Gradio app
app = gr.Interface(
    fn=embed_image,
    inputs=gr.Image(type="pil", label="Input Image"),
    outputs=[
        gr.JSON(label="Embedding Output"),
        gr.Textbox(label="Embedding Dimension"),
    ],
    title="Nomic Vision Embedding Model (nomic-ai/nomic-embed-vision-v1.5)",
    description="Upload an image to generate embeddings using the Nomic Vision model.",
    examples=[["examples/example1.jpg"], ["examples/example2.jpg"]],
    allow_flagging="never",
)

# Launch the app (with the MCP server enabled)
if __name__ == "__main__":
    app.launch(mcp_server=True)
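
# --- Example client usage (a minimal sketch, kept as comments only) ---
# Assumes the app is running locally on Gradio's default port (7860) and that the
# separate `gradio_client` package is installed; the image path below is just a
# placeholder. The interface returns two values: the embedding JSON and the
# dimension string.
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860/")
#   embedding_json, dimension = client.predict(
#       handle_file("examples/example1.jpg"), api_name="/predict"
#   )
#   print(dimension, len(embedding_json["embedding"]))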