AkinyemiAra committed
Commit 12f8da0 · verified · 1 Parent(s): 6d819b0

Update app.py
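For quick reference, the only functional change in the diff below is the final `app.launch()` call; the rest of app.py is unchanged. Condensed before/after (in current Gradio releases, `mcp_server=True` additionally serves the app's functions over the Model Context Protocol):

# before this commit
app.launch()

# after this commit
app.launch(mcp_server=True)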

Files changed (1):
  1. app.py +110 -110
app.py CHANGED
@@ -1,110 +1,110 @@
  import gradio as gr
  import torch
  import numpy as np
  from PIL import Image
  import os
  import json
  import base64
  from io import BytesIO
  import requests
  from typing import Dict, List, Any, Optional
  from transformers.pipelines import pipeline

  # Initialize the model
  model = pipeline("image-feature-extraction", model="nomic-ai/nomic-embed-vision-v1.5", trust_remote_code=True)

  # Function to generate embeddings from an image
  def generate_embedding(image):
      if image is None:
          return None

      # Convert to PIL Image if needed
      if not isinstance(image, Image.Image):
          image = Image.fromarray(image)

      try:
          # Generate embedding using the transformers pipeline
          result = model(image)

          # Process the result based on its type
          embedding_list = None

          # Handle different possible output types
          if isinstance(result, torch.Tensor):
              embedding_list = result.detach().cpu().numpy().flatten().tolist()
          elif isinstance(result, np.ndarray):
              embedding_list = result.flatten().tolist()
          elif isinstance(result, list):
              # If it's a list of tensors or arrays
              if result and isinstance(result[0], (torch.Tensor, np.ndarray)):
                  embedding_list = result[0].flatten().tolist() if hasattr(result[0], 'flatten') else result[0]
              else:
                  embedding_list = result
          else:
              # Try to convert to a list as a last resort
              try:
                  if result is not None:
                      embedding_list = list(result)
                  else:
                      print("Result is None")
                      return None
              except:
                  print(f"Couldn't convert result of type {type(result)} to list")
                  return None

          # Ensure we have a valid embedding list
          if embedding_list is None:
              return None

          # Calculate embedding dimension
          embedding_dim = len(embedding_list)

          return {
              "embedding": embedding_list,
              "dimension": embedding_dim
          }
      except Exception as e:
          print(f"Error generating embedding: {str(e)}")
          return None

  # Function to generate embeddings from an image URL
  def embed_image_from_url(image_url):
      try:
          # Download the image
          response = requests.get(image_url)
          image = Image.open(BytesIO(response.content))

          # Generate embedding
          return generate_embedding(image)
      except Exception as e:
          return {"error": str(e)}

  # Function to generate embeddings from base64 image data
  def embed_image_from_base64(image_data):
      try:
          # Decode the base64 image
          decoded_data = base64.b64decode(image_data)
          image = Image.open(BytesIO(decoded_data))

          # Generate embedding
          return generate_embedding(image)
      except Exception as e:
          return {"error": str(e)}

  # Create a Gradio app
  app = gr.Interface(
      fn=generate_embedding,
      inputs=gr.Image(type="pil", label="Input Image"),
      outputs=[
          gr.JSON(label="Embedding Output"),
          gr.Textbox(label="Embedding Dimension")
      ],
      title="Nomic Vision Embedding Model (nomic-ai/nomic-embed-vision-v1.5)",
      description="Upload an image to generate embeddings using the Nomic Vision model.",
      examples=[["examples/example1.jpg"], ["examples/example2.jpg"]],
      allow_flagging="never"
  )

  # Launch the app
  if __name__ == "__main__":
-     app.launch()
+     app.launch(mcp_server=True)
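A minimal client-side sketch for calling the deployed Space with `gradio_client`. The Space id `AkinyemiAra/nomic-embed-vision` and the endpoint name `/predict` are placeholders for illustration (neither appears in this commit); substitute the real values, which `client.view_api()` will list.

# Sketch only: query the Space's embedding endpoint from Python.
# The Space id and api_name below are assumptions, not taken from this commit.
from gradio_client import Client, handle_file

client = Client("AkinyemiAra/nomic-embed-vision")   # hypothetical Space id
result = client.predict(
    handle_file("examples/example1.jpg"),           # local path or URL to an image
    api_name="/predict",                            # gr.Interface's default endpoint name
)
print(result)

Note that generate_embedding returns a single dict while the Interface declares two output components, so the exact shape of `result` depends on how Gradio maps that return value; `client.view_api()` is the safest way to confirm the expected inputs and outputs. Once this commit is deployed, recent Gradio versions also expose the same functions to MCP clients, typically under the app's /gradio_api/mcp/sse path; the exact URL is version-dependent, so verify it against the running Space.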