AkinyemiAra committed on
Commit
d20331b
·
verified ·
1 Parent(s): 89f930a

Update app_hf.py

Browse files
Files changed (1) hide show
  1. app_hf.py +107 -86
app_hf.py CHANGED
@@ -1,86 +1,107 @@
1
- import gradio as gr
2
- import torch
3
- import numpy as np
4
- from PIL import Image
5
- import os
6
- import json
7
- import base64
8
- from io import BytesIO
9
- import requests
10
- from typing import Dict, List, Any, Optional
11
- from transformers.pipelines import pipeline
12
-
13
- # Initialize the model
14
- model = pipeline("image-feature-extraction", model="nomic-ai/nomic-embed-vision-v1.5", trust_remote_code=True)
15
-
16
- # Function to generate embeddings from an image
17
- def generate_embedding(image):
18
- if image is None:
19
- return None, "No image provided"
20
-
21
- # Convert to PIL Image if needed
22
- if not isinstance(image, Image.Image):
23
- image = Image.fromarray(image)
24
-
25
- try:
26
- # Generate embedding using the transformers pipeline
27
- result = model(image)
28
-
29
- # Process the result based on its type
30
- embedding_list = None
31
-
32
- # Handle different possible output types
33
- if isinstance(result, torch.Tensor):
34
- embedding_list = result.detach().cpu().numpy().flatten().tolist()
35
- elif isinstance(result, np.ndarray):
36
- embedding_list = result.flatten().tolist()
37
- elif isinstance(result, list):
38
- # If it's a list of tensors or arrays
39
- if result and isinstance(result[0], (torch.Tensor, np.ndarray)):
40
- embedding_list = result[0].flatten().tolist() if hasattr(result[0], 'flatten') else result[0]
41
- else:
42
- embedding_list = result
43
- else:
44
- # Try to convert to a list as a last resort
45
- try:
46
- if result is not None:
47
- embedding_list = list(result)
48
- else:
49
- print("Result is None")
50
- return None, "Failed to generate embedding"
51
- except:
52
- print(f"Couldn't convert result of type {type(result)} to list")
53
- return None, "Failed to process embedding"
54
-
55
- # Ensure we have a valid embedding list
56
- if embedding_list is None:
57
- return None, "Failed to generate embedding"
58
-
59
- # Calculate embedding dimension
60
- embedding_dim = len(embedding_list)
61
-
62
- return {
63
- "embedding": embedding_list,
64
- "dimension": embedding_dim
65
- }, f"Dimension: {embedding_dim}"
66
- except Exception as e:
67
- print(f"Error generating embedding: {str(e)}")
68
- return None, f"Error: {str(e)}"
69
-
70
- # Create a Gradio app
71
- app = gr.Interface(
72
- fn=generate_embedding,
73
- inputs=gr.Image(type="pil", label="Input Image"),
74
- outputs=[
75
- gr.JSON(label="Embedding Output"),
76
- gr.Textbox(label="Embedding Dimension")
77
- ],
78
- title="Nomic Vision Embedding Model (nomic-ai/nomic-embed-vision-v1.5)",
79
- description="Upload an image to generate embeddings using the Nomic Vision model.",
80
- allow_flagging="never"
81
- )
82
-
83
- # Launch the app
84
- if __name__ == "__main__":
85
- # For Huggingface Spaces, we need to specify the server name and port
86
- app.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import numpy as np
4
+ from PIL import Image
5
+ import os
6
+ import json
7
+ import base64
8
+ from io import BytesIO
9
+ import requests
10
+ from typing import Dict, List, Any, Optional
11
+ from transformers.pipelines import pipeline
12
+
13
+ # Initialize the model
14
+ try:
15
+ model = pipeline("image-feature-extraction", model="nomic-ai/nomic-embed-vision-v1.5", trust_remote_code=True)
16
+ model_loaded = True
17
+ except Exception as e:
18
+ print(f"Error loading model: {str(e)}")
19
+ model = None
20
+ model_loaded = False
21
+
22
+ # Function to generate embeddings from an image
23
+ def generate_embedding(image):
24
+ if image is None:
25
+ return {"error": "No image provided"}, "No image provided"
26
+
27
+ if not model_loaded:
28
+ return {"error": "Model not loaded properly"}, "Error: Model not loaded properly"
29
+
30
+ # Convert to PIL Image if needed
31
+ if not isinstance(image, Image.Image):
32
+ try:
33
+ image = Image.fromarray(image)
34
+ except Exception as e:
35
+ print(f"Error converting image: {str(e)}")
36
+ return {"error": f"Invalid image format: {str(e)}"}, f"Error: Invalid image format"
37
+
38
+ try:
39
+ # Check if model is loaded before calling it
40
+ if model is None:
41
+ return {"error": "Model not loaded properly"}, "Error: Model not loaded properly"
42
+
43
+ # Generate embedding using the transformers pipeline
44
+ result = model(image)
45
+
46
+ # Process the result based on its type
47
+ embedding_list = None
48
+
49
+ # Handle different possible output types
50
+ if isinstance(result, torch.Tensor):
51
+ embedding_list = result.detach().cpu().numpy().flatten().tolist()
52
+ elif isinstance(result, np.ndarray):
53
+ embedding_list = result.flatten().tolist()
54
+ elif isinstance(result, list):
55
+ # If it's a list of tensors or arrays
56
+ if result and isinstance(result[0], (torch.Tensor, np.ndarray)):
57
+ embedding_list = result[0].flatten().tolist() if hasattr(result[0], 'flatten') else result[0]
58
+ else:
59
+ embedding_list = result
60
+ else:
61
+ # Try to convert to a list as a last resort
62
+ try:
63
+ if result is not None:
64
+ embedding_list = list(result)
65
+ else:
66
+ print("Result is None")
67
+ return {"error": "Failed to generate embedding"}, "Failed to generate embedding"
68
+ except:
69
+ print(f"Couldn't convert result of type {type(result)} to list")
70
+ return {"error": "Failed to process embedding"}, "Failed to process embedding"
71
+
72
+ # Ensure we have a valid embedding list
73
+ if embedding_list is None:
74
+ return {"error": "Failed to generate embedding"}, "Failed to generate embedding"
75
+
76
+ # Calculate embedding dimension
77
+ embedding_dim = len(embedding_list)
78
+
79
+ return {
80
+ "embedding": embedding_list,
81
+ "dimension": embedding_dim
82
+ }, f"Dimension: {embedding_dim}"
83
+ except Exception as e:
84
+ print(f"Error generating embedding: {str(e)}")
85
+ return {"error": f"Error generating embedding: {str(e)}"}, f"Error: {str(e)}"
86
+
87
+ # Create a Gradio app
88
+ app = gr.Interface(
89
+ fn=generate_embedding,
90
+ inputs=gr.Image(type="pil", label="Input Image"),
91
+ outputs=[
92
+ gr.JSON(label="Embedding Output"),
93
+ gr.Textbox(label="Embedding Dimension")
94
+ ],
95
+ title="Nomic Vision Embedding Model (nomic-ai/nomic-embed-vision-v1.5)",
96
+ description="Upload an image to generate embeddings using the Nomic Vision model.",
97
+ allow_flagging="never",
98
+ examples=[
99
+ ["nomic/examples/example1.jpg"],
100
+ ["nomic/examples/example2.jpg"]
101
+ ]
102
+ )
103
+
104
+ # Launch the app
105
+ if __name__ == "__main__":
106
+ # For Huggingface Spaces, we need to specify the server name and port
107
+ app.launch(server_name="0.0.0.0", server_port=7860, mcp_server=True)