Jordi Catafal commited on
Commit
03eefac
·
1 Parent(s): ebb30ca

Refactor to endpoint-per-model architecture: jina-v3 preloads at startup, remaining models load on demand

Browse files
CLAUDE.md CHANGED
@@ -6,14 +6,18 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
6
 
7
  This is a FastAPI-based multilingual embedding API that provides access to 5 specialized models for generating embeddings from Spanish, Catalan, English, and multilingual text. The API is deployed on Hugging Face Spaces and serves embeddings for different use cases including legal documents and general-purpose text.
8
 
9
- ## Available Models
10
 
11
- The API serves 5 models with different specializations:
12
- - **jina**: Bilingual Spanish-English (768D, 8192 tokens)
13
- - **robertalex**: Spanish legal domain (768D, 512 tokens)
14
- - **jina-v3**: Multilingual latest generation (1024D, 8192 tokens)
15
- - **legal-bert**: English legal domain (768D, 512 tokens)
16
- - **roberta-ca**: Catalan general purpose (1024D, 512 tokens)
 
 
 
 
17
 
18
  ## Architecture
19
 
@@ -40,17 +44,27 @@ The API will start on `http://0.0.0.0:7860` by default.
40
 
41
  ### Testing the API
42
  ```bash
43
- # Using the test script
44
- python test_api.py
45
 
46
  # Manual testing with curl
47
  # Health check
48
  curl http://localhost:7860/health
49
 
50
- # Generate embeddings
51
- curl -X POST "http://localhost:7860/embed" \
 
 
 
 
 
 
 
 
 
 
52
  -H "Content-Type: application/json" \
53
- -d '{"texts": ["Texto de prueba"], "model": "jina"}'
54
 
55
  # List models
56
  curl http://localhost:7860/models
 
6
 
7
  This is a FastAPI-based multilingual embedding API that provides access to 5 specialized models for generating embeddings from Spanish, Catalan, English, and multilingual text. The API is deployed on Hugging Face Spaces and serves embeddings for different use cases including legal documents and general-purpose text.
8
 
9
+ ## API Architecture - Endpoint Per Model
10
 
11
+ The API uses dedicated endpoints for each model with different loading strategies:
12
+
13
+ ### **Startup Model** (loads at app initialization):
14
+ - **jina-v3**: `/embed/jina-v3` - Multilingual latest generation (1024D, 8192 tokens)
15
+
16
+ ### **On-Demand Models** (load when first requested):
17
+ - **roberta-ca**: `/embed/roberta-ca` - Catalan general purpose (1024D, 512 tokens)
18
+ - **jina**: `/embed/jina` - Bilingual Spanish-English (768D, 8192 tokens)
19
+ - **robertalex**: `/embed/robertalex` - Spanish legal domain (768D, 512 tokens)
20
+ - **legal-bert**: `/embed/legal-bert` - English legal domain (768D, 512 tokens)
21
 
22
  ## Architecture
23
 
 
44
 
45
  ### Testing the API
46
  ```bash
47
+ # Using the endpoint test script
48
+ python test_endpoints.py
49
 
50
  # Manual testing with curl
51
  # Health check
52
  curl http://localhost:7860/health
53
 
54
+ # Test jina-v3 endpoint (startup model)
55
+ curl -X POST "http://localhost:7860/embed/jina-v3" \
56
+ -H "Content-Type: application/json" \
57
+ -d '{"texts": ["Hello world", "Hola mundo"], "normalize": true}'
58
+
59
+ # Test Catalan RoBERTa endpoint
60
+ curl -X POST "http://localhost:7860/embed/roberta-ca" \
61
+ -H "Content-Type: application/json" \
62
+ -d '{"texts": ["Bon dia", "Com estàs?"], "normalize": true}'
63
+
64
+ # Test Spanish legal endpoint
65
+ curl -X POST "http://localhost:7860/embed/robertalex" \
66
  -H "Content-Type: application/json" \
67
+ -d '{"texts": ["Artículo primero"], "normalize": true}'
68
 
69
  # List models
70
  curl http://localhost:7860/models
__pycache__/app.cpython-311.pyc CHANGED
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ
 
__pycache__/app_endpoints.cpython-311.pyc ADDED
Binary file (12.3 kB). View file
 
app.py CHANGED
@@ -1,5 +1,6 @@
1
  from fastapi import FastAPI, HTTPException
2
  from fastapi.middleware.cors import CORSMiddleware
 
3
  from typing import List
4
  import torch
5
  import uvicorn
@@ -7,32 +8,59 @@ import uvicorn
7
  from models.schemas import EmbeddingRequest, EmbeddingResponse, ModelInfo
8
  from utils.helpers import load_models, get_embeddings, cleanup_memory
9
 
10
- # Global model cache - completely on-demand loading
11
  models_cache = {}
12
 
13
- # All models load on demand to test deployment
14
- ON_DEMAND_MODELS = ["jina", "robertalex", "jina-v3", "legal-bert", "roberta-ca"]
15
 
16
- def ensure_model_loaded(model_name: str):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  """Load a specific model on demand if not already loaded"""
18
  global models_cache
19
  if model_name not in models_cache:
20
- if model_name in ON_DEMAND_MODELS:
21
- try:
22
- print(f"Loading model on demand: {model_name}...")
23
- new_models = load_models([model_name])
24
- models_cache.update(new_models)
25
- print(f"Model {model_name} loaded successfully!")
26
- except Exception as e:
27
- print(f"Failed to load model {model_name}: {str(e)}")
28
- raise HTTPException(status_code=500, detail=f"Model {model_name} loading failed: {str(e)}")
29
- else:
30
- raise HTTPException(status_code=400, detail=f"Unknown model: {model_name}")
 
 
 
 
 
 
 
 
31
 
32
  app = FastAPI(
33
  title="Multilingual & Legal Embedding API",
34
- description="Multi-model embedding API for Spanish, Catalan, English and Legal texts",
35
- version="3.0.0"
 
36
  )
37
 
38
  # Add CORS middleware to allow cross-origin requests
@@ -47,42 +75,151 @@ app.add_middleware(
47
  @app.get("/")
48
  async def root():
49
  return {
50
- "message": "Multilingual & Legal Embedding API - Minimal Version",
51
- "models": ["jina", "robertalex", "jina-v3", "legal-bert", "roberta-ca"],
52
  "status": "running",
53
  "docs": "/docs",
54
- "total_models": 5,
55
- "note": "All models load on first request"
 
 
 
 
 
 
56
  }
57
 
58
- @app.post("/embed", response_model=EmbeddingResponse)
59
- async def create_embeddings(request: EmbeddingRequest):
60
- """Generate embeddings for input texts"""
 
61
  try:
62
- # Load specific model on demand
63
- ensure_model_loaded(request.model)
64
 
65
- if not request.texts:
66
- raise HTTPException(status_code=400, detail="No texts provided")
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
- if len(request.texts) > 50: # Rate limiting
69
- raise HTTPException(status_code=400, detail="Maximum 50 texts per request")
 
 
 
 
 
 
 
 
 
 
70
 
71
  embeddings = get_embeddings(
72
  request.texts,
73
- request.model,
74
  models_cache,
75
  request.normalize,
76
  request.max_length
77
  )
78
 
79
- # Cleanup memory after large batches
80
- if len(request.texts) > 20:
81
- cleanup_memory()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
  return EmbeddingResponse(
84
  embeddings=embeddings,
85
- model_used=request.model,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  dimensions=len(embeddings[0]) if embeddings else 0,
87
  num_texts=len(request.texts)
88
  )
@@ -96,6 +233,24 @@ async def create_embeddings(request: EmbeddingRequest):
96
  async def list_models():
97
  """List available models and their specifications"""
98
  return [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
  ModelInfo(
100
  model_id="jina",
101
  name="jinaai/jina-embeddings-v2-base-es",
@@ -114,15 +269,6 @@ async def list_models():
114
  model_type="legal domain",
115
  description="Spanish legal domain specialized embeddings"
116
  ),
117
- ModelInfo(
118
- model_id="jina-v3",
119
- name="jinaai/jina-embeddings-v3",
120
- dimensions=1024,
121
- max_sequence_length=8192,
122
- languages=["Multilingual"],
123
- model_type="multilingual",
124
- description="Latest Jina v3 with superior multilingual performance"
125
- ),
126
  ModelInfo(
127
  model_id="legal-bert",
128
  name="nlpaueb/legal-bert-base-uncased",
@@ -131,30 +277,27 @@ async def list_models():
131
  languages=["English"],
132
  model_type="legal domain",
133
  description="English legal domain BERT model"
134
- ),
135
- ModelInfo(
136
- model_id="roberta-ca",
137
- name="projecte-aina/roberta-large-ca-v2",
138
- dimensions=1024,
139
- max_sequence_length=512,
140
- languages=["Catalan"],
141
- model_type="general",
142
- description="Catalan RoBERTa-large model trained on large corpus"
143
  )
144
  ]
145
 
146
  @app.get("/health")
147
  async def health_check():
148
  """Health check endpoint"""
149
- all_models_loaded = len(models_cache) == 5
150
 
151
  return {
152
- "status": "healthy",
153
- "all_models_loaded": all_models_loaded,
 
154
  "available_models": list(models_cache.keys()),
155
- "on_demand_models": ON_DEMAND_MODELS,
156
  "models_count": len(models_cache),
157
- "note": "All models load on first embedding request - minimal deployment version"
 
 
 
 
 
 
158
  }
159
 
160
  if __name__ == "__main__":
 
1
  from fastapi import FastAPI, HTTPException
2
  from fastapi.middleware.cors import CORSMiddleware
3
+ from contextlib import asynccontextmanager
4
  from typing import List
5
  import torch
6
  import uvicorn
 
8
  from models.schemas import EmbeddingRequest, EmbeddingResponse, ModelInfo
9
  from utils.helpers import load_models, get_embeddings, cleanup_memory
10
 
11
+ # Global model cache
12
  models_cache = {}
13
 
14
+ # Load jina-v3 at startup (most important model)
15
+ STARTUP_MODEL = "jina-v3"
16
 
17
+ @asynccontextmanager
18
+ async def lifespan(app: FastAPI):
19
+ """Application lifespan handler for startup and shutdown"""
20
+ # Startup - load jina-v3 model
21
+ try:
22
+ global models_cache
23
+ print(f"Loading startup model: {STARTUP_MODEL}...")
24
+ models_cache = load_models([STARTUP_MODEL])
25
+ print(f"Startup model loaded successfully: {list(models_cache.keys())}")
26
+ yield
27
+ except Exception as e:
28
+ print(f"Failed to load startup model: {str(e)}")
29
+ # Continue anyway - jina-v3 can be loaded on demand if startup fails
30
+ yield
31
+ finally:
32
+ # Shutdown - cleanup resources
33
+ cleanup_memory()
34
+
35
+ def ensure_model_loaded(model_name: str, max_length_limit: int):
36
  """Load a specific model on demand if not already loaded"""
37
  global models_cache
38
  if model_name not in models_cache:
39
+ try:
40
+ print(f"Loading model on demand: {model_name}...")
41
+ new_models = load_models([model_name])
42
+ models_cache.update(new_models)
43
+ print(f"Model {model_name} loaded successfully!")
44
+ except Exception as e:
45
+ print(f"Failed to load model {model_name}: {str(e)}")
46
+ raise HTTPException(status_code=500, detail=f"Model {model_name} loading failed: {str(e)}")
47
+
48
+ def validate_request_for_model(request: EmbeddingRequest, model_name: str, max_length_limit: int):
49
+ """Validate request parameters for specific model"""
50
+ if not request.texts:
51
+ raise HTTPException(status_code=400, detail="No texts provided")
52
+
53
+ if len(request.texts) > 50:
54
+ raise HTTPException(status_code=400, detail="Maximum 50 texts per request")
55
+
56
+ if request.max_length is not None and request.max_length > max_length_limit:
57
+ raise HTTPException(status_code=400, detail=f"Max length for {model_name} is {max_length_limit}")
58
 
59
  app = FastAPI(
60
  title="Multilingual & Legal Embedding API",
61
+ description="Multi-model embedding API with dedicated endpoints per model",
62
+ version="4.0.0",
63
+ lifespan=lifespan
64
  )
65
 
66
  # Add CORS middleware to allow cross-origin requests
 
75
  @app.get("/")
76
  async def root():
77
  return {
78
+ "message": "Multilingual & Legal Embedding API - Endpoint Per Model",
79
+ "version": "4.0.0",
80
  "status": "running",
81
  "docs": "/docs",
82
+ "startup_model": STARTUP_MODEL,
83
+ "available_endpoints": {
84
+ "jina-v3": "/embed/jina-v3",
85
+ "roberta-ca": "/embed/roberta-ca",
86
+ "jina": "/embed/jina",
87
+ "robertalex": "/embed/robertalex",
88
+ "legal-bert": "/embed/legal-bert"
89
+ }
90
  }
91
 
92
+ # Jina v3 - Multilingual (loads at startup)
93
+ @app.post("/embed/jina-v3", response_model=EmbeddingResponse)
94
+ async def embed_jina_v3(request: EmbeddingRequest):
95
+ """Generate embeddings using Jina v3 model (multilingual)"""
96
  try:
97
+ ensure_model_loaded("jina-v3", 8192)
98
+ validate_request_for_model(request, "jina-v3", 8192)
99
 
100
+ embeddings = get_embeddings(
101
+ request.texts,
102
+ "jina-v3",
103
+ models_cache,
104
+ request.normalize,
105
+ request.max_length
106
+ )
107
+
108
+ return EmbeddingResponse(
109
+ embeddings=embeddings,
110
+ model_used="jina-v3",
111
+ dimensions=len(embeddings[0]) if embeddings else 0,
112
+ num_texts=len(request.texts)
113
+ )
114
 
115
+ except ValueError as e:
116
+ raise HTTPException(status_code=400, detail=str(e))
117
+ except Exception as e:
118
+ raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
119
+
120
+ # Catalan RoBERTa
121
+ @app.post("/embed/roberta-ca", response_model=EmbeddingResponse)
122
+ async def embed_roberta_ca(request: EmbeddingRequest):
123
+ """Generate embeddings using Catalan RoBERTa model"""
124
+ try:
125
+ ensure_model_loaded("roberta-ca", 512)
126
+ validate_request_for_model(request, "roberta-ca", 512)
127
 
128
  embeddings = get_embeddings(
129
  request.texts,
130
+ "roberta-ca",
131
  models_cache,
132
  request.normalize,
133
  request.max_length
134
  )
135
 
136
+ return EmbeddingResponse(
137
+ embeddings=embeddings,
138
+ model_used="roberta-ca",
139
+ dimensions=len(embeddings[0]) if embeddings else 0,
140
+ num_texts=len(request.texts)
141
+ )
142
+
143
+ except ValueError as e:
144
+ raise HTTPException(status_code=400, detail=str(e))
145
+ except Exception as e:
146
+ raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
147
+
148
+ # Jina v2 - Spanish/English
149
+ @app.post("/embed/jina", response_model=EmbeddingResponse)
150
+ async def embed_jina(request: EmbeddingRequest):
151
+ """Generate embeddings using Jina v2 Spanish/English model"""
152
+ try:
153
+ ensure_model_loaded("jina", 8192)
154
+ validate_request_for_model(request, "jina", 8192)
155
+
156
+ embeddings = get_embeddings(
157
+ request.texts,
158
+ "jina",
159
+ models_cache,
160
+ request.normalize,
161
+ request.max_length
162
+ )
163
 
164
  return EmbeddingResponse(
165
  embeddings=embeddings,
166
+ model_used="jina",
167
+ dimensions=len(embeddings[0]) if embeddings else 0,
168
+ num_texts=len(request.texts)
169
+ )
170
+
171
+ except ValueError as e:
172
+ raise HTTPException(status_code=400, detail=str(e))
173
+ except Exception as e:
174
+ raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
175
+
176
+ # RoBERTalex - Spanish Legal
177
+ @app.post("/embed/robertalex", response_model=EmbeddingResponse)
178
+ async def embed_robertalex(request: EmbeddingRequest):
179
+ """Generate embeddings using RoBERTalex Spanish legal model"""
180
+ try:
181
+ ensure_model_loaded("robertalex", 512)
182
+ validate_request_for_model(request, "robertalex", 512)
183
+
184
+ embeddings = get_embeddings(
185
+ request.texts,
186
+ "robertalex",
187
+ models_cache,
188
+ request.normalize,
189
+ request.max_length
190
+ )
191
+
192
+ return EmbeddingResponse(
193
+ embeddings=embeddings,
194
+ model_used="robertalex",
195
+ dimensions=len(embeddings[0]) if embeddings else 0,
196
+ num_texts=len(request.texts)
197
+ )
198
+
199
+ except ValueError as e:
200
+ raise HTTPException(status_code=400, detail=str(e))
201
+ except Exception as e:
202
+ raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
203
+
204
+ # Legal BERT - English Legal
205
+ @app.post("/embed/legal-bert", response_model=EmbeddingResponse)
206
+ async def embed_legal_bert(request: EmbeddingRequest):
207
+ """Generate embeddings using Legal BERT English model"""
208
+ try:
209
+ ensure_model_loaded("legal-bert", 512)
210
+ validate_request_for_model(request, "legal-bert", 512)
211
+
212
+ embeddings = get_embeddings(
213
+ request.texts,
214
+ "legal-bert",
215
+ models_cache,
216
+ request.normalize,
217
+ request.max_length
218
+ )
219
+
220
+ return EmbeddingResponse(
221
+ embeddings=embeddings,
222
+ model_used="legal-bert",
223
  dimensions=len(embeddings[0]) if embeddings else 0,
224
  num_texts=len(request.texts)
225
  )
 
233
  async def list_models():
234
  """List available models and their specifications"""
235
  return [
236
+ ModelInfo(
237
+ model_id="jina-v3",
238
+ name="jinaai/jina-embeddings-v3",
239
+ dimensions=1024,
240
+ max_sequence_length=8192,
241
+ languages=["Multilingual"],
242
+ model_type="multilingual",
243
+ description="Latest Jina v3 with superior multilingual performance - loaded at startup"
244
+ ),
245
+ ModelInfo(
246
+ model_id="roberta-ca",
247
+ name="projecte-aina/roberta-large-ca-v2",
248
+ dimensions=1024,
249
+ max_sequence_length=512,
250
+ languages=["Catalan"],
251
+ model_type="general",
252
+ description="Catalan RoBERTa-large model trained on large corpus"
253
+ ),
254
  ModelInfo(
255
  model_id="jina",
256
  name="jinaai/jina-embeddings-v2-base-es",
 
269
  model_type="legal domain",
270
  description="Spanish legal domain specialized embeddings"
271
  ),
 
 
 
 
 
 
 
 
 
272
  ModelInfo(
273
  model_id="legal-bert",
274
  name="nlpaueb/legal-bert-base-uncased",
 
277
  languages=["English"],
278
  model_type="legal domain",
279
  description="English legal domain BERT model"
 
 
 
 
 
 
 
 
 
280
  )
281
  ]
282
 
283
  @app.get("/health")
284
  async def health_check():
285
  """Health check endpoint"""
286
+ startup_loaded = STARTUP_MODEL in models_cache
287
 
288
  return {
289
+ "status": "healthy" if startup_loaded else "partial",
290
+ "startup_model": STARTUP_MODEL,
291
+ "startup_model_loaded": startup_loaded,
292
  "available_models": list(models_cache.keys()),
 
293
  "models_count": len(models_cache),
294
+ "endpoints": {
295
+ "jina-v3": f"/embed/jina-v3 {'(ready)' if 'jina-v3' in models_cache else '(loads on demand)'}",
296
+ "roberta-ca": f"/embed/roberta-ca {'(ready)' if 'roberta-ca' in models_cache else '(loads on demand)'}",
297
+ "jina": f"/embed/jina {'(ready)' if 'jina' in models_cache else '(loads on demand)'}",
298
+ "robertalex": f"/embed/robertalex {'(ready)' if 'robertalex' in models_cache else '(loads on demand)'}",
299
+ "legal-bert": f"/embed/legal-bert {'(ready)' if 'legal-bert' in models_cache else '(loads on demand)'}"
300
+ }
301
  }
302
 
303
  if __name__ == "__main__":
app_endpoints.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from contextlib import asynccontextmanager
4
+ from typing import List
5
+ import torch
6
+ import uvicorn
7
+
8
+ from models.schemas import EmbeddingRequest, EmbeddingResponse, ModelInfo
9
+ from utils.helpers import load_models, get_embeddings, cleanup_memory
10
+
11
+ # Global model cache
12
+ models_cache = {}
13
+
14
+ # Load jina-v3 at startup (most important model)
15
+ STARTUP_MODEL = "jina-v3"
16
+
17
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan handler: preload the startup model, clean up on shutdown.

    A load failure is deliberately non-fatal: if jina-v3 cannot be loaded
    here, the /embed/jina-v3 endpoint will retry on demand.
    """
    # Startup - load jina-v3 model
    global models_cache
    try:
        print(f"Loading startup model: {STARTUP_MODEL}...")
        models_cache = load_models([STARTUP_MODEL])
        print(f"Startup model loaded successfully: {list(models_cache.keys())}")
    except Exception as e:
        # Continue anyway - jina-v3 can be loaded on demand if startup fails
        print(f"Failed to load startup model: {str(e)}")
    # Yield exactly once, outside the try/except above. In the original code
    # the yield sat inside the try block, so an exception thrown into the
    # generator during shutdown was caught by the except clause, which then
    # yielded a second time - an invalid (multi-yield) context manager.
    try:
        yield
    finally:
        # Shutdown - cleanup resources
        cleanup_memory()
34
+
35
def ensure_model_loaded(model_name: str, max_length_limit: int = 0):
    """Load *model_name* into the global cache on first use.

    Args:
        model_name: Key understood by ``utils.helpers.load_models``.
        max_length_limit: Unused. Kept (now optional) for backward
            compatibility with existing call sites; context-length checks
            are performed by ``validate_request_for_model`` instead.

    Raises:
        HTTPException: 500 when the model fails to load.
    """
    global models_cache
    if model_name not in models_cache:
        try:
            print(f"Loading model on demand: {model_name}...")
            new_models = load_models([model_name])
            models_cache.update(new_models)
            print(f"Model {model_name} loaded successfully!")
        except Exception as e:
            print(f"Failed to load model {model_name}: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Model {model_name} loading failed: {str(e)}")
47
+
48
def validate_request_for_model(request: EmbeddingRequest, model_name: str, max_length_limit: int):
    """Reject requests that are empty, oversized, or exceed the model's context window.

    Raises:
        HTTPException: 400 with a human-readable reason on any violation.
    """
    texts = request.texts
    if not texts:
        raise HTTPException(status_code=400, detail="No texts provided")
    if len(texts) > 50:
        raise HTTPException(status_code=400, detail="Maximum 50 texts per request")

    requested_length = request.max_length
    if requested_length is not None and requested_length > max_length_limit:
        raise HTTPException(status_code=400, detail=f"Max length for {model_name} is {max_length_limit}")
58
+
59
# FastAPI application object; the lifespan handler preloads the startup
# model and releases resources on shutdown.
app = FastAPI(
    title="Multilingual & Legal Embedding API",
    description="Multi-model embedding API with dedicated endpoints per model",
    version="4.0.0",
    lifespan=lifespan
)

# Add CORS middleware to allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, specify actual domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
74
+
75
@app.get("/")
async def root():
    """Service index: version info plus a map of the per-model embedding endpoints."""
    endpoints = {
        name: f"/embed/{name}"
        for name in ("jina-v3", "roberta-ca", "jina", "robertalex", "legal-bert")
    }
    return {
        "message": "Multilingual & Legal Embedding API - Endpoint Per Model",
        "version": "4.0.0",
        "status": "running",
        "docs": "/docs",
        "startup_model": STARTUP_MODEL,
        "available_endpoints": endpoints,
    }
91
+
92
# Jina v3 - Multilingual (loads at startup)
@app.post("/embed/jina-v3", response_model=EmbeddingResponse)
async def embed_jina_v3(request: EmbeddingRequest):
    """Generate embeddings using Jina v3 model (multilingual).

    Loads the model on demand if startup preloading failed, validates the
    request against the 8192-token context window, and returns embeddings.
    """
    try:
        ensure_model_loaded("jina-v3", 8192)
        validate_request_for_model(request, "jina-v3", 8192)

        embeddings = get_embeddings(
            request.texts,
            "jina-v3",
            models_cache,
            request.normalize,
            request.max_length
        )

        return EmbeddingResponse(
            embeddings=embeddings,
            model_used="jina-v3",
            dimensions=len(embeddings[0]) if embeddings else 0,
            num_texts=len(request.texts)
        )

    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so without this
        # re-raise the 400s from validation (and the 500 from model loading)
        # were swallowed below and rewrapped as generic 500 "Internal error".
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
119
+
120
# Catalan RoBERTa
@app.post("/embed/roberta-ca", response_model=EmbeddingResponse)
async def embed_roberta_ca(request: EmbeddingRequest):
    """Generate embeddings using Catalan RoBERTa model.

    Loads the model on first request, validates against the 512-token
    context window, and returns embeddings.
    """
    try:
        ensure_model_loaded("roberta-ca", 512)
        validate_request_for_model(request, "roberta-ca", 512)

        embeddings = get_embeddings(
            request.texts,
            "roberta-ca",
            models_cache,
            request.normalize,
            request.max_length
        )

        return EmbeddingResponse(
            embeddings=embeddings,
            model_used="roberta-ca",
            dimensions=len(embeddings[0]) if embeddings else 0,
            num_texts=len(request.texts)
        )

    except HTTPException:
        # Bug fix: re-raise HTTPExceptions from loading/validation as-is
        # instead of letting the generic handler turn 400s into 500s.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
147
+
148
# Jina v2 - Spanish/English
@app.post("/embed/jina", response_model=EmbeddingResponse)
async def embed_jina(request: EmbeddingRequest):
    """Generate embeddings using Jina v2 Spanish/English model.

    Loads the model on first request, validates against the 8192-token
    context window, and returns embeddings.
    """
    try:
        ensure_model_loaded("jina", 8192)
        validate_request_for_model(request, "jina", 8192)

        embeddings = get_embeddings(
            request.texts,
            "jina",
            models_cache,
            request.normalize,
            request.max_length
        )

        return EmbeddingResponse(
            embeddings=embeddings,
            model_used="jina",
            dimensions=len(embeddings[0]) if embeddings else 0,
            num_texts=len(request.texts)
        )

    except HTTPException:
        # Bug fix: re-raise HTTPExceptions from loading/validation as-is
        # instead of letting the generic handler turn 400s into 500s.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
175
+
176
# RoBERTalex - Spanish Legal
@app.post("/embed/robertalex", response_model=EmbeddingResponse)
async def embed_robertalex(request: EmbeddingRequest):
    """Generate embeddings using RoBERTalex Spanish legal model.

    Loads the model on first request, validates against the 512-token
    context window, and returns embeddings.
    """
    try:
        ensure_model_loaded("robertalex", 512)
        validate_request_for_model(request, "robertalex", 512)

        embeddings = get_embeddings(
            request.texts,
            "robertalex",
            models_cache,
            request.normalize,
            request.max_length
        )

        return EmbeddingResponse(
            embeddings=embeddings,
            model_used="robertalex",
            dimensions=len(embeddings[0]) if embeddings else 0,
            num_texts=len(request.texts)
        )

    except HTTPException:
        # Bug fix: re-raise HTTPExceptions from loading/validation as-is
        # instead of letting the generic handler turn 400s into 500s.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
203
+
204
# Legal BERT - English Legal
@app.post("/embed/legal-bert", response_model=EmbeddingResponse)
async def embed_legal_bert(request: EmbeddingRequest):
    """Generate embeddings using Legal BERT English model.

    Loads the model on first request, validates against the 512-token
    context window, and returns embeddings.
    """
    try:
        ensure_model_loaded("legal-bert", 512)
        validate_request_for_model(request, "legal-bert", 512)

        embeddings = get_embeddings(
            request.texts,
            "legal-bert",
            models_cache,
            request.normalize,
            request.max_length
        )

        return EmbeddingResponse(
            embeddings=embeddings,
            model_used="legal-bert",
            dimensions=len(embeddings[0]) if embeddings else 0,
            num_texts=len(request.texts)
        )

    except HTTPException:
        # Bug fix: re-raise HTTPExceptions from loading/validation as-is
        # instead of letting the generic handler turn 400s into 500s.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
231
+
232
@app.get("/models", response_model=List[ModelInfo])
async def list_models():
    """List available models and their specifications."""
    # (model_id, HF repo name, dimensions, max sequence length,
    #  languages, model type, description)
    specs = [
        ("jina-v3", "jinaai/jina-embeddings-v3", 1024, 8192,
         ["Multilingual"], "multilingual",
         "Latest Jina v3 with superior multilingual performance - loaded at startup"),
        ("roberta-ca", "projecte-aina/roberta-large-ca-v2", 1024, 512,
         ["Catalan"], "general",
         "Catalan RoBERTa-large model trained on large corpus"),
        ("jina", "jinaai/jina-embeddings-v2-base-es", 768, 8192,
         ["Spanish", "English"], "bilingual",
         "Bilingual Spanish-English embeddings with long context support"),
        ("robertalex", "PlanTL-GOB-ES/RoBERTalex", 768, 512,
         ["Spanish"], "legal domain",
         "Spanish legal domain specialized embeddings"),
        ("legal-bert", "nlpaueb/legal-bert-base-uncased", 768, 512,
         ["English"], "legal domain",
         "English legal domain BERT model"),
    ]
    return [
        ModelInfo(
            model_id=model_id,
            name=name,
            dimensions=dims,
            max_sequence_length=max_len,
            languages=langs,
            model_type=model_type,
            description=description,
        )
        for model_id, name, dims, max_len, langs, model_type, description in specs
    ]
282
+
283
@app.get("/health")
async def health_check():
    """Health check endpoint reporting startup-model and per-endpoint readiness."""
    startup_loaded = STARTUP_MODEL in models_cache
    endpoint_status = {
        name: f"/embed/{name} {'(ready)' if name in models_cache else '(loads on demand)'}"
        for name in ("jina-v3", "roberta-ca", "jina", "robertalex", "legal-bert")
    }
    return {
        "status": "healthy" if startup_loaded else "partial",
        "startup_model": STARTUP_MODEL,
        "startup_model_loaded": startup_loaded,
        "available_models": list(models_cache.keys()),
        "models_count": len(models_cache),
        "endpoints": endpoint_status,
    }
302
+
303
if __name__ == "__main__":
    # Set multi-threading for CPU:
    # 8 intra-op threads for tensor math, 1 inter-op thread to avoid
    # oversubscribing the CPU-only host.
    torch.set_num_threads(8)
    torch.set_num_interop_threads(1)

    # Port 7860 matches the Hugging Face Spaces default documented in CLAUDE.md.
    uvicorn.run(app, host="0.0.0.0", port=7860)
app_old_minimal.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from typing import List
4
+ import torch
5
+ import uvicorn
6
+
7
+ from models.schemas import EmbeddingRequest, EmbeddingResponse, ModelInfo
8
+ from utils.helpers import load_models, get_embeddings, cleanup_memory
9
+
10
+ # Global model cache - completely on-demand loading
11
+ models_cache = {}
12
+
13
+ # All models load on demand to test deployment
14
+ ON_DEMAND_MODELS = ["jina", "robertalex", "jina-v3", "legal-bert", "roberta-ca"]
15
+
16
def ensure_model_loaded(model_name: str):
    """Load *model_name* into the global cache if it is not already present.

    Raises:
        HTTPException: 400 for unknown model names, 500 when loading fails.
    """
    global models_cache
    if model_name in models_cache:
        return
    if model_name not in ON_DEMAND_MODELS:
        raise HTTPException(status_code=400, detail=f"Unknown model: {model_name}")
    try:
        print(f"Loading model on demand: {model_name}...")
        models_cache.update(load_models([model_name]))
        print(f"Model {model_name} loaded successfully!")
    except Exception as e:
        print(f"Failed to load model {model_name}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Model {model_name} loading failed: {str(e)}")
31
+
32
# FastAPI application object for the minimal (fully on-demand) deployment.
app = FastAPI(
    title="Multilingual & Legal Embedding API",
    description="Multi-model embedding API for Spanish, Catalan, English and Legal texts",
    version="3.0.0"
)

# Add CORS middleware to allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, specify actual domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
46
+
47
@app.get("/")
async def root():
    """Service index for the minimal deployment (all models load on demand)."""
    return {
        "message": "Multilingual & Legal Embedding API - Minimal Version",
        "models": list(ON_DEMAND_MODELS),
        "status": "running",
        "docs": "/docs",
        "total_models": len(ON_DEMAND_MODELS),
        "note": "All models load on first request",
    }
57
+
58
@app.post("/embed", response_model=EmbeddingResponse)
async def create_embeddings(request: EmbeddingRequest):
    """Generate embeddings for input texts.

    Loads the requested model on demand, validates the batch, and returns
    the embeddings together with dimensionality metadata.
    """
    try:
        # Load specific model on demand
        ensure_model_loaded(request.model)

        if not request.texts:
            raise HTTPException(status_code=400, detail="No texts provided")

        if len(request.texts) > 50:  # Rate limiting
            raise HTTPException(status_code=400, detail="Maximum 50 texts per request")

        embeddings = get_embeddings(
            request.texts,
            request.model,
            models_cache,
            request.normalize,
            request.max_length
        )

        # Cleanup memory after large batches
        if len(request.texts) > 20:
            cleanup_memory()

        return EmbeddingResponse(
            embeddings=embeddings,
            model_used=request.model,
            dimensions=len(embeddings[0]) if embeddings else 0,
            num_texts=len(request.texts)
        )

    except HTTPException:
        # Bug fix: HTTPException subclasses Exception, so without this
        # re-raise the 400 validation errors raised above were swallowed by
        # the generic handler and rewrapped as 500 "Internal error".
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
94
+
95
@app.get("/models", response_model=List[ModelInfo])
async def list_models():
    """List available models and their specifications."""
    # (model_id, HF repo name, dimensions, max sequence length,
    #  languages, model type, description)
    specs = [
        ("jina", "jinaai/jina-embeddings-v2-base-es", 768, 8192,
         ["Spanish", "English"], "bilingual",
         "Bilingual Spanish-English embeddings with long context support"),
        ("robertalex", "PlanTL-GOB-ES/RoBERTalex", 768, 512,
         ["Spanish"], "legal domain",
         "Spanish legal domain specialized embeddings"),
        ("jina-v3", "jinaai/jina-embeddings-v3", 1024, 8192,
         ["Multilingual"], "multilingual",
         "Latest Jina v3 with superior multilingual performance"),
        ("legal-bert", "nlpaueb/legal-bert-base-uncased", 768, 512,
         ["English"], "legal domain",
         "English legal domain BERT model"),
        ("roberta-ca", "projecte-aina/roberta-large-ca-v2", 1024, 512,
         ["Catalan"], "general",
         "Catalan RoBERTa-large model trained on large corpus"),
    ]
    return [
        ModelInfo(
            model_id=model_id,
            name=name,
            dimensions=dims,
            max_sequence_length=max_len,
            languages=langs,
            model_type=model_type,
            description=description,
        )
        for model_id, name, dims, max_len, langs, model_type, description in specs
    ]
145
+
146
@app.get("/health")
async def health_check():
    """Health check endpoint for the minimal (fully on-demand) deployment."""
    # Consistency fix: compare against the configured model list instead of
    # the magic number 5, so the flag stays correct if the list changes.
    all_models_loaded = len(models_cache) == len(ON_DEMAND_MODELS)

    return {
        "status": "healthy",
        "all_models_loaded": all_models_loaded,
        "available_models": list(models_cache.keys()),
        "on_demand_models": ON_DEMAND_MODELS,
        "models_count": len(models_cache),
        "note": "All models load on first embedding request - minimal deployment version"
    }
159
+
160
if __name__ == "__main__":
    # Set multi-threading for CPU:
    # 8 intra-op threads for tensor math, 1 inter-op thread to avoid
    # oversubscribing the CPU-only host.
    torch.set_num_threads(8)
    torch.set_num_interop_threads(1)

    # Port 7860 matches the Hugging Face Spaces default documented in CLAUDE.md.
    uvicorn.run(app, host="0.0.0.0", port=7860)
models/__pycache__/schemas.cpython-311.pyc CHANGED
Binary files a/models/__pycache__/schemas.cpython-311.pyc and b/models/__pycache__/schemas.cpython-311.pyc differ
 
models/schemas.py CHANGED
@@ -11,10 +11,6 @@ class EmbeddingRequest(BaseModel):
11
  description="List of texts to embed",
12
  example=["Hola mundo", "¿Cómo estás?"]
13
  )
14
- model: Literal["jina", "robertalex", "jina-v3", "legal-bert", "roberta-ca"] = Field(
15
- default="jina",
16
- description="Model to use for embeddings"
17
- )
18
  normalize: bool = Field(
19
  default=True,
20
  description="Whether to normalize embeddings to unit length"
@@ -36,15 +32,12 @@ class EmbeddingRequest(BaseModel):
36
  return v
37
 
38
  @validator('max_length')
39
- def validate_max_length(cls, v, values):
40
  if v is not None:
41
- model = values.get('model', 'jina')
42
- if model in ['jina', 'jina-v3'] and v > 8192:
43
- raise ValueError(f"Max length for {model} model is 8192")
44
- elif model in ['robertalex', 'legal-bert', 'roberta-ca'] and v > 512:
45
- raise ValueError(f"Max length for {model} model is 512")
46
  if v < 1:
47
  raise ValueError("Max length must be positive")
 
 
48
  return v
49
 
50
  class EmbeddingResponse(BaseModel):
 
11
  description="List of texts to embed",
12
  example=["Hola mundo", "¿Cómo estás?"]
13
  )
 
 
 
 
14
  normalize: bool = Field(
15
  default=True,
16
  description="Whether to normalize embeddings to unit length"
 
32
  return v
33
 
34
@validator('max_length')
def validate_max_length(cls, v):
    """Ensure max_length, when provided, falls in the supported range [1, 8192]."""
    # None means "use the model's own limit" — nothing to validate.
    if v is None:
        return v
    if v < 1:
        raise ValueError("Max length must be positive")
    if v > 8192:
        raise ValueError("Max length cannot exceed 8192")
    return v
42
 
43
  class EmbeddingResponse(BaseModel):
test_endpoints.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script for the new endpoint-per-model API architecture
4
+ """
5
+
6
+ import requests
7
+ import json
8
+ import time
9
+
10
def _check_embed_endpoint(base_url, model_id, label, texts):
    """POST a small batch of texts to /embed/<model_id> and print the outcome.

    Prints the HTTP status, then either the model name and embedding shape
    (on 200) or the error body. Exceptions are caught and reported so one
    failing endpoint does not abort the whole suite.
    """
    try:
        payload = {
            "texts": texts,
            "normalize": True
        }
        response = requests.post(f"{base_url}/embed/{model_id}", json=payload)
        print(f"✓ {label} endpoint: {response.status_code}")
        if response.status_code == 200:
            data = response.json()
            print(f"  Model: {data.get('model_used', 'N/A')}")
            print(f"  Embeddings: {data.get('num_texts', 0)} texts → {data.get('dimensions', 0)} dimensions")
        else:
            print(f"  Error: {response.text}")
    except Exception as e:
        print(f"✗ {label} endpoint failed: {e}")


def test_endpoint_api(base_url="https://aurasystems-spanish-embeddings-api.hf.space"):
    """Test the new endpoint-based API.

    Exercises the root and health endpoints, then one embedding request per
    model endpoint (which also triggers on-demand model loading), and ends
    with a second health check showing which models were loaded.

    Returns True if the root endpoint responded, False otherwise; all other
    failures are reported but non-fatal.
    """

    print(f"Testing Endpoint-Based API at {base_url}")
    print("=" * 60)

    # Root endpoint — a failure here means the service is unreachable,
    # so bail out early.
    try:
        response = requests.get(f"{base_url}/")
        print(f"✓ Root endpoint: {response.status_code}")
        if response.status_code == 200:
            data = response.json()
            print(f"  Version: {data.get('version', 'N/A')}")
            print(f"  Startup model: {data.get('startup_model', 'N/A')}")
            print(f"  Available endpoints: {list(data.get('available_endpoints', {}).keys())}")
        else:
            print(f"  Error: {response.text}")
            return False
    except Exception as e:
        print(f"✗ Root endpoint failed: {e}")
        return False

    # Health endpoint — informational only, failures are non-fatal.
    try:
        response = requests.get(f"{base_url}/health")
        print(f"✓ Health endpoint: {response.status_code}")
        if response.status_code == 200:
            health_data = response.json()
            print(f"  Startup model loaded: {health_data.get('startup_model_loaded', False)}")
            print(f"  Available models: {health_data.get('available_models', [])}")
            print(f"  Models count: {health_data.get('models_count', 0)}")
        else:
            print(f"  Error: {response.text}")
    except Exception as e:
        print(f"✗ Health endpoint failed: {e}")

    print("\n" + "=" * 60)
    print("TESTING MODEL ENDPOINTS")
    print("=" * 60)

    # One request per model endpoint; the sample texts match each model's
    # target language/domain (multilingual, Catalan, Spanish/English,
    # Spanish legal, English legal).
    endpoint_cases = [
        ("jina-v3", "Jina-v3", ["Hello world", "Bonjour le monde", "Hola mundo"]),
        ("roberta-ca", "RoBERTa-ca", ["Bon dia", "Com estàs?", "Catalunya és meravellosa"]),
        ("jina", "Jina", ["Texto en español", "Text in English"]),
        ("robertalex", "RoBERTalex", ["Artículo primero de la constitución", "El contrato será válido"]),
        ("legal-bert", "Legal-BERT", ["This agreement is legally binding", "The contract shall be valid"]),
    ]
    for model_id, label, texts in endpoint_cases:
        _check_embed_endpoint(base_url, model_id, label, texts)

    print("\n" + "=" * 60)
    print("FINAL HEALTH CHECK")
    print("=" * 60)

    # Final health check to see which models got loaded on demand.
    try:
        response = requests.get(f"{base_url}/health")
        if response.status_code == 200:
            health_data = response.json()
            print(f"✓ Final status: {health_data.get('status', 'unknown')}")
            print(f"  Available models: {health_data.get('available_models', [])}")
            print(f"  Total models loaded: {health_data.get('models_count', 0)}/5")

            endpoints = health_data.get('endpoints', {})
            for model, status in endpoints.items():
                print(f"  {model}: {status}")
    except Exception as e:
        print(f"✗ Final health check failed: {e}")

    return True
155
+
156
if __name__ == "__main__":
    # Run the full suite against the default deployed Space URL.
    test_endpoint_api()