Víctor Sáez committed
Commit 862343e · 1 Parent(s): e319694

Add exception handling

Files changed (1)
  1. app.py +84 -94
app.py CHANGED
@@ -181,102 +181,92 @@ def translate_label(language_label, label):
 def detect_objects(image, language_selector, translated_model_selector, threshold):
     """Enhanced object detection with adjustable threshold and better info"""
     # Get the actual model key from the translated name
-    model_selector = get_model_key_from_translation(translated_model_selector, language_selector)
 
-    print(f"Processing image. Language: {language_selector}, Model: {model_selector}, Threshold: {threshold}")
-
-    # Load the selected model
-    model, processor = load_model(model_selector)
-
-    # Process the image
-    inputs = processor(images=image, return_tensors="pt")
-    outputs = model(**inputs)
-
-    # Convert model output to usable detection results with custom threshold
-    target_sizes = torch.tensor([image.size[::-1]])
-    results = processor.post_process_object_detection(
-        outputs, threshold=threshold, target_sizes=target_sizes
-    )[0]
-
-    # Create a copy of the image for drawing
-    image_with_boxes = image.copy()
-    draw = ImageDraw.Draw(image_with_boxes)
-
-    # Detection info
-    detection_info = f"Detected {len(results['scores'])} objects with threshold {threshold}\n"
-    detection_info += f"Model: {translated_model_selector} ({model_selector})\n\n"
-
-    # Colors for different confidence levels
-    colors = {
-        'high': 'red',      # > 0.8
-        'medium': 'orange', # 0.5-0.8
-        'low': 'yellow'     # < 0.5
-    }
-
-    detected_objects = []
-
-    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
-        confidence = score.item()
-        box = [round(x, 2) for x in box.tolist()]
-
-        # Choose color based on confidence
-        if confidence > 0.8:
-            color = colors['high']
-        elif confidence > 0.5:
-            color = colors['medium']
-        else:
-            color = colors['low']
-
-        # Draw bounding box
-        draw.rectangle(box, outline=color, width=3)
-
-        # Prepare label text
-        label_text = model.config.id2label[label.item()]
-        translated_label = translate_label(language_selector, label_text)
-        display_text = f"{translated_label}: {round(confidence, 3)}"
-
-        # Store detection info
-        detected_objects.append({
-            'label': label_text,
-            'translated': translated_label,
-            'confidence': confidence,
-            'box': box
-        })
-
-        # Calculate text position and size - FIXED FONT USAGE
         try:
-            image_width = image.size[0]
-            # Calculate the font size for drawing labels, ensuring it scales with image width but is never smaller than 12 pixels.
-            font_size = max(image_width // 40, 12)  # Adjust font size based on image width
-            font = get_font(font_size)  # Use the fixed font function
-
-            text_bbox = draw.textbbox((0, 0), display_text, font=font)
-            text_width = text_bbox[2] - text_bbox[0]
-            text_height = text_bbox[3] - text_bbox[1]
-        except:
-            # Fallback for older PIL versions
-            font = get_font(12)  # Use the fixed font function
-            text_bbox = draw.textbbox((0, 0), display_text, font=font)
-            text_width = text_bbox[2] - text_bbox[0]
-            text_height = text_bbox[3] - text_bbox[1]
-
-        # Draw text background
-        text_bg = [
-            box[0], box[1] - text_height - 4,
-            box[0] + text_width + 4, box[1]
-        ]
-        draw.rectangle(text_bg, fill="black")
-        draw.text((box[0] + 2, box[1] - text_height - 2), display_text, fill="white", font=font)
-
-    # Create detailed detection info
-    if detected_objects:
-        detection_info += "Objects found:\n"
-        for obj in sorted(detected_objects, key=lambda x: x['confidence'], reverse=True):
-            detection_info += f"- {obj['translated']} ({obj['label']}): {obj['confidence']:.3f}\n"
-    else:
-        detection_info += "No objects detected. Try lowering the threshold."
-
-    return image_with_boxes, detection_info
+def detect_objects(image, language_selector, translated_model_selector, threshold):
     try:
+        if image is None:
+            return None, "Por favor, sube una imagen antes de detectar objetos."
+
+        model_selector = get_model_key_from_translation(translated_model_selector, language_selector)
+        print(f"Processing image. Language: {language_selector}, Model: {model_selector}, Threshold: {threshold}")
+
+        model, processor = load_model(model_selector)
+
+        inputs = processor(images=image, return_tensors="pt")
+        outputs = model(**inputs)
+
+        target_sizes = torch.tensor([image.size[::-1]])
+        results = processor.post_process_object_detection(
+            outputs, threshold=threshold, target_sizes=target_sizes
+        )[0]
+
+        image_with_boxes = image.copy()
+        draw = ImageDraw.Draw(image_with_boxes)
+
+        detection_info = f"Detected {len(results['scores'])} objects with threshold {threshold}\n"
+        detection_info += f"Model: {translated_model_selector} ({model_selector})\n\n"
+
+        colors = {
+            'high': 'red',      # > 0.8
+            'medium': 'orange', # 0.5-0.8
+            'low': 'yellow'     # < 0.5
+        }
+
+        detected_objects = []
+
+        for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+            confidence = score.item()
+            box = [round(x, 2) for x in box.tolist()]
+            if confidence > 0.8:
+                color = colors['high']
+            elif confidence > 0.5:
+                color = colors['medium']
+            else:
+                color = colors['low']
+
+            draw.rectangle(box, outline=color, width=3)
+            label_text = model.config.id2label[label.item()]
+            translated_label = translate_label(language_selector, label_text)
+            display_text = f"{translated_label}: {round(confidence, 3)}"
+            detected_objects.append({
+                'label': label_text,
+                'translated': translated_label,
+                'confidence': confidence,
+                'box': box
+            })
+
+            try:
+                image_width = image.size[0]
+                font_size = max(image_width // 40, 12)
+                font = get_font(font_size)
+                text_bbox = draw.textbbox((0, 0), display_text, font=font)
+                text_width = text_bbox[2] - text_bbox[0]
+                text_height = text_bbox[3] - text_bbox[1]
+            except:
+                font = get_font(12)
+                text_width = 50
+                text_height = 20
+
+            text_bg = [
+                box[0], box[1] - text_height - 4,
+                box[0] + text_width + 4, box[1]
+            ]
+            draw.rectangle(text_bg, fill="black")
+            draw.text((box[0] + 2, box[1] - text_height - 2), display_text, fill="white", font=font)
+
+        if detected_objects:
+            detection_info += "Objects found:\n"
+            for obj in sorted(detected_objects, key=lambda x: x['confidence'], reverse=True):
+                detection_info += f"- {obj['translated']} ({obj['label']}): {obj['confidence']:.3f}\n"
+        else:
+            detection_info += "No objects detected. Try lowering the threshold."
+
+        return image_with_boxes, detection_info
+    except Exception as e:
+        import traceback
+        print("ERROR EN DETECT_OBJECTS:", e)
+        traceback.print_exc()
+        return None, f"Error detecting objects: {e}"
 
 
 def build_app():
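
For reference, a minimal self-contained sketch of the error-handling pattern this commit applies: the whole detection routine sits inside one try/except that checks for a missing image up front, logs the full traceback to the console, and returns (None, message) so the calling UI (e.g. a Gradio interface) can show the error instead of crashing. The run_pipeline and safe_detect names below are hypothetical stand-ins, not functions from app.py.

import traceback

def run_pipeline(image):
    # Hypothetical stand-in for the real model inference and box drawing.
    return image, "detection info"

def safe_detect(image):
    # Mirror the commit's contract: never raise; always return (annotated_image, info_text).
    try:
        if image is None:
            return None, "Please upload an image before detecting objects."
        return run_pipeline(image)
    except Exception as e:
        # Log the traceback for the server logs, hand the UI a readable message.
        print("ERROR IN SAFE_DETECT:", e)
        traceback.print_exc()
        return None, f"Error detecting objects: {e}"

print(safe_detect(None))     # (None, 'Please upload an image before detecting objects.')
print(safe_detect("photo"))  # ('photo', 'detection info')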