youssefboutaleb committed
Commit 9ec40c6 · Parent: 9e697e1

Upload 9 files

Files changed (8)
  1. README.md +5 -5
  2. app.py +70 -0
  3. best.pt +3 -0
  4. img1.jpg +0 -0
  5. img2.jpg +0 -0
  6. img3.jpg +0 -0
  7. render.py +63 -0
  8. requirements.txt +7 -0
README.md CHANGED
@@ -1,13 +1,13 @@
 ---
-title: YOLOPUPIL
-emoji: 👁
+title: Yolov8 Meter
+emoji: 😻
 colorFrom: purple
-colorTo: indigo
+colorTo: purple
 sdk: gradio
-sdk_version: 4.13.0
+sdk_version: 4.8.0
 app_file: app.py
 pinned: false
-license: mit
+license: cc
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,64 @@
+import gradio as gr
+from ultralytics import YOLO
+
+from render import custom_render_result
+
+# Load the custom-trained YOLOv8 checkpoint shipped with the Space
+model = YOLO('./best.pt')
+
+
+def yoloV8_func(image: str = None,
+                image_size: int = 640,
+                conf_threshold: float = 0.4,
+                iou_threshold: float = 0.5):
+    """Perform YOLOv8 object detection on the given image.
+
+    Args:
+        image (str, optional): Path to the input image. Defaults to None.
+        image_size (int, optional): Inference image size for the model. Defaults to 640.
+        conf_threshold (float, optional): Confidence threshold for detections. Defaults to 0.4.
+        iou_threshold (float, optional): Intersection-over-Union threshold for NMS. Defaults to 0.5.
+    """
+    # Run object detection on the input image
+    results = model.predict(image,
+                            conf=conf_threshold,
+                            iou=iou_threshold,
+                            imgsz=image_size)
+
+    # Log the detected objects' information (class, coordinates, and confidence)
+    box = results[0].boxes
+    print("Object type:", box.cls)
+    print("Coordinates:", box.xyxy)
+    print("Probability:", box.conf)
+
+    # Render the output image with bounding boxes around detected objects
+    render = custom_render_result(model=model, image=image, result=results[0])
+    return render
+
+
+inputs = [
+    gr.Image(type="filepath", label="Input Image"),
+    gr.Slider(minimum=320, maximum=1280, step=32, label="Image Size", value=640),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="Confidence Threshold", value=0.4),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="IOU Threshold", value=0.5),
+]
+
+outputs = gr.Image(type="filepath", label="Output Image")
+
+title = "YOLOv8 101: Custom Object Detection on meter"
+
+examples = [['img1.jpg', 640, 0.5, 0.7],
+            ['img2.jpg', 800, 0.5, 0.6],
+            ['img3.jpg', 900, 0.5, 0.8]]
+
+yolo_app = gr.Interface(
+    fn=yoloV8_func,
+    inputs=inputs,
+    outputs=outputs,
+    title=title,
+    examples=examples,
+    cache_examples=False,
+)
+
+# Enable the request queue, then launch the Gradio interface in debug mode
+yolo_app.queue().launch(debug=True, share=True)
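For a quick local smoke test of the same pipeline without the Gradio UI, a minimal sketch, assuming `best.pt` and `img1.jpg` from this commit sit in the working directory:

```python
from ultralytics import YOLO

from render import custom_render_result

# Load the same custom checkpoint the Space uses
model = YOLO('./best.pt')

# Run one prediction with the defaults the app exposes as sliders
results = model.predict('img1.jpg', conf=0.4, iou=0.5, imgsz=640)

# Render and save the annotated image instead of returning it to Gradio
rendered = custom_render_result(model=model, image='img1.jpg', result=results[0])
rendered.save('img1_annotated.jpg')
```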
best.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7600ffc22323b9d4a85db7fe474d336a1563e87bda6f3b7189ba400d3024cc1
+size 87643838
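The diff records only the Git LFS pointer (spec version, SHA-256 oid, byte size), not the ~87 MB weights themselves. One way to fetch the resolved file programmatically is `huggingface_hub`; a sketch, with the repo id below a placeholder rather than anything confirmed by this commit:

```python
from huggingface_hub import hf_hub_download

# "youssefboutaleb/yolov8-meter" is a hypothetical Space id; substitute the
# real one. repo_type="space" targets Space repos rather than model repos.
weights_path = hf_hub_download(
    repo_id="youssefboutaleb/yolov8-meter",
    filename="best.pt",
    repo_type="space",
)
print(weights_path)  # local cache path of the downloaded checkpoint
```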
img1.jpg ADDED
img2.jpg ADDED
img3.jpg ADDED
render.py ADDED
@@ -0,0 +1,68 @@
+import numpy as np
+from PIL import Image
+from sahi.prediction import ObjectPrediction, PredictionScore, visualize_object_predictions
+from sahi.utils.cv import read_image_as_pil, get_bool_mask_from_coco_segmentation
+
+
+def custom_render_result(model, image, result, rect_th=2, text_th=2):
+    """Draw the detections from a YOLOv8 result onto the image and return a PIL Image."""
+    if model.overrides["task"] not in ["detect", "segment"]:
+        raise ValueError(
+            f"Model task must be either 'detect' or 'segment'. Got {model.overrides['task']}"
+        )
+
+    image = read_image_as_pil(image)
+    np_image = np.ascontiguousarray(image)
+
+    names = model.model.names
+
+    masks = result.masks
+    boxes = result.boxes
+
+    object_predictions = []
+    if boxes is not None:
+        det_ind = 0
+        for xyxy, conf, cls in zip(boxes.xyxy, boxes.conf, boxes.cls):
+            if masks:
+                img_height = np_image.shape[0]
+                img_width = np_image.shape[1]
+                # NOTE: masks.segments is deprecated in newer ultralytics
+                # releases in favour of masks.xyn (normalized segments)
+                segments = masks.segments
+                segments = segments[det_ind]  # np.array([[x1, y1], [x2, y2], ...])
+                # scale the normalized segments up to the full image shape
+                segments[:, 0] = segments[:, 0] * img_width
+                segments[:, 1] = segments[:, 1] * img_height
+                segmentation = [segments.ravel().tolist()]
+
+                bool_mask = get_bool_mask_from_coco_segmentation(
+                    segmentation, width=img_width, height=img_height
+                )
+                # skip degenerate masks covering two pixels or fewer
+                if bool_mask.sum() <= 2:
+                    continue
+                object_prediction = ObjectPrediction.from_coco_segmentation(
+                    segmentation=segmentation,
+                    category_name=names[int(cls)],
+                    category_id=int(cls),
+                    full_shape=[img_height, img_width],
+                )
+                object_prediction.score = PredictionScore(value=float(conf))
+            else:
+                object_prediction = ObjectPrediction(
+                    bbox=xyxy.tolist(),
+                    category_name=names[int(cls)],
+                    category_id=int(cls),
+                    score=float(conf),
+                )
+            object_predictions.append(object_prediction)
+            det_ind += 1
+
+    result = visualize_object_predictions(
+        image=np_image,
+        object_prediction_list=object_predictions,
+        rect_th=rect_th,
+        text_th=text_th,
+    )
+
+    return Image.fromarray(result["image"])
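render.py stands in for the commented-out `render_result` helper from ultralyticsplus. For comparison, ultralytics ships its own plotter; a minimal alternative sketch using it (`Results.plot()` returns a BGR array, hence the channel flip):

```python
from PIL import Image
from ultralytics import YOLO

model = YOLO('./best.pt')
results = model.predict('img1.jpg', conf=0.4, iou=0.5, imgsz=640)

# Results.plot() draws the boxes/masks and returns a BGR numpy array;
# reverse the channel order to get RGB before wrapping it in PIL
annotated = results[0].plot()
Image.fromarray(annotated[..., ::-1]).save('img1_plot.jpg')
```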
requirements.txt ADDED
@@ -0,0 +1,7 @@
+gradio==4.8.0
+numpy==1.26.2
+opencv_python==4.7.0.72
+Pillow==10.1.0
+sahi==0.11.15
+torch==2.1.1
+ultralytics==8.0.223
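A quick sanity check that the pinned stack resolves together, assuming a clean install from this file:

```python
import gradio, sahi, torch, ultralytics

# Expect 4.8.0 / 0.11.15 / 2.1.1 / 8.0.223 per the pins above
print(gradio.__version__, sahi.__version__, torch.__version__, ultralytics.__version__)

# Prints a one-line environment summary (version, Python, torch, CUDA
# availability), useful when debugging Space build failures
ultralytics.checks()
```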