anilbhujel committed
Commit a26ae4f · 1 Parent(s): 9c62c6f

Save my local changes

code/RGBD/usplf_rgbd_data_preparation.py ADDED
@@ -0,0 +1,29 @@
+ import os, glob
+ import cv2
+ import numpy as np
+ from tqdm import tqdm
+
+ # Adjust these paths
+ color_dir = "path/to/your/color/images"
+ depth_dir = "path/to/your/depth/images"
+ out_dir = "path/to/your/rgbd/images"
+
+ os.makedirs(out_dir, exist_ok=True)
+
+ print("Processing color and depth images...")
+ for color_path in tqdm(glob.glob(os.path.join(color_dir, "*.png"))):
+     base = os.path.basename(color_path)
+     depth_path = os.path.join(depth_dir, base)
+     if not os.path.exists(depth_path):
+         continue
+
+     rgb = cv2.imread(color_path, cv2.IMREAD_UNCHANGED)    # H×W×3 (BGR channel order)
+     depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)  # H×W (raw depth)
+     depth = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX)
+     depth = depth.astype(np.uint8)
+
+     # Merge to H×W×4 (B, G, R, Depth) and save as a 4-channel PNG
+     rgbd = cv2.merge([rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2], depth])
+     cv2.imwrite(os.path.join(out_dir, base), rgbd)
+
+ print("RGBD data preparation completed.")
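A quick way to confirm the merged files really are 4-channel is to read one back with IMREAD_UNCHANGED; a minimal sketch (the directory is the same placeholder as above):

import os, glob
import cv2

out_dir = "path/to/your/rgbd/images"
sample = sorted(glob.glob(os.path.join(out_dir, "*.png")))[0]
img = cv2.imread(sample, cv2.IMREAD_UNCHANGED)
print(sample, img.shape, img.dtype)  # expected: (H, W, 4) uint8
assert img.ndim == 3 and img.shape[2] == 4, "saved image is not 4-channel"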
code/RGBD/yolo11_rgbd.yaml ADDED
@@ -0,0 +1,51 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11 object detection model with P3/8 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/detect
+
+ # Parameters
+ nc: 5 # number of classes
+ ch: 4 # number of input channels (4 for RGB-D, 3 for RGB)
+ scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
+   # [depth, width, max_channels]
+   # n: [0.50, 0.25, 1024] # summary: 181 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
+   # s: [0.50, 0.50, 1024] # summary: 181 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs
+   # m: [0.50, 1.00, 512] # summary: 231 layers, 20114688 parameters, 20114672 gradients, 68.5 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 357 layers, 25372160 parameters, 25372144 gradients, 87.6 GFLOPs
+   # x: [1.00, 1.50, 512] # summary: 357 layers, 56966176 parameters, 56966160 gradients, 196.0 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+   # [from, repeats, module, args]
+   - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+   - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+   - [-1, 2, C3k2, [256, False, 0.25]]
+   - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+   - [-1, 2, C3k2, [512, False, 0.25]]
+   - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+   - [-1, 2, C3k2, [512, True]]
+   - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+   - [-1, 2, C3k2, [1024, True]]
+   - [-1, 1, SPPF, [1024, 5]] # 9
+   - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+   - [-1, 2, C3k2, [512, False]] # 13
+
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+   - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+   - [-1, 1, Conv, [256, 3, 2]]
+   - [[-1, 13], 1, Concat, [1]] # cat head P4
+   - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+   - [-1, 1, Conv, [512, 3, 2]]
+   - [[-1, 10], 1, Concat, [1]] # cat head P5
+   - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+   - [[16, 19, 22], 1, Detect, [nc]] # Detect(P3, P4, P5)
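To sanity-check that ch: 4 actually reaches the network, the YAML can be loaded and the stem convolution inspected. A rough sketch, assuming the usual Ultralytics layout where model.model.model[0] is the first Conv block:

from ultralytics import YOLO

model = YOLO("yolo11_rgbd.yaml")   # builds the architecture from this file
stem = model.model.model[0]        # first backbone Conv block
print(stem.conv.weight.shape)      # expected: torch.Size([64, 4, 3, 3]) for the 'l' scale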
code/RGBD/yolo_rgbd_model_inference.py ADDED
@@ -0,0 +1,96 @@
+ import os
+ import glob
+ import cv2
+ import numpy as np
+ import torch
+ from ultralytics import YOLO
+ from tqdm import tqdm
+
+ # ---- CONFIGURATION ----
+ model_path = 'path/to/your/model/weights/best.pt'
+ test_img_dir = 'path/to/your/rgbd/test/images'
+ output_rgb_dir = 'outputs/rgb'
+ output_depth_dir = 'outputs/depth'
+ class_names = ['Feeding', 'Lateral_lying', 'Sitting', 'Standing', 'Sternal_lying']
+ confidence_threshold = 0.65
+ input_size = 640  # Model input size
+
+ os.makedirs(output_rgb_dir, exist_ok=True)
+ os.makedirs(output_depth_dir, exist_ok=True)
+
+ # ---- Define consistent colors (BGR) for each class ----
+ COLORS = {
+     'Feeding': (255, 0, 0),         # Blue
+     'Lateral_lying': (0, 255, 0),   # Green
+     'Sitting': (0, 0, 255),         # Red
+     'Standing': (255, 255, 0),      # Cyan
+     'Sternal_lying': (255, 0, 255)  # Magenta
+ }
+
+ # ---- LOAD MODEL ----
+ model = YOLO(model_path).cuda().eval()
+
+ # ---- INFERENCE LOOP ----
+ image_paths = sorted(glob.glob(os.path.join(test_img_dir, '*.png')))
+
+ for img_path in tqdm(image_paths, desc="Visualizing Predictions"):
+     base = os.path.splitext(os.path.basename(img_path))[0]
+
+     # Load original 4-channel image
+     img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
+     if img is None or img.shape[-1] != 4:
+         print(f"Skipping {img_path}, invalid image format.")
+         continue
+
+     rgb = img[:, :, :3]
+     depth = img[:, :, 3]
+     orig_h, orig_w = rgb.shape[:2]
+
+     # Resize to model input size for inference
+     img_resized = cv2.resize(img, (input_size, input_size))
+     input_tensor = torch.from_numpy(img_resized).permute(2, 0, 1).float() / 255.0
+     input_tensor = input_tensor.unsqueeze(0).cuda()
+
+     # Inference
+     results = model.predict(input_tensor, imgsz=input_size, conf=confidence_threshold)[0]
+     boxes = results.boxes
+     classes = boxes.cls.cpu().numpy()
+     confidences = boxes.conf.cpu().numpy()
+     xyxy_resized = boxes.xyxy.cpu().numpy()
+
+     # Scale boxes back to original image size
+     scale_x = orig_w / input_size
+     scale_y = orig_h / input_size
+     xyxy_orig = np.copy(xyxy_resized)
+     xyxy_orig[:, [0, 2]] *= scale_x
+     xyxy_orig[:, [1, 3]] *= scale_y
+
+     # Normalize depth to uint8 for visualization
+     depth_norm = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX)
+     depth_uint8 = depth_norm.astype('uint8')
+
+     rgb_draw = rgb.copy()
+
+     # Apply a colormap for better visualization (COLORMAP_JET, COLORMAP_INFERNO, etc. also work)
+     depth_draw = cv2.applyColorMap(depth_uint8, cv2.COLORMAP_VIRIDIS)
+
+     # ---- Draw boxes on original-size RGB and Depth ----
+     for box, cls, conf in zip(xyxy_orig, classes, confidences):
+         x1, y1, x2, y2 = map(int, box)
+         label = f"{class_names[int(cls)]} {conf:.2f}"
+         color = COLORS.get(class_names[int(cls)], (255, 255, 255))  # Default to white
+
+         # RGB
+         cv2.rectangle(rgb_draw, (x1, y1), (x2, y2), color, 2)
+         cv2.putText(rgb_draw, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+         # Depth
+         cv2.rectangle(depth_draw, (x1, y1), (x2, y2), color, 2)
+         cv2.putText(depth_draw, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+     # Save images (cv2.imwrite expects BGR, which is what the colormap and drawing calls produce)
+     cv2.imwrite(os.path.join(output_rgb_dir, f"{base}.png"), rgb_draw)
+     cv2.imwrite(os.path.join(output_depth_dir, f"{base}.png"), depth_draw)
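If one combined frame per image is easier to review, the annotated RGB and depth views can be stitched side by side before saving. A small optional sketch; the helper and output path are not part of the original script:

import os
import cv2
import numpy as np

def save_side_by_side(rgb_draw, depth_draw, out_path):
    # Annotated RGB on the left, annotated depth colormap on the right (both H×W×3 uint8)
    panel = np.hstack([rgb_draw, depth_draw])
    cv2.imwrite(out_path, panel)

# e.g., at the end of the loop above:
# save_side_by_side(rgb_draw, depth_draw, os.path.join('outputs', f"{base}_panel.png"))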
code/RGBD/yolov11_usplf_rgbd.py ADDED
@@ -0,0 +1,79 @@
+ from ultralytics import YOLO
+ import ultralytics.data as ultralytics_data
+ from ultralytics.data.dataset import YOLODataset
+ import torch
+ import cv2
+
+ class CustomYOLODataset(YOLODataset):
+     def __init__(self, *args, **kwargs):
+         kwargs["data"] = dict(kwargs.get("data", {}), channels=4)
+         super().__init__(*args, **kwargs)
+
+     def __getitem__(self, index):
+         img_path = self.im_files[index]
+         img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
+         assert img.shape[-1] == 4, f"Image {img_path} has {img.shape[-1]} channels"
+         return super().__getitem__(index)
+
+ def build_dataloader_override(cfg, batch, img_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, shuffle=False, data_info=None):
+     dataset = CustomYOLODataset(
+         data=data_info,
+         img_size=img_size,
+         batch_size=batch,
+         augment=augment,
+         hyp=hyp,
+         rect=rect,
+         cache=cache,
+         single_cls=single_cls,
+         stride=int(stride),
+         pad=pad,
+         rank=rank,
+     )
+     loader = torch.utils.data.DataLoader(
+         dataset=dataset,
+         batch_size=batch,
+         shuffle=shuffle,
+         num_workers=workers,
+         sampler=None,
+         pin_memory=True,
+         collate_fn=getattr(dataset, "collate_fn", None),
+     )
+     return loader
+
+ # Point the module-level builder at the override so dataloaders use the 4-channel dataset
+ ultralytics_data.build_dataloader = build_dataloader_override
+
+ # Initialize model
+ model = YOLO("yolo11_rgbd.yaml")  # Ensure YAML has ch=4
+
+ # ---- Load Pretrained Weights ----
+ # The RGB-D YAML above uses the 'l' scale, so the matching RGB checkpoint is yolo11l.pt
+ pretrained = YOLO("yolo11l.pt").model.state_dict()
+ # pretrained = YOLO("yolo11n.pt").model.state_dict()  # use instead with the 'n' scale
+ model_state = model.model.state_dict()
+ filtered_pretrained = {k: v for k, v in pretrained.items() if not k.startswith(("model.23", "model.0.conv"))}
+ model_state.update(filtered_pretrained)
+
+ with torch.no_grad():
+     rgb_weights = pretrained["model.0.conv.weight"][:, :3]
+     depth_weights = torch.randn(64, 1, 3, 3) * 0.1  # For the YOLOv11l model (first conv has 64 filters)
+     # depth_weights = torch.randn(16, 1, 3, 3) * 0.1  # For the YOLOv11n model
+     model_state["model.0.conv.weight"] = torch.cat([rgb_weights, depth_weights], dim=1)
+
+ model.model.load_state_dict(model_state, strict=False)
+
+ # ---- Critical Warmup Fix ----
+ def custom_warmup(self, imgsz=(1, 4, 640, 640)):  # Force 4-channel input
+     self.forward(torch.zeros(imgsz).to(self.device))
+
+ model.model.warmup = custom_warmup.__get__(model.model)
+
+ # Train
+ model.train(
+     data="usplf_rgbd_dataset.yaml",
+     epochs=200,
+     imgsz=640,
+     batch=10,
+     device="0",
+     name="yolov11_rgbd_pretrained"
+ )
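Before a long training run it can be worth confirming that the stitched first-layer weights accept RGB-D input. A minimal sketch, assuming the script above has already built model and loaded the modified state dict:

import torch

dummy = torch.zeros(1, 4, 640, 640)            # one fake RGB-D image
with torch.no_grad():
    _ = model.model(dummy)                     # raw DetectionModel forward pass on CPU
print(model.model.model[0].conv.weight.shape)  # expected: torch.Size([64, 4, 3, 3])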
code/color/usplf_dataset.yaml ADDED
@@ -0,0 +1,16 @@
+ # pig_detect.yaml
+ path: usplf
+ train: train
+ val: valid
+
+ # Classes
+ names:
+   0: Feeding
+   1: Lateral_lying
+   2: Sitting
+   3: Standing
+   4: Sternal_lying
+
+ # 'feeding_pig', 'lateral_lying_pig', 'sitting_pig', 'standing_pig', 'sternal_lying_pig'
+
+ # /home/dcs/workspaces/pigDetect/datasets/data/annotation/val
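This YAML follows the standard Ultralytics detection layout: each image under usplf/train and usplf/valid is expected to have a matching .txt label file with one "class x_center y_center width height" row per box, normalized to [0, 1]. A small sketch to check the pairing, assuming images/ and labels/ subfolders (adjust if the images sit directly in train/ and valid/):

import os, glob

root = "usplf"  # 'path' from the YAML above
for split in ("train", "valid"):
    imgs = glob.glob(os.path.join(root, split, "images", "*.png"))
    missing = [p for p in imgs
               if not os.path.exists(os.path.join(root, split, "labels",
                                                  os.path.splitext(os.path.basename(p))[0] + ".txt"))]
    print(f"{split}: {len(imgs)} images, {len(missing)} without labels")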
code/color/usplf_hvd_data_preparation.py ADDED
@@ -0,0 +1,42 @@
+ import os, glob
+ import cv2
+ import numpy as np
+ from tqdm import tqdm
+
+ # Adjust these paths
+ color_dir = "path/to/your/color/images"
+ depth_dir = "path/to/your/depth/images"
+ out_dir = "path/to/your/hvd/images"  # Hue-Value-Depth
+
+ os.makedirs(out_dir, exist_ok=True)
+
+ print("Creating HVD images (Hue-Value-Depth)...")
+ for color_path in tqdm(glob.glob(os.path.join(color_dir, "*.png"))):
+     base = os.path.basename(color_path)
+     depth_path = os.path.join(depth_dir, base)
+     if not os.path.exists(depth_path):
+         continue
+
+     # Read images
+     bgr = cv2.imread(color_path)  # BGR format
+     depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
+
+     # Convert to HSV and extract channels
+     hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
+     h, s, v = cv2.split(hsv)
+
+     # Normalize depth
+     if depth.dtype == np.uint16:
+         # Preserve relative depth relationships while scaling
+         depth = (depth / depth.max() * 255).astype(np.uint8)
+     else:
+         depth = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX)
+         depth = depth.astype(np.uint8)
+
+     # Create HVD (Hue-Value-Depth) image
+     hvd = cv2.merge([h, v, depth])
+
+     # Save result
+     cv2.imwrite(os.path.join(out_dir, base), hvd)
+
+ print("HVD dataset preparation completed.")
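Because the HVD files look like ordinary 3-channel PNGs, it is easy to lose track of which plane is which. A quick sketch that splits one back into hue, value, and depth for inspection (the file name is a placeholder):

import cv2

hvd = cv2.imread("path/to/your/hvd/images/example.png")  # channels are H, V, D in that order
h, v, d = cv2.split(hvd)
print("hue range:", h.min(), h.max())      # OpenCV 8-bit hue spans 0-179
print("value range:", v.min(), v.max())
print("depth range:", d.min(), d.max())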
code/color/usplf_hvd_dataset.yaml ADDED
@@ -0,0 +1,44 @@
+
+
+ # # usplf_datasets.yaml (updated for RGB-D)
+ # path: ./datasets/usplf  # Base path
+ # train:
+ #   - path: train/images   # RGB images
+ #     depth: train/depth   # Depth images
+ #     labels: train/labels
+ # val:
+ #   - path: valid/images
+ #     depth: valid/depth
+ #     labels: valid/labels
+
+ # # RGB-D specific parameters
+ # modality: rgbd          # Marks this as RGB-D dataset
+ # depth_normalization:    # Depth-specific settings
+ #   min: 0.0              # Minimum depth in meters
+ #   max: 2.0              # Maximum depth in meters
+ #   scaling: 255.0        # Scale factor
+
+ # # Class names
+ # names:
+ #   0: Feeding
+ #   1: Lateral_lying
+ #   2: Sitting
+ #   3: Standing
+ #   4: Sternal_lying
+
+ # usplf_dataset.yaml
+ path: datasets/usplf/hvd  # Root directory
+ train: train  # Path to training images (relative to 'path')
+ val: valid    # Path to validation images
+ test: test    # Path to test images (optional)
+
+ # Number of classes
+ nc: 5  # five posture classes (the HVD input itself is a 3-channel Hue-Value-Depth image)
+
+ # Class names (replace with your pig postures)
+ names:
+   0: Feeding
+   1: Lateral_lying
+   2: Sitting
+   3: Standing
+   4: Sternal_lying
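A two-line check that the active block parses and exposes all five posture classes; a minimal sketch using PyYAML:

import yaml

with open("usplf_hvd_dataset.yaml") as f:
    cfg = yaml.safe_load(f)
print(cfg["nc"], list(cfg["names"].values()))  # expected: 5 and the five posture names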
code/color/yolo_model_evaluation.py ADDED
@@ -0,0 +1,136 @@
+ import os
+ import glob
+ import numpy as np
+ import matplotlib
+ matplotlib.use('TkAgg')  # interactive plotting
+ import matplotlib.pyplot as plt
+ from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
+ from ultralytics import YOLO
+ from tqdm import tqdm
+ import cv2
+ from collections import defaultdict
+
+
+ # ---- CONFIGURATION ----
+ # model_path = 'datasets/usplf/model_weight/color_model/best.pt'
+ # model_path = 'runs/usplf_depth_color/train/weights/best.pt'
+ # test_img_dir = 'datasets/usplf/depth_color/test/images'
+ # test_lbl_dir = 'datasets/usplf/depth_color/test/labels'
+
+ model_path = 'path/to/your/model/weights/best.pt'
+ test_img_dir = 'path/to/your/test/images'
+ test_lbl_dir = 'path/to/your/test/labels'
+ class_names = ['Feeding', 'Lateral_lying', 'Sitting', 'Standing', 'Sternal_lying']
+ n_classes = len(class_names)
+ iou_threshold = 0.5
+ confidence_threshold = 0.65
+
+ # ---- LOAD MODEL ----
+ model = YOLO(model_path)
+
+ # ---- IOU FUNCTION ----
+ def compute_iou(box1, box2):
+     xa1, ya1 = box1[0] - box1[2]/2, box1[1] - box1[3]/2
+     xa2, ya2 = box1[0] + box1[2]/2, box1[1] + box1[3]/2
+     xb1, yb1 = box2[0] - box2[2]/2, box2[1] - box2[3]/2
+     xb2, yb2 = box2[0] + box2[2]/2, box2[1] + box2[3]/2
+
+     inter_x1, inter_y1 = max(xa1, xb1), max(ya1, yb1)
+     inter_x2, inter_y2 = min(xa2, xb2), min(ya2, yb2)
+     inter_area = max(0, inter_x2 - inter_x1) * max(0, inter_y2 - inter_y1)
+     box1_area = (xa2 - xa1) * (ya2 - ya1)
+     box2_area = (xb2 - xb1) * (yb2 - yb1)
+     union_area = box1_area + box2_area - inter_area
+     return inter_area / union_area if union_area > 0 else 0
+
+ # Count ground truth instances and missed predictions (FN)
+ gt_counts = defaultdict(int)
+ missed_counts = defaultdict(int)
+
+ # ---- COLLECT PRED/GT PAIRS ----
+ y_true_all, y_pred_all = [], []
+
+ # grab both jpg and png
+ image_paths = sorted(glob.glob(os.path.join(test_img_dir, '*.jpg')) +
+                      glob.glob(os.path.join(test_img_dir, '*.png')))
+ print(f"Found {len(image_paths)} test images")
+
+ total_gt_inst = 0
+ for img_path in tqdm(image_paths, desc="Evaluating"):
+     img = cv2.imread(img_path)
+     h, w = img.shape[:2]
+     base = os.path.splitext(os.path.basename(img_path))[0]
+
+     # load GT boxes
+     gt_boxes = []
+     gt_file = os.path.join(test_lbl_dir, base + '.txt')
+     if os.path.exists(gt_file):
+         with open(gt_file) as f:
+             for line in f:
+                 parts = list(map(float, line.split()))
+                 # [cls, xc, yc, w, h] normalized
+                 gt_boxes.append(parts)
+                 total_gt_inst += 1
+     gt_used = [False]*len(gt_boxes)
+
+     # predict
+     res = model(img, conf=confidence_threshold, verbose=False)[0]
+     dets = res.boxes
+     preds = []
+     if dets is not None and len(dets.cls):
+         for cls, xywh in zip(dets.cls.cpu().numpy(), dets.xywhn.cpu().numpy()):
+             preds.append((int(cls), xywh))
+
+     # match preds → GT
+     for pred_cls, pred_box in preds:
+         matched = False
+         for i, (gt_cls, *gt_box) in enumerate(gt_boxes):
+             if gt_used[i]:
+                 continue
+             iou = compute_iou(pred_box, gt_box)
+             if iou >= iou_threshold:
+                 y_true_all.append(int(gt_cls))
+                 y_pred_all.append(pred_cls)
+                 gt_used[i] = True
+                 matched = True
+                 # print("GT: ", gt_cls, "Prd: ", pred_cls)
+                 break
+         if not matched:
+             # a prediction with no GT match → FP
+             y_true_all.append(n_classes)  # use index=n_classes for “background”
+             y_pred_all.append(pred_cls)
+
+     # any GT not matched → FN
+     for used, (gt_cls, *_) in zip(gt_used, gt_boxes):
+         if not used:
+             y_true_all.append(int(gt_cls))
+             y_pred_all.append(n_classes)  # “predicted” background
+
+ # ---- BUILD & SHOW METRICS ----
+ # we have n_classes real + 1 background class → ignore background in report
+ labels = list(range(n_classes))
+ # filter out any true/pred == n_classes (we don't pass them to classification_report)
+ # mask = [(t in labels and p in labels) for t, p in zip(y_true_all, y_pred_all)]
+ # y_true = [y_true_all[i] for i, m in enumerate(mask) if m]
+ # y_pred = [y_pred_all[i] for i, m in enumerate(mask) if m]
+ y_true = y_true_all
+ y_pred = y_pred_all
+
+ print("Total GT instances: ", total_gt_inst)
+ # confusion matrix
+ cm = confusion_matrix(y_true, y_pred, labels=labels)  # without normalization
+ # cm = confusion_matrix(y_true, y_pred, labels=labels, normalize='true')  # normalize by row
+ disp = ConfusionMatrixDisplay(cm, display_labels=class_names)
+ disp.plot(xticks_rotation=45, cmap='Blues')
+ plt.title("Confusion Matrix")
+ plt.tight_layout()
+ plt.savefig("path/to/your/confusion_matrix.png")
+ plt.show()
+
+ # classification report
+ print(classification_report(
+     y_true, y_pred,
+     labels=labels,
+     target_names=class_names,
+     digits=3,
+     zero_division=0
+ ))
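The matching logic hinges on compute_iou behaving sensibly for normalized [xc, yc, w, h] boxes, so a couple of hand-checkable cases are worth running once:

# Identical boxes -> IoU = 1.0
print(compute_iou([0.5, 0.5, 0.2, 0.2], [0.5, 0.5, 0.2, 0.2]))

# 0.2x0.2 boxes offset by half a width: intersection 0.02, union 0.06 -> IoU ≈ 0.333
print(compute_iou([0.5, 0.5, 0.2, 0.2], [0.6, 0.5, 0.2, 0.2]))

# Disjoint boxes -> 0
print(compute_iou([0.2, 0.2, 0.1, 0.1], [0.8, 0.8, 0.1, 0.1]))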
code/color/yolo_model_usplf_inference.py ADDED
@@ -0,0 +1,83 @@
+ import os
+ import glob
+ import cv2
+ from ultralytics import YOLO
+ from tqdm import tqdm
+
+ # ---- CONFIGURATION ----
+ # model_path = 'runs/usplf_color_yolo11n/train/weights/best.pt'
+ # test_img_dir = 'datasets/usplf/test/images'
+ model_path = 'path/to/your/model/weights/best.pt'
+ test_img_dir = 'path/to/your/test/images'
+ class_names = ['Feeding', 'Lateral_lying', 'Sitting', 'Standing', 'Sternal_lying']
+ output_rgb_dir = 'path/to/your/output/test_frames'
+ confidence_threshold = 0.45
+
+ os.makedirs(output_rgb_dir, exist_ok=True)
+
+ # ---- Define consistent colors (BGR) for each class ----
+ COLORS = {
+     'Feeding': (255, 0, 0),         # Blue
+     'Lateral_lying': (0, 255, 0),   # Green
+     'Sitting': (128, 128, 128),     # Grey
+     'Standing': (255, 255, 0),      # Cyan
+     'Sternal_lying': (255, 0, 255)  # Magenta
+ }
+
+ # ---- LOAD MODEL ----
+ model = YOLO(model_path).cuda().eval()
+
+ # ---- INFERENCE LOOP ----
+ image_paths = sorted(glob.glob(os.path.join(test_img_dir, '*.png')))  # Ensure consistent sorting
+
+ for img_path in tqdm(image_paths, desc="Visualizing Predictions"):
+     base = os.path.splitext(os.path.basename(img_path))[0]
+
+     # Load original image
+     img = cv2.imread(img_path)
+     if img is None:
+         print(f"Warning: Could not read image {img_path}")
+         continue
+
+     # Inference - use the numpy array directly
+     results = model(img, conf=confidence_threshold, verbose=False)[0]  # Process single image
+     boxes = results.boxes
+
+     # Create a copy for drawing detections
+     rgb_draw = img.copy()
+
+     # Process detections if any exist
+     if boxes is not None and len(boxes):
+         # Convert results to numpy arrays
+         classes = boxes.cls.cpu().numpy()
+         confidences = boxes.conf.cpu().numpy()
+         xyxy_coords = boxes.xyxy.cpu().numpy()
+
+         # Draw each detection
+         for box, cls_id, conf in zip(xyxy_coords, classes, confidences):
+             x1, y1, x2, y2 = map(int, box)  # Convert coordinates to integers
+             label = class_names[int(cls_id)]
+             color = COLORS.get(label, (255, 255, 255))  # Default to white if not found
+
+             # Draw bounding box
+             cv2.rectangle(rgb_draw, (x1, y1), (x2, y2), color, 2)
+
+             # Prepare label text
+             text = f"{label} {conf:.2f}"
+
+             # Calculate text position (avoid top overflow)
+             y_text = max(y1 - 5, 15)
+
+             # Draw label background
+             (text_width, text_height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
+             cv2.rectangle(rgb_draw, (x1, y1), (x1 + text_width, y1 - text_height - 5), color, -1)
+
+             # Draw text
+             cv2.putText(rgb_draw, text, (x1, y_text),
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
+
+     # Save output
+     cv2.imwrite(os.path.join(output_rgb_dir, f"{base}.png"), rgb_draw)
@@ -0,0 +1,25 @@
+ from ultralytics import YOLO
+
+ # Load a model
+ model = YOLO("yolo11l.pt")
+
+ # Train the model
+ train_results = model.train(
+     # data="usplf_hvd_dataset.yaml",  # path to dataset YAML for usplf depth_color dataset
+     data="usplf_dataset.yaml",  # path to dataset YAML for usplf color dataset
+     epochs=150,   # number of training epochs
+     imgsz=640,    # training image size
+     device="0",   # device to run on, i.e. device=0 or device=0,1,2,3 or device=cpu
+     batch=8,      # batch size
+ )
+
+ # Evaluate model performance on the validation set
+ metrics = model.val()
+
+ # Perform object detection on an image
+ # results = model("p1c1_20250108_085727.png")
+ results = model("datasets/usplf/hvd/test/images/p1c1_20250108_085727.png")
+ results[0].show()
+
+ # Export the model to ONNX format
+ path = model.export(format="onnx")  # return path to exported model
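If onnxruntime is available, the exported file can be sanity-checked straight away; a minimal sketch reusing the path returned by model.export above:

import onnxruntime as ort

session = ort.InferenceSession(path)
inp = session.get_inputs()[0]
print(inp.name, inp.shape)  # typically 'images' and [1, 3, 640, 640] for this RGB model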
model_weight/Depth/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6e3ba32605b7c68104f7cf2dc573a40b2b5bfc5a936240179745b1f6c0f2af2
+ size 5461395
model_weight/HVD/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28736c85f144eb6e84e55c6a1eb1aaeb61b26eecec43cc58e5f5512dcec45102
+ size 51189202
model_weight/RGB/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac0424695ade7f4946c8248f4cfdd3f78ef37afe8650039f9894d5c71fde1f85
+ size 152952970
model_weight/RGBD/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ab2932581d4e29ed1daac770990efd5ca721ab490e5258492ef494542a55fdc
+ size 51190617
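The four best.pt entries above are Git LFS pointer files, not the checkpoints themselves, so `git lfs install` followed by `git lfs pull` is needed after cloning before they can be loaded. A minimal loading check once the real files are in place, using the RGB weight as an example:

from ultralytics import YOLO

model = YOLO("model_weight/RGB/best.pt")
print(model.model.yaml.get("nc"))  # expected: 5 posture classes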