MODEL/Validation.py

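"""Validate a trained YOLOv8 bleeding-detection model on a held-out split.

The script expects a folder of .png validation images (bleeding frames are
assumed to be the ones named `img-*`) and a folder of matching YOLO-format
.txt label files. It runs the model on every image, treats "at least one
detection" as a positive bleeding prediction, and reports image-level
classification metrics, IoU statistics against the ground-truth boxes, and a
confidence-thresholded per-class detection summary.
"""
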
import os
import cv2
from ultralytics import YOLO
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, accuracy_score, f1_score
from tqdm import tqdm

# Paths to the validation images and labels folders
images_folder = r'D:\CSE\Semester - 5\Extra\IIT jammu\YOLO\Code\Data\valid\images'
labels_folder = r'D:\CSE\Semester - 5\Extra\IIT jammu\YOLO\Code\Data\valid\labels'

# Image-level classification results
true_labels = []
predicted_labels = []

# Best IoU per detected box
iou_scores = []

# Load the trained YOLOv8 model
model = YOLO("best_75.pt")

# Per-image detection records (path, true class, confidence of the first detection)
detection_results = []

def calculate_iou(box1_tensor, box2_list, image_width, image_height):
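    """Return the IoU between one predicted box and one ground-truth box.

    `box1_tensor` is one row of the model's box tensor, i.e. (x1, y1, x2, y2,
    confidence, class_id) in pixel coordinates; `box2_list` is a parsed
    YOLO-format label line (class, x_center, y_center, width, height) with
    coordinates normalized to [0, 1]. Both boxes are compared in normalized
    corner coordinates.
    """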
|
|
    # Model prediction: pixel-space corners plus confidence and class id
    x1, y1, x2, y2, confidence, class_id = box1_tensor.tolist()

    # Ground-truth label: class id, center coordinates and size, normalized to [0, 1]
    class_id_gt, cx_gt, cy_gt, w_gt, h_gt = box2_list

    # Normalize the predicted corner coordinates to [0, 1]
    x1, y1, x2, y2 = x1 / image_width, y1 / image_height, x2 / image_width, y2 / image_height

    # Convert the ground-truth box from center format to corner format
    x1_gt, y1_gt = cx_gt - w_gt / 2, cy_gt - h_gt / 2
    x2_gt, y2_gt = cx_gt + w_gt / 2, cy_gt + h_gt / 2

    # Intersection rectangle
    xA = max(x1, x1_gt)
    yA = max(y1, y1_gt)
    xB = min(x2, x2_gt)
    yB = min(y2, y2_gt)

    # Intersection area (zero when the boxes do not overlap)
    interArea = max(0, xB - xA) * max(0, yB - yA)

    # Areas of the two boxes
    box1Area = (x2 - x1) * (y2 - y1)
    box2Area = w_gt * h_gt

    # IoU = intersection / union
    iou = interArea / float(box1Area + box2Area - interArea)

    return iou

# Iterate through the image files and their corresponding label files
for image_file in tqdm(os.listdir(images_folder)):
    if image_file.endswith('.png'):
        image_path = os.path.join(images_folder, image_file)

        # Determine the true class (bleeding or non-bleeding) from the file name
        is_bleeding = image_file.startswith('img-')

        # Load the image
        image = cv2.imread(image_path)

        # Run inference
        results = model.predict(image)

        # The model predicts "bleeding" if it returns at least one box
        detected_bleeding = results[0].boxes.shape[0] > 0

        # Record the true and predicted labels
        true_labels.append(is_bleeding)
        predicted_labels.append(detected_bleeding)

        # Read the ground-truth bounding boxes from the matching label file
        label_file_name = os.path.splitext(image_file)[0] + '.txt'
        label_file_path = os.path.join(labels_folder, label_file_name)

        if os.path.exists(label_file_path):
            with open(label_file_path, 'r') as label_file:
                lines = label_file.readlines()
                # Each label line is YOLO format: class x_center y_center width height (normalized)
                ground_truth_boxes = [list(map(float, line.strip().split())) for line in lines]

            # For each detected box, keep the best IoU against any ground-truth box
            if results[0].boxes.shape[0] > 0:
                for box in results[0].boxes.data:
                    iou_values = []
                    for gt_box in ground_truth_boxes:
                        iou = calculate_iou(box, gt_box, image.shape[1], image.shape[0])
                        iou_values.append(iou)

                    max_iou = max(iou_values) if iou_values else 0
                    iou_scores.append(max_iou)

        # Record the first detection (if any) for the detection summary
        if results[0].boxes.shape[0] > 0:
            box = results[0].boxes[0]
            detection_results.append({
                'image_path': image_path,
                'is_bleeding': is_bleeding,
                'detection_confidence': box.conf[0].item()
            })

# Image-level classification metrics
accuracy = accuracy_score(true_labels, predicted_labels)
report = classification_report(true_labels, predicted_labels, target_names=['Non-Bleeding', 'Bleeding'], output_dict=True)
f1 = f1_score(true_labels, predicted_labels)

# Summarize the achieved evaluation metrics in a table
classification_metrics = {
    'Accuracy': [accuracy],
    'Recall (Non-Bleeding)': [report['Non-Bleeding']['recall']],
    'Recall (Bleeding)': [report['Bleeding']['recall']],
    'F1-Score': [f1]
}

classification_df = pd.DataFrame(classification_metrics)
print("Classification Metrics:")
print(classification_df)

print()
print("Detection Metrics:")
print("IoU Metrics:")
# Calculate and print IoU statistics
iou_scores = np.array(iou_scores)
iou_mean = np.mean(iou_scores)
iou_median = np.median(iou_scores)
iou_75th_percentile = np.percentile(iou_scores, 75)
print("IoU Mean:", iou_mean)
print("IoU Median:", iou_median)
print("IoU 75th Percentile:", iou_75th_percentile)

# Approximate per-class detection AP as the fraction of images whose first
# detection clears the confidence threshold (a simple proxy, not a full
# precision-recall average precision)
detection_results_df = pd.DataFrame(detection_results)
detection_results_df['detection_confidence'] = detection_results_df['detection_confidence'].apply(lambda x: x.item() if isinstance(x, np.float32) else x)
detection_results_df['is_detected'] = detection_results_df['detection_confidence'] > 0.5  # Adjust the confidence threshold as needed
detection_results_df['is_detected'] = detection_results_df['is_detected'].astype(int)
detection_ap = detection_results_df.groupby('is_bleeding')['is_detected'].apply(lambda x: np.mean(x))
mAP = detection_ap.mean()
print("Detection Average Precision (AP):")
print(detection_ap)
|
|
print("Mean Average Precision (mAP):", mAP)