|
|
""" |
|
|
학습된 모델 성능 평가 스크립트 |
|
|
- Validation 데이터로 상세 평가 |
|
|
- 클래스별 성능 분석 |
|
|
- 혼동 행렬, PR 곡선 등 시각화 |
|
|
""" |
|
|
|
|
|
from ultralytics import YOLO |
|
|
from pathlib import Path |
|
|
import json |
|
|
import matplotlib.pyplot as plt |
|
|
import seaborn as sns |
|
|
import numpy as np |
|
|
from sklearn.metrics import classification_report, confusion_matrix |
|
|
|
|
|
|
|
|
def evaluate_model(model_path, data_yaml='dataset_split/data.yaml', save_dir='evaluation_results'):
    """Evaluate a trained YOLO detection model on the validation split.

    Runs the built-in validator, re-predicts a sample of validation images to
    build an approximate confusion matrix and classification report, renders
    per-class performance charts, and writes everything under *save_dir*.

    Args:
        model_path: Path to the trained model weights (.pt file).
        data_yaml: Dataset configuration file consumed by ``model.val``.
        save_dir: Directory for the plots, report and JSON summary.

    Returns:
        Tuple ``(metrics, results_dict)``: the ultralytics validation metrics
        object and the summary dict written to ``evaluation_results.json``.
    """
    print("=" * 70)
    print("모델 성능 평가")
    print("=" * 70)

    print(f"\n모델 로드: {model_path}")
    model = YOLO(model_path)

    save_path = Path(save_dir)
    # parents=True: a nested save_dir (e.g. "runs/eval") must not crash mkdir.
    save_path.mkdir(parents=True, exist_ok=True)

    class_names = ['Plastic', 'Vinyl', 'Can', 'Glass', 'Paper']

    # --- 1. Built-in validation pass --------------------------------------
    print("\n" + "=" * 70)
    print("1. Validation 데이터셋 평가")
    print("=" * 70)

    metrics = model.val(
        data=data_yaml,
        split='val',
        save_json=True,
        # NOTE(review): save_hybrid was removed in recent ultralytics
        # releases — confirm against the pinned version before upgrading.
        save_hybrid=True,
        conf=0.001,  # near-zero threshold so mAP integrates the full PR curve
        iou=0.6,
        max_det=300,
        plots=True,
    )

    _print_metric_tables(metrics, class_names)

    # --- 2. Re-predict a sample of validation images ----------------------
    print("\n" + "=" * 70)
    print("2. 상세 분석 - Validation 이미지 예측")
    print("=" * 70)

    all_true_labels, all_pred_labels = _collect_sample_labels(model)

    # --- 3. Confusion matrix + classification report ----------------------
    print("\n" + "=" * 70)
    print("3. 혼동 행렬 생성")
    print("=" * 70)

    if all_true_labels and all_pred_labels:
        _report_confusion(all_true_labels, all_pred_labels, class_names, save_path)

    # --- 4. Per-class bar charts ------------------------------------------
    print("\n" + "=" * 70)
    print("4. 성능 분석 그래프 생성")
    print("=" * 70)

    f1_scores = _plot_performance(metrics, class_names, save_path)

    # --- 5. JSON summary ---------------------------------------------------
    results_dict = _build_summary(metrics, class_names, f1_scores)
    json_path = save_path / 'evaluation_results.json'
    with open(json_path, 'w', encoding='utf-8') as f:
        json.dump(results_dict, f, indent=2, ensure_ascii=False)
    print(f"✅ 결과 JSON 저장: {json_path}")

    print("\n" + "=" * 70)
    print("평가 완료!")
    print("=" * 70)
    print(f"\n저장 위치: {save_path.absolute()}")
    print("\n생성된 파일:")
    print(" - confusion_matrix_detailed.png")
    print(" - performance_analysis.png")
    print(" - classification_report.txt")
    print(" - evaluation_results.json")

    return metrics, results_dict


def _print_metric_tables(metrics, class_names):
    """Print the overall and per-class detection metrics to stdout."""
    print("\n📊 전체 성능 지표:")
    print(f" mAP50 : {metrics.box.map50:.4f} (50% IoU에서 정확도)")
    print(f" mAP50-95 : {metrics.box.map:.4f} (50-95% IoU 평균)")
    print(f" Precision : {metrics.box.mp:.4f} (정밀도)")
    print(f" Recall : {metrics.box.mr:.4f} (재현율)")

    print("\n📋 클래스별 성능:")
    print(f"{'Class':<15} {'mAP50':>8} {'mAP50-95':>10} {'Precision':>10} {'Recall':>8}")
    print("-" * 65)
    for i, name in enumerate(class_names):
        # Guard the index: the validator only reports classes it actually saw.
        if i < len(metrics.box.ap50):
            map50 = metrics.box.ap50[i]
            map50_95 = metrics.box.ap[i]
            precision = metrics.box.p[i] if i < len(metrics.box.p) else 0
            recall = metrics.box.r[i] if i < len(metrics.box.r) else 0
            print(f"{name:<15} {map50:>8.4f} {map50_95:>10.4f} {precision:>10.4f} {recall:>8.4f}")


def _collect_sample_labels(model, max_images=100):
    """Predict up to *max_images* validation images and gather class ids.

    Returns ``(true_labels, pred_labels)``: flat lists of class indices read
    from the YOLO label files and from the model's detections respectively.

    NOTE(review): the two lists are NOT matched box-to-box (no IoU
    association), so any downstream confusion matrix is only a rough
    class-frequency comparison, not a true per-detection matrix.
    """
    val_txt = Path('dataset_split/val.txt')
    if val_txt.exists():
        with open(val_txt, 'r') as f:
            val_images = [line.strip() for line in f]
    else:
        # Fall back to globbing the image directory (case-insensitive .jpg).
        val_images_dir = Path('dataset_split/images/val')
        val_images = list(val_images_dir.glob('**/*.[jJ][pP][gG]'))

    print(f"Validation 이미지 수: {len(val_images)}")

    true_labels = []
    pred_labels = []
    print("예측 진행 중...")
    for img_path in val_images[:max_images]:
        results = model.predict(img_path, verbose=False, conf=0.25)

        # Derive the label path: images/ -> labels/, any image suffix -> .txt.
        # with_suffix handles .jpg/.JPG/.jpeg/.png uniformly (the old chained
        # str.replace missed everything but .jpg/.JPG).
        label_path = str(img_path).replace('/images/', '/labels/').replace('\\images\\', '\\labels\\')
        label_path = Path(label_path).with_suffix('.txt')

        if label_path.exists():
            with open(label_path, 'r') as f:
                for line in f:
                    parts = line.strip().split()
                    if len(parts) >= 5:  # YOLO format: class cx cy w h
                        true_labels.append(int(parts[0]))

        for r in results:
            for box in r.boxes:
                pred_labels.append(int(box.cls[0]))

    return true_labels, pred_labels


def _report_confusion(true_labels, pred_labels, class_names, save_path):
    """Plot a confusion matrix heatmap and write a classification report.

    The label lists are truncated to equal length before pairing; pairing is
    positional, not spatial (see _collect_sample_labels).
    """
    n = min(len(true_labels), len(pred_labels))
    true_labels = true_labels[:n]
    pred_labels = pred_labels[:n]

    num_classes = len(class_names)
    cm = confusion_matrix(true_labels, pred_labels, labels=range(num_classes))

    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_names,
                yticklabels=class_names)
    plt.title('Confusion Matrix', fontsize=16, fontweight='bold')
    plt.ylabel('True Label', fontsize=12)
    plt.xlabel('Predicted Label', fontsize=12)
    plt.tight_layout()

    cm_path = save_path / 'confusion_matrix_detailed.png'
    plt.savefig(cm_path, dpi=300, bbox_inches='tight')
    print(f"✅ 혼동 행렬 저장: {cm_path}")
    plt.close()

    print("\n📊 Classification Report:")
    # labels=... keeps the report aligned with target_names even when the
    # sample does not contain every class — without it sklearn raises
    # "Number of classes does not match size of target_names".
    report = classification_report(
        true_labels,
        pred_labels,
        labels=list(range(num_classes)),
        target_names=class_names,
        digits=4,
        zero_division=0,
    )
    print(report)

    with open(save_path / 'classification_report.txt', 'w') as f:
        f.write(report)


def _plot_performance(metrics, class_names, save_path):
    """Render the 2x2 per-class performance figure; return per-class F1 list."""
    num_classes = len(class_names)
    colors = ['#FF6B6B', '#4ECDC4', '#45B7D1', '#FFA07A', '#98D8C8']

    # Pad with 0 for classes the validator did not report.
    map50_values = [metrics.box.ap50[i] if i < len(metrics.box.ap50) else 0
                    for i in range(num_classes)]
    map50_95_values = [metrics.box.ap[i] if i < len(metrics.box.ap) else 0
                       for i in range(num_classes)]
    precision_values = [metrics.box.p[i] if i < len(metrics.box.p) else 0
                        for i in range(num_classes)]
    recall_values = [metrics.box.r[i] if i < len(metrics.box.r) else 0
                     for i in range(num_classes)]
    f1_scores = [2 * (p * r) / (p + r) if (p + r) > 0 else 0
                 for p, r in zip(precision_values, recall_values)]

    fig, axes = plt.subplots(2, 2, figsize=(14, 10))

    _bar_with_labels(axes[0, 0], class_names, map50_values, colors,
                     'mAP50', '클래스별 mAP50')
    _bar_with_labels(axes[0, 1], class_names, map50_95_values, colors,
                     'mAP50-95', '클래스별 mAP50-95')

    # Grouped precision/recall bars (different layout from the other panels).
    ax3 = axes[1, 0]
    x = np.arange(len(class_names))
    width = 0.35
    ax3.bar(x - width/2, precision_values, width, label='Precision',
            color='skyblue', alpha=0.8, edgecolor='black')
    ax3.bar(x + width/2, recall_values, width, label='Recall',
            color='lightcoral', alpha=0.8, edgecolor='black')
    ax3.set_ylabel('Score', fontsize=12, fontweight='bold')
    ax3.set_title('Precision vs Recall', fontsize=14, fontweight='bold')
    ax3.set_xticks(x)
    ax3.set_xticklabels(class_names)
    ax3.legend()
    ax3.set_ylim(0, 1)
    ax3.grid(axis='y', alpha=0.3)

    _bar_with_labels(axes[1, 1], class_names, f1_scores, colors,
                     'F1-Score', '클래스별 F1-Score')

    plt.tight_layout()
    performance_path = save_path / 'performance_analysis.png'
    plt.savefig(performance_path, dpi=300, bbox_inches='tight')
    print(f"✅ 성능 분석 그래프 저장: {performance_path}")
    plt.close()

    return f1_scores


def _bar_with_labels(ax, class_names, values, colors, ylabel, title):
    """Draw one annotated bar chart (value printed above each bar)."""
    bars = ax.bar(class_names, values, color=colors, alpha=0.7, edgecolor='black')
    ax.set_ylabel(ylabel, fontsize=12, fontweight='bold')
    ax.set_title(title, fontsize=14, fontweight='bold')
    ax.set_ylim(0, 1)
    ax.grid(axis='y', alpha=0.3)
    for bar in bars:
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2., height,
                f'{height:.3f}', ha='center', va='bottom', fontsize=10)


def _build_summary(metrics, class_names, f1_scores):
    """Assemble the JSON-serializable overall + per-class metric summary."""
    results_dict = {
        'overall': {
            'mAP50': float(metrics.box.map50),
            'mAP50_95': float(metrics.box.map),
            'precision': float(metrics.box.mp),
            'recall': float(metrics.box.mr),
        },
        'per_class': {},
    }
    for i, name in enumerate(class_names):
        if i < len(metrics.box.ap50):
            results_dict['per_class'][name] = {
                'mAP50': float(metrics.box.ap50[i]),
                'mAP50_95': float(metrics.box.ap[i]),
                'precision': float(metrics.box.p[i]) if i < len(metrics.box.p) else 0,
                'recall': float(metrics.box.r[i]) if i < len(metrics.box.r) else 0,
                'f1_score': f1_scores[i],
            }
    return results_dict
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    import sys

    # Model path comes from the first CLI argument when present,
    # otherwise from an interactive prompt with a sensible default.
    args = sys.argv[1:]
    if args:
        model_path = args[0]
    else:
        default_path = 'waste_classification/yolov8n_5class/weights/best.pt'
        print(f"모델 경로를 입력하세요 (엔터: {default_path}):")
        entered = input().strip()
        model_path = entered or default_path

    # Bail out early if the weights file does not exist.
    if not Path(model_path).exists():
        print(f"❌ 모델 파일을 찾을 수 없습니다: {model_path}")
        sys.exit(1)

    evaluate_model(model_path)
|
|
|