Spaces:
Sleeping
Sleeping
| import os | |
| import sys | |
| from detect import detect_face_shape | |
# Configuration: root of the labeled test dataset. Each immediate
# subdirectory is named after a face-shape class and holds that class's
# test images. The default path can be overridden via the
# TEST_DATASET_DIR environment variable (backward-compatible default).
TEST_DATASET_DIR = os.environ.get(
    "TEST_DATASET_DIR",
    "/home/codernotme/Projects/Github/katariaoptics/dataset/test",
)
def test_accuracy():
    """Evaluate detect_face_shape against a labeled test dataset.

    Walks TEST_DATASET_DIR, where each subdirectory name is the
    ground-truth face-shape label for the images inside it, runs
    detection on every image, and prints overall and per-class accuracy.

    Returns:
        None. Results are printed to stdout.
    """
    print(f"Testing accuracy using dataset at: {TEST_DATASET_DIR}")
    if not os.path.exists(TEST_DATASET_DIR):
        print("Test dataset directory not found.")
        return

    total_images = 0
    correct_predictions = 0
    class_stats = {}
    errors = 0  # images that raised during detection (counted, not fatal)

    # Each subdirectory of the dataset root is one class.
    classes = [
        d
        for d in os.listdir(TEST_DATASET_DIR)
        if os.path.isdir(os.path.join(TEST_DATASET_DIR, d))
    ]

    for label in classes:
        folder_path = os.path.join(TEST_DATASET_DIR, label)
        # Normalize folder name to lowercase so it matches the lowercase
        # class keys used by the classifier's result dict.
        ground_truth = label.lower()
        class_stats[ground_truth] = {"total": 0, "correct": 0}
        print(f"Processing class {label} (Ground Truth: {ground_truth})...")

        for filename in os.listdir(folder_path):
            img_path = os.path.join(folder_path, filename)
            try:
                # detect_face_shape returns a dict mapping class label ->
                # probability (e.g. {'heart': 0.8, 'oval': 0.2}), or an
                # "Error"/"Unknown" marker entry on failure.
                result = detect_face_shape(img_path)
                if not result or "Error" in result or "Unknown" in result:
                    continue
                # Take the argmax explicitly instead of relying on the
                # dict's insertion order to put the top class first.
                predicted_label = max(result, key=result.get).lower()
                total_images += 1
                class_stats[ground_truth]["total"] += 1
                if predicted_label == ground_truth:
                    correct_predictions += 1
                    class_stats[ground_truth]["correct"] += 1
            except Exception:
                # Best-effort evaluation: keep going, but count failures
                # instead of silently discarding them.
                errors += 1

    if total_images == 0:
        print("No images found or processed.")
        return

    print("\n--- Results ---")
    print(
        f"Overall Accuracy: {correct_predictions}/{total_images} "
        f"({correct_predictions / total_images * 100:.2f}%)"
    )
    if errors:
        print(f"Skipped {errors} image(s) due to processing errors.")
    print("\nPer Class Accuracy:")
    for cls, stats in class_stats.items():
        if stats["total"] > 0:
            acc = stats["correct"] / stats["total"] * 100
            print(f"  {cls}: {stats['correct']}/{stats['total']} ({acc:.2f}%)")
        else:
            print(f"  {cls}: No images")
# Script entry point: run the dataset accuracy evaluation when executed
# directly (no effect when imported as a module).
if __name__ == "__main__":
    test_accuracy()