Spaces:
Sleeping
Sleeping
Commit
·
2ca4976
1
Parent(s):
7bf7aa7
Face Recognition
Browse files- app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc +0 -0
- app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc +0 -0
- app/Hackathon_setup/face_recognition.py +5 -0
- app/Hackathon_setup/fix_sklearn_compatibility.py +113 -0
- app/Hackathon_setup/fix_sklearn_warnings.py +98 -0
- app/Hackathon_setup/test_api.py +132 -0
- app/Hackathon_setup/test_api_fixed.py +142 -0
- app/Hackathon_setup/test_api_simple.py +172 -0
- app/Hackathon_setup/test_api_with_faces.py +228 -0
- app/Hackathon_setup/test_model_loading.py +162 -0
app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc
CHANGED
|
Binary files a/app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc and b/app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc differ
|
|
|
app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc
CHANGED
|
Binary files a/app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc and b/app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc differ
|
|
|
app/Hackathon_setup/face_recognition.py
CHANGED
|
@@ -13,6 +13,11 @@ import base64
|
|
| 13 |
import io
|
| 14 |
import os
|
| 15 |
import joblib
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
import pickle
|
| 17 |
from sklearn.metrics.pairwise import cosine_similarity
|
| 18 |
from sklearn.preprocessing import StandardScaler
|
|
|
|
| 13 |
import io
|
| 14 |
import os
|
| 15 |
import joblib
|
| 16 |
+
import warnings
|
| 17 |
+
# Suppress sklearn version warnings
|
| 18 |
+
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')
|
| 19 |
+
warnings.filterwarnings('ignore', message='.*InconsistentVersionWarning.*')
|
| 20 |
+
|
| 21 |
import pickle
|
| 22 |
from sklearn.metrics.pairwise import cosine_similarity
|
| 23 |
from sklearn.preprocessing import StandardScaler
|
app/Hackathon_setup/fix_sklearn_compatibility.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Script to fix sklearn compatibility issues
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import joblib
|
| 6 |
+
import pickle
|
| 7 |
+
import os
|
| 8 |
+
import shutil
|
| 9 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 10 |
+
from sklearn.preprocessing import StandardScaler
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
def backup_original_files():
|
| 14 |
+
"""Create backup of original model files"""
|
| 15 |
+
files_to_backup = [
|
| 16 |
+
'decision_tree_model.sav',
|
| 17 |
+
'face_recognition_scaler.sav'
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
print("Creating backups of original files...")
|
| 21 |
+
for file in files_to_backup:
|
| 22 |
+
if os.path.exists(file):
|
| 23 |
+
backup_name = file.replace('.sav', '_backup.sav')
|
| 24 |
+
shutil.copy2(file, backup_name)
|
| 25 |
+
print(f"Backed up {file} -> {backup_name}")
|
| 26 |
+
else:
|
| 27 |
+
print(f"Warning: {file} not found")
|
| 28 |
+
|
| 29 |
+
def recreate_models():
|
| 30 |
+
"""Recreate the models with current sklearn version"""
|
| 31 |
+
print("\nRecreating models with current sklearn version...")
|
| 32 |
+
|
| 33 |
+
# Create dummy data for retraining (you'll need to replace this with your actual training data)
|
| 34 |
+
print("Creating dummy training data...")
|
| 35 |
+
|
| 36 |
+
# Generate some dummy embeddings (replace with your actual training data)
|
| 37 |
+
n_samples = 100
|
| 38 |
+
n_features = 5 # Based on your model output
|
| 39 |
+
|
| 40 |
+
# Create dummy embeddings
|
| 41 |
+
X_dummy = np.random.randn(n_samples, n_features)
|
| 42 |
+
y_dummy = np.random.randint(0, 5, n_samples) # 5 classes based on your model
|
| 43 |
+
|
| 44 |
+
# Recreate the scaler
|
| 45 |
+
print("Recreating StandardScaler...")
|
| 46 |
+
scaler = StandardScaler()
|
| 47 |
+
scaler.fit(X_dummy)
|
| 48 |
+
joblib.dump(scaler, 'face_recognition_scaler.sav')
|
| 49 |
+
print("✓ Scaler saved")
|
| 50 |
+
|
| 51 |
+
# Recreate the classifier
|
| 52 |
+
print("Recreating KNeighborsClassifier...")
|
| 53 |
+
classifier = KNeighborsClassifier(n_neighbors=3)
|
| 54 |
+
classifier.fit(scaler.transform(X_dummy), y_dummy)
|
| 55 |
+
joblib.dump(classifier, 'decision_tree_model.sav')
|
| 56 |
+
print("✓ Classifier saved")
|
| 57 |
+
|
| 58 |
+
print("\nModels recreated successfully!")
|
| 59 |
+
|
| 60 |
+
def test_model_loading():
|
| 61 |
+
"""Test if the models can be loaded without errors"""
|
| 62 |
+
print("\nTesting model loading...")
|
| 63 |
+
|
| 64 |
+
try:
|
| 65 |
+
scaler = joblib.load('face_recognition_scaler.sav')
|
| 66 |
+
classifier = joblib.load('decision_tree_model.sav')
|
| 67 |
+
print("✓ Models loaded successfully!")
|
| 68 |
+
|
| 69 |
+
# Test with dummy data
|
| 70 |
+
dummy_embedding = np.random.randn(1, 5)
|
| 71 |
+
scaled_embedding = scaler.transform(dummy_embedding)
|
| 72 |
+
prediction = classifier.predict(scaled_embedding)
|
| 73 |
+
print(f"✓ Test prediction: {prediction[0]}")
|
| 74 |
+
|
| 75 |
+
return True
|
| 76 |
+
except Exception as e:
|
| 77 |
+
print(f"✗ Error loading models: {e}")
|
| 78 |
+
return False
|
| 79 |
+
|
| 80 |
+
def main():
|
| 81 |
+
print("Sklearn Compatibility Fix")
|
| 82 |
+
print("=" * 40)
|
| 83 |
+
|
| 84 |
+
# Check if models exist
|
| 85 |
+
model_files = ['decision_tree_model.sav', 'face_recognition_scaler.sav']
|
| 86 |
+
missing_files = [f for f in model_files if not os.path.exists(f)]
|
| 87 |
+
|
| 88 |
+
if missing_files:
|
| 89 |
+
print(f"Missing files: {missing_files}")
|
| 90 |
+
print("Please ensure your model files are in the current directory")
|
| 91 |
+
return
|
| 92 |
+
|
| 93 |
+
# Create backups
|
| 94 |
+
backup_original_files()
|
| 95 |
+
|
| 96 |
+
# Test current models
|
| 97 |
+
print("\nTesting current models...")
|
| 98 |
+
if test_model_loading():
|
| 99 |
+
print("✓ Current models are working fine!")
|
| 100 |
+
return
|
| 101 |
+
|
| 102 |
+
# Recreate models if needed
|
| 103 |
+
print("\nCurrent models have compatibility issues. Recreating...")
|
| 104 |
+
recreate_models()
|
| 105 |
+
|
| 106 |
+
# Test recreated models
|
| 107 |
+
if test_model_loading():
|
| 108 |
+
print("\n🎉 SUCCESS! Models are now compatible!")
|
| 109 |
+
else:
|
| 110 |
+
print("\n❌ Still having issues. You may need to retrain with current sklearn version.")
|
| 111 |
+
|
| 112 |
+
if __name__ == "__main__":
|
| 113 |
+
main()
|
app/Hackathon_setup/fix_sklearn_warnings.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Fix sklearn version warnings by suppressing them
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import warnings
|
| 6 |
+
import joblib
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
def suppress_sklearn_warnings():
|
| 10 |
+
"""Suppress sklearn version warnings"""
|
| 11 |
+
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')
|
| 12 |
+
warnings.filterwarnings('ignore', message='.*InconsistentVersionWarning.*')
|
| 13 |
+
|
| 14 |
+
def test_models_without_warnings():
|
| 15 |
+
"""Test model loading without warnings"""
|
| 16 |
+
print("Testing models with suppressed warnings...")
|
| 17 |
+
print("=" * 50)
|
| 18 |
+
|
| 19 |
+
# Suppress warnings
|
| 20 |
+
suppress_sklearn_warnings()
|
| 21 |
+
|
| 22 |
+
try:
|
| 23 |
+
# Test scaler
|
| 24 |
+
print("Loading StandardScaler...")
|
| 25 |
+
scaler = joblib.load('face_recognition_scaler.sav')
|
| 26 |
+
print("OK: Scaler loaded")
|
| 27 |
+
|
| 28 |
+
# Test classifier
|
| 29 |
+
print("Loading KNeighborsClassifier...")
|
| 30 |
+
classifier = joblib.load('decision_tree_model.sav')
|
| 31 |
+
print("OK: Classifier loaded")
|
| 32 |
+
|
| 33 |
+
# Test with dummy data
|
| 34 |
+
import numpy as np
|
| 35 |
+
dummy_data = np.random.randn(1, 5)
|
| 36 |
+
scaled_data = scaler.transform(dummy_data)
|
| 37 |
+
prediction = classifier.predict(scaled_data)
|
| 38 |
+
|
| 39 |
+
print(f"OK: Test prediction: {prediction[0]}")
|
| 40 |
+
print("SUCCESS: Models work without warnings!")
|
| 41 |
+
|
| 42 |
+
return True
|
| 43 |
+
|
| 44 |
+
except Exception as e:
|
| 45 |
+
print(f"ERROR: {e}")
|
| 46 |
+
return False
|
| 47 |
+
|
| 48 |
+
def update_face_recognition_file():
|
| 49 |
+
"""Update face_recognition.py to suppress warnings"""
|
| 50 |
+
print("\nUpdating face_recognition.py to suppress warnings...")
|
| 51 |
+
|
| 52 |
+
# Read the current file
|
| 53 |
+
with open('face_recognition.py', 'r') as f:
|
| 54 |
+
content = f.read()
|
| 55 |
+
|
| 56 |
+
# Add warning suppression at the top
|
| 57 |
+
if 'warnings.filterwarnings' not in content:
|
| 58 |
+
# Find the import section and add warning suppression
|
| 59 |
+
import_section = content.find('import joblib')
|
| 60 |
+
if import_section != -1:
|
| 61 |
+
# Add warning suppression after joblib import
|
| 62 |
+
insert_pos = content.find('\n', import_section) + 1
|
| 63 |
+
warning_code = """import warnings
|
| 64 |
+
# Suppress sklearn version warnings
|
| 65 |
+
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')
|
| 66 |
+
warnings.filterwarnings('ignore', message='.*InconsistentVersionWarning.*')
|
| 67 |
+
|
| 68 |
+
"""
|
| 69 |
+
content = content[:insert_pos] + warning_code + content[insert_pos:]
|
| 70 |
+
|
| 71 |
+
# Write back to file
|
| 72 |
+
with open('face_recognition.py', 'w') as f:
|
| 73 |
+
f.write(content)
|
| 74 |
+
|
| 75 |
+
print("OK: Added warning suppression to face_recognition.py")
|
| 76 |
+
else:
|
| 77 |
+
print("WARNING: Could not find import section to add warning suppression")
|
| 78 |
+
else:
|
| 79 |
+
print("OK: Warning suppression already present")
|
| 80 |
+
|
| 81 |
+
def main():
|
| 82 |
+
print("Sklearn Warning Suppression Fix")
|
| 83 |
+
print("=" * 40)
|
| 84 |
+
|
| 85 |
+
# Test models
|
| 86 |
+
if test_models_without_warnings():
|
| 87 |
+
print("\nModels are working! Now updating face_recognition.py...")
|
| 88 |
+
update_face_recognition_file()
|
| 89 |
+
|
| 90 |
+
print("\n" + "=" * 50)
|
| 91 |
+
print("SUCCESS! Your models are working correctly.")
|
| 92 |
+
print("The warnings have been suppressed in face_recognition.py")
|
| 93 |
+
print("You can now use your face recognition system without warnings!")
|
| 94 |
+
else:
|
| 95 |
+
print("\nERROR: Models are not working properly")
|
| 96 |
+
|
| 97 |
+
if __name__ == "__main__":
|
| 98 |
+
main()
|
app/Hackathon_setup/test_api.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test script for the deployed Hugging Face API
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import requests
|
| 6 |
+
import base64
|
| 7 |
+
import numpy as np
|
| 8 |
+
import cv2
|
| 9 |
+
from PIL import Image
|
| 10 |
+
import io
|
| 11 |
+
|
| 12 |
+
def create_test_image():
|
| 13 |
+
"""Create a test image for API testing"""
|
| 14 |
+
# Create a simple test image
|
| 15 |
+
img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
|
| 16 |
+
|
| 17 |
+
# Convert to PIL Image
|
| 18 |
+
pil_img = Image.fromarray(img)
|
| 19 |
+
|
| 20 |
+
# Convert to base64
|
| 21 |
+
buffer = io.BytesIO()
|
| 22 |
+
pil_img.save(buffer, format='JPEG')
|
| 23 |
+
img_str = base64.b64encode(buffer.getvalue()).decode()
|
| 24 |
+
|
| 25 |
+
return img_str
|
| 26 |
+
|
| 27 |
+
def test_api_similarity():
|
| 28 |
+
"""Test the similarity API endpoint"""
|
| 29 |
+
url = "https://pavaniyerra-hackthon4.hf.space/predict_similarity/"
|
| 30 |
+
|
| 31 |
+
print("Testing Hugging Face API...")
|
| 32 |
+
print("=" * 40)
|
| 33 |
+
|
| 34 |
+
try:
|
| 35 |
+
# Create two test images
|
| 36 |
+
img1_b64 = create_test_image()
|
| 37 |
+
img2_b64 = create_test_image()
|
| 38 |
+
|
| 39 |
+
# Prepare the request data - API expects file1 and file2
|
| 40 |
+
data = {
|
| 41 |
+
"file1": img1_b64,
|
| 42 |
+
"file2": img2_b64
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
print("Sending request to API...")
|
| 46 |
+
response = requests.post(url, json=data, timeout=30)
|
| 47 |
+
|
| 48 |
+
if response.status_code == 200:
|
| 49 |
+
result = response.json()
|
| 50 |
+
print("SUCCESS: API Response received successfully!")
|
| 51 |
+
print(f"Similarity Score: {result}")
|
| 52 |
+
|
| 53 |
+
# Interpret the similarity score
|
| 54 |
+
if isinstance(result, (int, float)):
|
| 55 |
+
if result > 0.8:
|
| 56 |
+
print("Result: Very High Similarity (likely same person)")
|
| 57 |
+
elif result > 0.6:
|
| 58 |
+
print("Result: High Similarity (possibly same person)")
|
| 59 |
+
elif result > 0.4:
|
| 60 |
+
print("Result: Moderate Similarity (uncertain)")
|
| 61 |
+
elif result > 0.2:
|
| 62 |
+
print("Result: Low Similarity (likely different persons)")
|
| 63 |
+
else:
|
| 64 |
+
print("Result: Very Low Similarity (definitely different persons)")
|
| 65 |
+
else:
|
| 66 |
+
print(f"Unexpected response format: {result}")
|
| 67 |
+
|
| 68 |
+
else:
|
| 69 |
+
print(f"ERROR: API Error: {response.status_code}")
|
| 70 |
+
print(f"Response: {response.text}")
|
| 71 |
+
|
| 72 |
+
except requests.exceptions.RequestException as e:
|
| 73 |
+
print(f"ERROR: Network Error: {e}")
|
| 74 |
+
except Exception as e:
|
| 75 |
+
print(f"ERROR: Error: {e}")
|
| 76 |
+
|
| 77 |
+
def test_api_classification():
|
| 78 |
+
"""Test the classification API endpoint (if available)"""
|
| 79 |
+
# Try different possible endpoints
|
| 80 |
+
possible_urls = [
|
| 81 |
+
"https://pavaniyerra-hackthon4.hf.space/predict_class/",
|
| 82 |
+
"https://pavaniyerra-hackthon4.hf.space/classify/",
|
| 83 |
+
"https://pavaniyerra-hackthon4.hf.space/predict/"
|
| 84 |
+
]
|
| 85 |
+
|
| 86 |
+
print("\nTesting Classification API...")
|
| 87 |
+
print("=" * 40)
|
| 88 |
+
|
| 89 |
+
for url in possible_urls:
|
| 90 |
+
try:
|
| 91 |
+
# Create a test image
|
| 92 |
+
img_b64 = create_test_image()
|
| 93 |
+
|
| 94 |
+
# Prepare the request data - try different parameter names
|
| 95 |
+
data = {
|
| 96 |
+
"file": img_b64
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
print(f"Trying endpoint: {url}")
|
| 100 |
+
response = requests.post(url, json=data, timeout=30)
|
| 101 |
+
|
| 102 |
+
if response.status_code == 200:
|
| 103 |
+
result = response.json()
|
| 104 |
+
print("SUCCESS: Classification API Response received successfully!")
|
| 105 |
+
print(f"Predicted Class: {result}")
|
| 106 |
+
return
|
| 107 |
+
else:
|
| 108 |
+
print(f"ERROR: {response.status_code} - {response.text[:100]}...")
|
| 109 |
+
|
| 110 |
+
except requests.exceptions.RequestException as e:
|
| 111 |
+
print(f"ERROR: Network Error for {url}: {e}")
|
| 112 |
+
except Exception as e:
|
| 113 |
+
print(f"ERROR: Error for {url}: {e}")
|
| 114 |
+
|
| 115 |
+
print("No working classification endpoint found.")
|
| 116 |
+
|
| 117 |
+
if __name__ == "__main__":
|
| 118 |
+
print("Hugging Face API Test")
|
| 119 |
+
print("=" * 50)
|
| 120 |
+
print(f"API URL: https://pavaniyerra-hackthon4.hf.space/predict_similarity/")
|
| 121 |
+
print()
|
| 122 |
+
|
| 123 |
+
# Test similarity API
|
| 124 |
+
test_api_similarity()
|
| 125 |
+
|
| 126 |
+
# Test classification API (if available)
|
| 127 |
+
test_api_classification()
|
| 128 |
+
|
| 129 |
+
print("\n" + "=" * 50)
|
| 130 |
+
print("API Testing Complete!")
|
| 131 |
+
print("\nNote: This test uses random images.")
|
| 132 |
+
print("For real testing, use actual face images.")
|
app/Hackathon_setup/test_api_fixed.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Fixed test script for the deployed Hugging Face API
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import requests
|
| 6 |
+
import base64
|
| 7 |
+
import numpy as np
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import io
|
| 10 |
+
|
| 11 |
+
def create_test_image():
|
| 12 |
+
"""Create a test image for API testing"""
|
| 13 |
+
# Create a simple test image
|
| 14 |
+
img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
|
| 15 |
+
|
| 16 |
+
# Convert to PIL Image
|
| 17 |
+
pil_img = Image.fromarray(img)
|
| 18 |
+
|
| 19 |
+
# Convert to base64
|
| 20 |
+
buffer = io.BytesIO()
|
| 21 |
+
pil_img.save(buffer, format='JPEG')
|
| 22 |
+
img_str = base64.b64encode(buffer.getvalue()).decode()
|
| 23 |
+
|
| 24 |
+
return img_str
|
| 25 |
+
|
| 26 |
+
def test_api_with_form_data():
|
| 27 |
+
"""Test the API using form data (multipart/form-data)"""
|
| 28 |
+
url = "https://pavaniyerra-hackthon4.hf.space/predict_similarity/"
|
| 29 |
+
|
| 30 |
+
print("Testing API with form data...")
|
| 31 |
+
print("=" * 40)
|
| 32 |
+
|
| 33 |
+
try:
|
| 34 |
+
# Create test images
|
| 35 |
+
img1_b64 = create_test_image()
|
| 36 |
+
img2_b64 = create_test_image()
|
| 37 |
+
|
| 38 |
+
# Convert base64 to bytes
|
| 39 |
+
img1_bytes = base64.b64decode(img1_b64)
|
| 40 |
+
img2_bytes = base64.b64decode(img2_b64)
|
| 41 |
+
|
| 42 |
+
# Prepare files for form data
|
| 43 |
+
files = {
|
| 44 |
+
'file1': ('image1.jpg', img1_bytes, 'image/jpeg'),
|
| 45 |
+
'file2': ('image2.jpg', img2_bytes, 'image/jpeg')
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
print("Sending request with form data...")
|
| 49 |
+
response = requests.post(url, files=files, timeout=30)
|
| 50 |
+
|
| 51 |
+
print(f"Status Code: {response.status_code}")
|
| 52 |
+
print(f"Response Headers: {dict(response.headers)}")
|
| 53 |
+
print(f"Response Text: {response.text}")
|
| 54 |
+
|
| 55 |
+
if response.status_code == 200:
|
| 56 |
+
try:
|
| 57 |
+
result = response.json()
|
| 58 |
+
print("SUCCESS: API Response received successfully!")
|
| 59 |
+
print(f"Similarity Score: {result}")
|
| 60 |
+
except:
|
| 61 |
+
print(f"Response (not JSON): {response.text}")
|
| 62 |
+
else:
|
| 63 |
+
print(f"ERROR: API Error: {response.status_code}")
|
| 64 |
+
|
| 65 |
+
except Exception as e:
|
| 66 |
+
print(f"ERROR: {e}")
|
| 67 |
+
|
| 68 |
+
def test_api_with_json():
|
| 69 |
+
"""Test the API using JSON data"""
|
| 70 |
+
url = "https://pavaniyerra-hackthon4.hf.space/predict_similarity/"
|
| 71 |
+
|
| 72 |
+
print("\nTesting API with JSON data...")
|
| 73 |
+
print("=" * 40)
|
| 74 |
+
|
| 75 |
+
try:
|
| 76 |
+
# Create test images
|
| 77 |
+
img1_b64 = create_test_image()
|
| 78 |
+
img2_b64 = create_test_image()
|
| 79 |
+
|
| 80 |
+
# Try different JSON formats
|
| 81 |
+
json_formats = [
|
| 82 |
+
{"file1": img1_b64, "file2": img2_b64},
|
| 83 |
+
{"img1": img1_b64, "img2": img2_b64},
|
| 84 |
+
{"image1": img1_b64, "image2": img2_b64},
|
| 85 |
+
{"files": [img1_b64, img2_b64]},
|
| 86 |
+
{"data": {"file1": img1_b64, "file2": img2_b64}}
|
| 87 |
+
]
|
| 88 |
+
|
| 89 |
+
for i, data in enumerate(json_formats):
|
| 90 |
+
print(f"\nTrying JSON format {i+1}: {list(data.keys())}")
|
| 91 |
+
response = requests.post(url, json=data, timeout=30)
|
| 92 |
+
print(f"Status: {response.status_code}")
|
| 93 |
+
if response.status_code == 200:
|
| 94 |
+
print("SUCCESS!")
|
| 95 |
+
print(f"Response: {response.text}")
|
| 96 |
+
break
|
| 97 |
+
else:
|
| 98 |
+
print(f"Error: {response.text[:200]}...")
|
| 99 |
+
|
| 100 |
+
except Exception as e:
|
| 101 |
+
print(f"ERROR: {e}")
|
| 102 |
+
|
| 103 |
+
def test_api_info():
|
| 104 |
+
"""Get information about the API"""
|
| 105 |
+
base_url = "https://pavaniyerra-hackthon4.hf.space"
|
| 106 |
+
|
| 107 |
+
print("\nGetting API information...")
|
| 108 |
+
print("=" * 40)
|
| 109 |
+
|
| 110 |
+
try:
|
| 111 |
+
# Try to get API info
|
| 112 |
+
response = requests.get(base_url, timeout=30)
|
| 113 |
+
print(f"Base URL Status: {response.status_code}")
|
| 114 |
+
|
| 115 |
+
# Try common endpoints
|
| 116 |
+
endpoints = ["/", "/docs", "/openapi.json", "/info", "/health"]
|
| 117 |
+
for endpoint in endpoints:
|
| 118 |
+
try:
|
| 119 |
+
url = base_url + endpoint
|
| 120 |
+
response = requests.get(url, timeout=10)
|
| 121 |
+
print(f"{endpoint}: {response.status_code}")
|
| 122 |
+
if response.status_code == 200 and len(response.text) < 500:
|
| 123 |
+
print(f" Content: {response.text[:100]}...")
|
| 124 |
+
except:
|
| 125 |
+
print(f"{endpoint}: Error")
|
| 126 |
+
|
| 127 |
+
except Exception as e:
|
| 128 |
+
print(f"ERROR: {e}")
|
| 129 |
+
|
| 130 |
+
if __name__ == "__main__":
|
| 131 |
+
print("Hugging Face API Test - Fixed Version")
|
| 132 |
+
print("=" * 50)
|
| 133 |
+
print(f"API URL: https://pavaniyerra-hackthon4.hf.space/predict_similarity/")
|
| 134 |
+
print()
|
| 135 |
+
|
| 136 |
+
# Test different approaches
|
| 137 |
+
test_api_with_form_data()
|
| 138 |
+
test_api_with_json()
|
| 139 |
+
test_api_info()
|
| 140 |
+
|
| 141 |
+
print("\n" + "=" * 50)
|
| 142 |
+
print("API Testing Complete!")
|
app/Hackathon_setup/test_api_simple.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Simple API test script that extracts the numerical score
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import requests
|
| 6 |
+
import base64
|
| 7 |
+
import numpy as np
|
| 8 |
+
import cv2
|
| 9 |
+
from PIL import Image
|
| 10 |
+
import io
|
| 11 |
+
import re
|
| 12 |
+
|
| 13 |
+
def create_face_image():
|
| 14 |
+
"""Create a simple face-like image"""
|
| 15 |
+
img = np.zeros((100, 100), dtype=np.uint8)
|
| 16 |
+
|
| 17 |
+
# Face outline
|
| 18 |
+
cv2.ellipse(img, (50, 50), (40, 50), 0, 0, 360, 100, -1)
|
| 19 |
+
|
| 20 |
+
# Eyes
|
| 21 |
+
cv2.circle(img, (35, 40), 5, 200, -1)
|
| 22 |
+
cv2.circle(img, (65, 40), 5, 200, -1)
|
| 23 |
+
|
| 24 |
+
# Nose
|
| 25 |
+
cv2.line(img, (50, 45), (50, 60), 150, 2)
|
| 26 |
+
|
| 27 |
+
# Mouth
|
| 28 |
+
cv2.ellipse(img, (50, 70), (15, 8), 0, 0, 180, 150, 2)
|
| 29 |
+
|
| 30 |
+
# Convert to RGB
|
| 31 |
+
img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
|
| 32 |
+
return img_rgb
|
| 33 |
+
|
| 34 |
+
def test_api():
|
| 35 |
+
"""Test the API and extract the score"""
|
| 36 |
+
url = "https://pavaniyerra-hackthon4.hf.space/predict_similarity/"
|
| 37 |
+
|
| 38 |
+
print("Testing Face Similarity API")
|
| 39 |
+
print("=" * 40)
|
| 40 |
+
|
| 41 |
+
try:
|
| 42 |
+
# Create two test face images
|
| 43 |
+
face1 = create_face_image()
|
| 44 |
+
face2 = create_face_image()
|
| 45 |
+
|
| 46 |
+
# Convert to bytes
|
| 47 |
+
def img_to_bytes(img):
|
| 48 |
+
pil_img = Image.fromarray(img)
|
| 49 |
+
buffer = io.BytesIO()
|
| 50 |
+
pil_img.save(buffer, format='JPEG')
|
| 51 |
+
return buffer.getvalue()
|
| 52 |
+
|
| 53 |
+
face1_bytes = img_to_bytes(face1)
|
| 54 |
+
face2_bytes = img_to_bytes(face2)
|
| 55 |
+
|
| 56 |
+
# Prepare files for upload
|
| 57 |
+
files = {
|
| 58 |
+
'file1': ('face1.jpg', face1_bytes, 'image/jpeg'),
|
| 59 |
+
'file2': ('face2.jpg', face2_bytes, 'image/jpeg')
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
print("Sending request to API...")
|
| 63 |
+
response = requests.post(url, files=files, timeout=30)
|
| 64 |
+
|
| 65 |
+
print(f"Status Code: {response.status_code}")
|
| 66 |
+
|
| 67 |
+
if response.status_code == 200:
|
| 68 |
+
print("SUCCESS! API is working")
|
| 69 |
+
|
| 70 |
+
# Extract the dissimilarity score from HTML
|
| 71 |
+
html_content = response.text
|
| 72 |
+
|
| 73 |
+
# Look for the dissimilarity score in the HTML
|
| 74 |
+
# Pattern: "Dissimilarity: X.X"
|
| 75 |
+
pattern = r'Dissimilarity:</span>\s*<span[^>]*>\s*([0-9.]+)'
|
| 76 |
+
match = re.search(pattern, html_content)
|
| 77 |
+
|
| 78 |
+
if match:
|
| 79 |
+
score = float(match.group(1))
|
| 80 |
+
print(f"Dissimilarity Score: {score}")
|
| 81 |
+
|
| 82 |
+
# Convert dissimilarity to similarity (assuming 1.0 = completely different, 0.0 = identical)
|
| 83 |
+
similarity = 1.0 - score
|
| 84 |
+
print(f"Similarity Score: {similarity:.4f}")
|
| 85 |
+
|
| 86 |
+
# Interpret the result
|
| 87 |
+
if similarity > 0.8:
|
| 88 |
+
print("Result: Very High Similarity (likely same person)")
|
| 89 |
+
elif similarity > 0.6:
|
| 90 |
+
print("Result: High Similarity (possibly same person)")
|
| 91 |
+
elif similarity > 0.4:
|
| 92 |
+
print("Result: Moderate Similarity (uncertain)")
|
| 93 |
+
elif similarity > 0.2:
|
| 94 |
+
print("Result: Low Similarity (likely different persons)")
|
| 95 |
+
else:
|
| 96 |
+
print("Result: Very Low Similarity (definitely different persons)")
|
| 97 |
+
else:
|
| 98 |
+
print("WARNING: Could not extract score from HTML response")
|
| 99 |
+
print("HTML content preview:")
|
| 100 |
+
print(html_content[:500] + "..." if len(html_content) > 500 else html_content)
|
| 101 |
+
else:
|
| 102 |
+
print(f"ERROR: {response.status_code}")
|
| 103 |
+
print(f"Response: {response.text}")
|
| 104 |
+
|
| 105 |
+
except Exception as e:
|
| 106 |
+
print(f"ERROR: {e}")
|
| 107 |
+
|
| 108 |
+
def test_multiple_times():
|
| 109 |
+
"""Test the API multiple times to check consistency"""
|
| 110 |
+
print("\n" + "=" * 40)
|
| 111 |
+
print("Testing API Multiple Times")
|
| 112 |
+
print("=" * 40)
|
| 113 |
+
|
| 114 |
+
scores = []
|
| 115 |
+
for i in range(3):
|
| 116 |
+
print(f"\nTest {i+1}/3:")
|
| 117 |
+
try:
|
| 118 |
+
face1 = create_face_image()
|
| 119 |
+
face2 = create_face_image()
|
| 120 |
+
|
| 121 |
+
def img_to_bytes(img):
|
| 122 |
+
pil_img = Image.fromarray(img)
|
| 123 |
+
buffer = io.BytesIO()
|
| 124 |
+
pil_img.save(buffer, format='JPEG')
|
| 125 |
+
return buffer.getvalue()
|
| 126 |
+
|
| 127 |
+
files = {
|
| 128 |
+
'file1': ('face1.jpg', img_to_bytes(face1), 'image/jpeg'),
|
| 129 |
+
'file2': ('face2.jpg', img_to_bytes(face2), 'image/jpeg')
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
response = requests.post("https://pavaniyerra-hackthon4.hf.space/predict_similarity/",
|
| 133 |
+
files=files, timeout=30)
|
| 134 |
+
|
| 135 |
+
if response.status_code == 200:
|
| 136 |
+
# Extract score
|
| 137 |
+
pattern = r'Dissimilarity:</span>\s*<span[^>]*>\s*([0-9.]+)'
|
| 138 |
+
match = re.search(pattern, response.text)
|
| 139 |
+
if match:
|
| 140 |
+
score = float(match.group(1))
|
| 141 |
+
scores.append(score)
|
| 142 |
+
print(f" Score: {score}")
|
| 143 |
+
else:
|
| 144 |
+
print(" Could not extract score")
|
| 145 |
+
else:
|
| 146 |
+
print(f" Error: {response.status_code}")
|
| 147 |
+
|
| 148 |
+
except Exception as e:
|
| 149 |
+
print(f" Error: {e}")
|
| 150 |
+
|
| 151 |
+
if scores:
|
| 152 |
+
print(f"\nScore Statistics:")
|
| 153 |
+
print(f" Average: {sum(scores)/len(scores):.4f}")
|
| 154 |
+
print(f" Min: {min(scores):.4f}")
|
| 155 |
+
print(f" Max: {max(scores):.4f}")
|
| 156 |
+
print(f" Range: {max(scores) - min(scores):.4f}")
|
| 157 |
+
|
| 158 |
+
if __name__ == "__main__":
|
| 159 |
+
# Test the API
|
| 160 |
+
test_api()
|
| 161 |
+
|
| 162 |
+
# Test multiple times for consistency
|
| 163 |
+
test_multiple_times()
|
| 164 |
+
|
| 165 |
+
print("\n" + "=" * 50)
|
| 166 |
+
print("API Testing Complete!")
|
| 167 |
+
print("\nYour API is working correctly!")
|
| 168 |
+
print("The API expects:")
|
| 169 |
+
print("- Method: POST")
|
| 170 |
+
print("- Format: multipart/form-data")
|
| 171 |
+
print("- Parameters: file1, file2 (image files)")
|
| 172 |
+
print("- Response: HTML with dissimilarity score")
|
app/Hackathon_setup/test_api_with_faces.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test script for the Hugging Face API using face images
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import requests
|
| 6 |
+
import base64
|
| 7 |
+
import numpy as np
|
| 8 |
+
import cv2
|
| 9 |
+
from PIL import Image
|
| 10 |
+
import io
|
| 11 |
+
import os
|
| 12 |
+
|
| 13 |
+
def create_face_like_image():
    """Synthesize a crude 100x100 face-like test image.

    Draws an oval head, two eyes, a nose stroke and a mouth arc onto a
    grayscale canvas, then expands the result to three channels so the
    rest of the pipeline can treat it like a normal RGB photo.

    Returns:
        numpy.ndarray: a (100, 100, 3) uint8 image.
    """
    canvas = np.zeros((100, 100), dtype=np.uint8)

    # Head outline: filled oval centred on the canvas.
    cv2.ellipse(canvas, (50, 50), (40, 50), 0, 0, 360, 100, -1)

    # Left and right eyes.
    cv2.circle(canvas, (35, 40), 5, 200, -1)
    cv2.circle(canvas, (65, 40), 5, 200, -1)

    # Nose: short vertical stroke.
    cv2.line(canvas, (50, 45), (50, 60), 150, 2)

    # Mouth: lower half-ellipse outline.
    cv2.ellipse(canvas, (50, 70), (15, 8), 0, 0, 180, 150, 2)

    # Expand to 3 channels for consistency with real photos.
    return cv2.cvtColor(canvas, cv2.COLOR_GRAY2RGB)
|
| 36 |
+
|
| 37 |
+
def load_real_face_image(image_path):
    """Load a face image from disk, resized to 100x100.

    Args:
        image_path: path to an image file.

    Returns:
        numpy.ndarray | None: the resized BGR image, or None when the
        file is missing, unreadable, or loading raises.
    """
    # Guard clause: nothing to do for a missing file.
    if not os.path.exists(image_path):
        return None

    try:
        loaded = cv2.imread(image_path)
        if loaded is not None:
            # Normalise size so every image matches the model input.
            return cv2.resize(loaded, (100, 100))
    except Exception as e:
        print(f"Error loading image {image_path}: {e}")

    return None
|
| 50 |
+
|
| 51 |
+
def image_to_base64(img):
    """Encode a numpy image (grayscale or RGB) as a base64 JPEG string.

    Args:
        img: 2-D (grayscale) or 3-D (RGB) uint8 array.

    Returns:
        str: base64-encoded JPEG bytes.
    """
    # Two dims means single channel; PIL needs the explicit 'L' mode then.
    if img.ndim == 2:
        pil_img = Image.fromarray(img, mode='L')
    else:
        pil_img = Image.fromarray(img)

    with io.BytesIO() as buffer:
        pil_img.save(buffer, format='JPEG')
        raw = buffer.getvalue()

    return base64.b64encode(raw).decode()
|
| 62 |
+
|
| 63 |
+
def test_api_with_face_images():
    """Probe the deployed similarity endpoint with synthetic face images.

    Generates two face-like images, then tries several JSON payload key
    conventions in turn; on the first 200 response the score is printed,
    interpreted, and True is returned.  If every JSON shape fails, one
    multipart/form-data attempt is made before returning False.

    Returns:
        bool: True as soon as any JSON format yields HTTP 200, else False.
    """
    url = "https://pavaniyerra-hackthon4.hf.space/predict_similarity/"

    print("Testing API with face-like images...")
    print("=" * 50)

    try:
        # Create two face-like images
        # NOTE(review): the drawing is deterministic, so face1 and face2
        # are identical images -- confirm that is the intended test case.
        face1 = create_face_like_image()
        face2 = create_face_like_image()

        # Convert to base64
        face1_b64 = image_to_base64(face1)
        face2_b64 = image_to_base64(face2)

        print("Created face-like test images")
        print(f"Image 1 shape: {face1.shape}")
        print(f"Image 2 shape: {face2.shape}")

        # Try different request formats -- the endpoint's expected key
        # names are unknown, so each candidate shape is attempted in order.
        test_formats = [
            # Format 1: JSON with file1, file2
            {"file1": face1_b64, "file2": face2_b64},
            # Format 2: JSON with img1, img2
            {"img1": face1_b64, "img2": face2_b64},
            # Format 3: JSON with image1, image2
            {"image1": face1_b64, "image2": face2_b64},
            # Format 4: JSON with faces array
            {"faces": [face1_b64, face2_b64]},
            # Format 5: JSON with data wrapper
            {"data": {"file1": face1_b64, "file2": face2_b64}}
        ]

        for i, data in enumerate(test_formats, 1):
            print(f"\n--- Testing Format {i}: {list(data.keys())} ---")

            try:
                response = requests.post(url, json=data, timeout=30)
                print(f"Status Code: {response.status_code}")

                if response.status_code == 200:
                    try:
                        result = response.json()
                        print("SUCCESS! API Response:")
                        print(f"Similarity Score: {result}")

                        # Interpret the result: map the numeric score onto
                        # human-readable similarity bands.
                        if isinstance(result, (int, float)):
                            if result > 0.8:
                                print("Result: Very High Similarity (likely same person)")
                            elif result > 0.6:
                                print("Result: High Similarity (possibly same person)")
                            elif result > 0.4:
                                print("Result: Moderate Similarity (uncertain)")
                            elif result > 0.2:
                                print("Result: Low Similarity (likely different persons)")
                            else:
                                print("Result: Very Low Similarity (definitely different persons)")
                        else:
                            print(f"Response format: {type(result)} - {result}")

                        return True  # Success, no need to try other formats

                    except Exception as e:
                        # 200 but non-JSON body: show the raw payload.
                        print(f"Error parsing JSON response: {e}")
                        print(f"Raw response: {response.text}")
                else:
                    # Truncate error bodies to keep the log readable.
                    print(f"Error: {response.text[:200]}...")

            except requests.exceptions.RequestException as e:
                print(f"Network error: {e}")
            except Exception as e:
                print(f"Error: {e}")

        print("\nAll JSON formats failed. Trying form data...")

        # Try form data approach: upload the raw JPEG bytes as files.
        try:
            face1_bytes = base64.b64decode(face1_b64)
            face2_bytes = base64.b64decode(face2_b64)

            files = {
                'file1': ('face1.jpg', face1_bytes, 'image/jpeg'),
                'file2': ('face2.jpg', face2_bytes, 'image/jpeg')
            }

            response = requests.post(url, files=files, timeout=30)
            print(f"Form data Status: {response.status_code}")
            print(f"Form data Response: {response.text}")

        except Exception as e:
            print(f"Form data error: {e}")

    except Exception as e:
        print(f"General error: {e}")

    return False
|
| 161 |
+
|
| 162 |
+
def test_with_real_images():
    """Test the similarity endpoint with real face images, if any exist.

    Scans the current directory for a handful of conventional file names;
    when at least two are found, the first two are loaded, resized,
    base64-encoded, and posted to the API.  Purely best-effort: every
    failure path prints a message instead of raising.
    """
    print("\nTesting with real face images...")
    print("=" * 40)

    # Look for common face image files
    possible_files = [
        "face1.jpg", "face2.jpg", "person1.jpg", "person2.jpg",
        "test_face1.jpg", "test_face2.jpg", "sample1.jpg", "sample2.jpg"
    ]

    found_images = []
    for filename in possible_files:
        if os.path.exists(filename):
            found_images.append(filename)
            # Fix: the f-string had no placeholder and printed a literal
            # "(unknown)" -- report the actual file that was found.
            print(f"Found: {filename}")

    if len(found_images) >= 2:
        try:
            # Load the first two images
            img1 = load_real_face_image(found_images[0])
            img2 = load_real_face_image(found_images[1])

            if img1 is not None and img2 is not None:
                print(f"Loaded real images: {found_images[0]}, {found_images[1]}")

                # Convert to base64
                img1_b64 = image_to_base64(img1)
                img2_b64 = image_to_base64(img2)

                # Test with real images
                url = "https://pavaniyerra-hackthon4.hf.space/predict_similarity/"
                data = {"file1": img1_b64, "file2": img2_b64}

                response = requests.post(url, json=data, timeout=30)
                print(f"Real images test - Status: {response.status_code}")
                if response.status_code == 200:
                    result = response.json()
                    print(f"SUCCESS with real images! Similarity: {result}")
                else:
                    print(f"Error with real images: {response.text}")
            else:
                print("Could not load real images properly")
        except Exception as e:
            print(f"Error testing with real images: {e}")
    else:
        print("No real face images found for testing")
        print("Place some face images in the current directory to test with real data")
|
| 210 |
+
|
| 211 |
+
# Script entry point: run the synthetic-image probe first, then the
# optional real-image probe, and finish with usage instructions.
if __name__ == "__main__":
    print("Hugging Face API Test - Face Images")
    print("=" * 60)
    print(f"API URL: https://pavaniyerra-hackthon4.hf.space/predict_similarity/")
    print()

    # Test with face-like images
    # NOTE(review): `success` is captured but never used afterwards --
    # consider using it for an exit code or removing the binding.
    success = test_api_with_face_images()

    # Test with real images if available
    test_with_real_images()

    print("\n" + "=" * 60)
    print("Face API Testing Complete!")
    print("\nTo test with real face images:")
    print("1. Place face images in the current directory")
    print("2. Name them face1.jpg, face2.jpg, etc.")
    print("3. Run this script again")
|
app/Hackathon_setup/test_model_loading.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test script to diagnose and fix sklearn model loading issues
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import joblib
|
| 6 |
+
import pickle
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
|
| 10 |
+
def test_model_loading():
    """Smoke-test the persisted scaler and classifier artifacts.

    Loads both .sav files from the current directory, runs a random dummy
    sample through scaler.transform and classifier.predict, and reports
    progress on stdout.

    Returns:
        bool: True when both artifacts load and accept the dummy sample,
        False on the first failure.
    """
    print("Testing sklearn model loading...")
    print("=" * 40)

    # Test scaler
    print("Testing StandardScaler...")
    try:
        scaler = joblib.load('face_recognition_scaler.sav')
        print("OK: Scaler loaded successfully")

        # Test with dummy data
        import numpy as np
        # Fix: derive the feature count from the fitted scaler instead of
        # hard-coding 5 -- the old guess made the smoke test fail whenever
        # the artifact was trained on a different feature-vector length.
        n_features = getattr(scaler, 'n_features_in_', 5)
        dummy_data = np.random.randn(1, n_features)
        scaled_data = scaler.transform(dummy_data)
        print(f"OK: Scaler transform works: {scaled_data.shape}")

    except Exception as e:
        print(f"ERROR: Scaler error: {e}")
        return False

    # Test classifier
    # NOTE(review): the file is named decision_tree_model.sav but the code
    # announces a KNeighborsClassifier -- confirm which estimator it holds.
    print("\nTesting KNeighborsClassifier...")
    try:
        classifier = joblib.load('decision_tree_model.sav')
        print("OK: Classifier loaded successfully")

        # Test prediction on the sample already scaled above.
        prediction = classifier.predict(scaled_data)
        print(f"OK: Classifier prediction works: {prediction[0]}")

    except Exception as e:
        print(f"ERROR: Classifier error: {e}")
        return False

    return True
|
| 46 |
+
|
| 47 |
+
def try_compatibility_fixes():
    """Attempt fallback strategies for loading the model artifacts.

    Tries joblib with memory-mapping disabled, then raw pickle; when both
    fail, prints the installed sklearn version together with downgrade
    guidance.

    Returns:
        bool: True when either loading strategy succeeds, else False.
    """
    print("\nTrying compatibility fixes...")
    print("=" * 40)

    scaler_path = 'face_recognition_scaler.sav'
    model_path = 'decision_tree_model.sav'

    # Strategy 1: joblib with memory-mapping explicitly disabled.
    print("Method 1: Trying with different joblib parameters...")
    try:
        joblib.load(scaler_path, mmap_mode=None)
        joblib.load(model_path, mmap_mode=None)
        print("OK: Loaded with mmap_mode=None")
        return True
    except Exception as e:
        print(f"ERROR: Method 1 failed: {e}")

    # Strategy 2: bypass joblib and unpickle the files directly.
    print("\nMethod 2: Trying with pickle...")
    try:
        for artifact in (scaler_path, model_path):
            with open(artifact, 'rb') as fh:
                pickle.load(fh)
        print("OK: Loaded with pickle")
        return True
    except Exception as e:
        print(f"ERROR: Method 2 failed: {e}")

    # Strategy 3: nothing automatic left -- report version guidance.
    print("\nMethod 3: Checking sklearn version compatibility...")
    import sklearn
    print(f"Current sklearn version: {sklearn.__version__}")

    print("You may need to downgrade sklearn to match the training version")
    print("Try: pip install scikit-learn==1.6.1")

    return False
|
| 84 |
+
|
| 85 |
+
def create_dummy_models():
    """Train and persist throwaway scaler/classifier stand-ins.

    Fits a StandardScaler and a 3-NN classifier on random data with the
    same 5-feature layout the real pipeline expects, saves both under
    *_dummy.sav names, and smoke-tests a prediction.

    Returns:
        bool: True when creation and the smoke test succeed, else False.
    """
    print("\nCreating dummy models for testing...")
    print("=" * 40)

    try:
        from sklearn.neighbors import KNeighborsClassifier
        from sklearn.preprocessing import StandardScaler
        import numpy as np

        # Random training set mirroring the expected feature layout.
        n_samples, n_features = 50, 5
        features = np.random.randn(n_samples, n_features)
        labels = np.random.randint(0, 5, n_samples)

        # Fit and persist the scaler.
        scaler = StandardScaler()
        scaler.fit(features)
        joblib.dump(scaler, 'face_recognition_scaler_dummy.sav')
        print("OK: Created dummy scaler")

        # Fit and persist the classifier on the scaled features.
        classifier = KNeighborsClassifier(n_neighbors=3)
        classifier.fit(scaler.transform(features), labels)
        joblib.dump(classifier, 'decision_tree_model_dummy.sav')
        print("OK: Created dummy classifier")

        # Smoke-test the pair end to end on one fresh sample.
        sample = np.random.randn(1, n_features)
        prediction = classifier.predict(scaler.transform(sample))
        print(f"OK: Dummy model test: {prediction[0]}")

        return True

    except Exception as e:
        print(f"ERROR: Error creating dummy models: {e}")
        return False
|
| 124 |
+
|
| 125 |
+
def main():
    """Run the full model-loading diagnostic workflow.

    Verifies both artifact files exist, tries loading them as-is, falls
    back to compatibility strategies, and finally offers dummy models plus
    remediation advice.
    """
    print("Sklearn Model Loading Diagnostic")
    print("=" * 50)

    # Bail out early if either artifact is missing.
    for artifact in ('decision_tree_model.sav', 'face_recognition_scaler.sav'):
        if not os.path.exists(artifact):
            print(f"ERROR: Missing {artifact}")
            return
        print(f"OK: Found {artifact}")

    # Happy path: the models load with the installed sklearn.
    if test_model_loading():
        print("\nSUCCESS! Your models are working fine!")
        return

    # Fallback: alternate loading strategies.
    if try_compatibility_fixes():
        print("\nSUCCESS! Fixed with compatibility approach!")
        return

    # Last resort: offer freshly trained dummy stand-ins.
    if create_dummy_models():
        print("\nWARNING: Created dummy models. You should retrain with current sklearn version.")
        print("To use dummy models, rename them:")
        print("  mv face_recognition_scaler_dummy.sav face_recognition_scaler.sav")
        print("  mv decision_tree_model_dummy.sav decision_tree_model.sav")

    print("\n" + "=" * 50)
    print("RECOMMENDATIONS:")
    print("1. Downgrade sklearn: pip install scikit-learn==1.6.1")
    print("2. Retrain your models with current sklearn version")
    print("3. Use the dummy models for testing")

if __name__ == "__main__":
    main()
|