Update app.py
Browse files
app.py
CHANGED
|
@@ -9,6 +9,7 @@ import requests
|
|
| 9 |
import io
|
| 10 |
import logging
|
| 11 |
import time
|
|
|
|
| 12 |
|
| 13 |
# Set up logging
|
| 14 |
logging.basicConfig(level=logging.INFO)
|
|
@@ -40,7 +41,7 @@ MODEL_CONFIGS = {
|
|
| 40 |
}
|
| 41 |
}
|
| 42 |
|
| 43 |
-
# Performance metrics
|
| 44 |
MODEL_METRICS = {
|
| 45 |
"Custom CNN": {"accuracy": 95.2, "inference_time": 45, "model_size": 25.3},
|
| 46 |
"MobileNetV2": {"accuracy": 92.8, "inference_time": 18, "model_size": 8.7},
|
|
@@ -49,18 +50,42 @@ MODEL_METRICS = {
|
|
| 49 |
}
|
| 50 |
|
| 51 |
def load_model(model_name):
    """Load model from Hugging Face with error handling"""
    try:
        logger.info(f"Loading model: {model_name}")
        # Fetch the serialized model from its configured URL.
        resp = requests.get(MODEL_CONFIGS[model_name]["url"], timeout=60, stream=True)
        resp.raise_for_status()
        # Materialize the download in memory so tf.keras can read it like a file.
        buffer = io.BytesIO(resp.content)
        model = tf.keras.models.load_model(buffer)
        logger.info(f"Successfully loaded model: {model_name}")
        return model, None
    except Exception as e:
        logger.error(f"Error loading {model_name}: {str(e)}")
        return None, f"Error loading {model_name}: {str(e)}"
|
|
|
|
| 9 |
import io
|
| 10 |
import logging
|
| 11 |
import time
|
| 12 |
+
import h5py
|
| 13 |
|
| 14 |
# Set up logging
|
| 15 |
logging.basicConfig(level=logging.INFO)
|
|
|
|
| 41 |
}
|
| 42 |
}
|
| 43 |
|
| 44 |
+
# Performance metrics
|
| 45 |
MODEL_METRICS = {
|
| 46 |
"Custom CNN": {"accuracy": 95.2, "inference_time": 45, "model_size": 25.3},
|
| 47 |
"MobileNetV2": {"accuracy": 92.8, "inference_time": 18, "model_size": 8.7},
|
|
|
|
| 50 |
}
|
| 51 |
|
| 52 |
def load_model(model_name):
    """Load model from Hugging Face with enhanced error handling.

    Downloads the serialized model for *model_name* from the URL configured
    in MODEL_CONFIGS, sanity-checks that the payload is a plausible Keras
    HDF5 file, and loads it with tf.keras.

    Args:
        model_name: Key into MODEL_CONFIGS identifying the model to fetch.

    Returns:
        Tuple ``(model, None)`` on success, or ``(None, error_message)``
        on any failure (network error, truncated download, invalid HDF5).
    """
    try:
        logger.info(f"Loading model: {model_name}")
        url = MODEL_CONFIGS[model_name]["url"]
        response = requests.get(url, timeout=60, stream=True)
        response.raise_for_status()

        # Size-check the bytes actually received rather than the
        # Content-Length header: the header is optional, and the previous
        # .get('content-length', 0) default made a missing header look like
        # a 0-byte file, falsely rejecting valid downloads.
        content = response.content
        if len(content) < 1000:
            logger.error(f"Model {model_name} file too small: {len(content)} bytes")
            return None, f"Model {model_name} file is too small ({len(content)} bytes)"

        # Wrap the payload so the HDF5/Keras readers can treat it as a file.
        model_bytes = io.BytesIO(content)

        # Verify this is a valid Keras-saved HDF5 file before handing it to
        # tf.keras — gives a clearer error message than load_model would.
        try:
            with h5py.File(model_bytes, 'r') as f:
                if 'keras_version' not in f.attrs:
                    logger.error(f"Model {model_name} is not a valid HDF5 file")
                    return None, f"Model {model_name} is not a valid HDF5 file"
        except Exception as h5py_error:
            logger.error(f"Invalid HDF5 file for {model_name}: {str(h5py_error)}")
            return None, f"Invalid HDF5 file for {model_name}: {str(h5py_error)}"

        # h5py advanced the buffer; rewind before tf.keras reads it.
        model_bytes.seek(0)

        model = tf.keras.models.load_model(model_bytes)
        logger.info(f"Successfully loaded model: {model_name}")
        return model, None
    except requests.exceptions.RequestException as e:
        # Network-level failures (DNS, timeout, HTTP error status).
        logger.error(f"Network error loading {model_name}: {str(e)}")
        return None, f"Network error loading {model_name}: {str(e)}"
    except Exception as e:
        logger.error(f"Error loading {model_name}: {str(e)}")
        return None, f"Error loading {model_name}: {str(e)}"
|