Update app.py
app.py CHANGED
@@ -159,6 +159,7 @@ def classify_images(image_dir, model_pipeline, model_idx):
     for folder_name, ground_truth_label in [('real', 1), ('ai', 0)]:
         folder_path = os.path.join(image_dir, folder_name)
         if not os.path.exists(folder_path):
+            print(f"Folder not found: {folder_path}")
             continue
         for img_name in os.listdir(folder_path):
             img_path = os.path.join(folder_path, img_name)
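For reference, classify_images assumes the uploaded ZIP extracts to a directory with two labelled subfolders, matching the ('real', 1) / ('ai', 0) pairs in the loop above. A minimal layout (folder names come from the diff; file names are illustrative):

    extracted_dir/
        real/    # images carrying ground-truth label 1
            photo_001.jpg
        ai/      # images carrying ground-truth label 0
            gen_001.png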
@@ -167,13 +168,15 @@ def classify_images(image_dir, model_pipeline, model_idx):
 
                 # Ensure that each image is being processed by the correct model pipeline
                 pred = model_pipeline(img)
-                pred_label =
+                pred_label = 0 if pred[0]['label'] == 'AI' else 1  # Assuming 'AI' is label 0 and 'Real' is label 1
 
                 preds.append(pred_label)
                 labels.append(ground_truth_label)
                 images.append(img_name)
             except Exception as e:
                 print(f"Error processing image {img_name} in model {model_idx}: {e}")
+
+    print(f"Model {model_idx} processed {len(images)} images")
     return labels, preds, images
 
 # Function to generate evaluation metrics
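The replacement pred_label line relies on the output shape of a transformers image-classification pipeline: a list of {'label': str, 'score': float} dicts sorted by descending score, so pred[0] is the top prediction. A minimal sketch of the mapping, assuming a hypothetical checkpoint whose top label string is 'AI' for generated images (label strings are model-specific and should be checked against each model card):

    from PIL import Image
    from transformers import pipeline

    # Hypothetical checkpoint name, for illustration only.
    pipe = pipeline("image-classification", model="some-org/ai-vs-real-detector")

    img = Image.open("example.jpg").convert("RGB")
    pred = pipe(img)
    # pred looks like: [{'label': 'AI', 'score': 0.93}, {'label': 'Real', 'score': 0.07}]

    # Map the top label string onto the numeric convention used in app.py:
    # 0 = AI-generated, 1 = real.
    pred_label = 0 if pred[0]["label"] == "AI" else 1

Note that if a model emits different label strings (say 'artificial' / 'human'), this comparison maps everything to 1, which is one plausible source of the identical-predictions symptom the debugging prints below are chasing.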
@@ -210,13 +213,18 @@ def process_zip(zip_file):
     # Run classification for each model
     results = {}
     for idx in range(len(models)):
-        print(f"Processing with model {idx}")
+        print(f"Processing with model {models[idx]}")  # Debugging to show which model is being used
 
         # Create a new pipeline for each model within the loop
         pipe = pipeline("image-classification", f"{models[idx]}")
+        print(f"Initialized pipeline for {models[idx]}")  # Confirm pipeline is initialized correctly
 
         # Classify images with the correct pipeline per model
         labels, preds, images = classify_images(extracted_dir, pipe, idx)
+
+        # Debugging: Print the predictions to ensure they're different
+        print(f"Predictions for model {models[idx]}: {preds}")
+
         accuracy, roc_score, report, cm_fig, roc_fig = evaluate_model(labels, preds)
 
         # Store results for each model
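The point of this hunk is that the pipeline is constructed inside the loop, so each model classifies the extracted folder independently. A short sketch of the pattern, assuming classify_images from app.py is in scope and using placeholder checkpoint names:

    from transformers import pipeline

    # Placeholder checkpoints; app.py defines its own `models` list.
    models = ["org/detector-a", "org/detector-b"]
    extracted_dir = "extracted_images"  # directory the uploaded ZIP was unpacked into

    for idx in range(len(models)):
        # Building the pipeline here, not before the loop, guarantees each
        # iteration loads its own weights instead of reusing a single model.
        pipe = pipeline("image-classification", model=models[idx])
        labels, preds, images = classify_images(extracted_dir, pipe, idx)  # defined in app.py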
@@ -228,7 +236,7 @@ def process_zip(zip_file):
 
     shutil.rmtree(extracted_dir)  # Clean up extracted files
 
-    # Return results for all
+    # Return results for all models
     return (results['Model_0_accuracy'], results['Model_0_roc_score'], results['Model_0_report'],
             results['Model_0_cm_fig'], results['Model_0_roc_fig'],
             results['Model_1_accuracy'], results['Model_1_roc_score'], results['Model_1_report'],
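The "Store results for each model" block is elided from the diff; judging by the key names in the return statement, it plausibly fills results along these lines (a guess at the shape, not the verbatim code):

    # Inside the per-model loop, after evaluate_model():
    results[f"Model_{idx}_accuracy"] = accuracy
    results[f"Model_{idx}_roc_score"] = roc_score
    results[f"Model_{idx}_report"] = report
    results[f"Model_{idx}_cm_fig"] = cm_fig
    results[f"Model_{idx}_roc_fig"] = roc_fig

If the Space's UI is Gradio-based, as is typical for Spaces, the flat tuple return then maps one value per output component in a fixed order.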
@@ -238,6 +246,7 @@ def process_zip(zip_file):
 
 
 
+
 # Single image section
 def load_url(url):
     try:
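Putting the classify_images hunks together, the function after this commit plausibly reads as below. The image-loading line and the list initialisations are not visible in the diff, so the Image.open call and the empty-list setup are reconstructions under that assumption, not the verbatim file:

    import os
    from PIL import Image

    def classify_images(image_dir, model_pipeline, model_idx):
        labels, preds, images = [], [], []
        for folder_name, ground_truth_label in [('real', 1), ('ai', 0)]:
            folder_path = os.path.join(image_dir, folder_name)
            if not os.path.exists(folder_path):
                print(f"Folder not found: {folder_path}")
                continue
            for img_name in os.listdir(folder_path):
                img_path = os.path.join(folder_path, img_name)
                try:
                    # Loading step not shown in the diff; PIL is a reasonable guess.
                    img = Image.open(img_path).convert("RGB")

                    # Ensure that each image is being processed by the correct model pipeline
                    pred = model_pipeline(img)
                    pred_label = 0 if pred[0]['label'] == 'AI' else 1  # Assuming 'AI' is label 0 and 'Real' is label 1

                    preds.append(pred_label)
                    labels.append(ground_truth_label)
                    images.append(img_name)
                except Exception as e:
                    print(f"Error processing image {img_name} in model {model_idx}: {e}")

        print(f"Model {model_idx} processed {len(images)} images")
        return labels, preds, images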