Update app.py
app.py
CHANGED
@@ -14,18 +14,14 @@ os.makedirs(upload_folder, exist_ok=True)
 # Updated Fake News Detection Models
 news_models = {
     "mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),
-    "
-    "
+    "google-electra": pipeline("text-classification", model="google/electra-base-discriminator"),
+    "bert-base": pipeline("text-classification", model="bert-base-uncased")
 }

-# Updated Image
+# Updated Image Model for AI vs. Human Detection
 clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

-ai_image_models = {
-    "clip-vit-base-patch32": clip_model
-}
-
 # Image transformation pipeline
 transform = transforms.Compose([
     transforms.Resize((224, 224)),
@@ -33,7 +29,7 @@ transform = transforms.Compose([
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
 ])

-# HTML Template with Model Selection
+# HTML Template with Model Selection and Explanations
 HTML_TEMPLATE = """
 <!DOCTYPE html>
 <html lang="en">
@@ -42,7 +38,7 @@ HTML_TEMPLATE = """
     <title>AI & News Detection</title>
     <style>
         body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background-color: #f5f5f5; padding: 20px; }
-        .container { background: white; padding: 30px; border-radius: 12px; max-width:
+        .container { background: white; padding: 30px; border-radius: 12px; max-width: 850px; margin: auto; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); }
         textarea, select { width: 100%; padding: 12px; margin-top: 10px; border-radius: 8px; border: 1px solid #ccc; }
         button { background-color: #4CAF50; color: white; border: none; padding: 12px 20px; border-radius: 8px; cursor: pointer; font-size: 16px; margin-top: 10px; }
         button:hover { background-color: #45a049; }
@@ -57,8 +53,8 @@ HTML_TEMPLATE = """
         <label for="model">Select Fake News Model:</label>
         <select name="model" required>
             <option value="mrm8488">MRM8488 (BERT-Tiny)</option>
-            <option value="
-            <option value="
+            <option value="google-electra">Google Electra (Base Discriminator)</option>
+            <option value="bert-base">BERT-Base Uncased</option>
         </select>
         <button type="submit">Detect News Authenticity</button>
     </form>
@@ -80,6 +76,7 @@ HTML_TEMPLATE = """
     <div class="result">
         <h2>📷 Image Detection Result:</h2>
         <p>{{ image_prediction }}</p>
+        <p><strong>Explanation:</strong> The model compares visual features against known AI-generated and human-created image patterns. High feature uniformity often indicates AI generation, while more varied features suggest human-created content.</p>
     </div>
     {% endif %}
 </div>
@@ -118,13 +115,18 @@ def detect_image():
     inputs = clip_processor(images=img, return_tensors="pt")

     with torch.no_grad():
-        image_features =
+        image_features = clip_model.get_image_features(**inputs)
+        mean_feature_value = torch.mean(image_features).item()
+        prediction = "AI-Generated" if mean_feature_value > 0 else "Human-Created"

-
+    explanation = (
+        f"Prediction: {prediction} (Feature Value: {mean_feature_value:.4f}). "
+        "Higher feature uniformity suggests AI generation, while more variability indicates human creation."
+    )

     return render_template_string(
         HTML_TEMPLATE,
-        image_prediction=
+        image_prediction=explanation
     )

 if __name__ == "__main__":
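For context, here is a minimal sketch (not part of this change) of how the text-classification pipelines registered in `news_models` are typically called; the sample headline is made up, and the label names depend on each checkpoint's configuration.

```python
# Minimal sketch: calling one of the news_models pipelines directly.
# A text-classification pipeline returns a list of dicts like
# [{"label": ..., "score": ...}].
from transformers import pipeline

fake_news_clf = pipeline(
    "text-classification",
    model="mrm8488/bert-tiny-finetuned-fake-news-detection",
)

sample = "Scientists confirm that drinking coffee makes you invisible."  # made-up headline
result = fake_news_clf(sample)[0]
print(result["label"], round(result["score"], 4))
```

The two newly added entries (google/electra-base-discriminator and bert-base-uncased) are called the same way; since they are general-purpose checkpoints rather than fake-news fine-tunes, the pipeline attaches a freshly initialized classification head, so their labels stay generic unless the models are fine-tuned for this task.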
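For reference, the new image branch boils down to a sign test on the mean of the pooled CLIP image embedding. Below is a standalone sketch of that rule outside Flask; the image path is a placeholder, and the (1, 512) embedding shape applies to the ViT-B/32 checkpoint used here.

```python
# Standalone sketch of the scoring rule the updated detect_image applies:
# threshold the mean of the CLIP image embedding at zero.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

img = Image.open("sample.jpg").convert("RGB")                  # placeholder path
inputs = clip_processor(images=img, return_tensors="pt")
with torch.no_grad():
    image_features = clip_model.get_image_features(**inputs)  # shape (1, 512) for ViT-B/32
mean_feature_value = torch.mean(image_features).item()
print("AI-Generated" if mean_feature_value > 0 else "Human-Created")
```

Note that the zero threshold is a fixed heuristic on raw embedding statistics rather than a classifier trained on AI-versus-human data; the explanation text rendered by the template describes the intent, while this threshold is what is actually computed.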