# Flask demo app: fake-news text classification and AI-vs-human image detection.
import os
from flask import Flask, request, render_template_string
from PIL import Image
import torch
from torchvision import models, transforms
from transformers import pipeline, CLIPProcessor, CLIPModel
app = Flask(__name__)
# Create the 'static/uploads' folder if it doesn't exist
upload_folder = os.path.join('static', 'uploads')
os.makedirs(upload_folder, exist_ok=True)
# Updated Fake News Detection Models
news_models = {
"mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),
"liam168": pipeline("text-classification", model="liam168/fake-news-bert-base-uncased"),
"distilbert": pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
}
# Updated Image Models for AI vs. Human Detection
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
ai_image_models = {
"clip-vit-base-patch32": clip_model
}
# Image transformation pipeline
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# HTML Template with Model Selection
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>AI & News Detection</title>
<style>
body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background-color: #f5f5f5; padding: 20px; }
.container { background: white; padding: 30px; border-radius: 12px; max-width: 800px; margin: auto; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); }
textarea, select { width: 100%; padding: 12px; margin-top: 10px; border-radius: 8px; border: 1px solid #ccc; }
button { background-color: #4CAF50; color: white; border: none; padding: 12px 20px; border-radius: 8px; cursor: pointer; font-size: 16px; margin-top: 10px; }
button:hover { background-color: #45a049; }
.result { background: #e7f3fe; padding: 15px; border-radius: 10px; margin-top: 20px; }
</style>
</head>
<body>
<div class="container">
<h1>๐ฐ Fake News Detection</h1>
<form method="POST" action="/detect">
<textarea name="text" placeholder="Enter news text..." required></textarea>
<label for="model">Select Fake News Model:</label>
<select name="model" required>
<option value="mrm8488">MRM8488 (BERT-Tiny)</option>
<option value="liam168">Liam168 (BERT)</option>
<option value="distilbert">DistilBERT (SST-2)</option>
</select>
<button type="submit">Detect News Authenticity</button>
</form>
{% if news_prediction %}
<div class="result">
<h2>๐ง News Detection Result:</h2>
<p>{{ news_prediction }}</p>
</div>
{% endif %}
<h1>๐ผ๏ธ AI vs. Human Image Detection</h1>
<form method="POST" action="/detect_image" enctype="multipart/form-data">
<input type="file" name="image" required>
<button type="submit">Upload and Detect</button>
</form>
{% if image_prediction %}
<div class="result">
<h2>๐ท Image Detection Result:</h2>
<p>{{ image_prediction }}</p>
</div>
{% endif %}
</div>
</body>
</html>
"""
@app.route("/", methods=["GET"])
def home():
return render_template_string(HTML_TEMPLATE)
@app.route("/detect", methods=["POST"])
def detect():
text = request.form.get("text")
model_key = request.form.get("model")
if not text or model_key not in news_models:
return render_template_string(HTML_TEMPLATE, news_prediction="Invalid input or model selection.")
result = news_models[model_key](text)[0]
label = "REAL" if result['label'].lower() in ["real", "label_1"] else "FAKE"
confidence = result['score'] * 100
return render_template_string(
HTML_TEMPLATE,
news_prediction=f"News is {label} (Confidence: {confidence:.2f}%)"
)
@app.route("/detect_image", methods=["POST"])
def detect_image():
if "image" not in request.files:
return render_template_string(HTML_TEMPLATE, image_prediction="No image uploaded.")
file = request.files["image"]
img = Image.open(file).convert("RGB")
inputs = clip_processor(images=img, return_tensors="pt")
with torch.no_grad():
image_features = ai_image_models["clip-vit-base-patch32"].get_image_features(**inputs)
prediction = "AI-Generated" if torch.mean(image_features).item() > 0 else "Human-Created"
return render_template_string(
HTML_TEMPLATE,
image_prediction=f"Prediction: {prediction}"
)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=7860) # Suitable for Hugging Face Spaces