Spaces:
Sleeping
Sleeping
Initial project upload via Python API for Flask Space
Browse files- .gitattributes +3 -0
- LICENSE +21 -0
- README.md +82 -13
- app.py +49 -0
- demo/demo.mp4 +3 -0
- demo/demo1.png +3 -0
- demo/demo2.png +3 -0
- models/efficientnet_b3_full_ai_image_classifier.pt +3 -0
- requirements.txt +7 -3
- ui.py +21 -0
- utils.py +54 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
demo/demo.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
demo/demo1.png filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
demo/demo2.png filter=lfs diff=lfs merge=lfs -text
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Eslam Tarek
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
CHANGED
|
@@ -1,20 +1,89 @@
|
|
| 1 |
---
|
| 2 |
-
title: ImageProof
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk:
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
- streamlit
|
| 10 |
pinned: false
|
| 11 |
-
short_description: 'Deep Learning Model for Detecting AI-Generated Images using '
|
| 12 |
license: mit
|
| 13 |
---
|
| 14 |
|
| 15 |
-
# Welcome to Streamlit!
|
| 16 |
|
| 17 |
-
Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: ImageProof – Deep learning AI-generated Image Detection
|
| 3 |
+
emoji: 🚗
|
| 4 |
+
colorFrom: yellow
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: streamlit
|
| 7 |
+
sdk_version: "1.40.0" # latest stable streamlit
|
| 8 |
+
app_file: app.py
|
|
|
|
| 9 |
pinned: false
|
|
|
|
| 10 |
license: mit
|
| 11 |
---
|
| 12 |
|
|
|
|
| 13 |
|
|
|
|
| 14 |
|
| 15 |
+
# ImageProof - AI Image Authenticity Detector 🧠
|
| 16 |
+
|
| 17 |
+
A Streamlit-based web application that uses a fine-tuned EfficientNet-B3 model to detect whether images are AI-generated or real.
|
| 18 |
+
|
| 19 |
+
## Table of Contents
|
| 20 |
+
- [Demo](#demo)
|
| 21 |
+
- [Features](#features)
|
| 22 |
+
- [Installation](#installation)
|
| 23 |
+
- [Usage](#usage)
|
| 24 |
+
- [Contributing](#contributing)
|
| 25 |
+
- [License](#license)
|
| 26 |
+
|
| 27 |
+
## Demo
|
| 28 |
+
Check out the application in action with these demo files:
|
| 29 |
+
|
| 30 |
+
<video controls>
|
| 31 |
+
<source src="demo/demo.mp4" type="video/mp4">
|
| 32 |
+
Your browser does not support the video tag.
|
| 33 |
+
</video>
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
------------------------
|
| 37 |
+

|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
## Features
|
| 42 |
+
- **Image Upload**: Support for JPG, JPEG, and PNG files.
|
| 43 |
+
- **URL Input**: Analyze images directly from web URLs.
|
| 44 |
+
- **Real-time Prediction**: Instant classification with confidence scores.
|
| 45 |
+
- **Interactive UI**: Built with Streamlit for easy use.
|
| 46 |
+
- **Model Integration**: Leverages EfficientNet-B3 for accurate detection.
|
| 47 |
+
|
| 48 |
+
## Installation
|
| 49 |
+
To get started, clone the repository and set up a virtual environment.
|
| 50 |
+
|
| 51 |
+
```bash
|
| 52 |
+
# Create a virtual environment
|
| 53 |
+
python -m venv .venv
|
| 54 |
+
|
| 55 |
+
# Activate it
|
| 56 |
+
# On Linux/Mac:
|
| 57 |
+
source .venv/bin/activate
|
| 58 |
+
# On Windows:
|
| 59 |
+
.venv\Scripts\activate
|
| 60 |
+
|
| 61 |
+
# Install dependencies
|
| 62 |
+
pip install -r requirements.txt
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
## Usage
|
| 66 |
+
Run the application using Streamlit:
|
| 67 |
+
|
| 68 |
+
```bash
|
| 69 |
+
streamlit run app.py
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
1. Open the app in your browser.
|
| 73 |
+
2. Choose to upload an image or enter an image URL.
|
| 74 |
+
3. View the prediction results, including the label (AI-generated or Real) and confidence score.
|
| 75 |
+
|
| 76 |
+
Example prediction output:
|
| 77 |
+
- Label: 🧠 AI-generated
|
| 78 |
+
- Confidence: 0.95
|
| 79 |
+
|
| 80 |
+
## Contributing
|
| 81 |
+
Contributions are welcome! Please fork the repository and submit a pull request. Ensure code follows best practices and includes tests.
|
| 82 |
+
|
| 83 |
+
## License
|
| 84 |
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
| 85 |
+
|
| 86 |
+
## Acknowledgements
|
| 87 |
+
- Built with [Streamlit](https://streamlit.io/) for the web interface.
|
| 88 |
+
- Model based on [EfficientNet](https://github.com/lukemelas/EfficientNet-PyTorch) and [timm](https://github.com/rwightman/pytorch-image-models).
|
| 89 |
+
- Thanks to the open-source community for PyTorch and related libraries.
|
app.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
from utils import load_model, load_image, preprocess_image, predict
from ui import show_header, show_image
import os

# ========================================
# 🔧 Configuration
# ========================================
MODEL_DIR = "models"
MODEL_PATH = os.path.join(MODEL_DIR, "efficientnet_b3_full_ai_image_classifier.pt")


# Hoisted out of main(): st.cache_resource keys on the function, so defining it
# at module level (instead of re-creating it on every Streamlit rerun) keeps the
# cached EfficientNet-B3 instance stable and avoids reloading ~43 MB of weights.
@st.cache_resource
def get_model():
    """Load the classifier once per process and reuse it across reruns."""
    return load_model(MODEL_PATH)


# ========================================
# 🚀 Streamlit App
# ========================================
def main():
    """Render the page, collect an image (upload or URL), and show the prediction."""
    st.set_page_config(page_title="AI Image Detector", page_icon="🧠", layout="centered")
    show_header()

    model = get_model()

    # User options
    option = st.radio("Choose Input Type:", ("Upload Image", "From URL"))

    img = None
    if option == "Upload Image":
        uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
        if uploaded_file:
            img = load_image(uploaded_file)
    else:
        url = st.text_input("Enter Image URL")
        if url:
            # A bad URL, a dead host, or a non-image payload used to crash the
            # whole script run; show a friendly error instead.
            try:
                img = load_image(url)
            except Exception as exc:
                st.error(f"Could not load image from URL: {exc}")

    # Predict
    if img is not None:
        img_tensor = preprocess_image(img)
        label, prob = predict(model, img_tensor)
        show_image(img, label, prob)
    else:
        st.info("👆 Upload an image or enter a URL to start.")


if __name__ == "__main__":
    main()
demo/demo.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5dc118ac01e5f3f4a9564d35b89c4abc6eb8a9f0ce1864a1d3c8c8a8f8e921e
|
| 3 |
+
size 3634263
|
demo/demo1.png
ADDED
|
Git LFS Details
|
demo/demo2.png
ADDED
|
Git LFS Details
|
models/efficientnet_b3_full_ai_image_classifier.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fb6c6ede00a59848ccf176fd1207d421ae8ba7236daa7fe1ac07bfee3e549640
|
| 3 |
+
size 43455736
|
requirements.txt
CHANGED
|
@@ -1,3 +1,7 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit==1.28.1
|
| 2 |
+
torch==2.0.1
|
| 3 |
+
timm==0.9.7
|
| 4 |
+
torchvision==0.15.2
|
| 5 |
+
pillow==10.0.1
|
| 6 |
+
requests==2.31.0
|
| 7 |
+
matplotlib==3.8.0
|
ui.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
# NOTE(review): the original also imported matplotlib.pyplot as plt, but nothing
# in this 21-line file used it; the dead import (and its heavy dependency) is removed.


def show_header():
    """Display app title and description."""
    st.title("🧠 AI-Generated Image Detector")
    st.markdown("""
    **Upload an image or enter an image URL** to detect whether it’s AI-generated or real.
    The model is based on **EfficientNet-B3**, fine-tuned for image authenticity detection.
    """)


def show_image(img, label, prob):
    """Display image and prediction result.

    Args:
        img: Image object accepted by ``st.image`` (e.g. a PIL image).
        label: Human-readable prediction label.
        prob: Confidence score in [0, 1], rendered as a progress bar.
    """
    st.image(img, caption="Uploaded Image", use_container_width=True)

    st.subheader("Prediction Results")
    st.write(f"**Label:** {label}")

    # Optional: display confidence bar; clamp so float rounding can never
    # exceed the widget's 100 maximum.
    st.progress(min(int(prob * 100), 100))
utils.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch
import timm
from torchvision import transforms
from PIL import Image
import requests
from io import BytesIO

# Run on GPU when available; model and input tensors are moved here.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# 🔹 Preprocessing (same as training): EfficientNet-B3 input size + ImageNet stats.
transform = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])


def load_model(model_path: str):
    """Load the EfficientNet-B3 classifier from *model_path*.

    A ``.pt`` file is treated as a fully pickled model; any other path is
    treated as a state dict loaded into a fresh ``efficientnet_b3`` head.
    Returns the model on DEVICE in eval mode.
    """
    if model_path.endswith(".pt"):
        # SECURITY NOTE: weights_only=False unpickles arbitrary objects — only
        # load checkpoints you trust (this one ships with the repository).
        model = torch.load(model_path, map_location=DEVICE, weights_only=False)
    else:
        model = timm.create_model("efficientnet_b3", pretrained=False, num_classes=1)
        model.load_state_dict(torch.load(model_path, map_location=DEVICE))
    model.to(DEVICE)
    model.eval()
    return model


def load_image(input_source):
    """Return an RGB PIL image from a URL string or an uploaded file object."""
    if isinstance(input_source, str):  # URL
        # timeout: a dead host must not hang the Streamlit worker forever.
        response = requests.get(input_source, timeout=10)
        # Fail loudly on 4xx/5xx instead of trying to decode an error page.
        response.raise_for_status()
        img = Image.open(BytesIO(response.content)).convert("RGB")
    else:  # Uploaded file
        img = Image.open(input_source).convert("RGB")
    return img


def preprocess_image(img):
    """Apply the training transform and add a batch dimension -> (1, 3, 300, 300)."""
    return transform(img).unsqueeze(0)


def predict(model, img_tensor):
    """Predict class and confidence score.

    The model emits a single logit; sigmoid maps it to P(AI-generated).
    Returns (label, prob).
    """
    with torch.no_grad():
        outputs = model(img_tensor.to(DEVICE))
        probs = torch.sigmoid(outputs)
        prob = probs.item()
        # Bug fix: the original compared against 0.00001, which labels almost
        # every image "AI-generated" (sigmoid output is essentially never that
        # low). 0.5 is the standard binary-classification threshold. The stray
        # debug print(prob) is also removed.
        label = "🧠 AI-generated" if prob >= 0.5 else "📸 Real"
        return label, prob
|