vaniv commited on
Commit
d1c5b3b
·
verified ·
1 Parent(s): 0a5584c

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +40 -12
  2. app.py +102 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -1,12 +1,40 @@
1
- ---
2
- title: Deepfakedetect
3
- emoji: 🔥
4
- colorFrom: green
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 5.49.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Image Classifier — Keras/TensorFlow (Hugging Face Space)
3
+
4
+ A dead-simple image classification app you can deploy in minutes.
5
+
6
+ ## How it works
7
+
8
+ - If `model.h5` exists in the repository root, the app loads **your custom Keras model**.
9
+ - Optionally add `labels.txt` (one class name per line) to show readable labels.
10
+ - Input is resized to **224×224**. Adjust `TARGET_SIZE` in `app.py` if your model expects a different size.
11
+ - If no `model.h5` is found, it falls back to **MobileNetV2 (ImageNet)**.
12
+
13
+ ## Run locally
14
+
15
+ ```bash
16
+ pip install -r requirements.txt
17
+ python app.py
18
+ ```
19
+
20
+ Then open the local URL printed by Gradio.
21
+
22
+ ## Deploy to Hugging Face Spaces
23
+
24
+ 1. Create a new **Space** → **Gradio** (Python).
25
+ 2. Upload these files: `app.py`, `requirements.txt`, `README.md`.
26
+ 3. (Optional) Upload your `model.h5` and `labels.txt` to use your own model.
27
+ 4. The Space will build and auto-start.
28
+
29
+ ## Using your notebook's model
30
+
31
+ If your notebook trained a model, export it:
32
+
33
+ ```python
34
+ model.save("model.h5")
35
+ # Optional labels file (one per line)
36
+ with open("labels.txt", "w") as f:
37
+ f.write("\n".join(class_names))
38
+ ```
39
+
40
+ Commit both files to the Space.
app.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ import typing as t
4
+
5
+ import gradio as gr
6
+ import numpy as np
7
+ import tensorflow as tf
8
+ from PIL import Image
9
+
10
# Try to load a user-provided Keras model if available; else fallback to a pretrained MobileNetV2.
CUSTOM_MODEL_PATH = "model.h5"  # optional user-supplied Keras model in the repo root
LABELS_PATH = "labels.txt"  # optional labels file: one class name per line

# Global objects
MODEL = None  # populated by _load_model() at startup
USE_IMAGENET_DECODE = False  # True only when the MobileNetV2 (ImageNet) fallback is active
CLASS_NAMES: t.Optional[t.List[str]] = None  # labels for a custom model, if labels.txt exists
TARGET_SIZE = (224, 224)  # spatial size every input image is resized to before inference
19
+
20
+ def _maybe_load_labels(path: str) -> t.Optional[t.List[str]]:
21
+ if os.path.exists(path):
22
+ with open(path, "r", encoding="utf-8") as f:
23
+ lines = [x.strip() for x in f.readlines() if x.strip()]
24
+ return lines
25
+ return None
26
+
27
def _load_model():
    """Initialise the global MODEL, preferring a user-supplied model.h5.

    If no custom model exists (or it fails to load), falls back to a
    MobileNetV2 pretrained on ImageNet and flips USE_IMAGENET_DECODE on.
    """
    global MODEL, USE_IMAGENET_DECODE, CLASS_NAMES

    if os.path.exists(CUSTOM_MODEL_PATH):
        try:
            loaded = tf.keras.models.load_model(CUSTOM_MODEL_PATH, compile=False)
        except Exception as exc:  # best-effort: fall through to the pretrained fallback
            print("Failed to load custom model:", exc)
        else:
            MODEL = loaded
            CLASS_NAMES = _maybe_load_labels(LABELS_PATH)
            USE_IMAGENET_DECODE = False
            print("Loaded custom model from model.h5")
            return

    # Fallback: MobileNetV2 pretrained on ImageNet
    MODEL = tf.keras.applications.MobileNetV2(weights="imagenet")
    USE_IMAGENET_DECODE = True
    CLASS_NAMES = None
    print("Loaded MobileNetV2 (ImageNet) fallback.")
44
+
45
def _preprocess(img: Image.Image) -> np.ndarray:
    """Turn a PIL image into a (1, H, W, 3) float32 batch ready for MODEL."""
    rgb = img.convert("RGB").resize(TARGET_SIZE)
    pixels = np.asarray(rgb, dtype="float32")
    if USE_IMAGENET_DECODE:
        # The MobileNetV2 fallback needs its own input scaling.
        pixels = tf.keras.applications.mobilenet_v2.preprocess_input(pixels)
    else:
        # Custom models get plain 0..1 scaling.
        pixels = pixels / 255.0
    # Add the leading batch dimension.
    return pixels[np.newaxis, ...]
56
+
57
def _decode_predictions(preds: np.ndarray, top: int = 3):
    """Convert a (1, num_classes) prediction array into [(label, score), ...]."""
    scores = preds[0]

    if USE_IMAGENET_DECODE:
        decoded = tf.keras.applications.imagenet_utils.decode_predictions(
            scores[np.newaxis, :], top=top
        )[0]
        # Each decoded entry is (class_id, class_name, score); drop the id.
        return [(name, float(conf)) for (_, name, conf) in decoded]

    # Custom model: rank all classes by score (descending) and keep the top few.
    ranked = np.argsort(scores)[::-1][:top]
    results = []
    for i in ranked:
        if CLASS_NAMES is not None and i < len(CLASS_NAMES):
            label = CLASS_NAMES[i]
        else:
            # No (or too short) labels file: fall back to a numeric placeholder.
            label = f"class_{i}"
        results.append((label, float(scores[i])))
    return results
72
+
73
def predict(image: Image.Image):
    """Classify an uploaded image; return ({label: score}, preview image)."""
    if image is None:
        return [], None
    batch = _preprocess(image)
    raw = MODEL.predict(batch)
    ranked = _decode_predictions(raw, top=3)
    # Gradio's Label component expects a {label: confidence} mapping.
    return dict(ranked), image
82
+
83
# Initialize the model once at import time so the first request is fast.
_load_model()

with gr.Blocks(title="Image Classifier (Keras/TF)") as demo:
    gr.Markdown(
        "# Image Classifier\nUpload an image to classify using a Keras model.\n\n"
        "- Drop in your own `model.h5` (and optional `labels.txt`) to switch from ImageNet to your custom model.\n"
        "- For custom models, ensure input size is 224x224x3 or adjust code.\n"
    )

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="Upload image")
            predict_btn = gr.Button("Predict")
        with gr.Column(scale=1):
            label_output = gr.Label(num_top_classes=3, label="Top Predictions")
            preview_output = gr.Image(type="pil", label="Preview")

    # Wire the button to the classifier.
    predict_btn.click(fn=predict, inputs=image_input, outputs=[label_output, preview_output])

if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio>=4.44.0
2
+ tensorflow>=2.12
3
+ Pillow