Spaces:
Build error
Build error
DimasMP3 committed on
Commit ·
486e475
1
Parent(s): 5093d6d
update setup to b4
Browse files- inference.py +64 -89
- models/class_indices.json +7 -0
- models/idx2class.json +7 -0
inference.py
CHANGED
|
@@ -1,132 +1,107 @@
|
|
| 1 |
-
# inference.py
|
| 2 |
import os, json, time
|
| 3 |
-
from typing import List, Dict,
|
| 4 |
import numpy as np
|
| 5 |
from PIL import Image
|
| 6 |
import tensorflow as tf
|
|
|
|
| 7 |
|
| 8 |
-
# ----------
|
| 9 |
_DEFAULT_LABELS = ["Heart","Oblong","Oval","Round","Square"]
|
| 10 |
|
| 11 |
-
def
|
| 12 |
p_ci = os.path.join("models", "class_indices.json")
|
| 13 |
p_i2c = os.path.join("models", "idx2class.json")
|
| 14 |
-
# Prioritas: class_indices.json
|
| 15 |
try:
|
| 16 |
with open(p_ci, "r") as f:
|
| 17 |
-
d = json.load(f) # {"Label": idx
|
| 18 |
-
|
| 19 |
-
labels = [k for k,_ in items]
|
| 20 |
-
print(f"[LABEL] from class_indices.json -> {labels}")
|
| 21 |
-
return labels
|
| 22 |
except Exception:
|
| 23 |
pass
|
| 24 |
-
# Fallback: idx2class.json
|
| 25 |
try:
|
| 26 |
with open(p_i2c, "r") as f:
|
| 27 |
-
d = json.load(f) # {"0":"Label"
|
| 28 |
-
|
| 29 |
-
labels = [v for _,v in items]
|
| 30 |
-
print(f"[LABEL] from idx2class.json -> {labels}")
|
| 31 |
-
return labels
|
| 32 |
except Exception:
|
| 33 |
-
print(f"[LABEL] fallback default -> {_DEFAULT_LABELS}")
|
| 34 |
return _DEFAULT_LABELS
|
| 35 |
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
-
# ----------
|
| 39 |
class FaceShapeModel:
|
| 40 |
-
def __init__(self, model_path
|
| 41 |
-
self.
|
| 42 |
-
self.img_size = 224
|
| 43 |
-
self.external_rescale = False
|
| 44 |
-
|
| 45 |
full_path = os.path.join(os.getcwd(), model_path)
|
| 46 |
print(f"[LOAD] {full_path}")
|
| 47 |
self.model = tf.keras.models.load_model(full_path, compile=False)
|
| 48 |
|
| 49 |
-
#
|
| 50 |
ishape = self.model.input_shape
|
| 51 |
-
|
| 52 |
-
h = ishape[1] if len(ishape) > 1 else None
|
| 53 |
-
if isinstance(h, int) and h > 0:
|
| 54 |
-
self.img_size = h
|
| 55 |
print(f"[MODEL] input img_size = {self.img_size}")
|
| 56 |
|
| 57 |
-
#
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
self.external_rescale = not
|
| 61 |
-
print(f"[MODEL] internal_preproc={
|
|
|
|
|
|
|
| 62 |
|
| 63 |
-
#
|
| 64 |
try:
|
| 65 |
_ = self.model(tf.zeros((1, self.img_size, self.img_size, 3), dtype=tf.float32))
|
| 66 |
except Exception as e:
|
| 67 |
print("[WARN] warmup failed:", e)
|
| 68 |
|
| 69 |
-
def _preprocess(self,
|
| 70 |
-
if
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
x =
|
| 74 |
-
|
| 75 |
-
x = x / 255.0
|
| 76 |
-
return np.expand_dims(x, axis=0)
|
| 77 |
|
| 78 |
-
def
|
| 79 |
-
if self.model is None:
|
| 80 |
-
return {"Error": "Model tidak dimuat."}
|
| 81 |
-
if image is None:
|
| 82 |
-
return {"Error": "Gambar kosong."}
|
| 83 |
t0 = time.perf_counter()
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
out = {lbl: float(
|
| 87 |
-
print(f"[INF] {len(
|
| 88 |
return out
|
| 89 |
|
| 90 |
-
# singleton
|
| 91 |
_model = FaceShapeModel()
|
| 92 |
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
return _model.predict_image(image_pil)
|
| 97 |
-
return {"Error": "Instance model tidak tersedia."}
|
| 98 |
|
| 99 |
-
#
|
| 100 |
-
def
|
| 101 |
-
if not d or any(k == "Error" for k in d):
|
| 102 |
-
return {"label": "", "confidences": []}
|
| 103 |
items = sorted(d.items(), key=lambda kv: kv[1], reverse=True)
|
| 104 |
-
return {"label": items[0][0], "confidences": [{"label":
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
continue
|
| 117 |
-
return out
|
| 118 |
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
return {"error": "Instance model tidak tersedia."}
|
| 122 |
-
imgs = _to_pil_list(files)
|
| 123 |
-
if not imgs:
|
| 124 |
-
return {"error": "Tidak ada gambar valid."}
|
| 125 |
-
best, bestc = {"label":"", "confidences":[]}, -1.0
|
| 126 |
-
for img in imgs:
|
| 127 |
-
d = _model.predict_image(img)
|
| 128 |
-
p = _dict_to_payload(d)
|
| 129 |
-
c = float(p["confidences"][0]["confidence"]) if p["confidences"] else 0.0
|
| 130 |
-
if c > bestc:
|
| 131 |
-
best, bestc = p, c
|
| 132 |
-
return best
|
|
|
|
|
|
|
| 1 |
import os, json, time
|
| 2 |
+
from typing import List, Dict, Any
|
| 3 |
import numpy as np
|
| 4 |
from PIL import Image
|
| 5 |
import tensorflow as tf
|
| 6 |
+
import gradio as gr
|
| 7 |
|
| 8 |
+
# ---------- label loader ----------
_DEFAULT_LABELS = ["Heart","Oblong","Oval","Round","Square"]

def load_labels() -> List[str]:
    """Return the class labels in model-output index order.

    Resolution order:
      1. models/class_indices.json  -- {"Label": idx}, sorted by idx
      2. models/idx2class.json      -- {"idx-as-str": "Label"}
      3. _DEFAULT_LABELS as a last resort.

    Each source is best-effort: a missing, unreadable, or malformed file
    falls through to the next source. Exceptions are narrowed to the
    concrete failure modes (I/O, JSON parse, wrong mapping shape) so that
    genuine programming errors are not silently swallowed.
    """
    p_ci = os.path.join("models", "class_indices.json")
    p_i2c = os.path.join("models", "idx2class.json")
    try:
        with open(p_ci, "r") as f:
            d = json.load(f)  # {"Label": idx}
        # Sort labels by their index so position i holds class i.
        return [k for k, _ in sorted(d.items(), key=lambda kv: kv[1])]
    except (OSError, ValueError, TypeError, AttributeError):
        # OSError: file missing/unreadable; ValueError: bad JSON;
        # TypeError/AttributeError: JSON is not a sortable str->int mapping.
        pass
    try:
        with open(p_i2c, "r") as f:
            d = json.load(f)  # {"0": "Label"}
        # Rebuild the ordered list from contiguous string indices "0".."N-1".
        return [d[str(i)] for i in range(len(d))]
    except (OSError, ValueError, KeyError, TypeError, AttributeError):
        return _DEFAULT_LABELS
|
| 26 |
|
| 27 |
+
def generate_config_if_missing(model: "tf.keras.Model", labels: List[str], path: str = "config.json") -> None:
    """Write a Hugging Face-style config.json for the model, unless one exists.

    Args:
        model: loaded Keras model; only its ``input_shape`` is read.
        labels: class labels in index order.
        path: destination file; an existing file is never overwritten.

    Raises:
        ValueError: if the model's input shape has no positive spatial size.
    """
    if os.path.exists(path):
        return
    ishape = model.input_shape
    h = ishape[1] if isinstance(ishape, (list, tuple)) and len(ishape) > 1 else None
    # `assert` is stripped under `python -O`; validate with an explicit raise.
    if not (isinstance(h, int) and h > 0):
        raise ValueError(f"Input shape aneh: {ishape}")
    cfg = {
        "architectures": ["EfficientNetB4"],
        "image_size": int(h),
        "num_labels": len(labels),
        "id2label": {str(i): lbl for i, lbl in enumerate(labels)},
        "label2id": {lbl: i for i, lbl in enumerate(labels)},
    }
    with open(path, "w") as f:
        json.dump(cfg, f, indent=2)
    print(f"[CFG] wrote {path} (image_size={h})")
|
| 41 |
|
| 42 |
+
# ---------- model wrapper ----------
class FaceShapeModel:
    """Wraps the Keras face-shape classifier: loading, preprocessing, inference."""

    def __init__(self, model_path: str = "models/model.keras"):
        self.labels = load_labels()

        full_path = os.path.join(os.getcwd(), model_path)
        print(f"[LOAD] {full_path}")
        self.model = tf.keras.models.load_model(full_path, compile=False)

        # Input size. Guard against dynamic spatial dims: input_shape[1] is
        # None for variable-size models and int(None) would raise TypeError.
        # Fall back to 224 in that case.
        ishape = self.model.input_shape
        h = ishape[1] if isinstance(ishape, (list, tuple)) and len(ishape) > 1 else None
        self.img_size = h if isinstance(h, int) and h > 0 else 224
        print(f"[MODEL] input img_size = {self.img_size}")

        # Detect an internal Rescaling/Normalization layer near the input; if
        # the model does not preprocess internally, we must rescale pixels to
        # [0, 1] ourselves in _preprocess.
        first = [layer.name.lower() for layer in self.model.layers[:8]]
        has_pp = any(("rescaling" in n) or ("normalization" in n) for n in first)
        self.external_rescale = not has_pp
        print(f"[MODEL] internal_preproc={has_pp} -> external_rescale={self.external_rescale}")

        generate_config_if_missing(self.model, self.labels)

        # Warmup: first call builds the inference graph. Failure is non-fatal;
        # the model may still work on real input.
        try:
            _ = self.model(tf.zeros((1, self.img_size, self.img_size, 3), dtype=tf.float32))
        except Exception as e:
            print("[WARN] warmup failed:", e)

    def _preprocess(self, img: Image.Image) -> np.ndarray:
        """Convert a PIL image to a (1, H, W, 3) float32 batch for the model."""
        if img.mode != "RGB":
            img = img.convert("RGB")
        img = img.resize((self.img_size, self.img_size))
        x = np.asarray(img, dtype=np.float32)
        if self.external_rescale:
            x = x / 255.0
        return np.expand_dims(x, 0)

    def predict_dict(self, img: Image.Image) -> Dict[str, float]:
        """Run inference on one image and return {label: probability}."""
        t0 = time.perf_counter()
        x = self._preprocess(img)
        p = self.model.predict(x, verbose=0)[0]
        out = {lbl: float(prob) for lbl, prob in zip(self.labels, p)}
        print(f"[INF] {len(self.labels)}-class in {(time.perf_counter()-t0)*1000:.1f} ms")
        return out
|
| 83 |
|
|
|
|
| 84 |
# Module-level singleton: the model is loaded exactly once, at import time.
_model = FaceShapeModel()

def predict(image: Image.Image) -> Dict[str, float]:
    """Classify a single PIL image.

    Returns {label: probability}, or an error marker dict when no image
    was supplied.
    """
    if image is None:
        return {"Error": "No image"}
    return _model.predict_dict(image)
|
|
|
|
|
|
|
| 89 |
|
| 90 |
+
# --- Gradio UI ---
|
| 91 |
+
def _payload(d: Dict[str,float]):
|
|
|
|
|
|
|
| 92 |
items = sorted(d.items(), key=lambda kv: kv[1], reverse=True)
|
| 93 |
+
return {"label": items[0][0], "confidences": [{"label":k, "confidence":v} for k,v in items]}
|
| 94 |
|
| 95 |
+
# Gradio front-end: upload an image, show the top-5 class probabilities.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Face Shape Classification — EfficientNetB4 (300×300)")
    img_in = gr.Image(type="pil", label="Upload face image (frontal)")
    label_out = gr.Label(num_top_classes=5, label="Predictions")
    predict_btn = gr.Button("Predict")

    def _classify(im):
        # Bridge: raw prediction dict -> gr.Label payload.
        return _payload(predict(im))

    predict_btn.click(_classify, inputs=img_in, outputs=label_out)

    # Optional example gallery, populated only when ./examples exists.
    example_paths = []
    if os.path.exists("examples"):
        example_paths = [os.path.join("examples", name) for name in os.listdir("examples")]
    gr.Examples(
        examples=example_paths,
        inputs=img_in
    )

if __name__ == "__main__":
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
models/class_indices.json
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"Heart": 0,
|
| 3 |
+
"Oblong": 1,
|
| 4 |
+
"Oval": 2,
|
| 5 |
+
"Round": 3,
|
| 6 |
+
"Square": 4
|
| 7 |
+
}
|
models/idx2class.json
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"0": "Heart",
|
| 3 |
+
"1": "Oblong",
|
| 4 |
+
"2": "Oval",
|
| 5 |
+
"3": "Round",
|
| 6 |
+
"4": "Square"
|
| 7 |
+
}
|