DimasMP3 commited on
Commit
217c3af
·
0 Parent(s):

Initial upload face-shape Space

Browse files
Files changed (7) hide show
  1. .gitattributes +1 -0
  2. app.py +120 -0
  3. config.json +13 -0
  4. config.py +132 -0
  5. inference.py +61 -0
  6. model/best_model.keras +3 -0
  7. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.keras filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import os
5
+ from typing import Dict, List, Optional, Tuple
6
+
7
+ import gradio as gr
8
+ import numpy as np
9
+ import tensorflow as tf
10
+ from PIL import Image
11
+
12
+ from config import settings
13
+
14
# Configure root logging once at import time so module loggers inherit it.
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s %(message)s")
logger = logging.getLogger(__name__)

# Side length (pixels) images are resized to before inference.
# NOTE(review): 244 is unusual (224 is the common ImageNet input size) —
# confirm the model was actually trained on 244x244 input.
IMG_SIZE = 244
18
+
19
+
20
class FaceShapeModel:
    """Wraps a Keras classifier that maps a face photo to per-label scores."""

    def __init__(self, model_path: str, labels: List[str]):
        """Load the Keras model from *model_path*; raise if the file is absent."""
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model file not found at: {model_path}")

        self.labels = labels
        logger.info("Loading TensorFlow model from %s", model_path)
        self.model = tf.keras.models.load_model(model_path)
        logger.info("Model loaded successfully with %d labels", len(labels))

    @staticmethod
    def _preprocess(image: Image.Image) -> np.ndarray:
        """Convert *image* into a (1, IMG_SIZE, IMG_SIZE, 3) float batch.

        NOTE(review): pixel values are left in [0, 255] here, while
        inference.py divides by 255 — confirm which scaling the model expects.
        """
        rgb = image if image.mode == "RGB" else image.convert("RGB")
        scaled = rgb.resize((IMG_SIZE, IMG_SIZE), Image.BILINEAR)
        pixels = tf.keras.preprocessing.image.img_to_array(scaled)
        return np.expand_dims(pixels, axis=0)

    def predict_image(self, image: Image.Image) -> Dict[str, float]:
        """Run inference on *image* and return a {label: score} mapping.

        Raises ValueError when the model's output width disagrees with the
        configured label list.
        """
        outputs = self.model.predict(self._preprocess(image), verbose=0)

        # Multi-output models return a list/tuple; keep the first head only.
        if isinstance(outputs, (list, tuple)):
            outputs = outputs[0]

        scores = np.asarray(outputs).squeeze()

        # A scalar output (single-class model) still becomes a 1-element vector.
        if scores.ndim == 0:
            scores = np.array([float(scores)])

        if len(scores) != len(self.labels):
            raise ValueError(
                "Model output length does not match labels. "
                f"Expected {len(self.labels)} values, got {len(scores)}."
            )

        return dict(zip(self.labels, map(float, scores.tolist())))
59
+
60
+
61
# Lazily-created process-wide model instance (loaded on first request).
_model: Optional[FaceShapeModel] = None


def get_model() -> FaceShapeModel:
    """Return the shared FaceShapeModel, constructing it on first use."""
    global _model
    model = _model
    if model is None:
        model = FaceShapeModel(settings.model_path, settings.labels)
        _model = model
    return model
69
+
70
+
71
def predict(image: Image.Image) -> Dict[str, float]:
    """Gradio callback: return {label: score} for an uploaded face photo.

    Raises gr.Error (displayed to the user) when no image was provided,
    when the model cannot be loaded, or when inference fails.
    """
    # Gradio passes None when the user submits without uploading an image;
    # fail with a clear message instead of an AttributeError in preprocessing.
    if image is None:
        raise gr.Error("Tidak ada gambar yang diunggah. Silakan unggah foto wajah.")

    try:
        model = get_model()
    except Exception as exc:  # pragma: no cover - defensive logging
        logger.exception("Failed to load model")
        raise gr.Error(f"Model gagal dimuat: {exc}") from exc

    try:
        return model.predict_image(image)
    except Exception as exc:
        logger.exception("Prediction failed")
        raise gr.Error(f"Prediksi gagal: {exc}") from exc
83
+
84
+
85
def build_interface() -> gr.Interface:
    """Assemble the Gradio UI: one RGB image input, top-3 label output."""
    image_input = gr.Image(type="pil", image_mode="RGB")
    label_output = gr.Label(num_top_classes=3)
    return gr.Interface(
        fn=predict,
        inputs=image_input,
        outputs=label_output,
        title="Face Shape Detection",
        description="Unggah foto wajah untuk mendeteksi bentuk wajah Anda menggunakan model TensorFlow.",
        # NOTE(review): allow_flagging is deprecated in newer Gradio releases
        # (renamed flagging_mode) — confirm against the installed version.
        allow_flagging="never",
    )
94
+
95
+
96
def launch_app():
    """Build the interface and start the Gradio server.

    Host, port, share mode, and optional basic-auth credentials come from
    `settings`. Auth is enabled only when both username and password are set.
    """
    iface = build_interface()

    auth_credentials: Optional[Tuple[str, str]] = None
    if settings.gradio_username and settings.gradio_password:
        auth_credentials = (settings.gradio_username, settings.gradio_password)

    # Fix: gr.Blocks.launch() accepts no `cors` or `auth_token` keyword —
    # passing them raises TypeError at startup. CORS for
    # settings.cors_allowed_origins must instead be configured on the
    # underlying FastAPI app (e.g. CORSMiddleware) if cross-origin access
    # is required.
    iface.launch(
        server_name="0.0.0.0",
        server_port=settings.port,
        share=settings.share,
        auth=auth_credentials,
        auth_message="Masukkan kredensial untuk mengakses demo",
        show_api=True,
    )
113
+
114
+
115
def _main() -> None:
    """Entry point: run the app, logging any fatal error before re-raising."""
    try:
        launch_app()
    except Exception:  # pragma: no cover - console visibility
        logger.exception("Gradio application terminated due to an error")
        raise


if __name__ == "__main__":
    _main()
config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_path": "model/best_model.keras",
3
+ "labels": ["Heart", "Oblong", "Oval", "Round", "Square"],
4
+ "port": 7860,
5
+ "cors_allowed_origins": [
6
+ "http://localhost:3000",
7
+ "http://127.0.0.1:3000"
8
+ ],
9
+ "share": false,
10
+ "gradio_auth_token": null,
11
+ "gradio_username": null,
12
+ "gradio_password": null
13
+ }
config.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ from dataclasses import dataclass
6
+ from functools import lru_cache
7
+ from pathlib import Path
8
+ from typing import Any, Dict, List, Optional
9
+
10
# Optional JSON config that sits next to this module.
CONFIG_FILE = Path(__file__).with_name("config.json")

# Fallback class labels used when neither env var nor config file provides them.
DEFAULT_LABELS: List[str] = [
    "Heart",
    "Oblong",
    "Oval",
    "Round",
    "Square",
]

# Fallback CORS origins (local development frontends).
DEFAULT_ALLOWED_ORIGINS: List[str] = [
    "http://localhost:3000",
    "http://127.0.0.1:3000",
]

# Default HTTP port (Hugging Face Spaces convention).
DEFAULT_PORT = 7860
23
+
24
+
25
def _load_config_file() -> Dict[str, Any]:
    """Parse CONFIG_FILE into a dict; {} when absent; ValueError on bad JSON."""
    if CONFIG_FILE.exists():
        try:
            return json.loads(CONFIG_FILE.read_text())
        except json.JSONDecodeError as exc:
            raise ValueError(f"Konfigurasi JSON tidak valid: {CONFIG_FILE}") from exc
    return {}
33
+
34
+
35
def _parse_labels(raw: Any) -> List[str]:
    """Normalize *raw* (comma-separated string or list) into a label list.

    None, unsupported types, and empty results all fall back to a copy of
    DEFAULT_LABELS.
    """
    if isinstance(raw, str):
        cleaned = [piece.strip() for piece in raw.split(",") if piece.strip()]
    elif isinstance(raw, list):
        cleaned = [str(item).strip() for item in raw if str(item).strip()]
    else:
        cleaned = []
    return cleaned if cleaned else DEFAULT_LABELS.copy()
48
+
49
+
50
def _parse_origins(raw: Any) -> List[str]:
    """Normalize *raw* (comma-separated string or list) into an origin list.

    None, unsupported types, and empty results all fall back to a copy of
    DEFAULT_ALLOWED_ORIGINS.
    """
    if isinstance(raw, str):
        cleaned = [piece.strip() for piece in raw.split(",") if piece.strip()]
    elif isinstance(raw, list):
        cleaned = [str(item).strip() for item in raw if str(item).strip()]
    else:
        cleaned = []
    return cleaned if cleaned else DEFAULT_ALLOWED_ORIGINS.copy()
63
+
64
+
65
+ def _parse_bool(raw: Any, fallback: bool = False) -> bool:
66
+ if raw is None:
67
+ return fallback
68
+
69
+ if isinstance(raw, bool):
70
+ return raw
71
+
72
+ return str(raw).strip().lower() in {"1", "true", "yes", "on"}
73
+
74
+
75
+ def _parse_int(raw: Any, fallback: int) -> int:
76
+ if raw is None:
77
+ return fallback
78
+
79
+ try:
80
+ value = int(raw)
81
+ except (TypeError, ValueError):
82
+ return fallback
83
+
84
+ return value if value > 0 else fallback
85
+
86
+
87
def _resolve_model_path(file_value: Optional[str]) -> str:
    """Pick the model path: MODEL_PATH env > config value > built-in default.

    Relative candidates are resolved against this module's directory so the
    app works regardless of the current working directory.
    """
    candidate = os.environ.get("MODEL_PATH") or file_value or "model/best_model.keras"
    path = Path(candidate)
    if path.is_absolute():
        return str(path)
    return str((CONFIG_FILE.parent / path).resolve())
94
+
95
+
96
@dataclass(frozen=True)
class Settings:
    """Immutable runtime configuration resolved from env vars and config.json."""

    model_path: str  # absolute path to the Keras model file
    labels: List[str]  # class labels, index-aligned with model outputs
    port: int  # HTTP port the Gradio server binds to
    cors_allowed_origins: List[str]  # origins allowed for cross-origin requests
    gradio_auth_token: str | None  # optional Gradio auth token from env/config
    gradio_username: str | None  # basic-auth username (only used with password)
    gradio_password: str | None  # basic-auth password (only used with username)
    share: bool  # whether to request a public Gradio share link
106
+
107
+
108
# Config file is parsed once at import; env vars are layered on top in get_settings().
_CONFIG_CACHE = _load_config_file()
109
+
110
+
111
@lru_cache(maxsize=1)
def get_settings() -> Settings:
    """Build the cached Settings object; environment variables override config.json."""

    def from_env_or_file(env_name: str, file_key: str) -> Any:
        # The env var wins whenever it is set (even to an empty string).
        value = os.environ.get(env_name)
        return value if value is not None else _CONFIG_CACHE.get(file_key)

    return Settings(
        model_path=_resolve_model_path(_CONFIG_CACHE.get("model_path")),
        labels=_parse_labels(from_env_or_file("MODEL_LABELS", "labels")),
        port=_parse_int(from_env_or_file("PORT", "port"), DEFAULT_PORT),
        cors_allowed_origins=_parse_origins(
            from_env_or_file("CORS_ALLOWED_ORIGINS", "cors_allowed_origins")
        ),
        gradio_auth_token=os.environ.get("GRADIO_AUTH_TOKEN") or _CONFIG_CACHE.get("gradio_auth_token"),
        gradio_username=os.environ.get("GRADIO_USERNAME") or _CONFIG_CACHE.get("gradio_username"),
        gradio_password=os.environ.get("GRADIO_PASSWORD") or _CONFIG_CACHE.get("gradio_password"),
        share=_parse_bool(from_env_or_file("GRADIO_SHARE", "share"), False),
    )
130
+
131
+
132
# Eagerly resolved at import so configuration errors surface at startup.
settings = get_settings()
inference.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ from typing import Any, Dict, List
5
+
6
+ import numpy as np
7
+ from PIL import Image
8
+ import tensorflow as tf
9
+
10
# Class labels, index-aligned with the model's output vector.
LABELS: List[str] = [
    "Heart",
    "Oblong",
    "Oval",
    "Round",
    "Square",
]

# Side length (pixels) for model input.
# NOTE(review): 244 is unusual (224 is the common size) — confirm against training.
TARGET_SIZE = 244
19
+
20
def _load_image(image_bytes: bytes) -> Image.Image:
    """Decode raw encoded-image bytes into an RGB PIL image."""
    image = Image.open(io.BytesIO(image_bytes))
    return image if image.mode == "RGB" else image.convert("RGB")
25
+
26
+
27
def _preprocess(image_bytes: bytes) -> np.ndarray:
    """Decode, resize, and scale *image_bytes* into a (1, H, W, 3) float32 batch.

    NOTE(review): this scales pixels to [0, 1], whereas app.py feeds raw
    [0, 255] values to the same model — confirm which scaling it expects.
    """
    resized = _load_image(image_bytes).resize((TARGET_SIZE, TARGET_SIZE), Image.BILINEAR)
    batch = np.asarray(resized, dtype="float32") / 255.0
    return batch[np.newaxis, ...]
33
+
34
+
35
class PreTrainedModel:
    """Loads the face-shape Keras model and returns the top-1 prediction."""

    def __init__(self, model_path: str = "model/best_model.keras") -> None:
        self.model = tf.keras.models.load_model(model_path)

    def predict(self, inputs: bytes) -> List[Dict[str, Any]]:
        """Classify *inputs* (encoded image bytes).

        Returns a single-element list: [{"label": <best class>, "score": <prob>}].
        """
        raw = self.model.predict(_preprocess(inputs), verbose=0)

        # Multi-output models return a list/tuple; keep the first head only.
        if isinstance(raw, (list, tuple)):
            raw = raw[0]

        scores = np.asarray(raw).squeeze()
        # A scalar output still becomes a 1-element vector.
        if scores.ndim == 0:
            scores = np.array([float(scores)])

        values = scores.tolist()
        best = int(np.argmax(values))
        return [{"label": LABELS[best], "score": float(values[best])}]
58
+
59
+
60
def load_model(model_dir: str = ".") -> PreTrainedModel:
    """Construct a PreTrainedModel using the model file under *model_dir*."""
    model_path = f"{model_dir}/model/best_model.keras"
    return PreTrainedModel(model_path=model_path)
model/best_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da4bb3f7d3abd04447e80d50a183354f03921c9971a471f6ee0144dceaf1b60a
3
+ size 32285016
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ tensorflow==2.16.1
2
+ numpy
3
+ Pillow
4
+ requests
5
+ gradio