yasyn14 commited on
Commit
e7b36c2
·
1 Parent(s): b69845a

Edited Dockerfile and main.py

Browse files
Files changed (5) hide show
  1. Dockerfile +11 -24
  2. main.py +39 -18
  3. routes/predict.py +34 -16
  4. utils/predictor.py +46 -11
  5. utils/recommend.py +80 -11
Dockerfile CHANGED
@@ -4,45 +4,32 @@ FROM python:3.12-slim
4
  # 1. Create non‑root user
5
  RUN useradd --create-home --shell /bin/bash --uid 1000 appuser
6
 
7
- # 2. Environment variables
8
  ENV PYTHONDONTWRITEBYTECODE=1 \
9
  PYTHONUNBUFFERED=1 \
10
- HF_HOME=/tmp/huggingface \
11
- TRANSFORMERS_CACHE=/tmp/huggingface/transformers \
12
- HF_HUB_CACHE=/tmp/huggingface/hub \
13
  PORT=7860 \
14
  PATH=/home/appuser/.local/bin:$PATH
15
 
16
  # 3. Set working directory
17
  WORKDIR /home/appuser/app
18
 
19
- # 4. Install Python dependencies (as root)
20
  COPY requirements.txt .
21
  RUN pip install --upgrade pip \
22
- && pip install --no-cache-dir -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt
23
 
24
  # 5. Copy code and set permissions
25
  COPY --chown=appuser:appuser . .
26
- RUN chown -R appuser:appuser /home/appuser
27
 
28
- # 6. Switch to appuser BEFORE downloading the model
29
- USER appuser
30
-
31
- # 7. Download model (as appuser)
32
- RUN python - <<'PY'
33
- import os, pathlib
34
- from huggingface_hub import hf_hub_download
35
 
36
- hf_home = os.environ.get("HF_HOME", "/tmp/huggingface")
37
- pathlib.Path(hf_home).mkdir(parents=True, exist_ok=True)
38
 
39
- hf_hub_download(
40
- repo_id="yasyn14/skin-analyzer",
41
- filename="model-v1.keras",
42
- cache_dir=hf_home
43
- )
44
- PY
45
 
46
- # 8. Expose port & run app
47
  EXPOSE 7860
48
- CMD ["sh", "-c", "fastapi run main.py --host 0.0.0.0 --port 7860"]
 
4
  # 1. Create non‑root user
5
  RUN useradd --create-home --shell /bin/bash --uid 1000 appuser
6
 
7
+ # 2. Environment variables - set HF_HOME to match what's used in lifespan
8
  ENV PYTHONDONTWRITEBYTECODE=1 \
9
  PYTHONUNBUFFERED=1 \
10
+ HF_HOME=/home/appuser/huggingface \
 
 
11
  PORT=7860 \
12
  PATH=/home/appuser/.local/bin:$PATH
13
 
14
  # 3. Set working directory
15
  WORKDIR /home/appuser/app
16
 
17
+ # 4. Install Python dependencies
18
  COPY requirements.txt .
19
  RUN pip install --upgrade pip \
20
+ && pip install --no-cache-dir -r requirements.txt
21
 
22
  # 5. Copy code and set permissions
23
  COPY --chown=appuser:appuser . .
 
24
 
25
+ # 6. Create huggingface directory with proper permissions
26
+ RUN mkdir -p /home/appuser/huggingface && \
27
+ chown -R appuser:appuser /home/appuser
 
 
 
 
28
 
29
+ # 7. Switch to appuser
30
+ USER appuser
31
 
 
 
 
 
 
 
32
 
33
+ # 8. Expose port & run app using uvicorn
34
  EXPOSE 7860
35
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py CHANGED
@@ -11,34 +11,44 @@ from api.v1 import router as v1_router
11
  from models.model_loader import load_skin_condition_model
12
  from utils.predictor import predict_skin_condition
13
 
 
 
 
 
 
 
 
 
14
  os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
15
- logger = logging.getLogger("uvicorn.error")
16
 
17
  @asynccontextmanager
18
  async def lifespan(app: FastAPI):
19
  try:
20
- logger.info("Downloading model from Hugging Face Hub…")
 
 
 
21
  model_path = hf_hub_download(
22
  repo_id="yasyn14/skin-analyzer",
23
  filename="model-v1.keras",
24
- cache_dir="/tmp/huggingface"
25
  )
26
-
27
- logger.info("Loading model")
28
  model = await asyncio.to_thread(load_skin_condition_model, model_path)
29
-
30
  # warm‑up
31
  dummy = np.zeros((1, 224, 224, 3), dtype=np.uint8)
32
  await asyncio.to_thread(model.predict, dummy)
33
-
34
  app.state.model = model
35
  logger.info("Model ready ✅")
36
  yield
37
-
38
  except Exception as e:
39
  logger.exception("Failed during startup:")
40
  raise RuntimeError("Failed to load skin-condition model") from e
41
-
42
  finally:
43
  logger.info("Shutting down: releasing resources")
44
  if hasattr(app.state, "model"):
@@ -60,7 +70,6 @@ app.add_middleware(
60
  allow_headers=["*"],
61
  )
62
 
63
-
64
  @app.get("/healthz", tags=["Health"])
65
  async def health_check():
66
  return {"status": "ok"}
@@ -68,33 +77,45 @@ async def health_check():
68
  # include your versioned REST API
69
  app.include_router(v1_router)
70
 
71
-
72
  # === Gradio UI Setup ===
73
  def predict_skin_condition_grad(image: Image.Image):
74
  if image is None:
75
  return "No image provided"
76
-
 
 
 
77
  model = app.state.model
78
-
79
  # Preprocess image
80
  img = image.resize((224, 224)).convert("RGB")
81
  img_array = np.array(img)
82
-
83
  # Predict
84
  prediction = predict_skin_condition(img_array, model)
85
-
86
 
87
  confidence = prediction.get("confidence")
88
  label = prediction.get("condition")
89
-
90
  return f"{label} ({confidence:.2%} confidence)"
91
 
92
  gradio_interface = gr.Interface(
93
  fn=predict_skin_condition_grad,
94
- inputs=gr.Image(type="pil", label="Upload a skin image"),
 
 
 
 
 
95
  outputs=gr.Text(label="Prediction"),
96
  title="Skin Analyzer",
97
- description="Upload a photo of skin to detect conditions like acne, eczema, dryness, etc."
 
 
 
 
 
 
98
  )
99
 
100
  # Mount Gradio on root
 
11
  from models.model_loader import load_skin_condition_model
12
  from utils.predictor import predict_skin_condition
13
 
14
+ # Configure logging
15
+ logging.basicConfig(
16
+ level=logging.INFO,
17
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
18
+ )
19
+ logger = logging.getLogger(__name__)
20
+
21
+ # Disable OneDNN optimizations if needed
22
  os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
 
23
 
24
  @asynccontextmanager
25
  async def lifespan(app: FastAPI):
26
  try:
27
+ # Get cache directory from environment variable or use a default
28
+ cache_dir = os.environ.get("HF_HOME", "/tmp/huggingface")
29
+
30
+ logger.info(f"Downloading model from Hugging Face Hub using cache_dir: {cache_dir}...")
31
  model_path = hf_hub_download(
32
  repo_id="yasyn14/skin-analyzer",
33
  filename="model-v1.keras",
34
+ cache_dir=cache_dir
35
  )
36
+
37
+ logger.info(f"Loading model from path: {model_path}")
38
  model = await asyncio.to_thread(load_skin_condition_model, model_path)
39
+
40
  # warm‑up
41
  dummy = np.zeros((1, 224, 224, 3), dtype=np.uint8)
42
  await asyncio.to_thread(model.predict, dummy)
43
+
44
  app.state.model = model
45
  logger.info("Model ready ✅")
46
  yield
47
+
48
  except Exception as e:
49
  logger.exception("Failed during startup:")
50
  raise RuntimeError("Failed to load skin-condition model") from e
51
+
52
  finally:
53
  logger.info("Shutting down: releasing resources")
54
  if hasattr(app.state, "model"):
 
70
  allow_headers=["*"],
71
  )
72
 
 
73
  @app.get("/healthz", tags=["Health"])
74
  async def health_check():
75
  return {"status": "ok"}
 
77
  # include your versioned REST API
78
  app.include_router(v1_router)
79
 
 
80
  # === Gradio UI Setup ===
81
def predict_skin_condition_grad(image: Image.Image):
    """Gradio handler: classify an uploaded skin photo and format the result.

    Returns a human-readable string in every case so the UI always has
    something to display (no exceptions leak into the Gradio frontend).
    """
    if image is None:
        return "No image provided"

    # The model is loaded asynchronously during app startup; guard against
    # requests that arrive before it is ready.
    if not hasattr(app.state, "model"):
        return "Model is not loaded yet. Please try again in a moment."

    model = app.state.model

    # Resize/convert to the 224x224 RGB array the classifier expects.
    rgb = image.resize((224, 224)).convert("RGB")
    pixels = np.array(rgb)

    prediction = predict_skin_condition(pixels, model)
    label = prediction.get("condition")
    confidence = prediction.get("confidence")
    return f"{label} ({confidence:.2%} confidence)"
101
 
102
# Gradio UI wiring: a single-image in, single-text out interface around the
# handler above, later mounted on the FastAPI app.
# NOTE(review): `allow_flagging` was deprecated in Gradio 4.x and removed in
# Gradio 5 (renamed `flagging_mode`), and `webcam_options` / `sources` require
# a recent Gradio release — confirm against the gradio version pinned in
# requirements.txt.
gradio_interface = gr.Interface(
    fn=predict_skin_condition_grad,
    inputs=gr.Image(
        type="pil",
        label="Upload or capture a skin image",
        sources=["upload", "webcam"],  # Explicitly enable both upload and webcam
        webcam_options={"facingMode": "environment"}  # Use back camera by default (better for skin photos)
    ),
    outputs=gr.Text(label="Prediction"),
    title="Skin Analyzer",
    description="Upload a photo or use your camera to detect skin conditions like acne, eczema, dryness, etc.",
    examples=[
        # Optional: Add example images if you have them
        # ["examples/acne.jpg"],
        # ["examples/eczema.jpg"]
    ],
    allow_flagging="never"  # Disable flagging option
)
120
 
121
  # Mount Gradio on root
routes/predict.py CHANGED
@@ -1,5 +1,5 @@
1
  import io
2
-
3
  from fastapi import APIRouter, Depends, Request, UploadFile, File, HTTPException
4
  from PIL import Image
5
  import numpy as np
@@ -10,6 +10,9 @@ from utils.recommend import get_recommended_products
10
 
11
  router = APIRouter()
12
 
 
 
 
13
  def get_model(request: Request):
14
  """
15
  Dependency that retrieves the preloaded ML model from app state.
@@ -20,6 +23,12 @@ def get_model(request: Request):
20
  raise HTTPException(status_code=500, detail="Model not loaded")
21
  return model
22
 
 
 
 
 
 
 
23
  @router.post(
24
  "/predict/",
25
  response_model=PredictionResponse,
@@ -37,35 +46,41 @@ async def predict(
37
  # Validate content type
38
  if not file.content_type.startswith("image/"):
39
  raise HTTPException(status_code=415, detail="Unsupported file type. Please upload an image.")
40
-
41
  try:
42
- contents = await file.read()
 
 
 
 
 
 
 
 
 
 
 
43
  await file.close()
44
-
45
- # Load and preprocess image
46
- img = Image.open(io.BytesIO(contents)).convert("RGB")
47
- img = img.resize((224, 224))
48
- img_array = np.array(img)
49
-
50
  # Run prediction
51
  prediction = predict_skin_condition(img_array, model)
52
  predicted_condition = prediction.get("condition")
53
  confidence = prediction.get("confidence")
54
-
55
  # Handle no-detection case
56
  if not predicted_condition:
57
  raise HTTPException(
58
  status_code=422,
59
  detail="Unable to detect a skin condition from the provided image."
60
  )
61
-
62
  # Build response
63
  result = {
64
  "predicted_condition": predicted_condition,
65
  "confidence": confidence,
66
  "info": None,
67
  }
68
-
69
  # Attach recommendations if available
70
  condition_data = get_recommended_products(predicted_condition)
71
  if not condition_data:
@@ -73,13 +88,16 @@ async def predict(
73
  status_code=404,
74
  detail=f"No recommendation data found for detected condition: {predicted_condition}"
75
  )
76
-
77
  result["info"] = ConditionInfo(**condition_data)
78
  return result
79
-
80
  except HTTPException:
81
  # Propagate HTTPExceptions
82
  raise
83
- except Exception:
 
 
 
84
  # Hide technical details from client
85
- raise HTTPException(status_code=500, detail="Internal server error")
 
1
  import io
2
+ import asyncio
3
  from fastapi import APIRouter, Depends, Request, UploadFile, File, HTTPException
4
  from PIL import Image
5
  import numpy as np
 
10
 
11
  router = APIRouter()
12
 
13
+ # Maximum file size (10MB)
14
+ MAX_FILE_SIZE = 10 * 1024 * 1024
15
+
16
  def get_model(request: Request):
17
  """
18
  Dependency that retrieves the preloaded ML model from app state.
 
23
  raise HTTPException(status_code=500, detail="Model not loaded")
24
  return model
25
 
26
async def preprocess_image(image_data: bytes) -> np.ndarray:
    """Decode raw image bytes into a model-ready 224x224 RGB numpy array.

    The PIL decode/resize work is CPU-bound, so it is pushed to a worker
    thread to keep the event loop responsive.
    """
    def _decode() -> np.ndarray:
        img = Image.open(io.BytesIO(image_data)).convert("RGB")
        return np.array(img.resize((224, 224)))

    return await asyncio.to_thread(_decode)
31
+
32
  @router.post(
33
  "/predict/",
34
  response_model=PredictionResponse,
 
46
  # Validate content type
47
  if not file.content_type.startswith("image/"):
48
  raise HTTPException(status_code=415, detail="Unsupported file type. Please upload an image.")
49
+
50
  try:
51
+ # Check file size before reading
52
+ contents = await file.read(MAX_FILE_SIZE + 1)
53
+ if len(contents) > MAX_FILE_SIZE:
54
+ raise HTTPException(
55
+ status_code=413,
56
+ detail=f"File too large. Maximum size is {MAX_FILE_SIZE/(1024*1024)}MB"
57
+ )
58
+
59
+ # Process image
60
+ img_array = await preprocess_image(contents)
61
+
62
+ # Release file resources
63
  await file.close()
64
+
 
 
 
 
 
65
  # Run prediction
66
  prediction = predict_skin_condition(img_array, model)
67
  predicted_condition = prediction.get("condition")
68
  confidence = prediction.get("confidence")
69
+
70
  # Handle no-detection case
71
  if not predicted_condition:
72
  raise HTTPException(
73
  status_code=422,
74
  detail="Unable to detect a skin condition from the provided image."
75
  )
76
+
77
  # Build response
78
  result = {
79
  "predicted_condition": predicted_condition,
80
  "confidence": confidence,
81
  "info": None,
82
  }
83
+
84
  # Attach recommendations if available
85
  condition_data = get_recommended_products(predicted_condition)
86
  if not condition_data:
 
88
  status_code=404,
89
  detail=f"No recommendation data found for detected condition: {predicted_condition}"
90
  )
91
+
92
  result["info"] = ConditionInfo(**condition_data)
93
  return result
94
+
95
  except HTTPException:
96
  # Propagate HTTPExceptions
97
  raise
98
+ except Exception as e:
99
+ # Log the actual error in production systems
100
+ # logger.exception(f"Prediction error: {str(e)}")
101
+
102
  # Hide technical details from client
103
+ raise HTTPException(status_code=500, detail="Internal server error")
utils/predictor.py CHANGED
@@ -1,15 +1,50 @@
1
  import numpy as np
 
 
2
  import keras
3
  from config import CLASS_NAMES
4
 
5
- def predict_skin_condition(img_array, model):
6
- img_array = np.expand_dims(img_array, axis=0)
7
- img_array = keras.applications.efficientnet.preprocess_input(img_array)
8
-
9
- pred_probs = model.predict(img_array)[0]
10
- top_index = np.argmax(pred_probs)
11
-
12
- return {
13
- "condition": CLASS_NAMES[top_index],
14
- "confidence": float(pred_probs[top_index])
15
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import numpy as np
2
+ from typing import Dict, Any, Union
3
+ import tensorflow as tf
4
  import keras
5
  from config import CLASS_NAMES
6
 
7
def predict_skin_condition(img_array: np.ndarray, model: "tf.keras.Model") -> Dict[str, Union[str, float]]:
    """
    Predict skin condition from an input image using the provided model.

    Args:
        img_array: RGB image as a numpy array, either (H, W, C) or already
            batched as (1, H, W, C).
        model: Loaded Keras/TensorFlow classifier whose output order is
            assumed to match CLASS_NAMES — confirm against training config.

    Returns:
        Dict with the predicted condition name and its confidence score.
        On any internal failure the error is logged and the sentinel
        {"condition": "error", "confidence": 0.0} is returned instead of
        raising, so callers always receive a dict.
    """
    try:
        # Add batch dimension if the caller passed a single unbatched image.
        if len(img_array.shape) == 3:
            img_array = np.expand_dims(img_array, axis=0)

        # EfficientNet-specific input scaling (must match training-time preprocessing).
        preprocessed_img = keras.applications.efficientnet.preprocess_input(img_array)

        # Force CPU prediction for consistent behavior across deployment hosts.
        with tf.device('/CPU:0'):
            pred_probs = model.predict(preprocessed_img, verbose=0)[0]

        top_index = np.argmax(pred_probs)

        # Guard against a model whose output width disagrees with the config.
        if top_index >= len(CLASS_NAMES):
            raise ValueError(f"Predicted index {top_index} exceeds available class names")

        return {
            "condition": CLASS_NAMES[top_index],
            "confidence": float(pred_probs[top_index])
        }

    except Exception:
        # Log the real failure instead of silently swallowing it (the original
        # left only a commented-out logger call), but keep the sentinel return
        # so the "always returns a dict" contract is preserved for callers.
        import logging
        logging.getLogger(__name__).exception("Skin-condition prediction failed")
        return {
            "condition": "error",
            "confidence": 0.0
        }
utils/recommend.py CHANGED
@@ -1,18 +1,87 @@
1
  import json
 
 
 
2
 
3
- try:
4
- with open('data/skin_conditions.json', encoding='utf-8') as f:
5
- json_data = json.load(f)
6
- except (FileNotFoundError, json.JSONDecodeError) as e:
7
- print(f"Error loading JSON: {e}")
8
- json_data = []
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
 
 
 
 
 
 
 
 
 
11
 
12
- def get_recommended_products(condition_query):
13
  condition_query = condition_query.strip().lower()
14
- for entry in json_data:
15
- condition = entry.get("condition", "").strip().lower()
16
- if condition_query in condition:
 
 
 
 
 
 
 
 
 
 
17
  return entry
18
- return {}
 
 
 
 
1
import json
import logging
import os
from typing import Any, Dict, Optional

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Cached condition lookup table and the mtime of the file it was built from.
# The cache is module-global so every request shares one parsed copy.
_condition_data: Dict[str, Any] = {}
_last_load_time: float = 0


def _load_condition_data() -> Dict[str, Any]:
    """Load and cache skin-condition data from data/skin_conditions.json.

    Entries are indexed by lower-cased condition name for O(1) lookups, and
    reloaded only when the file's modification time changes. If a reload
    fails but a previous cache exists, the last known-good cache is returned
    rather than discarding valid data (the original returned {} here).

    Returns:
        Mapping of lower-cased condition name -> full JSON entry; empty dict
        when no data has ever been loaded successfully.
    """
    global _condition_data, _last_load_time

    json_path = 'data/skin_conditions.json'
    try:
        # File modification time drives cache invalidation.
        mtime = os.path.getmtime(json_path)

        # Already loaded and unchanged on disk: reuse the cached index.
        if _condition_data and mtime <= _last_load_time:
            return _condition_data

        with open(json_path, encoding='utf-8') as f:
            data = json.load(f)

        # Index by normalized condition name; skip entries without a name.
        condition_map: Dict[str, Any] = {}
        for entry in data:
            condition = entry.get("condition", "").strip().lower()
            if condition:
                condition_map[condition] = entry

        _condition_data = condition_map
        _last_load_time = mtime
        logger.info(f"Loaded {len(condition_map)} skin conditions from {json_path}")
        return condition_map

    except FileNotFoundError:
        logger.error(f"Skin conditions file not found: {json_path}")
    except json.JSONDecodeError as e:
        logger.error(f"Invalid JSON in skin conditions file: {e}")
    except Exception as e:
        logger.error(f"Error loading skin conditions: {e}")

    # Reload failed: keep serving the last successfully loaded data, if any.
    return _condition_data


def get_recommended_products(condition_query: str) -> Dict[str, Any]:
    """
    Get recommended products for a given skin condition.

    Args:
        condition_query: The skin condition to look up (case-insensitive).

    Returns:
        Dictionary containing condition info and product recommendations,
        or empty dict if no match found.
    """
    if not condition_query:
        return {}

    # Standardize the query.
    condition_query = condition_query.strip().lower()

    # Load or refresh the cached condition index.
    condition_data = _load_condition_data()

    # Exact match first (most reliable).
    if condition_query in condition_data:
        return condition_data[condition_query]

    # Fallback: tolerate slight naming variations via substring match in
    # either direction.
    for cond_name, entry in condition_data.items():
        if condition_query in cond_name or cond_name in condition_query:
            return entry

    logger.warning(f"No recommendations found for condition: {condition_query}")
    return {}