saniaE committed
Commit · f2bc4d7
Parent(s): 46056bf
added api setup
- Dockerfile +36 -0
- app.py +92 -0
- requirements.txt +21 -0
Dockerfile ADDED
@@ -0,0 +1,36 @@
+FROM python:3.9-slim
+
+# Install system dependencies for OpenCV and TFOD Protobuf
+RUN apt-get update && apt-get install -y \
+    libgl1-mesa-glx \
+    libglib2.0-0 \
+    protobuf-compiler \
+    git \
+    wget \
+    && rm -rf /var/lib/apt/lists/*
+
+# Set up a new user
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH \
+    PYTHONPATH=$HOME/app:$HOME/app/models/research:$HOME/app/models/research/slim
+
+WORKDIR $HOME/app
+
+# Install Python dependencies
+COPY --chown=user requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Clone and install TensorFlow Object Detection API
+RUN git clone --depth 1 https://github.com/tensorflow/models.git
+WORKDIR $HOME/app/models/research
+RUN protoc object_detection/protos/*.proto --python_out=.
+RUN cp object_detection/packages/tf2/setup.py . && pip install .
+
+WORKDIR $HOME/app
+
+# Copy the rest of the application
+COPY --chown=user . .
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
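Note on the build steps: the ENV block puts the cloned models/research tree on PYTHONPATH, and the protoc compilation plus `pip install .` of the TF2 setup.py are what make the `object_detection` package importable. A minimal import smoke test (a sketch, not part of the commit; run it with the image's own Python, e.g. `docker run <image-tag> python smoke_test.py`, where the image tag is whatever you built) can confirm those steps succeeded:

    # smoke_test.py -- hypothetical check that the TFOD API install succeeded.
    # These are the same modules app.py imports at startup, so if this runs,
    # the protoc compilation and `pip install .` steps worked.
    from object_detection.utils import config_util, label_map_util
    from object_detection.builders import model_builder

    print("object_detection imports OK")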
app.py ADDED
@@ -0,0 +1,92 @@
+import os
+import io
+import cv2
+import numpy as np
+import tensorflow as tf
+from fastapi import FastAPI, UploadFile, File
+from PIL import Image
+from huggingface_hub import snapshot_download
+
+from object_detection.utils import label_map_util, config_util
+from object_detection.builders import model_builder
+
+app = FastAPI()
+
+# 1. Download Private Models
+HF_TOKEN = os.getenv("HF_Token")
+REPO_ID = "SaniaE/Car_Damage_Detection"
+
+print("Downloading models from Hugging Face...")
+model_dir = snapshot_download(
+    repo_id=REPO_ID,
+    token=HF_TOKEN,
+    local_dir="./models_data"
+)
+
+PIPELINE_CONFIG = os.path.join(model_dir, "object_detection_model/pipeline.config")
+CHECKPOINT_PATH = os.path.join(model_dir, "object_detection_model/ckpt-37")
+LABEL_MAP_PATH = os.path.join(model_dir, "object_detection_model/label_map.pbtxt")
+CNN_MODEL_PATH = os.path.join(model_dir, "cnn_filter.h5")
+
+# 2. Load Models
+# Load CNN Filter
+cnn_filter = tf.keras.models.load_model(CNN_MODEL_PATH)
+
+# Load Object Detection Model
+configs = config_util.get_configs_from_pipeline_file(PIPELINE_CONFIG)
+detection_model = model_builder.build(model_config=configs['model'], is_training=False)
+ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
+ckpt.restore(CHECKPOINT_PATH).expect_partial()
+
+category_index = label_map_util.create_category_index_from_labelmap(LABEL_MAP_PATH)
+
+@tf.function
+def detect_fn(image):
+    image, shapes = detection_model.preprocess(image)
+    prediction_dict = detection_model.predict(image, shapes)
+    detections = detection_model.postprocess(prediction_dict, shapes)
+    return detections
+
+@app.get("/")
+def read_root():
+    return {"status": "Model is Online", "model_repo": REPO_ID}
+
+@app.post("/predict")
+async def predict(file: UploadFile = File(...)):
+    # Read Image
+    contents = await file.read()
+    image_pil = Image.open(io.BytesIO(contents)).convert("RGB")
+    image_np = np.array(image_pil)
+
+    # Step 1: CNN Filter (Check for damage)
+    img_cnn = image_pil.resize((64, 64))
+    x = tf.keras.preprocessing.image.img_to_array(img_cnn)
+    x = np.expand_dims(x, axis=0)
+
+    cnn_pred = cnn_filter.predict(x)
+    is_damage_labels = ['Clear', 'Damaged']
+    status = is_damage_labels[np.argmax(cnn_pred)]
+
+    results = {"status": status}
+
+    # Step 2: Object Detection (If damaged)
+    if status == 'Damaged':
+        input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
+        detections = detect_fn(input_tensor)
+
+        # Format detections for JSON response
+        scores = detections['detection_scores'][0].numpy()
+        classes = detections['detection_classes'][0].numpy().astype(int)
+        boxes = detections['detection_boxes'][0].numpy()
+
+        detected_objects = []
+        for i in range(len(scores)):
+            if scores[i] > 0.4:  # min_score_thresh
+                detected_objects.append({
+                    "label": category_index.get(classes[i] + 1, {}).get('name', 'unknown'),
+                    "score": float(scores[i]),
+                    "box": boxes[i].tolist()
+                })
+        results["detections"] = detected_objects
+
+    return results
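For reference, a minimal client for the /predict route above; a sketch assuming the app is reachable on port 7860 (the port in the Dockerfile's CMD) and using "car.jpg" as a placeholder image path. Note that `requests` is not in requirements.txt; it is only needed on the client side.

    import requests

    # Hypothetical client for the /predict endpoint defined above.
    URL = "http://localhost:7860/predict"  # placeholder host; a deployed Space URL would differ

    with open("car.jpg", "rb") as f:  # placeholder image path
        resp = requests.post(URL, files={"file": ("car.jpg", f, "image/jpeg")})

    resp.raise_for_status()
    print(resp.json())
    # Response shape follows the handler: {"status": "Clear"} or
    # {"status": "Damaged", "detections": [{"label": ..., "score": ...,
    #  "box": [ymin, xmin, ymax, xmax]}]} with normalized box coordinates.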
requirements.txt ADDED
@@ -0,0 +1,21 @@
+tensorflow==2.13.0
+tf-keras==2.15.0
+keras==2.13.1
+numpy==1.24.3
+pandas==2.0.3
+pillow==10.4.0
+opencv-python-headless==4.10.0.84
+scipy==1.10.1
+protobuf==3.20.3
+pycocotools==2.0.7
+lvis==0.5.3
+gin-config==0.5.0
+tf-slim==1.1.0
+pyyaml==6.0.2
+matplotlib==3.2.0
+lxml==5.3.0
+fastapi
+uvicorn
+python-multipart
+huggingface_hub
+wget
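One thing to watch with these pins: the TFOD `pip install .` step in the Dockerfile resolves its own dependency set on top of them, so the final environment may not match the file exactly. A quick post-build sanity check (a sketch, not part of the commit):

    # Hypothetical check that the pinned stack survived dependency
    # resolution by the TFOD setup.py install.
    import numpy as np
    import tensorflow as tf

    print("tensorflow", tf.__version__)  # expect 2.13.0 per the pin above
    print("numpy", np.__version__)       # expect 1.24.3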