kiddoos committed on
Commit
780800d
·
1 Parent(s): f1d36e4

Add application file

Browse files
Files changed (3) hide show
  1. Dockerfile +15 -0
  2. app.py +48 -0
  3. requirements.txt +7 -0
Dockerfile ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

# Set up a new user to avoid root permission issues
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:${PATH}"

WORKDIR /app

# Install dependencies BEFORE copying the application code so Docker's layer
# cache is reused when only source files change (the original copied the whole
# tree first, invalidating the pip layer on every code edit).
COPY --chown=user requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the application code on top of the cached dependency layer.
COPY --chown=user . /app

# Port 7860 is the default for HF Spaces
EXPOSE 7860
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, UploadFile, File
from PIL import Image
import torch
from torchvision import models, transforms
import io
# Hoisted from mid-file to the top import group per PEP 8 (imports at top of
# file, one group, before any executable statements).
import json
from urllib.request import urlopen

app = FastAPI()

# 1. Load the pre-trained MobileNetV2 model once at startup.
#    eval() switches off dropout / batch-norm updates for inference.
model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.DEFAULT)
model.eval()

# 2. Standard ImageNet preprocessing pipeline expected by MobileNetV2:
#    resize, center-crop to 224x224, tensorize, normalize with ImageNet stats.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# 3. Load the ImageNet class-index mapping ({"0": ["n01440764", "tench"], ...}).
#    NOTE(review): this is a network fetch at import time — if the URL is
#    unreachable the app fails to start; consider vendoring the JSON file.
LABELS_URL = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_class_index.json"
labels = json.load(urlopen(LABELS_URL))
26
+
27
@app.get("/")
def home():
    """Root endpoint: simple liveness message pointing callers at /predict."""
    greeting = {"message": "MobileNet API is running! Send a POST request to /predict"}
    return greeting
30
+
31
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded image with MobileNetV2.

    Returns the top-1 ImageNet label and its softmax confidence as a
    percentage string, e.g. {"prediction": "tench", "confidence": "97.12%"}.
    """
    # Decode the raw upload into an RGB PIL image.
    raw = await file.read()
    image = Image.open(io.BytesIO(raw)).convert("RGB")

    # Build a (1, 3, 224, 224) batch and run inference without autograd.
    batch = preprocess(image).unsqueeze(0)
    with torch.no_grad():
        logits = model(batch)

    # Softmax over classes, scaled to percentages; argmax picks the winner.
    probs = torch.nn.functional.softmax(logits, dim=1)[0] * 100
    best = logits.argmax(dim=1).item()
    label = labels[str(best)][1]
    confidence = probs[best].item()

    return {"prediction": label, "confidence": f"{confidence:.2f}%"}
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cpu
2
+ torch
3
+ torchvision
4
+ fastapi
5
+ uvicorn
6
+ pillow
7
+ python-multipart