Rahul Saini committed on
Commit
2df88c2
·
1 Parent(s): 01225fb

Add application file

Browse files
Files changed (5) hide show
  1. Dockerfile +11 -0
  2. app/food_model.py +18 -0
  3. app/fruit_model.py +17 -0
  4. app/main.py +23 -0
  5. requirements.txt +5 -0
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
# Slim Python base image keeps the final image small.
FROM python:3.10-slim

WORKDIR /app

# Install dependencies in their own layer first, so Docker's layer cache
# skips the reinstall when only application code changes.
COPY requirements.txt .
# --no-cache-dir avoids baking pip's download cache into the image layer.
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application package (app/main.py, app/food_model.py, ...).
COPY ./app /app/app

# Port the API server listens on (matches the uvicorn --port below).
EXPOSE 7860

CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
app/food_model.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import ViTFeatureExtractor, ViTForImageClassification
from PIL import Image
import torch

# Load the fine-tuned ViT checkpoint a single time at import, so every
# request reuses the same weights instead of re-downloading/re-loading them.
model_name = "DrishtiSharma/finetuned-ViT-Indian-Food-Classification-v3"
model = ViTForImageClassification.from_pretrained(model_name)
extractor = ViTFeatureExtractor.from_pretrained(model_name)


def classify_indian_food(image: Image.Image) -> str:
    """Return the predicted Indian-food class label for a PIL image."""
    # Convert the image into the tensor batch the ViT model expects.
    batch = extractor(images=image, return_tensors="pt")
    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model(**batch).logits
    # Single-image batch: the argmax over the class axis is the prediction.
    best_idx = int(logits.argmax(dim=-1))
    return model.config.id2label[best_idx]
app/fruit_model.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoModelForImageClassification, AutoImageProcessor
from PIL import Image
import torch

# Model, processor, and label list are initialized once when the module is
# imported; every request shares them.
model = AutoModelForImageClassification.from_pretrained(
    "jazzmacedo/fruits-and-vegetables-detector-36"
)
processor = AutoImageProcessor.from_pretrained(
    "jazzmacedo/fruits-and-vegetables-detector-36"
)
# id2label is index-ordered, so a plain list lets us look labels up by index.
labels = list(model.config.id2label.values())


def classify_food(image: Image.Image) -> str:
    """Return the predicted fruit/vegetable label for a PIL image."""
    # Preprocess into the tensor format the model expects.
    encoded = processor(images=image, return_tensors="pt")
    # Inference only — disable gradient tracking.
    with torch.no_grad():
        output = model(**encoded)
    winner = torch.argmax(output.logits, dim=1).item()
    return labels[winner]
app/main.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, UploadFile, File
from PIL import Image
import io
# Bug fix: the original imported `classify_food` from .food_model and
# `classify_fruit` from .fruit_model — neither name exists (food_model
# defines `classify_indian_food`, fruit_model defines `classify_food`),
# so the app failed with ImportError at startup. Import the real names.
from .food_model import classify_indian_food
from .fruit_model import classify_food

app = FastAPI()


@app.get("/")
def root():
    """Welcome/health endpoint."""
    return {"message": "Welcome to Fruit & Food Classifier API"}


@app.post("/predict/food")
async def predict_food(file: UploadFile = File(...)):
    """Classify an uploaded image with the Indian-food ViT model.

    Returns ``{"prediction": <label>}``.
    """
    # Decode the upload and normalize to RGB (models expect 3 channels).
    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    result = classify_indian_food(image)
    return {"prediction": result}


@app.post("/predict/fruit")
async def predict_fruit(file: UploadFile = File(...)):
    """Classify an uploaded image with the fruits-and-vegetables model.

    Returns ``{"prediction": <label>}``.
    """
    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    result = classify_food(image)
    return {"prediction": result}
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
fastapi
uvicorn
torch
transformers
Pillow
# Required by FastAPI for UploadFile / File(...) form parsing; without it
# the /predict endpoints fail at application startup.
python-multipart