# Orange Disease Detection API — FastAPI server wrapping a TFLite image classifier.
# NOTE: web-page scrape residue (Space status, file size, commit hashes, line-number
# gutter) removed from the top of this file; it was not part of the program.
import uvicorn
from fastapi import FastAPI, File, UploadFile
from PIL import Image
import numpy as np
import tensorflow as tf
import io
# FastAPI application instance; route decorators below register endpoints on it.
app = FastAPI()
# --- 1. CONFIGURATION ---
# Path to the TFLite model file, expected alongside this script at startup.
MODEL_PATH = "Orangemodel.tflite"
# ⚠️ IMPORTANT: You must list your classes here in alphabetical order!
# (TFLite does not store these names, so you must type them manually)
# Index order must match the model's output vector — presumably the alphabetical
# order used by the training data loader; TODO confirm against training code.
CLASS_NAMES = ['Citrus canker', 'Citrus greening', 'Citrus mealybugs', 'Die back', 'Foliage damaged', 'Healthy leaf', 'Powdery mildew', 'Shot hole', 'Spiny whitefly', 'Yellow dragon', 'Yellow leaves']
# --- 2. LOAD TFLITE MODEL ---
# We use the Interpreter, which is like a lightweight player for the model.
# Loaded once at import time so every request reuses the same interpreter.
interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
interpreter.allocate_tensors()
# Get input and output details so we know how to talk to the model
# (tensor indices, shapes, and dtypes for set_tensor/get_tensor calls).
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
@app.get("/")
def home():
    """Health-check endpoint: confirms the service is up and responding."""
    return {"message": "Orange Disease Detection API (TFLite Version) is Running!"}
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded orange-leaf image with the TFLite model.

    Args:
        file: multipart image upload (any format Pillow can open).

    Returns:
        dict with "class" (the top CLASS_NAMES label) and "confidence"
        (the model's highest softmax score as a float).
    """
    # NOTE(review): the module-level `interpreter` is shared across requests;
    # tf.lite.Interpreter is not thread-safe, so concurrent calls could
    # interleave set_tensor/invoke — confirm the deployment is single-worker
    # or add a lock if this matters in production.

    # --- 3. PREPROCESS IMAGE ---
    # Read the file uploaded by the user.
    image_data = await file.read()
    # Force RGB so grayscale/RGBA uploads still produce 3 channels.
    image = Image.open(io.BytesIO(image_data)).convert("RGB")
    # Resize to 224x224 (the input size MobileNetV2 expects).
    image = image.resize((224, 224))
    # Convert to array, normalize to [0, 1] just like in training, add the
    # batch dimension -> (1, 224, 224, 3). TFLite requires float32 input.
    input_data = np.asarray(image, dtype=np.float32) / 255.0
    input_data = np.expand_dims(input_data, axis=0)

    # --- 4. RUN INFERENCE ---
    # Feed the image tensor to the interpreter and execute the graph.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # Retrieve the output scores (one row per batch item).
    output_data = interpreter.get_tensor(output_details[0]['index'])

    # --- 5. PROCESS RESULTS ---
    # argmax picks the winning class index; max is its confidence score.
    scores = output_data[0]
    prediction = int(np.argmax(scores))
    confidence = float(np.max(scores))
    predicted_class = CLASS_NAMES[prediction]
    return {
        "class": predicted_class,
        "confidence": confidence
    }