Upload 4 files
Browse files- Dockerfile +27 -0
- app_D.py +75 -0
- requirements.txt +6 -0
- waste_classifier_v2_clean.h5 +3 -0
Dockerfile
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use an official Python runtime as a parent image
FROM python:3.9-slim

# Set the working directory in the container
WORKDIR /app

# Copy the requirements file into the container
COPY requirements.txt .

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code into the container
COPY . .

# Make port 7860 available to the world outside this container
EXPOSE 7860

# Define environment variable that Hugging Face uses
# (key=value form; the space-separated ENV syntax is legacy/deprecated)
ENV PORT=7860

# Create the necessary folders for the app to store uploads
RUN mkdir -p static/uploads

# Run the Flask app using Gunicorn when the container launches.
# BUG FIX: the application file is app_D.py, so the Gunicorn import string
# must be "app_D:app" — "app:app" would crash at startup with
# "ModuleNotFoundError: No module named 'app'".
CMD ["gunicorn", "--bind", "0.0.0.0:7860", "app_D:app"]
app_D.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import sys

import tensorflow as tf
from flask import Flask, request, render_template, redirect, url_for
from werkzeug.utils import secure_filename

# Initialize the Flask application
app = Flask(__name__)

# --- Load the Clean, Compatible .h5 Model ---
# This model was saved with save_format='h5' for maximum compatibility.
MODEL_PATH = 'waste_classifier_v2_clean.h5'
try:
    model = tf.keras.models.load_model(MODEL_PATH)
    print("Image classification model loaded successfully!")
except Exception as e:
    # Abort startup: the app is useless without the model.
    # sys.exit(1) replaces the interactive-only exit() builtin (which the
    # site module provides and may be unavailable) and returns a non-zero
    # status so a container/orchestrator can see the failure.
    print(f"Error loading image model: {e}", file=sys.stderr)
    sys.exit(1)

# Define the class names in the correct order for the model's output
CLASS_NAMES = ['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash']
|
| 21 |
+
|
| 22 |
+
def preprocess_image(image_path):
    """Load an image from disk and turn it into a model-ready batch.

    The image is resized to 224x224, converted to a float array, given a
    leading batch axis, and scaled with the MobileNetV2-specific
    preprocessing so the input matches the format used during training.
    """
    pil_image = tf.keras.preprocessing.image.load_img(
        image_path, target_size=(224, 224)
    )
    batch = tf.expand_dims(
        tf.keras.preprocessing.image.img_to_array(pil_image), axis=0
    )
    # MobileNetV2 expects its own pixel scaling, applied here.
    return tf.keras.applications.mobilenet_v2.preprocess_input(batch)
|
| 32 |
+
|
| 33 |
+
@app.route('/', methods=['GET'])
def index():
    """Render the upload form (templates/index.html) for GET requests."""
    return render_template('index.html')
|
| 37 |
+
|
| 38 |
+
@app.route('/predict', methods=['POST'])
def predict():
    """Handle an image upload, classify it, and render the result.

    On success, re-renders index.html with prediction/confidence strings
    and the saved image path; redirects back to the form when the request
    carries no usable file.
    """
    # Check if a file was uploaded at all
    if 'file' not in request.files:
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        return redirect(request.url)

    # Sanitize the client-supplied name. secure_filename() can return an
    # empty string (e.g. a name consisting only of path separators), in
    # which case there is nothing safe to save — bail out instead of
    # attempting file.save('static/uploads/').
    filename = secure_filename(file.filename)
    if not filename:
        return redirect(request.url)

    # Make sure the upload folder exists even when the app is launched by
    # a WSGI server (e.g. gunicorn) that never runs the __main__ block.
    os.makedirs('static/uploads', exist_ok=True)
    filepath = os.path.join('static/uploads', filename)
    # NOTE(review): any file type is accepted and stored under static/
    # (publicly served) — consider whitelisting image extensions.
    file.save(filepath)

    # Preprocess the image and get a prediction
    preprocessed_image = preprocess_image(filepath)
    prediction = model.predict(preprocessed_image)

    # Decode the prediction: argmax picks the class index, max its score
    predicted_class_index = tf.argmax(prediction[0]).numpy()
    predicted_class = CLASS_NAMES[predicted_class_index]
    confidence = tf.reduce_max(prediction[0]).numpy() * 100

    # Pass the results to the HTML template
    return render_template('index.html',
                           prediction=f'Prediction: {predicted_class}',
                           confidence=f'Confidence: {confidence:.2f}%',
                           uploaded_image=filepath)
|
| 69 |
+
|
| 70 |
+
if __name__ == '__main__':
    # Create the upload destination up front so file.save() never fails
    # on a missing directory during a local development run.
    os.makedirs('static/uploads', exist_ok=True)
    # Honor the PORT environment variable (used by deployment services
    # such as Hugging Face Spaces); fall back to 7860 locally.
    listen_port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=listen_port)
|
| 75 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Flask
|
| 2 |
+
tensorflow==2.16.1
|
| 3 |
+
tf-keras==2.16.0
|
| 4 |
+
numpy<2.0
|
| 5 |
+
gradio
|
| 6 |
+
gunicorn
|
waste_classifier_v2_clean.h5
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4b7d18edfcba92d50b60e8bf6b9d0bc778d79ef5f7aed0e36ea22a56f031516e
|
| 3 |
+
size 21730560
|