Upload folder using huggingface_hub

- Dockerfile +16 -0
- app.py +97 -0
- gradient_tuned.joblib +3 -0
- requirements.txt +10 -0
Dockerfile
ADDED
@@ -0,0 +1,16 @@
+FROM python:3.9-slim
+
+# Set the working directory inside the container
+WORKDIR /app
+
+# Copy all files from the current directory to the container's working directory
+COPY . .
+
+# Install dependencies from the requirements file without using cache to reduce image size
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Expose API port
+EXPOSE 7860
+
+# Start Flask API with Gunicorn (4 workers, bind to 7860)
+CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:sales_prediction_api"]
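Once the image is built and a container is running, Gunicorn should serve the Flask app on the exposed port 7860. A minimal health-check sketch against the root endpoint, assuming the API is reachable at http://localhost:7860 (host and port are deployment assumptions, not part of this commit):

import requests  # requests is pinned in requirements.txt

BASE_URL = "http://localhost:7860"  # assumed local address; a deployed Space has its own URL

# Call the root endpoint defined in app.py and confirm the service is up.
response = requests.get(f"{BASE_URL}/")
print(response.status_code, response.text)  # expect 200 and the welcome message from home()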
app.py
ADDED
@@ -0,0 +1,97 @@
+import joblib
+import pandas as pd
+from flask import Flask, request, jsonify
+
+# -------------------------------
+# Initialize Flask App
+# -------------------------------
+sales_prediction_api = Flask('Superkart Sales Prediction')
+
+# -------------------------------
+# Load the trained Gradient Boosting model
+# -------------------------------
+model = joblib.load("gradient_tuned.joblib")
+
+# -------------------------------
+# Root endpoint (Health check)
+# -------------------------------
+@sales_prediction_api.get("/")
+def home():
+    """Simple health check endpoint"""
+    return 'Welcome to the SuperKart Sales Prediction API'
+
+# -------------------------------
+# Single Prediction Endpoint
+# -------------------------------
+@sales_prediction_api.post("/v1/salesdata")
+def predict_sales():
+    """Predict sales for a single product/store record"""
+    try:
+        # Parse incoming JSON
+        sales_data = request.get_json()
+
+        # Prepare input dictionary with only the features used by the model
+        sample = {
+            'Product_Weight': sales_data['Product_Weight'],
+            'Product_Allocated_Area': sales_data['Product_Allocated_Area'],
+            'Product_MRP': sales_data['Product_MRP'],
+            'Product_Sugar_Content': sales_data['Product_Sugar_Content'],
+            'Product_Type': sales_data['Product_Type'],
+            'Store_Id': sales_data['Store_Id'],
+            'Store_Size': sales_data['Store_Size'],
+            'Store_Location_City_Type': sales_data['Store_Location_City_Type'],
+        }
+
+        # Convert to DataFrame
+        input_df = pd.DataFrame([sample])
+
+        # Predict sales
+        predicted_sales = model.predict(input_df)[0]
+        predicted_sales = round(float(predicted_sales), 2)
+
+        # Return JSON response
+        return jsonify({"predicted_sales": predicted_sales})
+
+    except Exception as e:
+        return jsonify({"error": str(e)})
+
+# -------------------------------
+# Batch Prediction Endpoint
+# -------------------------------
+@sales_prediction_api.post("/v1/salesdatabatch")
+def predict_sales_batch():
+    """Predict sales for multiple product/store records from a CSV file"""
+    try:
+        # Get uploaded CSV file
+        file = request.files['file']
+        input_df = pd.read_csv(file)
+
+        # Keep only the features used by the model
+        features = [
+            'Product_Weight', 'Product_Allocated_Area', 'Product_MRP',
+            'Product_Sugar_Content', 'Product_Type', 'Store_Id',
+            'Store_Size', 'Store_Location_City_Type'
+        ]
+        input_features = input_df[features]
+
+        # Predict sales
+        predicted_sales = model.predict(input_features)
+        predicted_sales = [round(float(sale), 2) for sale in predicted_sales]
+
+        # Attach predictions to Product IDs if available
+        if 'Product_Id' in input_df.columns:
+            product_ids = input_df['Product_Id'].tolist()
+            output_dict = dict(zip(product_ids, predicted_sales))
+        else:
+            output_dict = {"predicted_sales": predicted_sales}
+
+        return jsonify(output_dict)
+
+    except Exception as e:
+        return jsonify({"error": str(e)})
+
+# -------------------------------
+# Run Flask App
+# -------------------------------
+if __name__ == '__main__':
+    sales_prediction_api.run(debug=True)
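For reference, a sketch of how a client might call the two endpoints above, using the requests package from requirements.txt. The base URL, the feature values, and the sales_batch.csv file name are illustrative assumptions; real requests must supply categorical values the model was actually trained on.

import requests  # pinned in requirements.txt

BASE_URL = "http://localhost:7860"  # assumed local address; replace with the deployed Space URL

# Single-record prediction: placeholder feature values matching the keys read in predict_sales().
record = {
    "Product_Weight": 12.5,
    "Product_Allocated_Area": 0.05,
    "Product_MRP": 150.0,
    "Product_Sugar_Content": "Low Sugar",
    "Product_Type": "Snack Foods",
    "Store_Id": "OUT004",
    "Store_Size": "Medium",
    "Store_Location_City_Type": "Tier 2",
}
single = requests.post(f"{BASE_URL}/v1/salesdata", json=record)
print(single.json())  # e.g. {"predicted_sales": ...}

# Batch prediction: upload a CSV containing the same feature columns (file name is hypothetical).
with open("sales_batch.csv", "rb") as f:
    batch = requests.post(f"{BASE_URL}/v1/salesdatabatch", files={"file": f})
print(batch.json())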
gradient_tuned.joblib
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42b4d70fdf4fd9609e15aec0d5d8426f4a6b1d04b8d958f98de8c2227cfd097a
+size 1650258
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+pandas==2.2.2
+numpy==2.0.2
+scikit-learn==1.6.1
+xgboost==2.1.4
+joblib==1.4.2
+Werkzeug==2.2.2
+flask==2.2.2
+gunicorn==20.1.0
+requests==2.32.3
+uvicorn[standard]==0.32.0