# Superkart sales forecast predictor — Flask inference service
# (scraped from a Hugging Face Space; "Spaces / Sleeping" status lines removed)
# --- Imports -----------------------------------------------------------------
import os

import joblib  # deserializes the trained model artifact
import numpy as np
import pandas as pd  # tabular handling of incoming records
from flask import Flask, jsonify, request  # web framework for the API

# --- Application and model setup --------------------------------------------
# Create the Flask application object that the route handlers attach to.
sales_forecast_predictor_api = Flask("Superkart sales forecast predictor")

# Load the trained model once at import time. The path is resolved relative
# to this file so the service works regardless of the current working directory.
model_path = os.path.join(os.path.dirname(__file__), "store_sales_prediction_model_v1_0.joblib")
model = joblib.load(model_path)
# Define a route for the home page (GET request)
# NOTE(review): the @route decorators appear to have been lost in extraction;
# "/" is inferred from the "home page" comment — confirm against the deployed app.
@sales_forecast_predictor_api.route("/", methods=["GET"])
def home():
    """Return a plain-text welcome message confirming the service is up."""
    return "Welcome to the Superkart sales forecast predictor!"
# Define an endpoint for single prediction (POST request)
@sales_forecast_predictor_api.route("/v1/sales", methods=["POST"])
def predict_sales_single():
    """
    Handle POST requests to the '/v1/sales' endpoint.

    Expects a JSON payload with product-store details and returns the
    predicted sales total, echoing back the IDs when both are supplied.
    """
    # Parse the JSON body of the request.
    property_data = request.get_json()

    # Wrap the single record in a one-row DataFrame for the model.
    df = pd.DataFrame([property_data])

    # Keep the identifiers for the response (if present); they are not features.
    product_id = property_data.get("Product_Id")
    store_id = property_data.get("Store_Id")

    # Drop the ID columns before prediction; errors="ignore" tolerates absence.
    features_df = df.drop(columns=["Product_Id", "Store_Id"], errors="ignore")

    # Predict and round to 2 decimal places for a clean monetary value.
    predicted_sales = round(float(model.predict(features_df)[0]), 2)

    # Build the response payload.
    response = {
        "Predicted_Product_Store_Sales_Total": predicted_sales
    }
    # Echo the IDs back only when both were provided. Using `is not None`
    # (rather than truthiness) keeps legitimate falsy IDs such as 0 or "".
    if product_id is not None and store_id is not None:
        response["Product_Id"] = product_id
        response["Store_Id"] = store_id
    return jsonify(response)
# Define an endpoint for batch prediction (POST request)
@sales_forecast_predictor_api.route("/v1/salesbatch", methods=["POST"])
def predict_sales_batch():
    """
    Handle POST requests to the '/v1/salesbatch' endpoint.

    Expects an uploaded CSV file (form field 'file') containing multiple
    product-store records. Returns predicted sales for all rows, keyed by
    "<Product_Id>_<Store_Id>" when both ID columns exist, else by row index.
    """
    # Read the uploaded CSV into a DataFrame.
    file = request.files['file']
    df = pd.read_csv(file)

    # Build "<product>_<store>" reference keys when both ID columns are present.
    ids = None
    if "Product_Id" in df.columns and "Store_Id" in df.columns:
        ids = df["Product_Id"].astype(str) + "_" + df["Store_Id"].astype(str)

    # Drop the ID columns before prediction (ignored if already absent).
    features_df = df.drop(columns=["Product_Id", "Store_Id"], errors="ignore")

    # Predict for every row and round to 2 decimal places.
    predicted_sales = [round(float(val), 2) for val in model.predict(features_df)]

    # Key the response by ID pair when available, otherwise by row position.
    if ids is not None:
        output_dict = dict(zip(ids, predicted_sales))
    else:
        output_dict = {"Row_" + str(i): val for i, val in enumerate(predicted_sales)}
    return jsonify(output_dict)
# Start the development server when this module is executed directly.
if __name__ == "__main__":
    # NOTE(review): debug=True enables the reloader and interactive debugger —
    # intended for local development only; disable before production deployment.
    sales_forecast_predictor_api.run(debug=True)