Spaces:
Sleeping
Sleeping
File size: 3,337 Bytes
e6db51b f71d52a e6db51b e60e933 e6db51b da6b7c2 e60e933 e6db51b f71d52a e6db51b a2e6a14 e6db51b e4a4e90 de58829 e4a4e90 e6db51b 96021e2 e6db51b 96021e2 e6db51b f71d52a e6db51b f71d52a e6db51b f71d52a e6db51b f71d52a e6db51b f71d52a e6db51b 543f920 f71d52a 454fb25 f544e8f | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import joblib
import pandas as pd
from flask import Flask, request, jsonify
# Initialize the Flask app; the display name is used in Flask's logs/repr.
SuperKart_predictor_api = Flask ("SuperKart Predictor")
# Load the trained SuperKart sales-prediction pipeline from disk once at startup
# (not "churn" — the joblib file is the sales model). Loaded at module import
# time so every request handler reuses the same in-memory model.
model = joblib.load ("SuperKart_prediction_model_v1_0.joblib")
# Define a route for the home page
@SuperKart_predictor_api.get ('/')
def home ():
    """Health-check / landing route: confirms the service is up."""
    welcome_message = "Welcome to the Super Kart Sales Prediction!"
    return welcome_message
# Define an endpoint to predict sales for Super Kart
@SuperKart_predictor_api.post ('/v1/SuperKartSales')
def predict_sales ():
    """Predict sales for a single SuperKart record.

    Expects a JSON body containing the nine feature fields listed below and
    returns ``{"SalesPrediction": <value>}``. A missing body or missing field
    yields a 400 response instead of an unhandled 500.
    """
    sales_data = request.get_json ()
    # Features the trained pipeline expects, in the order used at training time.
    feature_names = [
        'Product_Weight',
        'Product_Sugar_Content',
        'Product_Allocated_Area',
        'Product_Type',
        'Product_MRP',
        'Store_Id',
        'Store_Size',
        'Store_Location_City_Type',
        'Store_Type',
    ]
    try:
        data_info = {name: sales_data[name] for name in feature_names}
    except (TypeError, KeyError) as exc:
        # TypeError: no JSON body at all; KeyError: a required field is absent.
        return jsonify ({'error': f'missing or invalid input: {exc}'}), 400
    # Single-row DataFrame in the column layout the pipeline was fitted on.
    input_data = pd.DataFrame ([data_info])
    # Enforce numeric dtypes: JSON clients may send these as strings.
    numeric_cols = ["Product_Weight", "Product_Allocated_Area", "Product_MRP"]
    input_data[numeric_cols] = input_data[numeric_cols].astype (float)
    # model.predict returns an array; take the lone prediction as a plain scalar.
    predicted_sales = model.predict (input_data).tolist ()[0]
    return jsonify ({'SalesPrediction': predicted_sales})
# Define an endpoint to predict sales for a batch of data
# Here we assume the uploaded data contains the same columns as the data provided for this project
@SuperKart_predictor_api.post ('/v1/SuperKartBatchSales')
def predict_churn_batch ():
    """Predict sales for every row of an uploaded CSV file.

    The CSV must carry the same feature columns used at training time. If a
    Product_Id column is present it keys the response; otherwise the plain
    row index does.
    """
    # Werkzeug raises a 400 automatically if no 'file' part was uploaded.
    uploaded = request.files ['file']
    frame = pd.read_csv (uploaded)
    has_ids = "Product_Id" in frame.columns
    # Product_Id is an identifier, not a feature — drop it before predicting.
    features = frame.drop(columns=["Product_Id"]) if has_ids else frame
    predictions = model.predict (features).tolist ()
    # Key each prediction by Product_Id when available, else by row index.
    keys = frame["Product_Id"].tolist () if has_ids else frame.index.tolist ()
    return jsonify (dict(zip(keys, predictions)))
# Run the Flask app
# Entry point: start the development server when run directly.
if __name__ == "__main__":
    import os
    # Hugging Face Spaces injects PORT; fall back to 7860 for local runs.
    listen_port = int (os.environ.get("PORT", 7860))
    # Bind to all interfaces so the containerized app is reachable externally.
    SuperKart_predictor_api.run(host="0.0.0.0", port=listen_port)
|