Upload folder using huggingface_hub
Browse files
app.py
CHANGED
|
@@ -70,48 +70,33 @@ def predict_Product_Sales():
|
|
| 70 |
|
| 71 |
|
| 72 |
# Define an endpoint for batch prediction (POST request)
|
|
|
|
| 73 |
@product_sales_predictor_api.post('/v1/ProductBatchSales')
|
| 74 |
def predict_sales_batch():
|
| 75 |
-
"""
|
| 76 |
-
This function handles POST requests to the '/v1/ProductBatchSales' endpoint.
|
| 77 |
-
It expects a CSV file containing details for multiple products
|
| 78 |
-
and returns the predicted product sales prices as a dictionary in the JSON response.
|
| 79 |
-
"""
|
| 80 |
-
|
| 81 |
print(">>> Batch endpoint invoked!", flush=True)
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
file = request.files['file']
|
| 86 |
-
|
| 87 |
-
# Read the CSV file into a Pandas DataFrame
|
| 88 |
-
input_data = pd.read_csv(file)
|
| 89 |
-
|
| 90 |
-
# Save Product_Id for mapping in output
|
| 91 |
-
product_ids = input_data['Product_Id'].tolist()
|
| 92 |
-
|
| 93 |
-
# Columns that your model does NOT need
|
| 94 |
-
drop_cols = [
|
| 95 |
-
'Product_Id',
|
| 96 |
-
'Store_Id',
|
| 97 |
-
'Store_Establishment_Year',
|
| 98 |
-
'Product_Store_Sales_Total' # target column
|
| 99 |
-
]
|
| 100 |
|
| 101 |
-
|
| 102 |
-
|
| 103 |
|
| 104 |
-
|
| 105 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
|
| 107 |
-
|
| 108 |
-
|
| 109 |
|
| 110 |
-
|
| 111 |
-
output = dict(zip(product_ids, predictions))
|
| 112 |
|
| 113 |
-
|
| 114 |
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
|
|
|
# Define an endpoint for batch prediction (POST request)
@product_sales_predictor_api.post('/v1/ProductBatchSales')
def predict_sales_batch():
    """Handle POST requests to the '/v1/ProductBatchSales' endpoint.

    Expects a multipart upload under the 'file' key containing a CSV of
    product records. Returns a JSON body with the predicted sales under
    the 'predictions' key, a 400 JSON error if no file was uploaded, or
    a 500 JSON error for any other failure.
    """
    print(">>> Batch endpoint invoked!", flush=True)
    try:
        file = request.files.get('file')
        print(">>> File received:", file is not None, flush=True)

        # .get() returns None for a missing upload instead of raising
        # KeyError; without this guard, pd.read_csv(None) fails and the
        # client sees a generic 500 for what is really a client error.
        if file is None:
            return jsonify({"error": "missing 'file' upload"}), 400

        # Read the uploaded CSV into a DataFrame.
        input_data = pd.read_csv(file)
        print(">>> CSV loaded. Columns:", list(input_data.columns), flush=True)

        # Identifier and target columns the model was not trained on.
        drop_cols = [
            'Product_Id',
            'Store_Id',
            'Store_Establishment_Year',
            'Product_Store_Sales_Total'
        ]
        # Drop only the columns actually present so CSVs that omit some
        # of them (e.g. without the target column) are still accepted.
        input_data = input_data.drop(columns=[c for c in drop_cols if c in input_data.columns])
        print(">>> After column drop:", list(input_data.columns), flush=True)

        predictions = model.predict(input_data)
        # Cast numpy scalars to plain floats so jsonify can serialize them.
        predictions = [float(p) for p in predictions]
        print(">>> Predictions completed", flush=True)

        return jsonify({"predictions": predictions})
    except Exception as e:
        # Endpoint boundary: report any unexpected failure as a JSON 500
        # instead of letting Flask emit an HTML error page.
        print(">>> ERROR:", str(e), flush=True)
        return jsonify({"error": str(e)}), 500