Upload folder using huggingface_hub
Browse files
- Dockerfile +1 -1
- app.py +11 -11
Dockerfile
CHANGED
|
@@ -13,4 +13,4 @@ RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
|
| 13 |
# - `-w 4`: Uses 4 worker processes for handling requests
|
| 14 |
# - `-b 0.0.0.0:7860`: Binds the server to port 7860 on all network interfaces
|
| 15 |
# - `app:app`: Runs the Flask app (assuming `app.py` contains the Flask instance named `app`)
|
| 16 |
-
CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:app"]
|
|
|
|
| 13 |
# - `-w 4`: Uses 4 worker processes for handling requests
|
| 14 |
# - `-b 0.0.0.0:7860`: Binds the server to port 7860 on all network interfaces
|
| 15 |
# - `app:app`: Runs the Flask app (assuming `app.py` contains the Flask instance named `app`)
|
| 16 |
+
CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:store_sales_predictor_api"]
|
app.py
CHANGED
|
@@ -24,7 +24,7 @@ def home():
|
|
| 24 |
def predict_store_sales():
|
| 25 |
"""
|
| 26 |
This function handles POST requests to the '/v1/storeSales' endpoint.
|
| 27 |
-
It expects a JSON payload containing
|
| 28 |
the predicted store sales as a JSON response.
|
| 29 |
"""
|
| 30 |
# Get the JSON data from the request body
|
|
@@ -32,15 +32,15 @@ def predict_store_sales():
|
|
| 32 |
|
| 33 |
# Extract relevant features from the JSON data
|
| 34 |
sample = {
|
| 35 |
-
'
|
| 36 |
-
'
|
| 37 |
-
'
|
| 38 |
-
'
|
| 39 |
-
'
|
| 40 |
-
'
|
| 41 |
-
'
|
| 42 |
-
'
|
| 43 |
-
'
|
| 44 |
}
|
| 45 |
|
| 46 |
# Convert the extracted data into a Pandas DataFrame
|
|
@@ -82,7 +82,7 @@ def predict_store_sales_batch():
|
|
| 82 |
predicted_sales = [round(float(np.exp(store_sales)), 2) for log_price in predicted_store_sales]
|
| 83 |
|
| 84 |
# Create a dictionary of predictions with store IDs as keys
|
| 85 |
-
store_ids = input_data['id'].tolist() # Assuming 'id' is the
|
| 86 |
output_dict = dict(zip(store_ids, predicted_sales)) # Use actual sales
|
| 87 |
|
| 88 |
# Return the predictions dictionary as a JSON response
|
|
|
|
| 24 |
def predict_store_sales():
|
| 25 |
"""
|
| 26 |
This function handles POST requests to the '/v1/storeSales' endpoint.
|
| 27 |
+
It expects a JSON payload containing product details and returns
|
| 28 |
the predicted store sales as a JSON response.
|
| 29 |
"""
|
| 30 |
# Get the JSON data from the request body
|
|
|
|
| 32 |
|
| 33 |
# Extract relevant features from the JSON data
|
| 34 |
sample = {
|
| 35 |
+
'Product_Weight': sales_data['Product_Weight'],
|
| 36 |
+
'Product_Sugar_Content': sales_data['Product_Sugar_Content'],
|
| 37 |
+
'Product_Allocated_Area': sales_data['Product_Allocated_Area'],
|
| 38 |
+
'Product_Type': sales_data['Product_Type'],
|
| 39 |
+
'Product_MRP': sales_data['Product_MRP'],
|
| 40 |
+
'Store_Id': sales_data['Store_Id'],
|
| 41 |
+
'Store_Size': sales_data['Store_Size'],
|
| 42 |
+
'Store_Location_City_Type': sales_data['Store_Location_City_Type'],
|
| 43 |
+
'Store_Type': sales_data['Store_Type']
|
| 44 |
}
|
| 45 |
|
| 46 |
# Convert the extracted data into a Pandas DataFrame
|
|
|
|
| 82 |
predicted_sales = [round(float(np.exp(store_sales)), 2) for log_price in predicted_store_sales]
|
| 83 |
|
| 84 |
# Create a dictionary of predictions with store IDs as keys
|
| 85 |
+
store_ids = input_data['id'].tolist() # Assuming 'id' is the store ID column
|
| 86 |
output_dict = dict(zip(store_ids, predicted_sales)) # Use actual sales
|
| 87 |
|
| 88 |
# Return the predictions dictionary as a JSON response
|