Upload folder using huggingface_hub

Files changed:
- Dockerfile +1 -1
- app.py +3 -17
Dockerfile
CHANGED

@@ -6,5 +6,5 @@ COPY . .
 
 RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
-CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "
+CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:superkart_sales_api"]
 
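The new CMD uses gunicorn's `module:object` convention: it imports the module `app` (app.py) and serves the WSGI callable named `superkart_sales_api`. As a rough sketch, the top of app.py presumably looks like the following — only the object name `superkart_sales_api` and the use of `request`, `jsonify`, `pd`, and `model` are confirmed by the diff below; the exact imports and model-loading line are assumptions:

# Sketch of the app object gunicorn loads via "app:superkart_sales_api".
from flask import Flask, request, jsonify
import pandas as pd
import joblib  # assumption: any pickle-compatible loader would do

superkart_sales_api = Flask(__name__)  # name must match the gunicorn target
model = joblib.load("model.joblib")    # hypothetical model path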
app.py
CHANGED

@@ -28,10 +28,8 @@ def predict_sales():
     Expects JSON payload with product + store details.
     Returns the predicted store sales as JSON.
     """
-    # Get the JSON data from the request body
     data = request.get_json()
 
-    # Extract relevant features from the JSON data
     sample = {
         'Product_Id': data['Product_Id'],
         'Product_Weight': data['Product_Weight'],

@@ -46,16 +44,10 @@ def predict_sales():
         'Store_Type': data['Store_Type']
     }
 
-    # Convert the extracted data into a Pandas DataFrame
     input_data = pd.DataFrame([sample])
-
-    # Make prediction
     predicted_sales = model.predict(input_data)[0]
-
-    # Convert to Python float (avoid NumPy serialization issues)
     predicted_sales = round(float(predicted_sales), 2)
 
-    # Return the predicted sales as JSON
     return jsonify({'Predicted Store Sales': predicted_sales})
 
 # Define an endpoint for batch predictions (CSV upload)

@@ -66,24 +58,18 @@ def predict_sales_batch():
     Expects a CSV file with product + store details.
     Returns predicted sales for each row as JSON dictionary.
     """
-    # Get uploaded CSV file
     file = request.files['file']
-
-    # Read into DataFrame
     input_data = pd.read_csv(file)
 
-    # Make predictions
     predictions = model.predict(input_data).tolist()
-
-    # Round predictions
     predictions = [round(float(p), 2) for p in predictions]
 
-    # Use (Product_Id, Store_Id) tuple as unique key
     ids = list(zip(input_data['Product_Id'], input_data['Store_Id']))
     output = {str(k): v for k, v in zip(ids, predictions)}
 
     return jsonify(output)
 
-# Run the Flask application
+# Run the Flask application (for local debugging)
 if __name__ == '__main__':
-    superkart_sales_api.run(debug=True)
+    superkart_sales_api.run(debug=True, host="0.0.0.0", port=7860)
+
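For reference, a hedged client-side sketch of calling the single-prediction endpoint. The route path "/predict" and the example field values are assumptions (the @route decorators and the middle feature fields fall outside the diff context above); only Product_Id, Product_Weight, and Store_Type appear in the hunks, and the port comes from the Dockerfile CMD:

# Hypothetical client call; route path "/predict" is an assumption.
import requests

payload = {
    "Product_Id": "P001",       # example value, not from the source
    "Product_Weight": 12.5,     # example value, not from the source
    # ... the remaining feature fields elided in the diff go here ...
    "Store_Type": "Supermarket Type1",
}
resp = requests.post("http://localhost:7860/predict", json=payload)
print(resp.json())  # e.g. {"Predicted Store Sales": 1234.56}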
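Likewise for the batch endpoint: the handler reads a multipart upload under the field name "file" (confirmed by request.files['file']) and keys each prediction by the stringified (Product_Id, Store_Id) tuple. The route path "/predict_batch" is an assumption:

# Hypothetical batch call; route path "/predict_batch" is an assumption.
import requests

with open("products.csv", "rb") as f:  # CSV with one row per product/store pair
    resp = requests.post(
        "http://localhost:7860/predict_batch",
        files={"file": f},             # field name matches request.files['file']
    )
print(resp.json())  # e.g. {"('P001', 'S001')": 1234.56, ...}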