Upload folder using huggingface_hub

- Dockerfile +6 -4
- app.py +35 -39
Dockerfile
CHANGED

@@ -6,9 +6,11 @@ WORKDIR /app
 # Copy all files from the current directory to the container's working directory
 COPY . .
 
-# Install dependencies from the requirements file without using cache
-RUN pip install --no-cache-dir -r requirements.txt
+# Install dependencies from the requirements file without using cache to reduce image size
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 # Define the command to start the application using Gunicorn with 4 worker processes
+# - `-w 4`: Uses 4 worker processes for handling requests
+# - `-b 0.0.0.0:7860`: Binds the server to port 7860 on all network interfaces
+# - `app:super_kart_api`: Runs the Flask instance named `super_kart_api` defined in `app.py`
+CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:super_kart_api"]
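Once this image is built and a container is running with port 7860 published to the host, a quick smoke test confirms that Gunicorn is serving the app. This is a minimal sketch, assuming a local container, a direct 7860-to-7860 port mapping, and the `requests` package; none of these are part of the commit.

import requests

# Assumes the container maps host port 7860 to the container port 7860
# that Gunicorn binds to in the CMD above.
response = requests.get("http://localhost:7860/")
print(response.status_code)  # expect 200 once the server is up
print(response.text)         # welcome message returned by the Flask app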
app.py
CHANGED

@@ -2,70 +2,66 @@ import joblib
 import pandas as pd
 from flask import Flask, request, jsonify
 
+# Initialize Flask app
+super_kart_api = Flask("Super Kart Product Sales Predictor")
 
+# Load the trained model
+model = joblib.load("super_kart_model_v1_0.joblib")
 
 # Define a route for the home page
+@super_kart_api.get('/')
 def home():
+    return "Welcome to the Super Kart Product Sales Predictor API!"
 
+# Define an endpoint to predict price for a single product
+@super_kart_api.post('/v1/product')
+def predict_product_sales_price():
     # Get JSON data from the request
+    product_data = request.get_json()
 
+    # Extract relevant product features from the input data
     sample = {
+        'Product_Weight': product_data['Product_Weight'],
+        'Product_Sugar_Content': product_data['Product_Sugar_Content'],
+        'Product_Allocated_Area': product_data['Product_Allocated_Area'],
+        'Product_Type': product_data['Product_Type'],
+        'Product_MRP': product_data['Product_MRP'],
+        'Store_Id': product_data['Store_Id'],
+        'Store_Establishment_Year': product_data['Store_Establishment_Year'],
+        'Store_Size': product_data['Store_Size'],
+        'Store_Location_City_Type': product_data['Store_Location_City_Type'],
+        'Store_Type': product_data['Store_Type']
     }
 
     # Convert the extracted data into a DataFrame
     input_data = pd.DataFrame([sample])
 
+    # Make a prediction using the trained model
     prediction = model.predict(input_data).tolist()[0]
 
-    # Map prediction result to a human-readable label
-    prediction_label = "churn" if prediction == 1 else "not churn"
     # Return the prediction as a JSON response
+    return jsonify({'Predicted_Product_Sales': prediction})
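For reference, a minimal client sketch for the single-product endpoint. Only the field names come from the code above; every value in the payload is an illustrative placeholder, and the base URL assumes the service is running locally on port 7860.

import requests

# Hypothetical payload: field names match the endpoint, values are made up.
payload = {
    "Product_Weight": 12.5,
    "Product_Sugar_Content": "Low Sugar",
    "Product_Allocated_Area": 0.05,
    "Product_Type": "Frozen Foods",
    "Product_MRP": 150.0,
    "Store_Id": "OUT004",
    "Store_Establishment_Year": 2009,
    "Store_Size": "Medium",
    "Store_Location_City_Type": "Tier 2",
    "Store_Type": "Supermarket Type2"
}

response = requests.post("http://localhost:7860/v1/product", json=payload)
print(response.json())  # e.g. {'Predicted_Product_Sales': ...}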
+# Define an endpoint to predict product sales price for a batch of products
+@super_kart_api.post('/v1/productbatch')
+def predict_product_sales_batch():
     # Get the uploaded CSV file from the request
     file = request.files['file']
 
     # Read the file into a DataFrame
     input_data = pd.read_csv(file)
 
+    # Make predictions for the batch data
+    predictions = model.predict(input_data).tolist()
+
+    # Add predictions to the DataFrame
+    input_data['Predicted_Product_Sales'] = predictions
 
+    # Convert results to dictionary
+    result = input_data.to_dict(orient="records")
 
+    return jsonify(result)
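Similarly, a sketch of calling the batch endpoint by uploading a CSV file with `requests`. The file name is a placeholder; the CSV is assumed to contain one row per product with the same feature columns the model expects.

import requests

# 'products.csv' is a placeholder path; the file should hold one row per
# product with the feature columns expected by the model.
with open("products.csv", "rb") as f:
    response = requests.post(
        "http://localhost:7860/v1/productbatch",
        files={"file": ("products.csv", f, "text/csv")},
    )

print(response.json())  # list of records, each with a 'Predicted_Product_Sales' field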
 
 # Run the Flask app in debug mode
 if __name__ == '__main__':
+    super_kart_api.run(debug=True)
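The model artifact `super_kart_model_v1_0.joblib` itself is not part of this commit. Purely as an illustration of the kind of object the API expects, that is, something whose `predict` accepts the one-row DataFrame built in `/v1/product`, here is a hypothetical scikit-learn pipeline; the estimator choice and the categorical/numeric column split are assumptions, not the actual trained model.

import joblib
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

# Assumed split of the ten feature columns used by the API.
categorical = ['Product_Sugar_Content', 'Product_Type', 'Store_Id', 'Store_Size',
               'Store_Location_City_Type', 'Store_Type']
numeric = ['Product_Weight', 'Product_Allocated_Area', 'Product_MRP',
           'Store_Establishment_Year']

pipeline = Pipeline([
    ('prep', ColumnTransformer([
        ('cat', OneHotEncoder(handle_unknown='ignore'), categorical),
        ('num', 'passthrough', numeric),
    ])),
    ('model', RandomForestRegressor(random_state=42)),
])

# With a training DataFrame X (columns above) and a sales target y:
# pipeline.fit(X, y)
# joblib.dump(pipeline, 'super_kart_model_v1_0.joblib')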