kjdeka commited on
Commit
7f73f05
·
verified ·
1 Parent(s): 91db89f

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. app.py +7 -7
Dockerfile CHANGED
@@ -13,4 +13,4 @@ RUN pip install --no-cache-dir --upgrade -r requirements.txt
13
  # - `-w 4`: Uses 4 worker processes for handling requests
14
  # - `-b 0.0.0.0:7860`: Binds the server to port 7860 on all network interfaces
15
  # - `app:app`: Runs the Flask app (assuming `app.py` contains the Flask instance named `app`)
16
- CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:revenue_predictor_api"]
 
13
  # - `-w 4`: Uses 4 worker processes for handling requests
14
  # - `-b 0.0.0.0:7860`: Binds the server to port 7860 on all network interfaces
15
  # - `app:backend_predictor_api`: Runs the Flask app (`app.py` contains the Flask instance named `backend_predictor_api`)
16
+ CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:7860", "app:backend_predictor_api"]
app.py CHANGED
@@ -40,13 +40,13 @@ def predict_dijak_backend():
40
  # Convert the extracted data into a DataFrame
41
  input_data = pd.DataFrame([sample])
42
 
43
- # Make a revenue prediction using the trained model
44
  prediction = model.predict(input_data).tolist()[0]
45
 
46
  # Return the prediction as a JSON response
47
  return jsonify({'Prediction': prediction})
48
 
49
- # Define an endpoint to predict revenue for a batch of revenue
50
  @backend_predictor_api.post('/v1/dijakbnbatch')
51
  def predict_dijak_backend_batch():
52
  # Get the uploaded CSV file from the request
@@ -56,18 +56,18 @@ def predict_dijak_backend_batch():
56
  input_data = pd.read_csv(file)
57
 
58
  # Drop Product_Id before prediction
59
- features = input_data.drop("Product_Id", axis=1)
60
 
61
  # Make predictions
62
  predictions = model.predict(features).tolist()
63
 
64
- # Build structured output with Product_Id, Store_Id, and rounded revenue
65
  output_list = []
66
  for i in range(len(predictions)):
67
  output_list.append({
68
- "Product_Id": input_data.loc[i, "Product_Id"],
69
- "Store_Id": input_data.loc[i, "Store_Id"],
70
- "Revenue Prediction": round(predictions[i], 2)
71
  })
72
 
73
  return jsonify(output_list)
 
40
  # Convert the extracted data into a DataFrame
41
  input_data = pd.DataFrame([sample])
42
 
43
+ # Make a prediction using the trained model
44
  prediction = model.predict(input_data).tolist()[0]
45
 
46
  # Return the prediction as a JSON response
47
  return jsonify({'Prediction': prediction})
48
 
49
+ # Define an endpoint to predict for a batch of inputs
50
  @backend_predictor_api.post('/v1/dijakbnbatch')
51
  def predict_dijak_backend_batch():
52
  # Get the uploaded CSV file from the request
 
56
  input_data = pd.read_csv(file)
57
 
58
  # Drop Personal_Loan before prediction
59
+ features = input_data.drop("Personal_Loan", axis=1)
60
 
61
  # Make predictions
62
  predictions = model.predict(features).tolist()
63
 
64
+ # Build structured output with rounded prediction values (Product_Id and Store_Id fields are commented out)
65
  output_list = []
66
  for i in range(len(predictions)):
67
  output_list.append({
68
+ #"Product_Id": input_data.loc[i, "Product_Id"],
69
+ #"Store_Id": input_data.loc[i, "Store_Id"],
70
+ "Prediction": round(predictions[i], 2)
71
  })
72
 
73
  return jsonify(output_list)