SagarChhabriya committed on
Commit
ab68e7e
·
verified ·
1 Parent(s): f017f50

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +16 -0
  2. app.py +274 -0
  3. requirements.txt +8 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.9-slim
2
+
3
+ WORKDIR /app
4
+
5
+ # Install dependencies
6
+ COPY requirements.txt .
7
+ RUN pip install --no-cache-dir -r requirements.txt
8
+
9
+ # Copy application code
10
+ COPY . .
11
+
12
+ # Expose port (Hugging Face uses port 7860)
13
+ EXPOSE 7860
14
+
15
+ # Start the application
16
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ### app.py for vercel deployment #####
2
+
3
+ # from fastapi import FastAPI, HTTPException
4
+ # from pydantic import BaseModel
5
+ # import joblib
6
+ # import numpy as np
7
+ # # import pandas as pd
8
+ # import os
9
+
10
+ # app = FastAPI(title="ML Model API", version="1.0.0")
11
+
12
+ # # Load model and feature names
13
+ # try:
14
+ # model = joblib.load('model/model.joblib')
15
+ # feature_names = joblib.load('model/feature_names.joblib')
16
+ # print("Model loaded successfully!")
17
+ # except Exception as e:
18
+ # print(f"Error loading model: {e}")
19
+ # model = None
20
+ # feature_names = []
21
+
22
+ # # Define input schema
23
+ # class PredictionInput(BaseModel):
24
+ # sepal_length: float
25
+ # sepal_width: float
26
+ # petal_length: float
27
+ # petal_width: float
28
+
29
+ # class BatchPredictionInput(BaseModel):
30
+ # data: list[list[float]] # List of feature arrays
31
+
32
+ # @app.get("/")
33
+ # async def root():
34
+ # return {
35
+ # "message": "ML Model API is running!",
36
+ # "endpoints": {
37
+ # "health": "/health",
38
+ # "single_prediction": "/predict",
39
+ # "batch_prediction": "/predict-batch",
40
+ # "model_info": "/model-info"
41
+ # }
42
+ # }
43
+
44
+ # @app.get("/health")
45
+ # async def health_check():
46
+ # return {
47
+ # "status": "healthy",
48
+ # "model_loaded": model is not None,
49
+ # "model_type": "RandomForestClassifier" if model else "None"
50
+ # }
51
+
52
+ # @app.get("/model-info")
53
+ # async def model_info():
54
+ # if not model:
55
+ # raise HTTPException(status_code=500, detail="Model not loaded")
56
+
57
+ # return {
58
+ # "model_type": str(type(model).__name__),
59
+ # "feature_names": feature_names,
60
+ # "n_features": len(feature_names),
61
+ # "n_classes": getattr(model, 'n_classes_', 'Unknown')
62
+ # }
63
+
64
+ # @app.post("/predict")
65
+ # async def predict_single(input_data: PredictionInput):
66
+ # if not model:
67
+ # raise HTTPException(status_code=500, detail="Model not loaded")
68
+
69
+ # try:
70
+ # # Convert input to array
71
+ # features = np.array([
72
+ # input_data.sepal_length,
73
+ # input_data.sepal_width,
74
+ # input_data.petal_length,
75
+ # input_data.petal_width
76
+ # ]).reshape(1, -1)
77
+
78
+ # # Make prediction
79
+ # prediction = model.predict(features)
80
+ # probabilities = model.predict_proba(features)
81
+
82
+ # return {
83
+ # "prediction": int(prediction[0]),
84
+ # "probabilities": probabilities[0].tolist(),
85
+ # "class_names": ["setosa", "versicolor", "virginica"], # Replace with your class names
86
+ # "input_features": input_data.dict()
87
+ # }
88
+ # except Exception as e:
89
+ # raise HTTPException(status_code=400, detail=f"Prediction error: {str(e)}")
90
+
91
+ # @app.post("/predict-batch")
92
+ # async def predict_batch(input_data: BatchPredictionInput):
93
+ # if not model:
94
+ # raise HTTPException(status_code=500, detail="Model not loaded")
95
+
96
+ # try:
97
+ # # Convert to numpy array
98
+ # features = np.array(input_data.data)
99
+
100
+ # # Validate input shape
101
+ # if features.shape[1] != len(feature_names):
102
+ # raise HTTPException(
103
+ # status_code=400,
104
+ # detail=f"Expected {len(feature_names)} features, got {features.shape[1]}"
105
+ # )
106
+
107
+ # # Make predictions
108
+ # predictions = model.predict(features)
109
+ # probabilities = model.predict_proba(features)
110
+
111
+ # return {
112
+ # "predictions": predictions.tolist(),
113
+ # "probabilities": probabilities.tolist(),
114
+ # "batch_size": len(predictions)
115
+ # }
116
+ # except Exception as e:
117
+ # raise HTTPException(status_code=400, detail=f"Batch prediction error: {str(e)}")
118
+
119
+ # # For local development
120
+ # if __name__ == "__main__":
121
+ # import uvicorn
122
+ # uvicorn.run(app, host="0.0.0.0", port=8000)
123
+
124
+
125
+ ############ Via HuggingFace ################
126
+ from fastapi import FastAPI, HTTPException
127
+ from pydantic import BaseModel
128
+ from huggingface_hub import hf_hub_download
129
+ import joblib
130
+ import numpy as np
131
+ import os
132
+
133
+ app = FastAPI(title="ML Model API on Hugging Face", version="1.0.0")
134
+
135
+ # Hugging Face model repository
136
+ HF_REPO_ID = "SagarChhabriya/ml-model-api"
137
+ MODEL_FILENAME = "model.joblib"
138
+ FEATURE_NAMES_FILENAME = "feature_names.joblib"
139
+
140
+ # Load model from Hugging Face Hub
141
+ def load_model_from_hf():
142
+ try:
143
+ print("📥 Downloading model from Hugging Face Hub...")
144
+
145
+ # Download model file
146
+ model_path = hf_hub_download(
147
+ repo_id=HF_REPO_ID,
148
+ filename=MODEL_FILENAME
149
+ )
150
+
151
+ # Download feature names file
152
+ feature_names_path = hf_hub_download(
153
+ repo_id=HF_REPO_ID,
154
+ filename=FEATURE_NAMES_FILENAME
155
+ )
156
+
157
+ # Load files
158
+ model = joblib.load(model_path)
159
+ feature_names = joblib.load(feature_names_path)
160
+
161
+ print("Model and feature names loaded successfully from Hugging Face!")
162
+ return model, feature_names
163
+
164
+ except Exception as e:
165
+ print(f"Error loading from Hugging Face: {e}")
166
+ return None, []
167
+
168
+ # Load model on startup
169
+ model, feature_names = load_model_from_hf()
170
+
171
+ # Define input schema
172
+ class PredictionInput(BaseModel):
173
+ sepal_length: float
174
+ sepal_width: float
175
+ petal_length: float
176
+ petal_width: float
177
+
178
+ class BatchPredictionInput(BaseModel):
179
+ data: list[list[float]] # List of feature arrays
180
+
181
+ @app.get("/")
182
+ async def root():
183
+ return {
184
+ "message": "ML Model API deployed on Hugging Face Spaces! 🚀",
185
+ "endpoints": {
186
+ "health": "/health",
187
+ "single_prediction": "/predict",
188
+ "batch_prediction": "/predict-batch",
189
+ "model_info": "/model-info",
190
+ "docs": "/docs"
191
+ },
192
+ "model_source": "Hugging Face Hub",
193
+ "repository": HF_REPO_ID
194
+ }
195
+
196
+ @app.get("/health")
197
+ async def health_check():
198
+ return {
199
+ "status": "healthy",
200
+ "model_loaded": model is not None,
201
+ "model_type": "RandomForestClassifier" if model else "None",
202
+ "features_loaded": len(feature_names) > 0
203
+ }
204
+
205
+ @app.get("/model-info")
206
+ async def model_info():
207
+ if not model:
208
+ raise HTTPException(status_code=500, detail="Model not loaded")
209
+
210
+ return {
211
+ "model_type": str(type(model).__name__),
212
+ "feature_names": feature_names,
213
+ "n_features": len(feature_names),
214
+ "n_classes": getattr(model, 'n_classes_', 'Unknown'),
215
+ "source": HF_REPO_ID
216
+ }
217
+
218
+ @app.post("/predict")
219
+ async def predict_single(input_data: PredictionInput):
220
+ if not model:
221
+ raise HTTPException(status_code=500, detail="Model not loaded")
222
+
223
+ try:
224
+ # Convert input to array
225
+ features = np.array([
226
+ input_data.sepal_length,
227
+ input_data.sepal_width,
228
+ input_data.petal_length,
229
+ input_data.petal_width
230
+ ]).reshape(1, -1)
231
+
232
+ # Make prediction
233
+ prediction = model.predict(features)
234
+ probabilities = model.predict_proba(features)
235
+
236
+ return {
237
+ "prediction": int(prediction[0]),
238
+ "probabilities": probabilities[0].tolist(),
239
+ "class_names": ["setosa", "versicolor", "virginica"],
240
+ "input_features": input_data.dict(),
241
+ "model_source": HF_REPO_ID
242
+ }
243
+ except Exception as e:
244
+ raise HTTPException(status_code=400, detail=f"Prediction error: {str(e)}")
245
+
246
+ @app.post("/predict-batch")
247
+ async def predict_batch(input_data: BatchPredictionInput):
248
+ if not model:
249
+ raise HTTPException(status_code=500, detail="Model not loaded")
250
+
251
+ try:
252
+ # Convert to numpy array
253
+ features = np.array(input_data.data)
254
+
255
+ # Validate input shape
256
+ if features.shape[1] != len(feature_names):
257
+ raise HTTPException(
258
+ status_code=400,
259
+ detail=f"Expected {len(feature_names)} features, got {features.shape[1]}"
260
+ )
261
+
262
+ # Make predictions
263
+ predictions = model.predict(features)
264
+ probabilities = model.predict_proba(features)
265
+
266
+ return {
267
+ "predictions": predictions.tolist(),
268
+ "probabilities": probabilities.tolist(),
269
+ "batch_size": len(predictions),
270
+ "feature_names": feature_names,
271
+ "model_source": HF_REPO_ID
272
+ }
273
+ except Exception as e:
274
+ raise HTTPException(status_code=400, detail=f"Batch prediction error: {str(e)}")
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ uvicorn==0.35.0
2
+ # scikit-learn==1.7.1 # required while training
3
+ pydantic==2.11.7
4
+ # pandas==2.3.2 # required while training
5
+ numpy==2.3.2
6
+ fastapi==0.116.1
7
+ joblib==1.5.2
8
+ huggingface-hub==0.34.4