muhalwan committed on
Commit
816b103
·
verified ·
1 Parent(s): 511d60d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -7
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
 
2
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
3
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
4
  import logging
5
  import io
6
  import joblib
@@ -29,6 +30,7 @@ try:
29
  except (RuntimeError, ValueError) as e:
30
  logging.warning(f"Could not disable GPU for TensorFlow: {e}")
31
 
 
32
  # --- Model Loading ---
33
  def load_models():
34
  logging.info("Loading all models from the Hub...")
@@ -51,14 +53,14 @@ def load_models():
51
 
52
  try:
53
  xgb_model_path = hf_hub_download(
54
- repo_id="muhalwan/california_housing_price_predictor",
55
  filename="xgb_model.joblib"
56
  )
57
  scaler_path = hf_hub_download(
58
- repo_id="muhalwan/california_housing_price_predictor",
59
  filename="scaler.joblib"
60
  )
61
-
62
  housing_model = joblib.load(xgb_model_path)
63
  housing_scaler = joblib.load(scaler_path)
64
  logging.info("Housing price model and scaler loaded successfully.")
@@ -68,14 +70,17 @@ def load_models():
68
 
69
  return tokenizer, sentiment_model, cat_dog_model, housing_model, housing_scaler
70
 
 
71
  # --- FastAPI App Initialization ---
72
  app = FastAPI()
73
  tokenizer, sentiment_model, cat_dog_model, housing_model, housing_scaler = load_models()
74
  app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
75
 
 
76
  class SentimentRequest(BaseModel):
77
  text: str
78
 
 
79
  class HousingRequest(BaseModel):
80
  MedInc: float
81
  HouseAge: float
@@ -86,11 +91,13 @@ class HousingRequest(BaseModel):
86
  Latitude: float
87
  Longitude: float
88
 
 
89
  # --- API Endpoints ---
90
  @app.get("/")
91
  async def read_root():
92
  return FileResponse('index.html')
93
 
 
94
  @app.post("/predict/sentiment")
95
  async def predict_sentiment(request: SentimentRequest):
96
  if not tokenizer or not sentiment_model:
@@ -101,13 +108,16 @@ async def predict_sentiment(request: SentimentRequest):
101
  with torch.no_grad():
102
  outputs = sentiment_model(**inputs)
103
  probabilities = F.softmax(outputs.logits, dim=-1).squeeze()
104
- labels = ['Bearish', 'Bullish']
105
- prediction = labels[torch.argmax(probabilities).item()]
 
 
106
  return {"prediction": prediction}
107
  except Exception as e:
108
  logging.error(f"Sentiment prediction error: {e}")
109
  raise HTTPException(status_code=500, detail="An error occurred during sentiment analysis.")
110
 
 
111
  @app.post("/predict/catdog")
112
  async def predict_catdog(file: UploadFile = File(...)):
113
  if not cat_dog_model:
@@ -124,7 +134,7 @@ async def predict_catdog(file: UploadFile = File(...)):
124
  img_array = np.expand_dims(img_array, axis=0)
125
  prob = cat_dog_model.predict(img_array, verbose=0)[0, 0]
126
  label = "Dog" if prob >= 0.5 else "Cat"
127
-
128
  return {"prediction": label}
129
  except Exception as e:
130
  logging.error(f"Cat/Dog prediction error: {e}")
 
1
  import os
2
+
3
  os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
4
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
5
  import logging
6
  import io
7
  import joblib
 
30
  except (RuntimeError, ValueError) as e:
31
  logging.warning(f"Could not disable GPU for TensorFlow: {e}")
32
 
33
+
34
  # --- Model Loading ---
35
  def load_models():
36
  logging.info("Loading all models from the Hub...")
 
53
 
54
  try:
55
  xgb_model_path = hf_hub_download(
56
+ repo_id="muhalwan/california_housing_price_predictor",
57
  filename="xgb_model.joblib"
58
  )
59
  scaler_path = hf_hub_download(
60
+ repo_id="muhalwan/california_housing_price_predictor",
61
  filename="scaler.joblib"
62
  )
63
+
64
  housing_model = joblib.load(xgb_model_path)
65
  housing_scaler = joblib.load(scaler_path)
66
  logging.info("Housing price model and scaler loaded successfully.")
 
70
 
71
  return tokenizer, sentiment_model, cat_dog_model, housing_model, housing_scaler
72
 
73
+
74
  # --- FastAPI App Initialization ---
75
  app = FastAPI()
76
  tokenizer, sentiment_model, cat_dog_model, housing_model, housing_scaler = load_models()
77
  app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
78
 
79
+
80
  class SentimentRequest(BaseModel):
81
  text: str
82
 
83
+
84
  class HousingRequest(BaseModel):
85
  MedInc: float
86
  HouseAge: float
 
91
  Latitude: float
92
  Longitude: float
93
 
94
+
95
  # --- API Endpoints ---
96
  @app.get("/")
97
  async def read_root():
98
  return FileResponse('index.html')
99
 
100
+
101
  @app.post("/predict/sentiment")
102
  async def predict_sentiment(request: SentimentRequest):
103
  if not tokenizer or not sentiment_model:
 
108
  with torch.no_grad():
109
  outputs = sentiment_model(**inputs)
110
  probabilities = F.softmax(outputs.logits, dim=-1).squeeze()
111
+ id2label = sentiment_model.config.id2label
112
+ labels = [id2label[int(idx)] for idx in sorted([int(k) for k in id2label.keys()])]
113
+ pred_idx = torch.argmax(probabilities).item()
114
+ prediction = labels[pred_idx] if pred_idx < len(labels) else str(pred_idx)
115
  return {"prediction": prediction}
116
  except Exception as e:
117
  logging.error(f"Sentiment prediction error: {e}")
118
  raise HTTPException(status_code=500, detail="An error occurred during sentiment analysis.")
119
 
120
+
121
  @app.post("/predict/catdog")
122
  async def predict_catdog(file: UploadFile = File(...)):
123
  if not cat_dog_model:
 
134
  img_array = np.expand_dims(img_array, axis=0)
135
  prob = cat_dog_model.predict(img_array, verbose=0)[0, 0]
136
  label = "Dog" if prob >= 0.5 else "Cat"
137
+
138
  return {"prediction": label}
139
  except Exception as e:
140
  logging.error(f"Cat/Dog prediction error: {e}")