Sahil Garg committed on
Commit
1457ba8
·
1 Parent(s): de597ec

flexible asset id handling

Browse files
Files changed (4) hide show
  1. app.py +1 -1
  2. ml/inference.py +8 -2
  3. src/models.py +1 -0
  4. src/services.py +2 -2
app.py CHANGED
@@ -14,7 +14,7 @@ service = AnalysisService(config)
14
  async def analyze_sensor_data(data: SensorData):
15
  try:
16
  logger.info(f"Processing request with {len(data.vdc1)} voltage and {len(data.idc1)} current data points")
17
- ml_output, agent_output = service.analyze(data.vdc1, data.idc1, data.api_key)
18
  return AnalysisResponse(ml_output=ml_output, agent_output=agent_output)
19
  except HTTPException:
20
  raise
 
14
  async def analyze_sensor_data(data: SensorData):
15
  try:
16
  logger.info(f"Processing request with {len(data.vdc1)} voltage and {len(data.idc1)} current data points")
17
+ ml_output, agent_output = service.analyze(data.vdc1, data.idc1, data.api_key, data.asset_id)
18
  return AnalysisResponse(ml_output=ml_output, agent_output=agent_output)
19
  except HTTPException:
20
  raise
ml/inference.py CHANGED
@@ -111,7 +111,7 @@ class MLEngine:
111
  "confidence": confidence
112
  }
113
 
114
- def predict_from_raw(self, raw_df: pd.DataFrame):
115
  logger.info("ML analysis start")
116
  df = build_features(raw_df, self.window)
117
  df = df[self.feature_cols].dropna()
@@ -122,9 +122,15 @@ class MLEngine:
122
  )
123
  anomaly_lstm, health = self._compute_anomalies(df_scaled)
124
  predictions = self._make_predictions(df_scaled, anomaly_lstm, health)
 
 
 
 
 
 
125
  logger.info("ML analysis end")
126
  return {
127
- "asset_id": "PV_INVERTER_001",
128
  "failure_probability": round(predictions["failure_prob"], 2),
129
  "expected_ttf_days": round(predictions["ttf_days"], 1),
130
  "expected_rul_days": round(predictions["rul_days"], 1),
 
111
  "confidence": confidence
112
  }
113
 
114
+ def predict_from_raw(self, raw_df: pd.DataFrame, asset_id: str = None):
115
  logger.info("ML analysis start")
116
  df = build_features(raw_df, self.window)
117
  df = df[self.feature_cols].dropna()
 
122
  )
123
  anomaly_lstm, health = self._compute_anomalies(df_scaled)
124
  predictions = self._make_predictions(df_scaled, anomaly_lstm, health)
125
+
126
+ # Use provided asset_id or generate default
127
+ if asset_id is None:
128
+ import uuid
129
+ asset_id = f"Solar_Panel_{str(uuid.uuid4())[:8]}"
130
+
131
  logger.info("ML analysis end")
132
  return {
133
+ "asset_id": asset_id,
134
  "failure_probability": round(predictions["failure_prob"], 2),
135
  "expected_ttf_days": round(predictions["ttf_days"], 1),
136
  "expected_rul_days": round(predictions["rul_days"], 1),
src/models.py CHANGED
@@ -4,6 +4,7 @@ class SensorData(BaseModel):
4
  vdc1: list[float]
5
  idc1: list[float]
6
  api_key: str = None # Optional Google API key for LLM features
 
7
 
8
  class AnalysisResponse(BaseModel):
9
  ml_output: dict
 
4
  vdc1: list[float]
5
  idc1: list[float]
6
  api_key: str = None # Optional Google API key for LLM features
7
+ asset_id: str = None # Optional asset identifier
8
 
9
  class AnalysisResponse(BaseModel):
10
  ml_output: dict
src/services.py CHANGED
@@ -12,12 +12,12 @@ class AnalysisService:
12
  self.config = config
13
  self.ml_engine = MLEngine()
14
 
15
- def analyze(self, vdc1: list, idc1: list, api_key: str) -> tuple:
16
  """Analyze sensor data and return ML and agent outputs."""
17
  logger.info(f"Complete analysis start - processing {len(vdc1)} data points")
18
  validate_sensor_data(vdc1, idc1)
19
  raw_df = prepare_dataframe(vdc1, idc1)
20
- ml_output = self.ml_engine.predict_from_raw(raw_df)
21
  agent_output = self.get_agent_output(api_key, ml_output)
22
 
23
  logger.info("Complete analysis end")
 
12
  self.config = config
13
  self.ml_engine = MLEngine()
14
 
15
+ def analyze(self, vdc1: list, idc1: list, api_key: str, asset_id: str = None) -> tuple:
16
  """Analyze sensor data and return ML and agent outputs."""
17
  logger.info(f"Complete analysis start - processing {len(vdc1)} data points")
18
  validate_sensor_data(vdc1, idc1)
19
  raw_df = prepare_dataframe(vdc1, idc1)
20
+ ml_output = self.ml_engine.predict_from_raw(raw_df, asset_id)
21
  agent_output = self.get_agent_output(api_key, ml_output)
22
 
23
  logger.info("Complete analysis end")