AjaykumarPilla committed on
Commit 42527db · verified · 1 Parent(s): 0863e1d

Update model.py

Files changed (1): model.py +13 -21
model.py CHANGED
@@ -33,12 +33,15 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
     """
     model_name = "t5-small"
     max_retries = 3
-    retry_delay = 20  # Increased for network stability
+    retry_delay = 30  # Increased for network stability
 
     # Log system resources if psutil is available
     if psutil:
-        memory = psutil.virtual_memory()
-        logger.info(f"System memory - Total: {memory.total / 1e9:.2f} GB, Available: {memory.available / 1e9:.2f} GB, Used: {memory.percent}%")
+        try:
+            memory = psutil.virtual_memory()
+            logger.info(f"System memory - Total: {memory.total / 1e9:.2f} GB, Available: {memory.available / 1e9:.2f} GB, Used: {memory.percent}%")
+        except Exception as e:
+            logger.warning(f"Failed to log system memory: {str(e)}")
     else:
         logger.warning("psutil not available; cannot log system memory usage")
 
@@ -56,24 +59,17 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
 
             logger.info("Model loaded successfully. Generating insights...")
             prompt = f"""
-Summarize delay risk data into 2-3 concise insights:
-Project: {input_data.get('project_name', 'Unnamed')}
-Phase: {input_data.get('phase', '')}
-Task: {input_data.get('task', '')}
-Progress: {input_data.get('current_progress', 0)}%
-Workforce Gap: {input_data.get('workforce_gap', 0)}%
-Skill: {input_data.get('workforce_skill_level', '').lower()}
-Weather Score: {input_data.get('weather_impact_score', 0)}
+Summarize risk:
 Risk: {delay_risk:.1f}%
 
-Format as a list, e.g., ["Insight 1", "Insight 2"].
+Format: ["Insight 1"].
 """
 
             with torch.no_grad():
-                inputs = tokenizer(prompt, return_tensors="pt", max_length=32, truncation=True).to("cpu")
+                inputs = tokenizer(prompt, return_tensors="pt", max_length=8, truncation=True).to("cpu")
                 outputs = model.generate(
                     **inputs,
-                    max_new_tokens=15,
+                    max_new_tokens=5,
                     num_beams=1,
                     temperature=0.7,
                     do_sample=True
@@ -82,7 +78,7 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
 
             insights = [line.strip() for line in response.split("\n") if line.strip() and line.strip() not in [prompt]]
             logger.info(f"Generated insights: {insights}")
-            return insights[:3] or ["No insights generated; review input data."]
+            return insights[:1] or ["No insights generated."]
         except Exception as e:
             logger.error(f"Attempt {attempt + 1}/{max_retries} - Model inference failed: {str(e)}")
             if attempt < max_retries - 1:
@@ -92,13 +88,9 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
                 logger.error("Max retries reached. Using fallback insights.")
                 fallback_insights = []
                 if delay_risk > 75:
-                    fallback_insights.append("High risk; allocate resources urgently.")
+                    fallback_insights.append("High risk detected.")
                 elif delay_risk > 50:
-                    fallback_insights.append("Moderate risk; extend shift hours.")
-                if input_data.get('workforce_gap', 0) > 20:
-                    fallback_insights.append("Workforce gap; recruit workers.")
-                if input_data.get('weather_impact_score', 0) > 50:
-                    fallback_insights.append("Adverse weather; prioritize indoor tasks.")
+                    fallback_insights.append("Moderate risk found.")
                 return fallback_insights or ["AI model failed to generate insights; check system resources."]
 
 def predict_delay(input_data: Dict) -> Dict:
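
Two of the new values are worth a second look in review. With max_length=8 the tokenizer keeps only the first eight tokens of the prompt, so the trailing format hint never reaches the model, and max_new_tokens=5 caps the reply at five tokens. A minimal standalone check of the truncation behavior (not part of model.py; the 82.5% risk value is made up for illustration):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    prompt = 'Summarize risk:\nRisk: 82.5%\n\nFormat: ["Insight 1"].'

    full = tokenizer(prompt, return_tensors="pt")
    clipped = tokenizer(prompt, return_tensors="pt", max_length=8, truncation=True)

    print(full["input_ids"].shape[1])     # token count of the whole prompt
    print(clipped["input_ids"].shape[1])  # 8: the format hint at the end is cut
    print(tokenizer.decode(clipped["input_ids"][0]))  # what the model actually sees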
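The hunks also lean on a retry loop (attempt, max_retries, retry_delay) that sits outside the diff context. A minimal sketch of the assumed shape; the names with_retries and fn are hypothetical, not from model.py:

    import time

    def with_retries(fn, max_retries=3, retry_delay=30, logger=None):
        """Run fn up to max_retries times, sleeping retry_delay seconds between failures."""
        for attempt in range(max_retries):
            try:
                return fn()
            except Exception as e:
                if logger:
                    logger.error(f"Attempt {attempt + 1}/{max_retries} failed: {e}")
                if attempt < max_retries - 1:
                    time.sleep(retry_delay)
        return None  # caller then builds fallback insights, as the last hunk does

Sleeping a flat 30 s between attempts matches the new retry_delay; exponential backoff would be the usual alternative if the failures are network-related.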