Update model.py
model.py CHANGED
@@ -33,7 +33,7 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
     """
     model_name = "t5-small"
     max_retries = 3
-    retry_delay =
+    retry_delay = 20  # Increased for network stability
 
     # Log system resources if psutil is available
     if psutil:
@@ -56,28 +56,25 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
 
             logger.info("Model loaded successfully. Generating insights...")
             prompt = f"""
-            Summarize
-            Project: {input_data.get('project_name', 'Unnamed
+            Summarize delay risk data into 2-3 concise insights:
+            Project: {input_data.get('project_name', 'Unnamed')}
             Phase: {input_data.get('phase', '')}
             Task: {input_data.get('task', '')}
-
-            Actual Duration: {input_data.get('task_actual_duration', 0)} days
-            Current Progress: {input_data.get('current_progress', 0)}%
+            Progress: {input_data.get('current_progress', 0)}%
             Workforce Gap: {input_data.get('workforce_gap', 0)}%
-
-
-
-            Calculated Delay Risk: {delay_risk:.1f}%
+            Skill: {input_data.get('workforce_skill_level', '').lower()}
+            Weather Score: {input_data.get('weather_impact_score', 0)}
+            Risk: {delay_risk:.1f}%
 
-            Format
+            Format as a list, e.g., ["Insight 1", "Insight 2"].
             """
 
             with torch.no_grad():
-                inputs = tokenizer(prompt, return_tensors="pt", max_length=
+                inputs = tokenizer(prompt, return_tensors="pt", max_length=32, truncation=True).to("cpu")
                 outputs = model.generate(
                     **inputs,
-                    max_new_tokens=
-                    num_beams=
+                    max_new_tokens=15,
+                    num_beams=1,
                     temperature=0.7,
                     do_sample=True
                 )
@@ -85,7 +82,7 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
 
             insights = [line.strip() for line in response.split("\n") if line.strip() and line.strip() not in [prompt]]
             logger.info(f"Generated insights: {insights}")
-            return insights[:
+            return insights[:3] or ["No insights generated; review input data."]
         except Exception as e:
             logger.error(f"Attempt {attempt + 1}/{max_retries} - Model inference failed: {str(e)}")
             if attempt < max_retries - 1:
@@ -95,11 +92,11 @@ def call_ai_model_for_insights(input_data: Dict, delay_risk: float) -> List[str]
     logger.error("Max retries reached. Using fallback insights.")
     fallback_insights = []
     if delay_risk > 75:
-        fallback_insights.append("High risk
+        fallback_insights.append("High risk; allocate resources urgently.")
     elif delay_risk > 50:
-        fallback_insights.append("Moderate risk;
+        fallback_insights.append("Moderate risk; extend shift hours.")
     if input_data.get('workforce_gap', 0) > 20:
-        fallback_insights.append("
+        fallback_insights.append("Workforce gap; recruit workers.")
     if input_data.get('weather_impact_score', 0) > 50:
         fallback_insights.append("Adverse weather; prioritize indoor tasks.")
     return fallback_insights or ["AI model failed to generate insights; check system resources."]
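
For reference, a minimal standalone sketch of the updated inference path. The model- and tokenizer-loading lines sit outside these hunks, so the transformers loading calls below are an assumption based on model_name = "t5-small"; the tokenizer and generate arguments mirror the new lines of the diff.

import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Assumption: the function loads t5-small via transformers
# (the loading code is outside the hunks shown above).
tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")

prompt = "Summarize delay risk data into 2-3 concise insights: ..."  # abbreviated stand-in

with torch.no_grad():
    # max_length=32 with truncation=True clips the prompt to 32 tokens,
    # so only the head of the multi-line prompt reaches the model
    inputs = tokenizer(prompt, return_tensors="pt", max_length=32, truncation=True).to("cpu")
    outputs = model.generate(
        **inputs,
        max_new_tokens=15,  # short outputs keep CPU inference cheap
        num_beams=1,        # no beam search; sampling drives variation
        temperature=0.7,
        do_sample=True,
    )
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)

Note the trade-off in these values: max_length=32 and max_new_tokens=15 make each attempt fast on a CPU Space, at the cost of discarding most of the prompt and capping how much of the requested list the model can emit.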
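
The retry scaffolding that uses max_retries and retry_delay is only partly visible in the hunks. Below is a sketch of the pattern the diff implies; run_inference is a hypothetical stand-in for the model call, and the time.sleep(retry_delay) backoff is an assumption, since the sleep itself falls outside the shown lines.

import logging
import time

logger = logging.getLogger(__name__)

def generate_with_retries(run_inference, max_retries=3, retry_delay=20):
    # run_inference is a hypothetical callable wrapping the t5-small call above
    for attempt in range(max_retries):
        try:
            return run_inference()
        except Exception as e:
            logger.error(f"Attempt {attempt + 1}/{max_retries} - Model inference failed: {str(e)}")
            if attempt < max_retries - 1:
                time.sleep(retry_delay)  # assumption: fixed backoff between attempts
    logger.error("Max retries reached. Using fallback insights.")
    return ["AI model failed to generate insights; check system resources."]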