Spaces:
Runtime error
Phase 4: Implement LLM-based summarization node
Browse filesπ Major Features:
- New llm_summarization_node() in agent/nodes.py for intelligent data analysis
- Enhanced LLMClient with generate_insights() method for data summarization
- Complete Phase 4 architecture: task-specific formatting β LLM insights β response generation
π§ͺ Comprehensive Testing:
- 5/5 unit tests for LLM summarization node with mock LLM clients
- End-to-end integration tests for temperature and precipitation workflows
- Enhanced test visibility showing complete workflow phases
- Mock testing framework for API-free validation
π OMIRL Improvements:
- Removed task-agnostic summarization (replaced with task-specific formatting)
- Enhanced precipitation data parsing and formatting
- Improved station data formatting with scenario-based output
- Updated adapter to use direct task formatting (no LLM dependency)
π§ Architecture Enhancements:
- Clean separation: LLM summarization adds insights, response generation handles formatting
- Graceful LLM failure handling - workflow continues without insights
- Enhanced response generation to display LLM insights in user-friendly format
- Maintained backward compatibility with existing workflow
β
Validation:
- Phase 4 working end-to-end with detailed workflow visibility
- Both temperature and precipitation query testing
- Complete test coverage with fast mock tests and integration tests
- Performance analysis and browser management improvements
ποΈ Cleanup:
- Removed unused legacy test files and discovery artifacts
- Cleaned up old task-agnostic summarization service
- Reorganized test structure with proper agent/ subdirectory
- agent/llm_client.py +79 -0
- agent/nodes.py +122 -0
- services/text/task_agnostic_summarization.py +0 -633
- tests/agent/test_llm_summarization_node.py +308 -0
- tests/agent/test_llm_sumnode_integration.py +339 -0
- tests/debug_node.py +0 -0
- tests/debug_state.py +0 -0
- tests/omirl/performance_analysis.py +86 -0
- tests/{test_adapter_integration.py β omirl/test_adapter_integration.py} +81 -7
- tests/omirl/test_adapter_with_precipitation.py +0 -178
- tests/omirl/test_fast.py +252 -0
- tests/omirl/test_massimi_precipitazione.py +301 -53
- tests/omirl/test_validation.py +390 -0
- tests/omirl/test_valori_stazioni.py +296 -0
- tests/test_llm_summarization_node.py +0 -0
- tests/test_omirl_implementation.py +0 -1
- tests/test_phase4_workflow.py +0 -0
- tests/test_valori_stazioni.py +0 -155
- tools/omirl/adapter.py +8 -11
- tools/omirl/tables/__init__.py +2 -3
- tools/omirl/tables/massimi_precipitazione.py +129 -72
- tools/omirl/tables/valori_stazioni.py +103 -47
|
@@ -365,6 +365,85 @@ class LLMClient:
|
|
| 365 |
except Exception as e:
|
| 366 |
raise ValueError(f"Failed to parse LLM response: {e}")
|
| 367 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 368 |
def _create_fallback_proposal(self, user_query: str, error: str) -> ToolProposal:
|
| 369 |
"""
|
| 370 |
Create fallback proposal when LLM routing fails
|
|
|
|
| 365 |
except Exception as e:
|
| 366 |
raise ValueError(f"Failed to parse LLM response: {e}")
|
| 367 |
|
| 368 |
+
async def generate_insights(self, data_prompt: str) -> str:
|
| 369 |
+
"""
|
| 370 |
+
Generate intelligent insights from OMIRL data using LLM
|
| 371 |
+
|
| 372 |
+
This method takes formatted data from task-specific tools and generates
|
| 373 |
+
higher-level insights, trends analysis, and operational recommendations.
|
| 374 |
+
|
| 375 |
+
Args:
|
| 376 |
+
data_prompt: Formatted prompt with OMIRL data to analyze
|
| 377 |
+
|
| 378 |
+
Returns:
|
| 379 |
+
String with LLM-generated insights and recommendations
|
| 380 |
+
"""
|
| 381 |
+
try:
|
| 382 |
+
if self.client == "mock_client":
|
| 383 |
+
# Return mock insights for testing
|
| 384 |
+
return ("β’ **Temperatura**: Valori nella norma per la stagione\n"
|
| 385 |
+
"β’ **Precipitazioni**: Leggera attivitΓ nelle zone montane\n"
|
| 386 |
+
"β’ **Raccomandazione**: Monitoraggio ordinario, nessuna criticitΓ rilevata")
|
| 387 |
+
|
| 388 |
+
if self.provider == "gemini":
|
| 389 |
+
# Use Gemini for insight generation
|
| 390 |
+
response = await self.client.generate_content_async(
|
| 391 |
+
data_prompt,
|
| 392 |
+
generation_config=genai.types.GenerationConfig(
|
| 393 |
+
temperature=self.temperature,
|
| 394 |
+
max_output_tokens=self.max_tokens,
|
| 395 |
+
candidate_count=1
|
| 396 |
+
)
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
if response and response.text:
|
| 400 |
+
return response.text.strip()
|
| 401 |
+
else:
|
| 402 |
+
raise ValueError("Empty response from Gemini")
|
| 403 |
+
|
| 404 |
+
elif self.provider == "openai":
|
| 405 |
+
# Use OpenAI for insight generation
|
| 406 |
+
response = await self.client.chat.completions.create(
|
| 407 |
+
model=self.model,
|
| 408 |
+
messages=[
|
| 409 |
+
{
|
| 410 |
+
"role": "system",
|
| 411 |
+
"content": "Sei un analista meteorologico esperto che genera insights operativi dai dati OMIRL."
|
| 412 |
+
},
|
| 413 |
+
{"role": "user", "content": data_prompt}
|
| 414 |
+
],
|
| 415 |
+
temperature=self.temperature,
|
| 416 |
+
max_tokens=self.max_tokens,
|
| 417 |
+
timeout=self.timeout
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
if response.choices and response.choices[0].message.content:
|
| 421 |
+
return response.choices[0].message.content.strip()
|
| 422 |
+
else:
|
| 423 |
+
raise ValueError("Empty response from OpenAI")
|
| 424 |
+
|
| 425 |
+
else:
|
| 426 |
+
raise ValueError(f"Insight generation not implemented for provider: {self.provider}")
|
| 427 |
+
|
| 428 |
+
except Exception as e:
|
| 429 |
+
logging.error(f"LLM insight generation failed: {e}")
|
| 430 |
+
# Try fallback provider if available
|
| 431 |
+
if hasattr(self, 'fallback_provider') and self.fallback_provider != self.provider:
|
| 432 |
+
try:
|
| 433 |
+
# Quick fallback attempt with simpler prompt
|
| 434 |
+
fallback_client = LLMClient(
|
| 435 |
+
provider=self.fallback_provider,
|
| 436 |
+
model=self.fallback_model,
|
| 437 |
+
temperature=self.temperature,
|
| 438 |
+
max_tokens=300 # Shorter fallback
|
| 439 |
+
)
|
| 440 |
+
return await fallback_client.generate_insights(data_prompt)
|
| 441 |
+
except Exception as fallback_error:
|
| 442 |
+
logging.error(f"Fallback insight generation failed: {fallback_error}")
|
| 443 |
+
|
| 444 |
+
# Return empty string on failure - don't break the workflow
|
| 445 |
+
return ""
|
| 446 |
+
|
| 447 |
def _create_fallback_proposal(self, user_query: str, error: str) -> ToolProposal:
|
| 448 |
"""
|
| 449 |
Create fallback proposal when LLM routing fails
|
|
@@ -31,6 +31,7 @@ from datetime import datetime
|
|
| 31 |
from .state import AgentState, ToolCall, ToolResult, update_processing_status, add_tool_result, add_error
|
| 32 |
from .registry import get_tool_registry, get_tool_by_name, validate_tool_parameters
|
| 33 |
from .llm_router_node import llm_router_node
|
|
|
|
| 34 |
|
| 35 |
|
| 36 |
async def llm_routing_node(state: AgentState) -> AgentState:
|
|
@@ -192,6 +193,121 @@ async def tool_execution_node(state: AgentState) -> AgentState:
|
|
| 192 |
return state
|
| 193 |
|
| 194 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 195 |
async def response_generation_node(state: AgentState) -> AgentState:
|
| 196 |
"""
|
| 197 |
Generate final response based on tool results and LLM router status
|
|
@@ -231,6 +347,12 @@ async def response_generation_node(state: AgentState) -> AgentState:
|
|
| 231 |
for result in successful_results:
|
| 232 |
response_parts.append(result.summary_text)
|
| 233 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 234 |
# Add artifact information
|
| 235 |
if result.artifacts:
|
| 236 |
response_parts.append(f"\nπ **File generati:** {len(result.artifacts)}")
|
|
|
|
| 31 |
from .state import AgentState, ToolCall, ToolResult, update_processing_status, add_tool_result, add_error
|
| 32 |
from .registry import get_tool_registry, get_tool_by_name, validate_tool_parameters
|
| 33 |
from .llm_router_node import llm_router_node
|
| 34 |
+
from .llm_client import LLMClient
|
| 35 |
|
| 36 |
|
| 37 |
async def llm_routing_node(state: AgentState) -> AgentState:
|
|
|
|
| 193 |
return state
|
| 194 |
|
| 195 |
|
| 196 |
+
async def llm_summarization_node(state: AgentState) -> AgentState:
|
| 197 |
+
"""
|
| 198 |
+
Generate intelligent insights from tool results using LLM
|
| 199 |
+
|
| 200 |
+
This node takes the raw task-specific formatted data and generates
|
| 201 |
+
higher-level insights, trends, and cross-task analysis using an LLM.
|
| 202 |
+
This is Phase 4 of the architecture: LLM-based intelligent summarization.
|
| 203 |
+
|
| 204 |
+
Args:
|
| 205 |
+
state: Current agent state with tool results
|
| 206 |
+
|
| 207 |
+
Returns:
|
| 208 |
+
Updated state with LLM-generated insights added to metadata
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
print(f"π§ Generating LLM-based insights...")
|
| 212 |
+
|
| 213 |
+
state = update_processing_status(state, "llm_summarization", "Analyzing data with LLM")
|
| 214 |
+
|
| 215 |
+
tool_results = state["tool_results"]
|
| 216 |
+
|
| 217 |
+
# Only process successful results that have substantive data
|
| 218 |
+
successful_results = [r for r in tool_results if r.success and r.summary_text]
|
| 219 |
+
|
| 220 |
+
if not successful_results:
|
| 221 |
+
print(f"βΉοΈ No successful results to analyze")
|
| 222 |
+
return state
|
| 223 |
+
|
| 224 |
+
try:
|
| 225 |
+
# Initialize LLM client for summarization
|
| 226 |
+
llm_client = LLMClient(
|
| 227 |
+
provider="gemini",
|
| 228 |
+
temperature=0.3, # Slightly higher for more creative insights
|
| 229 |
+
max_tokens=800, # Allow for richer analysis
|
| 230 |
+
timeout=15
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
# Build summarization prompt
|
| 234 |
+
prompt_parts = [
|
| 235 |
+
"Analizza i seguenti dati meteorologici OMIRL e genera insights intelligenti.",
|
| 236 |
+
"Concentrati su: tendenze, valori anomali, confronti geografici, raccomandazioni operative.",
|
| 237 |
+
"Rispondi in italiano con bullet points chiari e concisi.\n",
|
| 238 |
+
"DATI DA ANALIZZARE:"
|
| 239 |
+
]
|
| 240 |
+
|
| 241 |
+
# Add each tool result's data
|
| 242 |
+
for i, result in enumerate(successful_results, 1):
|
| 243 |
+
prompt_parts.append(f"\n{i}. {result.tool_name.upper()}:")
|
| 244 |
+
prompt_parts.append(f" {result.summary_text}")
|
| 245 |
+
|
| 246 |
+
# Add key metadata for context
|
| 247 |
+
if result.metadata:
|
| 248 |
+
relevant_metadata = {
|
| 249 |
+
k: v for k, v in result.metadata.items()
|
| 250 |
+
if k in ['sensor_type', 'filters_applied', 'total_after_filtering', 'zona_allerta_records', 'province_records']
|
| 251 |
+
}
|
| 252 |
+
if relevant_metadata:
|
| 253 |
+
prompt_parts.append(f" Dettagli: {relevant_metadata}")
|
| 254 |
+
|
| 255 |
+
prompt_parts.append("\nGENERA INSIGHTS OPERATIVI:")
|
| 256 |
+
|
| 257 |
+
full_prompt = "\n".join(prompt_parts)
|
| 258 |
+
|
| 259 |
+
# Get LLM insights (with fallback)
|
| 260 |
+
try:
|
| 261 |
+
insights = await llm_client.generate_insights(full_prompt)
|
| 262 |
+
|
| 263 |
+
if insights and len(insights.strip()) > 20: # Valid response
|
| 264 |
+
# Add insights to the first successful result's metadata
|
| 265 |
+
if successful_results:
|
| 266 |
+
# Find existing metadata or create new
|
| 267 |
+
original_result = successful_results[0]
|
| 268 |
+
enhanced_metadata = original_result.metadata.copy()
|
| 269 |
+
enhanced_metadata["llm_insights"] = insights
|
| 270 |
+
enhanced_metadata["insights_generated_at"] = datetime.now().isoformat()
|
| 271 |
+
|
| 272 |
+
# Create new enhanced result
|
| 273 |
+
enhanced_result = ToolResult(
|
| 274 |
+
tool_name=original_result.tool_name,
|
| 275 |
+
success=original_result.success,
|
| 276 |
+
summary_text=original_result.summary_text,
|
| 277 |
+
artifacts=original_result.artifacts,
|
| 278 |
+
sources=original_result.sources,
|
| 279 |
+
metadata=enhanced_metadata,
|
| 280 |
+
warnings=original_result.warnings
|
| 281 |
+
)
|
| 282 |
+
|
| 283 |
+
# Update the state with enhanced result using state functions
|
| 284 |
+
# Remove the original result first
|
| 285 |
+
new_results = [r for r in state["tool_results"] if r != original_result]
|
| 286 |
+
# Add the enhanced result
|
| 287 |
+
new_results.insert(0, enhanced_result)
|
| 288 |
+
# Update state
|
| 289 |
+
updated_state = dict(state)
|
| 290 |
+
updated_state["tool_results"] = new_results
|
| 291 |
+
state = updated_state
|
| 292 |
+
|
| 293 |
+
print(f"β
LLM insights generated ({len(insights)} chars)")
|
| 294 |
+
|
| 295 |
+
else:
|
| 296 |
+
print(f"β οΈ LLM generated empty insights, skipping")
|
| 297 |
+
|
| 298 |
+
except Exception as llm_error:
|
| 299 |
+
print(f"β οΈ LLM summarization failed: {llm_error}")
|
| 300 |
+
# Don't fail the whole workflow - just skip insights
|
| 301 |
+
|
| 302 |
+
except Exception as e:
|
| 303 |
+
print(f"β οΈ Summarization node error: {e}")
|
| 304 |
+
# Don't break the workflow - summarization is optional enhancement
|
| 305 |
+
|
| 306 |
+
print(f"β
LLM summarization complete")
|
| 307 |
+
|
| 308 |
+
return state
|
| 309 |
+
|
| 310 |
+
|
| 311 |
async def response_generation_node(state: AgentState) -> AgentState:
|
| 312 |
"""
|
| 313 |
Generate final response based on tool results and LLM router status
|
|
|
|
| 347 |
for result in successful_results:
|
| 348 |
response_parts.append(result.summary_text)
|
| 349 |
|
| 350 |
+
# Add LLM insights if available (Phase 4 enhancement)
|
| 351 |
+
llm_insights = result.metadata.get("llm_insights")
|
| 352 |
+
if llm_insights and len(llm_insights.strip()) > 20:
|
| 353 |
+
response_parts.append(f"\nπ§ **Analisi Intelligente:**")
|
| 354 |
+
response_parts.append(llm_insights)
|
| 355 |
+
|
| 356 |
# Add artifact information
|
| 357 |
if result.artifacts:
|
| 358 |
response_parts.append(f"\nπ **File generati:** {len(result.artifacts)}")
|
|
@@ -1,633 +0,0 @@
|
|
| 1 |
-
# services/text/task_agnostic_summarization.py
|
| 2 |
-
"""
|
| 3 |
-
Task-Agnostic Multi-Task Summarization Service
|
| 4 |
-
|
| 5 |
-
This module provides intelligent summarization that works across all OMIRL tasks
|
| 6 |
-
using standardized data formats. It analyzes multiple task results together and
|
| 7 |
-
generates comprehensive summaries with trend analysis.
|
| 8 |
-
|
| 9 |
-
Key Features:
|
| 10 |
-
- Task-agnostic: Works with any OMIRL task (valori_stazioni, massimi_precipitazione, etc.)
|
| 11 |
-
- Multi-task: Combines results from multiple tasks in a single summary
|
| 12 |
-
- Efficient: One LLM call for all tasks combined
|
| 13 |
-
- Trend-focused: Emphasizes temporal patterns and geographical insights
|
| 14 |
-
- Lightweight: Uses structured data format that works with smaller LLMs
|
| 15 |
-
|
| 16 |
-
Architecture:
|
| 17 |
-
1. Each task provides standardized TaskSummary format
|
| 18 |
-
2. MultiTaskSummarizer collects all TaskSummary objects
|
| 19 |
-
3. Single LLM call generates comprehensive operational summary
|
| 20 |
-
|
| 21 |
-
Usage:
|
| 22 |
-
# From individual tasks
|
| 23 |
-
task_summary = TaskSummary(
|
| 24 |
-
task_type="massimi_precipitazione",
|
| 25 |
-
geographic_scope="Provincia Genova",
|
| 26 |
-
temporal_scope="All periods (5'-24h)",
|
| 27 |
-
data_insights=DataInsights(...)
|
| 28 |
-
)
|
| 29 |
-
|
| 30 |
-
# Multi-task summarization
|
| 31 |
-
summarizer = MultiTaskSummarizer()
|
| 32 |
-
summarizer.add_task_result(task_summary)
|
| 33 |
-
final_summary = await summarizer.generate_final_summary()
|
| 34 |
-
"""
|
| 35 |
-
|
| 36 |
-
import asyncio
|
| 37 |
-
from typing import Dict, Any, List, Optional, Union
|
| 38 |
-
import logging
|
| 39 |
-
from datetime import datetime
|
| 40 |
-
from dataclasses import dataclass, asdict
|
| 41 |
-
import json
|
| 42 |
-
|
| 43 |
-
import google.generativeai as genai
|
| 44 |
-
from agent.config.env_config import get_api_key
|
| 45 |
-
|
| 46 |
-
# Configure logging
|
| 47 |
-
logger = logging.getLogger(__name__)
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
@dataclass
|
| 51 |
-
class DataInsights:
|
| 52 |
-
"""Standardized data insights that work across all task types"""
|
| 53 |
-
total_records: int
|
| 54 |
-
records_with_data: int
|
| 55 |
-
|
| 56 |
-
# Numeric analysis (for any numeric data)
|
| 57 |
-
min_value: Optional[float] = None
|
| 58 |
-
max_value: Optional[float] = None
|
| 59 |
-
avg_value: Optional[float] = None
|
| 60 |
-
unit: Optional[str] = None
|
| 61 |
-
|
| 62 |
-
# Trend analysis (for temporal data)
|
| 63 |
-
trend_direction: Optional[str] = None # "increasing", "decreasing", "stable", "peaked"
|
| 64 |
-
trend_confidence: Optional[str] = None # "high", "medium", "low"
|
| 65 |
-
peak_period: Optional[str] = None # "1h", "24h", etc.
|
| 66 |
-
|
| 67 |
-
# Geographic distribution
|
| 68 |
-
geographic_pattern: Optional[str] = None # "concentrated", "distributed", "coastal", "inland"
|
| 69 |
-
notable_locations: List[Dict[str, Any]] = None
|
| 70 |
-
|
| 71 |
-
# Data quality
|
| 72 |
-
coverage_quality: str = "complete" # "complete", "partial", "sparse"
|
| 73 |
-
|
| 74 |
-
def __post_init__(self):
|
| 75 |
-
if self.notable_locations is None:
|
| 76 |
-
self.notable_locations = []
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
@dataclass
|
| 80 |
-
class TaskSummary:
|
| 81 |
-
"""Standardized summary format for any OMIRL task"""
|
| 82 |
-
task_type: str # "valori_stazioni", "massimi_precipitazione", etc.
|
| 83 |
-
geographic_scope: str # "Provincia Genova", "Zona A", "Liguria", etc.
|
| 84 |
-
temporal_scope: str # "Current values", "All periods (5'-24h)", "Period 1h", etc.
|
| 85 |
-
data_insights: DataInsights
|
| 86 |
-
filters_applied: Dict[str, Any] = None
|
| 87 |
-
extraction_timestamp: str = None
|
| 88 |
-
|
| 89 |
-
def __post_init__(self):
|
| 90 |
-
if self.filters_applied is None:
|
| 91 |
-
self.filters_applied = {}
|
| 92 |
-
if self.extraction_timestamp is None:
|
| 93 |
-
self.extraction_timestamp = datetime.now().isoformat()
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
class MultiTaskSummarizer:
|
| 97 |
-
"""
|
| 98 |
-
Multi-task summarization coordinator
|
| 99 |
-
|
| 100 |
-
Collects results from multiple OMIRL tasks and generates
|
| 101 |
-
a single comprehensive operational summary.
|
| 102 |
-
"""
|
| 103 |
-
|
| 104 |
-
def __init__(self):
|
| 105 |
-
"""Initialize the multi-task summarizer"""
|
| 106 |
-
self.task_results: List[TaskSummary] = []
|
| 107 |
-
self.api_key = get_api_key('GEMINI_API_KEY')
|
| 108 |
-
|
| 109 |
-
if self.api_key:
|
| 110 |
-
genai.configure(api_key=self.api_key)
|
| 111 |
-
self.model = genai.GenerativeModel('gemini-1.5-flash')
|
| 112 |
-
logger.info("β
Multi-task summarizer initialized with Gemini API")
|
| 113 |
-
else:
|
| 114 |
-
self.model = None
|
| 115 |
-
logger.warning("β οΈ No Gemini API key found - will use structured fallback summaries")
|
| 116 |
-
|
| 117 |
-
def add_task_result(self, task_summary: TaskSummary) -> None:
|
| 118 |
-
"""Add a task result to be included in final summary"""
|
| 119 |
-
self.task_results.append(task_summary)
|
| 120 |
-
logger.info(f"π Added {task_summary.task_type} result to multi-task summary queue")
|
| 121 |
-
|
| 122 |
-
def clear_results(self) -> None:
|
| 123 |
-
"""Clear all collected task results"""
|
| 124 |
-
self.task_results.clear()
|
| 125 |
-
logger.info("ποΈ Cleared multi-task summary queue")
|
| 126 |
-
|
| 127 |
-
async def generate_final_summary(self, query_context: str = "") -> str:
|
| 128 |
-
"""
|
| 129 |
-
Generate comprehensive summary from all collected task results
|
| 130 |
-
|
| 131 |
-
Args:
|
| 132 |
-
query_context: Original user query for context
|
| 133 |
-
|
| 134 |
-
Returns:
|
| 135 |
-
Comprehensive operational summary in Italian
|
| 136 |
-
"""
|
| 137 |
-
|
| 138 |
-
if not self.task_results:
|
| 139 |
-
return "π Nessun dato OMIRL estratto"
|
| 140 |
-
|
| 141 |
-
try:
|
| 142 |
-
# Generate summary based on available API
|
| 143 |
-
if self.model and self.api_key:
|
| 144 |
-
return await self._generate_llm_multi_task_summary(query_context)
|
| 145 |
-
else:
|
| 146 |
-
return self._generate_structured_fallback_summary()
|
| 147 |
-
|
| 148 |
-
except Exception as e:
|
| 149 |
-
logger.error(f"β Error in multi-task summarization: {e}")
|
| 150 |
-
return self._generate_basic_fallback_summary()
|
| 151 |
-
|
| 152 |
-
async def _generate_llm_multi_task_summary(self, query_context: str) -> str:
|
| 153 |
-
"""Generate intelligent multi-task summary using Gemini API"""
|
| 154 |
-
|
| 155 |
-
# Convert task results to LLM-friendly format
|
| 156 |
-
summary_data = {
|
| 157 |
-
"query_context": query_context,
|
| 158 |
-
"num_tasks": len(self.task_results),
|
| 159 |
-
"tasks": []
|
| 160 |
-
}
|
| 161 |
-
|
| 162 |
-
for task in self.task_results:
|
| 163 |
-
task_data = {
|
| 164 |
-
"type": task.task_type,
|
| 165 |
-
"geographic_scope": task.geographic_scope,
|
| 166 |
-
"temporal_scope": task.temporal_scope,
|
| 167 |
-
"data": asdict(task.data_insights),
|
| 168 |
-
"filters": task.filters_applied
|
| 169 |
-
}
|
| 170 |
-
summary_data["tasks"].append(task_data)
|
| 171 |
-
|
| 172 |
-
# Build LLM prompt
|
| 173 |
-
prompt = self._build_multi_task_prompt(summary_data)
|
| 174 |
-
|
| 175 |
-
try:
|
| 176 |
-
response = self.model.generate_content(prompt)
|
| 177 |
-
summary = response.text.strip()
|
| 178 |
-
|
| 179 |
-
logger.info(f"β
Generated multi-task LLM summary ({len(summary)} chars) for {len(self.task_results)} tasks")
|
| 180 |
-
return summary
|
| 181 |
-
|
| 182 |
-
except Exception as e:
|
| 183 |
-
logger.error(f"β LLM multi-task summarization failed: {e}")
|
| 184 |
-
return self._generate_structured_fallback_summary()
|
| 185 |
-
|
| 186 |
-
def _build_multi_task_prompt(self, summary_data: Dict[str, Any]) -> str:
|
| 187 |
-
"""Build LLM prompt for multi-task summarization"""
|
| 188 |
-
|
| 189 |
-
prompt = f"""
|
| 190 |
-
Sei un esperto meteorologo che analizza dati OMIRL della Liguria. Hai estratto dati da {summary_data['num_tasks']} operazioni diverse.
|
| 191 |
-
|
| 192 |
-
CONTESTO RICHIESTA: "{summary_data['query_context']}"
|
| 193 |
-
|
| 194 |
-
DATI ESTRATTI:
|
| 195 |
-
{json.dumps(summary_data, indent=2, ensure_ascii=False)}
|
| 196 |
-
|
| 197 |
-
COMPITO:
|
| 198 |
-
Genera un riassunto operativo completo in italiano (max 6 righe) che:
|
| 199 |
-
|
| 200 |
-
1. **Riassuma i dati principali** di tutti i task con emoji appropriate
|
| 201 |
-
2. **Identifichi trend temporali** se presenti (es. "trend crescente nelle ultime 24h")
|
| 202 |
-
3. **Evidenzi pattern geografici** se rilevanti (es. "valori piΓΉ alti nell'entroterra")
|
| 203 |
-
4. **Fornisca insight operativi** utili per decisioni meteorologiche
|
| 204 |
-
5. **Colleghi informazioni** tra diversi task se pertinenti
|
| 205 |
-
|
| 206 |
-
FORMATO:
|
| 207 |
-
- Linguaggio naturale e professionale
|
| 208 |
-
- Valori numerici precisi con unitΓ di misura
|
| 209 |
-
- Massimo 6 righe
|
| 210 |
-
- Una riga per task principale + righe per trend/pattern
|
| 211 |
-
|
| 212 |
-
ESEMPIO MULTI-TASK:
|
| 213 |
-
π‘οΈ **Temperatura Liguria**: 15-28Β°C in 184 stazioni, media 22.1Β°C con trend stabile.
|
| 214 |
-
π§οΈ **Precipitazioni massime**: 0.2-6.2mm, picco 24h a Statale (6.2mm), trend crescente.
|
| 215 |
-
π **Pattern regionale**: temperature piΓΉ alte entroterra, precipitazioni concentrate costa orientale.
|
| 216 |
-
|
| 217 |
-
RISPOSTA (solo il riassunto, senza introduzioni):"""
|
| 218 |
-
|
| 219 |
-
return prompt
|
| 220 |
-
|
| 221 |
-
def _generate_structured_fallback_summary(self) -> str:
|
| 222 |
-
"""Generate structured summary without LLM"""
|
| 223 |
-
|
| 224 |
-
lines = []
|
| 225 |
-
|
| 226 |
-
# Group tasks by type for better organization
|
| 227 |
-
task_groups = {}
|
| 228 |
-
for task in self.task_results:
|
| 229 |
-
if task.task_type not in task_groups:
|
| 230 |
-
task_groups[task.task_type] = []
|
| 231 |
-
task_groups[task.task_type].append(task)
|
| 232 |
-
|
| 233 |
-
# Generate summary for each task type
|
| 234 |
-
for task_type, tasks in task_groups.items():
|
| 235 |
-
emoji = self._get_task_emoji(task_type)
|
| 236 |
-
|
| 237 |
-
if task_type == "valori_stazioni":
|
| 238 |
-
summary_line = self._summarize_valori_stazioni(tasks, emoji)
|
| 239 |
-
elif task_type == "massimi_precipitazione":
|
| 240 |
-
summary_line = self._summarize_massimi_precipitazione(tasks, emoji)
|
| 241 |
-
else:
|
| 242 |
-
summary_line = self._summarize_generic_task(tasks, emoji, task_type)
|
| 243 |
-
|
| 244 |
-
if summary_line:
|
| 245 |
-
lines.append(summary_line)
|
| 246 |
-
|
| 247 |
-
# Add cross-task insights if multiple tasks
|
| 248 |
-
if len(task_groups) > 1:
|
| 249 |
-
cross_insights = self._generate_cross_task_insights()
|
| 250 |
-
if cross_insights:
|
| 251 |
-
lines.append(cross_insights)
|
| 252 |
-
|
| 253 |
-
return "\n".join(lines) if lines else "π Dati OMIRL estratti senza pattern significativi"
|
| 254 |
-
|
| 255 |
-
def _summarize_valori_stazioni(self, tasks: List[TaskSummary], emoji: str) -> str:
|
| 256 |
-
"""Summarize valori_stazioni tasks"""
|
| 257 |
-
|
| 258 |
-
total_records = sum(task.data_insights.total_records for task in tasks)
|
| 259 |
-
total_with_data = sum(task.data_insights.records_with_data for task in tasks)
|
| 260 |
-
|
| 261 |
-
# Combine geographic scopes
|
| 262 |
-
scopes = [task.geographic_scope for task in tasks]
|
| 263 |
-
geographic_summary = ", ".join(set(scopes))
|
| 264 |
-
|
| 265 |
-
# Get value ranges if available
|
| 266 |
-
values_summary = ""
|
| 267 |
-
all_mins = [task.data_insights.min_value for task in tasks if task.data_insights.min_value is not None]
|
| 268 |
-
all_maxs = [task.data_insights.max_value for task in tasks if task.data_insights.max_value is not None]
|
| 269 |
-
units = [task.data_insights.unit for task in tasks if task.data_insights.unit]
|
| 270 |
-
|
| 271 |
-
if all_mins and all_maxs and units:
|
| 272 |
-
min_val = min(all_mins)
|
| 273 |
-
max_val = max(all_maxs)
|
| 274 |
-
unit = units[0]
|
| 275 |
-
values_summary = f": {min_val}{unit}-{max_val}{unit}"
|
| 276 |
-
|
| 277 |
-
return f"{emoji} **Stazioni meteo**{values_summary} in {total_with_data}/{total_records} stazioni ({geographic_summary})"
|
| 278 |
-
|
| 279 |
-
def _summarize_massimi_precipitazione(self, tasks: List[TaskSummary], emoji: str) -> str:
|
| 280 |
-
"""Summarize massimi_precipitazione tasks with trend analysis"""
|
| 281 |
-
|
| 282 |
-
total_records = sum(task.data_insights.total_records for task in tasks)
|
| 283 |
-
|
| 284 |
-
# Analyze temporal scope for trend insights
|
| 285 |
-
temporal_scopes = [task.temporal_scope for task in tasks]
|
| 286 |
-
has_full_temporal = any("All periods" in scope for scope in temporal_scopes)
|
| 287 |
-
|
| 288 |
-
# Get value ranges
|
| 289 |
-
all_mins = [task.data_insights.min_value for task in tasks if task.data_insights.min_value is not None]
|
| 290 |
-
all_maxs = [task.data_insights.max_value for task in tasks if task.data_insights.max_value is not None]
|
| 291 |
-
|
| 292 |
-
if all_mins and all_maxs:
|
| 293 |
-
min_val = min(all_mins)
|
| 294 |
-
max_val = max(all_maxs)
|
| 295 |
-
|
| 296 |
-
# Trend analysis for full temporal data
|
| 297 |
-
trend_text = ""
|
| 298 |
-
if has_full_temporal:
|
| 299 |
-
# Look for trend indicators
|
| 300 |
-
trend_tasks = [task for task in tasks if "All periods" in task.temporal_scope]
|
| 301 |
-
if trend_tasks and trend_tasks[0].data_insights.trend_direction:
|
| 302 |
-
trend = trend_tasks[0].data_insights.trend_direction
|
| 303 |
-
peak = trend_tasks[0].data_insights.peak_period
|
| 304 |
-
if peak:
|
| 305 |
-
trend_text = f", picco {peak}"
|
| 306 |
-
elif trend != "stable":
|
| 307 |
-
trend_text = f", trend {trend}"
|
| 308 |
-
|
| 309 |
-
return f"{emoji} **Precipitazioni massime**: {min_val}-{max_val}mm in {total_records} aree{trend_text}"
|
| 310 |
-
|
| 311 |
-
return f"{emoji} **Precipitazioni massime**: {total_records} aree analizzate"
|
| 312 |
-
|
| 313 |
-
def _summarize_generic_task(self, tasks: List[TaskSummary], emoji: str, task_type: str) -> str:
|
| 314 |
-
"""Summarize any other task type"""
|
| 315 |
-
|
| 316 |
-
total_records = sum(task.data_insights.total_records for task in tasks)
|
| 317 |
-
return f"{emoji} **{task_type.replace('_', ' ').title()}**: {total_records} record estratti"
|
| 318 |
-
|
| 319 |
-
def _generate_cross_task_insights(self) -> str:
|
| 320 |
-
"""Generate insights that span multiple tasks"""
|
| 321 |
-
|
| 322 |
-
# Look for geographical patterns across tasks
|
| 323 |
-
geographic_scopes = [task.geographic_scope for task in self.task_results]
|
| 324 |
-
unique_scopes = set(geographic_scopes)
|
| 325 |
-
|
| 326 |
-
if len(unique_scopes) > 1:
|
| 327 |
-
return f"π **Copertura geografica**: {', '.join(unique_scopes)}"
|
| 328 |
-
|
| 329 |
-
return ""
|
| 330 |
-
|
| 331 |
-
def _generate_basic_fallback_summary(self) -> str:
|
| 332 |
-
"""Generate very basic summary when all else fails"""
|
| 333 |
-
|
| 334 |
-
task_counts = {}
|
| 335 |
-
for task in self.task_results:
|
| 336 |
-
task_counts[task.task_type] = task_counts.get(task.task_type, 0) + 1
|
| 337 |
-
|
| 338 |
-
parts = []
|
| 339 |
-
for task_type, count in task_counts.items():
|
| 340 |
-
emoji = self._get_task_emoji(task_type)
|
| 341 |
-
parts.append(f"{emoji} {task_type}: {count} operazioni")
|
| 342 |
-
|
| 343 |
-
return "π " + ", ".join(parts)
|
| 344 |
-
|
| 345 |
-
def _get_task_emoji(self, task_type: str) -> str:
|
| 346 |
-
"""Get appropriate emoji for task type"""
|
| 347 |
-
|
| 348 |
-
emoji_map = {
|
| 349 |
-
'valori_stazioni': 'π‘οΈ',
|
| 350 |
-
'massimi_precipitazione': 'π§οΈ',
|
| 351 |
-
'livelli_idrometrici': 'π',
|
| 352 |
-
'stazioni': 'π',
|
| 353 |
-
'mappe': 'πΊοΈ',
|
| 354 |
-
'radar': 'π‘',
|
| 355 |
-
'satellite': 'π°οΈ'
|
| 356 |
-
}
|
| 357 |
-
|
| 358 |
-
return emoji_map.get(task_type, 'π')
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
# Convenience functions for task result creation
|
| 362 |
-
|
| 363 |
-
def create_valori_stazioni_summary(
|
| 364 |
-
geographic_scope: str,
|
| 365 |
-
data_insights: DataInsights,
|
| 366 |
-
filters_applied: Dict[str, Any] = None
|
| 367 |
-
) -> TaskSummary:
|
| 368 |
-
"""Create standardized summary for valori_stazioni task"""
|
| 369 |
-
|
| 370 |
-
return TaskSummary(
|
| 371 |
-
task_type="valori_stazioni",
|
| 372 |
-
geographic_scope=geographic_scope,
|
| 373 |
-
temporal_scope="Current values",
|
| 374 |
-
data_insights=data_insights,
|
| 375 |
-
filters_applied=filters_applied or {}
|
| 376 |
-
)
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
def create_massimi_precipitazione_summary(
|
| 380 |
-
geographic_scope: str,
|
| 381 |
-
temporal_scope: str,
|
| 382 |
-
data_insights: DataInsights,
|
| 383 |
-
filters_applied: Dict[str, Any] = None
|
| 384 |
-
) -> TaskSummary:
|
| 385 |
-
"""Create standardized summary for massimi_precipitazione task"""
|
| 386 |
-
|
| 387 |
-
return TaskSummary(
|
| 388 |
-
task_type="massimi_precipitazione",
|
| 389 |
-
geographic_scope=geographic_scope,
|
| 390 |
-
temporal_scope=temporal_scope,
|
| 391 |
-
data_insights=data_insights,
|
| 392 |
-
filters_applied=filters_applied or {}
|
| 393 |
-
)
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
def analyze_station_data(station_data: List[Dict[str, Any]], sensor_type: str) -> DataInsights:
    """
    Analyze station data for trends and patterns.

    Args:
        station_data: List of station dictionaries with sensor values
            (expects keys like "ultimo", "Nome", "Codice", "Comune",
            "Provincia", "Max", "Min" — TODO confirm against scraper output).
        sensor_type: Type of sensor (Temperatura, Precipitazione, etc.)

    Returns:
        DataInsights with station analysis
    """

    # No input at all: distinct "no_data" quality (vs "sparse" below).
    if not station_data:
        return DataInsights(
            total_records=0,
            records_with_data=0,
            coverage_quality="no_data"
        )

    # Extract current values from stations
    values = []
    stations_with_values = []
    notable_stations = []

    for station in station_data:
        try:
            # Extract current value ("ultimo" field)
            current_value = station.get("ultimo")
            if current_value is not None:
                value = float(current_value)
                values.append(value)

                station_info = {
                    "name": station.get("Nome", "Unknown"),
                    "code": station.get("Codice", ""),
                    "comune": station.get("Comune", ""),
                    "provincia": station.get("Provincia", ""),
                    "value": value,
                    # Fall back to the current value when Max/Min are missing/empty.
                    "max": float(station.get("Max", value)) if station.get("Max") else value,
                    "min": float(station.get("Min", value)) if station.get("Min") else value
                }
                stations_with_values.append(station_info)

                # Notable stations (extreme values) — thresholds per sensor type.
                if sensor_type.lower() == "temperatura":
                    if value > 25.0 or value < 5.0:  # Hot or cold thresholds
                        notable_stations.append(station_info)
                elif sensor_type.lower() == "precipitazione":
                    if value > 1.0:  # Any significant precipitation
                        notable_stations.append(station_info)
                elif sensor_type.lower() == "vento":
                    if value > 10.0:  # Strong wind threshold
                        notable_stations.append(station_info)

        except (ValueError, TypeError):
            # Skip stations with invalid (non-numeric) data
            continue

    # Input present but nothing parseable: "sparse" coverage.
    if not values:
        return DataInsights(
            total_records=len(station_data),
            records_with_data=0,
            coverage_quality="sparse"
        )

    # Calculate statistics
    min_value = min(values)
    max_value = max(values)
    avg_value = sum(values) / len(values)
    # NOTE(review): value_range and stations_with_values are computed but
    # never used in the returned DataInsights — candidates for removal.
    value_range = max_value - min_value

    # Determine trend direction based on spatial distribution
    trend_direction = "stable"  # Stations don't have temporal trends like precipitation
    confidence_level = "high" if len(values) > 10 else "medium"

    # Determine coverage quality from the fraction of stations with valid data.
    coverage_ratio = len(values) / len(station_data)
    if coverage_ratio > 0.8:
        coverage_quality = "good"
    elif coverage_ratio > 0.5:
        coverage_quality = "partial"
    else:
        coverage_quality = "sparse"

    return DataInsights(
        total_records=len(station_data),
        records_with_data=len(values),
        min_value=min_value,
        max_value=max_value,
        avg_value=avg_value,
        unit=_get_sensor_unit(sensor_type),
        coverage_quality=coverage_quality,
        trend_direction=trend_direction,
        trend_confidence=confidence_level,
        notable_locations=[{
            "name": s["name"],
            "value": s["value"],
            # Prefer "comune, provincia"; fall back to provincia alone.
            "location": f"{s['comune']}, {s['provincia']}" if s['comune'] else s['provincia']
        } for s in notable_stations],
        geographic_pattern="distributed"  # Default for station data
    )
|
| 499 |
-
def _get_sensor_unit(sensor_type: str) -> str:
|
| 500 |
-
"""Get unit for sensor type"""
|
| 501 |
-
unit_map = {
|
| 502 |
-
"temperatura": "Β°C",
|
| 503 |
-
"precipitazione": "mm",
|
| 504 |
-
"vento": "m/s",
|
| 505 |
-
"umiditΓ ": "%",
|
| 506 |
-
"pressione": "hPa"
|
| 507 |
-
}
|
| 508 |
-
|
| 509 |
-
for key, unit in unit_map.items():
|
| 510 |
-
if key.lower() in sensor_type.lower():
|
| 511 |
-
return unit
|
| 512 |
-
return ""
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
def analyze_precipitation_trends(precipitation_data: Dict[str, Any]) -> DataInsights:
    """
    Analyze precipitation data for trends and patterns.

    Args:
        precipitation_data: Raw precipitation data with time periods;
            expects "zona_allerta" and "province" lists of records whose
            per-period cells look like "0.2 [05:55] Station".

    Returns:
        DataInsights with trend analysis
    """

    # Time periods in order (most recent window first).
    time_periods = ["5'", "15'", "30'", "1h", "3h", "6h", "12h", "24h"]

    # Extract values for trend analysis
    values_by_period = {}
    notable_locations = []

    # Analyze both zona_allerta and province data
    for table_type in ["zona_allerta", "province"]:
        for record in precipitation_data.get(table_type, []):
            # NOTE(review): the area name is read from the "Max (mm)" cell —
            # presumably the first table column holds the area label; verify
            # against the scraper's table layout.
            area_name = record.get("Max (mm)", "")

            # Extract values for each time period
            period_values = []
            for period in time_periods:
                if period in record and record[period]:
                    # Parse value from format "0.2 [05:55] Station"
                    try:
                        value_str = record[period].split()[0]
                        value = float(value_str)
                        period_values.append(value)

                        # Track notable high values
                        if value > 1.0:  # Notable threshold
                            notable_locations.append({
                                "location": area_name,
                                "value": value,
                                "period": period,
                                "details": record[period]
                            })
                    except (ValueError, IndexError):
                        # Unparseable cell counts as no rain for this window.
                        period_values.append(0.0)
                else:
                    period_values.append(0.0)

            if period_values:
                # Later tables overwrite duplicate area names silently.
                values_by_period[area_name] = period_values

    # Flatten all non-zero readings for the aggregate statistics.
    all_values = []
    for values in values_by_period.values():
        all_values.extend([v for v in values if v > 0])

    if not all_values:
        return DataInsights(
            total_records=len(values_by_period),
            records_with_data=0,
            coverage_quality="sparse"
        )

    # Calculate trend direction
    trend_direction = "stable"
    trend_confidence = "low"
    peak_period = None

    # Analyze temporal patterns.
    # NOTE(review): the `break` below means only the FIRST area with >= 4
    # data points drives trend_direction and peak_period — confirm this
    # single-area sampling is intentional.
    for area_name, values in values_by_period.items():
        if len(values) >= 4:  # Need enough data points
            # Correct trend analysis: compare recent vs older periods
            # values[0] = 5' ago (most recent), values[-1] = 24h ago (oldest)
            recent_periods = values[:len(values)//2]  # 5', 15', 30', 1h
            older_periods = values[len(values)//2:]  # 3h, 6h, 12h, 24h

            recent_avg = sum(recent_periods) / len(recent_periods) if recent_periods else 0
            older_avg = sum(older_periods) / len(older_periods) if older_periods else 0

            # If recent values are higher than older ones, trend is increasing
            # If older values are higher than recent ones, trend is decreasing
            # (1.5x factor avoids flagging small fluctuations as a trend)
            if recent_avg > older_avg * 1.5:
                trend_direction = "increasing"
                trend_confidence = "medium"
            elif older_avg > recent_avg * 1.5:
                trend_direction = "decreasing"
                trend_confidence = "medium"

            # Find peak period (window holding this area's maximum value)
            max_value = max(values)
            if max_value > 0:
                max_index = values.index(max_value)
                peak_period = time_periods[max_index]
            break

    return DataInsights(
        total_records=len(values_by_period),
        records_with_data=len([v for v in values_by_period.values() if any(val > 0 for val in v)]),
        min_value=min(all_values) if all_values else None,
        max_value=max(all_values) if all_values else None,
        avg_value=sum(all_values) / len(all_values) if all_values else None,
        unit="mm",
        trend_direction=trend_direction,
        trend_confidence=trend_confidence,
        peak_period=peak_period,
        notable_locations=notable_locations[:5],  # Limit to top 5
        coverage_quality="complete" if len(all_values) > 10 else "partial"
    )
|
| 623 |
-
# Global instance for easy access (lazy singleton; see accessor below).
_multi_task_summarizer = None

def get_multi_task_summarizer() -> MultiTaskSummarizer:
    """Get global multi-task summarizer instance.

    Lazily constructs a single shared MultiTaskSummarizer on first call
    and returns the same instance thereafter.
    """
    global _multi_task_summarizer

    if _multi_task_summarizer is None:
        _multi_task_summarizer = MultiTaskSummarizer()

    return _multi_task_summarizer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test for LLM Summarization Node (Phase 4)
|
| 4 |
+
|
| 5 |
+
Tests the new LLM-based summarization functionality that generates
|
| 6 |
+
intelligent insights from task-specific formatted data.
|
| 7 |
+
|
| 8 |
+
Purpose:
|
| 9 |
+
- Validate LLM summarization node functionality
|
| 10 |
+
- Test integration with existing tool results
|
| 11 |
+
- Verify graceful handling of LLM failures
|
| 12 |
+
- Ensure backward compatibility with existing workflow
|
| 13 |
+
|
| 14 |
+
Created: September 7, 2025
|
| 15 |
+
Branch: omirl_refactor
|
| 16 |
+
Phase: 4 - Higher-level LLM summarization
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import asyncio
|
| 20 |
+
import sys
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
from unittest.mock import patch, AsyncMock
|
| 23 |
+
|
| 24 |
+
# Add project root to path
|
| 25 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 26 |
+
|
| 27 |
+
from agent.nodes import llm_summarization_node
|
| 28 |
+
from agent.state import AgentState, ToolResult
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def create_test_state_with_tool_results():
    """Create a test state with sample tool results.

    Builds a single successful OMIRL temperature ToolResult and wraps it
    in an AgentState positioned just after tool execution
    (processing_status="executing_tools"), ready for the summarization node.
    """

    # Sample OMIRL tool result
    omirl_result = ToolResult(
        tool_name="omirl_tool",
        success=True,
        summary_text="π‘οΈ **Dati Temperatura Liguria**\nβ’ 184 stazioni attive\nβ’ Temperatura media: 21.5Β°C\nβ’ Range: 16.8Β°C - 28.7Β°C",
        artifacts=["stazioni_temperatura_20250907.json"],
        sources=["https://omirl.regione.liguria.it/#/sensorstable"],
        metadata={
            "sensor_type": "Temperatura",
            "total_after_filtering": 184,
            "filters_applied": {"tipo_sensore": "Temperatura"},
            "subtask": "valori_stazioni"
        },
        warnings=[]
    )

    # State as produced right after tool execution, before summarization.
    state = AgentState(
        user_message="Mostra temperature in Liguria",
        routing_result={},
        conversation_history=[],
        tool_results=[omirl_result],
        planned_tools=[],
        errors=[],
        processing_status="executing_tools",
        agent_response="",
        current_operation=None,
        omirl_data=None,
        metadata={}
    )

    return state
|
| 67 |
+
def create_test_state_with_multiple_results():
    """Create test state with multiple tool results for cross-analysis.

    Provides one temperature and one precipitation ToolResult so the
    summarization node can be exercised on multi-source input.
    """

    temp_result = ToolResult(
        tool_name="omirl_tool",
        success=True,
        summary_text="π‘οΈ Temperatura: 184 stazioni, media 21.5Β°C",
        metadata={"sensor_type": "Temperatura", "total_after_filtering": 184},
        artifacts=[], sources=[], warnings=[]
    )

    precip_result = ToolResult(
        tool_name="omirl_tool",
        success=True,
        summary_text="π§οΈ Precipitazioni: Zona A: 0.2mm, Genova: 0.4mm",
        metadata={"subtask": "massimi_precipitazione", "province_records": 1},
        artifacts=[], sources=[], warnings=[]
    )

    state = AgentState(
        user_message="Analizza meteo completo Liguria",
        routing_result={},
        conversation_history=[],
        tool_results=[temp_result, precip_result],
        planned_tools=[],
        errors=[],
        processing_status="executing_tools",
        agent_response="",
        current_operation=None,
        omirl_data=None,
        metadata={}
    )

    return state
|
| 103 |
+
async def test_summarization_node_basic():
    """Test basic LLM summarization functionality.

    Mocks the LLM client so no API call is made, then checks that the
    node attaches insights (and a timestamp) to the tool result metadata.
    """
    print("π§ͺ Testing basic LLM summarization...")

    state = create_test_state_with_tool_results()

    # Test with mock LLM client to avoid API calls
    with patch('agent.nodes.LLMClient') as mock_llm_class:
        mock_client = AsyncMock()
        mock_client.generate_insights.return_value = (
            "β’ **Temperatura**: Valori nella norma stagionale\n"
            "β’ **Distribuzione**: Buona copertura territoriale con 184 stazioni\n"
            "β’ **Trend**: Temperature stabili, nessuna anomalia rilevata\n"
            "β’ **Raccomandazione**: Monitoraggio di routine sufficiente"
        )
        mock_llm_class.return_value = mock_client

        # Execute summarization node
        result_state = await llm_summarization_node(state)

        # Verify insights were added
        assert len(result_state["tool_results"]) == 1
        enhanced_result = result_state["tool_results"][0]

        assert "llm_insights" in enhanced_result.metadata
        assert "Temperatura" in enhanced_result.metadata["llm_insights"]
        assert "insights_generated_at" in enhanced_result.metadata

    print("β Basic summarization test passed")
    return True
+
|
| 135 |
+
async def test_summarization_node_multiple_results():
    """Test LLM summarization with multiple tool results.

    Verifies that insights produced from two tool results (temperature +
    precipitation) mention both sources and their correlation.
    """
    print("π§ͺ Testing multi-result summarization...")

    state = create_test_state_with_multiple_results()

    with patch('agent.nodes.LLMClient') as mock_llm_class:
        mock_client = AsyncMock()
        mock_client.generate_insights.return_value = (
            "β’ **Situazione Generale**: Condizioni meteo stabili in Liguria\n"
            "β’ **Temperatura**: 184 stazioni operative, valori nella norma\n"
            "β’ **Precipitazioni**: AttivitΓ  minima, accumuli trascurabili\n"
            "β’ **Correlazione**: Tempo stabile confermato da entrambi i parametri\n"
            "β’ **Outlook Operativo**: Nessuna criticitΓ , condizioni favorevoli"
        )
        mock_llm_class.return_value = mock_client

        result_state = await llm_summarization_node(state)

        # Verify multi-source analysis: insights are attached to the first
        # result and cover both parameters plus their correlation.
        enhanced_result = result_state["tool_results"][0]
        insights = enhanced_result.metadata["llm_insights"]

        assert "Temperatura" in insights
        assert "Precipitazioni" in insights
        assert "Correlazione" in insights

    print("β Multi-result summarization test passed")
    return True
+
|
| 166 |
+
async def test_summarization_node_llm_failure():
    """Test graceful handling of LLM failures.

    The summarization node must not break the workflow when the LLM
    client raises: the original tool result is preserved unchanged and
    no insights are attached to its metadata.
    """
    print("π§ͺ Testing LLM failure handling...")

    state = create_test_state_with_tool_results()

    with patch('agent.nodes.LLMClient') as mock_llm_class:
        mock_client = AsyncMock()
        # Simulate a hard failure inside the LLM client.
        mock_client.generate_insights.side_effect = Exception("LLM API error")
        mock_llm_class.return_value = mock_client

        # Execute summarization node
        result_state = await llm_summarization_node(state)

        # Verify workflow continues without insights
        assert len(result_state["tool_results"]) == 1
        original_result = result_state["tool_results"][0]

        # Should not have insights due to LLM failure
        assert "llm_insights" not in original_result.metadata

        # But should still have original data.
        # (truthiness instead of `== True` — PEP 8 / ruff E712)
        assert original_result.success
        assert "Temperatura" in original_result.summary_text

    print("β LLM failure handling test passed")
    return True
+
|
| 195 |
+
async def test_summarization_node_no_results():
    """Test summarization with no successful results.

    When every tool result failed, the node must skip the LLM entirely
    and leave the state untouched (no insights in metadata).
    """
    print("π§ͺ Testing with no successful results...")

    # Create state with failed tool result
    failed_result = ToolResult(
        tool_name="omirl_tool",
        success=False,
        summary_text="β οΈ Error: Could not fetch data",
        metadata={"error": "network_timeout"},
        artifacts=[], sources=[], warnings=[]
    )

    state = AgentState(
        user_message="Test failed request",
        routing_result={},
        conversation_history=[],
        tool_results=[failed_result],
        planned_tools=[],
        errors=[],
        processing_status="executing_tools",
        agent_response="",
        current_operation=None,
        omirl_data=None,
        metadata={}
    )

    result_state = await llm_summarization_node(state)

    # Verify no LLM call was made and state unchanged.
    # (identity check instead of `== False` — PEP 8 / ruff E712)
    assert len(result_state["tool_results"]) == 1
    assert result_state["tool_results"][0].success is False
    assert "llm_insights" not in result_state["tool_results"][0].metadata

    print("β No results test passed")
    return True
+
|
| 233 |
+
async def test_summarization_integration():
    """Integration test to verify it works with real workflow steps.

    Confirms the node can be inserted in the workflow without breaking
    existing functionality: processing_status is advanced and insights
    are merged into the existing tool result.
    """
    print("π§ͺ Testing summarization integration...")

    state = create_test_state_with_tool_results()

    # Mock LLM but test the full node integration
    with patch('agent.nodes.LLMClient') as mock_llm_class:
        mock_client = AsyncMock()
        mock_client.generate_insights.return_value = (
            "β’ **Status**: Monitoraggio attivo su 184 stazioni\n"
            "β’ **QualitΓ  Dati**: Copertura completa del territorio ligure"
        )
        mock_llm_class.return_value = mock_client

        # Test that processing status is properly updated
        result_state = await llm_summarization_node(state)

        # Verify state updates
        assert result_state["processing_status"] == "llm_summarization"  # Should be updated by node
        assert len(result_state["tool_results"]) == 1

        # Verify insights were properly integrated.
        # (truthiness instead of `== True` — PEP 8 / ruff E712)
        enhanced_result = result_state["tool_results"][0]
        assert enhanced_result.tool_name == "omirl_tool"
        assert enhanced_result.success
        assert "llm_insights" in enhanced_result.metadata

    print("β Integration test passed")
    return True
+
|
| 268 |
+
async def run_all_tests():
    """Run comprehensive test suite for LLM summarization node.

    Executes each test coroutine in sequence, treating an uncaught
    exception as a failure, and returns True only if all tests pass.
    """
    print("π Starting LLM Summarization Node Tests")
    print("=" * 50)

    # (name, coroutine) pairs; each test returns a bool.
    tests = [
        ("Basic Functionality", test_summarization_node_basic),
        ("Multiple Results", test_summarization_node_multiple_results),
        ("LLM Failure Handling", test_summarization_node_llm_failure),
        ("No Results Handling", test_summarization_node_no_results),
        ("Integration Test", test_summarization_integration),
    ]

    results = []

    for test_name, test_func in tests:
        print(f"\nπ Running: {test_name}")
        try:
            result = await test_func()
            results.append(result)
            print(f"  Result: {'β PASS' if result else 'β FAIL'}")
        except Exception as e:
            # An exception counts as a failure but does not stop the suite.
            print(f"  β Test failed with exception: {e}")
            results.append(False)

    # Summary
    passed = sum(results)
    total = len(results)
    print(f"\nπ Test Results: {passed}/{total} tests passed")

    if passed == total:
        print("β¨ All LLM summarization tests passed!")
        return True
    else:
        print(f"β οΈ {total - passed} tests failed")
        return False
+
|
| 306 |
+
# Script entry point: run the async suite and exit non-zero on any failure
# so CI can detect it.
if __name__ == "__main__":
    success = asyncio.run(run_all_tests())
    sys.exit(0 if success else 1)
|
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
End-to-end test of Phase 4 summarization workflow
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import asyncio
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from unittest.mock import patch, AsyncMock
|
| 10 |
+
|
| 11 |
+
# Add project root to path
|
| 12 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 13 |
+
|
| 14 |
+
from agent.state import AgentState, ToolResult
|
| 15 |
+
from agent.nodes import llm_summarization_node, response_generation_node
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
async def test_full_workflow():
|
| 19 |
+
"""Test complete workflow from tool results through summarization to response"""
|
| 20 |
+
print("π§ͺ Testing complete Phase 4 workflow...")
|
| 21 |
+
|
| 22 |
+
# Create realistic OMIRL tool result
|
| 23 |
+
omirl_result = ToolResult(
|
| 24 |
+
tool_name="omirl_tool",
|
| 25 |
+
success=True,
|
| 26 |
+
summary_text="π‘οΈ **Dati Temperatura Liguria**\nβ’ 184 stazioni attive\nβ’ Temperatura media: 21.5Β°C\nβ’ Range: 16.8Β°C - 28.7Β°C\nβ’ Province coperte: Genova, Savona, Imperia, La Spezia",
|
| 27 |
+
artifacts=["stazioni_temperatura_20250907.json"],
|
| 28 |
+
sources=["https://omirl.regione.liguria.it/#/sensorstable"],
|
| 29 |
+
metadata={
|
| 30 |
+
"sensor_type": "Temperatura",
|
| 31 |
+
"total_after_filtering": 184,
|
| 32 |
+
"filters_applied": {"tipo_sensore": "Temperatura"},
|
| 33 |
+
"subtask": "valori_stazioni"
|
| 34 |
+
},
|
| 35 |
+
warnings=[]
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
# Create initial state
|
| 39 |
+
state = AgentState(
|
| 40 |
+
user_message="Mostra temperature in Liguria",
|
| 41 |
+
routing_result={},
|
| 42 |
+
conversation_history=[],
|
| 43 |
+
tool_results=[omirl_result],
|
| 44 |
+
planned_tools=[],
|
| 45 |
+
errors=[],
|
| 46 |
+
processing_status="executing_tools",
|
| 47 |
+
agent_response="",
|
| 48 |
+
current_operation=None,
|
| 49 |
+
omirl_data=None,
|
| 50 |
+
metadata={}
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
print(f" Initial tool results: {len(state['tool_results'])}")
|
| 54 |
+
|
| 55 |
+
# Display Phase 1: Original task output
|
| 56 |
+
print("\n" + "="*60)
|
| 57 |
+
print("π PHASE 1: TASK-SPECIFIC OUTPUT")
|
| 58 |
+
print("="*60)
|
| 59 |
+
|
| 60 |
+
original_result = state["tool_results"][0]
|
| 61 |
+
print(f"π§ Tool: {original_result.tool_name}")
|
| 62 |
+
print(f"π Success: {original_result.success}")
|
| 63 |
+
print(f"π― Filters Applied: {original_result.metadata.get('filters_applied', 'None')}")
|
| 64 |
+
print(f"π Total Records: {original_result.metadata.get('total_after_filtering', 'Unknown')}")
|
| 65 |
+
print(f"π·οΈ Subtask: {original_result.metadata.get('subtask', 'Unknown')}")
|
| 66 |
+
|
| 67 |
+
print(f"\nπ Task Output Summary:")
|
| 68 |
+
print("-" * 40)
|
| 69 |
+
print(original_result.summary_text)
|
| 70 |
+
|
| 71 |
+
if original_result.artifacts:
|
| 72 |
+
print(f"\nπ Artifacts: {original_result.artifacts}")
|
| 73 |
+
if original_result.sources:
|
| 74 |
+
print(f"π Sources: {original_result.sources}")
|
| 75 |
+
|
| 76 |
+
# Step 1: Run LLM summarization
|
| 77 |
+
print("\n" + "="*60)
|
| 78 |
+
print("π§ PHASE 2: LLM SUMMARIZATION")
|
| 79 |
+
print("="*60)
|
| 80 |
+
|
| 81 |
+
with patch('agent.nodes.LLMClient') as mock_llm_class:
|
| 82 |
+
mock_client = AsyncMock()
|
| 83 |
+
llm_insights = (
|
| 84 |
+
"β’ **Copertura Territoriale**: Eccellente con 184 stazioni operative\n"
|
| 85 |
+
"β’ **Temperature Attuali**: Valori stagionali nella norma (21.5Β°C media)\n"
|
| 86 |
+
"β’ **Distribuzione Geografica**: Buona rappresentanza di tutte le province liguri\n"
|
| 87 |
+
"β’ **QualitΓ Monitoraggio**: Sistema di rilevamento pienamente operativo\n"
|
| 88 |
+
"β’ **Raccomandazioni**: Condizioni stabili, monitoraggio di routine sufficiente"
|
| 89 |
+
)
|
| 90 |
+
mock_client.generate_insights.return_value = llm_insights
|
| 91 |
+
mock_llm_class.return_value = mock_client
|
| 92 |
+
|
| 93 |
+
print("π€ LLM Client: Initialized (mocked)")
|
| 94 |
+
print("π Generating insights from task data...")
|
| 95 |
+
|
| 96 |
+
# Execute summarization
|
| 97 |
+
summarized_state = await llm_summarization_node(state)
|
| 98 |
+
|
| 99 |
+
print(f"β
LLM processing complete")
|
| 100 |
+
print(f"π Results after summarization: {len(summarized_state['tool_results'])}")
|
| 101 |
+
|
| 102 |
+
# Verify insights were added
|
| 103 |
+
enhanced_result = summarized_state["tool_results"][0]
|
| 104 |
+
assert "llm_insights" in enhanced_result.metadata
|
| 105 |
+
|
| 106 |
+
print(f"\nπ§ LLM Generated Insights:")
|
| 107 |
+
print("-" * 40)
|
| 108 |
+
print(enhanced_result.metadata['llm_insights'])
|
| 109 |
+
|
| 110 |
+
print(f"\nβ° Insights timestamp: {enhanced_result.metadata.get('insights_generated_at', 'Unknown')}")
|
| 111 |
+
|
| 112 |
+
# Step 2: Run response generation
|
| 113 |
+
print("\n" + "="*60)
|
| 114 |
+
print("π PHASE 3: RESPONSE GENERATION")
|
| 115 |
+
print("="*60)
|
| 116 |
+
|
| 117 |
+
print("π Combining task output + LLM insights...")
|
| 118 |
+
final_state = await response_generation_node(summarized_state)
|
| 119 |
+
|
| 120 |
+
print(f"β
Response generation complete")
|
| 121 |
+
print(f"π Final response length: {len(final_state['agent_response'])} characters")
|
| 122 |
+
|
| 123 |
+
# Verify response contains both original data and insights
|
| 124 |
+
response = final_state["agent_response"]
|
| 125 |
+
assert "π **Estrazione Dati OMIRL Completata**" in response
|
| 126 |
+
assert "π§ **Analisi Intelligente:**" in response
|
| 127 |
+
assert "Copertura Territoriale" in response
|
| 128 |
+
assert "Temperature Attuali" in response
|
| 129 |
+
assert "π **Fonti dati:**" in response
|
| 130 |
+
|
| 131 |
+
print(" β
Response contains original task data")
|
| 132 |
+
print(" β
Response contains LLM insights section")
|
| 133 |
+
print(" β
Response contains data sources")
|
| 134 |
+
|
| 135 |
+
# Display final combined output
|
| 136 |
+
print("\n" + "="*60)
|
| 137 |
+
print("π― FINAL COMBINED RESPONSE")
|
| 138 |
+
print("="*60)
|
| 139 |
+
print(response)
|
| 140 |
+
|
| 141 |
+
# Summary statistics
|
| 142 |
+
print("\n" + "="*60)
|
| 143 |
+
print("οΏ½ WORKFLOW SUMMARY")
|
| 144 |
+
print("="*60)
|
| 145 |
+
print(f"π― User Query: '{state['user_message']}'")
|
| 146 |
+
print(f"π§ Tool Executed: {original_result.tool_name}")
|
| 147 |
+
print(f"ποΈ Mode: tables")
|
| 148 |
+
print(f"π Subtask: {original_result.metadata.get('subtask', 'Unknown')}")
|
| 149 |
+
print(f"π― Filters Applied: {original_result.metadata.get('filters_applied')}")
|
| 150 |
+
print(f"π Data Records: {original_result.metadata.get('total_after_filtering')}")
|
| 151 |
+
print(f"π Task Output: {len(original_result.summary_text)} chars")
|
| 152 |
+
print(f"π§ LLM Insights: {len(enhanced_result.metadata.get('llm_insights', ''))} chars")
|
| 153 |
+
print(f"π Final Response: {len(response)} chars")
|
| 154 |
+
print(f"β‘ Processing Status: {final_state['processing_status']}")
|
| 155 |
+
|
| 156 |
+
return True
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
async def test_precipitation_workflow():
|
| 160 |
+
"""Test Phase 4 workflow with ambiguous precipitation query"""
|
| 161 |
+
print("\n" + "="*80)
|
| 162 |
+
print("π§οΈ TESTING PRECIPITATION QUERY: 'mostra le precipitazioni a Genova'")
|
| 163 |
+
print("="*80)
|
| 164 |
+
|
| 165 |
+
# Create realistic precipitation tool result
|
| 166 |
+
precip_result = ToolResult(
|
| 167 |
+
tool_name="omirl_tool",
|
| 168 |
+
success=True,
|
| 169 |
+
summary_text="π§οΈ **Precipitazioni Massime - Provincia Genova**\nβ’ Zona d'allerta: A (0.0-0.2mm)\nβ’ Provincia Genova: 0.2-0.4mm nelle ultime 24h\nβ’ Picco registrato: 0.4mm alle 11:25 al Passo del Turchino\nβ’ Trend: AttivitΓ precipitativa minima",
|
| 170 |
+
artifacts=["precipitazioni_genova_20250907.json"],
|
| 171 |
+
sources=["https://omirl.regione.liguria.it/#/maxtable"],
|
| 172 |
+
metadata={
|
| 173 |
+
"filters_applied": {"provincia": "GENOVA"},
|
| 174 |
+
"provincia": "GE",
|
| 175 |
+
"zona_allerta_records": 7,
|
| 176 |
+
"province_records": 1,
|
| 177 |
+
"extraction_method": "dual_table",
|
| 178 |
+
"subtask": "massimi_precipitazione"
|
| 179 |
+
},
|
| 180 |
+
warnings=[]
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
# Create state for precipitation query
|
| 184 |
+
precip_state = AgentState(
|
| 185 |
+
user_message="mostra le precipitazioni a Genova",
|
| 186 |
+
routing_result={},
|
| 187 |
+
conversation_history=[],
|
| 188 |
+
tool_results=[precip_result],
|
| 189 |
+
planned_tools=[],
|
| 190 |
+
errors=[],
|
| 191 |
+
processing_status="executing_tools",
|
| 192 |
+
agent_response="",
|
| 193 |
+
current_operation=None,
|
| 194 |
+
omirl_data=None,
|
| 195 |
+
metadata={}
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
print(f" Initial tool results: {len(precip_state['tool_results'])}")
|
| 199 |
+
|
| 200 |
+
# Display Phase 1: Original task output
|
| 201 |
+
print("\n" + "="*60)
|
| 202 |
+
print("π PHASE 1: TASK-SPECIFIC OUTPUT")
|
| 203 |
+
print("="*60)
|
| 204 |
+
|
| 205 |
+
original_result = precip_state["tool_results"][0]
|
| 206 |
+
print(f"π§ Tool: {original_result.tool_name}")
|
| 207 |
+
print(f"ποΈ Mode: tables")
|
| 208 |
+
print(f"π Subtask: {original_result.metadata.get('subtask', 'Unknown')}")
|
| 209 |
+
print(f"π Success: {original_result.success}")
|
| 210 |
+
print(f"π― Filters Applied: {original_result.metadata.get('filters_applied', 'None')}")
|
| 211 |
+
print(f"π Zona d'Allerta Records: {original_result.metadata.get('zona_allerta_records', 'Unknown')}")
|
| 212 |
+
print(f"π Province Records: {original_result.metadata.get('province_records', 'Unknown')}")
|
| 213 |
+
print(f"π§ Extraction Method: {original_result.metadata.get('extraction_method', 'Unknown')}")
|
| 214 |
+
|
| 215 |
+
print(f"\nπ Task Output Summary:")
|
| 216 |
+
print("-" * 40)
|
| 217 |
+
print(original_result.summary_text)
|
| 218 |
+
|
| 219 |
+
if original_result.artifacts:
|
| 220 |
+
print(f"\nπ Artifacts: {original_result.artifacts}")
|
| 221 |
+
if original_result.sources:
|
| 222 |
+
print(f"π Sources: {original_result.sources}")
|
| 223 |
+
|
| 224 |
+
# Step 1: Run LLM summarization for precipitation data
|
| 225 |
+
print("\n" + "="*60)
|
| 226 |
+
print("π§ PHASE 2: LLM SUMMARIZATION")
|
| 227 |
+
print("="*60)
|
| 228 |
+
|
| 229 |
+
with patch('agent.nodes.LLMClient') as mock_llm_class:
|
| 230 |
+
mock_client = AsyncMock()
|
| 231 |
+
llm_insights = (
|
| 232 |
+
"β’ **Situazione Precipitazioni**: AttivitΓ molto limitata nella provincia di Genova\n"
|
| 233 |
+
"β’ **Distribuzione Temporale**: Accumuli minimi (0.2-0.4mm) concentrati nelle ultime 24h\n"
|
| 234 |
+
"β’ **LocalitΓ Principale**: Passo del Turchino con il valore massimo registrato (0.4mm)\n"
|
| 235 |
+
"β’ **Trend Meteo**: Condizioni prevalentemente asciutte con episodi sporadici\n"
|
| 236 |
+
"β’ **Valutazione Operativa**: Nessuna criticitΓ idrica, situazione nella norma per il periodo\n"
|
| 237 |
+
"β’ **Previsioni Immediate**: StabilitΓ delle condizioni, monitoraggio di routine"
|
| 238 |
+
)
|
| 239 |
+
mock_client.generate_insights.return_value = llm_insights
|
| 240 |
+
mock_llm_class.return_value = mock_client
|
| 241 |
+
|
| 242 |
+
print("π€ LLM Client: Initialized (mocked)")
|
| 243 |
+
print("π Generating precipitation-specific insights...")
|
| 244 |
+
|
| 245 |
+
# Execute summarization
|
| 246 |
+
summarized_state = await llm_summarization_node(precip_state)
|
| 247 |
+
|
| 248 |
+
print(f"β
LLM processing complete")
|
| 249 |
+
print(f"π Results after summarization: {len(summarized_state['tool_results'])}")
|
| 250 |
+
|
| 251 |
+
# Verify insights were added
|
| 252 |
+
enhanced_result = summarized_state["tool_results"][0]
|
| 253 |
+
assert "llm_insights" in enhanced_result.metadata
|
| 254 |
+
|
| 255 |
+
print(f"\nπ§ LLM Generated Insights:")
|
| 256 |
+
print("-" * 40)
|
| 257 |
+
print(enhanced_result.metadata['llm_insights'])
|
| 258 |
+
|
| 259 |
+
print(f"\nβ° Insights timestamp: {enhanced_result.metadata.get('insights_generated_at', 'Unknown')}")
|
| 260 |
+
|
| 261 |
+
# Step 2: Run response generation
|
| 262 |
+
print("\n" + "="*60)
|
| 263 |
+
print("π PHASE 3: RESPONSE GENERATION")
|
| 264 |
+
print("="*60)
|
| 265 |
+
|
| 266 |
+
print("π Combining precipitation data + LLM insights...")
|
| 267 |
+
final_state = await response_generation_node(summarized_state)
|
| 268 |
+
|
| 269 |
+
print(f"β
Response generation complete")
|
| 270 |
+
print(f"π Final response length: {len(final_state['agent_response'])} characters")
|
| 271 |
+
|
| 272 |
+
# Verify response contains precipitation-specific content
|
| 273 |
+
response = final_state["agent_response"]
|
| 274 |
+
assert "π **Estrazione Dati OMIRL Completata**" in response
|
| 275 |
+
assert "π§ **Analisi Intelligente:**" in response
|
| 276 |
+
|
| 277 |
+
print(" β
Response contains original precipitation data")
|
| 278 |
+
print(" β
Response contains LLM precipitation insights")
|
| 279 |
+
print(" β
Response contains data sources")
|
| 280 |
+
|
| 281 |
+
# Display final combined output
|
| 282 |
+
print("\n" + "="*60)
|
| 283 |
+
print("π― FINAL COMBINED RESPONSE")
|
| 284 |
+
print("="*60)
|
| 285 |
+
print(response)
|
| 286 |
+
|
| 287 |
+
# Summary statistics for precipitation
|
| 288 |
+
print("\n" + "="*60)
|
| 289 |
+
print("π PRECIPITATION WORKFLOW SUMMARY")
|
| 290 |
+
print("="*60)
|
| 291 |
+
print(f"π― User Query: '{precip_state['user_message']}'")
|
| 292 |
+
print(f"π§ Tool Executed: {original_result.tool_name}")
|
| 293 |
+
print(f"ποΈ Mode: tables")
|
| 294 |
+
print(f"π Subtask: {original_result.metadata.get('subtask', 'Unknown')}")
|
| 295 |
+
print(f"π― Filters Applied: {original_result.metadata.get('filters_applied')}")
|
| 296 |
+
print(f"π Zona Records: {original_result.metadata.get('zona_allerta_records')}")
|
| 297 |
+
print(f"π Province Records: {original_result.metadata.get('province_records')}")
|
| 298 |
+
print(f"π Task Output: {len(original_result.summary_text)} chars")
|
| 299 |
+
print(f"π§ LLM Insights: {len(enhanced_result.metadata.get('llm_insights', ''))} chars")
|
| 300 |
+
print(f"π Final Response: {len(response)} chars")
|
| 301 |
+
print(f"β‘ Processing Status: {final_state['processing_status']}")
|
| 302 |
+
|
| 303 |
+
return True
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
async def run_all_tests():
|
| 307 |
+
"""Run both temperature and precipitation workflow tests"""
|
| 308 |
+
print("π§ͺ PHASE 4 COMPREHENSIVE WORKFLOW TESTS")
|
| 309 |
+
print("="*80)
|
| 310 |
+
|
| 311 |
+
try:
|
| 312 |
+
# Test 1: Temperature query
|
| 313 |
+
print("TEST 1: Temperature workflow")
|
| 314 |
+
success1 = await test_full_workflow()
|
| 315 |
+
|
| 316 |
+
# Test 2: Precipitation query
|
| 317 |
+
print("\nTEST 2: Precipitation workflow")
|
| 318 |
+
success2 = await test_precipitation_workflow()
|
| 319 |
+
|
| 320 |
+
if success1 and success2:
|
| 321 |
+
print("\nπ All Phase 4 workflow tests successful!")
|
| 322 |
+
return True
|
| 323 |
+
else:
|
| 324 |
+
print("\nβ Some Phase 4 tests failed")
|
| 325 |
+
return False
|
| 326 |
+
|
| 327 |
+
except Exception as e:
|
| 328 |
+
print(f"\nβ Phase 4 tests failed with error: {e}")
|
| 329 |
+
import traceback
|
| 330 |
+
traceback.print_exc()
|
| 331 |
+
return False
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
if __name__ == "__main__":
|
| 335 |
+
success = asyncio.run(run_all_tests())
|
| 336 |
+
if success:
|
| 337 |
+
print("\nπ All Phase 4 tests completed successfully!")
|
| 338 |
+
else:
|
| 339 |
+
print("\nβ Some Phase 4 tests failed")
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
OMIRL Performance Test Analysis
|
| 3 |
+
|
| 4 |
+
This script analyzes why OMIRL integration tests are slow and provides solutions.
|
| 5 |
+
"""
|
| 6 |
+
import asyncio
|
| 7 |
+
import time
|
| 8 |
+
|
| 9 |
+
async def analyze_omirl_performance():
|
| 10 |
+
"""Analyze OMIRL test performance issues"""
|
| 11 |
+
|
| 12 |
+
print("π OMIRL Test Performance Analysis")
|
| 13 |
+
print("=" * 50)
|
| 14 |
+
|
| 15 |
+
print("\nπ Current Timing Breakdown:")
|
| 16 |
+
print(" β’ Browser initialization: ~2-3 seconds")
|
| 17 |
+
print(" β’ Page navigation: ~3-5 seconds")
|
| 18 |
+
print(" β’ AngularJS wait: 5 seconds (fixed)")
|
| 19 |
+
print(" β’ Network idle wait: 8 seconds (with timeout)")
|
| 20 |
+
print(" β’ Angular rendering wait: 2 seconds (fixed)")
|
| 21 |
+
print(" β’ Data extraction: ~1-2 seconds")
|
| 22 |
+
print(" β’ Rate limiting: 1 second")
|
| 23 |
+
print(" βββββββββββββββββββββββββββββββββ")
|
| 24 |
+
print(" β’ TOTAL PER TEST: ~22-26 seconds")
|
| 25 |
+
|
| 26 |
+
print("\nπ Performance Issues Identified:")
|
| 27 |
+
print(" 1. Fixed waits instead of smart waiting")
|
| 28 |
+
print(" 2. No browser session reuse between tests")
|
| 29 |
+
print(" 3. Full web scraping on every test call")
|
| 30 |
+
print(" 4. OMIRL website may be slow/unreliable")
|
| 31 |
+
print(" 5. Potential browser session accumulation")
|
| 32 |
+
|
| 33 |
+
print("\nπ‘ Solutions Implemented:")
|
| 34 |
+
print(" β
Fast tests with mocked data (< 0.1s each)")
|
| 35 |
+
print(" β
Test timeouts to prevent hanging")
|
| 36 |
+
print(" β
Proper browser cleanup in finally blocks")
|
| 37 |
+
print(" β
Separate fast vs integration test suites")
|
| 38 |
+
|
| 39 |
+
print("\nβ‘ Recommended Test Strategy:")
|
| 40 |
+
print(" π Daily development: Use fast tests (test_fast.py)")
|
| 41 |
+
print(" π§ͺ CI/CD pipeline: Use fast tests + selective integration")
|
| 42 |
+
print(" π Full validation: Run integration tests weekly")
|
| 43 |
+
print(" π Performance: Mock web calls, test logic only")
|
| 44 |
+
|
| 45 |
+
print("\nπ― Test Performance Comparison:")
|
| 46 |
+
print(" β’ Fast tests (mocked): ~0.08 seconds")
|
| 47 |
+
print(" β’ Integration tests: ~22-26 seconds each")
|
| 48 |
+
print(" β’ Speed improvement: 275-325x faster!")
|
| 49 |
+
|
| 50 |
+
return True
|
| 51 |
+
|
| 52 |
+
async def test_browser_cleanup():
|
| 53 |
+
"""Test browser cleanup to prevent session accumulation"""
|
| 54 |
+
from services.web.browser import close_all_browser_sessions
|
| 55 |
+
|
| 56 |
+
print("\nπ§Ή Testing browser cleanup...")
|
| 57 |
+
|
| 58 |
+
try:
|
| 59 |
+
await close_all_browser_sessions()
|
| 60 |
+
print("β
Browser cleanup completed successfully")
|
| 61 |
+
return True
|
| 62 |
+
except Exception as e:
|
| 63 |
+
print(f"β οΈ Browser cleanup warning: {e}")
|
| 64 |
+
return False
|
| 65 |
+
|
| 66 |
+
async def main():
|
| 67 |
+
"""Main analysis function"""
|
| 68 |
+
|
| 69 |
+
await analyze_omirl_performance()
|
| 70 |
+
|
| 71 |
+
print("\n" + "=" * 50)
|
| 72 |
+
print("π§ Cleanup Test")
|
| 73 |
+
await test_browser_cleanup()
|
| 74 |
+
|
| 75 |
+
print("\n" + "=" * 50)
|
| 76 |
+
print("π Summary & Recommendations:")
|
| 77 |
+
print(" 1. Use test_fast.py for regular development")
|
| 78 |
+
print(" 2. Run integration tests only when needed")
|
| 79 |
+
print(" 3. Add timeouts to prevent hanging tests")
|
| 80 |
+
print(" 4. Consider mocking for CI/CD pipelines")
|
| 81 |
+
print(" 5. Monitor OMIRL website availability")
|
| 82 |
+
|
| 83 |
+
print("\nβ
Analysis complete!")
|
| 84 |
+
|
| 85 |
+
if __name__ == "__main__":
|
| 86 |
+
asyncio.run(main())
|
|
@@ -2,21 +2,26 @@
|
|
| 2 |
"""
|
| 3 |
Test for OMIRL Adapter Integration
|
| 4 |
|
| 5 |
-
Tests the updated adapter.py that
|
| 6 |
-
with YAML-based validation. This
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
Created: September 6, 2025
|
|
|
|
| 10 |
Branch: omirl_refactor
|
| 11 |
-
Purpose: Validate the adapter integration with new architecture
|
| 12 |
"""
|
| 13 |
|
| 14 |
import sys
|
| 15 |
import asyncio
|
| 16 |
from pathlib import Path
|
| 17 |
|
| 18 |
-
# Add the
|
| 19 |
-
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 20 |
|
| 21 |
from tools.omirl.adapter import omirl_tool
|
| 22 |
|
|
@@ -55,6 +60,48 @@ async def test_adapter_basic():
|
|
| 55 |
return False
|
| 56 |
|
| 57 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
async def test_adapter_invalid_mode():
|
| 59 |
"""Test adapter error handling with invalid mode"""
|
| 60 |
print("\nπ§ͺ Testing adapter error handling (invalid mode)...")
|
|
@@ -105,15 +152,42 @@ async def test_adapter_invalid_sensor():
|
|
| 105 |
return False
|
| 106 |
|
| 107 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
async def main():
|
| 109 |
"""Run all adapter tests"""
|
| 110 |
print("π Starting OMIRL Adapter Integration Tests")
|
| 111 |
print("===========================================")
|
| 112 |
|
| 113 |
tests = [
|
| 114 |
-
("Basic Functionality", test_adapter_basic),
|
|
|
|
| 115 |
("Invalid Mode Handling", test_adapter_invalid_mode),
|
| 116 |
("Invalid Sensor Handling", test_adapter_invalid_sensor),
|
|
|
|
| 117 |
]
|
| 118 |
|
| 119 |
results = []
|
|
|
|
| 2 |
"""
|
| 3 |
Test for OMIRL Adapter Integration
|
| 4 |
|
| 5 |
+
Tests the updated adapter.py that supports both valori_stazioni and massimi_precipitazione
|
| 6 |
+
subtasks with YAML-based validation. This comprehensive test covers:
|
| 7 |
+
|
| 8 |
+
- Both valori_stazioni and massimi_precipitazione subtasks
|
| 9 |
+
- Filter validation and routing
|
| 10 |
+
- Error handling for invalid modes, sensors, and subtasks
|
| 11 |
+
- Response format consistency
|
| 12 |
|
| 13 |
Created: September 6, 2025
|
| 14 |
+
Updated: September 7, 2025
|
| 15 |
Branch: omirl_refactor
|
| 16 |
+
Purpose: Validate the complete adapter integration with new architecture
|
| 17 |
"""
|
| 18 |
|
| 19 |
import sys
|
| 20 |
import asyncio
|
| 21 |
from pathlib import Path
|
| 22 |
|
| 23 |
+
# Add the project root directory to sys.path so we can import the OMIRL modules
|
| 24 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 25 |
|
| 26 |
from tools.omirl.adapter import omirl_tool
|
| 27 |
|
|
|
|
| 60 |
return False
|
| 61 |
|
| 62 |
|
| 63 |
+
async def test_adapter_precipitation():
|
| 64 |
+
"""Test adapter functionality with massimi_precipitazione subtask"""
|
| 65 |
+
print("π§ͺ Testing adapter with precipitation data...")
|
| 66 |
+
|
| 67 |
+
try:
|
| 68 |
+
result = await omirl_tool(
|
| 69 |
+
mode="tables",
|
| 70 |
+
subtask="massimi_precipitazione",
|
| 71 |
+
filters={"provincia": "GENOVA"},
|
| 72 |
+
language="it"
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
print(f" Success: {'summary_text' in result and len(result.get('summary_text', '')) > 0}")
|
| 76 |
+
print(f" Summary length: {len(result.get('summary_text', ''))}")
|
| 77 |
+
print(f" Artifacts: {len(result.get('artifacts', []))}")
|
| 78 |
+
print(f" Sources: {len(result.get('sources', []))}")
|
| 79 |
+
print(f" Metadata keys: {list(result.get('metadata', {}).keys())}")
|
| 80 |
+
print(f" Warnings: {len(result.get('warnings', []))}")
|
| 81 |
+
|
| 82 |
+
# Check required fields
|
| 83 |
+
required_fields = ['summary_text', 'artifacts', 'sources', 'metadata', 'warnings']
|
| 84 |
+
has_all_fields = all(field in result for field in required_fields)
|
| 85 |
+
|
| 86 |
+
print(f" Has all required fields: {has_all_fields}")
|
| 87 |
+
print(f" Subtask: {result.get('metadata', {}).get('subtask')}")
|
| 88 |
+
|
| 89 |
+
# Validate sources contain precipitation table
|
| 90 |
+
has_precipitation_source = any('maxtable' in source for source in result.get('sources', []))
|
| 91 |
+
print(f" Has precipitation source: {has_precipitation_source}")
|
| 92 |
+
|
| 93 |
+
return (has_all_fields and
|
| 94 |
+
len(result.get('summary_text', '')) > 0 and
|
| 95 |
+
result.get('metadata', {}).get('subtask') == 'massimi_precipitazione' and
|
| 96 |
+
has_precipitation_source)
|
| 97 |
+
|
| 98 |
+
except Exception as e:
|
| 99 |
+
print(f" β Test failed with exception: {e}")
|
| 100 |
+
import traceback
|
| 101 |
+
traceback.print_exc()
|
| 102 |
+
return False
|
| 103 |
+
|
| 104 |
+
|
| 105 |
async def test_adapter_invalid_mode():
|
| 106 |
"""Test adapter error handling with invalid mode"""
|
| 107 |
print("\nπ§ͺ Testing adapter error handling (invalid mode)...")
|
|
|
|
| 152 |
return False
|
| 153 |
|
| 154 |
|
| 155 |
+
async def test_adapter_invalid_subtask():
|
| 156 |
+
"""Test adapter error handling with invalid subtask"""
|
| 157 |
+
print("\nπ§ͺ Testing adapter error handling (invalid subtask)...")
|
| 158 |
+
|
| 159 |
+
try:
|
| 160 |
+
result = await omirl_tool(
|
| 161 |
+
mode="tables",
|
| 162 |
+
subtask="invalid_subtask",
|
| 163 |
+
filters={},
|
| 164 |
+
language="it"
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
error_keywords = ['non supportato', 'Invalid subtask', 'Sottotask non supportato']
|
| 168 |
+
has_error = any(keyword in result.get('summary_text', '') for keyword in error_keywords)
|
| 169 |
+
|
| 170 |
+
print(f" Error detected: {has_error}")
|
| 171 |
+
print(f" Summary: {result.get('summary_text', '')[:100]}...")
|
| 172 |
+
|
| 173 |
+
return has_error
|
| 174 |
+
|
| 175 |
+
except Exception as e:
|
| 176 |
+
print(f" β Test failed with exception: {e}")
|
| 177 |
+
return False
|
| 178 |
+
|
| 179 |
+
|
| 180 |
async def main():
|
| 181 |
"""Run all adapter tests"""
|
| 182 |
print("π Starting OMIRL Adapter Integration Tests")
|
| 183 |
print("===========================================")
|
| 184 |
|
| 185 |
tests = [
|
| 186 |
+
("Basic Functionality (Valori Stazioni)", test_adapter_basic),
|
| 187 |
+
("Precipitation Functionality", test_adapter_precipitation),
|
| 188 |
("Invalid Mode Handling", test_adapter_invalid_mode),
|
| 189 |
("Invalid Sensor Handling", test_adapter_invalid_sensor),
|
| 190 |
+
("Invalid Subtask Handling", test_adapter_invalid_subtask),
|
| 191 |
]
|
| 192 |
|
| 193 |
results = []
|
|
@@ -1,178 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Test suite for OMIRL Adapter with Massimi Precipitazione support
|
| 3 |
-
|
| 4 |
-
Tests the updated adapter functionality including:
|
| 5 |
-
- Both valori_stazioni and massimi_precipitazione subtasks
|
| 6 |
-
- Filter validation and routing
|
| 7 |
-
- Response format consistency
|
| 8 |
-
- Error handling
|
| 9 |
-
"""
|
| 10 |
-
import asyncio
|
| 11 |
-
import sys
|
| 12 |
-
from pathlib import Path
|
| 13 |
-
|
| 14 |
-
# Add parent directories to path for imports
|
| 15 |
-
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 16 |
-
|
| 17 |
-
from tools.omirl.adapter import omirl_tool
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
class TestOMIRLAdapter:
|
| 21 |
-
"""Test cases for OMIRL adapter functionality"""
|
| 22 |
-
|
| 23 |
-
async def test_valori_stazioni_subtask(self):
|
| 24 |
-
"""Test valori_stazioni subtask (existing functionality)"""
|
| 25 |
-
print("\nπ§ͺ Testing valori_stazioni subtask...")
|
| 26 |
-
|
| 27 |
-
result = await omirl_tool(
|
| 28 |
-
mode="tables",
|
| 29 |
-
subtask="valori_stazioni",
|
| 30 |
-
filters={"tipo_sensore": "Temperatura"},
|
| 31 |
-
language="it"
|
| 32 |
-
)
|
| 33 |
-
|
| 34 |
-
# Validate response structure
|
| 35 |
-
assert isinstance(result, dict)
|
| 36 |
-
assert "summary_text" in result
|
| 37 |
-
assert "artifacts" in result
|
| 38 |
-
assert "sources" in result
|
| 39 |
-
assert "metadata" in result
|
| 40 |
-
assert "warnings" in result
|
| 41 |
-
|
| 42 |
-
# Validate sources
|
| 43 |
-
assert "sensorstable" in result["sources"][0]
|
| 44 |
-
|
| 45 |
-
# Validate metadata
|
| 46 |
-
assert result["metadata"]["subtask"] == "valori_stazioni"
|
| 47 |
-
|
| 48 |
-
print("β
Valori stazioni subtask works")
|
| 49 |
-
return result
|
| 50 |
-
|
| 51 |
-
async def test_massimi_precipitazione_subtask(self):
|
| 52 |
-
"""Test massimi_precipitazione subtask (new functionality)"""
|
| 53 |
-
print("\nπ§ͺ Testing massimi_precipitazione subtask...")
|
| 54 |
-
|
| 55 |
-
result = await omirl_tool(
|
| 56 |
-
mode="tables",
|
| 57 |
-
subtask="massimi_precipitazione",
|
| 58 |
-
filters={"provincia": "GENOVA"},
|
| 59 |
-
language="it"
|
| 60 |
-
)
|
| 61 |
-
|
| 62 |
-
# Validate response structure
|
| 63 |
-
assert isinstance(result, dict)
|
| 64 |
-
assert "summary_text" in result
|
| 65 |
-
assert "artifacts" in result
|
| 66 |
-
assert "sources" in result
|
| 67 |
-
assert "metadata" in result
|
| 68 |
-
assert "warnings" in result
|
| 69 |
-
|
| 70 |
-
# Validate sources
|
| 71 |
-
assert "maxtable" in result["sources"][0]
|
| 72 |
-
|
| 73 |
-
# Validate metadata
|
| 74 |
-
assert result["metadata"]["subtask"] == "massimi_precipitazione"
|
| 75 |
-
|
| 76 |
-
print("β
Massimi precipitazione subtask works")
|
| 77 |
-
return result
|
| 78 |
-
|
| 79 |
-
async def test_zona_allerta_filter(self):
|
| 80 |
-
"""Test zona d'allerta filtering"""
|
| 81 |
-
print("\nπ§ͺ Testing zona d'allerta filter...")
|
| 82 |
-
|
| 83 |
-
result = await omirl_tool(
|
| 84 |
-
mode="tables",
|
| 85 |
-
subtask="massimi_precipitazione",
|
| 86 |
-
filters={"zona_allerta": "A"},
|
| 87 |
-
language="it"
|
| 88 |
-
)
|
| 89 |
-
|
| 90 |
-
assert isinstance(result, dict)
|
| 91 |
-
print("β
Zona d'allerta filter works")
|
| 92 |
-
return result
|
| 93 |
-
|
| 94 |
-
async def test_invalid_subtask(self):
|
| 95 |
-
"""Test invalid subtask handling"""
|
| 96 |
-
print("\nπ§ͺ Testing invalid subtask...")
|
| 97 |
-
|
| 98 |
-
result = await omirl_tool(
|
| 99 |
-
mode="tables",
|
| 100 |
-
subtask="invalid_subtask",
|
| 101 |
-
filters={},
|
| 102 |
-
language="it"
|
| 103 |
-
)
|
| 104 |
-
|
| 105 |
-
# Should return error response
|
| 106 |
-
assert isinstance(result, dict)
|
| 107 |
-
assert "β οΈ" in result["summary_text"]
|
| 108 |
-
assert result["metadata"]["success"] == False
|
| 109 |
-
|
| 110 |
-
print("β
Invalid subtask handled correctly")
|
| 111 |
-
return result
|
| 112 |
-
|
| 113 |
-
async def test_sensor_validation_for_precipitation(self):
|
| 114 |
-
"""Test that sensor validation is skipped for precipitation subtask"""
|
| 115 |
-
print("\nπ§ͺ Testing sensor validation skip for precipitation...")
|
| 116 |
-
|
| 117 |
-
# This should work - sensor type should be ignored for precipitation
|
| 118 |
-
result = await omirl_tool(
|
| 119 |
-
mode="tables",
|
| 120 |
-
subtask="massimi_precipitazione",
|
| 121 |
-
filters={"tipo_sensore": "SomeInvalidSensor"}, # Should be ignored
|
| 122 |
-
language="it"
|
| 123 |
-
)
|
| 124 |
-
|
| 125 |
-
# Should succeed because sensor validation is skipped for precipitation
|
| 126 |
-
assert isinstance(result, dict)
|
| 127 |
-
print("β
Sensor validation correctly skipped for precipitation")
|
| 128 |
-
return result
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
# Integration test function
async def test_adapter_integration():
    """Run every OMIRL adapter scenario end to end and report the outcome.

    Returns True when all scenarios complete, False on the first exception.
    """
    print("🧪 Running OMIRL adapter integration test...")
    print("=" * 60)

    tests = TestOMIRLAdapter()

    # (banner, coroutine to await, label used when echoing the summary)
    scenarios = (
        ("1️⃣ Testing valori_stazioni...", tests.test_valori_stazioni_subtask, "Summary"),
        ("2️⃣ Testing massimi_precipitazione...", tests.test_massimi_precipitazione_subtask, "Summary"),
        ("3️⃣ Testing zona_allerta filter...", tests.test_zona_allerta_filter, "Summary"),
        ("4️⃣ Testing error handling...", tests.test_invalid_subtask, "Error"),
        ("5️⃣ Testing sensor validation...", tests.test_sensor_validation_for_precipitation, "Summary"),
    )

    try:
        for banner, scenario, label in scenarios:
            print(f"\n{banner}")
            result = await scenario()
            # Echo only a 100-char preview so the console stays readable.
            print(f"   {label}: {result['summary_text'][:100]}...")

        print("\n✅ All adapter tests completed successfully!")
        return True

    except Exception as e:
        print(f"\n❌ Adapter test failed: {e}")
        import traceback
        traceback.print_exc()
        return False
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
if __name__ == "__main__":
    # Allow running the integration test as a standalone script; the exit
    # code mirrors the boolean outcome for CI consumption.
    exit_code = 0 if asyncio.run(test_adapter_integration()) else 1
    sys.exit(exit_code)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Fast OMIRL Tests with Mocked Web Calls
|
| 3 |
+
|
| 4 |
+
This test module provides faster unit tests by mocking the slow web scraping calls.
|
| 5 |
+
Use this for CI/CD and development, while keeping the integration tests for full validation.
|
| 6 |
+
"""
|
| 7 |
+
import pytest
|
| 8 |
+
import sys
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from unittest.mock import Mock, patch, AsyncMock
|
| 11 |
+
|
| 12 |
+
# Add parent directories to path for imports
|
| 13 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 14 |
+
|
| 15 |
+
from tools.omirl.shared import OMIRLResult, OMIRLFilterSet
|
| 16 |
+
from tools.omirl.adapter import omirl_tool
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestOMIRLAdapterFast:
    """Fast tests for the OMIRL adapter with mocked web calls.

    Every network-touching entry point is patched, so this class is safe for
    CI/CD and local development where the OMIRL site is unreachable.
    """

    @pytest.mark.asyncio
    @patch('services.web.table_scraper.fetch_omirl_stations')
    async def test_valori_stazioni_fast(self, mock_fetch_stations):
        """Test valori_stazioni with mocked station data."""
        print("\n🚀 Fast test: valori_stazioni...")

        # Mock the web scraper response with two plausible station rows.
        mock_fetch_stations.return_value = [
            {
                "Nome": "Test Station 1",
                "Codice": "TEST1",
                "Comune": "Genova",
                "Provincia": "GE",
                "ultimo": "25.5",
                "Max": "28.0",
                "Min": "20.1",
            },
            {
                "Nome": "Test Station 2",
                "Codice": "TEST2",
                "Comune": "Savona",
                "Provincia": "SV",
                "ultimo": "23.2",
                "Max": "26.8",
                "Min": "18.5",
            },
        ]

        # Call the adapter
        result = await omirl_tool(
            mode="tables",
            subtask="valori_stazioni",
            filters={"tipo_sensore": "Temperatura", "provincia": "GE"},
            language="it",
        )

        # Validate the standard response envelope.
        assert isinstance(result, dict)
        assert "summary_text" in result
        assert "artifacts" in result
        assert "sources" in result
        assert "metadata" in result
        assert "warnings" in result

        # The scraper must be invoked exactly once with the sensor type.
        mock_fetch_stations.assert_called_once_with("Temperatura")

        print(f"✅ Fast test completed: {result['summary_text'][:50]}...")
        return result

    @pytest.mark.asyncio
    @patch('services.web.table_scraper.fetch_omirl_massimi_precipitazioni')
    async def test_massimi_precipitazione_fast(self, mock_fetch_precip):
        """Test massimi_precipitazione with mocked data."""
        print("\n🚀 Fast test: massimi_precipitazione...")

        # Mock the precipitation data response (one zone row, one province row).
        mock_fetch_precip.return_value = {
            "zona_allerta": [
                {
                    "Max (mm)": "A",
                    "5'": "0.0 [02:10] Station A1",
                    "15'": "0.0 [02:10] Station A1",
                    "30'": "0.0 [02:10] Station A1",
                    "1h": "0.2 [05:45] Station A2",
                    "3h": "0.4 [08:15] Station A2",
                    "6h": "0.6 [12:30] Station A3",
                    "12h": "0.8 [18:45] Station A3",
                    "24h": "1.2 [06:00] Station A4",
                }
            ],
            "province": [
                {
                    "Max (mm)": "Genova",
                    "5'": "0.1 [03:20] Genova Station",
                    "15'": "0.2 [08:15] Genova Station",
                    "30'": "0.3 [09:00] Genova Station",
                    "1h": "0.5 [11:30] Genova Station",
                    "3h": "0.8 [14:45] Genova Station",
                    "6h": "1.2 [18:00] Genova Station",
                    "12h": "1.8 [22:15] Genova Station",
                    "24h": "2.5 [08:30] Genova Station",
                }
            ],
        }

        # Call the adapter
        result = await omirl_tool(
            mode="tables",
            subtask="massimi_precipitazione",
            filters={"zona_allerta": "A"},
            language="it",
        )

        # Validate the standard response envelope.
        assert isinstance(result, dict)
        assert "summary_text" in result
        assert "artifacts" in result
        assert "sources" in result
        assert "metadata" in result

        # Verify the mocked scraper was actually exercised.
        mock_fetch_precip.assert_called_once()

        print(f"✅ Fast test completed: {result['summary_text'][:50]}...")
        return result

    @pytest.mark.asyncio
    async def test_invalid_sensor_validation_fast(self):
        """Test sensor validation rejects unknown sensors without web calls."""
        print("\n🚀 Fast test: validation...")

        result = await omirl_tool(
            mode="tables",
            subtask="valori_stazioni",
            filters={"tipo_sensore": "InvalidSensor"},
            language="it",
        )

        # Should return an error without making web calls.
        assert isinstance(result, dict)
        # FIX: the expected marker string was mojibake-corrupted; the adapter
        # emits the same "⚠️" warning sign asserted in test_mode_validation_fast.
        assert "⚠️" in result["summary_text"]
        assert "non valido" in result["summary_text"]

        print(f"✅ Validation test completed: {result['summary_text'][:50]}...")
        return result

    @pytest.mark.asyncio
    async def test_mode_validation_fast(self):
        """Test mode validation rejects unknown modes without web calls."""
        print("\n🚀 Fast test: mode validation...")

        result = await omirl_tool(
            mode="invalid_mode",
            subtask="valori_stazioni",
            filters={},
            language="it",
        )

        # Should return an error without making web calls.
        assert isinstance(result, dict)
        assert "⚠️" in result["summary_text"]
        # Idiomatic falsiness check instead of `== False`.
        assert result["metadata"]["success"] is False

        print("✅ Mode validation test completed")
        return result
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class TestOMIRLTasksFast:
    """Fast tests for individual OMIRL tasks with mocked data."""

    @pytest.mark.asyncio
    @patch('services.web.table_scraper.fetch_omirl_stations')
    async def test_valori_stazioni_task_fast(self, mock_fetch):
        """Exercise the valori_stazioni task directly against mocked rows."""
        from tools.omirl.tables.valori_stazioni import fetch_valori_stazioni_async

        print("\n🚀 Fast task test: valori_stazioni...")

        # A single canned station row stands in for the scraped table.
        mock_fetch.return_value = [
            {"Nome": "Mock Station", "Codice": "MOCK", "Provincia": "GE"}
        ]

        sensor_filters = OMIRLFilterSet({"tipo_sensore": "Temperatura"})
        outcome = await fetch_valori_stazioni_async(sensor_filters)

        assert isinstance(outcome, OMIRLResult)
        assert outcome.success
        assert len(outcome.data) == 1

        print("✅ Fast task test completed")
        return outcome

    @pytest.mark.asyncio
    @patch('services.web.table_scraper.fetch_omirl_massimi_precipitazioni')
    async def test_massimi_precipitazione_task_fast(self, mock_fetch):
        """Exercise the massimi_precipitazione task directly against mocked data."""
        from tools.omirl.tables.massimi_precipitazione import fetch_massimi_precipitazione_async

        print("\n🚀 Fast task test: massimi_precipitazione...")

        # Minimal canned payload: one zone record, no province records.
        mock_fetch.return_value = {
            "zona_allerta": [{"Max (mm)": "A", "24h": "0.5 [12:00] Test"}],
            "province": [],
        }

        precip_filters = OMIRLFilterSet({"zona_allerta": "A"})
        outcome = await fetch_massimi_precipitazione_async(precip_filters)

        assert isinstance(outcome, OMIRLResult)
        assert outcome.success

        print("✅ Fast task test completed")
        return outcome
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
# Fast integration test
@pytest.mark.asyncio
async def test_fast_integration():
    """Fast integration test covering the validation paths with no web calls."""
    print("\n🚀 Running fast integration test...")
    print("=" * 60)

    suite = TestOMIRLAdapterFast()

    try:
        # Validation scenarios never touch the network, so they run instantly.
        print("\n1️⃣ Testing validation...")
        await suite.test_invalid_sensor_validation_fast()

        print("\n2️⃣ Testing mode validation...")
        await suite.test_mode_validation_fast()

        print("\n✅ Fast integration completed successfully!")
        print("📊 Tests completed in < 1 second vs 30+ seconds for full integration")
        return True

    except Exception as exc:
        print(f"\n❌ Fast integration failed: {exc}")
        return False
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
if __name__ == "__main__":
    # Run the fast suite as a standalone script and report a one-line verdict.
    import asyncio

    outcome = asyncio.run(test_fast_integration())
    print(f"\nFast test result: {'✅ PASSED' if outcome else '❌ FAILED'}")
|
|
@@ -6,6 +6,21 @@ Tests the massimi_precipitazione module functionality including:
|
|
| 6 |
- Geographic filtering (zona d'allerta and province)
|
| 7 |
- Data structure validation
|
| 8 |
- Error handling
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
"""
|
| 10 |
import pytest
|
| 11 |
import sys
|
|
@@ -18,8 +33,9 @@ from tools.omirl.shared import OMIRLFilterSet
|
|
| 18 |
from tools.omirl.tables.massimi_precipitazione import (
|
| 19 |
fetch_massimi_precipitazione_async,
|
| 20 |
fetch_massimi_precipitazione,
|
| 21 |
-
|
| 22 |
-
_parse_single_value
|
|
|
|
| 23 |
)
|
| 24 |
|
| 25 |
|
|
@@ -27,57 +43,180 @@ class TestMassimiPrecipitazione:
|
|
| 27 |
"""Test cases for massimi precipitazione functionality"""
|
| 28 |
|
| 29 |
@pytest.mark.asyncio
|
| 30 |
-
async def
|
| 31 |
-
"""Test
|
| 32 |
-
print("\nπ§ͺ Testing
|
| 33 |
|
| 34 |
-
#
|
| 35 |
-
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
-
#
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
assert hasattr(result, 'message')
|
| 45 |
-
assert hasattr(result, 'metadata')
|
| 46 |
|
| 47 |
-
if
|
| 48 |
-
print(f"β
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
if
|
| 74 |
-
|
| 75 |
-
assert 'Max (mm)' in sample
|
| 76 |
-
print(f"β
Province sample: {sample.get('Max (mm)')}")
|
| 77 |
-
|
| 78 |
else:
|
| 79 |
-
print(f"
|
| 80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
|
| 82 |
def test_sync_wrapper(self):
|
| 83 |
"""Test the synchronous wrapper function"""
|
|
@@ -109,25 +248,26 @@ class TestMassimiPrecipitazione:
|
|
| 109 |
|
| 110 |
# Test zona d'allerta filtering
|
| 111 |
filters_zona = OMIRLFilterSet({"zona_allerta": "B"})
|
| 112 |
-
|
|
|
|
| 113 |
|
| 114 |
assert len(filtered["zona_allerta"]) == 1
|
| 115 |
assert filtered["zona_allerta"][0]["Max (mm)"] == "B"
|
| 116 |
-
assert len(filtered["province"]) == 3 # No province filter, all included
|
| 117 |
print("β
Zona d'allerta filtering works")
|
| 118 |
|
| 119 |
# Test province filtering
|
| 120 |
filters_prov = OMIRLFilterSet({"provincia": "GENOVA"})
|
| 121 |
-
|
|
|
|
| 122 |
|
| 123 |
assert len(filtered["province"]) == 1
|
| 124 |
assert filtered["province"][0]["Max (mm)"] == "Genova"
|
| 125 |
-
assert len(filtered["zona_allerta"]) == 3 # No zona filter, all included
|
| 126 |
print("β
Province filtering works")
|
| 127 |
|
| 128 |
# Test province code mapping
|
| 129 |
filters_code = OMIRLFilterSet({"provincia": "GE"})
|
| 130 |
-
|
|
|
|
| 131 |
|
| 132 |
assert len(filtered["province"]) == 1
|
| 133 |
assert filtered["province"][0]["Max (mm)"] == "Genova"
|
|
@@ -163,8 +303,116 @@ class TestMassimiPrecipitazione:
|
|
| 163 |
assert result["value"] is None
|
| 164 |
print("β
Empty string handling works")
|
| 165 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
# Integration test function that can be run independently
|
|
|
|
| 168 |
async def test_massimi_precipitazione_integration():
|
| 169 |
"""Integration test for massimi precipitazione functionality"""
|
| 170 |
print("π§ͺ Running massimi precipitazione integration test...")
|
|
|
|
| 6 |
- Geographic filtering (zona d'allerta and province)
|
| 7 |
- Data structure validation
|
| 8 |
- Error handling
|
| 9 |
+
|
| 10 |
+
TODO - BROWSER MANAGEMENT ISSUES:
|
| 11 |
+
The integrated tests (test_format_precipitation_scenarios, test_invalid_input_scenarios)
|
| 12 |
+
work correctly but occasionally get stuck during browser navigation due to:
|
| 13 |
+
- Selenium WebDriver session management
|
| 14 |
+
- AngularJS table loading timing issues
|
| 15 |
+
- Network timeouts during OMIRL site interaction
|
| 16 |
+
|
| 17 |
+
These tests validate the complete workflow but may need optimization for:
|
| 18 |
+
- Browser session reuse across test scenarios
|
| 19 |
+
- Better timeout handling for AngularJS data loading
|
| 20 |
+
- Retry mechanisms for network-dependent operations
|
| 21 |
+
|
| 22 |
+
For now, unit tests (test_format_precipitation_data_simple_unit) provide reliable
|
| 23 |
+
validation of the formatting logic without browser dependencies.
|
| 24 |
"""
|
| 25 |
import pytest
|
| 26 |
import sys
|
|
|
|
| 33 |
from tools.omirl.tables.massimi_precipitazione import (
|
| 34 |
fetch_massimi_precipitazione_async,
|
| 35 |
fetch_massimi_precipitazione,
|
| 36 |
+
_apply_filters_to_precipitation_data,
|
| 37 |
+
_parse_single_value,
|
| 38 |
+
format_precipitation_data_simple
|
| 39 |
)
|
| 40 |
|
| 41 |
|
|
|
|
| 43 |
"""Test cases for massimi precipitazione functionality"""
|
| 44 |
|
| 45 |
@pytest.mark.asyncio
|
| 46 |
+
async def test_format_precipitation_scenarios(self):
|
| 47 |
+
"""Test formatting function with different input scenarios"""
|
| 48 |
+
print("\nπ§ͺ Testing precipitation formatting scenarios...")
|
| 49 |
|
| 50 |
+
# Scenario 1: Only zona d'allerta
|
| 51 |
+
print("\n--- Scenario 1: Only zona d'allerta ---")
|
| 52 |
+
filters1 = OMIRLFilterSet({"zona_allerta": "A"})
|
| 53 |
+
result1 = await fetch_massimi_precipitazione_async(filters1)
|
| 54 |
|
| 55 |
+
if result1.success:
|
| 56 |
+
print(f"β
Result: {result1.message}")
|
| 57 |
+
if result1.metadata.get('summary'):
|
| 58 |
+
print(f"π Summary: {result1.metadata['summary']}")
|
| 59 |
+
else:
|
| 60 |
+
print(f"β Failed: {result1.message}")
|
| 61 |
|
| 62 |
+
# Scenario 2: zona d'allerta + periodo
|
| 63 |
+
print("\n--- Scenario 2: zona d'allerta + periodo ---")
|
| 64 |
+
filters2 = OMIRLFilterSet({"zona_allerta": "A", "periodo": "1h"})
|
| 65 |
+
result2 = await fetch_massimi_precipitazione_async(filters2)
|
|
|
|
|
|
|
| 66 |
|
| 67 |
+
if result2.success:
|
| 68 |
+
print(f"β
Result: {result2.message}")
|
| 69 |
+
if result2.metadata.get('summary'):
|
| 70 |
+
print(f"π Summary: {result2.metadata['summary']}")
|
| 71 |
+
else:
|
| 72 |
+
print(f"β Failed: {result2.message}")
|
| 73 |
+
|
| 74 |
+
# Scenario 3: Only provincia
|
| 75 |
+
print("\n--- Scenario 3: Only provincia ---")
|
| 76 |
+
filters3 = OMIRLFilterSet({"provincia": "Genova"})
|
| 77 |
+
result3 = await fetch_massimi_precipitazione_async(filters3)
|
| 78 |
+
|
| 79 |
+
if result3.success:
|
| 80 |
+
print(f"β
Result: {result3.message}")
|
| 81 |
+
if result3.metadata.get('summary'):
|
| 82 |
+
print(f"π Summary: {result3.metadata['summary']}")
|
| 83 |
+
else:
|
| 84 |
+
print(f"β Failed: {result3.message}")
|
| 85 |
+
|
| 86 |
+
# Scenario 4: provincia + zona d'allerta (should prioritize zona_allerta)
|
| 87 |
+
print("\n--- Scenario 4: provincia + zona d'allerta ---")
|
| 88 |
+
filters4 = OMIRLFilterSet({"provincia": "Genova", "zona_allerta": "A"})
|
| 89 |
+
result4 = await fetch_massimi_precipitazione_async(filters4)
|
| 90 |
+
|
| 91 |
+
if result4.success:
|
| 92 |
+
print(f"β
Result: {result4.message}")
|
| 93 |
+
if result4.metadata.get('summary'):
|
| 94 |
+
print(f"π Summary: {result4.metadata['summary']}")
|
|
|
|
|
|
|
|
|
|
| 95 |
else:
|
| 96 |
+
print(f"β Failed: {result4.message}")
|
| 97 |
+
|
| 98 |
+
# Basic validation that at least one scenario worked
|
| 99 |
+
assert result1 is not None or result2 is not None or result3 is not None or result4 is not None
|
| 100 |
+
|
| 101 |
+
@pytest.mark.asyncio
|
| 102 |
+
async def test_invalid_input_scenarios(self):
|
| 103 |
+
"""Test error handling with invalid inputs and edge cases"""
|
| 104 |
+
print("\nπ§ͺ Testing invalid input scenarios...")
|
| 105 |
+
|
| 106 |
+
# Scenario 5: Missing required filters (no zona_allerta or provincia)
|
| 107 |
+
print("\n--- Scenario 5: Missing required filters ---")
|
| 108 |
+
filters5 = OMIRLFilterSet({"periodo": "1h"}) # Only periodo, missing geographic filter
|
| 109 |
+
result5 = await fetch_massimi_precipitazione_async(filters5)
|
| 110 |
+
|
| 111 |
+
print(f"Expected failure: {result5.message}")
|
| 112 |
+
assert not result5.success, "Should fail when missing required geographic filters"
|
| 113 |
+
assert "obbligatori mancanti" in result5.message.lower() or "required" in result5.message.lower()
|
| 114 |
+
|
| 115 |
+
# Scenario 6: Empty filters
|
| 116 |
+
print("\n--- Scenario 6: Empty filters ---")
|
| 117 |
+
filters6 = OMIRLFilterSet({})
|
| 118 |
+
result6 = await fetch_massimi_precipitazione_async(filters6)
|
| 119 |
+
|
| 120 |
+
print(f"Expected failure: {result6.message}")
|
| 121 |
+
assert not result6.success, "Should fail when no filters provided"
|
| 122 |
+
|
| 123 |
+
# Scenario 7: Invalid zona_allerta value
|
| 124 |
+
print("\n--- Scenario 7: Invalid zona_allerta ---")
|
| 125 |
+
filters7 = OMIRLFilterSet({"zona_allerta": "Z"}) # Z is not a valid zone
|
| 126 |
+
result7 = await fetch_massimi_precipitazione_async(filters7)
|
| 127 |
+
|
| 128 |
+
if result7.success:
|
| 129 |
+
# Extraction might succeed but should return no data
|
| 130 |
+
assert len(result7.data.get("zona_allerta", [])) == 0, "Should return empty data for invalid zone"
|
| 131 |
+
print(f"β
Handled gracefully: Found {len(result7.data.get('zona_allerta', []))} records for invalid zone")
|
| 132 |
+
else:
|
| 133 |
+
print(f"β Failed as expected: {result7.message}")
|
| 134 |
+
|
| 135 |
+
# Scenario 8: Invalid provincia value
|
| 136 |
+
print("\n--- Scenario 8: Invalid provincia ---")
|
| 137 |
+
filters8 = OMIRLFilterSet({"provincia": "INVALID_PROVINCE"})
|
| 138 |
+
result8 = await fetch_massimi_precipitazione_async(filters8)
|
| 139 |
+
|
| 140 |
+
if result8.success:
|
| 141 |
+
# Extraction might succeed but should return no data
|
| 142 |
+
assert len(result8.data.get("province", [])) == 0, "Should return empty data for invalid province"
|
| 143 |
+
print(f"β
Handled gracefully: Found {len(result8.data.get('province', []))} records for invalid province")
|
| 144 |
+
else:
|
| 145 |
+
print(f"β Failed as expected: {result8.message}")
|
| 146 |
+
|
| 147 |
+
# Scenario 9: Invalid periodo value
|
| 148 |
+
print("\n--- Scenario 9: Invalid periodo ---")
|
| 149 |
+
filters9 = OMIRLFilterSet({"zona_allerta": "A", "periodo": "99h"}) # Invalid time period
|
| 150 |
+
result9 = await fetch_massimi_precipitazione_async(filters9)
|
| 151 |
+
|
| 152 |
+
if result9.success:
|
| 153 |
+
# Should succeed but with no filtered data for the invalid period
|
| 154 |
+
filtered_record = result9.data.get("zona_allerta", [])
|
| 155 |
+
if filtered_record:
|
| 156 |
+
# Check if the invalid period was filtered out
|
| 157 |
+
has_invalid_period = any("99h" in str(record) for record in filtered_record)
|
| 158 |
+
assert not has_invalid_period, "Invalid period should be filtered out"
|
| 159 |
+
print(f"β
Handled gracefully: Invalid periodo filtered appropriately")
|
| 160 |
+
else:
|
| 161 |
+
print(f"β Failed as expected: {result9.message}")
|
| 162 |
+
|
| 163 |
+
# Scenario 10: Valid filters but edge case combination
|
| 164 |
+
print("\n--- Scenario 10: Edge case - provincia + invalid periodo ---")
|
| 165 |
+
filters10 = OMIRLFilterSet({"provincia": "Genova", "periodo": "invalid_time"})
|
| 166 |
+
result10 = await fetch_massimi_precipitazione_async(filters10)
|
| 167 |
+
|
| 168 |
+
if result10.success:
|
| 169 |
+
print(f"β
Handled edge case: {result10.message}")
|
| 170 |
+
# Should have data but no filtered period data
|
| 171 |
+
if result10.data.get("province"):
|
| 172 |
+
print(f"π Found province data but handled invalid periodo gracefully")
|
| 173 |
+
else:
|
| 174 |
+
print(f"β Failed as expected: {result10.message}")
|
| 175 |
+
|
| 176 |
+
print("\nβ
Invalid input scenarios testing completed")
|
| 177 |
+
|
| 178 |
+
def test_format_precipitation_data_simple_unit(self):
|
| 179 |
+
"""Test the formatting function directly with mock data"""
|
| 180 |
+
print("\nπ§ͺ Testing format_precipitation_data_simple function directly...")
|
| 181 |
+
|
| 182 |
+
# Mock data structure
|
| 183 |
+
mock_filtered_data = {
|
| 184 |
+
"zona_allerta": [{
|
| 185 |
+
"Max (mm)": "A",
|
| 186 |
+
"5'": "0.0 [02:10] Sella di Gouta",
|
| 187 |
+
"15'": "0.0 [02:10] Sella di Gouta",
|
| 188 |
+
"1h": "0.0 [02:10] Sella di Gouta",
|
| 189 |
+
"24h": "0.2 [03:05] Colle Belenda"
|
| 190 |
+
}],
|
| 191 |
+
"province": []
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
# Test scenario: zona_allerta without periodo
|
| 195 |
+
filters_zona = {"zona_allerta": "A"}
|
| 196 |
+
result_zona = format_precipitation_data_simple(mock_filtered_data, filters_zona)
|
| 197 |
+
print(f"π Zona without periodo:\n{result_zona}")
|
| 198 |
+
|
| 199 |
+
assert "Zona d'allerta A" in result_zona
|
| 200 |
+
assert "Riepilogo" in result_zona
|
| 201 |
+
assert "0.0mm alle 02:10 (Sella di Gouta)" in result_zona
|
| 202 |
+
assert "0.2mm alle 03:05 (Colle Belenda)" in result_zona
|
| 203 |
+
|
| 204 |
+
# Test scenario: zona_allerta with periodo
|
| 205 |
+
filters_zona_periodo = {"zona_allerta": "A", "periodo": "1h"}
|
| 206 |
+
result_zona_periodo = format_precipitation_data_simple(mock_filtered_data, filters_zona_periodo)
|
| 207 |
+
print(f"\nπ Zona with periodo:\n{result_zona_periodo}")
|
| 208 |
+
|
| 209 |
+
assert "**1h**: 0.0mm alle 02:10 (Sella di Gouta)" in result_zona_periodo
|
| 210 |
+
assert "Riepilogo" not in result_zona_periodo # Should not show summary when specific period requested
|
| 211 |
+
|
| 212 |
+
# Test empty data
|
| 213 |
+
empty_data = {"zona_allerta": [], "province": []}
|
| 214 |
+
result_empty = format_precipitation_data_simple(empty_data, filters_zona)
|
| 215 |
+
print(f"\nπ Empty data:\n{result_empty}")
|
| 216 |
+
|
| 217 |
+
assert "Nessun dato trovato" in result_empty
|
| 218 |
+
|
| 219 |
+
print("β
Unit tests for formatting function completed")
|
| 220 |
|
| 221 |
def test_sync_wrapper(self):
|
| 222 |
"""Test the synchronous wrapper function"""
|
|
|
|
| 248 |
|
| 249 |
# Test zona d'allerta filtering
|
| 250 |
filters_zona = OMIRLFilterSet({"zona_allerta": "B"})
|
| 251 |
+
filters_dict = filters_zona.get_geographic_filters()
|
| 252 |
+
filtered = _apply_filters_to_precipitation_data(sample_data, filters_dict)
|
| 253 |
|
| 254 |
assert len(filtered["zona_allerta"]) == 1
|
| 255 |
assert filtered["zona_allerta"][0]["Max (mm)"] == "B"
|
|
|
|
| 256 |
print("β
Zona d'allerta filtering works")
|
| 257 |
|
| 258 |
# Test province filtering
|
| 259 |
filters_prov = OMIRLFilterSet({"provincia": "GENOVA"})
|
| 260 |
+
filters_dict = filters_prov.get_geographic_filters()
|
| 261 |
+
filtered = _apply_filters_to_precipitation_data(sample_data, filters_dict)
|
| 262 |
|
| 263 |
assert len(filtered["province"]) == 1
|
| 264 |
assert filtered["province"][0]["Max (mm)"] == "Genova"
|
|
|
|
| 265 |
print("β
Province filtering works")
|
| 266 |
|
| 267 |
# Test province code mapping
|
| 268 |
filters_code = OMIRLFilterSet({"provincia": "GE"})
|
| 269 |
+
filters_dict = filters_code.get_geographic_filters()
|
| 270 |
+
filtered = _apply_filters_to_precipitation_data(sample_data, filters_dict)
|
| 271 |
|
| 272 |
assert len(filtered["province"]) == 1
|
| 273 |
assert filtered["province"][0]["Max (mm)"] == "Genova"
|
|
|
|
| 303 |
assert result["value"] is None
|
| 304 |
print("β
Empty string handling works")
|
| 305 |
|
| 306 |
+
def test_format_precipitation_data_scenarios(self):
|
| 307 |
+
"""Test the new formatting function with different input scenarios"""
|
| 308 |
+
print("\nπ§ͺ Testing format_precipitation_data_simple with different scenarios...")
|
| 309 |
+
|
| 310 |
+
# Scenario 1: Zona d'allerta with all time periods
|
| 311 |
+
print("\nπ Scenario 1: Zona d'allerta A with all time periods")
|
| 312 |
+
zona_all_periods_data = {
|
| 313 |
+
"zona_allerta": [{
|
| 314 |
+
"Max (mm)": "A",
|
| 315 |
+
"5'": "0.1 [14:25] Colle del Melogno",
|
| 316 |
+
"15'": "0.2 [14:30] Colle del Melogno",
|
| 317 |
+
"30'": "0.3 [14:35] Colle del Melogno",
|
| 318 |
+
"1h": "0.5 [14:40] Colle del Melogno",
|
| 319 |
+
"3h": "1.2 [14:45] Colle del Melogno",
|
| 320 |
+
"6h": "2.1 [14:50] Colle del Melogno",
|
| 321 |
+
"12h": "3.5 [14:55] Colle del Melogno",
|
| 322 |
+
"24h": "6.2 [15:00] Colle del Melogno"
|
| 323 |
+
}],
|
| 324 |
+
"province": []
|
| 325 |
+
}
|
| 326 |
+
filters = {"zona_allerta": "A"}
|
| 327 |
+
|
| 328 |
+
result = format_precipitation_data_simple(zona_all_periods_data, filters)
|
| 329 |
+
print(result)
|
| 330 |
+
print()
|
| 331 |
+
|
| 332 |
+
# Scenario 2: Zona d'allerta with specific time period
|
| 333 |
+
print("π Scenario 2: Zona d'allerta B with specific period (1h)")
|
| 334 |
+
zona_specific_period_data = {
|
| 335 |
+
"zona_allerta": [{
|
| 336 |
+
"Max (mm)": "B",
|
| 337 |
+
"1h": "2.8 [13:15] Monte Settepani"
|
| 338 |
+
}],
|
| 339 |
+
"province": []
|
| 340 |
+
}
|
| 341 |
+
filters = {"zona_allerta": "B", "periodo": "1h"}
|
| 342 |
+
|
| 343 |
+
result = format_precipitation_data_simple(zona_specific_period_data, filters)
|
| 344 |
+
print(result)
|
| 345 |
+
print()
|
| 346 |
+
|
| 347 |
+
# Scenario 3: Province with all periods
|
| 348 |
+
print("π Scenario 3: Provincia Genova with all time periods")
|
| 349 |
+
province_all_periods_data = {
|
| 350 |
+
"zona_allerta": [],
|
| 351 |
+
"province": [{
|
| 352 |
+
"Max (mm)": "Genova",
|
| 353 |
+
"5'": "0.0 [10:00] Genova Centro",
|
| 354 |
+
"15'": "0.1 [10:05] Genova Centro",
|
| 355 |
+
"30'": "0.2 [10:10] Genova Centro",
|
| 356 |
+
"1h": "0.4 [10:15] Genova Centro",
|
| 357 |
+
"3h": "0.8 [10:20] Genova Centro",
|
| 358 |
+
"6h": "1.5 [10:25] Genova Centro",
|
| 359 |
+
"12h": "2.2 [10:30] Genova Centro",
|
| 360 |
+
"24h": "4.1 [10:35] Genova Centro"
|
| 361 |
+
}]
|
| 362 |
+
}
|
| 363 |
+
filters = {"provincia": "Genova"}
|
| 364 |
+
|
| 365 |
+
result = format_precipitation_data_simple(province_all_periods_data, filters)
|
| 366 |
+
print(result)
|
| 367 |
+
print()
|
| 368 |
+
|
| 369 |
+
# Scenario 4: Province with specific period but no data
|
| 370 |
+
print("π Scenario 4: Provincia Savona with specific period (3h) - no data")
|
| 371 |
+
province_no_data = {
|
| 372 |
+
"zona_allerta": [],
|
| 373 |
+
"province": [{
|
| 374 |
+
"Max (mm)": "Savona",
|
| 375 |
+
"3h": "" # Empty data
|
| 376 |
+
}]
|
| 377 |
+
}
|
| 378 |
+
filters = {"provincia": "Savona", "periodo": "3h"}
|
| 379 |
+
|
| 380 |
+
result = format_precipitation_data_simple(province_no_data, filters)
|
| 381 |
+
print(result)
|
| 382 |
+
print()
|
| 383 |
+
|
| 384 |
+
# Scenario 5: Empty data
|
| 385 |
+
print("π Scenario 5: No data found")
|
| 386 |
+
empty_data = {"zona_allerta": [], "province": []}
|
| 387 |
+
filters = {"zona_allerta": "C"}
|
| 388 |
+
|
| 389 |
+
result = format_precipitation_data_simple(empty_data, filters)
|
| 390 |
+
print(result)
|
| 391 |
+
print()
|
| 392 |
+
|
| 393 |
+
# Scenario 6: Mixed data - both zona and province (shouldn't happen in practice but test anyway)
|
| 394 |
+
print("π Scenario 6: Mixed data (both zona and province)")
|
| 395 |
+
mixed_data = {
|
| 396 |
+
"zona_allerta": [{
|
| 397 |
+
"Max (mm)": "C",
|
| 398 |
+
"24h": "1.5 [09:00] Zona C Station"
|
| 399 |
+
}],
|
| 400 |
+
"province": [{
|
| 401 |
+
"Max (mm)": "Imperia",
|
| 402 |
+
"24h": "0.8 [09:00] Imperia Station"
|
| 403 |
+
}]
|
| 404 |
+
}
|
| 405 |
+
filters = {"zona_allerta": "C", "provincia": "Imperia"} # Both filters (unusual)
|
| 406 |
+
|
| 407 |
+
result = format_precipitation_data_simple(mixed_data, filters)
|
| 408 |
+
print(result)
|
| 409 |
+
print()
|
| 410 |
+
|
| 411 |
+
print("β
All formatting scenarios tested successfully")
|
| 412 |
+
|
| 413 |
|
| 414 |
# Integration test function that can be run independently
|
| 415 |
+
@pytest.mark.asyncio
|
| 416 |
async def test_massimi_precipitazione_integration():
|
| 417 |
"""Integration test for massimi precipitazione functionality"""
|
| 418 |
print("π§ͺ Running massimi precipitazione integration test...")
|
|
@@ -0,0 +1,390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test suite for OMIRL Validation System
|
| 3 |
+
|
| 4 |
+
Tests the YAML-based validation architecture including:
|
| 5 |
+
- Sensor type validation with suggestions
|
| 6 |
+
- Province validation and code mapping
|
| 7 |
+
- Zone validation
|
| 8 |
+
- Period validation
|
| 9 |
+
- Complete request validation
|
| 10 |
+
- Error handling and suggestions
|
| 11 |
+
"""
|
| 12 |
+
import pytest
|
| 13 |
+
import sys
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
|
| 16 |
+
# Add parent directories to path for imports
|
| 17 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 18 |
+
|
| 19 |
+
from tools.omirl.shared.validation import (
|
| 20 |
+
OMIRLValidator,
|
| 21 |
+
get_validator,
|
| 22 |
+
validate_sensor_type,
|
| 23 |
+
validate_provincia,
|
| 24 |
+
validate_zona,
|
| 25 |
+
validate_periodo,
|
| 26 |
+
validate_mode_task_combination,
|
| 27 |
+
get_valid_sensor_types,
|
| 28 |
+
get_valid_provinces,
|
| 29 |
+
get_validation_errors
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TestOMIRLValidation:
    """Test cases for OMIRL validation functionality.

    All checks go through the shared validator returned by ``get_validator()``,
    whose accepted values come from a YAML configuration; suggestion lists for
    near-miss inputs therefore depend on that configuration.
    """

    def test_validator_initialization(self):
        """Test that validator initializes correctly"""
        print("\n🧪 Testing validator initialization...")

        validator = get_validator()
        assert validator is not None
        # The validator is expected to expose its three loaded config sections.
        assert hasattr(validator, 'parameters')
        assert hasattr(validator, 'mode_tasks')
        assert hasattr(validator, 'validation_rules')

        print("✅ Validator initialized correctly")

    def test_sensor_type_validation(self):
        """Test sensor type validation with valid and invalid inputs"""
        print("\n🧪 Testing sensor type validation...")

        validator = get_validator()

        # Test valid sensor types
        valid_sensors = ["Temperatura", "Precipitazione", "Vento", "Umidità dell'aria"]
        for sensor in valid_sensors:
            is_valid, corrected, suggestions = validator.validate_sensor_type(sensor)
            assert is_valid, f"Valid sensor '{sensor}' should pass validation"
            assert corrected == sensor or corrected is None
            print(f" ✅ {sensor}: valid")

        # Test invalid sensor types with suggestions
        invalid_cases = [
            ("Temperature", ["Temperatura"]),  # English vs Italian - should get suggestions
            ("Temperatur", ["Temperatura"]),  # Typo - should get suggestions
            ("Precipitazioni", ["Precipitazione"]),  # Plural vs singular
            ("Humidity", []),  # Too different - may not get suggestions
        ]

        for invalid_sensor, expected_suggestions in invalid_cases:
            is_valid, corrected, suggestions = validator.validate_sensor_type(invalid_sensor)
            assert not is_valid, f"Invalid sensor '{invalid_sensor}' should fail validation"
            assert corrected is None

            if expected_suggestions:
                assert len(suggestions) > 0, f"Should provide suggestions for '{invalid_sensor}'"
                # Check if at least one expected suggestion is present
                has_expected = any(exp in suggestions for exp in expected_suggestions)
                assert has_expected, f"Expected suggestions {expected_suggestions} not found in {suggestions}"

            print(f" ❌ {invalid_sensor}: invalid, suggestions: {suggestions}")

        # Test empty input — optional parameters are treated as valid when blank.
        is_valid, corrected, suggestions = validator.validate_sensor_type("")
        assert is_valid, "Empty sensor type should be valid (optional parameter)"

        print("✅ Sensor type validation works correctly")

    def test_province_validation(self):
        """Test province validation and code mapping"""
        print("\n🧪 Testing province validation...")

        validator = get_validator()

        # Test valid province codes
        valid_codes = ["GE", "SV", "IM", "SP"]
        for code in valid_codes:
            is_valid, corrected, suggestions = validator.validate_provincia(code)
            assert is_valid, f"Valid province code '{code}' should pass validation"
            assert corrected == code
            print(f" ✅ {code}: valid code")

        # Test valid province names (should be converted to codes)
        province_mappings = {
            "GENOVA": "GE",
            "SAVONA": "SV",
            "IMPERIA": "IM",
            "LA SPEZIA": "SP"
        }

        for name, expected_code in province_mappings.items():
            is_valid, corrected, suggestions = validator.validate_provincia(name)
            assert is_valid, f"Valid province name '{name}' should pass validation"
            assert corrected == expected_code, f"'{name}' should be corrected to '{expected_code}'"
            print(f" ✅ {name} → {corrected}: valid conversion")

        # Test invalid provinces - use cases that might get suggestions
        invalid_test_cases = [
            ("GENOA", True),  # Similar to GENOVA - should get suggestions
            ("SAVON", True),  # Similar to SAVONA - should get suggestions
            ("ROMA", False),  # Too different - might not get suggestions
            ("INVALID", False),  # Too different - might not get suggestions
        ]

        for invalid, should_have_suggestions in invalid_test_cases:
            is_valid, corrected, suggestions = validator.validate_provincia(invalid)
            assert not is_valid, f"Invalid province '{invalid}' should fail validation"
            assert corrected is None

            if should_have_suggestions:
                assert len(suggestions) > 0, f"Should provide suggestions for '{invalid}'"

            print(f" ❌ {invalid}: invalid, suggestions: {suggestions}")

        # Test empty input
        is_valid, corrected, suggestions = validator.validate_provincia("")
        assert is_valid, "Empty province should be valid (optional parameter)"

        print("✅ Province validation works correctly")

    def test_zone_validation(self):
        """Test alert zone validation"""
        print("\n🧪 Testing zone validation...")

        validator = get_validator()

        # Test valid zones (including the C+ / C- sub-zones).
        valid_zones = ["A", "B", "C", "C+", "C-", "D", "E"]
        for zone in valid_zones:
            is_valid, corrected, suggestions = validator.validate_zona(zone)
            assert is_valid, f"Valid zone '{zone}' should pass validation"
            print(f" ✅ {zone}: valid zone")

        # Test invalid zones
        invalid_zones = ["F", "Z", "1", "AB"]
        for invalid in invalid_zones:
            is_valid, corrected, suggestions = validator.validate_zona(invalid)
            assert not is_valid, f"Invalid zone '{invalid}' should fail validation"
            print(f" ❌ {invalid}: invalid, suggestions: {suggestions}")

        print("✅ Zone validation works correctly")

    def test_period_validation(self):
        """Test time period validation"""
        print("\n🧪 Testing period validation...")

        validator = get_validator()

        # Test valid periods (minute periods use the trailing apostrophe notation).
        valid_periods = ["5'", "15'", "30'", "1h", "3h", "6h", "12h", "24h"]
        for period in valid_periods:
            is_valid, corrected, suggestions = validator.validate_periodo(period)
            assert is_valid, f"Valid period '{period}' should pass validation"
            print(f" ✅ {period}: valid period")

        # Test invalid periods
        invalid_periods = ["2h", "48h", "1d", "invalid"]
        for invalid in invalid_periods:
            is_valid, corrected, suggestions = validator.validate_periodo(invalid)
            assert not is_valid, f"Invalid period '{invalid}' should fail validation"
            print(f" ❌ {invalid}: invalid, suggestions: {suggestions}")

        print("✅ Period validation works correctly")

    def test_mode_task_validation(self):
        """Test mode and task combination validation"""
        print("\n🧪 Testing mode/task validation...")

        validator = get_validator()

        # Test valid combinations
        valid_combinations = [
            ("tables", "valori_stazioni"),
            ("tables", "massimi_precipitazione")
        ]

        for mode, task in valid_combinations:
            is_valid, valid_tasks = validator.validate_mode_task_combination(mode, task)
            assert is_valid, f"Valid combination '{mode}/{task}' should pass validation"
            assert task in valid_tasks
            print(f" ✅ {mode}/{task}: valid combination")

        # Test invalid mode — an unknown mode yields no valid tasks at all.
        is_valid, valid_tasks = validator.validate_mode_task_combination("invalid_mode", "task")
        assert not is_valid, "Invalid mode should fail validation"
        assert len(valid_tasks) == 0
        print(" ❌ invalid_mode: invalid mode")

        # Test invalid task for valid mode
        is_valid, valid_tasks = validator.validate_mode_task_combination("tables", "invalid_task")
        assert not is_valid, "Invalid task should fail validation"
        assert len(valid_tasks) > 0  # Should still return valid tasks for the mode
        print(" ❌ tables/invalid_task: invalid task")

        print("✅ Mode/task validation works correctly")

    def test_complete_request_validation(self):
        """Test complete request validation"""
        print("\n🧪 Testing complete request validation...")

        validator = get_validator()

        # Test valid valori_stazioni request
        valid_request = {
            "tipo_sensore": "Temperatura",
            "provincia": "GENOVA"
        }

        is_valid, corrected, errors = validator.validate_complete_request(
            "tables", "valori_stazioni", valid_request
        )
        assert is_valid, f"Valid request should pass validation, errors: {errors}"
        # Province names are normalized to two-letter codes during validation.
        assert corrected["provincia"] == "GE", "Province should be auto-corrected to code"
        print(f" ✅ Valid request: corrected filters = {corrected}")

        # Test invalid request with multiple errors
        invalid_request = {
            "tipo_sensore": "InvalidSensor",
            "provincia": "InvalidProvince",
            "zona": "InvalidZone"
        }

        is_valid, corrected, errors = validator.validate_complete_request(
            "tables", "valori_stazioni", invalid_request
        )
        assert not is_valid, "Invalid request should fail validation"
        assert len(errors) > 0, "Should provide error messages"
        print(f" ❌ Invalid request: {len(errors)} errors = {errors}")

        # Test massimi_precipitazione request
        precip_request = {
            "zona_allerta": "A",
            "periodo": "24h"
        }

        is_valid, corrected, errors = validator.validate_complete_request(
            "tables", "massimi_precipitazione", precip_request
        )
        assert is_valid, f"Valid precipitation request should pass, errors: {errors}"
        print(f" ✅ Valid precipitation request: {corrected}")

        print("✅ Complete request validation works correctly")

    def test_convenience_functions(self):
        """Test convenience validation functions"""
        print("\n🧪 Testing convenience functions...")

        # Test validate_sensor_type function
        assert validate_sensor_type("Temperatura"), "Should validate valid sensor"
        assert not validate_sensor_type("Invalid"), "Should reject invalid sensor"
        print(" ✅ validate_sensor_type works")

        # Test validate_provincia function
        is_valid, corrected = validate_provincia("GENOVA")
        assert is_valid and corrected == "GE", "Should validate and correct province"
        print(" ✅ validate_provincia works")

        # Test validate_zona function
        assert validate_zona("A"), "Should validate valid zone"
        assert not validate_zona("Z"), "Should reject invalid zone"
        print(" ✅ validate_zona works")

        # Test validate_periodo function
        assert validate_periodo("24h"), "Should validate valid period"
        assert not validate_periodo("invalid"), "Should reject invalid period"
        print(" ✅ validate_periodo works")

        # Test validate_mode_task_combination function
        assert validate_mode_task_combination("tables", "valori_stazioni"), "Should validate valid combination"
        assert not validate_mode_task_combination("invalid", "task"), "Should reject invalid combination"
        print(" ✅ validate_mode_task_combination works")

        # Test get_valid_sensor_types function
        valid_types = get_valid_sensor_types()
        assert isinstance(valid_types, list), "Should return list"
        assert len(valid_types) > 0, "Should have valid sensor types"
        assert "Temperatura" in valid_types, "Should contain expected sensor type"
        print(f" ✅ get_valid_sensor_types works: {len(valid_types)} types")

        # Test get_valid_provinces function
        valid_provinces = get_valid_provinces()
        assert isinstance(valid_provinces, dict), "Should return dict"
        assert "GENOVA" in valid_provinces, "Should contain province mapping"
        assert valid_provinces["GENOVA"] == "GE", "Should have correct mapping"
        print(f" ✅ get_valid_provinces works: {len(valid_provinces)} provinces")

        # Test get_validation_errors function
        filters_with_errors = {"tipo_sensore": "Invalid", "provincia": "Invalid"}
        errors = get_validation_errors(filters_with_errors)
        assert len(errors) > 0, "Should detect validation errors"
        print(f" ✅ get_validation_errors works: {len(errors)} errors detected")

        print("✅ All convenience functions work correctly")

    def test_error_messages_and_suggestions(self):
        """Test that error messages include helpful suggestions"""
        print("\n🧪 Testing error messages and suggestions...")

        validator = get_validator()

        # Test sensor type suggestions - use cases that should get suggestions
        is_valid, corrected, suggestions = validator.validate_sensor_type("Temperature")
        assert not is_valid
        assert "Temperatura" in suggestions, "Should suggest closest match"
        print(f" ✅ 'Temperature' suggests: {suggestions}")

        # Test province suggestions
        is_valid, corrected, suggestions = validator.validate_provincia("Genova")
        # This might be valid (converted to GE) or invalid depending on config
        if not is_valid:
            assert len(suggestions) > 0, "Should provide suggestions for invalid province"
            print(f" ✅ 'Genova' suggests: {suggestions}")
        else:
            print(f" ✅ 'Genova' auto-corrected to: {corrected}")

        # Test zone suggestions - use a case that should get suggestions
        is_valid, corrected, suggestions = validator.validate_zona("AA")
        assert not is_valid
        assert len(suggestions) > 0, "Should suggest valid zones"
        assert "A" in suggestions, "Should suggest actual zones"
        print(f" ✅ 'AA' suggests: {suggestions}")

        print("✅ Error messages and suggestions work correctly")
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
# Integration test
|
| 347 |
+
def test_validation_integration():
    """Integration test for the entire validation system.

    Exercises validator creation, YAML configuration loading and one
    complete request-validation workflow.

    Returns:
        bool: True only if every step (including the validation workflow
        itself) succeeds; False on any failure or exception.
    """
    print("\n🧪 Running validation integration test...")
    print("=" * 60)

    try:
        # Test validator instantiation
        validator = get_validator()
        print("✅ Validator created successfully")

        # Test configuration loading
        assert len(validator.get_valid_sensor_types()) > 0
        print("✅ YAML configuration loaded successfully")

        # Test a complete validation workflow
        filters = {
            "tipo_sensore": "Temperatura",
            "provincia": "GENOVA",
            "zona_allerta": "A"
        }

        is_valid, corrected, errors = validator.validate_complete_request(
            "tables", "valori_stazioni", filters
        )

        if is_valid:
            print(f"✅ Complete workflow successful: {corrected}")
        else:
            # BUG FIX: the original printed the failure but still reported
            # "completed successfully" and returned True, so the __main__
            # runner showed PASSED even when validation failed.
            print(f"❌ Validation failed: {errors}")
            return False

        print("✅ Integration test completed successfully")
        return True

    except Exception as e:
        print(f"❌ Integration test failed: {e}")
        import traceback
        traceback.print_exc()
        return False
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
if __name__ == "__main__":
    # Script entry point: run the integration test and report a verdict.
    ok = test_validation_integration()
    verdict = '✅ PASSED' if ok else '❌ FAILED'
    print(f"\nValidation system test: {verdict}")
|
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test for OMIRL Valori Stazioni Task
|
| 4 |
+
|
| 5 |
+
Tests the refactored valori_stazioni functionality that extracts station
|
| 6 |
+
measurement data from OMIRL tables using the new YAML-based architecture.
|
| 7 |
+
|
| 8 |
+
Created: September 5, 2025
|
| 9 |
+
Branch: omirl_refactor
|
| 10 |
+
Purpose: Validate the refactored valori_stazioni task implementation
|
| 11 |
+
|
| 12 |
+
TODO - BROWSER MANAGEMENT ISSUES:
|
| 13 |
+
Similar to massimi_precipitazione tests, the integrated tests that require
|
| 14 |
+
live OMIRL data extraction may experience browser management issues:
|
| 15 |
+
- Selenium WebDriver session handling
|
| 16 |
+
- AngularJS table loading timing
|
| 17 |
+
- Network-dependent test reliability
|
| 18 |
+
|
| 19 |
+
The unit tests (test_format_station_scenarios, test_format_station_edge_cases)
|
| 20 |
+
provide reliable validation of the formatting logic without browser dependencies.
|
| 21 |
+
For live data tests, consider implementing retry mechanisms and better timeout handling.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import sys
|
| 25 |
+
from pathlib import Path
|
| 26 |
+
|
| 27 |
+
# Add the parent directory to sys.path so we can import the OMIRL modules
|
| 28 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 29 |
+
|
| 30 |
+
from tools.omirl.shared import OMIRLFilterSet
|
| 31 |
+
from tools.omirl.tables.valori_stazioni import fetch_valori_stazioni
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_valori_stazioni_basic():
    """Exercise valori_stazioni with the smallest possible filter set.

    Only the sensor type is supplied; status, message, row count,
    warnings and metadata keys are printed for manual inspection.
    """
    print("🧪 Testing basic valori_stazioni functionality...")

    criteria = OMIRLFilterSet({"tipo_sensore": "Temperatura"})
    outcome = fetch_valori_stazioni(criteria)

    row_count = len(outcome.data) if outcome.data else 0
    meta_keys = list(outcome.metadata.keys()) if outcome.metadata else []

    print(f" Success: {outcome.success}")
    print(f" Message: {outcome.message}")
    print(f" Data count: {row_count}")
    print(f" Warnings: {outcome.warnings}")
    print(f" Metadata keys: {meta_keys}")

    return outcome.success
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def test_valori_stazioni_with_provincia():
    """Run valori_stazioni restricted to a single province (GE)."""
    print("\n🧪 Testing valori_stazioni with provincia filter...")

    outcome = fetch_valori_stazioni(
        OMIRLFilterSet({"tipo_sensore": "Temperatura", "provincia": "GE"})
    )

    print(f" Success: {outcome.success}")
    print(f" Message: {outcome.message}")
    print(f" Data count: {len(outcome.data) if outcome.data else 0}")

    return outcome.success
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def test_valori_stazioni_with_stazione():
    """Run valori_stazioni narrowed down to one named station."""
    print("\n🧪 Testing valori_stazioni with specific station...")

    selection = {
        "tipo_sensore": "Temperatura",
        "provincia": "GE",
        "stazione": "GENOVA CENTRO FUNZIONALE"
    }
    outcome = fetch_valori_stazioni(OMIRLFilterSet(selection))

    print(f" Success: {outcome.success}")
    print(f" Message: {outcome.message}")
    print(f" Data count: {len(outcome.data) if outcome.data else 0}")

    return outcome.success
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def test_new_api_compatibility():
    """Verify that the new OMIRLFilterSet-based API is accepted end to end."""
    print("\n🧪 Testing new OMIRLFilterSet API...")

    # Build the filter object through the new constructor form.
    selection = {"tipo_sensore": "Temperatura", "provincia": "GE"}
    response = fetch_valori_stazioni(OMIRLFilterSet(selection))

    print(f" Success: {response.success}")
    print(f" Message: {response.message}")
    print(f" Data count: {len(response.data) if response.data else 0}")

    return response.success
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_invalid_filters():
    """Confirm that nonsense sensor/province values are rejected."""
    print("\n🧪 Testing invalid filter handling...")

    bogus = OMIRLFilterSet({
        "tipo_sensore": "InvalidSensor",
        "provincia": "InvalidProvince"
    })
    outcome = fetch_valori_stazioni(bogus)

    print(f" Success: {outcome.success}")
    print(f" Message: {outcome.message}")
    print(f" Expected failure: {'✅' if not outcome.success else '❌'}")

    # A failed fetch is the desired result for this test.
    return not outcome.success
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def test_format_station_scenarios():
    """Run the station formatter across four representative filter scenarios."""
    print("\n🧪 Testing station formatting scenarios...")

    # Import the formatting function (kept local, as in the rest of the file).
    from tools.omirl.tables.valori_stazioni import format_station_data_simple

    def _station(nome, comune, prov, ultimo, mx, mn):
        # Small factory for mock station rows.
        return {
            "Nome": nome, "Comune": comune, "Provincia": prov,
            "ultimo": ultimo, "max": mx, "min": mn, "unita": "°C"
        }

    mock_station_data = [
        _station("Stazione Test 1", "Genova", "GE", "22.5", "25.3", "18.7"),
        _station("Stazione Test 2", "Genova", "GE", "21.8", "24.1", "19.2"),
        _station("Stazione Savona", "Savona", "SV", "23.1", "26.0", "20.1"),
    ]

    # (header, filters, rows passed to the formatter, output label)
    scenarios = [
        ("Scenario 1: Only tipo_sensore",
         {"tipo_sensore": "Temperatura"},
         mock_station_data,
         "All stations"),
        ("Scenario 2: tipo_sensore + stazione",
         {"tipo_sensore": "Temperatura", "stazione": "Stazione Test 1"},
         [mock_station_data[0]],
         "Specific station"),
        ("Scenario 3: tipo_sensore + comune",
         {"tipo_sensore": "Temperatura", "comune": "Genova"},
         [s for s in mock_station_data if s["Comune"] == "Genova"],
         "Comune-filtered"),
        ("Scenario 4: tipo_sensore + provincia",
         {"tipo_sensore": "Temperatura", "provincia": "GE"},
         [s for s in mock_station_data if s["Provincia"] == "GE"],
         "Provincia-filtered"),
    ]

    for header, filter_dict, rows, label in scenarios:
        print(f"\n--- {header} ---")
        formatted = format_station_data_simple(rows, OMIRLFilterSet(filter_dict), "Temperatura")
        print(f"📊 {label} output:\n{formatted}")

    print("✅ Station formatting scenarios completed")
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def test_format_station_edge_cases():
    """Exercise the station formatter on empty and partially-filled records."""
    print("\n🧪 Testing station formatting edge cases...")

    from tools.omirl.tables.valori_stazioni import format_station_data_simple

    base_filters = OMIRLFilterSet({"tipo_sensore": "Temperatura"})

    # No rows at all: the formatter should report that nothing was found.
    empty_output = format_station_data_simple([], base_filters, "Temperatura")
    print(f"📊 Empty data result:\n{empty_output}")
    assert "Nessun dato trovato" in empty_output

    # A record with missing/blank fields must not crash the formatter.
    partial_row = {
        "Nome": "Incomplete Station",
        "Comune": "Unknown",
        "ultimo": "N/A",
        "max": None,
        "min": "",
        "unita": "°C"
    }
    partial_output = format_station_data_simple([partial_row], base_filters, "Temperatura")
    print(f"\n📊 Incomplete data result:\n{partial_output}")
    assert "N/A" in partial_output

    print("✅ Edge cases testing completed")
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def test_valori_stazioni_invalid_inputs():
|
| 229 |
+
"""Test error handling with invalid inputs"""
|
| 230 |
+
print("\nπ§ͺ Testing valori_stazioni invalid inputs...")
|
| 231 |
+
|
| 232 |
+
# Missing tipo_sensore
|
| 233 |
+
print("\n--- Missing tipo_sensore ---")
|
| 234 |
+
filters1 = OMIRLFilterSet({"provincia": "GE"})
|
| 235 |
+
result1 = fetch_valori_stazioni(filters1)
|
| 236 |
+
print(f"Expected failure: {result1.message}")
|
| 237 |
+
assert not result1.success, "Should fail when tipo_sensore is missing"
|
| 238 |
+
|
| 239 |
+
# Invalid tipo_sensore
|
| 240 |
+
print("\n--- Invalid tipo_sensore ---")
|
| 241 |
+
filters2 = OMIRLFilterSet({"tipo_sensore": "InvalidSensor"})
|
| 242 |
+
result2 = fetch_valori_stazioni(filters2)
|
| 243 |
+
print(f"Result: {result2.message}")
|
| 244 |
+
# May succeed but return empty data, or fail validation
|
| 245 |
+
|
| 246 |
+
# Empty filters
|
| 247 |
+
print("\n--- Empty filters ---")
|
| 248 |
+
filters3 = OMIRLFilterSet({})
|
| 249 |
+
result3 = fetch_valori_stazioni(filters3)
|
| 250 |
+
print(f"Expected failure: {result3.message}")
|
| 251 |
+
assert not result3.success, "Should fail when no filters provided"
|
| 252 |
+
|
| 253 |
+
print("β
Invalid inputs testing completed")
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
if __name__ == "__main__":
|
| 257 |
+
print("π Starting OMIRL Valori Stazioni Tests")
|
| 258 |
+
print("=====================================")
|
| 259 |
+
|
| 260 |
+
tests = [
|
| 261 |
+
test_valori_stazioni_basic,
|
| 262 |
+
test_valori_stazioni_with_provincia,
|
| 263 |
+
test_valori_stazioni_with_stazione,
|
| 264 |
+
test_new_api_compatibility,
|
| 265 |
+
test_invalid_filters,
|
| 266 |
+
test_format_station_scenarios,
|
| 267 |
+
test_format_station_edge_cases,
|
| 268 |
+
test_valori_stazioni_invalid_inputs
|
| 269 |
+
]
|
| 270 |
+
|
| 271 |
+
results = []
|
| 272 |
+
|
| 273 |
+
try:
|
| 274 |
+
for test_func in tests:
|
| 275 |
+
try:
|
| 276 |
+
result = test_func()
|
| 277 |
+
results.append(result)
|
| 278 |
+
except Exception as e:
|
| 279 |
+
print(f" β Test failed with exception: {e}")
|
| 280 |
+
results.append(False)
|
| 281 |
+
|
| 282 |
+
# Summary
|
| 283 |
+
passed = sum(results)
|
| 284 |
+
total = len(results)
|
| 285 |
+
print(f"\nπ Test Results: {passed}/{total} tests passed")
|
| 286 |
+
|
| 287 |
+
if passed == total:
|
| 288 |
+
print("β¨ All tests completed successfully!")
|
| 289 |
+
else:
|
| 290 |
+
print(f"β οΈ {total - passed} tests failed")
|
| 291 |
+
|
| 292 |
+
except Exception as e:
|
| 293 |
+
print(f"\nβ Test suite failed with error: {e}")
|
| 294 |
+
import traceback
|
| 295 |
+
traceback.print_exc()
|
| 296 |
+
sys.exit(1)
|
|
File without changes
|
|
@@ -122,7 +122,6 @@ if __name__ == "__main__":
|
|
| 122 |
await test_massimi_precipitazione_zona()
|
| 123 |
await test_massimi_precipitazione_provincia()
|
| 124 |
await test_geographic_filtering_validation()
|
| 125 |
-
await test_task_agnostic_summarization()
|
| 126 |
|
| 127 |
print("
|
| 128 |
π All manual tests completed!")
|
|
|
|
| 122 |
await test_massimi_precipitazione_zona()
|
| 123 |
await test_massimi_precipitazione_provincia()
|
| 124 |
await test_geographic_filtering_validation()
|
|
|
|
| 125 |
|
| 126 |
print("
|
| 127 |
π All manual tests completed!")
|
|
File without changes
|
|
@@ -1,155 +0,0 @@
|
|
| 1 |
-
#!/usr/bin/env python3
|
| 2 |
-
"""
|
| 3 |
-
Test for OMIRL Valori Stazioni Task
|
| 4 |
-
|
| 5 |
-
Tests the refactored valori_stazioni functionality that extracts station
|
| 6 |
-
measurement data from OMIRL tables using the new YAML-based architecture.
|
| 7 |
-
|
| 8 |
-
Created: September 5, 2025
|
| 9 |
-
Branch: omirl_refactor
|
| 10 |
-
Purpose: Validate the refactored valori_stazioni task implementation
|
| 11 |
-
"""
|
| 12 |
-
|
| 13 |
-
import sys
|
| 14 |
-
from pathlib import Path
|
| 15 |
-
|
| 16 |
-
# Add the parent directory to sys.path so we can import the OMIRL modules
|
| 17 |
-
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 18 |
-
|
| 19 |
-
from tools.omirl.shared import OMIRLFilterSet
|
| 20 |
-
from tools.omirl.tables.valori_stazioni import fetch_valori_stazioni, fetch_valori_stazioni_legacy
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
def test_valori_stazioni_basic():
|
| 24 |
-
"""Test basic valori_stazioni functionality with minimal filters"""
|
| 25 |
-
print("π§ͺ Testing basic valori_stazioni functionality...")
|
| 26 |
-
|
| 27 |
-
# Test with minimal filters
|
| 28 |
-
filter_dict = {
|
| 29 |
-
"tipo_sensore": "Temperatura"
|
| 30 |
-
}
|
| 31 |
-
|
| 32 |
-
filters = OMIRLFilterSet(filter_dict)
|
| 33 |
-
result = fetch_valori_stazioni(filters)
|
| 34 |
-
|
| 35 |
-
print(f" Success: {result.success}")
|
| 36 |
-
print(f" Message: {result.message}")
|
| 37 |
-
print(f" Data count: {len(result.data) if result.data else 0}")
|
| 38 |
-
print(f" Warnings: {result.warnings}")
|
| 39 |
-
print(f" Metadata keys: {list(result.metadata.keys()) if result.metadata else []}")
|
| 40 |
-
|
| 41 |
-
return result.success
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
def test_valori_stazioni_with_provincia():
|
| 45 |
-
"""Test valori_stazioni with provincia filter"""
|
| 46 |
-
print("\nπ§ͺ Testing valori_stazioni with provincia filter...")
|
| 47 |
-
|
| 48 |
-
filter_dict = {
|
| 49 |
-
"tipo_sensore": "Temperatura",
|
| 50 |
-
"provincia": "GE"
|
| 51 |
-
}
|
| 52 |
-
|
| 53 |
-
filters = OMIRLFilterSet(filter_dict)
|
| 54 |
-
result = fetch_valori_stazioni(filters)
|
| 55 |
-
|
| 56 |
-
print(f" Success: {result.success}")
|
| 57 |
-
print(f" Message: {result.message}")
|
| 58 |
-
print(f" Data count: {len(result.data) if result.data else 0}")
|
| 59 |
-
|
| 60 |
-
return result.success
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
def test_valori_stazioni_with_stazione():
|
| 64 |
-
"""Test valori_stazioni with specific station"""
|
| 65 |
-
print("\nπ§ͺ Testing valori_stazioni with specific station...")
|
| 66 |
-
|
| 67 |
-
filter_dict = {
|
| 68 |
-
"tipo_sensore": "Temperatura",
|
| 69 |
-
"provincia": "GE",
|
| 70 |
-
"stazione": "GENOVA CENTRO FUNZIONALE"
|
| 71 |
-
}
|
| 72 |
-
|
| 73 |
-
filters = OMIRLFilterSet(filter_dict)
|
| 74 |
-
result = fetch_valori_stazioni(filters)
|
| 75 |
-
|
| 76 |
-
print(f" Success: {result.success}")
|
| 77 |
-
print(f" Message: {result.message}")
|
| 78 |
-
print(f" Data count: {len(result.data) if result.data else 0}")
|
| 79 |
-
|
| 80 |
-
return result.success
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
def test_legacy_compatibility():
|
| 84 |
-
"""Test the legacy API for backward compatibility"""
|
| 85 |
-
print("\nπ§ͺ Testing legacy API compatibility...")
|
| 86 |
-
|
| 87 |
-
result = fetch_valori_stazioni_legacy(
|
| 88 |
-
tipo_sensore="Temperatura",
|
| 89 |
-
provincia="GE"
|
| 90 |
-
)
|
| 91 |
-
|
| 92 |
-
print(f" Success: {result.success}")
|
| 93 |
-
print(f" Message: {result.message}")
|
| 94 |
-
print(f" Data count: {len(result.data) if result.data else 0}")
|
| 95 |
-
|
| 96 |
-
return result.success
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
def test_invalid_filters():
|
| 100 |
-
"""Test handling of invalid filters"""
|
| 101 |
-
print("\nπ§ͺ Testing invalid filter handling...")
|
| 102 |
-
|
| 103 |
-
filter_dict = {
|
| 104 |
-
"tipo_sensore": "InvalidSensor",
|
| 105 |
-
"provincia": "InvalidProvince"
|
| 106 |
-
}
|
| 107 |
-
|
| 108 |
-
filters = OMIRLFilterSet(filter_dict)
|
| 109 |
-
result = fetch_valori_stazioni(filters)
|
| 110 |
-
|
| 111 |
-
print(f" Success: {result.success}")
|
| 112 |
-
print(f" Message: {result.message}")
|
| 113 |
-
print(f" Expected failure: {'β
' if not result.success else 'β'}")
|
| 114 |
-
|
| 115 |
-
return not result.success # We expect this to fail
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
if __name__ == "__main__":
|
| 119 |
-
print("π Starting OMIRL Valori Stazioni Tests")
|
| 120 |
-
print("=====================================")
|
| 121 |
-
|
| 122 |
-
tests = [
|
| 123 |
-
test_valori_stazioni_basic,
|
| 124 |
-
test_valori_stazioni_with_provincia,
|
| 125 |
-
test_valori_stazioni_with_stazione,
|
| 126 |
-
test_legacy_compatibility,
|
| 127 |
-
test_invalid_filters
|
| 128 |
-
]
|
| 129 |
-
|
| 130 |
-
results = []
|
| 131 |
-
|
| 132 |
-
try:
|
| 133 |
-
for test_func in tests:
|
| 134 |
-
try:
|
| 135 |
-
result = test_func()
|
| 136 |
-
results.append(result)
|
| 137 |
-
except Exception as e:
|
| 138 |
-
print(f" β Test failed with exception: {e}")
|
| 139 |
-
results.append(False)
|
| 140 |
-
|
| 141 |
-
# Summary
|
| 142 |
-
passed = sum(results)
|
| 143 |
-
total = len(results)
|
| 144 |
-
print(f"\nπ Test Results: {passed}/{total} tests passed")
|
| 145 |
-
|
| 146 |
-
if passed == total:
|
| 147 |
-
print("β¨ All tests completed successfully!")
|
| 148 |
-
else:
|
| 149 |
-
print(f"β οΈ {total - passed} tests failed")
|
| 150 |
-
|
| 151 |
-
except Exception as e:
|
| 152 |
-
print(f"\nβ Test suite failed with error: {e}")
|
| 153 |
-
import traceback
|
| 154 |
-
traceback.print_exc()
|
| 155 |
-
sys.exit(1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -9,14 +9,14 @@ and handles input validation, delegation, and output formatting.
|
|
| 9 |
Purpose:
|
| 10 |
- Validate agent requests against tool specification
|
| 11 |
- Route requests to appropriate task-specific modules
|
| 12 |
-
- Format responses using task-
|
| 13 |
- Handle graceful failure (never raise exceptions)
|
| 14 |
- Manage browser sessions and cleanup
|
| 15 |
|
| 16 |
Dependencies:
|
| 17 |
- Uses YAML-based validation architecture
|
| 18 |
- Delegates to task-specific modules in tables/ directory
|
| 19 |
-
- Uses task-
|
| 20 |
- Agent expects this interface to match the tool registry schema
|
| 21 |
|
| 22 |
Input Contract:
|
|
@@ -29,7 +29,7 @@ Input Contract:
|
|
| 29 |
|
| 30 |
Output Contract:
|
| 31 |
{
|
| 32 |
-
"summary_text": "
|
| 33 |
"artifacts": ["path/to/generated/files"],
|
| 34 |
"sources": ["https://omirl.regione.liguria.it/..."],
|
| 35 |
"metadata": {"timestamp": "...", "filters_applied": "..."},
|
|
@@ -38,8 +38,8 @@ Output Contract:
|
|
| 38 |
|
| 39 |
Task Architecture:
|
| 40 |
- Each subtask (valori_stazioni, massimi_precipitazione) has its own module
|
| 41 |
-
- All tasks
|
| 42 |
-
-
|
| 43 |
- Geographic resolution service handles municipalityβprovince mapping
|
| 44 |
|
| 45 |
Note: This is the ONLY file that should be imported by the agent registry.
|
|
@@ -69,7 +69,7 @@ async def omirl_tool(
|
|
| 69 |
|
| 70 |
This function provides the standardized interface for the agent to access
|
| 71 |
OMIRL weather station data. It validates inputs, delegates to appropriate
|
| 72 |
-
task-specific services, and formats responses with
|
| 73 |
|
| 74 |
Args:
|
| 75 |
mode: Operation mode ("tables" for station data extraction)
|
|
@@ -87,7 +87,7 @@ async def omirl_tool(
|
|
| 87 |
|
| 88 |
Returns:
|
| 89 |
Dict containing:
|
| 90 |
-
- summary_text:
|
| 91 |
- artifacts: List of generated JSON file paths
|
| 92 |
- sources: List of OMIRL data source URLs
|
| 93 |
- metadata: Extraction metadata and statistics
|
|
@@ -229,7 +229,7 @@ async def omirl_tool(
|
|
| 229 |
if result.metadata and result.metadata.get("summary"):
|
| 230 |
summary_data = result.metadata.get("summary")
|
| 231 |
|
| 232 |
-
# Handle
|
| 233 |
if isinstance(summary_data, dict) and "summary_text" in summary_data:
|
| 234 |
summary_text = summary_data["summary_text"]
|
| 235 |
elif isinstance(summary_data, str):
|
|
@@ -295,9 +295,6 @@ def _format_error_response(
|
|
| 295 |
}
|
| 296 |
|
| 297 |
|
| 298 |
-
# Tool specification for agent registry
|
| 299 |
-
|
| 300 |
-
|
| 301 |
# Tool specification for agent registry
|
| 302 |
OMIRL_TOOL_SPEC = {
|
| 303 |
"name": "omirl_tool",
|
|
|
|
| 9 |
Purpose:
|
| 10 |
- Validate agent requests against tool specification
|
| 11 |
- Route requests to appropriate task-specific modules
|
| 12 |
+
- Format responses using task-specific formatting (no LLM dependency)
|
| 13 |
- Handle graceful failure (never raise exceptions)
|
| 14 |
- Manage browser sessions and cleanup
|
| 15 |
|
| 16 |
Dependencies:
|
| 17 |
- Uses YAML-based validation architecture
|
| 18 |
- Delegates to task-specific modules in tables/ directory
|
| 19 |
+
- Uses task-specific formatting for fast, reliable output
|
| 20 |
- Agent expects this interface to match the tool registry schema
|
| 21 |
|
| 22 |
Input Contract:
|
|
|
|
| 29 |
|
| 30 |
Output Contract:
|
| 31 |
{
|
| 32 |
+
"summary_text": "Task-specific formatted summary with data details",
|
| 33 |
"artifacts": ["path/to/generated/files"],
|
| 34 |
"sources": ["https://omirl.regione.liguria.it/..."],
|
| 35 |
"metadata": {"timestamp": "...", "filters_applied": "..."},
|
|
|
|
| 38 |
|
| 39 |
Task Architecture:
|
| 40 |
- Each subtask (valori_stazioni, massimi_precipitazione) has its own module
|
| 41 |
+
- All tasks provide immediate, formatted output without LLM dependencies
|
| 42 |
+
- Task-specific formatting provides consistent bullet-point summaries
|
| 43 |
- Geographic resolution service handles municipalityβprovince mapping
|
| 44 |
|
| 45 |
Note: This is the ONLY file that should be imported by the agent registry.
|
|
|
|
| 69 |
|
| 70 |
This function provides the standardized interface for the agent to access
|
| 71 |
OMIRL weather station data. It validates inputs, delegates to appropriate
|
| 72 |
+
task-specific services, and formats responses with task-specific summaries.
|
| 73 |
|
| 74 |
Args:
|
| 75 |
mode: Operation mode ("tables" for station data extraction)
|
|
|
|
| 87 |
|
| 88 |
Returns:
|
| 89 |
Dict containing:
|
| 90 |
+
- summary_text: Task-specific formatted summary with data details
|
| 91 |
- artifacts: List of generated JSON file paths
|
| 92 |
- sources: List of OMIRL data source URLs
|
| 93 |
- metadata: Extraction metadata and statistics
|
|
|
|
| 229 |
if result.metadata and result.metadata.get("summary"):
|
| 230 |
summary_data = result.metadata.get("summary")
|
| 231 |
|
| 232 |
+
# Handle task-specific summary format
|
| 233 |
if isinstance(summary_data, dict) and "summary_text" in summary_data:
|
| 234 |
summary_text = summary_data["summary_text"]
|
| 235 |
elif isinstance(summary_data, str):
|
|
|
|
| 295 |
}
|
| 296 |
|
| 297 |
|
|
|
|
|
|
|
|
|
|
| 298 |
# Tool specification for agent registry
|
| 299 |
OMIRL_TOOL_SPEC = {
|
| 300 |
"name": "omirl_tool",
|
|
@@ -5,9 +5,8 @@ This package contains all table-based data extraction tasks for OMIRL.
|
|
| 5 |
These tasks correspond to the "Tabelle" section of the OMIRL website.
|
| 6 |
"""
|
| 7 |
|
| 8 |
-
from .valori_stazioni import fetch_valori_stazioni
|
| 9 |
|
| 10 |
__all__ = [
|
| 11 |
-
"fetch_valori_stazioni"
|
| 12 |
-
"fetch_valori_stazioni_legacy" # For backward compatibility
|
| 13 |
]
|
|
|
|
| 5 |
These tasks correspond to the "Tabelle" section of the OMIRL website.
|
| 6 |
"""
|
| 7 |
|
| 8 |
+
from .valori_stazioni import fetch_valori_stazioni
|
| 9 |
|
| 10 |
__all__ = [
|
| 11 |
+
"fetch_valori_stazioni"
|
|
|
|
| 12 |
]
|
|
@@ -105,78 +105,10 @@ async def fetch_massimi_precipitazione_async(filters: OMIRLFilterSet) -> OMIRLRe
|
|
| 105 |
result.data = filtered_data
|
| 106 |
result.message = f"Estratti dati precipitazione massima con filtri: {all_filters}"
|
| 107 |
|
| 108 |
-
|
| 109 |
if filtered_data:
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
from services.text.task_agnostic_summarization import (
|
| 113 |
-
create_massimi_precipitazione_summary,
|
| 114 |
-
analyze_precipitation_trends,
|
| 115 |
-
get_multi_task_summarizer
|
| 116 |
-
)
|
| 117 |
-
|
| 118 |
-
# Determine geographic and temporal scope
|
| 119 |
-
if all_filters.get('zona_allerta'):
|
| 120 |
-
geographic_scope = f"Zona d'allerta {all_filters['zona_allerta']}"
|
| 121 |
-
else:
|
| 122 |
-
geographic_scope = f"Provincia {all_filters.get('provincia', 'Unknown')}"
|
| 123 |
-
|
| 124 |
-
if all_filters.get('periodo'):
|
| 125 |
-
temporal_scope = f"Period {all_filters['periodo']}"
|
| 126 |
-
else:
|
| 127 |
-
temporal_scope = "All periods (5'-24h)"
|
| 128 |
-
|
| 129 |
-
# Analyze precipitation data for trends
|
| 130 |
-
data_insights = analyze_precipitation_trends(filtered_data)
|
| 131 |
-
|
| 132 |
-
# Create standardized task summary
|
| 133 |
-
task_summary = create_massimi_precipitazione_summary(
|
| 134 |
-
geographic_scope=geographic_scope,
|
| 135 |
-
temporal_scope=temporal_scope,
|
| 136 |
-
data_insights=data_insights,
|
| 137 |
-
filters_applied=all_filters
|
| 138 |
-
)
|
| 139 |
-
|
| 140 |
-
# For now, generate immediate summary (multi-task will be implemented in adapter)
|
| 141 |
-
summarizer = get_multi_task_summarizer()
|
| 142 |
-
summarizer.clear_results() # Clear any previous results
|
| 143 |
-
summarizer.add_task_result(task_summary)
|
| 144 |
-
summary = await summarizer.generate_final_summary(query_context="massimi precipitazione")
|
| 145 |
-
|
| 146 |
-
result.update_metadata(summary=summary)
|
| 147 |
-
|
| 148 |
-
except ImportError as e:
|
| 149 |
-
logger.warning(f"β οΈ New summarization service not available: {e}")
|
| 150 |
-
# Fallback to simple summary
|
| 151 |
-
if all_filters.get('periodo'):
|
| 152 |
-
# Specific time period was requested
|
| 153 |
-
periodo = all_filters['periodo']
|
| 154 |
-
zona_count = len(filtered_data.get("zona_allerta", []))
|
| 155 |
-
province_count = len(filtered_data.get("province", []))
|
| 156 |
-
|
| 157 |
-
if zona_count > 0:
|
| 158 |
-
summary = f"π§οΈ Precipitazione massima - Zona d'allerta: {zona_count} record trovati per periodo {periodo}"
|
| 159 |
-
else:
|
| 160 |
-
summary = f"π§οΈ Precipitazione massima - Provincia: {province_count} record trovati per periodo {periodo}"
|
| 161 |
-
else:
|
| 162 |
-
# All time periods included - summarize trends
|
| 163 |
-
zona_count = len(filtered_data.get("zona_allerta", []))
|
| 164 |
-
province_count = len(filtered_data.get("province", []))
|
| 165 |
-
|
| 166 |
-
if zona_count > 0:
|
| 167 |
-
zona_name = all_filters.get('zona_allerta', all_filters.get('zona'))
|
| 168 |
-
summary = f"π§οΈ Precipitazione massima - Zona d'allerta {zona_name}: dati completi per tutti i periodi temporali (5'-24h)"
|
| 169 |
-
else:
|
| 170 |
-
provincia_name = filters.provincia if hasattr(filters, 'provincia') and filters.provincia else all_filters.get('provincia')
|
| 171 |
-
summary = f"π§οΈ Precipitazione massima - Provincia {provincia_name}: dati completi per tutti i periodi temporali (5'-24h)"
|
| 172 |
-
|
| 173 |
-
result.update_metadata(summary=summary)
|
| 174 |
-
except Exception as e:
|
| 175 |
-
logger.error(f"β Error in precipitation summarization: {e}")
|
| 176 |
-
# Basic fallback summary if everything fails
|
| 177 |
-
zona_count = len(filtered_data.get("zona_allerta", []))
|
| 178 |
-
province_count = len(filtered_data.get("province", []))
|
| 179 |
-
result.update_metadata(summary=f"π§οΈ Estratti dati precipitazione massima: {zona_count} zone d'allerta, {province_count} province")
|
| 180 |
|
| 181 |
# Add detailed metadata
|
| 182 |
result.update_metadata(
|
|
@@ -396,6 +328,11 @@ def _parse_single_value(raw_value: str) -> Dict[str, Optional[str]]:
|
|
| 396 |
"time": match.group(2).strip(),
|
| 397 |
"station": match.group(3).strip()
|
| 398 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 399 |
else:
|
| 400 |
return {
|
| 401 |
"value": None,
|
|
@@ -407,4 +344,124 @@ def _parse_single_value(raw_value: str) -> Dict[str, Optional[str]]:
|
|
| 407 |
"value": None,
|
| 408 |
"time": None,
|
| 409 |
"station": raw_value
|
| 410 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
result.data = filtered_data
|
| 106 |
result.message = f"Estratti dati precipitazione massima con filtri: {all_filters}"
|
| 107 |
|
| 108 |
+
# Generate simple formatted summary (no LLM dependency)
|
| 109 |
if filtered_data:
|
| 110 |
+
summary = format_precipitation_data_simple(filtered_data, all_filters)
|
| 111 |
+
result.update_metadata(summary=summary)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
# Add detailed metadata
|
| 114 |
result.update_metadata(
|
|
|
|
| 328 |
"time": match.group(2).strip(),
|
| 329 |
"station": match.group(3).strip()
|
| 330 |
}
|
| 331 |
+
return {
|
| 332 |
+
"value": float(match.group(1)),
|
| 333 |
+
"time": match.group(2).strip(),
|
| 334 |
+
"station": match.group(3).strip()
|
| 335 |
+
}
|
| 336 |
else:
|
| 337 |
return {
|
| 338 |
"value": None,
|
|
|
|
| 344 |
"value": None,
|
| 345 |
"time": None,
|
| 346 |
"station": raw_value
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def format_precipitation_data_simple(
|
| 351 |
+
filtered_data: Dict[str, List[Dict]],
|
| 352 |
+
filters: Dict[str, Any]
|
| 353 |
+
) -> str:
|
| 354 |
+
"""
|
| 355 |
+
Generate simple, readable summary of precipitation data without LLM.
|
| 356 |
+
|
| 357 |
+
This function provides task-specific formatting that turns raw precipitation
|
| 358 |
+
data into readable bullet points with basic statistics.
|
| 359 |
+
|
| 360 |
+
Args:
|
| 361 |
+
filtered_data: Dictionary with 'zona_allerta' and 'province' keys
|
| 362 |
+
filters: Applied filters (zona_allerta, provincia, periodo, etc.)
|
| 363 |
+
|
| 364 |
+
Returns:
|
| 365 |
+
Formatted string with bullet points and basic statistics
|
| 366 |
+
"""
|
| 367 |
+
if not filtered_data or (not filtered_data.get("zona_allerta") and not filtered_data.get("province")):
|
| 368 |
+
return "π§οΈ **Precipitazioni massime**: Nessun dato trovato per i filtri specificati"
|
| 369 |
+
|
| 370 |
+
lines = []
|
| 371 |
+
time_periods = ["5'", "15'", "30'", "1h", "3h", "6h", "12h", "24h"]
|
| 372 |
+
|
| 373 |
+
# Process zona d'allerta data
|
| 374 |
+
zona_data = filtered_data.get("zona_allerta", [])
|
| 375 |
+
if zona_data:
|
| 376 |
+
zona_name = filters.get('zona_allerta') or filters.get('zona', 'Unknown')
|
| 377 |
+
lines.append(f"π§οΈ **Zona d'allerta {zona_name}**:")
|
| 378 |
+
|
| 379 |
+
for record in zona_data:
|
| 380 |
+
# Extract precipitation values for different time periods
|
| 381 |
+
precip_values = []
|
| 382 |
+
for period in time_periods:
|
| 383 |
+
if period in record and record[period]:
|
| 384 |
+
# Parse the value (format: "value [time] station")
|
| 385 |
+
raw_value = record[period]
|
| 386 |
+
parsed = _parse_single_value(raw_value)
|
| 387 |
+
if parsed["value"] is not None:
|
| 388 |
+
precip_values.append({
|
| 389 |
+
"period": period,
|
| 390 |
+
"value": parsed["value"],
|
| 391 |
+
"station": parsed["station"],
|
| 392 |
+
"time": parsed["time"]
|
| 393 |
+
})
|
| 394 |
+
|
| 395 |
+
if precip_values:
|
| 396 |
+
# Show range of values found
|
| 397 |
+
values_only = [pv["value"] for pv in precip_values]
|
| 398 |
+
min_val, max_val = min(values_only), max(values_only)
|
| 399 |
+
|
| 400 |
+
# Find the period with max value
|
| 401 |
+
max_period_data = max(precip_values, key=lambda x: x["value"])
|
| 402 |
+
|
| 403 |
+
if filters.get('periodo'):
|
| 404 |
+
# Specific period requested
|
| 405 |
+
periodo_data = [pv for pv in precip_values if pv["period"] == filters['periodo']]
|
| 406 |
+
if periodo_data:
|
| 407 |
+
pd = periodo_data[0]
|
| 408 |
+
lines.append(f" β’ **{filters['periodo']}**: {pd['value']}mm alle {pd['time']} ({pd['station']})")
|
| 409 |
+
else:
|
| 410 |
+
lines.append(f" β’ **{filters['periodo']}**: Nessun dato disponibile")
|
| 411 |
+
else:
|
| 412 |
+
# All periods - show exhaustive details for each period
|
| 413 |
+
lines.append(f" β’ **Riepilogo**: {min_val}-{max_val}mm su {len(precip_values)} periodi")
|
| 414 |
+
for pv in sorted(precip_values, key=lambda x: time_periods.index(x["period"])):
|
| 415 |
+
lines.append(f" β’ **{pv['period']}**: {pv['value']}mm alle {pv['time']} ({pv['station']})")
|
| 416 |
+
else:
|
| 417 |
+
lines.append(f" β’ Nessun dato di precipitazione disponibile")
|
| 418 |
+
|
| 419 |
+
# Process province data
|
| 420 |
+
province_data = filtered_data.get("province", [])
|
| 421 |
+
if province_data:
|
| 422 |
+
provincia_name = filters.get('provincia', 'Unknown')
|
| 423 |
+
if zona_data: # Add spacing if we had zona data too
|
| 424 |
+
lines.append("")
|
| 425 |
+
lines.append(f"π§οΈ **Provincia {provincia_name}**:")
|
| 426 |
+
|
| 427 |
+
for record in province_data:
|
| 428 |
+
# Extract precipitation values for different time periods
|
| 429 |
+
precip_values = []
|
| 430 |
+
for period in time_periods:
|
| 431 |
+
if period in record and record[period]:
|
| 432 |
+
# Parse the value (format: "value [time] station")
|
| 433 |
+
raw_value = record[period]
|
| 434 |
+
parsed = _parse_single_value(raw_value)
|
| 435 |
+
if parsed["value"] is not None:
|
| 436 |
+
precip_values.append({
|
| 437 |
+
"period": period,
|
| 438 |
+
"value": parsed["value"],
|
| 439 |
+
"station": parsed["station"],
|
| 440 |
+
"time": parsed["time"]
|
| 441 |
+
})
|
| 442 |
+
|
| 443 |
+
if precip_values:
|
| 444 |
+
# Show range of values found
|
| 445 |
+
values_only = [pv["value"] for pv in precip_values]
|
| 446 |
+
min_val, max_val = min(values_only), max(values_only)
|
| 447 |
+
|
| 448 |
+
# Find the period with max value
|
| 449 |
+
max_period_data = max(precip_values, key=lambda x: x["value"])
|
| 450 |
+
|
| 451 |
+
if filters.get('periodo'):
|
| 452 |
+
# Specific period requested
|
| 453 |
+
periodo_data = [pv for pv in precip_values if pv["period"] == filters['periodo']]
|
| 454 |
+
if periodo_data:
|
| 455 |
+
pd = periodo_data[0]
|
| 456 |
+
lines.append(f" β’ **{filters['periodo']}**: {pd['value']}mm alle {pd['time']} ({pd['station']})")
|
| 457 |
+
else:
|
| 458 |
+
lines.append(f" β’ **{filters['periodo']}**: Nessun dato disponibile")
|
| 459 |
+
else:
|
| 460 |
+
# All periods - show exhaustive details for each period
|
| 461 |
+
lines.append(f" β’ **Riepilogo**: {min_val}-{max_val}mm su {len(precip_values)} periodi")
|
| 462 |
+
for pv in sorted(precip_values, key=lambda x: time_periods.index(x["period"])):
|
| 463 |
+
lines.append(f" β’ **{pv['period']}**: {pv['value']}mm alle {pv['time']} ({pv['station']})")
|
| 464 |
+
else:
|
| 465 |
+
lines.append(f" β’ Nessun dato di precipitazione disponibile")
|
| 466 |
+
|
| 467 |
+
return "\n".join(lines)
|
|
@@ -58,37 +58,10 @@ async def fetch_valori_stazioni_async(filters: OMIRLFilterSet) -> OMIRLResult:
|
|
| 58 |
result.data = filtered_data
|
| 59 |
result.message = f"Estratti {len(filtered_data)} record dalle stazioni meteorologiche"
|
| 60 |
|
| 61 |
-
# Generate summary
|
| 62 |
if filtered_data:
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
create_valori_stazioni_summary,
|
| 66 |
-
analyze_station_data,
|
| 67 |
-
get_multi_task_summarizer
|
| 68 |
-
)
|
| 69 |
-
|
| 70 |
-
# Analyze the station data for insights
|
| 71 |
-
data_insights = analyze_station_data(filtered_data, sensor_type)
|
| 72 |
-
|
| 73 |
-
# Create standardized summary
|
| 74 |
-
task_summary = create_valori_stazioni_summary(
|
| 75 |
-
geographic_scope=filters.provincia or filters.comune or "Liguria",
|
| 76 |
-
data_insights=data_insights,
|
| 77 |
-
filters_applied=all_filters
|
| 78 |
-
)
|
| 79 |
-
|
| 80 |
-
# Generate LLM-based summary using MultiTaskSummarizer
|
| 81 |
-
summarizer = get_multi_task_summarizer()
|
| 82 |
-
summarizer.clear_results() # Clear any previous results
|
| 83 |
-
summarizer.add_task_result(task_summary)
|
| 84 |
-
summary = await summarizer.generate_final_summary(
|
| 85 |
-
query_context=f"valori stazioni {sensor_type}"
|
| 86 |
-
)
|
| 87 |
-
|
| 88 |
-
result.update_metadata(summary=summary)
|
| 89 |
-
except ImportError:
|
| 90 |
-
# Task-agnostic summarization service not available - continue without summary
|
| 91 |
-
pass
|
| 92 |
|
| 93 |
# Add filter metadata
|
| 94 |
result.update_metadata(
|
|
@@ -151,23 +124,106 @@ def _apply_additional_filters(data: List[Dict], filters: OMIRLFilterSet) -> List
|
|
| 151 |
return filtered_data
|
| 152 |
|
| 153 |
|
| 154 |
-
|
| 155 |
-
|
|
|
|
|
|
|
|
|
|
| 156 |
"""
|
| 157 |
-
|
| 158 |
|
| 159 |
-
This
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
"""
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
if comune:
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
result.data = filtered_data
|
| 59 |
result.message = f"Estratti {len(filtered_data)} record dalle stazioni meteorologiche"
|
| 60 |
|
| 61 |
+
# Generate simple formatted summary (no LLM dependency)
|
| 62 |
if filtered_data:
|
| 63 |
+
summary = format_station_data_simple(filtered_data, filters, sensor_type)
|
| 64 |
+
result.update_metadata(summary=summary)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
|
| 66 |
# Add filter metadata
|
| 67 |
result.update_metadata(
|
|
|
|
| 124 |
return filtered_data
|
| 125 |
|
| 126 |
|
| 127 |
+
def _station_values(station: Dict) -> tuple:
    """Extract the commonly displayed fields from one station record.

    Returns:
        Tuple of (nome, comune, ultimo, max, min, unita), with
        'Unknown'/'N/A'/'' defaults for missing keys so formatting
        never raises on partial records.
    """
    return (
        station.get('Nome', 'Unknown'),
        station.get('Comune', 'Unknown'),
        station.get('ultimo', 'N/A'),
        station.get('max', 'N/A'),
        station.get('min', 'N/A'),
        station.get('unita', ''),
    )


def format_station_data_simple(
    filtered_data: List[Dict],
    filters: OMIRLFilterSet,
    sensor_type: str
) -> str:
    """
    Generate simple, readable summary of station data without LLM.

    This function provides task-specific formatting that turns raw station
    data into readable bullet points based on the filtering criteria.

    Args:
        filtered_data: List of station dictionaries
        filters: OMIRLFilterSet with applied filters
        sensor_type: Type of sensor (Temperatura, Precipitazione, etc.)

    Returns:
        Formatted string with bullet points based on filter specificity
    """
    if not filtered_data:
        return f"🌡️ **Stazioni {sensor_type}**: Nessun dato trovato per i filtri specificati"

    lines = [f"🌡️ **Stazioni {sensor_type}** ({len(filtered_data)} record):"]

    # Scenario 1: Only tipo_sensore provided - show all table content in bullet points
    if not filters.stazione and not filters.comune and not filters.provincia:
        # Group by province for better organization
        by_province: Dict[str, List[Dict]] = {}
        for station in filtered_data:
            by_province.setdefault(station.get('Provincia', 'Unknown'), []).append(station)

        for province, stations in sorted(by_province.items()):
            lines.append(f"  • **{province}**: {len(stations)} stazioni")
            # Limit to first 10 per province to avoid overwhelming output
            for station in stations[:10]:
                nome, comune, ultimo, max_val, min_val, unit = _station_values(station)
                lines.append(f"    - **{nome}** ({comune}): {ultimo}{unit} (max: {max_val}{unit}, min: {min_val}{unit})")

            if len(stations) > 10:
                lines.append(f"      ... e altre {len(stations) - 10} stazioni")

    # Scenario 2: tipo_sensore + stazione - show specific station details
    elif filters.stazione:
        for station in filtered_data:
            nome, comune, ultimo, max_val, min_val, unit = _station_values(station)
            provincia = station.get('Provincia', 'Unknown')

            lines.append(f"  • **{nome}** ({comune}, {provincia}):")
            lines.append(f"    - **Ultimo**: {ultimo}{unit}")
            lines.append(f"    - **Massimo**: {max_val}{unit}")
            lines.append(f"    - **Minimo**: {min_val}{unit}")

    # Scenario 3: tipo_sensore + comune - show all stations in that comune
    elif filters.comune:
        lines.append(f"  • **Comune: {filters.comune}**")

        for station in filtered_data:
            nome, _, ultimo, max_val, min_val, unit = _station_values(station)
            lines.append(f"    - **{nome}**: ultimo {ultimo}{unit}, max {max_val}{unit}, min {min_val}{unit}")

    # Scenario 4: tipo_sensore + provincia - show all stations in that provincia
    elif filters.provincia:
        lines.append(f"  • **Provincia: {filters.provincia}**")

        # Group by comune within the provincia
        by_comune: Dict[str, List[Dict]] = {}
        for station in filtered_data:
            by_comune.setdefault(station.get('Comune', 'Unknown'), []).append(station)

        for comune, stations in sorted(by_comune.items()):
            lines.append(f"    - **{comune}**: {len(stations)} stazioni")
            for station in stations:
                nome, _, ultimo, max_val, min_val, unit = _station_values(station)
                lines.append(f"      • **{nome}**: ultimo {ultimo}{unit}, max {max_val}{unit}, min {min_val}{unit}")

    return "\n".join(lines)
|