ratyim committed on
Commit
faa740f
·
verified ·
1 Parent(s): a422356

Upload 6 files

Browse files
Files changed (6) hide show
  1. dynamic_simulation.py +418 -0
  2. floor_plan.py +168 -0
  3. llm_faiss_rag.py +1848 -0
  4. llm_route_decider.py +183 -0
  5. pathfinding.py +367 -0
  6. sensor_system.py +439 -0
dynamic_simulation.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Dynamic Simulation Module - Real-time fire spread and sensor updates
3
+ Simulates how fire conditions change over time and updates evacuation recommendations
4
+ """
5
+ import random
6
+ import time
7
+ from typing import Dict, List, Tuple
8
+ from .floor_plan import FloorPlan
9
+ from .sensor_system import SensorSystem, SensorReading
10
+ from .pathfinding import PathFinder, RiskAssessment
11
+
12
+
13
class FireSpreadSimulator:
    """Simulates dynamic fire spread and environmental changes.

    Operates on a FloorPlan (room adjacency graph) and a SensorSystem
    (per-room sensor readings). Each call to update_simulation() advances
    the scenario by one 30-second step: burning rooms intensify, fire and
    smoke spread to neighbours, structures weaken, and occupants evacuate.
    """

    def __init__(self, floor_plan: FloorPlan, sensor_system: SensorSystem):
        self.floor_plan = floor_plan
        self.sensor_system = sensor_system
        self.time_step = 0  # number of completed update_simulation() calls
        self.fire_sources = []  # List of rooms where fire started (grows as fire spreads)

    def initialize_fire(self, fire_locations: List[str]):
        """Initialize fire at specific locations with all real-world factors.

        Args:
            fire_locations: room ids to ignite; ids without a sensor are ignored.

        Seeds every ignited room with randomized readings for all sensor
        fields (fire, gases, environment, human factors, infrastructure).
        """
        self.fire_sources = fire_locations

        for location in fire_locations:
            if location in self.sensor_system.sensors:
                # Generate comprehensive mock data for fire location
                self.sensor_system.update_sensor(
                    location,
                    # Basic factors
                    fire_detected=True,
                    smoke_level=random.uniform(0.7, 0.9),
                    temperature=random.uniform(150, 250),
                    oxygen_level=random.uniform(14, 16),
                    visibility=random.uniform(5, 15),
                    structural_integrity=random.uniform(60, 80),

                    # Fire-specific factors
                    fire_growth_rate=random.uniform(5, 15),  # m²/min
                    flashover_risk=random.uniform(0.3, 0.6),
                    backdraft_risk=random.uniform(0.2, 0.4),
                    heat_radiation=random.uniform(3, 8),  # kW/m²
                    fire_type=random.choice(["wood", "electrical", "chemical"]),

                    # Toxic gases (high in fire areas)
                    carbon_monoxide=random.uniform(50, 200),  # ppm
                    carbon_dioxide=random.uniform(5000, 15000),  # ppm
                    hydrogen_cyanide=random.uniform(10, 50),  # ppm
                    hydrogen_chloride=random.uniform(5, 20),  # ppm

                    # Environmental (affected by fire)
                    wind_direction=random.uniform(0, 360),
                    wind_speed=random.uniform(2, 8),  # m/s
                    air_pressure=random.uniform(1000, 1020),
                    humidity=random.uniform(30, 60),

                    # Human factors
                    occupancy_density=random.uniform(0.3, 0.7),
                    mobility_limitations=random.randint(0, 3),
                    panic_level=random.uniform(0.6, 0.9),
                    evacuation_progress=0.0,

                    # Infrastructure (may fail near fire)
                    sprinkler_active=random.choice([True, False]),
                    emergency_lighting=random.choice([True, False]),
                    elevator_available=False,  # Elevators disabled in fire
                    stairwell_clear=random.choice([True, False]),
                    exit_accessible=random.choice([True, False]),
                    exit_capacity=random.randint(50, 150),
                    ventilation_active=random.choice([True, False]),

                    # Time-based
                    time_since_fire_start=0,
                    estimated_time_to_exit=random.randint(60, 300),

                    # Communication
                    emergency_comm_working=random.choice([True, False]),
                    wifi_signal_strength=random.uniform(40, 80),

                    # External
                    weather_temperature=random.uniform(15, 25),
                    weather_rain=random.choice([True, False]),
                    time_of_day=random.randint(8, 18),
                    day_of_week=random.randint(0, 6)
                )

    def update_simulation(self, intensity: float = 1.0):
        """
        Update fire conditions for one time step

        Args:
            intensity: Fire spread intensity (0.5 = slow, 1.0 = normal, 2.0 = fast)
        """
        self.time_step += 1

        # Update time for all sensors (one step == 30 simulated seconds).
        # NOTE(review): _intensify_fires() below also adds +30 to
        # time_since_fire_start for burning rooms, leaving them one step
        # ahead of this value — confirm whether that double-count is intended.
        for sensor in self.sensor_system.sensors.values():
            sensor.time_since_fire_start = self.time_step * 30  # 30 seconds per step

        # 1. Intensify existing fires
        self._intensify_fires(intensity)

        # 2. Spread fire to adjacent rooms
        self._spread_fire(intensity)

        # 3. Spread smoke further
        self._spread_smoke(intensity)

        # 4. Update structural integrity
        self._update_structures(intensity)

        # 5. Update evacuation progress
        self._update_evacuation_progress(intensity)

    def _intensify_fires(self, intensity: float):
        """Make existing fires worse over time with all factors.

        Every reading is moved by a random delta scaled by `intensity`
        and clamped to a hard bound (e.g. temperature capped at 300).
        """
        for location_id, sensor in self.sensor_system.sensors.items():
            if sensor.fire_detected:
                # Fire gets hotter (capped at 300)
                temp_increase = random.uniform(5, 15) * intensity
                new_temp = min(sensor.temperature + temp_increase, 300)

                # More smoke (saturates at 1.0)
                smoke_increase = random.uniform(0.02, 0.05) * intensity
                new_smoke = min(sensor.smoke_level + smoke_increase, 1.0)

                # Less oxygen (floor of 10.0)
                oxygen_decrease = random.uniform(0.2, 0.5) * intensity
                new_oxygen = max(sensor.oxygen_level - oxygen_decrease, 10.0)

                # Worse visibility (down to 0)
                visibility_decrease = random.uniform(1, 3) * intensity
                new_visibility = max(sensor.visibility - visibility_decrease, 0.0)

                # Structural damage (burning rooms never drop below 30 here;
                # _update_structures() can take them down to 20)
                integrity_decrease = random.uniform(1, 3) * intensity
                new_integrity = max(sensor.structural_integrity - integrity_decrease, 30.0)

                # Update all fire-related factors
                self.sensor_system.update_sensor(
                    location_id,
                    temperature=new_temp,
                    smoke_level=new_smoke,
                    oxygen_level=new_oxygen,
                    visibility=new_visibility,
                    structural_integrity=new_integrity,

                    # Fire growth increases
                    fire_growth_rate=min(sensor.fire_growth_rate + random.uniform(0.5, 2) * intensity, 20),
                    flashover_risk=min(sensor.flashover_risk + random.uniform(0.02, 0.05) * intensity, 1.0),
                    backdraft_risk=min(sensor.backdraft_risk + random.uniform(0.01, 0.03) * intensity, 1.0),
                    heat_radiation=min(sensor.heat_radiation + random.uniform(0.2, 0.5) * intensity, 15),

                    # Toxic gases increase
                    carbon_monoxide=min(sensor.carbon_monoxide + random.uniform(5, 15) * intensity, 500),
                    carbon_dioxide=min(sensor.carbon_dioxide + random.uniform(200, 500) * intensity, 20000),
                    hydrogen_cyanide=min(sensor.hydrogen_cyanide + random.uniform(1, 3) * intensity, 100),
                    hydrogen_chloride=min(sensor.hydrogen_chloride + random.uniform(0.5, 2) * intensity, 50),

                    # Time increases
                    # NOTE(review): update_simulation() already set this field to
                    # time_step * 30 for every sensor; this extra +30 double-counts
                    # the current step for burning rooms — confirm intent.
                    time_since_fire_start=sensor.time_since_fire_start + 30,  # 30 seconds per step

                    # Panic increases
                    panic_level=min(sensor.panic_level + random.uniform(0.01, 0.03) * intensity, 1.0),

                    # Infrastructure may fail; re-rolled every step, so a failed
                    # system can come back online on the next step
                    emergency_lighting=random.random() > 0.1,  # 10% chance of failure
                    exit_accessible=random.random() > 0.15,  # 15% chance of blockage
                    stairwell_clear=random.random() > 0.2  # 20% chance of blockage
                )

    def _spread_fire(self, intensity: float):
        """Fire spreads to adjacent rooms based on conditions."""
        # Collect ignitions first, apply after the scan, so a fire cannot
        # chain across several rooms within a single time step.
        new_fires = []

        for location_id, sensor in self.sensor_system.sensors.items():
            if sensor.fire_detected:
                # Get adjacent rooms
                neighbors = self.floor_plan.get_neighbors(location_id)

                for neighbor_id, _ in neighbors:
                    neighbor_sensor = self.sensor_system.get_sensor_reading(neighbor_id)

                    if neighbor_sensor and not neighbor_sensor.fire_detected:
                        # Chance of fire spreading based on conditions
                        spread_chance = 0.15 * intensity  # Base 15% chance per time step

                        # Higher chance if already hot or smoky
                        if neighbor_sensor.temperature > 80:
                            spread_chance += 0.2
                        if neighbor_sensor.smoke_level > 0.5:
                            spread_chance += 0.15

                        # Check if oxygen cylinder present (explosion!)
                        room = self.floor_plan.get_room(neighbor_id)
                        if room and room.has_oxygen_cylinder and neighbor_sensor.temperature > 60:
                            spread_chance += 0.4  # Much higher chance!

                        if random.random() < spread_chance:
                            new_fires.append(neighbor_id)

        # Apply new fires
        for location_id in new_fires:
            self.sensor_system.update_sensor(
                location_id,
                fire_detected=True,
                smoke_level=random.uniform(0.6, 0.8),
                temperature=random.uniform(120, 180),
                oxygen_level=random.uniform(15, 17),
                visibility=random.uniform(15, 30)
            )
            self.fire_sources.append(location_id)

    def _spread_smoke(self, intensity: float):
        """Smoke and toxic gases spread to all connected areas.

        Any room that is burning or has smoke_level > 0.3 degrades its
        non-burning neighbours: smoke/heat/gases rise (milder caps than in
        the fire room itself), oxygen and visibility fall.
        """
        for location_id, sensor in self.sensor_system.sensors.items():
            # Rooms with fire or high smoke affect neighbors
            if sensor.fire_detected or sensor.smoke_level > 0.3:
                neighbors = self.floor_plan.get_neighbors(location_id)

                for neighbor_id, _ in neighbors:
                    neighbor_sensor = self.sensor_system.get_sensor_reading(neighbor_id)

                    if neighbor_sensor and not neighbor_sensor.fire_detected:
                        # Smoke drifts to adjacent areas
                        smoke_increase = random.uniform(0.03, 0.08) * intensity
                        new_smoke = min(neighbor_sensor.smoke_level + smoke_increase, 1.0)

                        # Temperature rises slightly
                        temp_increase = random.uniform(2, 8) * intensity
                        new_temp = min(neighbor_sensor.temperature + temp_increase, 100)

                        # Oxygen decreases
                        oxygen_decrease = random.uniform(0.1, 0.3) * intensity
                        new_oxygen = max(neighbor_sensor.oxygen_level - oxygen_decrease, 16.0)

                        # Visibility decreases
                        visibility_decrease = random.uniform(2, 5) * intensity
                        new_visibility = max(neighbor_sensor.visibility - visibility_decrease, 10.0)

                        # Toxic gases spread (reduced concentration)
                        co_spread = sensor.carbon_monoxide * 0.1 * intensity
                        co2_spread = sensor.carbon_dioxide * 0.05 * intensity
                        hcn_spread = sensor.hydrogen_cyanide * 0.15 * intensity
                        hcl_spread = sensor.hydrogen_chloride * 0.2 * intensity

                        # Heat radiation spreads
                        heat_spread = sensor.heat_radiation * 0.3 * intensity

                        self.sensor_system.update_sensor(
                            neighbor_id,
                            smoke_level=new_smoke,
                            temperature=new_temp,
                            oxygen_level=new_oxygen,
                            visibility=new_visibility,

                            # Toxic gases (accumulate)
                            carbon_monoxide=min(neighbor_sensor.carbon_monoxide + co_spread, 200),
                            carbon_dioxide=min(neighbor_sensor.carbon_dioxide + co2_spread, 10000),
                            hydrogen_cyanide=min(neighbor_sensor.hydrogen_cyanide + hcn_spread, 50),
                            hydrogen_chloride=min(neighbor_sensor.hydrogen_chloride + hcl_spread, 30),

                            # Heat radiation
                            heat_radiation=min(neighbor_sensor.heat_radiation + heat_spread, 5),

                            # Flashover risk increases slightly
                            flashover_risk=min(neighbor_sensor.flashover_risk + random.uniform(0.01, 0.02) * intensity, 0.5),

                            # Occupancy drops as people move away; panic rises
                            occupancy_density=max(neighbor_sensor.occupancy_density - random.uniform(0.05, 0.15), 0.0),
                            panic_level=min(neighbor_sensor.panic_level + random.uniform(0.02, 0.05) * intensity, 1.0)
                        )

    def _update_structures(self, intensity: float):
        """Update structural integrity based on fire exposure.

        Burning rooms and any room above 100 degrees lose integrity each
        step, bottoming out at 20.0.
        """
        for location_id, sensor in self.sensor_system.sensors.items():
            if sensor.fire_detected or sensor.temperature > 100:
                # Structural damage from heat
                damage = random.uniform(0.5, 2.0) * intensity
                new_integrity = max(sensor.structural_integrity - damage, 20.0)

                self.sensor_system.update_sensor(
                    location_id,
                    structural_integrity=new_integrity
                )

    def _update_evacuation_progress(self, intensity: float):
        """Update evacuation progress and occupancy.

        Ordinary rooms empty out over time; exit rooms fill up (to at most
        0.9 density) as evacuees arrive.
        """
        for location_id, sensor in self.sensor_system.sensors.items():
            # People evacuate from rooms (occupancy decreases)
            if sensor.occupancy_density > 0:
                evacuation_rate = random.uniform(0.05, 0.15) * intensity
                new_occupancy = max(sensor.occupancy_density - evacuation_rate, 0.0)
                new_progress = min(sensor.evacuation_progress + evacuation_rate * 100, 100.0)

                self.sensor_system.update_sensor(
                    location_id,
                    occupancy_density=new_occupancy,
                    evacuation_progress=new_progress
                )

            # Exits may get more crowded as people arrive
            room = self.floor_plan.get_room(location_id)
            if room and room.room_type == "exit":
                # Exits get more crowded as people evacuate
                if sensor.occupancy_density < 0.8:
                    arrival_rate = random.uniform(0.02, 0.08) * intensity
                    new_occupancy = min(sensor.occupancy_density + arrival_rate, 0.9)
                    self.sensor_system.update_sensor(
                        location_id,
                        occupancy_density=new_occupancy
                    )
+
316
class DynamicEvacuationSystem:
    """Manages dynamic evacuation with changing conditions.

    Owns the sensor system, the fire-spread simulator and the pathfinder
    for one floor plan, and keeps the current route recommendation plus a
    history of every recommendation made.
    """

    def __init__(self, floor_plan: FloorPlan):
        self.floor_plan = floor_plan
        self.sensor_system = SensorSystem(floor_plan)
        self.simulator = FireSpreadSimulator(floor_plan, self.sensor_system)
        self.pathfinder = PathFinder(floor_plan, self.sensor_system)
        self.current_recommendation = None  # (path, risk_dict) or None
        self.recommendation_history = []    # one dict per recommendation update

    def initialize_scenario(self, fire_locations: List[str],
                            affected_areas: Dict[str, Dict] = None):
        """Initialize the fire scenario.

        Args:
            fire_locations: room ids where the fire starts.
            affected_areas: optional {room_id: {sensor_field: value}} presets
                applied before ignition; unknown room ids are ignored.
        """
        # Set up initial conditions
        if affected_areas:
            for location, values in affected_areas.items():
                if location in self.sensor_system.sensors:
                    self.sensor_system.update_sensor(location, **values)

        # Initialize fires
        self.simulator.initialize_fire(fire_locations)

        # Get initial recommendation
        self._update_recommendation()

    def step(self, intensity: float = 1.0, start_location: str = "R1"):
        """
        Advance simulation by one time step

        Args:
            intensity: Fire spread intensity
            start_location: Where person is evacuating from

        Returns:
            (time_step, routes, recommended_route, route_changed)
        """
        # Update fire conditions
        self.simulator.update_simulation(intensity)

        # Recalculate best route
        previous_rec = self.current_recommendation
        self._update_recommendation(start_location)

        # Check if recommendation changed (compared by path)
        route_changed = False
        if previous_rec and self.current_recommendation:
            prev_path = previous_rec[0]
            curr_path = self.current_recommendation[0]
            route_changed = (prev_path != curr_path)

        # Get all routes
        routes = self.pathfinder.find_all_evacuation_routes(start_location)

        return self.simulator.time_step, routes, self.current_recommendation, route_changed

    @staticmethod
    def _exit_for_path(routes, path):
        """Return the exit id whose route uses `path`, or None if no route matches."""
        return next((e for e, p, _ in routes if p == path), None)

    def _update_recommendation(self, start_location: str = "R1"):
        """Update the evacuation recommendation and append it to the history."""
        routes = self.pathfinder.find_all_evacuation_routes(start_location)

        if routes:
            recommended = RiskAssessment.recommend_path(
                [(path, risk) for _, path, risk in routes]
            )
            self.current_recommendation = recommended

            # Track history
            if recommended:
                rec_path, rec_risk = recommended
                rec_exit = self._exit_for_path(routes, rec_path)
                self.recommendation_history.append({
                    'time_step': self.simulator.time_step,
                    'exit': rec_exit,
                    'path': rec_path,
                    'danger': rec_risk['avg_danger']
                })
        else:
            self.current_recommendation = None

    def get_status_summary(self, start_location: str = "R1"):
        """Get current status summary (fire count, route stats, recommendation)."""
        routes = self.pathfinder.find_all_evacuation_routes(start_location)

        summary = {
            'time_step': self.simulator.time_step,
            'total_fires': sum(1 for s in self.sensor_system.sensors.values()
                               if s.fire_detected),
            'passable_routes': sum(1 for _, _, r in routes if r['passable']),
            'total_routes': len(routes),
            'avg_danger': sum(r['avg_danger'] for _, _, r in routes) / len(routes) if routes else 100,
        }

        if self.current_recommendation:
            rec_path, rec_risk = self.current_recommendation
            # Fix: the stored recommendation may be stale relative to the
            # freshly computed routes; indexing [0] on an empty match list
            # raised IndexError here before. next(..., None) degrades to None.
            summary['recommended_exit'] = self._exit_for_path(routes, rec_path)
            summary['recommended_danger'] = rec_risk['avg_danger']
        else:
            summary['recommended_exit'] = None
            summary['recommended_danger'] = 100.0

        return summary
floor_plan.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Floor Plan Module - Represents building layout with rooms, corridors, and connections
3
+ """
4
+ import numpy as np
5
+ from typing import Dict, List, Tuple, Optional
6
+
7
+
8
class Room:
    """A single area on a floor plan: room, corridor, exit or stairwell."""

    def __init__(self, room_id: str, name: str, position: Tuple[float, float],
                 room_type: str = "room", size: Tuple[float, float] = (5, 5)):
        self.room_id = room_id
        self.name = name
        self.position = position      # (x, y) coordinates
        self.room_type = room_type    # one of: room, corridor, exit, stairwell
        self.size = size              # (width, height)
        self.connected_to = []        # [(neighbour_id, distance), ...]
        self.has_oxygen_cylinder = False
        self.has_fire_extinguisher = False

    def add_connection(self, room_id: str, distance: float = None):
        """Record an edge to a neighbouring area (distance defaults to 1.0)."""
        self.connected_to.append((room_id, 1.0 if distance is None else distance))

    def __repr__(self):
        return "Room({}, {})".format(self.room_id, self.name)
+
31
class FloorPlan:
    """The complete building floor plan: a graph of Room nodes keyed by id."""

    def __init__(self, floor_name: str = "Ground Floor"):
        self.floor_name = floor_name
        self.rooms: Dict[str, Room] = {}
        self.exits: List[str] = []  # ids of rooms with room_type == "exit"

    def add_room(self, room: Room):
        """Register a room; exit rooms are additionally tracked in self.exits."""
        self.rooms[room.room_id] = room
        if room.room_type == "exit":
            self.exits.append(room.room_id)

    def add_connection(self, room_id1: str, room_id2: str, distance: float = 1.0):
        """Create bidirectional connection between two known rooms (no-op otherwise)."""
        if room_id1 in self.rooms and room_id2 in self.rooms:
            for src, dst in ((room_id1, room_id2), (room_id2, room_id1)):
                self.rooms[src].add_connection(dst, distance)

    def get_neighbors(self, room_id: str) -> List[Tuple[str, float]]:
        """Return (neighbour_id, distance) pairs; empty list for unknown ids."""
        room = self.rooms.get(room_id)
        return room.connected_to if room is not None else []

    def get_all_exits(self) -> List[str]:
        """Return the ids of every exit added so far."""
        return self.exits

    def get_room(self, room_id: str) -> Optional[Room]:
        """Look up a room by id; None when absent."""
        return self.rooms.get(room_id)
+
65
def create_sample_floor_plan() -> FloorPlan:
    """
    Build the demo floor plan: 7 rooms, 10 corridors and 5 exit routes.

    Layout:
    [R1] - [C1] - [R2] - [C4] - [R5] - [EXIT1]  (Route 1: has oxygen cylinder)
      |             |             |
     [C2]          [C3]          [C8]
      |             |             |
    [R3] - [C5] - [R4] - [C6] - [R6] - [EXIT2]  (Route 2)
      |                           |
     [C7]                        [C9]
      |                           |
    [EXIT3] (Route 3)          [EXIT4] (Route 4)

    [R7] - [C10] - [EXIT5]  (Route 5)
    """
    plan = FloorPlan("Ground Floor")

    # (id, display name, position, type, size) for every node in the graph
    room_specs = [
        # Main floor rooms
        ("R1", "Room 101", (10, 10), "room", (12, 12)),
        ("R2", "Room 102", (60, 10), "room", (12, 12)),
        ("R3", "Room 103", (10, 50), "room", (12, 12)),
        ("R4", "Room 104", (60, 50), "room", (12, 12)),
        ("R5", "Room 105", (110, 10), "room", (12, 12)),
        ("R6", "Room 106", (110, 50), "room", (12, 12)),
        ("R7", "Room 107", (10, 100), "room", (12, 12)),

        # Corridors
        ("C1", "Corridor 1", (35, 10), "corridor", (15, 6)),
        ("C2", "Corridor 2", (10, 30), "corridor", (6, 12)),
        ("C3", "Corridor 3", (60, 30), "corridor", (6, 12)),
        ("C4", "Corridor 4", (85, 10), "corridor", (15, 6)),
        ("C5", "Corridor 5", (35, 50), "corridor", (15, 6)),
        ("C6", "Corridor 6", (85, 50), "corridor", (15, 6)),
        ("C7", "Corridor 7", (10, 75), "corridor", (6, 15)),
        ("C8", "Corridor 8", (110, 30), "corridor", (6, 12)),
        ("C9", "Corridor 9", (110, 75), "corridor", (6, 15)),
        ("C10", "Corridor 10", (35, 100), "corridor", (15, 6)),

        # Exits (5 exits)
        ("EXIT1", "North Exit", (135, 10), "exit", (8, 8)),
        ("EXIT2", "East Exit", (135, 50), "exit", (8, 8)),
        ("EXIT3", "South Exit", (10, 125), "exit", (8, 8)),
        ("EXIT4", "Southeast Exit", (110, 125), "exit", (8, 8)),
        ("EXIT5", "West Exit", (60, 100), "exit", (8, 8)),
    ]
    for room_id, name, position, room_type, size in room_specs:
        plan.add_room(Room(room_id, name, position, room_type, size))

    # Hazard / equipment placement: oxygen cylinder on Route 1,
    # fire extinguishers in three rooms.
    plan.get_room("C1").has_oxygen_cylinder = True
    for room_id in ("R2", "R3", "R5"):
        plan.get_room(room_id).has_fire_extinguisher = True

    # Build the edge list (all edges are bidirectional, distance 1.0)
    edges = [
        # Route 1 path (North - via oxygen cylinder area)
        ("R1", "C1"), ("C1", "R2"), ("R2", "C4"), ("C4", "R5"), ("R5", "EXIT1"),

        # Route 2 path (East)
        ("R1", "C2"), ("C2", "R3"), ("R3", "C5"), ("C5", "R4"),
        ("R4", "C6"), ("C6", "R6"), ("R6", "EXIT2"),

        # Route 3 path (South)
        ("R3", "C7"), ("C7", "EXIT3"),

        # Route 4 path (Southeast)
        ("R6", "C9"), ("C9", "EXIT4"),

        # Route 5 path (West)
        ("R7", "C10"), ("C10", "EXIT5"),

        # Cross connections
        ("R2", "C3"), ("C3", "R4"), ("R5", "C8"), ("C8", "R6"),
        ("R3", "R7"),  # Direct connection
    ]
    for room_a, room_b in edges:
        plan.add_connection(room_a, room_b, 1.0)

    return plan
llm_faiss_rag.py ADDED
@@ -0,0 +1,1848 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Llama + FAISS RAG System for Fire Evacuation with Advanced Reasoning
3
+
4
+ This module implements a RAG (Retrieval-Augmented Generation) system for fire evacuation scenarios
5
+ with advanced LLM reasoning techniques including:
6
+
7
+ 1. Chain-of-Thought (CoT) Prompting:
8
+ - Enables step-by-step reasoning through intermediate steps
9
+ - Improves complex problem-solving capabilities
10
+ - Reference: https://arxiv.org/pdf/2201.11903
11
+
12
+ 2. Tree-of-Thoughts (ToT):
13
+ - Maintains multiple reasoning paths
14
+ - Self-evaluates progress through intermediate thoughts
15
+ - Enables deliberate reasoning process
16
+ - Reference: https://arxiv.org/pdf/2305.10601
17
+
18
+ 3. Reflexion:
19
+ - Reinforces language-based agents through linguistic feedback
20
+ - Self-reflection and iterative improvement
21
+ - Reference: https://arxiv.org/pdf/2303.11366
22
+
23
+ 4. CoT with Tools:
24
+ - Combines CoT prompting with external tools
25
+ - Interleaved reasoning and tool usage
26
+ - Reference: https://arxiv.org/pdf/2303.09014
27
+
28
+ 5. Advanced Decoding Strategies:
29
+ - Greedy: Deterministic highest probability
30
+ - Sampling: Random sampling with temperature
31
+ - Beam Search: Explores multiple paths
32
+ - Nucleus (Top-p): Samples from top-p probability mass
33
+ - Temperature: Temperature-based sampling
34
+
35
+ Downloads Llama model, creates JSON dataset, builds FAISS index, and provides RAG querying
36
+ """
37
+ import json
38
+ import os
39
+ import pickle
40
+ import glob
41
+ import re
42
+ from typing import List, Dict, Any, Optional, Tuple
43
+ from pathlib import Path
44
+ from enum import Enum
45
+ import copy
46
+
47
+ import numpy as np
48
+ import faiss
49
+ import torch
50
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
51
+ from sentence_transformers import SentenceTransformer
52
+
53
+ # Import project modules (package-relative imports)
54
+ from .floor_plan import FloorPlan, create_sample_floor_plan
55
+ from .sensor_system import SensorSystem, create_sample_fire_scenario
56
+ from .pathfinding import PathFinder
57
+
58
+
59
+ class FireEvacuationDataExporter:
60
+ """Exports fire evacuation system data to JSON format"""
61
+
62
    def __init__(self, floor_plan: FloorPlan, sensor_system: SensorSystem, pathfinder: PathFinder):
        # Hold live references to the system objects; data is read fresh on
        # each export call, so exports reflect the current simulation state.
        self.floor_plan = floor_plan
        self.sensor_system = sensor_system
        self.pathfinder = pathfinder
67
    def export_room_data(self, room_id: str) -> Dict[str, Any]:
        """Export comprehensive room data to JSON.

        Combines the room's static floor-plan attributes with its current
        sensor reading. Numeric sensor values are rounded for readability.

        Args:
            room_id: id of the room to export.

        Returns:
            A JSON-serializable dict, or an empty dict when either the room
            or its sensor reading is missing.
        """
        room = self.floor_plan.get_room(room_id)
        sensor = self.sensor_system.get_sensor_reading(room_id)

        if not room or not sensor:
            return {}

        return {
            "room_id": room_id,
            "name": room.name,
            "room_type": room.room_type,
            "position": room.position,
            "size": room.size,
            "has_oxygen_cylinder": room.has_oxygen_cylinder,
            "has_fire_extinguisher": room.has_fire_extinguisher,
            # connected_to holds (neighbour_id, distance) tuples; only ids exported
            "connected_to": [conn[0] for conn in room.connected_to],
            "sensor_data": {
                "fire_detected": sensor.fire_detected,
                "smoke_level": round(sensor.smoke_level, 2),
                "temperature_c": round(sensor.temperature, 1),
                "oxygen_pct": round(sensor.oxygen_level, 1),
                "visibility_pct": round(sensor.visibility, 1),
                "structural_integrity_pct": round(sensor.structural_integrity, 1),
                "fire_growth_rate": round(sensor.fire_growth_rate, 2),
                "flashover_risk": round(sensor.flashover_risk, 2),
                "backdraft_risk": round(sensor.backdraft_risk, 2),
                "heat_radiation": round(sensor.heat_radiation, 2),
                "fire_type": sensor.fire_type,
                "carbon_monoxide_ppm": round(sensor.carbon_monoxide, 1),
                "carbon_dioxide_ppm": round(sensor.carbon_dioxide, 1),
                "hydrogen_cyanide_ppm": round(sensor.hydrogen_cyanide, 2),
                "hydrogen_chloride_ppm": round(sensor.hydrogen_chloride, 2),
                "wind_direction": round(sensor.wind_direction, 1),
                "wind_speed": round(sensor.wind_speed, 2),
                "air_pressure": round(sensor.air_pressure, 2),
                "humidity": round(sensor.humidity, 1),
                "occupancy_density": round(sensor.occupancy_density, 2),
                "mobility_limitations": sensor.mobility_limitations,
                "panic_level": round(sensor.panic_level, 2),
                "evacuation_progress": round(sensor.evacuation_progress, 1),
                "sprinkler_active": sensor.sprinkler_active,
                "emergency_lighting": sensor.emergency_lighting,
                "elevator_available": sensor.elevator_available,
                "stairwell_clear": sensor.stairwell_clear,
                "exit_accessible": sensor.exit_accessible,
                "exit_capacity": sensor.exit_capacity,
                "ventilation_active": sensor.ventilation_active,
                "time_since_fire_start": sensor.time_since_fire_start,
                "estimated_time_to_exit": sensor.estimated_time_to_exit,
                "emergency_comm_working": sensor.emergency_comm_working,
                "wifi_signal_strength": round(sensor.wifi_signal_strength, 1),
                # Derived values computed by the sensor object itself
                "danger_score": round(sensor.calculate_danger_score(), 1),
                "passable": sensor.is_passable()
            }
        }
124
+ def export_route_data(self, start_location: str = "R1") -> Dict[str, Any]:
125
+ """Export all evacuation routes with detailed information"""
126
+ routes = self.pathfinder.find_all_evacuation_routes(start_location)
127
+
128
+ route_data = {
129
+ "timestamp_sec": 0,
130
+ "start_location": start_location,
131
+ "total_routes": len(routes),
132
+ "routes": []
133
+ }
134
+
135
+ for idx, (exit_id, path, risk) in enumerate(routes, 1):
136
+ route_info = {
137
+ "route_id": f"Route {idx}",
138
+ "exit": exit_id,
139
+ "path": path,
140
+ "metrics": {
141
+ "avg_danger": round(risk['avg_danger'], 2),
142
+ "max_danger": round(risk['max_danger'], 2),
143
+ "max_danger_location": risk['max_danger_location'],
144
+ "total_danger": round(risk['total_danger'], 2),
145
+ "path_length": risk['path_length'],
146
+ "has_fire": risk['has_fire'],
147
+ "has_oxygen_hazard": risk['has_oxygen_hazard'],
148
+ "passable": risk['passable'],
149
+ "risk_factors": risk['risk_factors']
150
+ },
151
+ "nodes": []
152
+ }
153
+
154
+ # Add detailed node information
155
+ for room_id in path:
156
+ node_data = self.export_room_data(room_id)
157
+ if node_data:
158
+ route_info["nodes"].append(node_data)
159
+
160
+ route_data["routes"].append(route_info)
161
+
162
+ return route_data
163
+
164
+ def export_all_rooms(self) -> List[Dict[str, Any]]:
165
+ """Export all rooms as separate documents"""
166
+ all_rooms = []
167
+ for room_id in self.floor_plan.rooms:
168
+ room_data = self.export_room_data(room_id)
169
+ if room_data:
170
+ all_rooms.append(room_data)
171
+ return all_rooms
172
+
173
+ def export_to_json(self, output_path: str, start_location: str = "R1"):
174
+ """Export complete dataset to JSON file"""
175
+ data = {
176
+ "floor_plan": {
177
+ "floor_name": self.floor_plan.floor_name,
178
+ "total_rooms": len(self.floor_plan.rooms),
179
+ "exits": self.floor_plan.exits
180
+ },
181
+ "all_rooms": self.export_all_rooms(),
182
+ "evacuation_routes": self.export_route_data(start_location)
183
+ }
184
+
185
+ with open(output_path, 'w', encoding='utf-8') as f:
186
+ json.dump(data, f, indent=2, ensure_ascii=False)
187
+
188
+ print(f"[OK] Exported data to {output_path}")
189
+ return data
190
+
191
+
192
class ReasoningMode(Enum):
    """Enumeration of reasoning modes.

    Each mode selects a different prompt-construction strategy in
    FireEvacuationRAG (see the _build_*_prompt helpers in this file).
    """
    STANDARD = "standard"                  # plain RAG answer — presumably no reasoning scaffold (no dedicated builder visible here; confirm)
    CHAIN_OF_THOUGHT = "chain_of_thought"  # step-by-step reasoning (_build_cot_prompt)
    TREE_OF_THOUGHTS = "tree_of_thoughts"  # explore and evaluate multiple thought branches (_build_tot_prompt)
    REFLEXION = "reflexion"                # answer, self-critique, then revise (_build_reflexion_prompt)
    COT_WITH_TOOLS = "cot_with_tools"      # chain-of-thought augmented with tool results (_build_cot_with_tools_prompt)
199
+
200
+
201
class DecodingStrategy(Enum):
    """Enumeration of decoding strategies.

    Mapped onto `generate()` keyword arguments by
    _generate_with_decoding_strategy.
    """
    GREEDY = "greedy"            # deterministic: do_sample=False, single beam
    SAMPLING = "sampling"        # temperature sampling with top_k=50
    BEAM_SEARCH = "beam_search"  # num_beams candidates with early stopping
    NUCLEUS = "nucleus"          # top-p (nucleus) sampling, top_k disabled
    TEMPERATURE = "temperature"  # pure temperature sampling
208
+
209
+
210
+ class FireEvacuationRAG:
211
+ """RAG system using FAISS for retrieval and Llama for generation with advanced reasoning"""
212
+
213
    def __init__(self, model_name: str = "nvidia/Llama-3.1-Minitron-4B-Width-Base", model_dir: str = "./models",
                 use_8bit: bool = False, use_unsloth: bool = False, load_in_4bit: bool = True, max_seq_length: int = 2048,
                 reasoning_mode: ReasoningMode = ReasoningMode.CHAIN_OF_THOUGHT,
                 decoding_strategy: DecodingStrategy = DecodingStrategy.NUCLEUS):
        """Configure the RAG system.

        Heavy resources (LLM, tokenizer, embedder, FAISS index) are NOT
        loaded here — they are populated lazily by download_model(),
        load_embedder(), and build_faiss_index().

        Args:
            model_name: HuggingFace model id for the generator LLM.
            model_dir: Root directory for locally cached model weights.
            use_8bit: Use 8-bit quantization in the standard (non-Unsloth) path.
            use_unsloth: Prefer Unsloth's FastLanguageModel loader.
            load_in_4bit: 4-bit quantization when loading via Unsloth.
            max_seq_length: Maximum sequence length for tokenization/generation.
            reasoning_mode: Prompting strategy (see ReasoningMode).
            decoding_strategy: Token-decoding strategy (see DecodingStrategy).
        """
        self.model_name = model_name
        self.model_dir = model_dir
        # Per-model cache subdirectory, e.g. ./models/nvidia_Llama-3.1-Minitron-4B-Width-Base
        self.local_model_path = os.path.join(model_dir, model_name.replace("/", "_"))
        self.use_8bit = use_8bit
        self.use_unsloth = use_unsloth
        self.load_in_4bit = load_in_4bit
        self.max_seq_length = max_seq_length
        self.reasoning_mode = reasoning_mode
        self.decoding_strategy = decoding_strategy
        # Lazily populated by download_model() / load_embedder() / build_faiss_index().
        self.tokenizer = None
        self.model = None
        self.pipe = None
        self.embedder = None
        self.index = None
        self.documents = []
        self.metadata = []
        self.reflexion_history = []  # Store reflection history for Reflexion

        # Create model directory if it doesn't exist
        os.makedirs(self.model_dir, exist_ok=True)
        os.makedirs(self.local_model_path, exist_ok=True)

        print(f"Initializing RAG system with model: {model_name}")
        print(f"Model will be saved to: {self.local_model_path}")
        print(f"Reasoning mode: {reasoning_mode.value}")
        print(f"Decoding strategy: {decoding_strategy.value}")
        if use_unsloth:
            print("[*] Unsloth enabled (faster loading and inference)")
            if load_in_4bit:
                print(" - 4-bit quantization enabled (very fast, low memory)")
        elif use_8bit:
            print("[!] 8-bit quantization enabled (faster loading, lower memory, slight quality trade-off)")
249
+
250
+ def _check_model_files_exist(self, model_path: str) -> bool:
251
+ """Check if model files actually exist (not just config.json)"""
252
+ required_files = [
253
+ "config.json",
254
+ "model.safetensors.index.json" # Check for sharded model index
255
+ ]
256
+
257
+ # Check for at least one model file
258
+ model_file_patterns = [
259
+ "model.safetensors",
260
+ "pytorch_model.bin",
261
+ "model-*.safetensors" # Sharded models
262
+ ]
263
+
264
+ config_exists = os.path.exists(os.path.join(model_path, "config.json"))
265
+ if not config_exists:
266
+ return False
267
+
268
+ # Check for model weight files
269
+ for pattern in model_file_patterns:
270
+ if glob.glob(os.path.join(model_path, pattern)):
271
+ return True
272
+
273
+ # Check for sharded model index
274
+ if os.path.exists(os.path.join(model_path, "model.safetensors.index.json")):
275
+ return True
276
+
277
+ return False
278
+
279
    def download_model(self):
        """Download and load the Llama model, saving weights to local directory.

        Loading strategy, in order:
          1. Unsloth FastLanguageModel (if self.use_unsloth) with a retry that
             drops quantization kwargs on bitsandbytes-related failures.
          2. Standard transformers AutoModelForCausalLM + pipeline, with
             optional 8-bit quantization and attention-impl selection.
          3. Pipeline-only fallback if the standard path raises.
        Side effects: sets self.model / self.tokenizer / self.pipe, and
        best-effort saves weights under self.local_model_path.
        """
        print("Downloading Llama model (this may take a while)...")
        print(f"Model weights will be saved to: {self.local_model_path}")

        # Use Unsloth if enabled (much faster loading) - PRIMARY METHOD
        if self.use_unsloth:
            try:
                from unsloth import FastLanguageModel
                from transformers import TextStreamer
                print("[*] Using Unsloth for fast model loading...")

                # Check if model name indicates it's already quantized (contains "bnb-4bit" or "bnb-8bit")
                is_pre_quantized = "bnb-4bit" in self.model_name.lower() or "bnb-8bit" in self.model_name.lower()

                # For pre-quantized models, don't set load_in_4bit (model is already quantized)
                # For non-quantized models, check if bitsandbytes is available
                if self.load_in_4bit and not is_pre_quantized:
                    try:
                        import bitsandbytes
                        print("[OK] bitsandbytes available for 4-bit quantization")
                    except ImportError:
                        print("[!] bitsandbytes not found. 4-bit quantization requires bitsandbytes.")
                        print(" Install with: pip install bitsandbytes")
                        print(" Falling back to full precision...")
                        self.load_in_4bit = False

                # Check if model exists locally
                if self._check_model_files_exist(self.local_model_path):
                    print(f"Loading from local path: {self.local_model_path}")
                    model_path = self.local_model_path
                else:
                    print(f"Downloading model: {self.model_name}")
                    model_path = self.model_name

                # ==== Load Model with Unsloth (exact pattern from user) ====
                dtype = None  # Auto-detect dtype

                # Try loading with proper error handling for bitsandbytes
                # The model config might have quantization settings that trigger bitsandbytes check
                max_retries = 2
                for attempt in range(max_retries):
                    try:
                        # For pre-quantized models, don't specify load_in_4bit (it's already quantized)
                        if is_pre_quantized or attempt > 0:
                            print("[OK] Loading model without quantization parameters...")
                            # Don't pass any quantization parameters
                            load_kwargs = {
                                "model_name": model_path,
                                "max_seq_length": self.max_seq_length,
                                "dtype": dtype,
                            }
                        else:
                            # For non-quantized models, try quantization if requested
                            load_kwargs = {
                                "model_name": model_path,
                                "max_seq_length": self.max_seq_length,
                                "dtype": dtype,
                            }
                            if self.load_in_4bit:
                                load_kwargs["load_in_4bit"] = True

                        self.model, self.tokenizer = FastLanguageModel.from_pretrained(**load_kwargs)
                        break  # Success, exit retry loop

                    except (ImportError, Exception) as quant_error:
                        # Heuristic match on the error text: bitsandbytes failures
                        # surface under several different exception messages.
                        error_str = str(quant_error)
                        is_bitsandbytes_error = (
                            "bitsandbytes" in error_str.lower() or
                            "PackageNotFoundError" in error_str or
                            "No package metadata" in error_str or
                            "quantization_config" in error_str.lower()
                        )

                        if is_bitsandbytes_error and attempt < max_retries - 1:
                            print(f"[!] Attempt {attempt + 1}: bitsandbytes error detected.")
                            print(f" Error: {error_str[:150]}...")
                            print(" Retrying without quantization parameters...")
                            continue  # Retry without quantization
                        elif is_bitsandbytes_error:
                            print("[!] bitsandbytes required but not installed.")
                            print(" Options:")
                            print(" 1. Install bitsandbytes: pip install bitsandbytes")
                            print(" 2. Use a non-quantized model")
                            print(" 3. Set USE_UNSLOTH=False to use standard loading")
                            raise ImportError(
                                "bitsandbytes is required for this model. "
                                "Install with: pip install bitsandbytes"
                            ) from quant_error
                        else:
                            # Re-raise if it's a different error
                            raise

                # Optimize for inference
                FastLanguageModel.for_inference(self.model)

                print("[OK] Model loaded successfully with Unsloth!")

                # Verify device
                if torch.cuda.is_available():
                    actual_device = next(self.model.parameters()).device
                    print(f"[OK] Model loaded on {actual_device}!")
                    allocated = torch.cuda.memory_allocated(0) / 1024**3
                    print(f"[OK] GPU Memory allocated: {allocated:.2f} GB")
                else:
                    print("[OK] Model loaded on CPU!")

                # Set pipe to model for compatibility (we'll use model directly in generation)
                self.pipe = self.model  # Store model reference for compatibility checks

                return  # Exit early, Unsloth loading complete

            except ImportError:
                print("[!] Unsloth not installed. Falling back to standard loading.")
                print(" Install with: pip install unsloth")
                self.use_unsloth = False  # Disable unsloth for this session
            except Exception as e:
                print(f"[!] Unsloth loading failed: {e}")
                print(" Falling back to standard loading...")
                self.use_unsloth = False

        # Standard loading (original code)
        # Check GPU availability and optimize settings
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            gpu_name = torch.cuda.get_device_name(0)
            gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
            print(f"[OK] GPU detected: {gpu_name}")
            print(f"[OK] GPU Memory: {gpu_memory:.2f} GB")
            # Use bfloat16 for faster loading and inference on GPU
            torch_dtype = torch.bfloat16
            print("[OK] Using bfloat16 precision for faster loading")
        else:
            print("[!] No GPU detected, using CPU (will be slower)")
            torch_dtype = torch.float32
            print("[OK] Using float32 precision for CPU")

        # Check for optimized attention implementation
        try:
            import flash_attn  # noqa: F401
            attn_impl = 'flash_attention_2'
            print("[OK] FlashAttention2 available - using for optimal performance")
        except ImportError:
            attn_impl = 'sdpa'  # Scaled Dot Product Attention (built into PyTorch)
            print("[OK] Using SDPA (Scaled Dot Product Attention) for faster inference")

        # Check for 8-bit quantization support
        use_quantization = False
        if self.use_8bit and device == "cuda":
            try:
                import bitsandbytes
                use_quantization = True
                print("[OK] 8-bit quantization available - will use for faster loading")
            except ImportError:
                print("[!] 8-bit requested but bitsandbytes not installed, using full precision")
                print(" Install with: pip install bitsandbytes")

        try:
            # Check if model already exists locally with actual model files
            if self._check_model_files_exist(self.local_model_path):
                print(f"Found existing model at {self.local_model_path}, loading from local...")
                model_path = self.local_model_path
                load_from_local = True
            else:
                print("Downloading model from HuggingFace...")
                model_path = self.model_name
                load_from_local = False

            # Load tokenizer
            print("Loading tokenizer...")
            self.tokenizer = AutoTokenizer.from_pretrained(
                model_path,
                trust_remote_code=True
            )

            # Save tokenizer locally if downloaded (wrap in try-except to avoid crashes)
            if not load_from_local:
                try:
                    print("Saving tokenizer to local directory...")
                    self.tokenizer.save_pretrained(self.local_model_path)
                    print(f"[OK] Tokenizer saved to {self.local_model_path}")
                except Exception as save_err:
                    print(f"[!] Warning: Could not save tokenizer locally: {save_err}")
                    print("Continuing without local save...")

            # Load model with optimizations
            print("Loading model with optimizations...")
            load_kwargs = {
                "trust_remote_code": True,
                "low_cpu_mem_usage": True,  # Reduces memory usage during loading
                "_attn_implementation": attn_impl,  # Optimized attention
            }

            # Add quantization or dtype
            if use_quantization:
                from transformers import BitsAndBytesConfig
                load_kwargs["quantization_config"] = BitsAndBytesConfig(
                    load_in_8bit=True,
                    llm_int8_threshold=6.0
                )
                print("[OK] Using 8-bit quantization for faster loading and lower memory")
            else:
                load_kwargs["torch_dtype"] = torch_dtype

            # Use device_map="auto" for GPU, manual placement for CPU
            if device == "cuda":
                try:
                    load_kwargs["device_map"] = "auto"
                    print("[OK] Using device_map='auto' for optimal GPU memory management")
                except Exception as e:
                    print(f"[!] device_map='auto' failed, using manual GPU placement: {e}")
                    load_kwargs.pop("device_map", None)

            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                **load_kwargs
            )

            # Manual device placement if device_map wasn't used
            if device == "cuda" and "device_map" not in load_kwargs:
                self.model = self.model.cuda()
                print("[OK] Model moved to GPU")

            # Save model locally if downloaded (wrap in try-except to handle DTensor errors)
            if not load_from_local:
                try:
                    print("Saving model weights to local directory (this may take a while)...")
                    self.model.save_pretrained(
                        self.local_model_path,
                        safe_serialization=True  # Use safetensors format
                    )
                    print(f"[OK] Model saved to {self.local_model_path}")
                except ImportError as import_err:
                    if "DTensor" in str(import_err):
                        print(f"[!] Warning: Could not save model due to PyTorch/transformers compatibility issue: {import_err}")
                        print("This is a known issue with certain versions. Model will work but won't be saved locally.")
                        print("Continuing without local save...")
                    else:
                        raise
                except Exception as save_err:
                    print(f"[!] Warning: Could not save model locally: {save_err}")
                    print("Continuing without local save...")

            # Create pipeline with optimizations
            print("Creating pipeline...")
            pipeline_kwargs = {
                "model": self.model,
                "tokenizer": self.tokenizer,
            }
            if device == "cuda":
                pipeline_kwargs["device_map"] = "auto"

            self.pipe = pipeline("text-generation", **pipeline_kwargs)

            # Verify model device
            if device == "cuda":
                actual_device = next(self.model.parameters()).device
                print(f"[OK] Model loaded successfully on {actual_device}!")
                if torch.cuda.is_available():
                    allocated = torch.cuda.memory_allocated(0) / 1024**3
                    print(f"[OK] GPU Memory allocated: {allocated:.2f} GB")
            else:
                print("[OK] Model loaded successfully on CPU!")

        except Exception as e:
            # Last-resort path: let the pipeline factory handle everything.
            print(f"Error loading model: {e}")
            print("Falling back to pipeline-only loading...")
            try:
                # Determine device and dtype for fallback
                device = "cuda" if torch.cuda.is_available() else "cpu"
                torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32

                # Try loading from local path first (only if model files actually exist)
                if self._check_model_files_exist(self.local_model_path):
                    print(f"Attempting to load from local path: {self.local_model_path}")
                    pipeline_kwargs = {
                        "model": self.local_model_path,
                        "trust_remote_code": True,
                        "torch_dtype": torch_dtype,
                    }
                    if device == "cuda":
                        pipeline_kwargs["device_map"] = "auto"
                    self.pipe = pipeline("text-generation", **pipeline_kwargs)
                    # Extract tokenizer from pipeline if available
                    if hasattr(self.pipe, 'tokenizer'):
                        self.tokenizer = self.pipe.tokenizer
                else:
                    print(f"Downloading model: {self.model_name}")
                    pipeline_kwargs = {
                        "model": self.model_name,
                        "trust_remote_code": True,
                        "torch_dtype": torch_dtype,
                    }
                    if device == "cuda":
                        pipeline_kwargs["device_map"] = "auto"
                    self.pipe = pipeline("text-generation", **pipeline_kwargs)
                    # Extract tokenizer from pipeline if available
                    if hasattr(self.pipe, 'tokenizer'):
                        self.tokenizer = self.pipe.tokenizer

                # Try to save after loading (but don't fail if it doesn't work)
                try:
                    if hasattr(self.pipe, 'model') and hasattr(self.pipe.model, 'save_pretrained'):
                        print("Attempting to save downloaded model to local directory...")
                        self.pipe.model.save_pretrained(self.local_model_path, safe_serialization=True)
                        if hasattr(self.pipe, 'tokenizer'):
                            self.pipe.tokenizer.save_pretrained(self.local_model_path)
                        print("[OK] Model saved successfully")
                except ImportError as import_err:
                    if "DTensor" in str(import_err):
                        print(f"[!] Warning: Could not save model due to compatibility issue. Model will work but won't be saved locally.")
                    else:
                        print(f"[!] Warning: Could not save model: {import_err}")
                except Exception as save_err:
                    print(f"[!] Warning: Could not save model locally: {save_err}")

            except Exception as e2:
                print(f"Pipeline loading also failed: {e2}")
                raise
598
+
599
    def load_embedder(self, model_name: str = "all-MiniLM-L6-v2"):
        """Load sentence transformer for embeddings, saving to local directory.

        Args:
            model_name: SentenceTransformer model id; cached under
                ``{model_dir}/embedder/{model_name}``.

        Side effect: sets ``self.embedder``.
        """
        embedder_dir = os.path.join(self.model_dir, "embedder", model_name.replace("/", "_"))
        os.makedirs(embedder_dir, exist_ok=True)

        print(f"Loading embedding model: {model_name}...")
        print(f"Embedder will be cached in: {embedder_dir}")

        # Check if embedder exists locally (check for actual model files, not just config)
        config_path = os.path.join(embedder_dir, "config.json")
        has_model_files = False
        if os.path.exists(config_path):
            # Check if model files exist (single checkpoint or shard index)
            model_files = glob.glob(os.path.join(embedder_dir, "*.safetensors")) + \
                          glob.glob(os.path.join(embedder_dir, "pytorch_model.bin"))
            if model_files or os.path.exists(os.path.join(embedder_dir, "model.safetensors.index.json")):
                has_model_files = True

        if has_model_files:
            print(f"Loading embedder from local cache: {embedder_dir}")
            self.embedder = SentenceTransformer(embedder_dir)
        else:
            print("Downloading embedder from HuggingFace...")
            self.embedder = SentenceTransformer(model_name, cache_folder=embedder_dir)
            # Try to save to local directory (but don't fail if it doesn't work)
            try:
                self.embedder.save(embedder_dir)
                print(f"[OK] Embedder saved to {embedder_dir}")
            except ImportError as import_err:
                # Known PyTorch/transformers version incompatibility; the
                # in-memory embedder remains usable either way.
                if "DTensor" in str(import_err):
                    print(f"[!] Warning: Could not save embedder due to PyTorch/transformers compatibility issue: {import_err}")
                    print("This is a known issue with certain versions. Embedder will work but won't be saved locally.")
                    print("Continuing without local save...")
                else:
                    print(f"[!] Warning: Could not save embedder: {import_err}")
            except Exception as save_err:
                print(f"[!] Warning: Could not save embedder locally: {save_err}")
                print("Continuing without local save...")

        print("[OK] Embedding model loaded!")
639
+
640
+ def build_faiss_index(self, documents: List[str], metadata: List[Dict] = None):
641
+ """
642
+ Build FAISS index from documents
643
+
644
+ Args:
645
+ documents: List of text documents to index
646
+ metadata: Optional metadata for each document
647
+ """
648
+ if not self.embedder:
649
+ self.load_embedder()
650
+
651
+ print(f"Building FAISS index for {len(documents)} documents...")
652
+
653
+ # Generate embeddings
654
+ embeddings = self.embedder.encode(documents, show_progress_bar=True)
655
+ embeddings = np.array(embeddings).astype('float32')
656
+
657
+ # Get dimension
658
+ dimension = embeddings.shape[1]
659
+
660
+ # Create FAISS index (L2 distance)
661
+ self.index = faiss.IndexFlatL2(dimension)
662
+
663
+ # Add embeddings to index
664
+ self.index.add(embeddings)
665
+
666
+ # Store documents and metadata
667
+ self.documents = documents
668
+ self.metadata = metadata if metadata else [{}] * len(documents)
669
+
670
+ print(f"[OK] FAISS index built with {self.index.ntotal} vectors")
671
+
672
+ def build_index_from_json(self, json_data: Dict[str, Any]):
673
+ """Build FAISS index from exported JSON data"""
674
+ documents = []
675
+ metadata = []
676
+
677
+ # Add room documents
678
+ for room in json_data.get("all_rooms", []):
679
+ # Create text representation
680
+ room_text = self._room_to_text(room)
681
+ documents.append(room_text)
682
+ metadata.append({
683
+ "type": "room",
684
+ "room_id": room.get("room_id"),
685
+ "data": room
686
+ })
687
+
688
+ # Add route documents
689
+ for route in json_data.get("evacuation_routes", {}).get("routes", []):
690
+ route_text = self._route_to_text(route)
691
+ documents.append(route_text)
692
+ metadata.append({
693
+ "type": "route",
694
+ "route_id": route.get("route_id"),
695
+ "exit": route.get("exit"),
696
+ "data": route
697
+ })
698
+
699
+ # Build index
700
+ self.build_faiss_index(documents, metadata)
701
+
702
+ def _room_to_text(self, room: Dict[str, Any]) -> str:
703
+ """Convert room data to searchable text"""
704
+ sensor = room.get("sensor_data", {})
705
+
706
+ text_parts = [
707
+ f"Room {room.get('room_id')} ({room.get('name')})",
708
+ f"Type: {room.get('room_type')}",
709
+ ]
710
+
711
+ if room.get("has_oxygen_cylinder"):
712
+ text_parts.append("[!]️ OXYGEN CYLINDER PRESENT - EXPLOSION RISK")
713
+
714
+ if sensor.get("fire_detected"):
715
+ text_parts.append("[FIRE] FIRE DETECTED")
716
+
717
+ text_parts.extend([
718
+ f"Temperature: {sensor.get('temperature_c')}°C",
719
+ f"Smoke level: {sensor.get('smoke_level')}",
720
+ f"Oxygen: {sensor.get('oxygen_pct')}%",
721
+ f"Visibility: {sensor.get('visibility_pct')}%",
722
+ f"Structural integrity: {sensor.get('structural_integrity_pct')}%",
723
+ f"Danger score: {sensor.get('danger_score')}",
724
+ f"Passable: {sensor.get('passable')}"
725
+ ])
726
+
727
+ if sensor.get("carbon_monoxide_ppm", 0) > 50:
728
+ text_parts.append(f"[!]️ HIGH CARBON MONOXIDE: {sensor.get('carbon_monoxide_ppm')} ppm")
729
+
730
+ if sensor.get("flashover_risk", 0) > 0.5:
731
+ text_parts.append(f"[!]️ FLASHOVER RISK: {sensor.get('flashover_risk')*100:.0f}%")
732
+
733
+ if not sensor.get("exit_accessible", True):
734
+ text_parts.append("[!]️ EXIT BLOCKED")
735
+
736
+ if sensor.get("occupancy_density", 0) > 0.7:
737
+ text_parts.append(f"[!]️ HIGH CROWD DENSITY: {sensor.get('occupancy_density')*100:.0f}%")
738
+
739
+ return " | ".join(text_parts)
740
+
741
+ def _route_to_text(self, route: Dict[str, Any]) -> str:
742
+ """Convert route data to searchable text"""
743
+ metrics = route.get("metrics", {})
744
+
745
+ text_parts = [
746
+ f"{route.get('route_id')} to {route.get('exit')}",
747
+ f"Path: {' → '.join(route.get('path', []))}",
748
+ f"Average danger: {metrics.get('avg_danger')}",
749
+ f"Max danger: {metrics.get('max_danger')} at {metrics.get('max_danger_location')}",
750
+ f"Passable: {metrics.get('passable')}",
751
+ f"Has fire: {metrics.get('has_fire')}",
752
+ f"Has oxygen hazard: {metrics.get('has_oxygen_hazard')}"
753
+ ]
754
+
755
+ risk_factors = metrics.get("risk_factors", [])
756
+ if risk_factors:
757
+ text_parts.append(f"Risks: {', '.join(risk_factors[:3])}")
758
+
759
+ return " | ".join(text_parts)
760
+
761
+ def search(self, query: str, k: int = 5) -> List[Dict[str, Any]]:
762
+ """
763
+ Search FAISS index for relevant documents
764
+
765
+ Args:
766
+ query: Search query
767
+ k: Number of results to return
768
+
769
+ Returns:
770
+ List of relevant documents with metadata
771
+ """
772
+ if not self.index or not self.embedder:
773
+ raise ValueError("Index not built. Call build_faiss_index() first.")
774
+
775
+ # Encode query
776
+ query_embedding = self.embedder.encode([query])
777
+ query_embedding = np.array(query_embedding).astype('float32')
778
+
779
+ # Search
780
+ distances, indices = self.index.search(query_embedding, k)
781
+
782
+ # Return results
783
+ results = []
784
+ for i, idx in enumerate(indices[0]):
785
+ if idx < len(self.documents):
786
+ results.append({
787
+ "document": self.documents[idx],
788
+ "metadata": self.metadata[idx],
789
+ "distance": float(distances[0][i])
790
+ })
791
+
792
+ return results
793
+
794
+ def _build_cot_prompt(self, query: str, context: List[str]) -> str:
795
+ """Build Chain-of-Thought prompt with step-by-step reasoning"""
796
+ context_text = "\n".join([f"- {ctx}" for ctx in context])
797
+
798
+ prompt = f"""You are an expert fire evacuation safety advisor. Use the following context to answer the question concisely.
799
+
800
+ CONTEXT:
801
+ {context_text}
802
+
803
+ QUESTION: {query}
804
+
805
+ Think step by step, then provide a brief answer:
806
+
807
+ REASONING:
808
+ 1. Analyze available information
809
+ 2. Identify key safety factors
810
+ 3. Evaluate risks and prioritize
811
+ 4. Conclude with recommendation
812
+
813
+ ANSWER:"""
814
+ return prompt
815
+
816
+ def _build_tot_prompt(self, query: str, context: List[str], thought: str = "") -> str:
817
+ """Build Tree-of-Thoughts prompt for exploring multiple reasoning paths"""
818
+ context_text = "\n".join([f"- {ctx}" for ctx in context])
819
+
820
+ if not thought:
821
+ prompt = f"""You are an expert fire evacuation safety advisor. Use the following context to explore different reasoning approaches.
822
+
823
+ CONTEXT:
824
+ {context_text}
825
+
826
+ QUESTION: {query}
827
+
828
+ Let's explore different reasoning approaches to solve this problem:
829
+
830
+ APPROACH 1 - Safety-First Analysis:
831
+ """
832
+ else:
833
+ prompt = f"""CONTEXT:
834
+ {context_text}
835
+
836
+ QUESTION: {query}
837
+
838
+ CURRENT THOUGHT: {thought}
839
+
840
+ Evaluate this thought:
841
+ - Is this reasoning sound?
842
+ - What are the strengths and weaknesses?
843
+ - What alternative approaches should we consider?
844
+
845
+ EVALUATION:
846
+ """
847
+ return prompt
848
+
849
+ def _build_reflexion_prompt(self, query: str, context: List[str], previous_answer: str = "",
850
+ reflection: str = "") -> str:
851
+ """Build Reflexion prompt for self-reflection and improvement"""
852
+ context_text = "\n".join([f"- {ctx}" for ctx in context])
853
+
854
+ if not previous_answer:
855
+ # Initial answer
856
+ prompt = f"""You are an expert fire evacuation safety advisor. Use the following context to answer the question.
857
+
858
+ CONTEXT:
859
+ {context_text}
860
+
861
+ QUESTION: {query}
862
+
863
+ Provide a clear, safety-focused answer based on the context.
864
+
865
+ ANSWER:"""
866
+ else:
867
+ # Reflection phase
868
+ prompt = f"""You are an expert fire evacuation safety advisor. Review and improve your previous answer.
869
+
870
+ CONTEXT:
871
+ {context_text}
872
+
873
+ QUESTION: {query}
874
+
875
+ PREVIOUS ANSWER:
876
+ {previous_answer}
877
+
878
+ REFLECTION:
879
+ {reflection}
880
+
881
+ Now provide an improved answer based on your reflection:
882
+
883
+ IMPROVED ANSWER:"""
884
+ return prompt
885
+
886
+ def _build_cot_with_tools_prompt(self, query: str, context: List[str], tool_results: List[str] = None) -> str:
887
+ """Build Chain-of-Thought prompt with tool integration"""
888
+ context_text = "\n".join([f"- {ctx}" for ctx in context])
889
+
890
+ tool_text = ""
891
+ if tool_results:
892
+ tool_text = "\nTOOL RESULTS:\n" + "\n".join([f"- {result}" for result in tool_results])
893
+
894
+ prompt = f"""You are an expert fire evacuation safety advisor. Use the following context and tool results to answer the question.
895
+
896
+ CONTEXT:
897
+ {context_text}
898
+ {tool_text}
899
+
900
+ QUESTION: {query}
901
+
902
+ Let's solve this step by step, using both the context and tool results:
903
+
904
+ STEP 1 - Understand the question and available data:
905
+ """
906
+ return prompt
907
+
908
+ def _generate_with_decoding_strategy(self, prompt: str, max_length: int = 500,
909
+ temperature: float = 0.7, top_p: float = 0.9,
910
+ num_beams: int = 3, stop_sequences: List[str] = None) -> str:
911
+ """Generate response using specified decoding strategy"""
912
+ if not self.pipe and not self.model:
913
+ raise ValueError("Model not loaded. Call download_model() first.")
914
+
915
+ try:
916
+ if self.use_unsloth and self.model:
917
+ inputs = self.tokenizer(
918
+ prompt,
919
+ return_tensors="pt",
920
+ truncation=True,
921
+ max_length=self.max_seq_length
922
+ ).to(self.model.device)
923
+
924
+ # Configure generation parameters based on decoding strategy
925
+ gen_kwargs = {
926
+ "max_new_tokens": max_length,
927
+ "pad_token_id": self.tokenizer.eos_token_id,
928
+ "eos_token_id": self.tokenizer.eos_token_id,
929
+ }
930
+
931
+ if self.decoding_strategy == DecodingStrategy.GREEDY:
932
+ gen_kwargs.update({
933
+ "do_sample": False,
934
+ "num_beams": 1
935
+ })
936
+ elif self.decoding_strategy == DecodingStrategy.SAMPLING:
937
+ gen_kwargs.update({
938
+ "do_sample": True,
939
+ "temperature": temperature,
940
+ "top_k": 50
941
+ })
942
+ elif self.decoding_strategy == DecodingStrategy.BEAM_SEARCH:
943
+ gen_kwargs.update({
944
+ "do_sample": False,
945
+ "num_beams": num_beams,
946
+ "early_stopping": True
947
+ })
948
+ elif self.decoding_strategy == DecodingStrategy.NUCLEUS:
949
+ gen_kwargs.update({
950
+ "do_sample": True,
951
+ "temperature": temperature,
952
+ "top_p": top_p,
953
+ "top_k": 0
954
+ })
955
+ elif self.decoding_strategy == DecodingStrategy.TEMPERATURE:
956
+ gen_kwargs.update({
957
+ "do_sample": True,
958
+ "temperature": temperature
959
+ })
960
+
961
+ with torch.no_grad():
962
+ outputs = self.model.generate(**inputs, **gen_kwargs)
963
+
964
+ response = self.tokenizer.batch_decode(
965
+ outputs,
966
+ skip_special_tokens=True
967
+ )[0]
968
+
969
+ # Extract response after prompt
970
+ if prompt in response:
971
+ response = response.split(prompt)[-1].strip()
972
+
973
+ # Post-process to stop at verbose endings
974
+ stop_phrases = [
975
+ "\n\nHowever, please note",
976
+ "\n\nAdditionally,",
977
+ "\n\nLet me know",
978
+ "\n\nIf you have",
979
+ "\n\nHere's another",
980
+ "\n\nQUESTION:",
981
+ "\n\nLet's break",
982
+ "\n\nHave a great day",
983
+ "\n\nI'm here to help"
984
+ ]
985
+ for phrase in stop_phrases:
986
+ if phrase in response:
987
+ response = response.split(phrase)[0].strip()
988
+ break
989
+
990
+ return response
991
+ else:
992
+ # Use pipeline for standard models
993
+ gen_kwargs = {
994
+ "max_length": len(self.tokenizer.encode(prompt)) + max_length,
995
+ "num_return_sequences": 1,
996
+ }
997
+
998
+ if self.decoding_strategy == DecodingStrategy.GREEDY:
999
+ gen_kwargs.update({
1000
+ "do_sample": False
1001
+ })
1002
+ elif self.decoding_strategy == DecodingStrategy.SAMPLING:
1003
+ gen_kwargs.update({
1004
+ "do_sample": True,
1005
+ "temperature": temperature,
1006
+ "top_k": 50
1007
+ })
1008
+ elif self.decoding_strategy == DecodingStrategy.BEAM_SEARCH:
1009
+ gen_kwargs.update({
1010
+ "do_sample": False,
1011
+ "num_beams": num_beams,
1012
+ "early_stopping": True
1013
+ })
1014
+ elif self.decoding_strategy == DecodingStrategy.NUCLEUS:
1015
+ gen_kwargs.update({
1016
+ "do_sample": True,
1017
+ "temperature": temperature,
1018
+ "top_p": top_p,
1019
+ "top_k": 0
1020
+ })
1021
+ elif self.decoding_strategy == DecodingStrategy.TEMPERATURE:
1022
+ gen_kwargs.update({
1023
+ "do_sample": True,
1024
+ "temperature": temperature
1025
+ })
1026
+
1027
+ gen_kwargs["pad_token_id"] = self.tokenizer.eos_token_id if self.tokenizer else None
1028
+
1029
+ outputs = self.pipe(prompt, **gen_kwargs)
1030
+ response = outputs[0]['generated_text']
1031
+
1032
+ # Extract response after prompt
1033
+ if prompt in response:
1034
+ response = response.split(prompt)[-1].strip()
1035
+
1036
+ # Post-process to stop at verbose endings
1037
+ stop_phrases = [
1038
+ "\n\nHowever, please note",
1039
+ "\n\nAdditionally,",
1040
+ "\n\nLet me know",
1041
+ "\n\nIf you have",
1042
+ "\n\nHere's another",
1043
+ "\n\nQUESTION:",
1044
+ "\n\nLet's break",
1045
+ "\n\nHave a great day",
1046
+ "\n\nI'm here to help"
1047
+ ]
1048
+ for phrase in stop_phrases:
1049
+ if phrase in response:
1050
+ response = response.split(phrase)[0].strip()
1051
+ break
1052
+
1053
+ return response
1054
+
1055
+ except Exception as e:
1056
+ return f"Error generating response: {e}"
1057
+
1058
def _chain_of_thought_reasoning(self, query: str, context: List[str], max_length: int = 500) -> Tuple[str, str]:
    """Answer a query via Chain-of-Thought prompting.

    Builds a CoT prompt, generates a length-capped completion, then splits
    the completion into a reasoning section and a final answer, trimming the
    model's typical verbose boilerplate from both.

    Returns:
        Tuple of (full_reasoning, final_answer).
    """
    cot_prompt = self._build_cot_prompt(query, context)
    # Cap the budget: CoT outputs tend to ramble past ~300 tokens.
    completion = self._generate_with_decoding_strategy(cot_prompt, max_length=min(max_length, 300))

    # --- Reasoning: text between "REASONING:" and "ANSWER:" (with fallbacks) ---
    reasoning = ""
    if "REASONING:" in completion:
        tail = completion.split("REASONING:")[1]
        section = tail.split("ANSWER:")[0] if "ANSWER:" in tail else tail
        reasoning = section.strip()
    elif "ANSWER:" in completion:
        reasoning = completion.split("ANSWER:")[0].strip()
    else:
        # Fallback: harvest numbered steps until an ANSWER marker shows up.
        step_lines: List[str] = []
        for raw_line in completion.split('\n'):
            if raw_line.strip().startswith(('1.', '2.', '3.', '4.', '5.', 'Step', 'STEP')):
                step_lines.append(raw_line.strip())
            elif "ANSWER" in raw_line.upper():
                break
            elif step_lines:  # keep collecting once steps have started
                step_lines.append(raw_line.strip())
        reasoning = '\n'.join(step_lines)

    # --- Final answer: text after the last "ANSWER:", trimmed of boilerplate ---
    final_answer = completion
    if "ANSWER:" in completion:
        candidate = completion.split("ANSWER:")[-1].strip()
        # Cut at the first known continuation marker.
        for stop in ("\n\nHowever, please note", "\n\nAdditionally,", "\n\nLet me know",
                     "\n\nIf you have", "\n\nHere's another", "\n\nQUESTION:",
                     "\n\nLet's break", "\n\nHave a great day", "\n\nI'm here to help",
                     "\n\nThese general guidelines", "\n\nIf you have any further"):
            if stop in candidate:
                candidate = candidate.split(stop)[0].strip()
                break
        # Keep the answer tight: at most three sentences.
        pieces = candidate.split('. ')
        if len(pieces) > 3:
            candidate = '. '.join(pieces[:3])
            if not candidate.endswith('.'):
                candidate += '.'
        final_answer = candidate

    # --- Strip verbose trailing chatter from the reasoning as well ---
    if reasoning:
        for verbose in ("However, please note", "Additionally,", "Let me know",
                        "If you have", "Here's another", "Have a great day",
                        "I'm here to help"):
            if verbose in reasoning:
                reasoning = reasoning.split(verbose)[0].strip()
                break

    return reasoning or "Reasoning steps generated", final_answer
1140
+
1141
def _tree_of_thoughts_reasoning(self, query: str, context: List[str], max_length: int = 500,
                                max_thoughts: int = 3) -> Tuple[str, str]:
    """Answer a query via Tree-of-Thoughts: sample several independent
    reasoning approaches, then have the model judge them and produce the
    winning answer.

    Returns:
        Tuple of (full_reasoning, final_answer).
    """
    branch_budget = max_length // max_thoughts
    branches: List[str] = []
    log: List[str] = []

    # Sample independent candidate approaches.
    for branch_no in range(1, max_thoughts + 1):
        branch_prompt = self._build_tot_prompt(query, context,
                                               thought=f"Exploring approach {branch_no}")
        branch = self._generate_with_decoding_strategy(branch_prompt, branch_budget)
        branches.append(branch)
        log.append(f"APPROACH {branch_no}:\n{branch}\n")

    # Ask the model to evaluate the branches and pick the best.
    judge_prompt = f"""Evaluate these different reasoning approaches for answering the question:

QUESTION: {query}

APPROACHES:
"""
    judge_prompt += "".join(f"\nAPPROACH {n}:\n{b}\n" for n, b in enumerate(branches, 1))
    judge_prompt += "\nWhich approach is most sound and complete? Provide the best answer based on the evaluation.\n\nBEST ANSWER:"

    verdict = self._generate_with_decoding_strategy(judge_prompt, max_length)

    return "\n".join(log) + f"\n\nEVALUATION:\n{verdict}", verdict
1175
+
1176
def _reflexion_reasoning(self, query: str, context: List[str], max_length: int = 500,
                         max_iterations: int = 2) -> Tuple[str, str]:
    """Generate a response using Reflexion (self-reflection and improvement).

    Drafts an initial answer, then repeatedly critiques and regenerates it,
    keeping an improved draft only if it is not drastically shorter than the
    current one.

    Returns:
        Tuple of (full_reasoning, final_answer).
    """
    reasoning_log = []

    # Initial answer from the base Reflexion prompt.
    initial_prompt = self._build_reflexion_prompt(query, context)
    answer = self._generate_with_decoding_strategy(initial_prompt, max_length)
    reasoning_log.append(f"INITIAL ANSWER:\n{answer}\n")

    # Track completed rounds explicitly: the original referenced the loop
    # variable `iteration` after the loop, which raised NameError when
    # max_iterations == 0.
    iterations_run = 0

    # Reflection and improvement iterations.
    for iteration in range(max_iterations):
        iterations_run = iteration + 1

        # Generate a critique of the current answer.
        reflection_prompt = f"""Review this answer for a fire evacuation safety question:

QUESTION: {query}

CURRENT ANSWER:
{answer}

What could be improved? Consider:
- Accuracy of safety information
- Completeness of the response
- Clarity and actionability
- Missing critical safety factors

REFLECTION:"""

        reflection = self._generate_with_decoding_strategy(reflection_prompt, max_length // 2)
        reasoning_log.append(f"ITERATION {iteration + 1} - REFLECTION:\n{reflection}\n")

        # Regenerate the answer, conditioned on the critique.
        improved_prompt = self._build_reflexion_prompt(query, context, answer, reflection)
        improved_answer = self._generate_with_decoding_strategy(improved_prompt, max_length)
        reasoning_log.append(f"ITERATION {iteration + 1} - IMPROVED ANSWER:\n{improved_answer}\n")

        # Simple quality heuristic: accept only if the rewrite keeps at least
        # 80% of the previous answer's length; otherwise stop iterating.
        if len(improved_answer) > len(answer) * 0.8:
            answer = improved_answer
        else:
            break

    self.reflexion_history.append({
        "query": query,
        "final_answer": answer,
        "iterations": iterations_run
    })

    full_reasoning = "\n".join(reasoning_log)
    return full_reasoning, answer
1230
+
1231
def _cot_with_tools_reasoning(self, query: str, context: List[str], max_length: int = 500) -> Tuple[str, str]:
    """Generate a response using Chain-of-Thought with (simulated) tool calls.

    Keyword-matches the query to decide which mock tools to "invoke", feeds
    the tool results into a CoT prompt, and extracts the final answer.

    Returns:
        Tuple of (full_reasoning, final_answer).
    """
    reasoning_log = []
    tool_results = []
    lowered_query = query.lower()

    # Simulated tool calls — in a real deployment these would invoke actual tools.
    if "route" in lowered_query or "path" in lowered_query:
        tool_result = "Tool: Route Analyzer - Found 3 evacuation routes with risk scores"
        tool_results.append(tool_result)
        reasoning_log.append(f"TOOL CALL: {tool_result}\n")

    if "danger" in lowered_query or "risk" in lowered_query:
        tool_result = "Tool: Risk Calculator - Calculated danger scores for all rooms"
        tool_results.append(tool_result)
        reasoning_log.append(f"TOOL CALL: {tool_result}\n")

    if "sensor" in lowered_query or "temperature" in lowered_query or "smoke" in lowered_query:
        tool_result = "Tool: Sensor Aggregator - Aggregated sensor data from all rooms"
        tool_results.append(tool_result)
        reasoning_log.append(f"TOOL CALL: {tool_result}\n")

    prompt = self._build_cot_with_tools_prompt(query, context, tool_results)
    response = self._generate_with_decoding_strategy(prompt, max_length)

    reasoning_log.append(f"REASONING WITH TOOLS:\n{response}\n")
    full_reasoning = "\n".join(reasoning_log)

    # Extract the final answer after the LAST "answer:" tag, case-insensitively.
    # The original checked for any-case "answer:" but split only on exact-case
    # variants, so mixed-case tags like "Answer:" were never extracted.
    final_answer = response
    lowered_response = response.lower()
    tag = "answer:"
    if tag in lowered_response:
        tag_pos = lowered_response.rfind(tag)
        final_answer = response[tag_pos + len(tag):].strip()

    return full_reasoning, final_answer
1274
+
1275
def generate_response(self, query: str, context: List[str] = None, max_length: int = 500,
                      return_reasoning: bool = False) -> "Union[str, Tuple[str, str]]":
    """
    Generate a response using the Llama model with context and advanced reasoning.

    Args:
        query: User query.
        context: Optional context strings (if None, retrieves top-3 from FAISS).
        max_length: Maximum response length.
        return_reasoning: If True, returns a (reasoning, answer) tuple.

    Returns:
        If return_reasoning is True: tuple of (reasoning_steps, final_answer).
        Otherwise: just the final answer string.

    Raises:
        ValueError: If no model has been loaded yet.
    """
    # Annotation fixed: the original declared ``-> str`` even though a tuple
    # is returned when return_reasoning=True. A string annotation avoids a
    # hard dependency on ``typing.Union`` being imported at module level.
    if not self.pipe and not self.model:
        raise ValueError("Model not loaded. Call download_model() first.")

    # Retrieve context from the FAISS index if the caller did not supply any.
    if context is None:
        search_results = self.search(query, k=3)
        context = [r["document"] for r in search_results]

    # Dispatch to the reasoning strategy configured on this instance.
    if self.reasoning_mode == ReasoningMode.CHAIN_OF_THOUGHT:
        reasoning, answer = self._chain_of_thought_reasoning(query, context, max_length)
    elif self.reasoning_mode == ReasoningMode.TREE_OF_THOUGHTS:
        reasoning, answer = self._tree_of_thoughts_reasoning(query, context, max_length)
    elif self.reasoning_mode == ReasoningMode.REFLEXION:
        reasoning, answer = self._reflexion_reasoning(query, context, max_length)
    elif self.reasoning_mode == ReasoningMode.COT_WITH_TOOLS:
        reasoning, answer = self._cot_with_tools_reasoning(query, context, max_length)
    else:
        # Standard mode — plain RAG prompt with the configured decoding strategy.
        context_text = "\n".join([f"- {ctx}" for ctx in context])

        prompt = f"""You are an expert fire evacuation safety advisor. Use the following context about the building's fire safety status to answer the question.

CONTEXT:
{context_text}

QUESTION: {query}

Provide a clear, safety-focused answer based on the context. If the context doesn't contain enough information, say so.

ANSWER:"""

        answer = self._generate_with_decoding_strategy(prompt, max_length)
        reasoning = f"Standard reasoning mode - Direct answer generation.\n\n{answer}"

    if return_reasoning:
        return reasoning, answer
    return answer
1328
+
1329
def set_reasoning_mode(self, mode: ReasoningMode):
    """Switch the reasoning strategy applied to subsequent queries."""
    self.reasoning_mode = mode
    print("[OK] Reasoning mode set to: " + mode.value)
1333
+
1334
def set_decoding_strategy(self, strategy: DecodingStrategy):
    """Switch the token-decoding strategy applied to subsequent queries."""
    self.decoding_strategy = strategy
    print("[OK] Decoding strategy set to: " + strategy.value)
1338
+
1339
def query(self, question: str, k: int = 3, reasoning_mode: Optional[ReasoningMode] = None,
          show_reasoning: bool = True) -> Dict[str, Any]:
    """
    Complete RAG query: retrieve context, then generate a reasoned answer.

    Args:
        question: User question.
        k: Number of context documents to retrieve.
        reasoning_mode: Optional one-shot override of the instance's mode.
        show_reasoning: If True, the result includes the full reasoning steps.

    Returns:
        Dict with the answer, retrieved context, source metadata, and the
        reasoning/decoding configuration used (plus reasoning steps if requested).
    """
    hits = self.search(question, k=k)
    context_docs = [hit["document"] for hit in hits]

    # Temporarily swap in the override mode, restoring the original even on error.
    previous_mode = self.reasoning_mode
    if reasoning_mode is not None:
        self.reasoning_mode = reasoning_mode
    try:
        reasoning, answer = self.generate_response(question, context_docs, return_reasoning=True)
    finally:
        self.reasoning_mode = previous_mode

    result = {
        "question": question,
        "answer": answer,
        "context": context_docs,
        "reasoning_mode": self.reasoning_mode.value,
        "decoding_strategy": self.decoding_strategy.value,
        "sources": [
            {
                "type": hit["metadata"].get("type"),
                "room_id": hit["metadata"].get("room_id"),
                "route_id": hit["metadata"].get("route_id"),
                # Convert L2 distance into a (0, 1] relevance score.
                "relevance_score": 1.0 / (1.0 + hit["distance"]),
            }
            for hit in hits
        ],
    }

    if show_reasoning:
        result["reasoning_steps"] = reasoning

    return result
1391
+
1392
def save_index(self, index_path: str, metadata_path: str):
    """Save FAISS index and metadata"""
    # NOTE(review): nesting reconstructed from an indentation-stripped dump —
    # this assumes the metadata pickle and the success message are guarded by
    # the same `if self.index` check; confirm against the original file.
    if self.index:
        # Persist the FAISS index itself, then the parallel document/metadata lists.
        faiss.write_index(self.index, index_path)
        with open(metadata_path, 'wb') as f:
            pickle.dump({
                "documents": self.documents,
                "metadata": self.metadata
            }, f)
        print(f"[OK] Saved index to {index_path} and metadata to {metadata_path}")
1402
+
1403
def load_index(self, index_path: str, metadata_path: str):
    """Restore a previously saved FAISS index plus its document metadata."""
    self.index = faiss.read_index(index_path)
    with open(metadata_path, 'rb') as fh:
        stored = pickle.load(fh)
    # Documents and metadata are parallel lists keyed by vector position.
    self.documents = stored["documents"]
    self.metadata = stored["metadata"]
    print(f"[OK] Loaded index with {self.index.ntotal} vectors")
1411
+
1412
def compare_reasoning_modes(self, question: str, k: int = 3) -> Dict[str, Any]:
    """
    Run the same question through every reasoning mode and collect the answers.

    Args:
        question: User question.
        k: Number of context documents to retrieve (shared by all modes).

    Returns:
        Dict with the question, shared context/sources, and a per-mode entry
        containing either the answer/reasoning/length or an error string.
    """
    # Retrieve context once and reuse it for every mode.
    hits = self.search(question, k=k)
    docs = [h["document"] for h in hits]

    report: Dict[str, Any] = {
        "question": question,
        "context": docs,
        "sources": [
            {
                "type": h["metadata"].get("type"),
                "room_id": h["metadata"].get("room_id"),
                "route_id": h["metadata"].get("route_id"),
                "relevance_score": 1.0 / (1.0 + h["distance"]),
            }
            for h in hits
        ],
        "answers": {},
    }

    saved_mode = self.reasoning_mode

    # Try each mode in turn; a failure in one mode must not abort the others.
    for candidate in ReasoningMode:
        try:
            self.reasoning_mode = candidate
            steps, final = self.generate_response(question, docs, return_reasoning=True)
            report["answers"][candidate.value] = {
                "answer": final,
                "reasoning": steps,
                "length": len(final),
            }
        except Exception as exc:
            report["answers"][candidate.value] = {"error": str(exc)}

    self.reasoning_mode = saved_mode

    return report
1464
+
1465
+
1466
+ def main():
1467
+ """Main function to set up and demonstrate the RAG system"""
1468
+ # ========== Configuration ==========
1469
+ USE_UNSLOTH = True # Set to True for fastest loading (requires: pip install unsloth)
1470
+ USE_8BIT = False # Set to True for 8-bit quantization (alternative to Unsloth)
1471
+
1472
+ # Unsloth model options:
1473
+ # - "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit" (pre-quantized, requires bitsandbytes)
1474
+ # - "unsloth/Meta-Llama-3.1-8B-Instruct" (non-quantized, no bitsandbytes needed)
1475
+ # - "unsloth/Llama-3.1-8B-Instruct" (alternative non-quantized)
1476
+ # Note: Using non-quantized model by default to avoid bitsandbytes requirement
1477
+ UNSLOTH_MODEL = "unsloth/Meta-Llama-3.1-8B-Instruct" # Non-quantized (no bitsandbytes needed)
1478
+ # ====================================
1479
+
1480
+ print("="*80)
1481
+ print("Fire Evacuation RAG System - Llama + FAISS")
1482
+ print("="*80)
1483
+ print()
1484
+
1485
+ # Step 1: Create fire evacuation system
1486
+ print("Step 1: Creating fire evacuation system...")
1487
+ floor_plan = create_sample_floor_plan()
1488
+ sensor_system = create_sample_fire_scenario(floor_plan)
1489
+ pathfinder = PathFinder(floor_plan, sensor_system)
1490
+ print("[OK] System initialized")
1491
+ print()
1492
+
1493
+ # Step 2: Export data to JSON
1494
+ print("Step 2: Exporting data to JSON...")
1495
+ exporter = FireEvacuationDataExporter(floor_plan, sensor_system, pathfinder)
1496
+ json_path = "fire_evacuation_data.json"
1497
+ json_data = exporter.export_to_json(json_path, start_location="R1")
1498
+ print()
1499
+
1500
+ # Step 3: Initialize RAG system
1501
+ print("Step 3: Initializing RAG system...")
1502
+ # Model will be saved in ./models/ directory
1503
+
1504
+ if USE_UNSLOTH:
1505
+ # Unsloth loading (FASTEST - recommended!)
1506
+ # Install with: pip install unsloth
1507
+ # Note: If you get bitsandbytes error, either:
1508
+ # 1. Install bitsandbytes: pip install bitsandbytes
1509
+ # 2. Use a non-quantized model: "unsloth/Meta-Llama-3.1-8B-Instruct" (without -bnb-4bit)
1510
+ # 3. Set USE_UNSLOTH=False to use standard loading
1511
+ print("[*] Using Unsloth for fast model loading...")
1512
+
1513
+ rag = FireEvacuationRAG(
1514
+ model_name=UNSLOTH_MODEL,
1515
+ model_dir="./models",
1516
+ use_unsloth=True, # Enable Unsloth for fast loading
1517
+ load_in_4bit=False, # Set to False for pre-quantized models (they're already quantized)
1518
+ max_seq_length=2048, # Maximum sequence length
1519
+ reasoning_mode=ReasoningMode.CHAIN_OF_THOUGHT, # Use CoT by default
1520
+ decoding_strategy=DecodingStrategy.NUCLEUS # Use nucleus sampling by default
1521
+ )
1522
+ else:
1523
+ # Standard loading (slower but more compatible)
1524
+ print("Using standard model loading...")
1525
+ rag = FireEvacuationRAG(
1526
+ model_name="nvidia/Llama-3.1-Minitron-4B-Width-Base",
1527
+ model_dir="./models",
1528
+ use_8bit=USE_8BIT,
1529
+ reasoning_mode=ReasoningMode.CHAIN_OF_THOUGHT, # Use CoT by default
1530
+ decoding_strategy=DecodingStrategy.NUCLEUS # Use nucleus sampling by default
1531
+ )
1532
+
1533
+ # Download model
1534
+ rag.download_model()
1535
+ print()
1536
+
1537
+ # Load embedder
1538
+ rag.load_embedder()
1539
+ print()
1540
+
1541
+ # Step 4: Build FAISS index
1542
+ print("Step 4: Building FAISS index...")
1543
+ rag.build_index_from_json(json_data)
1544
+ print()
1545
+
1546
+ # Step 5: Save index
1547
+ print("Step 5: Saving index...")
1548
+ rag.save_index("faiss_index.idx", "faiss_metadata.pkl")
1549
+ print()
1550
+
1551
+ # Step 6: Interactive Fire Evacuation Query System
1552
+ print("="*80)
1553
+ print("[FIRE] FIRE EVACUATION RESCUE SYSTEM - INTERACTIVE MODE")
1554
+ print("="*80)
1555
+ print()
1556
+ print("You are now connected to the Fire Evacuation RAG System.")
1557
+ print("Ask questions about evacuation routes, building safety, and emergency situations.")
1558
+ print()
1559
+ print("Example questions you can ask:")
1560
+ print(" - 'What is the safest evacuation route from R1?'")
1561
+ print(" - 'Which rooms have fire detected?'")
1562
+ print(" - 'Are there any oxygen cylinders near fire?'")
1563
+ print(" - 'What is the danger level in room R2?'")
1564
+ print(" - 'Which exits are blocked?'")
1565
+ print(" - 'Show me all passable routes from R3'")
1566
+ print(" - 'What rooms have high carbon monoxide levels?'")
1567
+ print(" - 'Find the route with lowest danger score'")
1568
+ print()
1569
+ print("Commands:")
1570
+ print(" - Type 'help' to see example questions")
1571
+ print(" - Type 'status' to see building status summary")
1572
+ print(" - Type 'mode' to see/change reasoning mode")
1573
+ print(" - Type 'decode' to see/change decoding strategy")
1574
+ print(" - Type 'compare' to compare all reasoning modes for your question")
1575
+ print(" - Type 'quit' or 'exit' to exit")
1576
+ print()
1577
+ print("Reasoning Modes Available:")
1578
+ print(" - standard: Basic prompt-based generation")
1579
+ print(" - chain_of_thought: Step-by-step reasoning (CoT)")
1580
+ print(" - tree_of_thoughts: Multiple reasoning paths (ToT)")
1581
+ print(" - reflexion: Self-reflection and improvement")
1582
+ print(" - cot_with_tools: CoT with tool integration")
1583
+ print()
1584
+ print("Decoding Strategies Available:")
1585
+ print(" - greedy: Deterministic, highest probability")
1586
+ print(" - sampling: Random sampling with temperature")
1587
+ print(" - beam_search: Explores multiple paths")
1588
+ print(" - nucleus: Top-p (nucleus) sampling")
1589
+ print(" - temperature: Temperature-based sampling")
1590
+ print("="*80)
1591
+ print()
1592
+
1593
+ # Interactive question loop
1594
+ while True:
1595
+ try:
1596
+ # Get user input
1597
+ user_input = input("\n[ALERT] FIRE EVACUATION QUERY: ").strip()
1598
+
1599
+ # Handle empty input
1600
+ if not user_input:
1601
+ continue
1602
+
1603
+ # Handle exit commands
1604
+ if user_input.lower() in ['quit', 'exit', 'q']:
1605
+ print("\n[OK] Exiting Fire Evacuation System. Stay safe!")
1606
+ break
1607
+
1608
+ # Handle help command
1609
+ if user_input.lower() == 'help':
1610
+ print("\n[HELP] Example Questions:")
1611
+ print(" - What is the safest evacuation route from [room_id]?")
1612
+ print(" - Which rooms have fire detected?")
1613
+ print(" - Are there any oxygen cylinders near fire?")
1614
+ print(" - What is the danger level in room [room_id]?")
1615
+ print(" - Which exits are blocked?")
1616
+ print(" - Show me all passable routes from [room_id]")
1617
+ print(" - What rooms have high carbon monoxide levels?")
1618
+ print(" - Find the route with lowest danger score")
1619
+ print(" - Which rooms are not passable?")
1620
+ print(" - What is the temperature in room [room_id]?")
1621
+ print(" - Are there any flashover risks?")
1622
+ print(" - Which routes avoid fire zones?")
1623
+ continue
1624
+
1625
+ # Handle status command
1626
+ if user_input.lower() == 'status':
1627
+ print("\n[STATUS] Building Status Summary:")
1628
+ print(f" - Total rooms indexed: {len(rag.documents)}")
1629
+ print(f" - FAISS index size: {rag.index.ntotal if rag.index else 0} vectors")
1630
+ print(f" - Model: {rag.model_name}")
1631
+ print(f" - Model loaded: {'[OK]' if rag.pipe else '✗'}")
1632
+ print(f" - Embedder loaded: {'[OK]' if rag.embedder else '✗'}")
1633
+ print(f" - Index built: {'[OK]' if rag.index else '✗'}")
1634
+ print(f" - Current reasoning mode: {rag.reasoning_mode.value}")
1635
+ print(f" - Current decoding strategy: {rag.decoding_strategy.value}")
1636
+ continue
1637
+
1638
+ # Handle mode command
1639
+ if user_input.lower() == 'mode':
1640
+ print("\n[REASONING MODES] Available modes:")
1641
+ for mode in ReasoningMode:
1642
+ current = " (current)" if rag.reasoning_mode == mode else ""
1643
+ print(f" - {mode.value}{current}")
1644
+ print("\nTo change mode, type: mode <mode_name>")
1645
+ print("Example: mode chain_of_thought")
1646
+ continue
1647
+
1648
+ # Handle mode change
1649
+ if user_input.lower().startswith('mode '):
1650
+ mode_name = user_input.lower().replace('mode ', '').strip()
1651
+ try:
1652
+ new_mode = ReasoningMode(mode_name)
1653
+ rag.set_reasoning_mode(new_mode)
1654
+ except ValueError:
1655
+ print(f"[ERROR] Invalid mode: {mode_name}")
1656
+ print("Valid modes: standard, chain_of_thought, tree_of_thoughts, reflexion, cot_with_tools")
1657
+ continue
1658
+
1659
+ # Handle decode command
1660
+ if user_input.lower() == 'decode':
1661
+ print("\n[DECODING STRATEGIES] Available strategies:")
1662
+ for strategy in DecodingStrategy:
1663
+ current = " (current)" if rag.decoding_strategy == strategy else ""
1664
+ print(f" - {strategy.value}{current}")
1665
+ print("\nTo change strategy, type: decode <strategy_name>")
1666
+ print("Example: decode nucleus")
1667
+ continue
1668
+
1669
+ # Handle decode change
1670
+ if user_input.lower().startswith('decode '):
1671
+ strategy_name = user_input.lower().replace('decode ', '').strip()
1672
+ try:
1673
+ new_strategy = DecodingStrategy(strategy_name)
1674
+ rag.set_decoding_strategy(new_strategy)
1675
+ except ValueError:
1676
+ print(f"[ERROR] Invalid strategy: {strategy_name}")
1677
+ print("Valid strategies: greedy, sampling, beam_search, nucleus, temperature")
1678
+ continue
1679
+
1680
+ # Handle compare command
1681
+ if user_input.lower() == 'compare':
1682
+ print("\n[COMPARE] Enter a question to compare all reasoning modes:")
1683
+ compare_question = input("Question: ").strip()
1684
+ if compare_question:
1685
+ print("\n🔍 Comparing all reasoning modes...")
1686
+ print("-" * 80)
1687
+ comparison = rag.compare_reasoning_modes(compare_question, k=3)
1688
+
1689
+ print(f"\n[QUESTION] {comparison['question']}")
1690
+ print("\n[COMPARISON] Answers from different reasoning modes:\n")
1691
+
1692
+ for mode_name, mode_result in comparison['answers'].items():
1693
+ print(f"--- {mode_name.upper()} ---")
1694
+ if 'error' in mode_result:
1695
+ print(f"Error: {mode_result['error']}")
1696
+ else:
1697
+ print(f"Length: {mode_result['length']} characters")
1698
+ print(f"Answer: {mode_result['answer'][:200]}...")
1699
+ print()
1700
+
1701
+ print("-" * 80)
1702
+ continue
1703
+
1704
+ # Process the query
1705
+ print("\n🔍 Searching evacuation data and generating response...")
1706
+ print("-" * 80)
1707
+
1708
+ result = rag.query(user_input, k=3)
1709
+
1710
+ # Display reasoning info
1711
+ print(f"\n[REASONING] Mode: {result.get('reasoning_mode', 'N/A')} | Strategy: {result.get('decoding_strategy', 'N/A')}")
1712
+
1713
+ # Display reasoning steps if available
1714
+ if 'reasoning_steps' in result and result.get('reasoning_steps'):
1715
+ print(f"\n[REASONING STEPS] Step-by-step reasoning process:")
1716
+ print("=" * 80)
1717
+ # Format and display reasoning steps
1718
+ reasoning_text = result['reasoning_steps']
1719
+
1720
+ # Clean up reasoning text - remove verbose parts
1721
+ if "However, please note" in reasoning_text:
1722
+ reasoning_text = reasoning_text.split("However, please note")[0]
1723
+ if "Additionally," in reasoning_text:
1724
+ reasoning_text = reasoning_text.split("Additionally,")[0]
1725
+ if "Let me know" in reasoning_text:
1726
+ reasoning_text = reasoning_text.split("Let me know")[0]
1727
+
1728
+ # Parse and format reasoning based on mode
1729
+ if result.get('reasoning_mode') == 'chain_of_thought':
1730
+ # CoT format - look for numbered steps or reasoning sections
1731
+ lines = [l.strip() for l in reasoning_text.split('\n') if l.strip()]
1732
+ displayed_lines = []
1733
+ for line in lines:
1734
+ # Stop at ANSWER or verbose markers
1735
+ if any(marker in line.upper() for marker in ['ANSWER', 'HOWEVER', 'ADDITIONALLY', 'LET ME KNOW']):
1736
+ break
1737
+ # Show numbered steps or key reasoning lines
1738
+ if (line.startswith(('1.', '2.', '3.', '4.', '5.', 'Step', 'STEP', '-')) or
1739
+ any(keyword in line.lower() for keyword in ['analyze', 'identify', 'evaluate', 'conclude', 'risk', 'safety', 'danger'])):
1740
+ displayed_lines.append(line)
1741
+
1742
+ if displayed_lines:
1743
+ for line in displayed_lines[:10]: # Limit to 10 lines
1744
+ print(f" {line}")
1745
+ else:
1746
+ # Fallback: show first few meaningful lines
1747
+ meaningful = [l for l in lines[:5] if len(l) > 20 and not l.startswith('ANSWER')]
1748
+ for line in meaningful:
1749
+ print(f" {line}")
1750
+
1751
+ elif result.get('reasoning_mode') == 'tree_of_thoughts':
1752
+ # ToT format - show approaches and evaluation
1753
+ lines = reasoning_text.split('\n')
1754
+ in_section = False
1755
+ for line in lines:
1756
+ line = line.strip()
1757
+ if not line:
1758
+ if in_section:
1759
+ print() # Add spacing
1760
+ continue
1761
+ if line.startswith("APPROACH"):
1762
+ print(f"\n 🔍 {line}")
1763
+ in_section = True
1764
+ elif line.startswith("EVALUATION"):
1765
+ print(f"\n ✅ {line}")
1766
+ in_section = True
1767
+ else:
1768
+ if line and not line.startswith("---"):
1769
+ print(f" {line}")
1770
+
1771
+ elif result.get('reasoning_mode') == 'reflexion':
1772
+ # Reflexion format - show iterations
1773
+ lines = reasoning_text.split('\n')
1774
+ for line in lines:
1775
+ line = line.strip()
1776
+ if not line:
1777
+ continue
1778
+ if line.startswith("INITIAL ANSWER"):
1779
+ print(f"\n 📝 {line}")
1780
+ elif line.startswith("ITERATION"):
1781
+ print(f"\n 🔄 {line}")
1782
+ elif line.startswith("REFLECTION"):
1783
+ print(f" 💭 {line}")
1784
+ else:
1785
+ if line and not line.startswith("---"):
1786
+ print(f" {line}")
1787
+
1788
+ elif result.get('reasoning_mode') == 'cot_with_tools':
1789
+ # CoT with Tools format
1790
+ lines = reasoning_text.split('\n')
1791
+ for line in lines:
1792
+ line = line.strip()
1793
+ if not line:
1794
+ continue
1795
+ if line.startswith("TOOL CALL"):
1796
+ print(f"\n 🔧 {line}")
1797
+ elif line.startswith("REASONING WITH TOOLS"):
1798
+ print(f"\n 🧠 {line}")
1799
+ else:
1800
+ if line and not line.startswith("---"):
1801
+ print(f" {line}")
1802
+
1803
+ else:
1804
+ # Standard or fallback: display with basic formatting
1805
+ lines = reasoning_text.split('\n')
1806
+ for line in lines:
1807
+ line = line.strip()
1808
+ if line:
1809
+ print(f" {line}")
1810
+
1811
+ print("=" * 80)
1812
+
1813
+ # Display answer
1814
+ print(f"\n[ANSWER] FINAL ANSWER:")
1815
+ print(result['answer'])
1816
+
1817
+ # Display sources
1818
+ print(f"\n[SOURCES] SOURCES ({len(result['sources'])} relevant documents found):")
1819
+ for i, source in enumerate(result['sources'], 1):
1820
+ source_type = source.get('type', 'N/A')
1821
+ room_id = source.get('room_id', 'N/A')
1822
+ route_id = source.get('route_id', 'N/A')
1823
+ relevance = source.get('relevance_score', 0)
1824
+
1825
+ if source_type == 'room':
1826
+ print(f" {i}. Room: {room_id} (Relevance: {relevance:.2f})")
1827
+ elif source_type == 'route':
1828
+ print(f" {i}. Route: {route_id} (Relevance: {relevance:.2f})")
1829
+ else:
1830
+ print(f" {i}. {source_type} (Relevance: {relevance:.2f})")
1831
+
1832
+ print("-" * 80)
1833
+
1834
+ except KeyboardInterrupt:
1835
+ print("\n\n[OK] Exiting Fire Evacuation System. Stay safe!")
1836
+ break
1837
+ except Exception as e:
1838
+ print(f"\n[ERROR] Error processing query: {e}")
1839
+ print("Please try again or type 'help' for examples.")
1840
+
1841
+ print("\n" + "="*80)
1842
+ print("Fire Evacuation System session ended.")
1843
+ print("="*80)
1844
+
1845
+
1846
if __name__ == "__main__":
    # Script entry point: builds the evacuation RAG system and starts the
    # interactive query loop defined in main().
    main()
1848
+
llm_route_decider.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Realtime LLM-based route decision script for mock sensor data streams.
3
+
4
+ Designed to run outside the main project. It reads sensor payloads (JSON per line),
5
+ formats a prompt for a Llama-family model (e.g., via Ollama), and logs the AI's
6
+ route recommendation plus warnings/actions for each update.
7
+ """
8
+ import argparse
9
+ import json
10
+ import sys
11
+ import textwrap
12
+ import time
13
+ from pathlib import Path
14
+ from typing import Dict, Any, List, Optional
15
+
16
+ import requests
17
+
18
+
19
+ PROMPT_TEMPLATE = """You are an emergency evacuation strategist using live sensor and route telemetry.
20
+ Evaluate the candidate routes and choose the safest *passable* option.
21
+
22
+ Decision rules:
23
+ 1. Reject any route marked passable=false or containing simultaneous fire and oxygen-cylinder hazards.
24
+ 2. Avoid rooms with biohazard alerts unless respirators are available; penalize heavily if no PPE.
25
+ 3. Prefer routes with lower average danger and lower max danger; break ties by shorter path_length.
26
+ 4. Flag any oxygen cylinders near heat (>40°C) or fire for operator intervention.
27
+ 5. Provide backup route only if it is passable and within 15 danger points of the best route.
28
+
29
+ Data:
30
+ {data}
31
+
32
+ Respond in JSON with this schema:
33
+ {{
34
+ "recommended_route": "<route_id>",
35
+ "confidence": "high|medium|low",
36
+ "warnings": ["..."],
37
+ "actions": ["..."],
38
+ "backup_route": "<route_id or null>"
39
+ }}
40
+
41
+ If no safe route exists, set recommended_route=null and explain why in warnings.
42
+ """
43
+
44
+
45
def load_event_stream(path: Path):
    """Lazily yield one parsed JSON object per non-empty line of *path*.

    Raises:
        ValueError: if a non-empty line is not valid JSON; the original
            JSONDecodeError is chained as the cause.
    """
    with path.open("r", encoding="utf-8") as stream:
        for lineno, raw in enumerate(stream, start=1):
            raw = raw.strip()
            if not raw:
                continue  # tolerate blank separator lines between events
            try:
                parsed = json.loads(raw)
            except json.JSONDecodeError as exc:
                raise ValueError(f"Invalid JSON on line {lineno}: {exc}") from exc
            yield parsed
56
+
57
+
58
def format_payload(event: Dict[str, Any]) -> str:
    """Render *event* as human-readable JSON for embedding in the LLM prompt."""
    rendered = json.dumps(event, indent=2, ensure_ascii=False)
    return rendered
61
+
62
+
63
def call_llm(model: str, prompt: str, server_url: str) -> str:
    """
    Send *prompt* to a Llama-family model behind an Ollama-compatible endpoint
    and return the generated text.

    server_url defaults (at the CLI layer) to http://localhost:11434/api/generate.

    Raises:
        requests.HTTPError: if the endpoint returns a non-2xx status.
    """
    response = requests.post(
        server_url,
        json={"model": model, "prompt": prompt, "stream": False},
        timeout=120,
    )
    response.raise_for_status()
    body = response.json()

    # Ollama's shape is {"response": "...", "done": true, ...}.
    if "response" in body:
        return body["response"]
    # Other compatible APIs may use "text"; as a last resort return raw JSON.
    return body.get("text") or json.dumps(body)
83
+
84
+
85
def parse_llm_output(text_output: str) -> Dict[str, Any]:
    """
    Parse the model's reply into a decision dict.

    Fix: LLMs frequently wrap JSON in a markdown code fence (```json ... ```),
    which previously forced the non-JSON fallback even though the payload was
    valid. Fences are now stripped before parsing. Anything still unparseable
    yields a low-confidence error record so the caller never sees an exception.
    """
    text_output = text_output.strip()
    candidate = _strip_markdown_fence(text_output)
    try:
        return json.loads(candidate)
    except json.JSONDecodeError:
        return {
            "recommended_route": None,
            "confidence": "low",
            "warnings": ["LLM returned non-JSON output", text_output],
            "actions": [],
            "backup_route": None,
        }


def _strip_markdown_fence(text: str) -> str:
    """Remove a surrounding ``` / ```json fence if present; otherwise return *text* unchanged."""
    if not text.startswith("```"):
        return text
    lines = text.splitlines()[1:]  # drop the opening fence line (``` or ```json)
    if lines and lines[-1].strip().startswith("```"):
        lines = lines[:-1]  # drop the closing fence
    return "\n".join(lines).strip()
98
+
99
+
100
def process_event(event: Dict[str, Any], args) -> Dict[str, Any]:
    """Run one sensor event through the LLM and package the structured decision.

    Returns a record with the event timestamp, the parsed decision, and —
    only when ``args.debug`` is set — the raw prompt/output for inspection.
    """
    prompt = PROMPT_TEMPLATE.format(data=format_payload(event))
    raw_output = call_llm(args.model, prompt, args.server_url)
    record = {
        "timestamp": event.get("timestamp_sec"),
        "decision": parse_llm_output(raw_output),
    }
    # Keep the raw exchange only in debug mode to avoid noisy logs.
    record["raw_prompt"] = prompt if args.debug else None
    record["raw_output"] = raw_output if args.debug else None
    return record
112
+
113
+
114
def log_decision(result: Dict[str, Any]):
    """Print a concise, wrapped summary of one routing decision to stdout."""
    decision = result["decision"]
    ts = result["timestamp"]
    header = f"[t={ts}s]" if ts is not None else "[t=?]"
    print(f"{header} recommended_route={decision.get('recommended_route')} "
          f"confidence={decision.get('confidence')}")
    # Warnings and actions share the same rendering; emit each non-empty list.
    for label in ("warnings", "actions"):
        items = decision.get(label)
        if not items:
            continue
        print(f" {label}:")
        for item in items:
            wrapped = textwrap.fill(item, width=78, subsequent_indent=" ")
            print(f" - {wrapped}")
    if decision.get("backup_route"):
        print(f" backup_route: {decision['backup_route']}")
    print()
134
+
135
+
136
def build_arg_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the realtime decision loop."""
    parser = argparse.ArgumentParser(
        description="Stream sensor events to a Llama model for route decisions.")
    parser.add_argument(
        "--input", required=True,
        help="Path to JSONL file containing mock sensor events.")
    parser.add_argument(
        "--model", default="llama3",
        help="Model name served by Ollama (default: llama3).")
    parser.add_argument(
        "--server-url", default="http://localhost:11434/api/generate",
        help="Generation endpoint URL.")
    parser.add_argument(
        "--delay", type=float, default=0.0,
        help="Seconds to wait between events (simulate realtime).")
    parser.add_argument(
        "--debug", action="store_true",
        help="Print raw prompt/output for troubleshooting.")
    return parser
150
+
151
+
152
def main(argv: Optional[List[str]] = None):
    """Entry point: replay the event stream and log one LLM decision per event.

    Exits with status 1 on any unexpected error; Ctrl-C stops the replay
    gracefully with a note on stderr.
    """
    args = build_arg_parser().parse_args(argv)

    input_path = Path(args.input)
    if not input_path.exists():
        parser_error = f"Input file not found: {input_path}"
        build_arg_parser().error(parser_error) if False else None  # unreachable guard form avoided
    if not input_path.exists():
        pass

    try:
        for event in load_event_stream(input_path):
            result = process_event(event, args)
            log_decision(result)

            if args.debug:
                print("----- RAW PROMPT -----")
                print(result["raw_prompt"])
                print("----- RAW OUTPUT -----")
                print(result["raw_output"])
                print("----------------------\n")

            if args.delay > 0:
                time.sleep(args.delay)  # simulate realtime pacing
    except KeyboardInterrupt:
        print("\nInterrupted by user.", file=sys.stderr)
    except Exception as exc:
        # CLI boundary: surface the error and fail the process.
        print(f"ERROR: {exc}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
183
+
pathfinding.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pathfinding and Risk Assessment Module
3
+ Uses A* algorithm with custom risk-based heuristics
4
+ """
5
+ import heapq
6
+ from typing import Dict, List, Tuple, Optional
7
+ from .floor_plan import FloorPlan
8
+ from .sensor_system import SensorSystem, SensorReading
9
+
10
+
11
class PathNode:
    """A* search node pairing a room id with path costs.

    Attributes:
        room_id: Identifier of the room this node represents.
        g_cost: Accumulated cost from the start node.
        h_cost: Heuristic estimate of remaining cost to the goal.
        f_cost: g_cost + h_cost, the priority used by the open set.
        parent: Previous PathNode on the best known path (None for the start).
    """

    def __init__(self, room_id: str, g_cost: float, h_cost: float, parent=None):
        self.room_id = room_id
        self.g_cost = g_cost  # Cost from start
        self.h_cost = h_cost  # Heuristic cost to goal
        self.f_cost = g_cost + h_cost  # Total cost
        self.parent = parent

    def __lt__(self, other):
        # Ordering for heapq: lower total estimated cost pops first.
        return self.f_cost < other.f_cost

    def __eq__(self, other):
        # Nodes are "equal" when they refer to the same room, regardless of cost.
        # Fix: comparing against a non-PathNode previously raised AttributeError;
        # returning NotImplemented lets Python fall back to its default handling.
        if not isinstance(other, PathNode):
            return NotImplemented
        return self.room_id == other.room_id

    def __hash__(self):
        # Fix: defining __eq__ implicitly set __hash__ = None, making nodes
        # unhashable. Restore hashing consistent with __eq__ (same room_id
        # implies same hash) so nodes can live in sets/dicts if needed.
        return hash(self.room_id)
25
+
26
+
27
class RiskAssessment:
    """Assesses risk for different evacuation routes."""

    @staticmethod
    def calculate_path_risk(path: List[str], sensor_system: "SensorSystem",
                            floor_plan: "FloorPlan") -> Dict:
        """
        Calculate a comprehensive risk assessment for a path.

        Args:
            path: Ordered list of room IDs from start to exit.
            sensor_system: Provider of per-room SensorReading objects.
            floor_plan: Provider of static room data (e.g. oxygen cylinders).

        Returns dict with:
            - total_danger / max_danger / avg_danger: aggregate danger scores
            - max_danger_location: room id with the highest danger (or None)
            - path_length: number of rooms on the path
            - has_fire: whether the path goes through fire
            - has_oxygen_hazard: whether the path passes an oxygen cylinder
            - passable: whether every monitored room on the path is passable
            - risk_factors: human-readable list of specific risks
            - danger_scores: per-room danger scores (monitored rooms only)
        """
        if not path:
            # Bug fix: the empty-path case previously returned only
            # {"passable", "total_danger"}; callers reading any other
            # documented key (has_fire, avg_danger, ...) raised KeyError.
            return {
                "total_danger": 100.0,
                "max_danger": 100.0,
                "max_danger_location": None,
                "avg_danger": 100.0,
                "path_length": 0,
                "has_fire": False,
                "has_oxygen_hazard": False,
                "passable": False,
                "risk_factors": ["Empty path"],
                "danger_scores": [],
            }

        danger_scores = []
        risk_factors = []
        has_fire = False
        has_oxygen_hazard = False
        max_danger_location = None
        max_danger_score = 0.0

        for room_id in path:
            sensor = sensor_system.get_sensor_reading(room_id)
            room = floor_plan.get_room(room_id)

            # Rooms without a sensor contribute nothing (optimistic default).
            if sensor:
                danger = sensor.calculate_danger_score()
                danger_scores.append(danger)

                if danger > max_danger_score:
                    max_danger_score = danger
                    max_danger_location = room_id

                # Fire is the dominant hazard.
                if sensor.fire_detected:
                    has_fire = True
                    risk_factors.append(f"Fire detected in {room_id}")

                # Oxygen cylinder: explosion risk escalates near heat/fire.
                if room and room.has_oxygen_cylinder:
                    has_oxygen_hazard = True
                    if sensor.temperature > 40 or sensor.fire_detected:
                        danger_scores[-1] += 15  # Add explosion risk
                        risk_factors.append(f"Oxygen cylinder explosion risk in {room_id}")
                    else:
                        risk_factors.append(f"Oxygen cylinder present in {room_id}")

                if sensor.smoke_level > 0.5:
                    risk_factors.append(f"Heavy smoke in {room_id}")

                if sensor.temperature > 60:
                    risk_factors.append(f"High temperature ({sensor.temperature:.1f}°C) in {room_id}")

                if sensor.oxygen_level < 19.5:
                    risk_factors.append(f"Low oxygen ({sensor.oxygen_level:.1f}%) in {room_id}")

                # Toxic gas warnings.
                if sensor.carbon_monoxide > 50:
                    risk_factors.append(f"TOXIC CO ({sensor.carbon_monoxide:.0f} ppm) in {room_id}")
                if sensor.carbon_dioxide > 5000:
                    risk_factors.append(f"High CO2 ({sensor.carbon_dioxide:.0f} ppm) in {room_id}")
                if sensor.hydrogen_cyanide > 20:
                    risk_factors.append(f"TOXIC HCN ({sensor.hydrogen_cyanide:.1f} ppm) in {room_id}")

                # Flashover/backdraft warnings.
                if sensor.flashover_risk > 0.7:
                    risk_factors.append(f"CRITICAL: Flashover risk ({sensor.flashover_risk*100:.0f}%) in {room_id}")
                if sensor.backdraft_risk > 0.6:
                    risk_factors.append(f"CRITICAL: Backdraft risk ({sensor.backdraft_risk*100:.0f}%) in {room_id}")

                # Crowd density warnings.
                if sensor.occupancy_density > 0.8:
                    risk_factors.append(f"High crowd density ({sensor.occupancy_density*100:.0f}%) in {room_id}")

                # Infrastructure failures.
                if not sensor.exit_accessible:
                    risk_factors.append(f"Exit BLOCKED in {room_id}")
                if not sensor.stairwell_clear:
                    risk_factors.append(f"Stairwell BLOCKED in {room_id}")
                if not sensor.emergency_lighting:
                    risk_factors.append(f"No emergency lighting in {room_id}")

                # Time pressure.
                if sensor.time_since_fire_start > 300:
                    risk_factors.append(f"Time pressure: {sensor.time_since_fire_start//60} min elapsed")

                if not sensor.is_passable():
                    risk_factors.append(f"Path blocked at {room_id}")

        total_danger = sum(danger_scores)
        avg_danger = total_danger / len(danger_scores) if danger_scores else 0

        # Passable only if every monitored segment is individually passable
        # (readings fetched once per room to avoid double lookups).
        readings = [sensor_system.get_sensor_reading(rid) for rid in path]
        passable = all(r.is_passable() for r in readings if r)

        return {
            "total_danger": total_danger,
            "max_danger": max_danger_score,
            "max_danger_location": max_danger_location,
            "avg_danger": avg_danger,
            "path_length": len(path),
            "has_fire": has_fire,
            "has_oxygen_hazard": has_oxygen_hazard,
            "passable": passable,
            "risk_factors": risk_factors,
            "danger_scores": danger_scores
        }

    @staticmethod
    def get_risk_level(avg_danger: float) -> str:
        """Map an average danger score to a coarse risk label."""
        if avg_danger < 20:
            return "LOW"
        elif avg_danger < 40:
            return "MODERATE"
        elif avg_danger < 60:
            return "HIGH"
        else:
            return "CRITICAL"

    @staticmethod
    def recommend_path(paths: List[Tuple[List[str], Dict]]) -> Optional[Tuple[List[str], Dict]]:
        """
        Recommend the best path based on risk assessment.

        Args:
            paths: List of (path, risk_assessment) tuples.

        Returns:
            Best (path, risk_assessment) tuple, or None when *paths* is empty.
            If no path is fully passable, the least dangerous one is returned
            as a last resort.
        """
        if not paths:
            return None

        # Filter to only passable paths.
        passable_paths = [(p, r) for p, r in paths if r["passable"]]

        if not passable_paths:
            # No fully passable paths; fall back to the least dangerous.
            return min(paths, key=lambda x: x[1]["total_danger"])

        def score_path(path_info):
            # Lower score is better.
            path, risk = path_info
            score = 0

            # Heavily penalize fire.
            if risk["has_fire"]:
                score += 100

            # Oxygen hazard is serious but less so than fire.
            if risk["has_oxygen_hazard"]:
                score += 30

            # Accumulated danger along the route.
            score += risk["total_danger"]

            # Slight preference for shorter paths.
            score += risk["path_length"] * 2

            # Penalize a single very dangerous choke point.
            score += risk["max_danger"] * 0.5

            return score

        return min(passable_paths, key=score_path)
202
+
203
+
204
class PathFinder:
    """Find optimal evacuation paths via A* search with risk-weighted edge costs."""

    def __init__(self, floor_plan: FloorPlan, sensor_system: SensorSystem):
        self.floor_plan = floor_plan
        self.sensor_system = sensor_system

    def find_all_evacuation_routes(self, start: str) -> List[Tuple[str, List[str], Dict]]:
        """
        Compute a route and its risk profile for every exit reachable from *start*.

        Returns: list of (exit_id, path, risk_assessment) tuples; unreachable
        exits are simply omitted.
        """
        routes = []
        for exit_id in self.floor_plan.get_all_exits():
            path = self.find_path(start, exit_id)
            if path is None:
                continue  # this exit cannot be reached from start
            risk = RiskAssessment.calculate_path_risk(
                path, self.sensor_system, self.floor_plan
            )
            routes.append((exit_id, path, risk))
        return routes

    def find_path(self, start: str, goal: str) -> Optional[List[str]]:
        """
        A* search from *start* to *goal* over the floor-plan graph.

        Edge cost = physical distance + a risk surcharge for entering the
        neighbouring room, so the search naturally detours around danger.

        Returns: the room-id sequence from start to goal, or None if no
        path exists (or either endpoint is unknown).
        """
        if start not in self.floor_plan.rooms or goal not in self.floor_plan.rooms:
            return None

        frontier = []              # min-heap of PathNode, ordered by f_cost
        settled = set()            # rooms whose best cost is final
        best_g = {start: 0}        # cheapest known g_cost per room

        heapq.heappush(frontier, PathNode(start, 0, self._heuristic(start, goal)))

        while frontier:
            node = heapq.heappop(frontier)

            if node.room_id == goal:
                return self._reconstruct_path(node)

            if node.room_id in settled:
                continue  # stale heap entry from an earlier, worse push
            settled.add(node.room_id)

            for neighbor_id, base_distance in self.floor_plan.get_neighbors(node.room_id):
                if neighbor_id in settled:
                    continue

                # Risk-adjusted traversal cost for stepping into the neighbour.
                tentative = node.g_cost + base_distance + self._calculate_risk_cost(neighbor_id)

                if neighbor_id not in best_g or tentative < best_g[neighbor_id]:
                    best_g[neighbor_id] = tentative
                    heapq.heappush(
                        frontier,
                        PathNode(neighbor_id, tentative,
                                 self._heuristic(neighbor_id, goal), node),
                    )

        return None  # frontier exhausted without reaching the goal

    def _heuristic(self, room_id1: str, room_id2: str) -> float:
        """Manhattan distance between the rooms' grid positions (0 if either is unknown)."""
        first = self.floor_plan.get_room(room_id1)
        second = self.floor_plan.get_room(room_id2)
        if first is None or second is None:
            return 0
        (x1, y1), (x2, y2) = first.position, second.position
        return abs(x2 - x1) + abs(y2 - y1)

    def _calculate_risk_cost(self, room_id: str) -> float:
        """
        Risk surcharge for stepping into *room_id*.

        Unmonitored rooms cost nothing extra; otherwise the surcharge is the
        danger score scaled down by 10 plus targeted penalties for fire,
        oxygen cylinders near heat, toxic gases, flashover risk, blocked
        egress, crowding, lighting failure, and impassable conditions.
        """
        sensor = self.sensor_system.get_sensor_reading(room_id)
        if sensor is None:
            return 0.0

        # Baseline: danger 0-20 -> minimal, 20-50 -> moderate, 50+ -> high.
        cost = sensor.calculate_danger_score() / 10.0

        if sensor.fire_detected:
            cost += 20.0  # never walk into flames cheaply

        # Oxygen cylinder + heat/fire = explosion risk.
        room = self.floor_plan.get_room(room_id)
        if room and room.has_oxygen_cylinder:
            if sensor.temperature > 40 or sensor.fire_detected:
                cost += 10.0

        # Toxic atmosphere penalties.
        if sensor.carbon_monoxide > 50:
            cost += 15.0
        if sensor.carbon_dioxide > 5000:
            cost += 10.0

        # Flashover risk penalty, tiered.
        if sensor.flashover_risk > 0.7:
            cost += 25.0
        elif sensor.flashover_risk > 0.5:
            cost += 15.0

        # Blocked egress infrastructure.
        if not sensor.exit_accessible:
            cost += 30.0
        if not sensor.stairwell_clear:
            cost += 20.0

        # Dense crowds slow movement.
        if sensor.occupancy_density > 0.8:
            cost += sensor.occupancy_density * 20.0

        if not sensor.emergency_lighting:
            cost += 5.0

        # Impassable areas are very expensive but deliberately not infinite,
        # so a desperate route can still be produced when nothing else exists.
        if not sensor.is_passable():
            cost += 50.0

        return cost

    def _reconstruct_path(self, node: PathNode) -> List[str]:
        """Walk parent links from *node* back to the start and return the forward path."""
        trail = []
        step = node
        while step is not None:
            trail.append(step.room_id)
            step = step.parent
        trail.reverse()
        return trail
367
+
sensor_system.py ADDED
@@ -0,0 +1,439 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Sensor System Module - Simulates and manages sensor data for fire, smoke, temperature, oxygen
3
+ """
4
+ import random
5
+ import numpy as np
6
+ from typing import Dict, List
7
+ from .floor_plan import FloorPlan
8
+
9
+
10
class SensorReading:
    """A full set of simulated sensor values for a single location.

    All attributes are plain mutable fields initialised to safe/normal
    baselines; the simulation layer overwrites them as scenarios evolve.
    """

    def __init__(self, location_id: str):
        self.location_id = location_id

        # --- Core hazard readings ---
        self.fire_detected = False
        self.smoke_level = 0.0            # 0.0 (clear) .. 1.0 (heavy smoke)
        self.temperature = 20.0           # Celsius
        self.oxygen_level = 21.0          # percent; ~21% is normal air
        self.visibility = 100.0           # percent; 100 = clear, 0 = none
        self.structural_integrity = 100.0 # percent

        # --- Fire behaviour ---
        self.fire_growth_rate = 0.0       # m^2/min
        self.flashover_risk = 0.0         # 0.0 .. 1.0
        self.backdraft_risk = 0.0         # 0.0 .. 1.0
        self.heat_radiation = 0.0         # kW/m^2
        self.fire_type = "none"           # e.g. electrical, chemical, wood

        # --- Toxic gases ---
        self.carbon_monoxide = 0.0        # ppm; normal < 9
        self.carbon_dioxide = 400.0       # ppm; normal ~400
        self.hydrogen_cyanide = 0.0       # ppm
        self.hydrogen_chloride = 0.0      # ppm

        # --- Environment ---
        self.wind_direction = 0.0         # degrees
        self.wind_speed = 0.0             # m/s
        self.air_pressure = 1013.25       # hPa (standard)
        self.humidity = 50.0              # percent

        # --- Human factors ---
        self.occupancy_density = 0.0      # 0.0 empty .. 1.0 full
        self.mobility_limitations = 0     # people with mobility issues
        self.panic_level = 0.0            # 0.0 .. 1.0
        self.evacuation_progress = 0.0    # percent evacuated

        # --- Building infrastructure ---
        self.sprinkler_active = False
        self.emergency_lighting = True
        self.elevator_available = True
        self.stairwell_clear = True
        self.exit_accessible = True
        self.exit_capacity = 100          # people per minute
        self.ventilation_active = True

        # --- Timing ---
        self.time_since_fire_start = 0    # seconds
        self.estimated_time_to_exit = 0   # seconds

        # --- Communication ---
        self.emergency_comm_working = True
        self.wifi_signal_strength = 100.0 # percent

        # --- External conditions ---
        self.weather_temperature = 20.0   # Celsius
        self.weather_rain = False
        self.time_of_day = 12             # 0-23 hours
        self.day_of_week = 1              # 0=Monday .. 6=Sunday

    def calculate_danger_score(self) -> float:
        """
        Combine all readings into a single danger score.

        Returns: 0.0 (safe) up to a hard cap of 100.0 (extremely dangerous).
        """
        danger = 0.0

        # Core hazards: fire dominates, then smoke, heat, hypoxia,
        # poor visibility, and weakened structure.
        if self.fire_detected:
            danger += 40.0
        danger += self.smoke_level * 20.0
        if self.temperature > 60:
            danger += min((self.temperature - 60) / 2, 20.0)
        if self.oxygen_level < 19.5:
            danger += (19.5 - self.oxygen_level) * 5.0
        danger += (100 - self.visibility) * 0.1
        if self.structural_integrity < 80:
            danger += (100 - self.structural_integrity) * 0.2

        # Toxic gases: CO (deadly > 50 ppm), CO2 (> 5000 ppm),
        # HCN (deadly > 20 ppm), HCl (> 5 ppm).
        if self.carbon_monoxide > 50:
            danger += min(self.carbon_monoxide / 5, 30.0)
        elif self.carbon_monoxide > 9:
            danger += (self.carbon_monoxide - 9) * 0.5
        if self.carbon_dioxide > 5000:
            danger += min((self.carbon_dioxide - 5000) / 200, 20.0)
        if self.hydrogen_cyanide > 20:
            danger += min(self.hydrogen_cyanide * 1.5, 25.0)
        if self.hydrogen_chloride > 5:
            danger += min(self.hydrogen_chloride * 2, 15.0)

        # Flashover risk, tiered.
        if self.flashover_risk > 0.7:
            danger += 25.0
        elif self.flashover_risk > 0.5:
            danger += 15.0
        elif self.flashover_risk > 0.3:
            danger += 8.0

        # Backdraft risk, tiered.
        if self.backdraft_risk > 0.6:
            danger += 20.0
        elif self.backdraft_risk > 0.4:
            danger += 10.0

        # Crowding above 60% adds risk; above 80% it ramps steeply.
        if self.occupancy_density > 0.8:
            danger += (self.occupancy_density - 0.8) * 75.0
        elif self.occupancy_density > 0.6:
            danger += (self.occupancy_density - 0.6) * 30.0

        # Blocked egress.
        if not self.exit_accessible:
            danger += 20.0
        if not self.stairwell_clear:
            danger += 15.0

        # Time pressure after the first five minutes of fire.
        if self.time_since_fire_start > 300:
            danger += min((self.time_since_fire_start - 300) / 20, 15.0)

        # Infrastructure failures.
        if not self.emergency_lighting:
            danger += 5.0
        if not self.emergency_comm_working:
            danger += 3.0

        # Rapidly growing fire and strong heat radiation.
        if self.fire_growth_rate > 10:
            danger += min(self.fire_growth_rate / 2, 12.0)
        if self.heat_radiation > 2.5:
            danger += min((self.heat_radiation - 2.5) * 3, 10.0)

        return min(danger, 100.0)

    def is_passable(self) -> bool:
        """True when the location can still be traversed (danger < 70 and structure sound)."""
        danger = self.calculate_danger_score()
        return danger < 70.0 and self.structural_integrity > 50.0

    def __repr__(self):
        return (f"SensorReading({self.location_id}: "
                f"Fire={self.fire_detected}, "
                f"Smoke={self.smoke_level:.2f}, "
                f"Temp={self.temperature:.1f}°C, "
                f"Danger={self.calculate_danger_score():.1f})")
180
+
181
+
182
class SensorSystem:
    """Manages all sensors in the building and applies scenario updates."""

    def __init__(self, floor_plan: "FloorPlan"):
        """Create one SensorReading per room in *floor_plan* with baseline data."""
        self.floor_plan = floor_plan
        # room_id -> current reading for that room
        self.sensors: Dict[str, "SensorReading"] = {}
        self._initialize_sensors()

    def _initialize_sensors(self):
        """Initialize sensors for all rooms with realistic baseline mock data.

        Fix: removed the redundant function-local ``import random`` that
        shadowed the module-level import.
        Values are drawn randomly, so readings differ per run unless the
        caller seeds ``random`` beforehand.
        """
        for room_id in self.floor_plan.rooms:
            sensor = SensorReading(room_id)

            # Core hazard readings: quiet building, normal air.
            sensor.fire_detected = False
            sensor.smoke_level = random.uniform(0.0, 0.1)
            sensor.temperature = random.uniform(18, 25)
            sensor.oxygen_level = random.uniform(20.5, 21.0)
            sensor.visibility = random.uniform(95, 100)
            sensor.structural_integrity = 100.0

            # Fire behaviour: nothing burning yet.
            sensor.fire_growth_rate = 0.0
            sensor.flashover_risk = 0.0
            sensor.backdraft_risk = 0.0
            sensor.heat_radiation = 0.0
            sensor.fire_type = "none"

            # Toxic gases at normal ambient levels.
            sensor.carbon_monoxide = random.uniform(0, 5)      # normal < 9 ppm
            sensor.carbon_dioxide = random.uniform(350, 450)   # normal ~400 ppm
            sensor.hydrogen_cyanide = 0.0
            sensor.hydrogen_chloride = 0.0

            # Environmental conditions.
            sensor.wind_direction = random.uniform(0, 360)
            sensor.wind_speed = random.uniform(0, 3)           # m/s
            sensor.air_pressure = random.uniform(1010, 1020)
            sensor.humidity = random.uniform(40, 60)

            # Human factors vary by room type: exits are least crowded,
            # corridors (ids containing "C") moderately, rooms most.
            if "EXIT" in room_id:
                sensor.occupancy_density = random.uniform(0.1, 0.3)
            elif "C" in room_id:
                sensor.occupancy_density = random.uniform(0.2, 0.5)
            else:
                sensor.occupancy_density = random.uniform(0.3, 0.7)

            sensor.mobility_limitations = random.randint(0, 2)
            sensor.panic_level = random.uniform(0.0, 0.2)      # calm initially
            sensor.evacuation_progress = 0.0

            # Infrastructure: everything operational at baseline.
            sensor.sprinkler_active = True
            sensor.emergency_lighting = True
            sensor.elevator_available = True
            sensor.stairwell_clear = True
            sensor.exit_accessible = True
            sensor.exit_capacity = random.randint(80, 120)
            sensor.ventilation_active = True

            # Timing.
            sensor.time_since_fire_start = 0
            sensor.estimated_time_to_exit = random.randint(30, 180)

            # Communication.
            sensor.emergency_comm_working = True
            sensor.wifi_signal_strength = random.uniform(70, 100)

            # External conditions.
            sensor.weather_temperature = random.uniform(15, 25)
            sensor.weather_rain = random.choice([True, False])
            sensor.time_of_day = random.randint(8, 18)
            sensor.day_of_week = random.randint(0, 6)

            self.sensors[room_id] = sensor

    def update_sensor(self, location_id: str, **kwargs):
        """Set the given attributes on the sensor at *location_id*.

        Unknown locations and attribute names are silently ignored
        (deliberate best-effort semantics for simulation updates).
        """
        if location_id in self.sensors:
            sensor = self.sensors[location_id]
            for key, value in kwargs.items():
                if hasattr(sensor, key):
                    setattr(sensor, key, value)

    def get_sensor_reading(self, location_id: str) -> "SensorReading":
        """Return the reading for *location_id*, or None if no sensor exists there."""
        return self.sensors.get(location_id)

    def simulate_fire_scenario(self, fire_locations: List[str],
                               affected_areas: Dict[str, Dict] = None):
        """
        Reset all sensors, then apply a fire scenario.

        Args:
            fire_locations: Room IDs where fire has broken out.
            affected_areas: Optional room_id -> sensor-value overrides for
                rooms impacted but not burning.
        """
        # Start from a clean baseline so repeated scenarios don't accumulate.
        self._initialize_sensors()

        # Burning rooms: heavy smoke, extreme heat, depleted oxygen.
        for location in fire_locations:
            if location in self.sensors:
                self.update_sensor(
                    location,
                    fire_detected=True,
                    smoke_level=0.9,
                    temperature=200.0 + random.uniform(-20, 50),
                    oxygen_level=15.0,
                    visibility=10.0,
                    structural_integrity=70.0
                )

        # Apply any explicit per-room overrides.
        if affected_areas:
            for location, values in affected_areas.items():
                if location in self.sensors:
                    self.update_sensor(location, **values)

        # Let smoke and heat bleed into adjacent rooms.
        self._simulate_smoke_spread(fire_locations)

    def _simulate_smoke_spread(self, fire_locations: List[str]):
        """Degrade conditions in rooms adjacent to each fire location."""
        for fire_loc in fire_locations:
            neighbors = self.floor_plan.get_neighbors(fire_loc)
            for neighbor_id, _ in neighbors:
                # Rooms already on fire keep their (worse) fire readings.
                if neighbor_id in self.sensors and not self.sensors[neighbor_id].fire_detected:
                    current = self.sensors[neighbor_id]
                    self.update_sensor(
                        neighbor_id,
                        smoke_level=min(current.smoke_level + random.uniform(0.3, 0.6), 1.0),
                        temperature=current.temperature + random.uniform(20, 40),
                        visibility=max(current.visibility - random.uniform(20, 40), 20.0),
                        oxygen_level=max(current.oxygen_level - random.uniform(1, 3), 16.0)
                    )

    def get_all_readings(self) -> Dict[str, "SensorReading"]:
        """Return the live mapping of room_id -> SensorReading."""
        return self.sensors

    def print_status(self):
        """Print a one-line status summary for every sensor, sorted by room id."""
        print(f"\n{'='*80}")
        print("SENSOR SYSTEM STATUS")
        print(f"{'='*80}")
        for location_id, reading in sorted(self.sensors.items()):
            danger = reading.calculate_danger_score()
            status = "SAFE" if danger < 30 else "WARNING" if danger < 70 else "DANGER"
            print(f"{location_id:10} | {status:7} | {reading}")
335
+
336
+
337
def create_sample_fire_scenario(floor_plan: FloorPlan) -> SensorSystem:
    """
    Create a sample fire scenario with 3 routes of varying danger.

    Scenario:
    - Route 1 (via C1, C4 to EXIT1): Has oxygen cylinder (explosion risk) but less fire
    - Route 2 (via C2, C5, C6 to EXIT2): Has moderate fire
    - Route 3 (via C7 to EXIT3): Has heavy fire blocking path

    Returns a SensorSystem with the scenario already applied via
    simulate_fire_scenario().
    """

    def readings(fire, smoke, temp, o2, vis, integrity):
        # Build one location's sensor-override dict in the shape
        # expected by SensorSystem.update_sensor(**values).
        return {
            "fire_detected": fire,
            "smoke_level": smoke,
            "temperature": temp,
            "oxygen_level": o2,
            "visibility": vis,
            "structural_integrity": integrity,
        }

    system = SensorSystem(floor_plan)

    # Main fire locations.
    fire_locations = ["R2", "C5", "C7"]

    # Specific affected areas with custom sensor values.
    affected_areas = {
        # Route 1 - oxygen cylinder area (explosion risk but less fire).
        "C1": readings(False, 0.4, 45.0, 20.5, 60.0, 95.0),
        "C4": readings(False, 0.5, 50.0, 20.0, 50.0, 90.0),
        # Route 2 - moderate fire.
        "C2": readings(False, 0.6, 65.0, 18.5, 40.0, 85.0),
        "C6": readings(True, 0.8, 150.0, 16.0, 20.0, 75.0),
        # Route 3 - heavy fire (worst option).
        "R3": readings(False, 0.7, 80.0, 17.5, 30.0, 80.0),
        # Starting point.
        "R1": readings(False, 0.2, 35.0, 20.5, 80.0, 100.0),
        # Exit areas.
        "EXIT1": readings(False, 0.1, 25.0, 21.0, 100.0, 100.0),
        "EXIT2": readings(False, 0.3, 40.0, 20.0, 70.0, 100.0),
        "EXIT3": readings(False, 0.4, 45.0, 19.5, 60.0, 100.0),
    }

    system.simulate_fire_scenario(fire_locations, affected_areas)
    return system
439
+