aaronmrls commited on
Commit
f733c1b
·
verified ·
1 Parent(s): 21265a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -2
app.py CHANGED
@@ -24,15 +24,31 @@ def predict():
24
  results = []
25
 
26
  for item in data:
27
- # Build input text (you can adjust formatting if needed)
28
  input_text = f"{item['category']} - {item['subcategory']} in {item['area']}. {item.get('comments', '')}"
 
 
 
 
 
 
 
 
 
 
 
 
29
  inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
30
 
31
  with torch.no_grad():
32
  outputs = model(**inputs)
33
  predicted_class = torch.argmax(outputs.logits, dim=1).item()
34
 
35
- results.append({"priority_score": predicted_class})
 
 
 
 
36
 
37
  return jsonify(results)
38
 
 
24
  results = []
25
 
26
  for item in data:
27
+ # Build input text
28
  input_text = f"{item['category']} - {item['subcategory']} in {item['area']}. {item.get('comments', '')}"
29
+
30
+ # Keyword override: incidents mentioning critical hazards get max priority (5)
31
+ text_lower = input_text.lower()
32
+ critical_keywords = [
33
+ "umuusok", "sunog", "amoy sunog", "spark", "kuryente",
34
+ "leak", "baha", "gas", "short circuit", "smoke"
35
+ ]
36
+ if any(word in text_lower for word in critical_keywords):
37
+ results.append({"priority_score": 5})
38
+ continue
39
+
40
+ # Tokenize & predict
41
  inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
42
 
43
  with torch.no_grad():
44
  outputs = model(**inputs)
45
  predicted_class = torch.argmax(outputs.logits, dim=1).item()
46
 
47
+ # ✅ Convert back to 1–5 scale
48
+ priority_score = predicted_class + 1
49
+
50
+ # ✅ Append the converted priority score for this item
51
+ results.append({"priority_score": priority_score})
52
 
53
  return jsonify(results)
54