varshakolanu committed on
Commit
89fb907
·
verified ·
1 Parent(s): 2024dce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -45
app.py CHANGED
@@ -1,50 +1,77 @@
 
 
 
 
1
  import json
2
- from datetime import datetime
3
-
4
def calculate_scores(logs):
    """Aggregate subcontractor performance scores from a list of log dicts.

    Each log is a dict with a 'log_type' key ('Quality', 'Delay',
    'Incident', or 'Work Completion') plus the type-specific field.
    Categories with no logs default to a neutral 50.

    Args:
        logs: iterable of log dicts.

    Returns:
        dict with per-category averages, a weighted 'final_score',
        an 'alert_flag' (final score below 60), and a 'trend' placeholder.
    """
    quality_scores = []
    timeliness_scores = []
    safety_scores = []
    communication_scores = []

    for log in logs:
        if log['log_type'] == 'Quality':
            # BUG FIX: original used `null` (a NameError in Python) and the
            # `x and a or b` hack, which misfires when the value is 0.
            score = log.get('quality_score')
            quality_scores.append(score if score is not None else 50)
        elif log['log_type'] == 'Delay':
            delay = log.get('delay_percentage')
            timeliness_scores.append(100 - (delay if delay is not None else 0))
        elif log['log_type'] == 'Incident':
            safety_scores.append(100 if log.get('safety_compliance') else 0)
        elif log['log_type'] == 'Work Completion':
            # Non-empty feedback counts as good communication.
            communication_scores.append(80 if log.get('feedback') else 20)

    quality_avg = sum(quality_scores) / len(quality_scores) if quality_scores else 50
    timeliness_avg = sum(timeliness_scores) / len(timeliness_scores) if timeliness_scores else 50
    safety_avg = sum(safety_scores) / len(safety_scores) if safety_scores else 50
    communication_avg = sum(communication_scores) / len(communication_scores) if communication_scores else 50

    # Weighted blend: quality/timeliness dominate (30% each), safety and
    # communication contribute 20% each.
    final_score = (quality_avg * 0.3) + (timeliness_avg * 0.3) + (safety_avg * 0.2) + (communication_avg * 0.2)
    alert_flag = final_score < 60

    return {
        'quality_score': round(quality_avg, 2),
        'timeliness_score': round(timeliness_avg, 2),
        'safety_score': round(safety_avg, 2),
        'communication_score': round(communication_avg, 2),
        'final_score': round(final_score, 2),
        'alert_flag': alert_flag,
        'trend': 'stable'  # Simplified; add trend detection later
    }
37
 
38
def main():
    """Run the scorer against a small fixture and pretty-print the result."""
    demo_logs = [
        {'log_type': 'Quality', 'quality_score': 85},
        {'log_type': 'Delay', 'delay_percentage': 10},
        {'log_type': 'Incident', 'safety_compliance': True},
        {'log_type': 'Work Completion', 'feedback': 'Good communication'},
    ]
    print(json.dumps(calculate_scores(demo_logs), indent=2))


if __name__ == '__main__':
    main()
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from fastapi import FastAPI, Request
3
+ from transformers import pipeline
4
+ import uvicorn
5
  import json
6
+
7
# Sentiment-analysis pipeline used to score free-text feedback.
# NOTE(review): the model is downloaded/loaded at import time — first start
# is slow and needs network access; confirm this is acceptable for the Space.
sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")

# FastAPI application exposing the scoring endpoint.
app = FastAPI()
12
+
13
@app.post("/calculate_scores")
async def calculate_scores(request: Request):
    """Score a batch of subcontractor logs posted as a JSON array.

    The request body is expected to be a list of log dicts, each carrying a
    'log_type' ('Quality', 'Delay', 'Incident') plus its type-specific field,
    and optionally a 'feedback' string scored via sentiment analysis.

    Returns:
        dict of per-category averages (0 when a category has no logs) and an
        'alert_flag' set when quality or timeliness falls below 50.
    """
    logs = await request.json()

    quality_total = timeliness_total = safety_total = communication_total = 0.0
    quality_count = delay_count = incident_count = feedback_count = 0

    for log in logs:
        log_type = log.get("log_type")
        # BUG FIX: use `is not None` instead of truthiness — a quality score
        # of 0 or a delay of 0% is a legitimate value and was being skipped.
        if log_type == "Quality" and log.get("quality_score") is not None:
            quality_total += float(log["quality_score"])
            quality_count += 1
        if log_type == "Delay" and log.get("delay_percentage") is not None:
            timeliness_total += 100 - float(log["delay_percentage"])
            delay_count += 1
        if log_type == "Incident":
            safety_total += 100 if log.get("safety_compliance", False) else 0
            incident_count += 1
        if log.get("feedback"):
            feedback = log["feedback"]
            sentiment = sentiment_analyzer(feedback)[0]
            # POSITIVE feedback is scaled by model confidence; anything else
            # gets a flat 50% multiplier.
            sentiment_score = sentiment["score"] * 100 if sentiment["label"] == "POSITIVE" else 50
            # Longer feedback (>50 chars) earns a higher base score.
            base_score = 90 if len(feedback) > 50 else 70
            communication_total += base_score * (sentiment_score / 100)
            feedback_count += 1

    # Per-category averages; categories with no logs report 0.
    quality_score = quality_total / quality_count if quality_count else 0
    timeliness_score = timeliness_total / delay_count if delay_count else 0
    safety_score = safety_total / incident_count if incident_count else 0
    communication_score = communication_total / feedback_count if feedback_count else 0

    return {
        "quality_score": round(quality_score, 2),
        "timeliness_score": round(timeliness_score, 2),
        "safety_score": round(safety_score, 2),
        "communication_score": round(communication_score, 2),
        "alert_flag": quality_score < 50 or timeliness_score < 50
    }
57
 
58
+ # Gradio interface for testing
59
def test_api(logs_json):
    """Gradio helper: run the scoring endpoint on a JSON string of logs.

    BUG FIX: `calculate_scores` is an async FastAPI handler that expects a
    Request object; the original passed the parsed list directly and never
    awaited the coroutine, so every call errored. Wrap the parsed logs in a
    minimal request stand-in and drive the coroutine with asyncio.run().

    Args:
        logs_json: JSON-encoded list of log dicts.

    Returns:
        Pretty-printed JSON result, or an "Error: ..." string on failure.
    """
    import asyncio

    try:
        logs = json.loads(logs_json)

        class _StubRequest:
            # Mimics the only part of fastapi.Request the handler uses.
            async def json(self):
                return logs

        result = asyncio.run(calculate_scores(_StubRequest()))
        return json.dumps(result, indent=2)
    except Exception as e:
        return f"Error: {str(e)}"
66
+
67
# Launch the Gradio UI for manually exercising the scoring logic.
# NOTE(review): .launch() blocks, so the uvicorn call below is only reached
# after the Gradio server stops — confirm this ordering is intended.
gr.Interface(
    fn=test_api,
    inputs=gr.Textbox(label="Enter Logs JSON", placeholder='[{"log_type": "Quality", "quality_score": 80, "feedback": "Great work!"}]'),
    outputs=gr.Textbox(label="Scores"),
    title="Subcontractor Scoring API Tester"
).launch()
74
+
75
# Entry point: serve the FastAPI app on the HF Spaces default port (7860).
# NOTE(review): only executes once the Gradio .launch() above returns.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)