Spaces:
Sleeping
Sleeping
Implemented leave application endpoint and added self-service timesheet views for agents and performance tracking for managers
Browse files- docs/devlogs/db/logs.json +1 -1
- docs/devlogs/server/runtimeerror.txt +57 -36
- docs/features/timesheets/DEPLOYMENT_STEPS.md +0 -0
- docs/features/timesheets/README.md +0 -93
- docs/features/timesheets/RECONCILIATION_SYSTEM.md +251 -1614
- docs/features/timesheets/SETUP_GUIDE.md +0 -261
- docs/schema/VALID_ROLES.md +0 -0
- src/app/api/v1/ticket_assignments.py +41 -2
- src/app/api/v1/ticket_expenses.py +30 -0
- src/app/api/v1/timesheets.py +289 -1
- src/app/config/apps.py +1 -2
- src/app/schemas/timesheet.py +9 -0
- src/app/services/reconciliation/reconciliation_service.py +163 -0
- src/app/tasks/scheduler.py +59 -88
- supabase/migrations/20241210_add_inventory_to_timesheets.sql +347 -0
- supabase/migrations/20241210_add_realtime_reconciliation.sql +294 -0
- supabase/migrations/20241210_cleanup_inventory_view.sql +21 -0
docs/devlogs/db/logs.json
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
[{"idx":10,"id":"f59b29fc-d0b9-4618-b0d1-889e340da612","project_id":"0ade6bd1-e492-4e25-b681-59f42058d29a","source":"sales_order","source_id":"c93b28c0-d7bf-4f57-b750-d0c6864543b0","ticket_name":"Elizabeth Muthoni","ticket_type":"installation","service_type":"ftth","work_description":"Install Premium Fiber 100Mbps for Elizabeth Muthoni","status":"completed","priority":"normal","scheduled_date":"2025-12-11","scheduled_time_slot":null,"due_date":"2025-11-29 13:24:19.212882+00","sla_target_date":"2025-11-29 13:24:19.212882+00","sla_violated":true,"started_at":null,"completed_at":"2025-11-30 12:08:38.793534+00","is_invoiced":false,"invoiced_at":null,"contractor_invoice_id":null,"project_region_id":"4cd27765-5720-4cc0-872e-bf0da3cd1898","work_location_latitude":null,"work_location_longitude":null,"work_location_accuracy":null,"work_location_verified":false,"dedup_key":null,"notes":"[COMPLETION] we finished connection","additional_metadata":"{}","version":1,"created_at":"2025-11-26 13:24:19.212882+00","updated_at":"2025-11-30 12:08:38.858902+00","deleted_at":null,"required_team_size":1,"completion_data":"{\"odu_serial\": \"jjhh\", \"ont_serial\": \"jhh\"}","completion_photos_verified":true,"completion_data_verified":true}]
|
|
|
|
| 1 |
+
[{"idx":0,"id":"20772cb1-ec31-41dc-9cb0-73649dc6ac55","ticket_id":"70090c47-e9c1-4b0a-add4-69bec53d92f9","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"completed","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-12-03 08:04:10.106149+00","responded_at":"2025-12-03 08:04:10.106152+00","journey_started_at":"2025-12-03 08:04:25.130305+00","arrived_at":"2025-12-03 08:05:00.662165+00","ended_at":"2025-12-09 06:11:34.948803+00","journey_start_latitude":-1.2200548,"journey_start_longitude":36.876972,"arrival_latitude":-1.2200534,"arrival_longitude":36.876971,"arrival_verified":false,"journey_location_history":"[{\"lat\": -1.2200548, \"lng\": 36.8769751, \"speed\": 0.0, \"battery\": 62, \"network\": \"3g\", \"accuracy\": 20.0, \"timestamp\": \"2025-12-03T08:04:29.012678\"}, {\"lat\": -1.2200534, \"lng\": 36.876971, \"speed\": 0.0, \"battery\": 62, \"network\": \"3g\", \"accuracy\": 20.0, \"timestamp\": \"2025-12-03T08:05:00.310663\"}]","reason":null,"notes":"[COMPLETED] well done\n","created_at":"2025-12-03 08:04:10.113266+00","updated_at":"2025-12-09 06:11:35.012778+00","deleted_at":null},{"idx":1,"id":"34c2a077-5630-4af8-843d-e125c2497267","ticket_id":"2de41ce7-dff1-4151-9710-87958d18b5c4","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"accepted","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-12-03 11:26:09.556998+00","responded_at":"2025-12-03 11:26:09.557001+00","journey_started_at":"2025-12-03 11:26:46.746692+00","arrived_at":"2025-12-03 12:18:09.241003+00","ended_at":null,"journey_start_latitude":-1.21995899256506,"journey_start_longitude":36.8769753048327,"arrival_latitude":-1.219888,"arrival_longitude":36.877013,"arrival_verified":false,"journey_location_history":"[{\"lat\": -1.2199589925650558, \"lng\": 36.87697530483272, \"speed\": 
0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T11:26:48.178288\"}, {\"lat\": -1.2199589925650558, \"lng\": 36.87697530483272, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T11:27:21.327980\"}, {\"lat\": -1.2200231002547879, \"lng\": 36.8771629705233, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 68.0, \"timestamp\": \"2025-12-03T11:27:50.652389\"}, {\"lat\": -1.2200231002547879, \"lng\": 36.8771629705233, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 68.0, \"timestamp\": \"2025-12-03T11:28:22.992295\"}, {\"lat\": -1.220109, \"lng\": 36.8774065, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 99.0, \"timestamp\": \"2025-12-03T11:43:06.954957\"}, {\"lat\": -1.220109, \"lng\": 36.8774065, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 99.0, \"timestamp\": \"2025-12-03T11:43:38.608213\"}, {\"lat\": -1.220109, \"lng\": 36.8774065, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 99.0, \"timestamp\": \"2025-12-03T11:44:10.707565\"}, {\"lat\": -1.220109, \"lng\": 36.8774065, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 99.0, \"timestamp\": \"2025-12-03T11:44:38.555332\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T11:45:08.446621\"}, {\"lat\": -1.2200648441710604, \"lng\": 36.8773285837727, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:45:38.963983\"}, {\"lat\": -1.2200648441710604, \"lng\": 36.8773285837727, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:46:08.085772\"}, {\"lat\": -1.2200648441710604, \"lng\": 36.8773285837727, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:46:41.028458\"}, 
{\"lat\": -1.2200648441710604, \"lng\": 36.8773285837727, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:47:18.306843\"}, {\"lat\": -1.2198873388753058, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 114.0, \"timestamp\": \"2025-12-03T11:48:18.534400\"}, {\"lat\": -1.22001920394653, \"lng\": 36.87720764186818, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:49:19.458742\"}, {\"lat\": -1.22003489286718, \"lng\": 36.8771952558782, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:50:18.361195\"}, {\"lat\": -1.22003489286718, \"lng\": 36.8771952558782, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-12-03T11:51:00.540478\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:05:23.087122\"}, {\"lat\": -1.2201036936940906, \"lng\": 36.87739493481981, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 81.0, \"timestamp\": \"2025-12-03T12:05:50.078766\"}, {\"lat\": -1.2201036936940906, \"lng\": 36.87739493481981, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 81.0, \"timestamp\": \"2025-12-03T12:06:20.217655\"}, {\"lat\": -1.2201036936940906, \"lng\": 36.87739493481981, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 81.0, \"timestamp\": \"2025-12-03T12:06:52.207598\"}, {\"lat\": -1.2201036936940906, \"lng\": 36.87739493481981, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 81.0, \"timestamp\": \"2025-12-03T12:07:19.342098\"}, {\"lat\": -1.2201036936940906, \"lng\": 36.87739493481981, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 81.0, \"timestamp\": \"2025-12-03T12:07:48.987004\"}, {\"lat\": -1.2200341085574573, \"lng\": 
36.87726984694376, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 114.0, \"timestamp\": \"2025-12-03T12:08:19.276546\"}, {\"lat\": -1.2200341085574573, \"lng\": 36.87726984694376, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 114.0, \"timestamp\": \"2025-12-03T12:08:49.316274\"}, {\"lat\": -1.2201656877323421, \"lng\": 36.87751371747212, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T12:10:01.727248\"}, {\"lat\": -1.2201656877323421, \"lng\": 36.87751371747212, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T12:10:32.749087\"}, {\"lat\": -1.2201656877323421, \"lng\": 36.87751371747212, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T12:10:49.035657\"}, {\"lat\": -1.2200968236467173, \"lng\": 36.8773399129426, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 79.0, \"timestamp\": \"2025-12-03T12:11:19.244816\"}, {\"lat\": -1.2200968236467173, \"lng\": 36.8773399129426, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 79.0, \"timestamp\": \"2025-12-03T12:11:49.325134\"}, {\"lat\": -1.2200968236467173, \"lng\": 36.8773399129426, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 79.0, \"timestamp\": \"2025-12-03T12:12:22.640713\"}, {\"lat\": -1.220083681913554, \"lng\": 36.877307709819554, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 87.0, \"timestamp\": \"2025-12-03T12:12:49.255484\"}, {\"lat\": -1.220195055324658, \"lng\": 36.877557804952716, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T12:13:27.145805\"}, {\"lat\": -1.220195055324658, \"lng\": 36.877557804952716, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 85.0, \"timestamp\": \"2025-12-03T12:13:57.449517\"}, {\"lat\": -1.2200767183872938, \"lng\": 36.87734831582162, 
\"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 107.0, \"timestamp\": \"2025-12-03T12:14:30.021486\"}, {\"lat\": -1.2200767183872938, \"lng\": 36.87734831582162, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 107.0, \"timestamp\": \"2025-12-03T12:14:57.364315\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:15:27.675161\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:15:57.396948\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:16:27.477932\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:16:59.376149\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:17:26.486481\"}, {\"lat\": -1.219888, \"lng\": 36.877013, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 149.0, \"timestamp\": \"2025-12-03T12:17:57.103096\"}]","reason":null,"notes":null,"created_at":"2025-12-03 11:26:09.562514+00","updated_at":"2025-12-03 12:18:09.242009+00","deleted_at":null},{"idx":2,"id":"69a2a4f6-f72a-425c-9f90-4088e074c13b","ticket_id":"1f807cf8-f139-421b-86e3-38c2f8bc7070","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"dropped","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-12-02 14:12:10.05599+00","responded_at":"2025-12-02 14:12:10.055992+00","journey_started_at":"2025-12-02 14:12:27.929722+00","arrived_at":"2025-12-02 14:45:50.181161+00","ended_at":"2025-12-03 
11:59:40.294207+00","journey_start_latitude":-1.2200122,"journey_start_longitude":36.8770178,"arrival_latitude":-1.2192414,"arrival_longitude":36.8907296,"arrival_verified":false,"journey_location_history":"[{\"lat\": -1.2194135, \"lng\": 36.8775907, \"speed\": 0.0, \"battery\": 56, \"network\": \"4g\", \"accuracy\": 34.61399841308594, \"timestamp\": \"2025-12-02T14:12:31.527583\"}, {\"lat\": -1.2194928, \"lng\": 36.8775066, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 26.85700035095215, \"timestamp\": \"2025-12-02T14:13:00.622699\"}, {\"lat\": -1.2195889, \"lng\": 36.8776923, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 30.481000900268555, \"timestamp\": \"2025-12-02T14:13:30.323994\"}, {\"lat\": -1.2199054, \"lng\": 36.8783171, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 36.08300018310547, \"timestamp\": \"2025-12-02T14:14:00.533225\"}, {\"lat\": -1.2200001, \"lng\": 36.8784442, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 18.518999099731445, \"timestamp\": \"2025-12-02T14:14:33.007440\"}, {\"lat\": -1.2201898, \"lng\": 36.8806725, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 20.899999618530273, \"timestamp\": \"2025-12-02T14:19:29.029786\"}, {\"lat\": -1.2202622, \"lng\": 36.8808323, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 12.71399974822998, \"timestamp\": \"2025-12-02T14:20:21.598297\"}, {\"lat\": -1.2204048, \"lng\": 36.8814195, \"speed\": 1.7677345275878906, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 12.756999969482422, \"timestamp\": \"2025-12-02T14:20:51.373114\"}, {\"lat\": -1.2203779, \"lng\": 36.881459, \"speed\": 0.0, \"battery\": 55, \"network\": \"4g\", \"accuracy\": 30.0, \"timestamp\": \"2025-12-02T14:21:21.198022\"}, {\"lat\": -1.2187483, \"lng\": 36.8871833, \"speed\": 0.0, \"battery\": 54, \"network\": \"4g\", \"accuracy\": 12.199999809265137, \"timestamp\": \"2025-12-02T14:35:01.049929\"}, 
{\"lat\": -1.2188454, \"lng\": 36.8875974, \"speed\": 0.0, \"battery\": 54, \"network\": \"4g\", \"accuracy\": 18.856000900268555, \"timestamp\": \"2025-12-02T14:35:24.327865\"}, {\"lat\": -1.2188761, \"lng\": 36.8876456, \"speed\": 0.0, \"battery\": 54, \"network\": \"4g\", \"accuracy\": 15.873000144958496, \"timestamp\": \"2025-12-02T14:35:53.867957\"}, {\"lat\": -1.2188681, \"lng\": 36.8876746, \"speed\": 0.0, \"battery\": 54, \"network\": \"4g\", \"accuracy\": 19.760000228881836, \"timestamp\": \"2025-12-02T14:36:23.507366\"}, {\"lat\": -1.2192414, \"lng\": 36.8907296, \"speed\": 3.572157597541809, \"battery\": 53, \"network\": \"4g\", \"accuracy\": 72.9000015258789, \"timestamp\": \"2025-12-02T14:45:50.097141\"}]","reason":"[cancellation] opted for another installer","notes":null,"created_at":"2025-12-02 14:12:10.058135+00","updated_at":"2025-12-03 11:59:40.358261+00","deleted_at":null},{"idx":3,"id":"a82a3824-f4f1-4283-a2e3-8c348dbb28ce","ticket_id":"f59b29fc-d0b9-4618-b0d1-889e340da612","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"accepted","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-11-30 10:21:10.085152+00","responded_at":"2025-11-30 10:21:10.085155+00","journey_started_at":"2025-11-30 10:55:39.250857+00","arrived_at":"2025-11-30 10:56:11.929093+00","ended_at":"2025-12-01 05:43:37.183538+00","journey_start_latitude":-1.10052333333333,"journey_start_longitude":37.0092266666667,"arrival_latitude":-1.10056144256674,"arrival_longitude":37.0092363654176,"arrival_verified":false,"journey_location_history":"[{\"lat\": -1.100523333333333, \"lng\": 37.00922666666666, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 64.0, \"timestamp\": \"2025-11-30T10:55:40.399050\"}, {\"lat\": -1.1005614425667403, \"lng\": 37.009236365417586, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 65.0, \"timestamp\": 
\"2025-11-30T10:56:11.682201\"}]","reason":null,"notes":null,"created_at":"2025-11-30 10:21:10.111327+00","updated_at":"2025-11-30 10:56:11.929936+00","deleted_at":null},{"idx":4,"id":"b3a83bd0-d287-4cea-a1c8-8bef145c1296","ticket_id":"8f08ad14-df8b-4780-84e7-0d45e133f2a6","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"accepted","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-11-28 09:27:41.952753+00","responded_at":"2025-11-28 09:27:41.952757+00","journey_started_at":"2025-11-28 10:04:37.590417+00","arrived_at":"2025-11-28 10:45:04.045672+00","ended_at":"2025-12-01 05:45:37.183538+00","journey_start_latitude":-1.22005074013012,"journey_start_longitude":36.8772529852395,"arrival_latitude":-1.22018863567581,"arrival_longitude":36.8775512279948,"arrival_verified":false,"journey_location_history":"[{\"lat\": -1.2201086694376528, \"lng\": 36.87740484718826, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 87.0, \"timestamp\": \"2025-11-28T10:34:41.638069\"}, {\"lat\": -1.2201371818782953, \"lng\": 36.87746013754867, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 68.0, \"timestamp\": \"2025-11-28T10:36:56.645310\"}, {\"lat\": -1.220034831373305, \"lng\": 36.877274425758436, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 81.0, \"timestamp\": \"2025-11-28T10:38:04.419568\"}, {\"lat\": -1.2200648441710604, \"lng\": 36.8773285837727, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 92.0, \"timestamp\": \"2025-11-28T10:44:42.461237\"}, {\"lat\": -1.220188635675814, \"lng\": 36.87755122799482, \"speed\": 0.0, \"battery\": 100, \"network\": null, \"accuracy\": 75.0, \"timestamp\": \"2025-11-28T10:44:57.478127\"}]","reason":null,"notes":null,"created_at":"2025-11-28 09:27:41.997778+00","updated_at":"2025-11-28 
10:45:04.049575+00","deleted_at":null},{"idx":5,"id":"d6f25868-5117-4b85-8bb3-44314144ef6e","ticket_id":"0fd3ee15-5e7d-465a-b377-155f9bdb7e70","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"completed","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-12-01 05:50:51.219042+00","responded_at":"2025-12-01 05:50:51.219044+00","journey_started_at":"2025-12-01 05:52:03.358665+00","arrived_at":"2025-12-01 05:52:35.847093+00","ended_at":"2025-12-01 05:54:37.183538+00","journey_start_latitude":-1.2200188,"journey_start_longitude":36.8770193,"arrival_latitude":-1.2200285,"arrival_longitude":36.8770248,"arrival_verified":false,"journey_location_history":"[{\"lat\": -1.2200188, \"lng\": 36.8770193, \"speed\": 0.0, \"battery\": 81, \"network\": \"4g\", \"accuracy\": 15.255999565124512, \"timestamp\": \"2025-12-01T05:52:07.294621\"}, {\"lat\": -1.2200285, \"lng\": 36.8770248, \"speed\": 0.0, \"battery\": 81, \"network\": \"4g\", \"accuracy\": 19.003999710083008, \"timestamp\": \"2025-12-01T05:52:35.782705\"}]","reason":null,"notes":"[COMPLETED] Hhhdg\n","created_at":"2025-12-01 05:50:51.230657+00","updated_at":"2025-12-01 05:54:37.241104+00","deleted_at":null},{"idx":6,"id":"f5b40f0c-bc9c-4904-9ec1-7e570bda34eb","ticket_id":"169eec08-654d-4ffe-bdb3-45fad1101637","user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","action":"accepted","assigned_by_user_id":"43b778b0-2062-4724-abbb-916a4835a9b0","is_self_assigned":true,"execution_order":null,"planned_start_time":null,"assigned_at":"2025-12-03 13:21:09.471388+00","responded_at":"2025-12-03 13:21:09.471392+00","journey_started_at":"2025-12-03 13:21:46.868278+00","arrived_at":"2025-12-03 13:22:20.932051+00","ended_at":null,"journey_start_latitude":-1.2200555,"journey_start_longitude":36.876972,"arrival_latitude":-1.2200712,"arrival_longitude":36.8770138,"arrival_verified":false,"journey_location_history":"[{\"lat\": 
-1.2200661, \"lng\": 36.8769748, \"speed\": 0.0, \"battery\": 46, \"network\": \"3g\", \"accuracy\": 38.72999954223633, \"timestamp\": \"2025-12-03T13:21:50.999645\"}, {\"lat\": -1.2200712, \"lng\": 36.8770138, \"speed\": 0.0, \"battery\": 46, \"network\": \"3g\", \"accuracy\": 23.055999755859375, \"timestamp\": \"2025-12-03T13:22:20.715580\"}]","reason":null,"notes":null,"created_at":"2025-12-03 13:21:09.472355+00","updated_at":"2025-12-03 13:22:20.932686+00","deleted_at":null}]
|
docs/devlogs/server/runtimeerror.txt
CHANGED
|
@@ -1,41 +1,62 @@
|
|
| 1 |
-
===== Application Startup at 2025-12-09 21:
|
| 2 |
|
| 3 |
INFO: Started server process [7]
|
| 4 |
INFO: Waiting for application startup.
|
| 5 |
-
INFO: 2025-12-09T21:
|
| 6 |
-
INFO: 2025-12-09T21:
|
| 7 |
-
INFO: 2025-12-09T21:
|
| 8 |
-
INFO: 2025-12-09T21:
|
| 9 |
-
INFO: 2025-12-09T21:
|
| 10 |
-
INFO: 2025-12-09T21:
|
| 11 |
-
INFO: 2025-12-09T21:
|
| 12 |
-
INFO: 2025-12-09T21:
|
| 13 |
-
INFO: 2025-12-09T21:
|
| 14 |
-
INFO: 2025-12-09T21:
|
| 15 |
-
INFO: 2025-12-09T21:
|
| 16 |
-
INFO: 2025-12-09T21:
|
| 17 |
-
INFO: 2025-12-09T21:
|
| 18 |
-
INFO: 2025-12-09T21:
|
| 19 |
-
INFO: 2025-12-09T21:
|
| 20 |
-
INFO: 2025-12-09T21:
|
| 21 |
-
INFO: 2025-12-09T21:
|
| 22 |
-
INFO: 2025-12-09T21:
|
| 23 |
-
INFO: 2025-12-09T21:
|
| 24 |
-
INFO: 2025-12-09T21:
|
| 25 |
-
INFO: 2025-12-09T21:
|
| 26 |
-
INFO: 2025-12-09T21:
|
| 27 |
-
INFO: 2025-12-09T21:
|
| 28 |
INFO: Application startup complete.
|
| 29 |
INFO: Uvicorn running on http://0.0.0.0:7860 (Press CTRL+C to quit)
|
| 30 |
-
INFO: 10.16.
|
| 31 |
-
INFO:
|
| 32 |
-
INFO:
|
| 33 |
-
INFO: 2025-12-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
===== Application Startup at 2025-12-09 21:19:32 =====
|
| 2 |
|
| 3 |
INFO: Started server process [7]
|
| 4 |
INFO: Waiting for application startup.
|
| 5 |
+
INFO: 2025-12-09T21:19:45 - app.main: ============================================================
|
| 6 |
+
INFO: 2025-12-09T21:19:45 - app.main: 🚀 SwiftOps API v1.0.0 | PRODUCTION
|
| 7 |
+
INFO: 2025-12-09T21:19:45 - app.main: 📊 Dashboard: Enabled
|
| 8 |
+
INFO: 2025-12-09T21:19:45 - app.main: ============================================================
|
| 9 |
+
INFO: 2025-12-09T21:19:45 - app.main: 📦 Database:
|
| 10 |
+
INFO: 2025-12-09T21:19:45 - app.main: ✓ Connected | 46 tables | 6 users
|
| 11 |
+
INFO: 2025-12-09T21:19:45 - app.main: 💾 Cache & Sessions:
|
| 12 |
+
INFO: 2025-12-09T21:19:46 - app.services.otp_service: ✅ OTP Service initialized with Redis storage
|
| 13 |
+
INFO: 2025-12-09T21:19:47 - app.main: ✓ Redis: Connected
|
| 14 |
+
INFO: 2025-12-09T21:19:47 - app.main: 🔌 External Services:
|
| 15 |
+
INFO: 2025-12-09T21:19:47 - app.main: ✓ Cloudinary: Connected
|
| 16 |
+
INFO: 2025-12-09T21:19:47 - app.main: ✓ Resend: Configured
|
| 17 |
+
INFO: 2025-12-09T21:19:47 - app.main: ○ WASender: Disconnected
|
| 18 |
+
INFO: 2025-12-09T21:19:47 - app.main: ✓ Supabase: Connected | 6 buckets
|
| 19 |
+
INFO: 2025-12-09T21:19:47 - app.main: ⏰ Scheduler:
|
| 20 |
+
INFO: 2025-12-09T21:19:47 - apscheduler.scheduler: Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
| 21 |
+
INFO: 2025-12-09T21:19:47 - apscheduler.scheduler: Added job "Daily Field Agent Reconciliation" to job store "default"
|
| 22 |
+
INFO: 2025-12-09T21:19:47 - apscheduler.scheduler: Scheduler started
|
| 23 |
+
INFO: 2025-12-09T21:19:47 - app.tasks.scheduler: Reconciliation scheduler started
|
| 24 |
+
INFO: 2025-12-09T21:19:47 - app.main: ✓ Daily reconciliation scheduler started (runs at midnight)
|
| 25 |
+
INFO: 2025-12-09T21:19:47 - app.main: ============================================================
|
| 26 |
+
INFO: 2025-12-09T21:19:47 - app.main: ✅ Startup complete | Ready to serve requests
|
| 27 |
+
INFO: 2025-12-09T21:19:47 - app.main: ============================================================
|
| 28 |
INFO: Application startup complete.
|
| 29 |
INFO: Uvicorn running on http://0.0.0.0:7860 (Press CTRL+C to quit)
|
| 30 |
+
INFO: 10.16.13.79:9584 - "GET /health HTTP/1.1" 200 OK
|
| 31 |
+
INFO: 10.16.37.13:6060 - "GET /health HTTP/1.1" 200 OK
|
| 32 |
+
INFO: 10.16.37.13:1927 - "GET / HTTP/1.1" 200 OK
|
| 33 |
+
INFO: 2025-12-10T00:00:00 - apscheduler.executors.default: Running job "Daily Field Agent Reconciliation (trigger: cron[hour='0', minute='0'], next run at: 2025-12-10 00:00:00 UTC)" (scheduled at 2025-12-10 00:00:00+00:00)
|
| 34 |
+
INFO: 2025-12-10T00:00:00 - app.tasks.scheduler: Starting scheduled reconciliation for 2025-12-09
|
| 35 |
+
ERROR: 2025-12-10T00:00:00 - app.tasks.scheduler: Scheduled reconciliation failed: cannot import name 'get_async_session' from 'app.core.database' (/app/src/app/core/database.py)
|
| 36 |
+
Traceback (most recent call last):
|
| 37 |
+
File "/app/src/app/tasks/scheduler.py", line 69, in run_daily_reconciliation
|
| 38 |
+
asyncio.run(reconcile_all_projects(yesterday))
|
| 39 |
+
File "/usr/local/lib/python3.11/asyncio/runners.py", line 190, in run
|
| 40 |
+
return runner.run(main)
|
| 41 |
+
^^^^^^^^^^^^^^^^
|
| 42 |
+
File "/usr/local/lib/python3.11/asyncio/runners.py", line 118, in run
|
| 43 |
+
return self._loop.run_until_complete(task)
|
| 44 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 45 |
+
File "uvloop/loop.pyx", line 1518, in uvloop.loop.Loop.run_until_complete
|
| 46 |
+
File "/app/src/app/tasks/scheduler.py", line 84, in reconcile_all_projects
|
| 47 |
+
from app.core.database import get_async_session
|
| 48 |
+
ImportError: cannot import name 'get_async_session' from 'app.core.database' (/app/src/app/core/database.py)
|
| 49 |
+
INFO: 2025-12-10T00:00:00 - apscheduler.executors.default: Job "Daily Field Agent Reconciliation (trigger: cron[hour='0', minute='0'], next run at: 2025-12-11 00:00:00 UTC)" executed successfully
|
| 50 |
+
INFO: 2025-12-10T00:00:00 - app.tasks.scheduler: Job daily_reconciliation completed successfully
|
| 51 |
+
INFO: 2025-12-10T05:57:38 - app.core.supabase_auth: Session refreshed successfully
|
| 52 |
+
INFO: 2025-12-10T05:57:40 - app.api.v1.auth: ✅ Token refreshed successfully for: nadina73@nembors.com
|
| 53 |
+
INFO: 10.16.13.79:27643 - "POST /api/v1/auth/refresh-token HTTP/1.1" 200 OK
|
| 54 |
+
INFO: 2025-12-10T05:57:41 - app.api.deps: Checking active user: c5cf92be-4172-4fe2-af5c-f05d83b3a938, is_active: True, type: <class 'bool'>
|
| 55 |
+
INFO: 2025-12-10T05:57:41 - app.api.deps: User c5cf92be-4172-4fe2-af5c-f05d83b3a938 is active - proceeding
|
| 56 |
+
INFO: 10.16.13.79:27643 - "GET /api/v1/auth/me HTTP/1.1" 200 OK
|
| 57 |
+
INFO: 2025-12-10T05:57:42 - app.api.deps: Checking active user: c5cf92be-4172-4fe2-af5c-f05d83b3a938, is_active: True, type: <class 'bool'>
|
| 58 |
+
INFO: 2025-12-10T05:57:42 - app.api.deps: User c5cf92be-4172-4fe2-af5c-f05d83b3a938 is active - proceeding
|
| 59 |
+
INFO: 10.16.13.79:27643 - "GET /api/v1/auth/me/preferences/available-apps HTTP/1.1" 200 OK
|
| 60 |
+
INFO: 2025-12-10T05:57:42 - app.api.deps: Checking active user: c5cf92be-4172-4fe2-af5c-f05d83b3a938, is_active: True, type: <class 'bool'>
|
| 61 |
+
INFO: 2025-12-10T05:57:42 - app.api.deps: User c5cf92be-4172-4fe2-af5c-f05d83b3a938 is active - proceeding
|
| 62 |
+
INFO: 10.16.37.13:62543 - "GET /api/v1/auth/me/preferences HTTP/1.1" 200 OK
|
docs/features/timesheets/DEPLOYMENT_STEPS.md
ADDED
|
File without changes
|
docs/features/timesheets/README.md
DELETED
|
@@ -1,93 +0,0 @@
|
|
| 1 |
-
# Daily Reconciliation System
|
| 2 |
-
|
| 3 |
-
## Overview
|
| 4 |
-
|
| 5 |
-
The Daily Reconciliation System automatically aggregates field agent activity (ticket assignments and expenses) into daily timesheet records. This provides the foundation for performance tracking, payroll calculations, and operational insights.
|
| 6 |
-
|
| 7 |
-
## Key Features
|
| 8 |
-
|
| 9 |
-
- ✅ **Automated Daily Reconciliation**: Runs at midnight to process previous day's activity
|
| 10 |
-
- ✅ **Manual Triggers**: API endpoints for on-demand reconciliation
|
| 11 |
-
- ✅ **Anomaly Detection**: Identifies suspicious patterns in agent behavior
|
| 12 |
-
- ✅ **Audit Trail**: Complete history of all reconciliation runs
|
| 13 |
-
- ✅ **High Performance**: Processes 500 agents in <1 second
|
| 14 |
-
- ✅ **Transactional Integrity**: All-or-nothing with automatic rollback
|
| 15 |
-
- ✅ **Row-Level Security**: Proper RLS policies for data access control
|
| 16 |
-
|
| 17 |
-
## Architecture
|
| 18 |
-
|
| 19 |
-
```
|
| 20 |
-
ticket_assignments + ticket_expenses (source of truth)
|
| 21 |
-
↓
|
| 22 |
-
reconciliation_service (aggregation)
|
| 23 |
-
↓
|
| 24 |
-
timesheets (derived daily summaries)
|
| 25 |
-
```
|
| 26 |
-
|
| 27 |
-
## Quick Start
|
| 28 |
-
|
| 29 |
-
1. **Run Migration**: Execute `supabase/migrations/20241209_add_reconciliation_system.sql`
|
| 30 |
-
2. **Install Dependencies**: `pip install -r requirements.txt`
|
| 31 |
-
3. **Start Application**: `uvicorn app.main:app --reload`
|
| 32 |
-
4. **Test API**: See SETUP_GUIDE.md for examples
|
| 33 |
-
|
| 34 |
-
## Documentation
|
| 35 |
-
|
| 36 |
-
- **[RECONCILIATION_SYSTEM.md](./RECONCILIATION_SYSTEM.md)** - Complete technical specification
|
| 37 |
-
- **[SETUP_GUIDE.md](./SETUP_GUIDE.md)** - Installation and usage guide
|
| 38 |
-
|
| 39 |
-
## Database Tables
|
| 40 |
-
|
| 41 |
-
### `reconciliation_runs`
|
| 42 |
-
Audit trail for all reconciliation runs with execution metrics and results.
|
| 43 |
-
|
| 44 |
-
### `timesheets` (enhanced)
|
| 45 |
-
Daily summaries with new expense tracking columns:
|
| 46 |
-
- `total_expenses`
|
| 47 |
-
- `approved_expenses`
|
| 48 |
-
- `pending_expenses`
|
| 49 |
-
- `rejected_expenses`
|
| 50 |
-
- `expense_claims_count`
|
| 51 |
-
- `reconciliation_run_id`
|
| 52 |
-
- `last_reconciled_at`
|
| 53 |
-
|
| 54 |
-
## API Endpoints
|
| 55 |
-
|
| 56 |
-
- `POST /api/v1/reconciliation/run` - Trigger reconciliation
|
| 57 |
-
- `GET /api/v1/reconciliation/status/{run_id}` - Check status
|
| 58 |
-
- `GET /api/v1/reconciliation/report/{project_id}` - Get daily report
|
| 59 |
-
- `GET /api/v1/reconciliation/history/{project_id}` - View history
|
| 60 |
-
- `GET /api/v1/reconciliation/anomalies/{project_id}` - View anomalies
|
| 61 |
-
|
| 62 |
-
## Scheduler
|
| 63 |
-
|
| 64 |
-
Runs automatically at midnight (Africa/Nairobi timezone) using APScheduler.
|
| 65 |
-
|
| 66 |
-
## Performance
|
| 67 |
-
|
| 68 |
-
- **Target**: Process 500 agents in <30 seconds
|
| 69 |
-
- **Actual**: ~800ms for 500 agents
|
| 70 |
-
- **Query Time**: ~150ms
|
| 71 |
-
- **Upsert Time**: ~300ms
|
| 72 |
-
|
| 73 |
-
## Anomaly Detection
|
| 74 |
-
|
| 75 |
-
Automatically detects:
|
| 76 |
-
- Expenses without completed tickets
|
| 77 |
-
- Unusually high productivity
|
| 78 |
-
- High rejection rates
|
| 79 |
-
- Unusually high expenses
|
| 80 |
-
- Zero action taken on assignments
|
| 81 |
-
- High cancellation rates
|
| 82 |
-
|
| 83 |
-
## Future Enhancements
|
| 84 |
-
|
| 85 |
-
- Real-time reconciliation (as tickets complete)
|
| 86 |
-
- Machine learning for anomaly detection
|
| 87 |
-
- Advanced reporting and analytics
|
| 88 |
-
- Configurable approval workflows
|
| 89 |
-
- Multi-project parallel processing
|
| 90 |
-
|
| 91 |
-
## Support
|
| 92 |
-
|
| 93 |
-
For detailed information, see the full technical specification in RECONCILIATION_SYSTEM.md.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/features/timesheets/RECONCILIATION_SYSTEM.md
CHANGED
|
@@ -1,1709 +1,346 @@
|
|
| 1 |
# Daily Reconciliation System - Technical Specification
|
| 2 |
|
| 3 |
-
**Version:**
|
| 4 |
-
**Status:**
|
| 5 |
-
**
|
| 6 |
-
**Last Updated:** 2024-12-09
|
| 7 |
|
| 8 |
---
|
| 9 |
|
| 10 |
-
##
|
| 11 |
-
|
| 12 |
-
1. [Executive Summary](#executive-summary)
|
| 13 |
-
2. [System Overview](#system-overview)
|
| 14 |
-
3. [Architecture Design](#architecture-design)
|
| 15 |
-
4. [Database Schema](#database-schema)
|
| 16 |
-
5. [Core Service Implementation](#core-service-implementation)
|
| 17 |
-
6. [Scheduler Configuration](#scheduler-configuration)
|
| 18 |
-
7. [API Endpoints](#api-endpoints)
|
| 19 |
-
8. [Notification System](#notification-system)
|
| 20 |
-
9. [Performance Optimization](#performance-optimization)
|
| 21 |
-
10. [Error Handling & Rollback](#error-handling--rollback)
|
| 22 |
-
11. [Testing Strategy](#testing-strategy)
|
| 23 |
-
12. [Deployment Guide](#deployment-guide)
|
| 24 |
-
13. [Monitoring & Observability](#monitoring--observability)
|
| 25 |
-
14. [Future Enhancements](#future-enhancements)
|
| 26 |
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
## Executive Summary
|
| 30 |
-
|
| 31 |
-
### Purpose
|
| 32 |
-
The Daily Reconciliation System automatically aggregates field agent activity (ticket assignments and expenses) into daily timesheet records. This provides:
|
| 33 |
-
- **Performance tracking**: Monitor agent productivity and work patterns
|
| 34 |
-
- **Payroll foundation**: Accurate data for compensation calculations
|
| 35 |
-
- **Operational insights**: Real-time visibility into field operations
|
| 36 |
-
- **Audit trail**: Complete history of daily reconciliation runs
|
| 37 |
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
-
|
| 41 |
-
- Support manual/on-demand reconciliation
|
| 42 |
-
- Handle 500+ field agents per project efficiently
|
| 43 |
-
- Provide partial and historical reconciliation
|
| 44 |
-
- Detect anomalies in agent activity
|
| 45 |
-
- Generate daily summary reports
|
| 46 |
-
- Maintain complete audit trail
|
| 47 |
-
- Support configurable approval workflows (MVP: auto-approve)
|
| 48 |
-
|
| 49 |
-
### Success Metrics
|
| 50 |
-
- **Performance**: Process 500 agents in <30 seconds
|
| 51 |
-
- **Accuracy**: 100% data consistency with source tables
|
| 52 |
-
- **Reliability**: 99.9% successful reconciliation runs
|
| 53 |
-
- **Observability**: Complete visibility into reconciliation status
|
| 54 |
|
| 55 |
---
|
| 56 |
|
| 57 |
-
##
|
| 58 |
|
| 59 |
-
###
|
| 60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
```
|
| 62 |
-
┌─────────────────────────────────────────────────────────────┐
|
| 63 |
-
│ SOURCE OF TRUTH │
|
| 64 |
-
├─────────────────────────────────────────────────────────────┤
|
| 65 |
-
│ ticket_assignments ticket_expenses │
|
| 66 |
-
│ - User activity - Money spent │
|
| 67 |
-
│ - Assignment actions - Approval status │
|
| 68 |
-
│ - Timestamps - Payment status │
|
| 69 |
-
└──────────────────┬──────────────────────────────────────────┘
|
| 70 |
-
│
|
| 71 |
-
▼
|
| 72 |
-
┌─────────────────────┐
|
| 73 |
-
│ RECONCILIATION │
|
| 74 |
-
│ SERVICE │
|
| 75 |
-
│ - Aggregate data │
|
| 76 |
-
│ - Detect anomalies │
|
| 77 |
-
│ - Generate reports │
|
| 78 |
-
└─────────┬───────────┘
|
| 79 |
-
│
|
| 80 |
-
▼
|
| 81 |
-
┌─────────────────────────────────────────────────────────────┐
|
| 82 |
-
│ DERIVED DATA │
|
| 83 |
-
├─────────────────────────────────────────────────────────────┤
|
| 84 |
-
│ timesheets reconciliation_runs │
|
| 85 |
-
│ - Daily summaries - Audit trail │
|
| 86 |
-
│ - Performance metrics - Execution logs │
|
| 87 |
-
│ - Expense totals - Anomaly reports │
|
| 88 |
-
└─────────────────────────────────────────────────────────────┘
|
| 89 |
-
```
|
| 90 |
-
|
| 91 |
-
### Key Design Principles
|
| 92 |
-
|
| 93 |
-
1. **Single Source of Truth**: `ticket_assignments` and `ticket_expenses` are authoritative
|
| 94 |
-
2. **Idempotent Operations**: Safe to re-run reconciliation multiple times
|
| 95 |
-
3. **Transactional Integrity**: All-or-nothing with automatic rollback on failure
|
| 96 |
-
4. **Efficient Aggregation**: One query per project, not per agent
|
| 97 |
-
5. **Audit Trail**: Every reconciliation run is logged with full details
|
| 98 |
-
6. **Observable**: Clear status tracking and error reporting
|
| 99 |
-
7. **Scalable**: Designed to handle 500+ agents efficiently
|
| 100 |
-
|
| 101 |
-
---
|
| 102 |
-
|
| 103 |
-
## Architecture Design
|
| 104 |
|
| 105 |
-
###
|
| 106 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
```
|
| 108 |
-
┌──────────────────────────────────────────────────────────────┐
|
| 109 |
-
│ APPLICATION LAYER │
|
| 110 |
-
├──────────────────────────────────────────────────────────────┤
|
| 111 |
-
│ │
|
| 112 |
-
│ ┌────────────────┐ ┌──────────────────┐ │
|
| 113 |
-
│ │ Scheduler │────────▶│ Reconciliation │ │
|
| 114 |
-
│ │ (APScheduler) │ │ Service │ │
|
| 115 |
-
│ └────────────────┘ └────────┬─────────┘ │
|
| 116 |
-
│ │ │ │
|
| 117 |
-
│ │ │ │
|
| 118 |
-
│ ┌──────▼──────────┐ ┌───────▼──────────┐ │
|
| 119 |
-
│ │ API Layer │────────▶│ Notification │ │
|
| 120 |
-
│ │ (FastAPI) │ │ Service │ │
|
| 121 |
-
│ └─────────────────┘ └──────────────────┘ │
|
| 122 |
-
│ │
|
| 123 |
-
└──────────────────────┬───────────────────────────────────────┘
|
| 124 |
-
│
|
| 125 |
-
▼
|
| 126 |
-
┌──────────────────────────────────────────────────────────────┐
|
| 127 |
-
│ DATABASE LAYER │
|
| 128 |
-
├──────────────────────────────────────────────────────────────┤
|
| 129 |
-
│ PostgreSQL 15+ with PostGIS │
|
| 130 |
-
│ - Transactional support │
|
| 131 |
-
│ - JSONB for flexible data │
|
| 132 |
-
│ - Optimized indexes │
|
| 133 |
-
└──────────────────────────────────────────────────────────────┘
|
| 134 |
-
```
|
| 135 |
-
|
| 136 |
-
### Technology Stack
|
| 137 |
|
| 138 |
-
|
| 139 |
-
- **Database**: PostgreSQL 15+ with SQLAlchemy 2.0
|
| 140 |
-
- **Scheduler**: APScheduler (BackgroundScheduler)
|
| 141 |
-
- **Notifications**: Existing notification service
|
| 142 |
-
- **Monitoring**: Structured logging + metrics
|
| 143 |
-
|
| 144 |
-
---
|
| 145 |
-
|
| 146 |
-
## Database Schema
|
| 147 |
-
|
| 148 |
-
### New Table: `reconciliation_runs`
|
| 149 |
|
| 150 |
```sql
|
| 151 |
CREATE TABLE reconciliation_runs (
|
| 152 |
-
id UUID PRIMARY KEY
|
| 153 |
-
project_id UUID
|
| 154 |
-
reconciliation_date DATE
|
| 155 |
-
run_type TEXT
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
-- Scope (for partial reconciliation)
|
| 164 |
-
user_ids UUID[], -- NULL = all users in project
|
| 165 |
-
|
| 166 |
-
-- Results
|
| 167 |
-
agents_processed INTEGER DEFAULT 0,
|
| 168 |
-
timesheets_created INTEGER DEFAULT 0,
|
| 169 |
-
timesheets_updated INTEGER DEFAULT 0,
|
| 170 |
-
assignments_processed INTEGER DEFAULT 0,
|
| 171 |
-
expenses_processed INTEGER DEFAULT 0,
|
| 172 |
-
|
| 173 |
-
-- Performance metrics
|
| 174 |
execution_time_ms INTEGER,
|
| 175 |
-
query_time_ms INTEGER,
|
| 176 |
|
| 177 |
-
--
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
-- "total_expenses": 125000.00,
|
| 183 |
-
-- "avg_tickets_per_agent": 3.0
|
| 184 |
-
-- }
|
| 185 |
-
|
| 186 |
-
anomalies_detected JSONB,
|
| 187 |
-
-- Example: [
|
| 188 |
-
-- {
|
| 189 |
-
-- "type": "expenses_without_completion",
|
| 190 |
-
-- "user_id": "uuid",
|
| 191 |
-
-- "severity": "medium",
|
| 192 |
-
-- "details": "..."
|
| 193 |
-
-- }
|
| 194 |
-
-- ]
|
| 195 |
-
|
| 196 |
-
-- Error handling
|
| 197 |
-
error_message TEXT,
|
| 198 |
-
error_details JSONB,
|
| 199 |
|
| 200 |
-- Audit
|
| 201 |
-
triggered_by_user_id UUID
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
-- Prevent concurrent runs for same project/date
|
| 205 |
-
CONSTRAINT unique_active_run UNIQUE (project_id, reconciliation_date, status)
|
| 206 |
-
WHERE status = 'running'
|
| 207 |
);
|
| 208 |
-
|
| 209 |
-
-- Indexes for performance
|
| 210 |
-
CREATE INDEX idx_reconciliation_runs_project_date
|
| 211 |
-
ON reconciliation_runs(project_id, reconciliation_date DESC);
|
| 212 |
-
|
| 213 |
-
CREATE INDEX idx_reconciliation_runs_status
|
| 214 |
-
ON reconciliation_runs(status, started_at DESC);
|
| 215 |
-
|
| 216 |
-
CREATE INDEX idx_reconciliation_runs_summary_gin
|
| 217 |
-
ON reconciliation_runs USING gin(summary_stats);
|
| 218 |
-
|
| 219 |
-
CREATE INDEX idx_reconciliation_runs_anomalies_gin
|
| 220 |
-
ON reconciliation_runs USING gin(anomalies_detected);
|
| 221 |
```
|
| 222 |
|
| 223 |
-
|
| 224 |
|
| 225 |
-
|
| 226 |
-
-- Add expense tracking columns
|
| 227 |
-
ALTER TABLE timesheets
|
| 228 |
-
ADD COLUMN IF NOT EXISTS total_expenses DECIMAL(12,2) DEFAULT 0,
|
| 229 |
-
ADD COLUMN IF NOT EXISTS approved_expenses DECIMAL(12,2) DEFAULT 0,
|
| 230 |
-
ADD COLUMN IF NOT EXISTS pending_expenses DECIMAL(12,2) DEFAULT 0,
|
| 231 |
-
ADD COLUMN IF NOT EXISTS rejected_expenses DECIMAL(12,2) DEFAULT 0,
|
| 232 |
-
ADD COLUMN IF NOT EXISTS expense_claims_count INTEGER DEFAULT 0;
|
| 233 |
-
|
| 234 |
-
-- Link to reconciliation run (audit trail)
|
| 235 |
-
ALTER TABLE timesheets
|
| 236 |
-
ADD COLUMN IF NOT EXISTS reconciliation_run_id UUID REFERENCES reconciliation_runs(id),
|
| 237 |
-
ADD COLUMN IF NOT EXISTS last_reconciled_at TIMESTAMP;
|
| 238 |
-
|
| 239 |
-
-- Add constraints
|
| 240 |
-
ALTER TABLE timesheets
|
| 241 |
-
ADD CONSTRAINT chk_positive_expenses CHECK (
|
| 242 |
-
total_expenses >= 0 AND
|
| 243 |
-
approved_expenses >= 0 AND
|
| 244 |
-
pending_expenses >= 0 AND
|
| 245 |
-
rejected_expenses >= 0
|
| 246 |
-
);
|
| 247 |
-
|
| 248 |
-
-- Ensure unique timesheet per user per day
|
| 249 |
-
CREATE UNIQUE INDEX IF NOT EXISTS idx_timesheets_user_date
|
| 250 |
-
ON timesheets(user_id, work_date)
|
| 251 |
-
WHERE deleted_at IS NULL;
|
| 252 |
-
```
|
| 253 |
|
| 254 |
-
|
|
|
|
|
|
|
| 255 |
|
| 256 |
-
|
| 257 |
-
-- Optimize ticket_assignments queries
|
| 258 |
-
CREATE INDEX IF NOT EXISTS idx_ticket_assignments_reconciliation
|
| 259 |
-
ON ticket_assignments(user_id, assigned_at, action, ended_at)
|
| 260 |
-
WHERE deleted_at IS NULL;
|
| 261 |
-
|
| 262 |
-
CREATE INDEX IF NOT EXISTS idx_ticket_assignments_project_date
|
| 263 |
-
ON ticket_assignments(ticket_id, assigned_at)
|
| 264 |
-
WHERE deleted_at IS NULL;
|
| 265 |
-
|
| 266 |
-
-- Optimize ticket_expenses queries
|
| 267 |
-
CREATE INDEX IF NOT EXISTS idx_ticket_expenses_reconciliation
|
| 268 |
-
ON ticket_expenses(incurred_by_user_id, created_at, is_approved, total_cost)
|
| 269 |
-
WHERE deleted_at IS NULL;
|
| 270 |
-
|
| 271 |
-
CREATE INDEX IF NOT EXISTS idx_ticket_expenses_assignment
|
| 272 |
-
ON ticket_expenses(ticket_assignment_id, created_at)
|
| 273 |
-
WHERE deleted_at IS NULL;
|
| 274 |
-
|
| 275 |
-
-- Optimize tickets queries
|
| 276 |
-
CREATE INDEX IF NOT EXISTS idx_tickets_project_status
|
| 277 |
-
ON tickets(project_id, status, completed_at)
|
| 278 |
-
WHERE deleted_at IS NULL;
|
| 279 |
-
```
|
| 280 |
|
| 281 |
---
|
| 282 |
|
| 283 |
-
##
|
| 284 |
|
| 285 |
-
###
|
| 286 |
-
|
| 287 |
-
```
|
| 288 |
-
src/app/services/reconciliation/
|
| 289 |
-
├── __init__.py
|
| 290 |
-
├── reconciliation_service.py # Main service
|
| 291 |
-
├── aggregation_queries.py # SQL queries
|
| 292 |
-
├── anomaly_detector.py # Anomaly detection logic
|
| 293 |
-
└── models.py # Pydantic models
|
| 294 |
-
```
|
| 295 |
|
| 296 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 297 |
|
|
|
|
| 298 |
|
| 299 |
```python
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
import asyncio
|
| 311 |
-
from sqlalchemy import text
|
| 312 |
-
from sqlalchemy.ext.asyncio import AsyncSession
|
| 313 |
-
from app.core.logging import get_logger
|
| 314 |
-
|
| 315 |
-
logger = get_logger(__name__)
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
class ReconciliationService:
|
| 319 |
-
"""
|
| 320 |
-
Handles daily reconciliation of field agent activity.
|
| 321 |
-
|
| 322 |
-
Key Features:
|
| 323 |
-
- Idempotent: Safe to re-run multiple times
|
| 324 |
-
- Transactional: All-or-nothing with automatic rollback
|
| 325 |
-
- Efficient: One aggregation query per project
|
| 326 |
-
- Observable: Complete audit trail and metrics
|
| 327 |
-
"""
|
| 328 |
-
|
| 329 |
-
def __init__(self, db: AsyncSession):
|
| 330 |
-
self.db = db
|
| 331 |
-
|
| 332 |
-
async def reconcile_project_day(
|
| 333 |
-
self,
|
| 334 |
-
project_id: UUID,
|
| 335 |
-
target_date: date,
|
| 336 |
-
user_ids: Optional[List[UUID]] = None,
|
| 337 |
-
triggered_by: Optional[UUID] = None,
|
| 338 |
-
run_type: str = "scheduled"
|
| 339 |
-
) -> UUID:
|
| 340 |
-
"""
|
| 341 |
-
Main entry point: Reconcile all field agent activity for a project/date.
|
| 342 |
-
|
| 343 |
-
Args:
|
| 344 |
-
project_id: Project to reconcile
|
| 345 |
-
target_date: Date to reconcile (usually yesterday)
|
| 346 |
-
user_ids: Optional list of specific users (for partial reconciliation)
|
| 347 |
-
triggered_by: User who triggered (None for scheduled)
|
| 348 |
-
run_type: 'scheduled', 'manual', 'partial', 'historical'
|
| 349 |
-
|
| 350 |
-
Returns:
|
| 351 |
-
UUID of reconciliation_run record
|
| 352 |
-
|
| 353 |
-
Raises:
|
| 354 |
-
ReconciliationError: If reconciliation fails
|
| 355 |
-
ConcurrentRunError: If another run is active for same project/date
|
| 356 |
-
"""
|
| 357 |
-
start_time = datetime.utcnow()
|
| 358 |
-
|
| 359 |
-
logger.info(
|
| 360 |
-
f"Starting reconciliation: project={project_id}, "
|
| 361 |
-
f"date={target_date}, type={run_type}"
|
| 362 |
-
)
|
| 363 |
-
|
| 364 |
-
# Step 1: Create run record (also checks for concurrent runs)
|
| 365 |
-
run_id = await self._create_run(
|
| 366 |
-
project_id=project_id,
|
| 367 |
-
target_date=target_date,
|
| 368 |
-
user_ids=user_ids,
|
| 369 |
-
triggered_by=triggered_by,
|
| 370 |
-
run_type=run_type
|
| 371 |
-
)
|
| 372 |
-
|
| 373 |
-
try:
|
| 374 |
-
# Step 2: Execute reconciliation in transaction
|
| 375 |
-
async with self.db.begin():
|
| 376 |
-
|
| 377 |
-
# 2a. Aggregate all agent activity (ONE QUERY)
|
| 378 |
-
query_start = datetime.utcnow()
|
| 379 |
-
agent_stats = await self._aggregate_agent_activity(
|
| 380 |
-
project_id=project_id,
|
| 381 |
-
target_date=target_date,
|
| 382 |
-
user_ids=user_ids
|
| 383 |
-
)
|
| 384 |
-
query_time = (datetime.utcnow() - query_start).total_seconds() * 1000
|
| 385 |
-
|
| 386 |
-
logger.info(f"Aggregated {len(agent_stats)} agents in {query_time:.0f}ms")
|
| 387 |
-
|
| 388 |
-
# 2b. Bulk upsert timesheets
|
| 389 |
-
created, updated = await self._bulk_upsert_timesheets(
|
| 390 |
-
agent_stats=agent_stats,
|
| 391 |
-
run_id=run_id,
|
| 392 |
-
target_date=target_date,
|
| 393 |
-
project_id=project_id
|
| 394 |
-
)
|
| 395 |
-
|
| 396 |
-
# 2c. Detect anomalies
|
| 397 |
-
anomalies = await self._detect_anomalies(
|
| 398 |
-
agent_stats=agent_stats,
|
| 399 |
-
project_id=project_id,
|
| 400 |
-
target_date=target_date
|
| 401 |
-
)
|
| 402 |
-
|
| 403 |
-
# 2d. Calculate summary stats
|
| 404 |
-
summary = self._calculate_summary(agent_stats)
|
| 405 |
-
|
| 406 |
-
# 2e. Mark run as complete
|
| 407 |
-
execution_time = (datetime.utcnow() - start_time).total_seconds() * 1000
|
| 408 |
-
await self._complete_run(
|
| 409 |
-
run_id=run_id,
|
| 410 |
-
agents_processed=len(agent_stats),
|
| 411 |
-
timesheets_created=created,
|
| 412 |
-
timesheets_updated=updated,
|
| 413 |
-
assignments_processed=sum(s["tickets_assigned"] for s in agent_stats),
|
| 414 |
-
expenses_processed=sum(s["expense_claims_count"] for s in agent_stats),
|
| 415 |
-
summary_stats=summary,
|
| 416 |
-
anomalies=anomalies,
|
| 417 |
-
execution_time_ms=int(execution_time),
|
| 418 |
-
query_time_ms=int(query_time)
|
| 419 |
-
)
|
| 420 |
-
|
| 421 |
-
# Transaction commits here
|
| 422 |
-
|
| 423 |
-
logger.info(
|
| 424 |
-
f"Reconciliation completed: run_id={run_id}, "
|
| 425 |
-
f"agents={len(agent_stats)}, time={execution_time:.0f}ms"
|
| 426 |
-
)
|
| 427 |
-
|
| 428 |
-
# Step 3: Send notifications (outside transaction)
|
| 429 |
-
await self._send_reconciliation_report(
|
| 430 |
-
run_id=run_id,
|
| 431 |
-
project_id=project_id,
|
| 432 |
-
target_date=target_date,
|
| 433 |
-
summary=summary,
|
| 434 |
-
anomalies=anomalies
|
| 435 |
-
)
|
| 436 |
-
|
| 437 |
-
return run_id
|
| 438 |
-
|
| 439 |
-
except Exception as e:
|
| 440 |
-
# Rollback happens automatically
|
| 441 |
-
logger.error(f"Reconciliation failed: {str(e)}", exc_info=True)
|
| 442 |
-
await self._fail_run(run_id, str(e), {"traceback": str(e)})
|
| 443 |
-
raise ReconciliationError(f"Reconciliation failed: {str(e)}") from e
|
| 444 |
-
|
| 445 |
-
async def _create_run(
|
| 446 |
-
self,
|
| 447 |
-
project_id: UUID,
|
| 448 |
-
target_date: date,
|
| 449 |
-
user_ids: Optional[List[UUID]],
|
| 450 |
-
triggered_by: Optional[UUID],
|
| 451 |
-
run_type: str
|
| 452 |
-
) -> UUID:
|
| 453 |
-
"""Create reconciliation run record."""
|
| 454 |
-
|
| 455 |
-
query = text("""
|
| 456 |
-
INSERT INTO reconciliation_runs (
|
| 457 |
-
project_id, reconciliation_date, run_type,
|
| 458 |
-
user_ids, triggered_by_user_id, status
|
| 459 |
-
) VALUES (
|
| 460 |
-
:project_id, :target_date, :run_type,
|
| 461 |
-
:user_ids, :triggered_by, 'running'
|
| 462 |
-
)
|
| 463 |
-
RETURNING id
|
| 464 |
-
""")
|
| 465 |
-
|
| 466 |
-
try:
|
| 467 |
-
result = await self.db.execute(query, {
|
| 468 |
-
"project_id": str(project_id),
|
| 469 |
-
"target_date": target_date,
|
| 470 |
-
"run_type": run_type,
|
| 471 |
-
"user_ids": [str(uid) for uid in user_ids] if user_ids else None,
|
| 472 |
-
"triggered_by": str(triggered_by) if triggered_by else None
|
| 473 |
-
})
|
| 474 |
-
run_id = result.scalar_one()
|
| 475 |
-
await self.db.commit()
|
| 476 |
-
return run_id
|
| 477 |
-
|
| 478 |
-
except Exception as e:
|
| 479 |
-
if "unique_active_run" in str(e):
|
| 480 |
-
raise ConcurrentRunError(
|
| 481 |
-
f"Another reconciliation is already running for "
|
| 482 |
-
f"project {project_id} on {target_date}"
|
| 483 |
-
)
|
| 484 |
-
raise
|
| 485 |
-
|
| 486 |
-
async def _aggregate_agent_activity(
|
| 487 |
-
self,
|
| 488 |
-
project_id: UUID,
|
| 489 |
-
target_date: date,
|
| 490 |
-
user_ids: Optional[List[UUID]] = None
|
| 491 |
-
) -> List[Dict[str, Any]]:
|
| 492 |
-
"""
|
| 493 |
-
THE CRITICAL QUERY: Aggregate all agent activity in one efficient query.
|
| 494 |
-
|
| 495 |
-
This query:
|
| 496 |
-
- Counts assignments by action type (accepted, rejected, dropped, etc.)
|
| 497 |
-
- Sums expenses by approval status (approved, pending, rejected)
|
| 498 |
-
- Handles multiple assignments per ticket correctly
|
| 499 |
-
- Properly attributes work to the correct date
|
| 500 |
-
- Uses FULL OUTER JOIN to catch agents with only expenses or only assignments
|
| 501 |
-
|
| 502 |
-
Performance: ~100-200ms for 500 agents with proper indexes
|
| 503 |
-
"""
|
| 504 |
-
|
| 505 |
-
user_filter = ""
|
| 506 |
-
if user_ids:
|
| 507 |
-
user_filter = "AND ta.user_id = ANY(:user_ids)"
|
| 508 |
-
|
| 509 |
-
query = text(f"""
|
| 510 |
-
WITH daily_assignments AS (
|
| 511 |
-
-- All assignments for this project/date
|
| 512 |
-
SELECT
|
| 513 |
-
ta.id as assignment_id,
|
| 514 |
-
ta.user_id,
|
| 515 |
-
ta.ticket_id,
|
| 516 |
-
ta.action,
|
| 517 |
-
ta.assigned_at,
|
| 518 |
-
ta.ended_at,
|
| 519 |
-
t.status as ticket_status,
|
| 520 |
-
t.completed_at as ticket_completed_at
|
| 521 |
-
FROM ticket_assignments ta
|
| 522 |
-
JOIN tickets t ON ta.ticket_id = t.id
|
| 523 |
-
WHERE t.project_id = :project_id
|
| 524 |
-
AND t.deleted_at IS NULL
|
| 525 |
-
AND ta.deleted_at IS NULL
|
| 526 |
-
AND DATE(ta.assigned_at) = :target_date
|
| 527 |
-
{user_filter}
|
| 528 |
-
),
|
| 529 |
-
daily_expenses AS (
|
| 530 |
-
-- All expenses for this project/date
|
| 531 |
-
SELECT
|
| 532 |
-
te.ticket_assignment_id,
|
| 533 |
-
te.incurred_by_user_id as user_id,
|
| 534 |
-
te.total_cost,
|
| 535 |
-
te.is_approved,
|
| 536 |
-
te.rejected_at,
|
| 537 |
-
CASE
|
| 538 |
-
WHEN te.is_approved = TRUE THEN 'approved'
|
| 539 |
-
WHEN te.rejected_at IS NOT NULL THEN 'rejected'
|
| 540 |
-
ELSE 'pending'
|
| 541 |
-
END as approval_status
|
| 542 |
-
FROM ticket_expenses te
|
| 543 |
-
JOIN ticket_assignments ta ON te.ticket_assignment_id = ta.id
|
| 544 |
-
JOIN tickets t ON ta.ticket_id = t.id
|
| 545 |
-
WHERE t.project_id = :project_id
|
| 546 |
-
AND te.deleted_at IS NULL
|
| 547 |
-
AND DATE(te.created_at) = :target_date
|
| 548 |
-
{user_filter.replace('ta.user_id', 'te.incurred_by_user_id')}
|
| 549 |
-
)
|
| 550 |
-
SELECT
|
| 551 |
-
COALESCE(da.user_id, de.user_id) as user_id,
|
| 552 |
-
|
| 553 |
-
-- Assignment counts by action type
|
| 554 |
-
COUNT(DISTINCT da.assignment_id) as tickets_assigned,
|
| 555 |
-
|
| 556 |
-
COUNT(DISTINCT da.assignment_id) FILTER (
|
| 557 |
-
WHERE da.action = 'accepted'
|
| 558 |
-
) as tickets_accepted,
|
| 559 |
-
|
| 560 |
-
COUNT(DISTINCT da.ticket_id) FILTER (
|
| 561 |
-
WHERE da.action = 'accepted'
|
| 562 |
-
AND da.ticket_status = 'completed'
|
| 563 |
-
AND DATE(da.ended_at) = :target_date
|
| 564 |
-
) as tickets_completed,
|
| 565 |
-
|
| 566 |
-
COUNT(DISTINCT da.assignment_id) FILTER (
|
| 567 |
-
WHERE da.action = 'rejected'
|
| 568 |
-
) as tickets_rejected,
|
| 569 |
-
|
| 570 |
-
COUNT(DISTINCT da.assignment_id) FILTER (
|
| 571 |
-
WHERE da.action IN ('dropped', 'unassigned')
|
| 572 |
-
) as tickets_cancelled,
|
| 573 |
-
|
| 574 |
-
COUNT(DISTINCT da.assignment_id) FILTER (
|
| 575 |
-
WHERE da.action = 'reassigned'
|
| 576 |
-
) as tickets_rescheduled,
|
| 577 |
-
|
| 578 |
-
-- Expense aggregations by approval status
|
| 579 |
-
COALESCE(SUM(de.total_cost), 0) as total_expenses,
|
| 580 |
-
COALESCE(SUM(de.total_cost) FILTER (
|
| 581 |
-
WHERE de.approval_status = 'approved'
|
| 582 |
-
), 0) as approved_expenses,
|
| 583 |
-
COALESCE(SUM(de.total_cost) FILTER (
|
| 584 |
-
WHERE de.approval_status = 'pending'
|
| 585 |
-
), 0) as pending_expenses,
|
| 586 |
-
COALESCE(SUM(de.total_cost) FILTER (
|
| 587 |
-
WHERE de.approval_status = 'rejected'
|
| 588 |
-
), 0) as rejected_expenses,
|
| 589 |
-
COUNT(DISTINCT de.ticket_assignment_id) as expense_claims_count,
|
| 590 |
-
|
| 591 |
-
-- Metadata for anomaly detection
|
| 592 |
-
ARRAY_AGG(DISTINCT da.assignment_id) FILTER (
|
| 593 |
-
WHERE da.assignment_id IS NOT NULL
|
| 594 |
-
) as assignment_ids,
|
| 595 |
-
ARRAY_AGG(DISTINCT de.ticket_assignment_id) FILTER (
|
| 596 |
-
WHERE de.ticket_assignment_id IS NOT NULL
|
| 597 |
-
) as expense_assignment_ids
|
| 598 |
-
|
| 599 |
-
FROM daily_assignments da
|
| 600 |
-
FULL OUTER JOIN daily_expenses de ON da.user_id = de.user_id
|
| 601 |
-
GROUP BY COALESCE(da.user_id, de.user_id)
|
| 602 |
-
HAVING COUNT(DISTINCT da.assignment_id) > 0
|
| 603 |
-
OR COUNT(DISTINCT de.ticket_assignment_id) > 0
|
| 604 |
-
ORDER BY tickets_completed DESC
|
| 605 |
-
""")
|
| 606 |
-
|
| 607 |
-
params = {
|
| 608 |
-
"project_id": str(project_id),
|
| 609 |
-
"target_date": target_date
|
| 610 |
-
}
|
| 611 |
-
if user_ids:
|
| 612 |
-
params["user_ids"] = [str(uid) for uid in user_ids]
|
| 613 |
-
|
| 614 |
-
result = await self.db.execute(query, params)
|
| 615 |
-
return [dict(row._mapping) for row in result.fetchall()]
|
| 616 |
-
|
| 617 |
-
async def _bulk_upsert_timesheets(
|
| 618 |
-
self,
|
| 619 |
-
agent_stats: List[Dict[str, Any]],
|
| 620 |
-
run_id: UUID,
|
| 621 |
-
target_date: date,
|
| 622 |
-
project_id: UUID
|
| 623 |
-
) -> tuple[int, int]:
|
| 624 |
-
"""
|
| 625 |
-
Bulk upsert timesheets using PostgreSQL's ON CONFLICT.
|
| 626 |
-
|
| 627 |
-
Returns:
|
| 628 |
-
(created_count, updated_count)
|
| 629 |
-
"""
|
| 630 |
-
|
| 631 |
-
if not agent_stats:
|
| 632 |
-
return (0, 0)
|
| 633 |
-
|
| 634 |
-
query = text("""
|
| 635 |
-
INSERT INTO timesheets (
|
| 636 |
-
user_id, project_id, work_date,
|
| 637 |
-
tickets_assigned, tickets_completed, tickets_rejected,
|
| 638 |
-
tickets_cancelled, tickets_rescheduled,
|
| 639 |
-
total_expenses, approved_expenses, pending_expenses, rejected_expenses,
|
| 640 |
-
expense_claims_count,
|
| 641 |
-
reconciliation_run_id, last_reconciled_at,
|
| 642 |
-
status, created_at, updated_at
|
| 643 |
-
) VALUES (
|
| 644 |
-
:user_id, :project_id, :work_date,
|
| 645 |
-
:tickets_assigned, :tickets_completed, :tickets_rejected,
|
| 646 |
-
:tickets_cancelled, :tickets_rescheduled,
|
| 647 |
-
:total_expenses, :approved_expenses, :pending_expenses, :rejected_expenses,
|
| 648 |
-
:expense_claims_count,
|
| 649 |
-
:reconciliation_run_id, NOW(),
|
| 650 |
-
'present', NOW(), NOW()
|
| 651 |
-
)
|
| 652 |
-
ON CONFLICT (user_id, work_date)
|
| 653 |
-
DO UPDATE SET
|
| 654 |
-
tickets_assigned = EXCLUDED.tickets_assigned,
|
| 655 |
-
tickets_completed = EXCLUDED.tickets_completed,
|
| 656 |
-
tickets_rejected = EXCLUDED.tickets_rejected,
|
| 657 |
-
tickets_cancelled = EXCLUDED.tickets_cancelled,
|
| 658 |
-
tickets_rescheduled = EXCLUDED.tickets_rescheduled,
|
| 659 |
-
total_expenses = EXCLUDED.total_expenses,
|
| 660 |
-
approved_expenses = EXCLUDED.approved_expenses,
|
| 661 |
-
pending_expenses = EXCLUDED.pending_expenses,
|
| 662 |
-
rejected_expenses = EXCLUDED.rejected_expenses,
|
| 663 |
-
expense_claims_count = EXCLUDED.expense_claims_count,
|
| 664 |
-
reconciliation_run_id = EXCLUDED.reconciliation_run_id,
|
| 665 |
-
last_reconciled_at = NOW(),
|
| 666 |
-
updated_at = NOW()
|
| 667 |
-
RETURNING (xmax = 0) AS inserted
|
| 668 |
-
""")
|
| 669 |
-
|
| 670 |
-
# Execute bulk upsert
|
| 671 |
-
results = []
|
| 672 |
-
for stats in agent_stats:
|
| 673 |
-
result = await self.db.execute(query, {
|
| 674 |
-
"user_id": str(stats["user_id"]),
|
| 675 |
-
"project_id": str(project_id),
|
| 676 |
-
"work_date": target_date,
|
| 677 |
-
"tickets_assigned": stats["tickets_assigned"],
|
| 678 |
-
"tickets_completed": stats["tickets_completed"],
|
| 679 |
-
"tickets_rejected": stats["tickets_rejected"],
|
| 680 |
-
"tickets_cancelled": stats["tickets_cancelled"],
|
| 681 |
-
"tickets_rescheduled": stats["tickets_rescheduled"],
|
| 682 |
-
"total_expenses": float(stats["total_expenses"]),
|
| 683 |
-
"approved_expenses": float(stats["approved_expenses"]),
|
| 684 |
-
"pending_expenses": float(stats["pending_expenses"]),
|
| 685 |
-
"rejected_expenses": float(stats["rejected_expenses"]),
|
| 686 |
-
"expense_claims_count": stats["expense_claims_count"],
|
| 687 |
-
"reconciliation_run_id": str(run_id)
|
| 688 |
-
})
|
| 689 |
-
results.append(result.fetchone())
|
| 690 |
-
|
| 691 |
-
created = sum(1 for r in results if r[0]) # inserted = True
|
| 692 |
-
updated = len(results) - created
|
| 693 |
-
|
| 694 |
-
return (created, updated)
|
| 695 |
-
|
| 696 |
-
# Additional methods: _detect_anomalies, _calculate_summary,
|
| 697 |
-
# _complete_run, _fail_run, _send_reconciliation_report
|
| 698 |
-
# (See full implementation in codebase)
|
| 699 |
-
|
| 700 |
-
|
| 701 |
-
class ReconciliationError(Exception):
|
| 702 |
-
"""Raised when reconciliation fails."""
|
| 703 |
-
pass
|
| 704 |
-
|
| 705 |
-
|
| 706 |
-
class ConcurrentRunError(Exception):
|
| 707 |
-
"""Raised when another reconciliation is already running."""
|
| 708 |
-
pass
|
| 709 |
```
|
| 710 |
|
| 711 |
-
|
| 712 |
-
|
| 713 |
-
``
|
| 714 |
-
|
| 715 |
-
Anomaly Detection for Reconciliation
|
| 716 |
-
|
| 717 |
-
Detects suspicious patterns in field agent activity.
|
| 718 |
-
"""
|
| 719 |
-
|
| 720 |
-
from typing import List, Dict, Any
|
| 721 |
-
from statistics import mean, stdev
|
| 722 |
-
|
| 723 |
-
|
| 724 |
-
class AnomalyDetector:
|
| 725 |
-
"""Detects anomalies in agent activity data."""
|
| 726 |
-
|
| 727 |
-
def detect(self, agent_stats: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
| 728 |
-
"""
|
| 729 |
-
Detect anomalies across all agents.
|
| 730 |
-
|
| 731 |
-
Returns list of anomaly records with:
|
| 732 |
-
- type: Anomaly category
|
| 733 |
-
- user_id: Affected agent
|
| 734 |
-
- severity: 'low', 'medium', 'high', 'critical'
|
| 735 |
-
- details: Human-readable description
|
| 736 |
-
"""
|
| 737 |
-
if not agent_stats:
|
| 738 |
-
return []
|
| 739 |
-
|
| 740 |
-
anomalies = []
|
| 741 |
-
|
| 742 |
-
# Calculate baseline statistics
|
| 743 |
-
completed_counts = [s["tickets_completed"] for s in agent_stats]
|
| 744 |
-
expense_totals = [float(s["total_expenses"]) for s in agent_stats]
|
| 745 |
-
|
| 746 |
-
avg_completed = mean(completed_counts) if completed_counts else 0
|
| 747 |
-
std_completed = stdev(completed_counts) if len(completed_counts) > 1 else 0
|
| 748 |
-
avg_expenses = mean(expense_totals) if expense_totals else 0
|
| 749 |
-
std_expenses = stdev(expense_totals) if len(expense_totals) > 1 else 0
|
| 750 |
-
|
| 751 |
-
for stats in agent_stats:
|
| 752 |
-
user_id = str(stats["user_id"])
|
| 753 |
-
|
| 754 |
-
# Anomaly 1: Expenses without completed tickets
|
| 755 |
-
if stats["total_expenses"] > 0 and stats["tickets_completed"] == 0:
|
| 756 |
-
anomalies.append({
|
| 757 |
-
"type": "expenses_without_completion",
|
| 758 |
-
"user_id": user_id,
|
| 759 |
-
"severity": "medium",
|
| 760 |
-
"details": (
|
| 761 |
-
f"Agent has {stats['expense_claims_count']} expense claims "
|
| 762 |
-
f"totaling {stats['total_expenses']:.2f} but completed 0 tickets"
|
| 763 |
-
)
|
| 764 |
-
})
|
| 765 |
-
|
| 766 |
-
# Anomaly 2: Unusually high productivity (>2 std deviations)
|
| 767 |
-
if std_completed > 0:
|
| 768 |
-
z_score = (stats["tickets_completed"] - avg_completed) / std_completed
|
| 769 |
-
if z_score > 2:
|
| 770 |
-
anomalies.append({
|
| 771 |
-
"type": "high_productivity",
|
| 772 |
-
"user_id": user_id,
|
| 773 |
-
"severity": "info",
|
| 774 |
-
"details": (
|
| 775 |
-
f"Agent completed {stats['tickets_completed']} tickets "
|
| 776 |
-
f"(avg: {avg_completed:.1f}, z-score: {z_score:.2f})"
|
| 777 |
-
)
|
| 778 |
-
})
|
| 779 |
-
|
| 780 |
-
# Anomaly 3: High rejection rate (>50%)
|
| 781 |
-
if stats["tickets_assigned"] > 0:
|
| 782 |
-
rejection_rate = stats["tickets_rejected"] / stats["tickets_assigned"]
|
| 783 |
-
if rejection_rate > 0.5:
|
| 784 |
-
anomalies.append({
|
| 785 |
-
"type": "high_rejection_rate",
|
| 786 |
-
"user_id": user_id,
|
| 787 |
-
"severity": "high",
|
| 788 |
-
"details": (
|
| 789 |
-
f"Agent rejected {stats['tickets_rejected']} of "
|
| 790 |
-
f"{stats['tickets_assigned']} assigned tickets "
|
| 791 |
-
f"({rejection_rate*100:.0f}%)"
|
| 792 |
-
)
|
| 793 |
-
})
|
| 794 |
-
|
| 795 |
-
# Anomaly 4: Unusually high expenses (>3 std deviations)
|
| 796 |
-
if std_expenses > 0 and avg_expenses > 0:
|
| 797 |
-
z_score = (float(stats["total_expenses"]) - avg_expenses) / std_expenses
|
| 798 |
-
if z_score > 3:
|
| 799 |
-
anomalies.append({
|
| 800 |
-
"type": "high_expenses",
|
| 801 |
-
"user_id": user_id,
|
| 802 |
-
"severity": "medium",
|
| 803 |
-
"details": (
|
| 804 |
-
f"Agent expenses {stats['total_expenses']:.2f} "
|
| 805 |
-
f"(avg: {avg_expenses:.2f}, z-score: {z_score:.2f})"
|
| 806 |
-
)
|
| 807 |
-
})
|
| 808 |
-
|
| 809 |
-
# Anomaly 5: Zero activity (assigned but did nothing)
|
| 810 |
-
if (stats["tickets_assigned"] > 0 and
|
| 811 |
-
stats["tickets_completed"] == 0 and
|
| 812 |
-
stats["tickets_rejected"] == 0 and
|
| 813 |
-
stats["tickets_cancelled"] == 0):
|
| 814 |
-
anomalies.append({
|
| 815 |
-
"type": "no_action_taken",
|
| 816 |
-
"user_id": user_id,
|
| 817 |
-
"severity": "medium",
|
| 818 |
-
"details": (
|
| 819 |
-
f"Agent was assigned {stats['tickets_assigned']} tickets "
|
| 820 |
-
f"but took no action (no completion, rejection, or cancellation)"
|
| 821 |
-
)
|
| 822 |
-
})
|
| 823 |
-
|
| 824 |
-
return anomalies
|
| 825 |
-
```
|
| 826 |
|
| 827 |
---
|
| 828 |
|
| 829 |
-
##
|
| 830 |
-
|
| 831 |
-
### APScheduler Setup: `src/app/tasks/scheduler.py`
|
| 832 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 833 |
|
| 834 |
-
|
| 835 |
-
"""
|
| 836 |
-
APScheduler Configuration for Daily Reconciliation
|
| 837 |
-
|
| 838 |
-
Runs at midnight (Africa/Nairobi timezone) to reconcile previous day's activity.
|
| 839 |
-
"""
|
| 840 |
-
|
| 841 |
-
from apscheduler.schedulers.background import BackgroundScheduler
|
| 842 |
-
from apscheduler.triggers.cron import CronTrigger
|
| 843 |
-
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
|
| 844 |
-
from datetime import date, timedelta
|
| 845 |
-
import asyncio
|
| 846 |
-
from app.core.database import get_db
|
| 847 |
-
from app.services.reconciliation.reconciliation_service import ReconciliationService
|
| 848 |
-
from app.core.logging import get_logger
|
| 849 |
-
|
| 850 |
-
logger = get_logger(__name__)
|
| 851 |
-
|
| 852 |
-
# Global scheduler instance
|
| 853 |
-
scheduler = BackgroundScheduler(timezone='Africa/Nairobi')
|
| 854 |
-
|
| 855 |
-
|
| 856 |
-
def start_scheduler():
|
| 857 |
-
"""
|
| 858 |
-
Initialize and start the scheduler.
|
| 859 |
-
Called on application startup.
|
| 860 |
-
"""
|
| 861 |
-
# Add daily reconciliation job
|
| 862 |
-
scheduler.add_job(
|
| 863 |
-
func=run_daily_reconciliation,
|
| 864 |
-
trigger=CronTrigger(hour=0, minute=0), # Midnight
|
| 865 |
-
id='daily_reconciliation',
|
| 866 |
-
name='Daily Field Agent Reconciliation',
|
| 867 |
-
replace_existing=True,
|
| 868 |
-
max_instances=1, # Prevent overlapping runs
|
| 869 |
-
misfire_grace_time=3600 # Allow 1 hour grace if server was down
|
| 870 |
-
)
|
| 871 |
-
|
| 872 |
-
# Add event listeners for monitoring
|
| 873 |
-
scheduler.add_listener(
|
| 874 |
-
job_executed_listener,
|
| 875 |
-
EVENT_JOB_EXECUTED | EVENT_JOB_ERROR
|
| 876 |
-
)
|
| 877 |
-
|
| 878 |
-
scheduler.start()
|
| 879 |
-
logger.info("Reconciliation scheduler started")
|
| 880 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 881 |
|
| 882 |
-
|
| 883 |
-
"""
|
| 884 |
-
Gracefully shutdown the scheduler.
|
| 885 |
-
Called on application shutdown.
|
| 886 |
-
"""
|
| 887 |
-
scheduler.shutdown(wait=True)
|
| 888 |
-
logger.info("Reconciliation scheduler stopped")
|
| 889 |
|
| 890 |
-
|
| 891 |
-
|
| 892 |
-
|
| 893 |
-
|
| 894 |
-
|
| 895 |
-
|
| 896 |
-
|
| 897 |
-
|
| 898 |
-
|
| 899 |
-
|
| 900 |
-
|
| 901 |
-
|
| 902 |
-
|
| 903 |
-
|
| 904 |
-
|
| 905 |
-
|
| 906 |
-
|
| 907 |
-
|
| 908 |
-
|
| 909 |
-
|
| 910 |
-
|
| 911 |
-
|
| 912 |
-
|
| 913 |
-
|
| 914 |
-
|
| 915 |
-
|
| 916 |
-
|
| 917 |
-
|
| 918 |
-
|
| 919 |
-
|
| 920 |
-
|
| 921 |
-
|
| 922 |
-
|
| 923 |
-
project_repo = ProjectRepository(db)
|
| 924 |
-
projects = await project_repo.get_active_projects()
|
| 925 |
-
|
| 926 |
-
logger.info(f"Reconciling {len(projects)} projects for {target_date}")
|
| 927 |
-
|
| 928 |
-
# Reconcile each project
|
| 929 |
-
service = ReconciliationService(db)
|
| 930 |
-
results = []
|
| 931 |
-
|
| 932 |
-
for project in projects:
|
| 933 |
-
try:
|
| 934 |
-
run_id = await service.reconcile_project_day(
|
| 935 |
-
project_id=project.id,
|
| 936 |
-
target_date=target_date,
|
| 937 |
-
run_type="scheduled"
|
| 938 |
-
)
|
| 939 |
-
results.append({"project_id": project.id, "run_id": run_id, "status": "success"})
|
| 940 |
-
|
| 941 |
-
except Exception as e:
|
| 942 |
-
logger.error(
|
| 943 |
-
f"Failed to reconcile project {project.id}: {str(e)}",
|
| 944 |
-
exc_info=True
|
| 945 |
-
)
|
| 946 |
-
results.append({"project_id": project.id, "error": str(e), "status": "failed"})
|
| 947 |
-
|
| 948 |
-
# Log summary
|
| 949 |
-
success_count = sum(1 for r in results if r["status"] == "success")
|
| 950 |
-
logger.info(
|
| 951 |
-
f"Reconciliation summary: {success_count}/{len(projects)} projects succeeded"
|
| 952 |
-
)
|
| 953 |
-
|
| 954 |
-
finally:
|
| 955 |
-
await db.close()
|
| 956 |
-
|
| 957 |
-
|
| 958 |
-
def job_executed_listener(event):
|
| 959 |
-
"""Log job execution events for monitoring."""
|
| 960 |
-
if event.exception:
|
| 961 |
-
logger.error(f"Job {event.job_id} failed: {event.exception}")
|
| 962 |
-
else:
|
| 963 |
-
logger.info(f"Job {event.job_id} completed successfully")
|
| 964 |
-
|
| 965 |
-
|
| 966 |
-
# Manual trigger function (for API)
|
| 967 |
-
async def trigger_reconciliation(
|
| 968 |
-
project_id: str,
|
| 969 |
-
target_date: date,
|
| 970 |
-
user_ids: list = None,
|
| 971 |
-
triggered_by: str = None
|
| 972 |
-
):
|
| 973 |
-
"""
|
| 974 |
-
Manually trigger reconciliation via API.
|
| 975 |
-
|
| 976 |
-
This is async and can be called directly from FastAPI endpoints.
|
| 977 |
-
"""
|
| 978 |
-
async for db in get_db():
|
| 979 |
-
try:
|
| 980 |
-
service = ReconciliationService(db)
|
| 981 |
-
run_id = await service.reconcile_project_day(
|
| 982 |
-
project_id=project_id,
|
| 983 |
-
target_date=target_date,
|
| 984 |
-
user_ids=user_ids,
|
| 985 |
-
triggered_by=triggered_by,
|
| 986 |
-
run_type="manual"
|
| 987 |
-
)
|
| 988 |
-
return run_id
|
| 989 |
-
finally:
|
| 990 |
-
await db.close()
|
| 991 |
```
|
| 992 |
|
| 993 |
-
|
| 994 |
|
| 995 |
-
|
| 996 |
-
from fastapi import FastAPI
|
| 997 |
-
from app.tasks.scheduler import start_scheduler, shutdown_scheduler
|
| 998 |
|
| 999 |
-
|
|
|
|
|
|
|
| 1000 |
|
| 1001 |
-
|
| 1002 |
-
|
| 1003 |
-
"""Start scheduler on application startup."""
|
| 1004 |
-
start_scheduler()
|
| 1005 |
|
| 1006 |
-
|
| 1007 |
-
async def shutdown_event():
|
| 1008 |
-
"""Stop scheduler on application shutdown."""
|
| 1009 |
-
shutdown_scheduler()
|
| 1010 |
-
```
|
| 1011 |
|
| 1012 |
---
|
| 1013 |
|
| 1014 |
## API Endpoints
|
| 1015 |
|
| 1016 |
-
### Reconciliation API: `src/app/api/endpoints/reconciliation.py`
|
| 1017 |
-
|
| 1018 |
-
```python
|
| 1019 |
-
"""
|
| 1020 |
-
Reconciliation API Endpoints
|
| 1021 |
-
|
| 1022 |
-
Provides manual control over reconciliation process.
|
| 1023 |
-
"""
|
| 1024 |
-
|
| 1025 |
-
from fastapi import APIRouter, Depends, HTTPException, Query
|
| 1026 |
-
from sqlalchemy.ext.asyncio import AsyncSession
|
| 1027 |
-
from datetime import date, timedelta
|
| 1028 |
-
from typing import Optional, List
|
| 1029 |
-
from uuid import UUID
|
| 1030 |
-
|
| 1031 |
-
from app.core.database import get_db
|
| 1032 |
-
from app.services.reconciliation.reconciliation_service import (
|
| 1033 |
-
ReconciliationService,
|
| 1034 |
-
ReconciliationError,
|
| 1035 |
-
ConcurrentRunError
|
| 1036 |
-
)
|
| 1037 |
-
from app.core.auth import get_current_user
|
| 1038 |
-
from app.models.user import User
|
| 1039 |
-
|
| 1040 |
-
router = APIRouter(prefix="/api/reconciliation", tags=["reconciliation"])
|
| 1041 |
-
|
| 1042 |
-
|
| 1043 |
-
@router.post("/run")
|
| 1044 |
-
async def trigger_reconciliation(
|
| 1045 |
-
project_id: UUID,
|
| 1046 |
-
target_date: Optional[date] = None,
|
| 1047 |
-
user_ids: Optional[List[UUID]] = None,
|
| 1048 |
-
db: AsyncSession = Depends(get_db),
|
| 1049 |
-
current_user: User = Depends(get_current_user)
|
| 1050 |
-
):
|
| 1051 |
-
"""
|
| 1052 |
-
Manually trigger reconciliation for a project/date.
|
| 1053 |
-
|
| 1054 |
-
- **project_id**: Project to reconcile
|
| 1055 |
-
- **target_date**: Date to reconcile (defaults to yesterday)
|
| 1056 |
-
- **user_ids**: Optional list of specific users (for partial reconciliation)
|
| 1057 |
-
|
| 1058 |
-
Returns reconciliation run ID.
|
| 1059 |
-
"""
|
| 1060 |
-
# Default to yesterday if not specified
|
| 1061 |
-
if target_date is None:
|
| 1062 |
-
target_date = date.today() - timedelta(days=1)
|
| 1063 |
-
|
| 1064 |
-
# Validate permissions (user must be PM, dispatcher, or admin)
|
| 1065 |
-
if current_user.role not in ["project_manager", "dispatcher", "platform_admin"]:
|
| 1066 |
-
raise HTTPException(status_code=403, detail="Insufficient permissions")
|
| 1067 |
-
|
| 1068 |
-
try:
|
| 1069 |
-
service = ReconciliationService(db)
|
| 1070 |
-
run_id = await service.reconcile_project_day(
|
| 1071 |
-
project_id=project_id,
|
| 1072 |
-
target_date=target_date,
|
| 1073 |
-
user_ids=user_ids,
|
| 1074 |
-
triggered_by=current_user.id,
|
| 1075 |
-
run_type="manual" if not user_ids else "partial"
|
| 1076 |
-
)
|
| 1077 |
-
|
| 1078 |
-
return {
|
| 1079 |
-
"status": "success",
|
| 1080 |
-
"run_id": run_id,
|
| 1081 |
-
"message": f"Reconciliation started for {target_date}"
|
| 1082 |
-
}
|
| 1083 |
-
|
| 1084 |
-
except ConcurrentRunError as e:
|
| 1085 |
-
raise HTTPException(status_code=409, detail=str(e))
|
| 1086 |
-
except ReconciliationError as e:
|
| 1087 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 1088 |
-
|
| 1089 |
-
|
| 1090 |
-
@router.get("/status/{run_id}")
|
| 1091 |
-
async def get_reconciliation_status(
|
| 1092 |
-
run_id: UUID,
|
| 1093 |
-
db: AsyncSession = Depends(get_db),
|
| 1094 |
-
current_user: User = Depends(get_current_user)
|
| 1095 |
-
):
|
| 1096 |
-
"""
|
| 1097 |
-
Get status of a reconciliation run.
|
| 1098 |
-
|
| 1099 |
-
Returns run details including progress, results, and any errors.
|
| 1100 |
-
"""
|
| 1101 |
-
from sqlalchemy import text
|
| 1102 |
-
|
| 1103 |
-
query = text("""
|
| 1104 |
-
SELECT
|
| 1105 |
-
id, project_id, reconciliation_date, run_type,
|
| 1106 |
-
started_at, completed_at, status,
|
| 1107 |
-
agents_processed, timesheets_created, timesheets_updated,
|
| 1108 |
-
execution_time_ms, summary_stats, anomalies_detected,
|
| 1109 |
-
error_message
|
| 1110 |
-
FROM reconciliation_runs
|
| 1111 |
-
WHERE id = :run_id
|
| 1112 |
-
""")
|
| 1113 |
-
|
| 1114 |
-
result = await db.execute(query, {"run_id": str(run_id)})
|
| 1115 |
-
run = result.fetchone()
|
| 1116 |
-
|
| 1117 |
-
if not run:
|
| 1118 |
-
raise HTTPException(status_code=404, detail="Reconciliation run not found")
|
| 1119 |
-
|
| 1120 |
-
return {
|
| 1121 |
-
"run_id": run.id,
|
| 1122 |
-
"project_id": run.project_id,
|
| 1123 |
-
"date": run.reconciliation_date,
|
| 1124 |
-
"type": run.run_type,
|
| 1125 |
-
"status": run.status,
|
| 1126 |
-
"started_at": run.started_at,
|
| 1127 |
-
"completed_at": run.completed_at,
|
| 1128 |
-
"execution_time_ms": run.execution_time_ms,
|
| 1129 |
-
"results": {
|
| 1130 |
-
"agents_processed": run.agents_processed,
|
| 1131 |
-
"timesheets_created": run.timesheets_created,
|
| 1132 |
-
"timesheets_updated": run.timesheets_updated
|
| 1133 |
-
},
|
| 1134 |
-
"summary": run.summary_stats,
|
| 1135 |
-
"anomalies": run.anomalies_detected,
|
| 1136 |
-
"error": run.error_message
|
| 1137 |
-
}
|
| 1138 |
-
|
| 1139 |
-
|
| 1140 |
-
@router.get("/report/{project_id}")
|
| 1141 |
-
async def get_daily_report(
|
| 1142 |
-
project_id: UUID,
|
| 1143 |
-
target_date: date = Query(default=None),
|
| 1144 |
-
db: AsyncSession = Depends(get_db),
|
| 1145 |
-
current_user: User = Depends(get_current_user)
|
| 1146 |
-
):
|
| 1147 |
-
"""
|
| 1148 |
-
Get daily reconciliation report for a project.
|
| 1149 |
-
|
| 1150 |
-
Returns summary statistics and agent-level details.
|
| 1151 |
-
"""
|
| 1152 |
-
if target_date is None:
|
| 1153 |
-
target_date = date.today() - timedelta(days=1)
|
| 1154 |
-
|
| 1155 |
-
from sqlalchemy import text
|
| 1156 |
-
|
| 1157 |
-
# Get latest reconciliation run for this project/date
|
| 1158 |
-
query = text("""
|
| 1159 |
-
SELECT
|
| 1160 |
-
id, status, completed_at, summary_stats, anomalies_detected
|
| 1161 |
-
FROM reconciliation_runs
|
| 1162 |
-
WHERE project_id = :project_id
|
| 1163 |
-
AND reconciliation_date = :target_date
|
| 1164 |
-
ORDER BY started_at DESC
|
| 1165 |
-
LIMIT 1
|
| 1166 |
-
""")
|
| 1167 |
-
|
| 1168 |
-
result = await db.execute(query, {
|
| 1169 |
-
"project_id": str(project_id),
|
| 1170 |
-
"target_date": target_date
|
| 1171 |
-
})
|
| 1172 |
-
run = result.fetchone()
|
| 1173 |
-
|
| 1174 |
-
if not run:
|
| 1175 |
-
raise HTTPException(
|
| 1176 |
-
status_code=404,
|
| 1177 |
-
detail=f"No reconciliation found for {target_date}"
|
| 1178 |
-
)
|
| 1179 |
-
|
| 1180 |
-
# Get agent-level details from timesheets
|
| 1181 |
-
timesheets_query = text("""
|
| 1182 |
-
SELECT
|
| 1183 |
-
u.id as user_id,
|
| 1184 |
-
u.name as user_name,
|
| 1185 |
-
t.tickets_assigned,
|
| 1186 |
-
t.tickets_completed,
|
| 1187 |
-
t.tickets_rejected,
|
| 1188 |
-
t.tickets_cancelled,
|
| 1189 |
-
t.total_expenses,
|
| 1190 |
-
t.approved_expenses,
|
| 1191 |
-
t.pending_expenses
|
| 1192 |
-
FROM timesheets t
|
| 1193 |
-
JOIN users u ON t.user_id = u.id
|
| 1194 |
-
WHERE t.project_id = :project_id
|
| 1195 |
-
AND t.work_date = :target_date
|
| 1196 |
-
AND t.reconciliation_run_id = :run_id
|
| 1197 |
-
ORDER BY t.tickets_completed DESC
|
| 1198 |
-
""")
|
| 1199 |
-
|
| 1200 |
-
result = await db.execute(timesheets_query, {
|
| 1201 |
-
"project_id": str(project_id),
|
| 1202 |
-
"target_date": target_date,
|
| 1203 |
-
"run_id": str(run.id)
|
| 1204 |
-
})
|
| 1205 |
-
agents = [dict(row._mapping) for row in result.fetchall()]
|
| 1206 |
-
|
| 1207 |
-
return {
|
| 1208 |
-
"project_id": project_id,
|
| 1209 |
-
"date": target_date,
|
| 1210 |
-
"reconciliation_status": run.status,
|
| 1211 |
-
"reconciled_at": run.completed_at,
|
| 1212 |
-
"summary": run.summary_stats,
|
| 1213 |
-
"anomalies": run.anomalies_detected,
|
| 1214 |
-
"agents": agents
|
| 1215 |
-
}
|
| 1216 |
-
|
| 1217 |
-
|
| 1218 |
-
@router.get("/history/{project_id}")
|
| 1219 |
-
async def get_reconciliation_history(
|
| 1220 |
-
project_id: UUID,
|
| 1221 |
-
limit: int = Query(default=30, le=90),
|
| 1222 |
-
db: AsyncSession = Depends(get_db),
|
| 1223 |
-
current_user: User = Depends(get_current_user)
|
| 1224 |
-
):
|
| 1225 |
-
"""
|
| 1226 |
-
Get reconciliation history for a project.
|
| 1227 |
-
|
| 1228 |
-
Returns list of recent reconciliation runs.
|
| 1229 |
-
"""
|
| 1230 |
-
from sqlalchemy import text
|
| 1231 |
-
|
| 1232 |
-
query = text("""
|
| 1233 |
-
SELECT
|
| 1234 |
-
id, reconciliation_date, run_type, status,
|
| 1235 |
-
started_at, completed_at, execution_time_ms,
|
| 1236 |
-
agents_processed, timesheets_created
|
| 1237 |
-
FROM reconciliation_runs
|
| 1238 |
-
WHERE project_id = :project_id
|
| 1239 |
-
ORDER BY reconciliation_date DESC, started_at DESC
|
| 1240 |
-
LIMIT :limit
|
| 1241 |
-
""")
|
| 1242 |
-
|
| 1243 |
-
result = await db.execute(query, {
|
| 1244 |
-
"project_id": str(project_id),
|
| 1245 |
-
"limit": limit
|
| 1246 |
-
})
|
| 1247 |
-
|
| 1248 |
-
runs = [dict(row._mapping) for row in result.fetchall()]
|
| 1249 |
-
|
| 1250 |
-
return {
|
| 1251 |
-
"project_id": project_id,
|
| 1252 |
-
"runs": runs
|
| 1253 |
-
}
|
| 1254 |
```
|
| 1255 |
-
|
| 1256 |
-
|
| 1257 |
-
|
| 1258 |
-
|
| 1259 |
-
|
| 1260 |
-
### Daily Report Notification
|
| 1261 |
-
|
| 1262 |
-
```python
|
| 1263 |
-
"""
|
| 1264 |
-
Reconciliation Report Notification
|
| 1265 |
-
|
| 1266 |
-
Sends daily summary to project managers and dispatchers.
|
| 1267 |
-
"""
|
| 1268 |
-
|
| 1269 |
-
from typing import Dict, Any, List
|
| 1270 |
-
from uuid import UUID
|
| 1271 |
-
from datetime import date
|
| 1272 |
-
|
| 1273 |
-
from app.services.notification_service import NotificationService
|
| 1274 |
-
from app.repositories.user_repository import UserRepository
|
| 1275 |
-
|
| 1276 |
-
|
| 1277 |
-
async def send_reconciliation_report(
|
| 1278 |
-
db,
|
| 1279 |
-
run_id: UUID,
|
| 1280 |
-
project_id: UUID,
|
| 1281 |
-
target_date: date,
|
| 1282 |
-
summary: Dict[str, Any],
|
| 1283 |
-
anomalies: List[Dict[str, Any]]
|
| 1284 |
-
):
|
| 1285 |
-
"""
|
| 1286 |
-
Send daily reconciliation report to relevant stakeholders.
|
| 1287 |
-
|
| 1288 |
-
Recipients: Project managers, dispatchers for the project
|
| 1289 |
-
Channels: Email, in-app notification
|
| 1290 |
-
"""
|
| 1291 |
-
notification_service = NotificationService(db)
|
| 1292 |
-
user_repo = UserRepository(db)
|
| 1293 |
-
|
| 1294 |
-
# Get project managers and dispatchers
|
| 1295 |
-
recipients = await user_repo.get_project_stakeholders(
|
| 1296 |
-
project_id=project_id,
|
| 1297 |
-
roles=["project_manager", "dispatcher"]
|
| 1298 |
-
)
|
| 1299 |
-
|
| 1300 |
-
# Build notification content
|
| 1301 |
-
title = f"Daily Reconciliation Report - {target_date}"
|
| 1302 |
-
|
| 1303 |
-
message = f"""
|
| 1304 |
-
Daily reconciliation completed for {target_date}
|
| 1305 |
-
|
| 1306 |
-
Summary:
|
| 1307 |
-
- Total agents: {summary.get('total_agents', 0)}
|
| 1308 |
-
- Tickets completed: {summary.get('total_tickets_completed', 0)}
|
| 1309 |
-
- Tickets rejected: {summary.get('total_tickets_rejected', 0)}
|
| 1310 |
-
- Total expenses: {summary.get('total_expenses', 0):.2f}
|
| 1311 |
-
- Approved expenses: {summary.get('total_approved_expenses', 0):.2f}
|
| 1312 |
-
|
| 1313 |
-
Anomalies detected: {len(anomalies)}
|
| 1314 |
-
"""
|
| 1315 |
-
|
| 1316 |
-
if anomalies:
|
| 1317 |
-
message += "\n\nTop anomalies:\n"
|
| 1318 |
-
for anomaly in anomalies[:5]: # Show top 5
|
| 1319 |
-
message += f"- {anomaly['type']}: {anomaly['details']}\n"
|
| 1320 |
-
|
| 1321 |
-
# Send notifications
|
| 1322 |
-
for recipient in recipients:
|
| 1323 |
-
await notification_service.create_notification(
|
| 1324 |
-
user_id=recipient.id,
|
| 1325 |
-
source_type="reconciliation_run",
|
| 1326 |
-
source_id=run_id,
|
| 1327 |
-
title=title,
|
| 1328 |
-
message=message,
|
| 1329 |
-
notification_type="reconciliation_report",
|
| 1330 |
-
channel="in_app"
|
| 1331 |
-
)
|
| 1332 |
-
|
| 1333 |
-
# Also send email for high-severity anomalies
|
| 1334 |
-
high_severity_anomalies = [
|
| 1335 |
-
a for a in anomalies if a.get("severity") in ["high", "critical"]
|
| 1336 |
-
]
|
| 1337 |
-
if high_severity_anomalies:
|
| 1338 |
-
await notification_service.send_email(
|
| 1339 |
-
to=recipient.email,
|
| 1340 |
-
subject=f"⚠️ {title} - Action Required",
|
| 1341 |
-
body=message
|
| 1342 |
-
)
|
| 1343 |
```
|
| 1344 |
|
| 1345 |
---
|
| 1346 |
|
| 1347 |
-
##
|
| 1348 |
-
|
| 1349 |
-
### Database Optimization Checklist
|
| 1350 |
-
|
| 1351 |
-
1. **Indexes** (already defined in schema section)
|
| 1352 |
-
- Composite indexes on frequently queried columns
|
| 1353 |
-
- GIN indexes for JSONB columns
|
| 1354 |
-
- Partial indexes for active records
|
| 1355 |
-
|
| 1356 |
-
2. **Query Optimization**
|
| 1357 |
-
- Use CTEs for complex queries (improves readability and performance)
|
| 1358 |
-
- FILTER clause instead of CASE WHEN for conditional aggregation
|
| 1359 |
-
- FULL OUTER JOIN to catch all agents (with or without expenses)
|
| 1360 |
-
|
| 1361 |
-
3. **Connection Pooling**
|
| 1362 |
-
```python
|
| 1363 |
-
# In database.py
|
| 1364 |
-
engine = create_async_engine(
|
| 1365 |
-
DATABASE_URL,
|
| 1366 |
-
pool_size=20, # Adjust based on load
|
| 1367 |
-
max_overflow=10,
|
| 1368 |
-
pool_pre_ping=True
|
| 1369 |
-
)
|
| 1370 |
-
```
|
| 1371 |
-
|
| 1372 |
-
4. **Batch Processing**
|
| 1373 |
-
- Process 500 agents in single query (not 500 separate queries)
|
| 1374 |
-
- Use bulk upsert with ON CONFLICT
|
| 1375 |
-
- Commit once per project (not per agent)
|
| 1376 |
-
|
| 1377 |
-
### Performance Benchmarks
|
| 1378 |
-
|
| 1379 |
-
Target performance for 500 agents:
|
| 1380 |
-
- **Query time**: <200ms
|
| 1381 |
-
- **Upsert time**: <500ms
|
| 1382 |
-
- **Total execution**: <1 second
|
| 1383 |
-
- **Memory usage**: <100MB
|
| 1384 |
-
|
| 1385 |
-
Actual performance (measured):
|
| 1386 |
-
- Query: ~150ms
|
| 1387 |
-
- Upsert: ~300ms
|
| 1388 |
-
- Total: ~800ms
|
| 1389 |
-
- Memory: ~50MB
|
| 1390 |
-
|
| 1391 |
-
---
|
| 1392 |
-
|
| 1393 |
-
## Error Handling & Rollback
|
| 1394 |
-
|
| 1395 |
-
### Transaction Management
|
| 1396 |
-
|
| 1397 |
-
```python
|
| 1398 |
-
async with self.db.begin():
|
| 1399 |
-
# All operations here
|
| 1400 |
-
# If any fails, entire transaction rolls back automatically
|
| 1401 |
-
pass
|
| 1402 |
-
```
|
| 1403 |
-
|
| 1404 |
-
### Error Categories
|
| 1405 |
-
|
| 1406 |
-
1. **Concurrent Run Error**
|
| 1407 |
-
- Another reconciliation already running
|
| 1408 |
-
- HTTP 409 Conflict
|
| 1409 |
-
- User should wait and retry
|
| 1410 |
|
| 1411 |
-
|
| 1412 |
-
|
| 1413 |
-
|
| 1414 |
-
|
|
|
|
| 1415 |
|
| 1416 |
-
|
| 1417 |
-
|
| 1418 |
-
|
| 1419 |
-
|
|
|
|
|
|
|
| 1420 |
|
| 1421 |
-
|
| 1422 |
-
- Query takes too long (>60 seconds)
|
| 1423 |
-
- HTTP 504 Gateway Timeout
|
| 1424 |
-
- Investigate performance, add indexes
|
| 1425 |
|
| 1426 |
-
###
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1427 |
|
| 1428 |
-
|
| 1429 |
-
- If ANY step fails, ALL changes are rolled back
|
| 1430 |
-
- No partial state (either complete success or complete rollback)
|
| 1431 |
-
- Reconciliation run record is updated with error details
|
| 1432 |
-
- Failed runs can be retried without side effects
|
| 1433 |
|
| 1434 |
---
|
| 1435 |
|
| 1436 |
-
##
|
| 1437 |
|
| 1438 |
-
###
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1439 |
|
| 1440 |
-
|
| 1441 |
-
|
|
|
|
|
|
|
|
|
|
| 1442 |
|
| 1443 |
-
|
| 1444 |
-
from datetime import date
|
| 1445 |
-
from app.services.reconciliation.reconciliation_service import ReconciliationService
|
| 1446 |
|
| 1447 |
-
|
| 1448 |
-
async def test_reconcile_project_day_success(db_session, sample_project, sample_agents):
|
| 1449 |
-
"""Test successful reconciliation."""
|
| 1450 |
-
service = ReconciliationService(db_session)
|
| 1451 |
-
|
| 1452 |
-
run_id = await service.reconcile_project_day(
|
| 1453 |
-
project_id=sample_project.id,
|
| 1454 |
-
target_date=date(2024, 12, 8)
|
| 1455 |
-
)
|
| 1456 |
-
|
| 1457 |
-
assert run_id is not None
|
| 1458 |
-
# Verify timesheets created
|
| 1459 |
-
# Verify summary stats
|
| 1460 |
-
# Verify anomalies detected
|
| 1461 |
-
|
| 1462 |
-
@pytest.mark.asyncio
|
| 1463 |
-
async def test_reconcile_concurrent_run_error(db_session, sample_project):
|
| 1464 |
-
"""Test concurrent run prevention."""
|
| 1465 |
-
service = ReconciliationService(db_session)
|
| 1466 |
-
|
| 1467 |
-
# Start first run
|
| 1468 |
-
run_id1 = await service.reconcile_project_day(
|
| 1469 |
-
project_id=sample_project.id,
|
| 1470 |
-
target_date=date(2024, 12, 8)
|
| 1471 |
-
)
|
| 1472 |
-
|
| 1473 |
-
# Try to start second run (should fail)
|
| 1474 |
-
with pytest.raises(ConcurrentRunError):
|
| 1475 |
-
await service.reconcile_project_day(
|
| 1476 |
-
project_id=sample_project.id,
|
| 1477 |
-
target_date=date(2024, 12, 8)
|
| 1478 |
-
)
|
| 1479 |
-
```
|
| 1480 |
|
| 1481 |
-
###
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1482 |
|
|
|
|
|
|
|
| 1483 |
```python
|
| 1484 |
-
|
| 1485 |
-
|
| 1486 |
-
|
| 1487 |
-
|
| 1488 |
-
|
| 1489 |
-
|
| 1490 |
-
# 1. Create test data (project, agents, tickets, expenses)
|
| 1491 |
-
# 2. Trigger reconciliation via API
|
| 1492 |
-
response = await client.post("/api/reconciliation/run", json={
|
| 1493 |
-
"project_id": str(project_id),
|
| 1494 |
-
"target_date": "2024-12-08"
|
| 1495 |
-
})
|
| 1496 |
-
assert response.status_code == 200
|
| 1497 |
-
|
| 1498 |
-
# 3. Check status
|
| 1499 |
-
run_id = response.json()["run_id"]
|
| 1500 |
-
status_response = await client.get(f"/api/reconciliation/status/{run_id}")
|
| 1501 |
-
assert status_response.json()["status"] == "completed"
|
| 1502 |
-
|
| 1503 |
-
# 4. Verify timesheets created
|
| 1504 |
-
# 5. Verify notifications sent
|
| 1505 |
-
```
|
| 1506 |
-
|
| 1507 |
-
### Performance Tests
|
| 1508 |
-
|
| 1509 |
-
```python
|
| 1510 |
-
@pytest.mark.performance
|
| 1511 |
-
@pytest.mark.asyncio
|
| 1512 |
-
async def test_reconciliation_performance_500_agents(db_session):
|
| 1513 |
-
"""Test reconciliation with 500 agents."""
|
| 1514 |
-
# Create 500 agents with realistic data
|
| 1515 |
-
# Measure execution time
|
| 1516 |
-
# Assert < 30 seconds
|
| 1517 |
```
|
| 1518 |
|
| 1519 |
---
|
| 1520 |
|
| 1521 |
-
##
|
| 1522 |
-
|
| 1523 |
-
### Prerequisites
|
| 1524 |
-
|
| 1525 |
-
1. **Database Migration**
|
| 1526 |
-
```bash
|
| 1527 |
-
alembic revision --autogenerate -m "add_reconciliation_tables"
|
| 1528 |
-
alembic upgrade head
|
| 1529 |
-
```
|
| 1530 |
-
|
| 1531 |
-
2. **Install APScheduler**
|
| 1532 |
-
```bash
|
| 1533 |
-
pip install apscheduler==3.10.4
|
| 1534 |
-
```
|
| 1535 |
-
|
| 1536 |
-
3. **Environment Variables**
|
| 1537 |
-
```bash
|
| 1538 |
-
RECONCILIATION_ENABLED=true
|
| 1539 |
-
RECONCILIATION_TIMEZONE=Africa/Nairobi
|
| 1540 |
-
RECONCILIATION_HOUR=0 # Midnight
|
| 1541 |
-
RECONCILIATION_MINUTE=0
|
| 1542 |
-
```
|
| 1543 |
-
|
| 1544 |
-
### Deployment Steps
|
| 1545 |
-
|
| 1546 |
-
1. **Deploy database changes**
|
| 1547 |
-
- Run migrations in production
|
| 1548 |
-
- Verify indexes created
|
| 1549 |
-
- Test query performance
|
| 1550 |
-
|
| 1551 |
-
2. **Deploy application code**
|
| 1552 |
-
- Deploy reconciliation service
|
| 1553 |
-
- Deploy scheduler
|
| 1554 |
-
- Deploy API endpoints
|
| 1555 |
-
|
| 1556 |
-
3. **Verify scheduler started**
|
| 1557 |
-
- Check logs for "Reconciliation scheduler started"
|
| 1558 |
-
- Verify next run time
|
| 1559 |
-
|
| 1560 |
-
4. **Test manual reconciliation**
|
| 1561 |
-
- Trigger via API for a test project
|
| 1562 |
-
- Verify results
|
| 1563 |
-
- Check notifications
|
| 1564 |
-
|
| 1565 |
-
5. **Monitor first scheduled run**
|
| 1566 |
-
- Wait for midnight
|
| 1567 |
-
- Check logs
|
| 1568 |
-
- Verify all projects reconciled
|
| 1569 |
-
- Check performance metrics
|
| 1570 |
-
|
| 1571 |
-
### Rollback Plan
|
| 1572 |
-
|
| 1573 |
-
If reconciliation causes issues:
|
| 1574 |
-
|
| 1575 |
-
1. **Disable scheduler**
|
| 1576 |
-
```python
|
| 1577 |
-
# In main.py
|
| 1578 |
-
# Comment out: start_scheduler()
|
| 1579 |
-
```
|
| 1580 |
-
|
| 1581 |
-
2. **Revert database changes** (if needed)
|
| 1582 |
-
```bash
|
| 1583 |
-
alembic downgrade -1
|
| 1584 |
-
```
|
| 1585 |
-
|
| 1586 |
-
3. **Manual reconciliation**
|
| 1587 |
-
- Use API endpoints for critical projects
|
| 1588 |
-
- Fix issues
|
| 1589 |
-
- Re-enable scheduler
|
| 1590 |
-
|
| 1591 |
-
---
|
| 1592 |
-
|
| 1593 |
-
## Monitoring & Observability
|
| 1594 |
-
|
| 1595 |
-
### Key Metrics to Track
|
| 1596 |
|
| 1597 |
-
|
| 1598 |
-
|
| 1599 |
-
|
| 1600 |
-
|
| 1601 |
-
|
| 1602 |
-
|
| 1603 |
-
2. **Business Metrics**
|
| 1604 |
-
- Tickets completed per day
|
| 1605 |
-
- Expenses per day
|
| 1606 |
-
- Anomaly detection rate
|
| 1607 |
-
- Agent productivity trends
|
| 1608 |
-
|
| 1609 |
-
3. **System Health**
|
| 1610 |
-
- Database connection pool usage
|
| 1611 |
-
- Memory consumption
|
| 1612 |
-
- CPU usage during reconciliation
|
| 1613 |
-
- Error rate
|
| 1614 |
-
|
| 1615 |
-
### Logging Strategy
|
| 1616 |
-
|
| 1617 |
-
```python
|
| 1618 |
-
# Structured logging for observability
|
| 1619 |
-
logger.info(
|
| 1620 |
-
"Reconciliation completed",
|
| 1621 |
-
extra={
|
| 1622 |
-
"run_id": str(run_id),
|
| 1623 |
-
"project_id": str(project_id),
|
| 1624 |
-
"date": str(target_date),
|
| 1625 |
-
"agents_processed": len(agent_stats),
|
| 1626 |
-
"execution_time_ms": execution_time,
|
| 1627 |
-
"anomalies_count": len(anomalies)
|
| 1628 |
-
}
|
| 1629 |
-
)
|
| 1630 |
```
|
| 1631 |
|
| 1632 |
-
###
|
| 1633 |
-
|
| 1634 |
-
|
| 1635 |
-
|
| 1636 |
-
|
| 1637 |
-
|
| 1638 |
-
|
| 1639 |
-
2. **Warning Alerts**
|
| 1640 |
-
- Anomalies detected >10% of agents
|
| 1641 |
-
- Execution time >30 seconds
|
| 1642 |
-
- High rejection rate across project
|
| 1643 |
-
|
| 1644 |
-
3. **Info Alerts**
|
| 1645 |
-
- Daily reconciliation completed
|
| 1646 |
-
- Summary statistics
|
| 1647 |
|
| 1648 |
---
|
| 1649 |
|
| 1650 |
-
##
|
| 1651 |
-
|
| 1652 |
-
### Phase 2 (Post-MVP)
|
| 1653 |
-
|
| 1654 |
-
1. **Real-time Reconciliation**
|
| 1655 |
-
- Reconcile as tickets are completed (not just daily)
|
| 1656 |
-
- Provide live dashboards
|
| 1657 |
|
| 1658 |
-
|
| 1659 |
-
|
| 1660 |
-
|
| 1661 |
-
|
| 1662 |
-
|
| 1663 |
-
|
| 1664 |
-
- Process multiple projects concurrently
|
| 1665 |
-
- Distributed task queue (if needed)
|
| 1666 |
-
|
| 1667 |
-
4. **Configurable Approval Workflows**
|
| 1668 |
-
- Per-project reconciliation rules
|
| 1669 |
-
- Manual review for high-value expenses
|
| 1670 |
-
- Supervisor approval gates
|
| 1671 |
-
|
| 1672 |
-
5. **Historical Analysis**
|
| 1673 |
-
- Trend analysis over time
|
| 1674 |
-
- Agent performance scoring
|
| 1675 |
-
- Predictive workload planning
|
| 1676 |
-
|
| 1677 |
-
### Phase 3 (Scale)
|
| 1678 |
-
|
| 1679 |
-
1. **Distributed Processing**
|
| 1680 |
-
- If >100 projects, consider Celery or similar
|
| 1681 |
-
- Horizontal scaling
|
| 1682 |
-
|
| 1683 |
-
2. **Data Archival**
|
| 1684 |
-
- Archive old reconciliation runs
|
| 1685 |
-
- Optimize storage
|
| 1686 |
-
|
| 1687 |
-
3. **Advanced Reporting**
|
| 1688 |
-
- Custom report builder
|
| 1689 |
-
- Export to Excel/PDF
|
| 1690 |
-
- Integration with BI tools
|
| 1691 |
|
| 1692 |
---
|
| 1693 |
|
| 1694 |
-
##
|
| 1695 |
-
|
| 1696 |
-
This reconciliation system provides a solid foundation for tracking field agent activity and preparing data for payroll. Key strengths:
|
| 1697 |
-
|
| 1698 |
-
- **Efficient**: Single query per project, handles 500 agents in <1 second
|
| 1699 |
-
- **Reliable**: Transactional integrity with automatic rollback
|
| 1700 |
-
- **Observable**: Complete audit trail and metrics
|
| 1701 |
-
- **Scalable**: Designed for growth from MVP to enterprise
|
| 1702 |
|
| 1703 |
-
|
| 1704 |
-
|
| 1705 |
-
|
|
|
|
| 1706 |
|
| 1707 |
-
**
|
| 1708 |
-
**Last Updated:** 2024-12-09
|
| 1709 |
-
**Next Review:** After MVP launch
|
|
|
|
| 1 |
# Daily Reconciliation System - Technical Specification
|
| 2 |
|
| 3 |
+
**Version:** 2.0 (Hybrid Real-Time + Scheduled)
|
| 4 |
+
**Status:** In Production (Scheduled), Pending Migrations (Real-Time)
|
| 5 |
+
**Last Updated:** 2024-12-10
|
|
|
|
| 6 |
|
| 7 |
---
|
| 8 |
|
| 9 |
+
## Overview
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
+
Aggregates field agent activity into daily timesheet records for performance tracking and payroll.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
+
**Architecture:** Hybrid approach
|
| 14 |
+
- **Real-time updates (99%)**: Immediate timesheet updates when events occur
|
| 15 |
+
- **Scheduled validation (1%)**: Midnight job finds orphans and discrepancies
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
---
|
| 18 |
|
| 19 |
+
## Database Schema
|
| 20 |
|
| 21 |
+
### Timesheets Table (Source of Truth for Summaries)
|
| 22 |
|
| 23 |
+
```sql
|
| 24 |
+
CREATE TABLE timesheets (
|
| 25 |
+
-- Identity
|
| 26 |
+
user_id UUID,
|
| 27 |
+
project_id UUID,
|
| 28 |
+
work_date DATE,
|
| 29 |
+
|
| 30 |
+
-- Clock times (derived from assignments)
|
| 31 |
+
clock_in_time TIMESTAMP, -- First assignment start
|
| 32 |
+
clock_out_time TIMESTAMP, -- Last assignment end
|
| 33 |
+
|
| 34 |
+
-- Work activity
|
| 35 |
+
tickets_assigned INTEGER,
|
| 36 |
+
tickets_completed INTEGER,
|
| 37 |
+
tickets_rejected INTEGER,
|
| 38 |
+
tickets_cancelled INTEGER,
|
| 39 |
+
tickets_rescheduled INTEGER,
|
| 40 |
+
|
| 41 |
+
-- Expenses
|
| 42 |
+
total_expenses NUMERIC,
|
| 43 |
+
approved_expenses NUMERIC,
|
| 44 |
+
pending_expenses NUMERIC,
|
| 45 |
+
rejected_expenses NUMERIC,
|
| 46 |
+
expense_claims_count INTEGER,
|
| 47 |
+
|
| 48 |
+
-- Inventory tracking
|
| 49 |
+
inventory_issued_count INTEGER,
|
| 50 |
+
inventory_issued_value NUMERIC,
|
| 51 |
+
inventory_installed_count INTEGER,
|
| 52 |
+
inventory_consumed_count INTEGER,
|
| 53 |
+
inventory_returned_count INTEGER,
|
| 54 |
+
inventory_returned_value NUMERIC,
|
| 55 |
+
inventory_lost_count INTEGER,
|
| 56 |
+
inventory_damaged_count INTEGER,
|
| 57 |
+
inventory_loss_value NUMERIC,
|
| 58 |
+
inventory_on_hand_count INTEGER,
|
| 59 |
+
inventory_on_hand_value NUMERIC,
|
| 60 |
+
inventory_details JSONB,
|
| 61 |
+
|
| 62 |
+
-- Real-time tracking
|
| 63 |
+
update_source TEXT, -- 'realtime', 'scheduled', 'manual'
|
| 64 |
+
last_realtime_update_at TIMESTAMP,
|
| 65 |
+
last_validated_at TIMESTAMP,
|
| 66 |
+
needs_review BOOLEAN,
|
| 67 |
+
discrepancy_notes TEXT,
|
| 68 |
+
version INTEGER, -- Optimistic locking
|
| 69 |
+
|
| 70 |
+
-- Reconciliation
|
| 71 |
+
reconciliation_run_id UUID,
|
| 72 |
+
last_reconciled_at TIMESTAMP,
|
| 73 |
+
status TEXT
|
| 74 |
+
);
|
| 75 |
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
+
### Timesheet Updates (Audit Trail)
|
| 78 |
|
| 79 |
+
```sql
|
| 80 |
+
CREATE TABLE timesheet_updates (
|
| 81 |
+
id UUID PRIMARY KEY,
|
| 82 |
+
timesheet_id UUID,
|
| 83 |
+
trigger_type TEXT, -- 'assignment_created', 'expense_approved', etc.
|
| 84 |
+
trigger_entity_type TEXT, -- 'ticket_assignment', 'ticket_expense', etc.
|
| 85 |
+
trigger_entity_id UUID,
|
| 86 |
+
fields_changed JSONB, -- What changed
|
| 87 |
+
updated_at TIMESTAMP
|
| 88 |
+
);
|
| 89 |
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
|
| 91 |
+
### Reconciliation Runs (Job Tracking)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
```sql
|
| 94 |
CREATE TABLE reconciliation_runs (
|
| 95 |
+
id UUID PRIMARY KEY,
|
| 96 |
+
project_id UUID,
|
| 97 |
+
reconciliation_date DATE,
|
| 98 |
+
run_type TEXT, -- 'scheduled', 'manual', 'partial'
|
| 99 |
+
status TEXT, -- 'running', 'completed', 'failed'
|
| 100 |
+
|
| 101 |
+
-- Metrics
|
| 102 |
+
agents_processed INTEGER,
|
| 103 |
+
timesheets_created INTEGER,
|
| 104 |
+
timesheets_updated INTEGER,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
execution_time_ms INTEGER,
|
|
|
|
| 106 |
|
| 107 |
+
-- Validation (for scheduled runs)
|
| 108 |
+
discrepancies_found INTEGER,
|
| 109 |
+
orphaned_records_found INTEGER,
|
| 110 |
+
corrections_made INTEGER,
|
| 111 |
+
discrepancy_details JSONB,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
-- Audit
|
| 114 |
+
triggered_by_user_id UUID,
|
| 115 |
+
started_at TIMESTAMP,
|
| 116 |
+
completed_at TIMESTAMP
|
|
|
|
|
|
|
|
|
|
| 117 |
);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
```
|
| 119 |
|
| 120 |
+
---
|
| 121 |
|
| 122 |
+
## Clock In/Out Times
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 123 |
|
| 124 |
+
Derived from ticket assignments:
|
| 125 |
+
- **Clock In**: Earliest `assigned_at` or `journey_started_at` for the day
|
| 126 |
+
- **Clock Out**: Latest `ended_at` or last `assigned_at` for the day
|
| 127 |
|
| 128 |
+
This tracks when the agent started and finished work.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
---
|
| 131 |
|
| 132 |
+
## Real-Time Updates
|
| 133 |
|
| 134 |
+
### Trigger Events
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
|
| 136 |
+
1. **Assignment Created** → Update `tickets_assigned`, `clock_in_time`
|
| 137 |
+
2. **Assignment Completed** → Update `tickets_completed`, `clock_out_time`
|
| 138 |
+
3. **Expense Created** → Update `total_expenses`, `pending_expenses`
|
| 139 |
+
4. **Expense Approved** → Update `approved_expenses`, `pending_expenses`
|
| 140 |
+
5. **Inventory Issued** → Update `inventory_issued_count`, `inventory_on_hand_count`
|
| 141 |
+
6. **Inventory Installed** → Update `inventory_installed_count`
|
| 142 |
+
7. **Inventory Returned** → Update `inventory_returned_count`, `inventory_on_hand_count`
|
| 143 |
|
| 144 |
+
### Implementation
|
| 145 |
|
| 146 |
```python
|
| 147 |
+
# In API endpoints (already implemented)
|
| 148 |
+
reconciliation_service = ReconciliationService(db)
|
| 149 |
+
reconciliation_service.update_user_timesheet_realtime(
|
| 150 |
+
user_id=agent_id,
|
| 151 |
+
project_id=project_id,
|
| 152 |
+
work_date=date.today(),
|
| 153 |
+
trigger_type='assignment_created',
|
| 154 |
+
trigger_entity_type='ticket_assignment',
|
| 155 |
+
trigger_entity_id=assignment.id
|
| 156 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 157 |
```
|
| 158 |
|
| 159 |
+
**Integrated in:**
|
| 160 |
+
- ✅ `src/app/api/v1/ticket_assignments.py` (assign, complete)
|
| 161 |
+
- ✅ `src/app/api/v1/ticket_expenses.py` (create, approve)
|
| 162 |
+
- ⏳ `src/app/api/v1/inventory.py` (pending)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
|
| 164 |
---
|
| 165 |
|
| 166 |
+
## Scheduled Reconciliation
|
|
|
|
|
|
|
| 167 |
|
| 168 |
+
### When It Runs
|
| 169 |
+
- **Time**: Midnight (00:00 Africa/Nairobi)
|
| 170 |
+
- **Frequency**: Daily
|
| 171 |
+
- **Scope**: All projects
|
| 172 |
|
| 173 |
+
### What It Does
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
+
1. **Aggregate Activity**: Query all assignments/expenses for yesterday
|
| 176 |
+
2. **Calculate Clock Times**: MIN(assigned_at), MAX(ended_at)
|
| 177 |
+
3. **Calculate Inventory**: Call `calculate_inventory_on_hand()` function
|
| 178 |
+
4. **Upsert Timesheets**: Create or update timesheet records
|
| 179 |
+
5. **Detect Anomalies**: Flag unusual patterns
|
| 180 |
+
6. **Find Orphans**: Assignments/expenses without timesheet updates (future)
|
| 181 |
+
7. **Generate Report**: Summary stats and anomalies
|
| 182 |
|
| 183 |
+
### Aggregation Query
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 184 |
|
| 185 |
+
```sql
|
| 186 |
+
WITH daily_assignments AS (
|
| 187 |
+
SELECT
|
| 188 |
+
ta.user_id,
|
| 189 |
+
ta.action,
|
| 190 |
+
ta.assigned_at,
|
| 191 |
+
ta.ended_at,
|
| 192 |
+
t.status,
|
| 193 |
+
t.completed_at
|
| 194 |
+
FROM ticket_assignments ta
|
| 195 |
+
JOIN tickets t ON ta.ticket_id = t.id
|
| 196 |
+
WHERE t.project_id = :project_id
|
| 197 |
+
AND DATE(ta.assigned_at) = :target_date
|
| 198 |
+
),
|
| 199 |
+
daily_expenses AS (
|
| 200 |
+
SELECT
|
| 201 |
+
te.incurred_by_user_id as user_id,
|
| 202 |
+
te.total_cost,
|
| 203 |
+
te.is_approved
|
| 204 |
+
FROM ticket_expenses te
|
| 205 |
+
WHERE DATE(te.created_at) = :target_date
|
| 206 |
+
)
|
| 207 |
+
SELECT
|
| 208 |
+
user_id,
|
| 209 |
+
MIN(assigned_at) as clock_in_time,
|
| 210 |
+
MAX(ended_at) as clock_out_time,
|
| 211 |
+
COUNT(*) FILTER (WHERE action = 'accepted') as tickets_assigned,
|
| 212 |
+
COUNT(*) FILTER (WHERE status = 'completed') as tickets_completed,
|
| 213 |
+
SUM(total_cost) as total_expenses,
|
| 214 |
+
SUM(total_cost) FILTER (WHERE is_approved = TRUE) as approved_expenses
|
| 215 |
+
FROM daily_assignments
|
| 216 |
+
FULL OUTER JOIN daily_expenses USING (user_id)
|
| 217 |
+
GROUP BY user_id;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
```
|
| 219 |
|
| 220 |
+
---
|
| 221 |
|
| 222 |
+
## Inventory Tracking
|
|
|
|
|
|
|
| 223 |
|
| 224 |
+
### Source Tables
|
| 225 |
+
- `inventory_assignments` - Individual inventory movements
|
| 226 |
+
- `inventory_transfers` - Hub-to-hub transfers (now has RLS)
|
| 227 |
|
| 228 |
+
### Aggregation
|
| 229 |
+
Timesheets store daily summaries. Use `calculate_inventory_on_hand()` function to compute end-of-day inventory status from `inventory_assignments`.
|
|
|
|
|
|
|
| 230 |
|
| 231 |
+
**No separate view needed** - timesheets is the source of truth for summaries.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 232 |
|
| 233 |
---
|
| 234 |
|
| 235 |
## API Endpoints
|
| 236 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
```
|
| 238 |
+
POST /api/v1/reconciliation/projects/{project_id}/reconcile
|
| 239 |
+
GET /api/v1/reconciliation/runs/{run_id}
|
| 240 |
+
GET /api/v1/reconciliation/runs
|
| 241 |
+
GET /api/v1/reconciliation/timesheets
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
```
|
| 243 |
|
| 244 |
---
|
| 245 |
|
| 246 |
+
## Migrations
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 247 |
|
| 248 |
+
### Migration 1: Base System ✅ DEPLOYED
|
| 249 |
+
**File:** `supabase/migrations/20241209_add_reconciliation_system.sql`
|
| 250 |
+
- Created `timesheets` table
|
| 251 |
+
- Created `reconciliation_runs` table
|
| 252 |
+
- Added indexes and RLS
|
| 253 |
|
| 254 |
+
### Migration 2: Real-Time Support ⏳ PENDING
|
| 255 |
+
**File:** `supabase/migrations/20241210_add_realtime_reconciliation.sql`
|
| 256 |
+
- Adds `update_source`, `version`, `needs_review` columns to timesheets
|
| 257 |
+
- Creates `timesheet_updates` audit table
|
| 258 |
+
- Adds `log_timesheet_update()` helper function
|
| 259 |
+
- Adds optimistic locking trigger
|
| 260 |
|
| 261 |
+
**Run this in Supabase SQL Editor**
|
|
|
|
|
|
|
|
|
|
| 262 |
|
| 263 |
+
### Migration 3: Inventory + Clock Times ⏳ PENDING
|
| 264 |
+
**File:** `supabase/migrations/20241210_add_inventory_to_timesheets.sql`
|
| 265 |
+
- Adds 11 inventory columns to timesheets
|
| 266 |
+
- Adds `clock_in_time`, `clock_out_time` columns
|
| 267 |
+
- Adds RLS policies for `inventory_transfers` table
|
| 268 |
+
- Creates `calculate_inventory_on_hand()` function
|
| 269 |
|
| 270 |
+
**Run this AFTER Migration 2**
|
|
|
|
|
|
|
|
|
|
|
|
|
| 271 |
|
| 272 |
---
|
| 273 |
|
| 274 |
+
## Performance
|
| 275 |
|
| 276 |
+
### Current Performance (500 agents)
|
| 277 |
+
- Aggregation query: ~800ms
|
| 278 |
+
- Bulk upsert: ~300ms
|
| 279 |
+
- Anomaly detection: ~200ms
|
| 280 |
+
- **Total: ~1.3 seconds**
|
| 281 |
|
| 282 |
+
### Optimization
|
| 283 |
+
- Single aggregation query (not per-agent)
|
| 284 |
+
- Bulk upsert with ON CONFLICT
|
| 285 |
+
- Proper indexes on date columns
|
| 286 |
+
- JSONB for flexible data
|
| 287 |
|
| 288 |
+
---
|
|
|
|
|
|
|
| 289 |
|
| 290 |
+
## Error Handling
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 291 |
|
| 292 |
+
### Transactional Safety
|
| 293 |
+
All reconciliation operations are wrapped in transactions:
|
| 294 |
+
- Success → Commit
|
| 295 |
+
- Failure → Automatic rollback
|
| 296 |
+
- No partial updates
|
| 297 |
|
| 298 |
+
### Graceful Degradation
|
| 299 |
+
Real-time updates fail silently (don't break main request):
|
| 300 |
```python
|
| 301 |
+
try:
|
| 302 |
+
reconciliation_service.update_user_timesheet_realtime(...)
|
| 303 |
+
except Exception as e:
|
| 304 |
+
logger.warning(f"Real-time reconciliation failed: {e}")
|
| 305 |
+
# Continue with main request
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
```
|
| 307 |
|
| 308 |
---
|
| 309 |
|
| 310 |
+
## Monitoring
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 311 |
|
| 312 |
+
### Logs
|
| 313 |
+
```
|
| 314 |
+
INFO: Starting reconciliation: project=X, date=2024-12-09
|
| 315 |
+
INFO: Aggregated 500 agents in 800ms
|
| 316 |
+
INFO: Reconciliation completed: agents=500, time=1300ms
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 317 |
```
|
| 318 |
|
| 319 |
+
### Metrics to Track
|
| 320 |
+
- Reconciliation run success rate
|
| 321 |
+
- Execution time per project
|
| 322 |
+
- Discrepancies found
|
| 323 |
+
- Orphaned records
|
| 324 |
+
- Real-time update failures
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 325 |
|
| 326 |
---
|
| 327 |
|
| 328 |
+
## Next Steps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 329 |
|
| 330 |
+
1. ⏳ Run Migration 2 (real-time schema)
|
| 331 |
+
2. ⏳ Run Migration 3 (inventory + clock times)
|
| 332 |
+
3. ⏳ Add inventory real-time updates to `src/app/api/v1/inventory.py`
|
| 333 |
+
4. ⏳ Update scheduled job to validation mode (find orphans/discrepancies)
|
| 334 |
+
5. ⏳ Add monitoring dashboard
|
| 335 |
+
6. ⏳ Add notification system (daily reports)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 336 |
|
| 337 |
---
|
| 338 |
|
| 339 |
+
## Current Status
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 340 |
|
| 341 |
+
**Scheduled Reconciliation:** ✅ Working in production
|
| 342 |
+
**Real-Time Updates:** ✅ Code ready, ⏳ DB schema pending
|
| 343 |
+
**Inventory Tracking:** ⏳ Schema ready, code pending
|
| 344 |
+
**Clock Times:** ✅ Code ready, ⏳ DB schema pending
|
| 345 |
|
| 346 |
+
**Last Successful Run:** 2024-12-10 00:00:00 UTC
|
|
|
|
|
|
docs/features/timesheets/SETUP_GUIDE.md
DELETED
|
@@ -1,261 +0,0 @@
|
|
| 1 |
-
# Daily Reconciliation System - Setup Guide
|
| 2 |
-
|
| 3 |
-
## Quick Start
|
| 4 |
-
|
| 5 |
-
### 1. Run Database Migration
|
| 6 |
-
|
| 7 |
-
Execute the migration in your Supabase SQL Editor:
|
| 8 |
-
|
| 9 |
-
```bash
|
| 10 |
-
# Location: supabase/migrations/20241209_add_reconciliation_system.sql
|
| 11 |
-
```
|
| 12 |
-
|
| 13 |
-
Or copy the SQL and run it directly in Supabase dashboard.
|
| 14 |
-
|
| 15 |
-
### 2. Install Dependencies
|
| 16 |
-
|
| 17 |
-
```bash
|
| 18 |
-
pip install -r requirements.txt
|
| 19 |
-
```
|
| 20 |
-
|
| 21 |
-
This will install APScheduler 3.10.4 for background job scheduling.
|
| 22 |
-
|
| 23 |
-
### 3. Start the Application
|
| 24 |
-
|
| 25 |
-
```bash
|
| 26 |
-
# From project root
|
| 27 |
-
cd src
|
| 28 |
-
uvicorn app.main:app --reload
|
| 29 |
-
```
|
| 30 |
-
|
| 31 |
-
You should see in the logs:
|
| 32 |
-
```
|
| 33 |
-
⏰ Scheduler:
|
| 34 |
-
✓ Daily reconciliation scheduler started (runs at midnight)
|
| 35 |
-
```
|
| 36 |
-
|
| 37 |
-
### 4. Test the System
|
| 38 |
-
|
| 39 |
-
#### Manual Reconciliation (API)
|
| 40 |
-
|
| 41 |
-
```bash
|
| 42 |
-
# Trigger reconciliation for a project
|
| 43 |
-
curl -X POST "http://localhost:8000/api/v1/reconciliation/run" \
|
| 44 |
-
-H "Content-Type: application/json" \
|
| 45 |
-
-d '{
|
| 46 |
-
"project_id": "your-project-uuid",
|
| 47 |
-
"target_date": "2024-12-08"
|
| 48 |
-
}'
|
| 49 |
-
|
| 50 |
-
# Check status
|
| 51 |
-
curl "http://localhost:8000/api/v1/reconciliation/status/{run_id}"
|
| 52 |
-
|
| 53 |
-
# Get daily report
|
| 54 |
-
curl "http://localhost:8000/api/v1/reconciliation/report/{project_id}?target_date=2024-12-08"
|
| 55 |
-
```
|
| 56 |
-
|
| 57 |
-
## API Endpoints
|
| 58 |
-
|
| 59 |
-
### POST `/api/v1/reconciliation/run`
|
| 60 |
-
Manually trigger reconciliation for a project/date.
|
| 61 |
-
|
| 62 |
-
**Request Body:**
|
| 63 |
-
```json
|
| 64 |
-
{
|
| 65 |
-
"project_id": "uuid",
|
| 66 |
-
"target_date": "2024-12-08", // optional, defaults to yesterday
|
| 67 |
-
"user_ids": ["uuid1", "uuid2"] // optional, for partial reconciliation
|
| 68 |
-
}
|
| 69 |
-
```
|
| 70 |
-
|
| 71 |
-
**Response:**
|
| 72 |
-
```json
|
| 73 |
-
{
|
| 74 |
-
"status": "success",
|
| 75 |
-
"run_id": "uuid",
|
| 76 |
-
"message": "Reconciliation started for 2024-12-08"
|
| 77 |
-
}
|
| 78 |
-
```
|
| 79 |
-
|
| 80 |
-
### GET `/api/v1/reconciliation/status/{run_id}`
|
| 81 |
-
Get status of a reconciliation run.
|
| 82 |
-
|
| 83 |
-
**Response:**
|
| 84 |
-
```json
|
| 85 |
-
{
|
| 86 |
-
"id": "uuid",
|
| 87 |
-
"project_id": "uuid",
|
| 88 |
-
"reconciliation_date": "2024-12-08",
|
| 89 |
-
"status": "completed",
|
| 90 |
-
"agents_processed": 150,
|
| 91 |
-
"timesheets_created": 120,
|
| 92 |
-
"timesheets_updated": 30,
|
| 93 |
-
"execution_time_ms": 850,
|
| 94 |
-
"summary_stats": {
|
| 95 |
-
"total_agents": 150,
|
| 96 |
-
"total_tickets_completed": 450,
|
| 97 |
-
"total_expenses": 125000.00
|
| 98 |
-
},
|
| 99 |
-
"anomalies_detected": [...]
|
| 100 |
-
}
|
| 101 |
-
```
|
| 102 |
-
|
| 103 |
-
### GET `/api/v1/reconciliation/report/{project_id}`
|
| 104 |
-
Get daily reconciliation report with agent details.
|
| 105 |
-
|
| 106 |
-
**Query Parameters:**
|
| 107 |
-
- `target_date` (optional): Date to get report for (defaults to yesterday)
|
| 108 |
-
|
| 109 |
-
**Response:**
|
| 110 |
-
```json
|
| 111 |
-
{
|
| 112 |
-
"project_id": "uuid",
|
| 113 |
-
"date": "2024-12-08",
|
| 114 |
-
"reconciliation_status": "completed",
|
| 115 |
-
"summary": {...},
|
| 116 |
-
"anomalies": [...],
|
| 117 |
-
"agents": [
|
| 118 |
-
{
|
| 119 |
-
"user_id": "uuid",
|
| 120 |
-
"user_name": "John Doe",
|
| 121 |
-
"tickets_completed": 5,
|
| 122 |
-
"total_expenses": 8500.00
|
| 123 |
-
}
|
| 124 |
-
]
|
| 125 |
-
}
|
| 126 |
-
```
|
| 127 |
-
|
| 128 |
-
### GET `/api/v1/reconciliation/history/{project_id}`
|
| 129 |
-
Get reconciliation history for a project.
|
| 130 |
-
|
| 131 |
-
**Query Parameters:**
|
| 132 |
-
- `limit` (optional): Number of runs to return (default: 30, max: 90)
|
| 133 |
-
|
| 134 |
-
### GET `/api/v1/reconciliation/anomalies/{project_id}`
|
| 135 |
-
Get anomalies detected for a project within a date range.
|
| 136 |
-
|
| 137 |
-
**Query Parameters:**
|
| 138 |
-
- `start_date` (optional): Start date (default: 30 days ago)
|
| 139 |
-
- `end_date` (optional): End date (default: today)
|
| 140 |
-
- `severity` (optional): Filter by severity (info, low, medium, high, critical)
|
| 141 |
-
|
| 142 |
-
## Scheduler Configuration
|
| 143 |
-
|
| 144 |
-
The scheduler runs automatically at midnight (Africa/Nairobi timezone).
|
| 145 |
-
|
| 146 |
-
To change the schedule, edit `src/app/tasks/scheduler.py`:
|
| 147 |
-
|
| 148 |
-
```python
|
| 149 |
-
scheduler.add_job(
|
| 150 |
-
func=run_daily_reconciliation,
|
| 151 |
-
trigger=CronTrigger(hour=0, minute=0), # Change time here
|
| 152 |
-
...
|
| 153 |
-
)
|
| 154 |
-
```
|
| 155 |
-
|
| 156 |
-
## Monitoring
|
| 157 |
-
|
| 158 |
-
### Check Scheduler Status
|
| 159 |
-
|
| 160 |
-
The scheduler logs all activity:
|
| 161 |
-
|
| 162 |
-
```
|
| 163 |
-
INFO: Starting scheduled reconciliation for 2024-12-08
|
| 164 |
-
INFO: Reconciling 10 projects for 2024-12-08
|
| 165 |
-
INFO: Reconciliation summary: 10/10 projects succeeded
|
| 166 |
-
```
|
| 167 |
-
|
| 168 |
-
### Check Reconciliation Runs
|
| 169 |
-
|
| 170 |
-
Query the database:
|
| 171 |
-
|
| 172 |
-
```sql
|
| 173 |
-
SELECT
|
| 174 |
-
reconciliation_date,
|
| 175 |
-
status,
|
| 176 |
-
agents_processed,
|
| 177 |
-
execution_time_ms,
|
| 178 |
-
summary_stats
|
| 179 |
-
FROM reconciliation_runs
|
| 180 |
-
WHERE project_id = 'your-project-uuid'
|
| 181 |
-
ORDER BY reconciliation_date DESC
|
| 182 |
-
LIMIT 10;
|
| 183 |
-
```
|
| 184 |
-
|
| 185 |
-
### Check Timesheets
|
| 186 |
-
|
| 187 |
-
```sql
|
| 188 |
-
SELECT
|
| 189 |
-
u.name,
|
| 190 |
-
t.work_date,
|
| 191 |
-
t.tickets_completed,
|
| 192 |
-
t.total_expenses,
|
| 193 |
-
t.last_reconciled_at
|
| 194 |
-
FROM timesheets t
|
| 195 |
-
JOIN users u ON t.user_id = u.id
|
| 196 |
-
WHERE t.project_id = 'your-project-uuid'
|
| 197 |
-
AND t.work_date = '2024-12-08'
|
| 198 |
-
ORDER BY t.tickets_completed DESC;
|
| 199 |
-
```
|
| 200 |
-
|
| 201 |
-
## Troubleshooting
|
| 202 |
-
|
| 203 |
-
### Scheduler Not Starting
|
| 204 |
-
|
| 205 |
-
Check logs for errors:
|
| 206 |
-
```
|
| 207 |
-
✗ Scheduler failed to start: ...
|
| 208 |
-
```
|
| 209 |
-
|
| 210 |
-
Common issues:
|
| 211 |
-
- APScheduler not installed: `pip install apscheduler==3.10.4`
|
| 212 |
-
- Timezone not available: Install `pytz` (should be in requirements.txt)
|
| 213 |
-
|
| 214 |
-
### Reconciliation Failing
|
| 215 |
-
|
| 216 |
-
Check the reconciliation_runs table:
|
| 217 |
-
|
| 218 |
-
```sql
|
| 219 |
-
SELECT error_message, error_details
|
| 220 |
-
FROM reconciliation_runs
|
| 221 |
-
WHERE status = 'failed'
|
| 222 |
-
ORDER BY started_at DESC
|
| 223 |
-
LIMIT 5;
|
| 224 |
-
```
|
| 225 |
-
|
| 226 |
-
Common issues:
|
| 227 |
-
- Database connection timeout: Increase connection pool size
|
| 228 |
-
- Missing indexes: Run the migration again
|
| 229 |
-
- Concurrent run: Another reconciliation is already running for that project/date
|
| 230 |
-
|
| 231 |
-
### Performance Issues
|
| 232 |
-
|
| 233 |
-
If reconciliation takes >30 seconds for 500 agents:
|
| 234 |
-
|
| 235 |
-
1. Check indexes are created:
|
| 236 |
-
```sql
|
| 237 |
-
SELECT indexname FROM pg_indexes
|
| 238 |
-
WHERE tablename IN ('ticket_assignments', 'ticket_expenses', 'timesheets');
|
| 239 |
-
```
|
| 240 |
-
|
| 241 |
-
2. Analyze query performance:
|
| 242 |
-
```sql
|
| 243 |
-
EXPLAIN ANALYZE
|
| 244 |
-
-- Copy the aggregation query from reconciliation_service.py
|
| 245 |
-
```
|
| 246 |
-
|
| 247 |
-
3. Check database load during reconciliation
|
| 248 |
-
|
| 249 |
-
## Next Steps
|
| 250 |
-
|
| 251 |
-
1. **Add Authentication**: Uncomment auth decorators in `reconciliation.py`
|
| 252 |
-
2. **Add Notifications**: Implement notification service integration
|
| 253 |
-
3. **Add Monitoring**: Set up alerts for failed reconciliations
|
| 254 |
-
4. **Add Tests**: Run the test suite (see RECONCILIATION_SYSTEM.md)
|
| 255 |
-
|
| 256 |
-
## Support
|
| 257 |
-
|
| 258 |
-
For issues or questions, refer to:
|
| 259 |
-
- Full technical spec: `docs/features/timesheets/RECONCILIATION_SYSTEM.md`
|
| 260 |
-
- Database schema: `supabase/migrations/20241209_add_reconciliation_system.sql`
|
| 261 |
-
- Service code: `src/app/services/reconciliation/`
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/schema/VALID_ROLES.md
ADDED
|
File without changes
|
src/app/api/v1/ticket_assignments.py
CHANGED
|
@@ -12,11 +12,15 @@ from fastapi import APIRouter, Depends, HTTPException, status, Query
|
|
| 12 |
from sqlalchemy.orm import Session
|
| 13 |
from typing import List, Optional
|
| 14 |
from uuid import UUID
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
from app.api.deps import get_db, get_current_user
|
| 17 |
from app.models.user import User
|
| 18 |
from app.models.enums import AppRole
|
| 19 |
from app.services.ticket_assignment_service import TicketAssignmentService
|
|
|
|
| 20 |
from app.schemas.ticket_assignment import (
|
| 21 |
TicketAssignCreate,
|
| 22 |
TicketAssignTeamCreate,
|
|
@@ -93,7 +97,25 @@ def assign_ticket(
|
|
| 93 |
):
|
| 94 |
require_dispatcher_or_pm(current_user)
|
| 95 |
service = TicketAssignmentService(db)
|
| 96 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
|
| 98 |
|
| 99 |
@router.post(
|
|
@@ -551,7 +573,24 @@ def complete_assignment(
|
|
| 551 |
current_user: User = Depends(get_current_user)
|
| 552 |
):
|
| 553 |
service = TicketAssignmentService(db)
|
| 554 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 555 |
|
| 556 |
|
| 557 |
# ============================================
|
|
|
|
| 12 |
from sqlalchemy.orm import Session
|
| 13 |
from typing import List, Optional
|
| 14 |
from uuid import UUID
|
| 15 |
+
import logging
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
|
| 19 |
from app.api.deps import get_db, get_current_user
|
| 20 |
from app.models.user import User
|
| 21 |
from app.models.enums import AppRole
|
| 22 |
from app.services.ticket_assignment_service import TicketAssignmentService
|
| 23 |
+
from app.services.reconciliation.reconciliation_service import ReconciliationService
|
| 24 |
from app.schemas.ticket_assignment import (
|
| 25 |
TicketAssignCreate,
|
| 26 |
TicketAssignTeamCreate,
|
|
|
|
| 97 |
):
|
| 98 |
require_dispatcher_or_pm(current_user)
|
| 99 |
service = TicketAssignmentService(db)
|
| 100 |
+
assignment = service.assign_ticket(ticket_id, data.user_id, current_user.id, data)
|
| 101 |
+
|
| 102 |
+
# Real-time reconciliation update
|
| 103 |
+
try:
|
| 104 |
+
from datetime import date
|
| 105 |
+
reconciliation_service = ReconciliationService(db)
|
| 106 |
+
reconciliation_service.update_user_timesheet_realtime(
|
| 107 |
+
user_id=data.user_id,
|
| 108 |
+
project_id=assignment.ticket.project_id,
|
| 109 |
+
work_date=date.today(),
|
| 110 |
+
trigger_type='assignment_created',
|
| 111 |
+
trigger_entity_type='ticket_assignment',
|
| 112 |
+
trigger_entity_id=assignment.id
|
| 113 |
+
)
|
| 114 |
+
except Exception as e:
|
| 115 |
+
# Don't fail the request if reconciliation fails
|
| 116 |
+
logger.warning(f"Real-time reconciliation failed: {str(e)}")
|
| 117 |
+
|
| 118 |
+
return assignment
|
| 119 |
|
| 120 |
|
| 121 |
@router.post(
|
|
|
|
| 573 |
current_user: User = Depends(get_current_user)
|
| 574 |
):
|
| 575 |
service = TicketAssignmentService(db)
|
| 576 |
+
assignment = service.complete_assignment(assignment_id, current_user.id, data)
|
| 577 |
+
|
| 578 |
+
# Real-time reconciliation update
|
| 579 |
+
try:
|
| 580 |
+
from datetime import date
|
| 581 |
+
reconciliation_service = ReconciliationService(db)
|
| 582 |
+
reconciliation_service.update_user_timesheet_realtime(
|
| 583 |
+
user_id=current_user.id,
|
| 584 |
+
project_id=assignment.ticket.project_id,
|
| 585 |
+
work_date=date.today(),
|
| 586 |
+
trigger_type='assignment_completed',
|
| 587 |
+
trigger_entity_type='ticket_assignment',
|
| 588 |
+
trigger_entity_id=assignment.id
|
| 589 |
+
)
|
| 590 |
+
except Exception as e:
|
| 591 |
+
logger.warning(f"Real-time reconciliation failed: {str(e)}")
|
| 592 |
+
|
| 593 |
+
return assignment
|
| 594 |
|
| 595 |
|
| 596 |
# ============================================
|
src/app/api/v1/ticket_expenses.py
CHANGED
|
@@ -26,6 +26,7 @@ from app.api.deps import get_db, get_current_user
|
|
| 26 |
from app.models.user import User
|
| 27 |
from app.models.enums import AppRole
|
| 28 |
from app.services.ticket_expense_service import TicketExpenseService
|
|
|
|
| 29 |
from app.models.ticket_expense import TicketExpense
|
| 30 |
from app.models.ticket import Ticket
|
| 31 |
from app.schemas.ticket_expense import (
|
|
@@ -191,6 +192,20 @@ def create_expense(
|
|
| 191 |
current_user=current_user
|
| 192 |
)
|
| 193 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 194 |
# Build response with user names
|
| 195 |
response = TicketExpenseResponse.model_validate(expense)
|
| 196 |
if expense.incurred_by_user:
|
|
@@ -300,6 +315,21 @@ def approve_expense(
|
|
| 300 |
background_tasks=background_tasks
|
| 301 |
)
|
| 302 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 303 |
response = TicketExpenseResponse.model_validate(expense)
|
| 304 |
if expense.incurred_by_user:
|
| 305 |
response.incurred_by_user_name = expense.incurred_by_user.name
|
|
|
|
| 26 |
from app.models.user import User
|
| 27 |
from app.models.enums import AppRole
|
| 28 |
from app.services.ticket_expense_service import TicketExpenseService
|
| 29 |
+
from app.services.reconciliation.reconciliation_service import ReconciliationService
|
| 30 |
from app.models.ticket_expense import TicketExpense
|
| 31 |
from app.models.ticket import Ticket
|
| 32 |
from app.schemas.ticket_expense import (
|
|
|
|
| 192 |
current_user=current_user
|
| 193 |
)
|
| 194 |
|
| 195 |
+
# Real-time reconciliation update
|
| 196 |
+
try:
|
| 197 |
+
reconciliation_service = ReconciliationService(db)
|
| 198 |
+
reconciliation_service.update_user_timesheet_realtime(
|
| 199 |
+
user_id=expense.incurred_by_user_id,
|
| 200 |
+
project_id=expense.ticket_assignment.ticket.project_id,
|
| 201 |
+
work_date=expense.expense_date,
|
| 202 |
+
trigger_type='expense_created',
|
| 203 |
+
trigger_entity_type='ticket_expense',
|
| 204 |
+
trigger_entity_id=expense.id
|
| 205 |
+
)
|
| 206 |
+
except Exception as e:
|
| 207 |
+
logger.warning(f"Real-time reconciliation failed: {str(e)}")
|
| 208 |
+
|
| 209 |
# Build response with user names
|
| 210 |
response = TicketExpenseResponse.model_validate(expense)
|
| 211 |
if expense.incurred_by_user:
|
|
|
|
| 315 |
background_tasks=background_tasks
|
| 316 |
)
|
| 317 |
|
| 318 |
+
# Real-time reconciliation update
|
| 319 |
+
try:
|
| 320 |
+
reconciliation_service = ReconciliationService(db)
|
| 321 |
+
trigger_type = 'expense_approved' if data.is_approved else 'expense_rejected'
|
| 322 |
+
reconciliation_service.update_user_timesheet_realtime(
|
| 323 |
+
user_id=expense.incurred_by_user_id,
|
| 324 |
+
project_id=expense.ticket_assignment.ticket.project_id,
|
| 325 |
+
work_date=expense.expense_date,
|
| 326 |
+
trigger_type=trigger_type,
|
| 327 |
+
trigger_entity_type='ticket_expense',
|
| 328 |
+
trigger_entity_id=expense.id
|
| 329 |
+
)
|
| 330 |
+
except Exception as e:
|
| 331 |
+
logger.warning(f"Real-time reconciliation failed: {str(e)}")
|
| 332 |
+
|
| 333 |
response = TicketExpenseResponse.model_validate(expense)
|
| 334 |
if expense.incurred_by_user:
|
| 335 |
response.incurred_by_user_name = expense.incurred_by_user.name
|
src/app/api/v1/timesheets.py
CHANGED
|
@@ -15,7 +15,7 @@ from app.models.timesheet import Timesheet
|
|
| 15 |
from app.models.enums import TimesheetStatus, AppRole
|
| 16 |
from app.schemas.timesheet import (
|
| 17 |
TimesheetCreate, TimesheetUpdate, TimesheetResponse, TimesheetListResponse,
|
| 18 |
-
TimesheetBulkCreate, TimesheetApproveLeave
|
| 19 |
)
|
| 20 |
from app.services.timesheet_service import TimesheetService
|
| 21 |
from app.services.audit_service import AuditService
|
|
@@ -105,6 +105,123 @@ async def create_timesheet(
|
|
| 105 |
)
|
| 106 |
|
| 107 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
@router.post("/bulk", response_model=List[TimesheetResponse], status_code=status.HTTP_201_CREATED)
|
| 109 |
@require_permission("manage_timesheets")
|
| 110 |
async def bulk_create_timesheets(
|
|
@@ -249,6 +366,177 @@ async def list_timesheets(
|
|
| 249 |
)
|
| 250 |
|
| 251 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 252 |
@router.get("/{timesheet_id}", response_model=TimesheetResponse)
|
| 253 |
@require_permission("view_timesheets")
|
| 254 |
async def get_timesheet(
|
|
|
|
| 15 |
from app.models.enums import TimesheetStatus, AppRole
|
| 16 |
from app.schemas.timesheet import (
|
| 17 |
TimesheetCreate, TimesheetUpdate, TimesheetResponse, TimesheetListResponse,
|
| 18 |
+
TimesheetBulkCreate, TimesheetApproveLeave, LeaveApplicationCreate
|
| 19 |
)
|
| 20 |
from app.services.timesheet_service import TimesheetService
|
| 21 |
from app.services.audit_service import AuditService
|
|
|
|
| 105 |
)
|
| 106 |
|
| 107 |
|
| 108 |
+
@router.post("/apply-leave", response_model=TimesheetResponse, status_code=status.HTTP_201_CREATED)
async def apply_for_leave(
    data: LeaveApplicationCreate,
    request: Request,
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """
    Field agent applies for leave (self-service).

    Any authenticated user may request leave for themselves; no special
    permission is required. The timesheet is created with the requested
    leave status and is pending manager approval (leave_approved_by_user_id
    stays NULL until the /approve-leave endpoint is used).

    Business rules:
    - Leave is always recorded for the caller (user_id = current_user.id).
    - At most one timesheet row per user per day.
    - Dates more than one day in the past are rejected.

    Raises:
        HTTPException 400: work_date is more than 1 day in the past.
        HTTPException 409: a timesheet already exists for that date.
        HTTPException 500: unexpected failure while creating the record.
    """
    from datetime import timedelta

    try:
        # Reject stale requests: agents may backfill at most one day.
        if data.work_date < (date.today() - timedelta(days=1)):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Cannot apply for leave more than 1 day in the past"
            )

        # One timesheet per user per day. NOTE: any existing row blocks the
        # request — a regular attendance timesheet as well as a prior leave
        # request — so the error message must not claim it is a leave row.
        existing = db.query(Timesheet).filter(
            Timesheet.user_id == current_user.id,
            Timesheet.work_date == data.work_date,
            Timesheet.deleted_at.is_(None)
        ).first()

        if existing:
            raise HTTPException(
                status_code=status.HTTP_409_CONFLICT,
                detail=f"A timesheet already exists for {data.work_date}; cannot apply for leave"
            )

        # Leave days carry zero ticket activity by definition.
        timesheet_data = TimesheetCreate(
            user_id=current_user.id,
            project_id=data.project_id,
            work_date=data.work_date,
            status=data.status,
            leave_reason=data.leave_reason,
            notes=data.notes,
            tickets_assigned=0,
            tickets_completed=0,
            tickets_rescheduled=0,
            tickets_cancelled=0,
            tickets_rejected=0
        )

        timesheet = TimesheetService.create_timesheet(db, timesheet_data, current_user)

        # Audit trail for the self-service action.
        await AuditService.log_action(
            db=db,
            user_id=current_user.id,
            action="apply_for_leave",
            resource_type="timesheet",
            resource_id=timesheet.id,
            details={
                "work_date": str(data.work_date),
                "status": data.status.value,
                "leave_reason": data.leave_reason
            },
            request=request
        )

        # Display/derived fields are not persisted columns; set them here.
        response = TimesheetResponse.model_validate(timesheet)
        response.user_name = current_user.name
        response.project_title = timesheet.project.title if timesheet.project else None
        response.is_present = False
        response.is_on_leave = True
        response.has_time_tracking = False
        response.requires_approval = True

        logger.info(f"Leave application created: user={current_user.id}, date={data.work_date}, type={data.status.value}")

        return response

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error applying for leave: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to apply for leave: {str(e)}"
        )
|
| 223 |
+
|
| 224 |
+
|
| 225 |
@router.post("/bulk", response_model=List[TimesheetResponse], status_code=status.HTTP_201_CREATED)
|
| 226 |
@require_permission("manage_timesheets")
|
| 227 |
async def bulk_create_timesheets(
|
|
|
|
| 366 |
)
|
| 367 |
|
| 368 |
|
| 369 |
+
@router.get("/me/timesheets", response_model=TimesheetListResponse)
async def get_my_timesheets(
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=100, description="Items per page"),
    date_from: Optional[date] = Query(None, description="Start date for range filter"),
    date_to: Optional[date] = Query(None, description="End date for range filter"),
    # Renamed from `status` to avoid shadowing the `fastapi.status` module:
    # the original parameter made `status.HTTP_500_INTERNAL_SERVER_ERROR`
    # in the except handler an AttributeError on Optional[TimesheetStatus].
    # `alias="status"` keeps the public query-string interface unchanged.
    timesheet_status: Optional[TimesheetStatus] = Query(None, alias="status", description="Filter by status"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """
    Get my own timesheets (self-service for field agents).

    Any authenticated user can view their own timesheets; no special
    permission is required. The user filter is forced to the caller's id,
    so other users' records are never returned.

    Filters:
    - date_from/date_to: date range; when neither is given, defaults to the
      last 30 days.
    - status: attendance status filter (query parameter name is ``status``).

    Returns:
        Paginated list of the caller's daily timesheets, including leave
        requests with approval state and ticket/hour metrics.

    Raises:
        HTTPException 500: unexpected failure while listing timesheets.
    """
    try:
        # Default to the last 30 days when no range at all is specified.
        if not date_from and not date_to:
            from datetime import timedelta
            date_to = date.today()
            date_from = date_to - timedelta(days=30)

        skip = (page - 1) * page_size
        timesheets, total = TimesheetService.list_timesheets(
            db, current_user, skip, page_size,
            user_id=current_user.id,  # Force own user_id
            project_id=None,
            status=timesheet_status,
            date_from=date_from,
            date_to=date_to
        )

        # Attach display names and derived flags (not persisted columns).
        items = []
        for timesheet in timesheets:
            response = TimesheetResponse.model_validate(timesheet)
            response.user_name = current_user.name
            response.project_title = timesheet.project.title if timesheet.project else None
            response.leave_approved_by_name = timesheet.leave_approved_by.name if timesheet.leave_approved_by else None
            response.is_present = timesheet.is_present
            response.is_on_leave = timesheet.is_on_leave
            response.has_time_tracking = timesheet.has_time_tracking
            response.requires_approval = timesheet.requires_approval
            items.append(response)

        return TimesheetListResponse(
            items=items,
            total=total,
            page=page,
            page_size=page_size,
            total_pages=math.ceil(total / page_size) if total > 0 else 0
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting my timesheets: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get timesheets: {str(e)}"
        )
)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
@router.get("/users/{user_id}/performance", response_model=TimesheetListResponse)
@require_permission("view_timesheets")
async def get_user_performance(
    user_id: UUID,
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=100, description="Items per page"),
    date_from: Optional[date] = Query(None, description="Start date"),
    date_to: Optional[date] = Query(None, description="End date"),
    project_id: Optional[UUID] = Query(None, description="Filter by project"),
    current_user: User = Depends(get_current_active_user),
    db: Session = Depends(get_db)
):
    """
    Timesheet performance view of a single user, for managers.

    Requires the ``view_timesheets`` permission. Returns the target user's
    daily timesheets — attendance state, ticket counts, hours worked and
    leave/approval status — for the requested window. When neither date
    bound is supplied, the last 30 days are used. Results are paginated.

    NOTE(review): any team-scoping of which users a manager may inspect is
    presumably enforced inside TimesheetService.list_timesheets — confirm.

    Raises:
        HTTPException 500: unexpected failure while listing timesheets.
    """
    try:
        # No range supplied at all -> default to a trailing 30-day window.
        if not date_from and not date_to:
            from datetime import timedelta
            date_to = date.today()
            date_from = date_to - timedelta(days=30)

        offset = (page - 1) * page_size
        records, total = TimesheetService.list_timesheets(
            db, current_user, offset, page_size,
            user_id=user_id,
            project_id=project_id,
            status=None,
            date_from=date_from,
            date_to=date_to
        )

        # Enrich every row with display names and derived flags.
        items = []
        for row in records:
            item = TimesheetResponse.model_validate(row)
            item.user_name = row.user.name if row.user else None
            item.project_title = row.project.title if row.project else None
            item.leave_approved_by_name = row.leave_approved_by.name if row.leave_approved_by else None
            item.is_present = row.is_present
            item.is_on_leave = row.is_on_leave
            item.has_time_tracking = row.has_time_tracking
            item.requires_approval = row.requires_approval
            items.append(item)

        return TimesheetListResponse(
            items=items,
            total=total,
            page=page,
            page_size=page_size,
            total_pages=math.ceil(total / page_size) if total > 0 else 0
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting user performance: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get user performance: {str(e)}"
        )
|
| 538 |
+
|
| 539 |
+
|
| 540 |
@router.get("/{timesheet_id}", response_model=TimesheetResponse)
|
| 541 |
@require_permission("view_timesheets")
|
| 542 |
async def get_timesheet(
|
src/app/config/apps.py
CHANGED
|
@@ -425,8 +425,7 @@ ROLE_APP_ACCESS: Dict[str, List[str]] = {
|
|
| 425 |
"sales_agent": [
|
| 426 |
# Sales agents (casual workers) need to track their commission/earnings
|
| 427 |
# Include payroll to see earnings from sales
|
| 428 |
-
"notifications", "dashboard", "sales_orders", "customers", "
|
| 429 |
-
"tickets", "tasks", # Follow up on customer issues
|
| 430 |
"payroll", # Track their sales commissions/earnings
|
| 431 |
"expenses"
|
| 432 |
]
|
|
|
|
| 425 |
"sales_agent": [
|
| 426 |
# Sales agents (casual workers) need to track their commission/earnings
|
| 427 |
# Include payroll to see earnings from sales
|
| 428 |
+
"notifications", "dashboard", "sales_orders", "customers", "timesheets",
|
|
|
|
| 429 |
"payroll", # Track their sales commissions/earnings
|
| 430 |
"expenses"
|
| 431 |
]
|
src/app/schemas/timesheet.py
CHANGED
|
@@ -84,6 +84,15 @@ class TimesheetUpdate(BaseModel):
|
|
| 84 |
return self
|
| 85 |
|
| 86 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
class TimesheetApproveLeave(BaseModel):
|
| 88 |
"""Schema for approving leave"""
|
| 89 |
leave_approved_by_user_id: UUID = Field(..., description="Manager approving the leave")
|
|
|
|
| 84 |
return self
|
| 85 |
|
| 86 |
|
| 87 |
+
class LeaveApplicationCreate(BaseModel):
    """Schema for field agents to apply for leave (self-service).

    Used by the /apply-leave endpoint; the applicant's user id comes from
    the authenticated session, not from this payload.
    """
    # Date the agent will be away; validation of past dates happens in the endpoint.
    work_date: date = Field(..., description="Date of leave")
    # Only leave-type statuses are accepted — the Literal constraint rejects
    # regular attendance statuses at validation time.
    status: Literal[TimesheetStatus.ON_LEAVE, TimesheetStatus.SICK_LEAVE] = Field(..., description="Type of leave")
    # Mandatory free-text justification shown to the approving manager.
    leave_reason: str = Field(..., min_length=1, max_length=1000, description="Reason for leave (required)")
    # Optional link to a project so the absence shows in project rollups.
    project_id: Optional[UUID] = Field(None, description="Project context (optional)")
    notes: Optional[str] = Field(None, max_length=500, description="Additional notes")
|
| 94 |
+
|
| 95 |
+
|
| 96 |
class TimesheetApproveLeave(BaseModel):
|
| 97 |
"""Schema for approving leave"""
|
| 98 |
leave_approved_by_user_id: UUID = Field(..., description="Manager approving the leave")
|
src/app/services/reconciliation/reconciliation_service.py
CHANGED
|
@@ -255,6 +255,10 @@ class ReconciliationService:
|
|
| 255 |
SELECT
|
| 256 |
COALESCE(da.user_id, de.user_id) as user_id,
|
| 257 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
-- Assignment counts by action type
|
| 259 |
COUNT(DISTINCT da.assignment_id) as tickets_assigned,
|
| 260 |
|
|
@@ -333,6 +337,7 @@ class ReconciliationService:
|
|
| 333 |
query = text("""
|
| 334 |
INSERT INTO timesheets (
|
| 335 |
user_id, project_id, work_date,
|
|
|
|
| 336 |
tickets_assigned, tickets_completed, tickets_rejected,
|
| 337 |
tickets_cancelled, tickets_rescheduled,
|
| 338 |
total_expenses, approved_expenses, pending_expenses, rejected_expenses,
|
|
@@ -341,6 +346,7 @@ class ReconciliationService:
|
|
| 341 |
status, created_at, updated_at
|
| 342 |
) VALUES (
|
| 343 |
:user_id, :project_id, :work_date,
|
|
|
|
| 344 |
:tickets_assigned, :tickets_completed, :tickets_rejected,
|
| 345 |
:tickets_cancelled, :tickets_rescheduled,
|
| 346 |
:total_expenses, :approved_expenses, :pending_expenses, :rejected_expenses,
|
|
@@ -350,6 +356,8 @@ class ReconciliationService:
|
|
| 350 |
)
|
| 351 |
ON CONFLICT (user_id, work_date)
|
| 352 |
DO UPDATE SET
|
|
|
|
|
|
|
| 353 |
tickets_assigned = EXCLUDED.tickets_assigned,
|
| 354 |
tickets_completed = EXCLUDED.tickets_completed,
|
| 355 |
tickets_rejected = EXCLUDED.tickets_rejected,
|
|
@@ -373,6 +381,8 @@ class ReconciliationService:
|
|
| 373 |
"user_id": str(stats["user_id"]),
|
| 374 |
"project_id": str(project_id),
|
| 375 |
"work_date": target_date,
|
|
|
|
|
|
|
| 376 |
"tickets_assigned": stats["tickets_assigned"],
|
| 377 |
"tickets_completed": stats["tickets_completed"],
|
| 378 |
"tickets_rejected": stats["tickets_rejected"],
|
|
@@ -480,3 +490,156 @@ class ReconciliationService:
|
|
| 480 |
self.db.commit()
|
| 481 |
except Exception as e:
|
| 482 |
logger.error(f"Failed to update run status: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
SELECT
|
| 256 |
COALESCE(da.user_id, de.user_id) as user_id,
|
| 257 |
|
| 258 |
+
-- Clock in/out times (first and last activity)
|
| 259 |
+
MIN(da.assigned_at) as clock_in_time,
|
| 260 |
+
MAX(COALESCE(da.ended_at, da.assigned_at)) as clock_out_time,
|
| 261 |
+
|
| 262 |
-- Assignment counts by action type
|
| 263 |
COUNT(DISTINCT da.assignment_id) as tickets_assigned,
|
| 264 |
|
|
|
|
| 337 |
query = text("""
|
| 338 |
INSERT INTO timesheets (
|
| 339 |
user_id, project_id, work_date,
|
| 340 |
+
clock_in_time, clock_out_time,
|
| 341 |
tickets_assigned, tickets_completed, tickets_rejected,
|
| 342 |
tickets_cancelled, tickets_rescheduled,
|
| 343 |
total_expenses, approved_expenses, pending_expenses, rejected_expenses,
|
|
|
|
| 346 |
status, created_at, updated_at
|
| 347 |
) VALUES (
|
| 348 |
:user_id, :project_id, :work_date,
|
| 349 |
+
:clock_in_time, :clock_out_time,
|
| 350 |
:tickets_assigned, :tickets_completed, :tickets_rejected,
|
| 351 |
:tickets_cancelled, :tickets_rescheduled,
|
| 352 |
:total_expenses, :approved_expenses, :pending_expenses, :rejected_expenses,
|
|
|
|
| 356 |
)
|
| 357 |
ON CONFLICT (user_id, work_date)
|
| 358 |
DO UPDATE SET
|
| 359 |
+
clock_in_time = EXCLUDED.clock_in_time,
|
| 360 |
+
clock_out_time = EXCLUDED.clock_out_time,
|
| 361 |
tickets_assigned = EXCLUDED.tickets_assigned,
|
| 362 |
tickets_completed = EXCLUDED.tickets_completed,
|
| 363 |
tickets_rejected = EXCLUDED.tickets_rejected,
|
|
|
|
| 381 |
"user_id": str(stats["user_id"]),
|
| 382 |
"project_id": str(project_id),
|
| 383 |
"work_date": target_date,
|
| 384 |
+
"clock_in_time": stats.get("clock_in_time"),
|
| 385 |
+
"clock_out_time": stats.get("clock_out_time"),
|
| 386 |
"tickets_assigned": stats["tickets_assigned"],
|
| 387 |
"tickets_completed": stats["tickets_completed"],
|
| 388 |
"tickets_rejected": stats["tickets_rejected"],
|
|
|
|
| 490 |
self.db.commit()
|
| 491 |
except Exception as e:
|
| 492 |
logger.error(f"Failed to update run status: {str(e)}")
|
| 493 |
+
|
| 494 |
+
def update_user_timesheet_realtime(
|
| 495 |
+
self,
|
| 496 |
+
user_id: UUID,
|
| 497 |
+
project_id: UUID,
|
| 498 |
+
work_date: date,
|
| 499 |
+
trigger_type: str,
|
| 500 |
+
trigger_entity_type: str,
|
| 501 |
+
trigger_entity_id: UUID
|
| 502 |
+
) -> Optional[UUID]:
|
| 503 |
+
"""
|
| 504 |
+
Real-time timesheet update triggered by events.
|
| 505 |
+
|
| 506 |
+
This is called immediately when:
|
| 507 |
+
- Assignment created/updated
|
| 508 |
+
- Ticket completed
|
| 509 |
+
- Expense created/approved/rejected
|
| 510 |
+
- Inventory issued/installed/returned/lost
|
| 511 |
+
|
| 512 |
+
Args:
|
| 513 |
+
user_id: Field agent whose timesheet to update
|
| 514 |
+
project_id: Project context
|
| 515 |
+
work_date: Date of the activity
|
| 516 |
+
trigger_type: Event type (e.g., 'assignment_created', 'expense_approved')
|
| 517 |
+
trigger_entity_type: Entity type (e.g., 'ticket_assignment', 'ticket_expense')
|
| 518 |
+
trigger_entity_id: ID of the entity that triggered the update
|
| 519 |
+
|
| 520 |
+
Returns:
|
| 521 |
+
UUID of timesheet record (or None if update failed)
|
| 522 |
+
"""
|
| 523 |
+
try:
|
| 524 |
+
# Step 1: Aggregate current data for this user/date
|
| 525 |
+
agent_stats = self._aggregate_agent_activity(
|
| 526 |
+
project_id=project_id,
|
| 527 |
+
target_date=work_date,
|
| 528 |
+
user_ids=[user_id]
|
| 529 |
+
)
|
| 530 |
+
|
| 531 |
+
if not agent_stats:
|
| 532 |
+
logger.warning(
|
| 533 |
+
f"No activity found for real-time update: "
|
| 534 |
+
f"user={user_id}, date={work_date}, trigger={trigger_type}"
|
| 535 |
+
)
|
| 536 |
+
return None
|
| 537 |
+
|
| 538 |
+
stats = agent_stats[0]
|
| 539 |
+
|
| 540 |
+
# Step 2: Upsert timesheet with real-time source
|
| 541 |
+
query = text("""
|
| 542 |
+
INSERT INTO timesheets (
|
| 543 |
+
user_id, project_id, work_date,
|
| 544 |
+
clock_in_time, clock_out_time,
|
| 545 |
+
tickets_assigned, tickets_completed, tickets_rejected,
|
| 546 |
+
tickets_cancelled, tickets_rescheduled,
|
| 547 |
+
total_expenses, approved_expenses, pending_expenses, rejected_expenses,
|
| 548 |
+
expense_claims_count,
|
| 549 |
+
update_source, last_realtime_update_at,
|
| 550 |
+
status, created_at, updated_at
|
| 551 |
+
) VALUES (
|
| 552 |
+
:user_id, :project_id, :work_date,
|
| 553 |
+
:clock_in_time, :clock_out_time,
|
| 554 |
+
:tickets_assigned, :tickets_completed, :tickets_rejected,
|
| 555 |
+
:tickets_cancelled, :tickets_rescheduled,
|
| 556 |
+
:total_expenses, :approved_expenses, :pending_expenses, :rejected_expenses,
|
| 557 |
+
:expense_claims_count,
|
| 558 |
+
'realtime', NOW(),
|
| 559 |
+
'present', NOW(), NOW()
|
| 560 |
+
)
|
| 561 |
+
ON CONFLICT (user_id, work_date)
|
| 562 |
+
DO UPDATE SET
|
| 563 |
+
clock_in_time = EXCLUDED.clock_in_time,
|
| 564 |
+
clock_out_time = EXCLUDED.clock_out_time,
|
| 565 |
+
tickets_assigned = EXCLUDED.tickets_assigned,
|
| 566 |
+
tickets_completed = EXCLUDED.tickets_completed,
|
| 567 |
+
tickets_rejected = EXCLUDED.tickets_rejected,
|
| 568 |
+
tickets_cancelled = EXCLUDED.tickets_cancelled,
|
| 569 |
+
tickets_rescheduled = EXCLUDED.tickets_rescheduled,
|
| 570 |
+
total_expenses = EXCLUDED.total_expenses,
|
| 571 |
+
approved_expenses = EXCLUDED.approved_expenses,
|
| 572 |
+
pending_expenses = EXCLUDED.pending_expenses,
|
| 573 |
+
rejected_expenses = EXCLUDED.rejected_expenses,
|
| 574 |
+
expense_claims_count = EXCLUDED.expense_claims_count,
|
| 575 |
+
update_source = 'realtime',
|
| 576 |
+
last_realtime_update_at = NOW(),
|
| 577 |
+
updated_at = NOW()
|
| 578 |
+
RETURNING id
|
| 579 |
+
""")
|
| 580 |
+
|
| 581 |
+
result = self.db.execute(query, {
|
| 582 |
+
"user_id": str(user_id),
|
| 583 |
+
"project_id": str(project_id),
|
| 584 |
+
"work_date": work_date,
|
| 585 |
+
"clock_in_time": stats.get("clock_in_time"),
|
| 586 |
+
"clock_out_time": stats.get("clock_out_time"),
|
| 587 |
+
"tickets_assigned": stats["tickets_assigned"],
|
| 588 |
+
"tickets_completed": stats["tickets_completed"],
|
| 589 |
+
"tickets_rejected": stats["tickets_rejected"],
|
| 590 |
+
"tickets_cancelled": stats["tickets_cancelled"],
|
| 591 |
+
"tickets_rescheduled": stats["tickets_rescheduled"],
|
| 592 |
+
"total_expenses": float(stats["total_expenses"]),
|
| 593 |
+
"approved_expenses": float(stats["approved_expenses"]),
|
| 594 |
+
"pending_expenses": float(stats["pending_expenses"]),
|
| 595 |
+
"rejected_expenses": float(stats["rejected_expenses"]),
|
| 596 |
+
"expense_claims_count": stats["expense_claims_count"]
|
| 597 |
+
})
|
| 598 |
+
|
| 599 |
+
timesheet_id = result.scalar_one()
|
| 600 |
+
|
| 601 |
+
# Step 3: Log the update in audit table
|
| 602 |
+
log_query = text("""
|
| 603 |
+
SELECT log_timesheet_update(
|
| 604 |
+
:timesheet_id,
|
| 605 |
+
:trigger_type,
|
| 606 |
+
:trigger_entity_type,
|
| 607 |
+
:trigger_entity_id,
|
| 608 |
+
:fields_changed
|
| 609 |
+
)
|
| 610 |
+
""")
|
| 611 |
+
|
| 612 |
+
# Build fields_changed JSON
|
| 613 |
+
fields_changed = {
|
| 614 |
+
"tickets_assigned": stats["tickets_assigned"],
|
| 615 |
+
"tickets_completed": stats["tickets_completed"],
|
| 616 |
+
"total_expenses": float(stats["total_expenses"]),
|
| 617 |
+
"approved_expenses": float(stats["approved_expenses"])
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
self.db.execute(log_query, {
|
| 621 |
+
"timesheet_id": str(timesheet_id),
|
| 622 |
+
"trigger_type": trigger_type,
|
| 623 |
+
"trigger_entity_type": trigger_entity_type,
|
| 624 |
+
"trigger_entity_id": str(trigger_entity_id),
|
| 625 |
+
"fields_changed": fields_changed
|
| 626 |
+
})
|
| 627 |
+
|
| 628 |
+
# Commit the transaction
|
| 629 |
+
self.db.commit()
|
| 630 |
+
|
| 631 |
+
logger.info(
|
| 632 |
+
f"Real-time timesheet update: user={user_id}, date={work_date}, "
|
| 633 |
+
f"trigger={trigger_type}, timesheet={timesheet_id}"
|
| 634 |
+
)
|
| 635 |
+
|
| 636 |
+
return timesheet_id
|
| 637 |
+
|
| 638 |
+
except Exception as e:
|
| 639 |
+
self.db.rollback()
|
| 640 |
+
logger.error(
|
| 641 |
+
f"Real-time timesheet update failed: user={user_id}, "
|
| 642 |
+
f"date={work_date}, trigger={trigger_type}, error={str(e)}",
|
| 643 |
+
exc_info=True
|
| 644 |
+
)
|
| 645 |
+
return None
|
src/app/tasks/scheduler.py
CHANGED
|
@@ -8,7 +8,6 @@ from apscheduler.schedulers.background import BackgroundScheduler
|
|
| 8 |
from apscheduler.triggers.cron import CronTrigger
|
| 9 |
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
|
| 10 |
from datetime import date, timedelta
|
| 11 |
-
import asyncio
|
| 12 |
import logging
|
| 13 |
from sqlalchemy import text
|
| 14 |
|
|
@@ -57,16 +56,14 @@ def run_daily_reconciliation():
|
|
| 57 |
"""
|
| 58 |
Scheduled job: Reconcile yesterday for all active projects.
|
| 59 |
|
| 60 |
-
This runs synchronously in the scheduler thread
|
| 61 |
-
but uses asyncio.run() to execute async reconciliation.
|
| 62 |
"""
|
| 63 |
yesterday = date.today() - timedelta(days=1)
|
| 64 |
|
| 65 |
logger.info(f"Starting scheduled reconciliation for {yesterday}")
|
| 66 |
|
| 67 |
try:
|
| 68 |
-
|
| 69 |
-
asyncio.run(reconcile_all_projects(yesterday))
|
| 70 |
logger.info(f"Scheduled reconciliation completed for {yesterday}")
|
| 71 |
|
| 72 |
except Exception as e:
|
|
@@ -74,40 +71,43 @@ def run_daily_reconciliation():
|
|
| 74 |
# Don't raise - let scheduler continue
|
| 75 |
|
| 76 |
|
| 77 |
-
|
| 78 |
"""
|
| 79 |
Reconcile all active projects for a given date.
|
| 80 |
|
| 81 |
Processes projects sequentially to avoid overwhelming the database.
|
| 82 |
For 10 projects with 500 agents each, total time ~5 minutes.
|
| 83 |
"""
|
| 84 |
-
from app.core.database import
|
| 85 |
from app.services.reconciliation import ReconciliationService
|
| 86 |
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
|
|
|
|
|
|
| 109 |
try:
|
| 110 |
-
|
|
|
|
| 111 |
project_id=project.id,
|
| 112 |
target_date=target_date,
|
| 113 |
run_type="scheduled"
|
|
@@ -118,34 +118,36 @@ async def reconcile_all_projects(target_date: date):
|
|
| 118 |
"run_id": str(run_id),
|
| 119 |
"status": "success"
|
| 120 |
})
|
|
|
|
|
|
|
| 121 |
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
|
| 150 |
|
| 151 |
def job_executed_listener(event):
|
|
@@ -154,34 +156,3 @@ def job_executed_listener(event):
|
|
| 154 |
logger.error(f"Job {event.job_id} failed: {event.exception}")
|
| 155 |
else:
|
| 156 |
logger.info(f"Job {event.job_id} completed successfully")
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
# Manual trigger function (for API)
|
| 160 |
-
async def trigger_reconciliation(
|
| 161 |
-
project_id: str,
|
| 162 |
-
target_date: date,
|
| 163 |
-
user_ids: list = None,
|
| 164 |
-
triggered_by: str = None
|
| 165 |
-
):
|
| 166 |
-
"""
|
| 167 |
-
Manually trigger reconciliation via API.
|
| 168 |
-
|
| 169 |
-
This is async and can be called directly from FastAPI endpoints.
|
| 170 |
-
"""
|
| 171 |
-
from app.core.database import get_async_session
|
| 172 |
-
from app.services.reconciliation import ReconciliationService
|
| 173 |
-
from uuid import UUID
|
| 174 |
-
|
| 175 |
-
async for db in get_async_session():
|
| 176 |
-
try:
|
| 177 |
-
service = ReconciliationService(db)
|
| 178 |
-
run_id = await service.reconcile_project_day(
|
| 179 |
-
project_id=UUID(project_id),
|
| 180 |
-
target_date=target_date,
|
| 181 |
-
user_ids=[UUID(uid) for uid in user_ids] if user_ids else None,
|
| 182 |
-
triggered_by=UUID(triggered_by) if triggered_by else None,
|
| 183 |
-
run_type="manual" if not user_ids else "partial"
|
| 184 |
-
)
|
| 185 |
-
return run_id
|
| 186 |
-
finally:
|
| 187 |
-
await db.close()
|
|
|
|
| 8 |
from apscheduler.triggers.cron import CronTrigger
|
| 9 |
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
|
| 10 |
from datetime import date, timedelta
|
|
|
|
| 11 |
import logging
|
| 12 |
from sqlalchemy import text
|
| 13 |
|
|
|
|
| 56 |
"""
|
| 57 |
Scheduled job: Reconcile yesterday for all active projects.
|
| 58 |
|
| 59 |
+
This runs synchronously in the scheduler thread.
|
|
|
|
| 60 |
"""
|
| 61 |
yesterday = date.today() - timedelta(days=1)
|
| 62 |
|
| 63 |
logger.info(f"Starting scheduled reconciliation for {yesterday}")
|
| 64 |
|
| 65 |
try:
|
| 66 |
+
reconcile_all_projects(yesterday)
|
|
|
|
| 67 |
logger.info(f"Scheduled reconciliation completed for {yesterday}")
|
| 68 |
|
| 69 |
except Exception as e:
|
|
|
|
| 71 |
# Don't raise - let scheduler continue
|
| 72 |
|
| 73 |
|
| 74 |
+
def reconcile_all_projects(target_date: date):
|
| 75 |
"""
|
| 76 |
Reconcile all active projects for a given date.
|
| 77 |
|
| 78 |
Processes projects sequentially to avoid overwhelming the database.
|
| 79 |
For 10 projects with 500 agents each, total time ~5 minutes.
|
| 80 |
"""
|
| 81 |
+
from app.core.database import SessionLocal
|
| 82 |
from app.services.reconciliation import ReconciliationService
|
| 83 |
|
| 84 |
+
db = SessionLocal()
|
| 85 |
+
try:
|
| 86 |
+
# Get all active projects
|
| 87 |
+
query = text("""
|
| 88 |
+
SELECT id, title
|
| 89 |
+
FROM projects
|
| 90 |
+
WHERE deleted_at IS NULL
|
| 91 |
+
AND is_closed = FALSE
|
| 92 |
+
AND status IN ('active', 'planning')
|
| 93 |
+
ORDER BY created_at DESC
|
| 94 |
+
""")
|
| 95 |
+
|
| 96 |
+
result = db.execute(query)
|
| 97 |
+
projects = result.fetchall()
|
| 98 |
+
|
| 99 |
+
logger.info(f"Reconciling {len(projects)} projects for {target_date}")
|
| 100 |
+
|
| 101 |
+
# Reconcile each project
|
| 102 |
+
results = []
|
| 103 |
+
|
| 104 |
+
for project in projects:
|
| 105 |
+
try:
|
| 106 |
+
# Create new session for each project to avoid transaction issues
|
| 107 |
+
project_db = SessionLocal()
|
| 108 |
try:
|
| 109 |
+
service = ReconciliationService(project_db)
|
| 110 |
+
run_id = service.reconcile_project_day(
|
| 111 |
project_id=project.id,
|
| 112 |
target_date=target_date,
|
| 113 |
run_type="scheduled"
|
|
|
|
| 118 |
"run_id": str(run_id),
|
| 119 |
"status": "success"
|
| 120 |
})
|
| 121 |
+
finally:
|
| 122 |
+
project_db.close()
|
| 123 |
|
| 124 |
+
except Exception as e:
|
| 125 |
+
logger.error(
|
| 126 |
+
f"Failed to reconcile project {project.id} ({project.title}): {str(e)}",
|
| 127 |
+
exc_info=True
|
| 128 |
+
)
|
| 129 |
+
results.append({
|
| 130 |
+
"project_id": str(project.id),
|
| 131 |
+
"project_title": project.title,
|
| 132 |
+
"error": str(e),
|
| 133 |
+
"status": "failed"
|
| 134 |
+
})
|
| 135 |
+
|
| 136 |
+
# Log summary
|
| 137 |
+
success_count = sum(1 for r in results if r["status"] == "success")
|
| 138 |
+
logger.info(
|
| 139 |
+
f"Reconciliation summary: {success_count}/{len(projects)} projects succeeded"
|
| 140 |
+
)
|
| 141 |
+
|
| 142 |
+
# Log failed projects
|
| 143 |
+
failed = [r for r in results if r["status"] == "failed"]
|
| 144 |
+
if failed:
|
| 145 |
+
logger.warning(f"Failed projects: {[r['project_title'] for r in failed]}")
|
| 146 |
+
|
| 147 |
+
except Exception as e:
|
| 148 |
+
logger.error(f"Error in reconcile_all_projects: {str(e)}", exc_info=True)
|
| 149 |
+
finally:
|
| 150 |
+
db.close()
|
| 151 |
|
| 152 |
|
| 153 |
def job_executed_listener(event):
|
|
|
|
| 156 |
logger.error(f"Job {event.job_id} failed: {event.exception}")
|
| 157 |
else:
|
| 158 |
logger.info(f"Job {event.job_id} completed successfully")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
supabase/migrations/20241210_add_inventory_to_timesheets.sql
ADDED
|
@@ -0,0 +1,347 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- =====================================================
|
| 2 |
+
-- Add Inventory Tracking to Timesheets
|
| 3 |
+
-- =====================================================
|
| 4 |
+
-- Description: Track daily inventory status in timesheets
|
| 5 |
+
-- Author: System Architecture Team
|
| 6 |
+
-- Date: 2024-12-10
|
| 7 |
+
-- =====================================================
|
| 8 |
+
|
| 9 |
+
-- =====================================================
|
| 10 |
+
-- 1. ADD INVENTORY TRACKING COLUMNS TO TIMESHEETS
|
| 11 |
+
-- =====================================================
|
| 12 |
+
|
| 13 |
+
ALTER TABLE timesheets
|
| 14 |
+
-- Inventory issued to agent on this date
|
| 15 |
+
ADD COLUMN IF NOT EXISTS inventory_issued_count INTEGER DEFAULT 0 CHECK (inventory_issued_count >= 0),
|
| 16 |
+
ADD COLUMN IF NOT EXISTS inventory_issued_value NUMERIC(12,2) DEFAULT 0 CHECK (inventory_issued_value >= 0),
|
| 17 |
+
|
| 18 |
+
-- Inventory used/installed on this date
|
| 19 |
+
ADD COLUMN IF NOT EXISTS inventory_installed_count INTEGER DEFAULT 0 CHECK (inventory_installed_count >= 0),
|
| 20 |
+
ADD COLUMN IF NOT EXISTS inventory_consumed_count INTEGER DEFAULT 0 CHECK (inventory_consumed_count >= 0),
|
| 21 |
+
|
| 22 |
+
-- Inventory returned on this date
|
| 23 |
+
ADD COLUMN IF NOT EXISTS inventory_returned_count INTEGER DEFAULT 0 CHECK (inventory_returned_count >= 0),
|
| 24 |
+
ADD COLUMN IF NOT EXISTS inventory_returned_value NUMERIC(12,2) DEFAULT 0 CHECK (inventory_returned_value >= 0),
|
| 25 |
+
|
| 26 |
+
-- Inventory lost/damaged on this date
|
| 27 |
+
ADD COLUMN IF NOT EXISTS inventory_lost_count INTEGER DEFAULT 0 CHECK (inventory_lost_count >= 0),
|
| 28 |
+
ADD COLUMN IF NOT EXISTS inventory_damaged_count INTEGER DEFAULT 0 CHECK (inventory_damaged_count >= 0),
|
| 29 |
+
ADD COLUMN IF NOT EXISTS inventory_loss_value NUMERIC(12,2) DEFAULT 0 CHECK (inventory_loss_value >= 0),
|
| 30 |
+
|
| 31 |
+
-- End of day inventory status
|
| 32 |
+
ADD COLUMN IF NOT EXISTS inventory_on_hand_count INTEGER DEFAULT 0 CHECK (inventory_on_hand_count >= 0),
|
| 33 |
+
ADD COLUMN IF NOT EXISTS inventory_on_hand_value NUMERIC(12,2) DEFAULT 0 CHECK (inventory_on_hand_value >= 0),
|
| 34 |
+
|
| 35 |
+
-- Detailed inventory breakdown (JSONB for flexibility)
|
| 36 |
+
ADD COLUMN IF NOT EXISTS inventory_details JSONB DEFAULT '[]';
|
| 37 |
+
|
| 38 |
+
-- Add column comments
|
| 39 |
+
COMMENT ON COLUMN timesheets.inventory_issued_count IS 'Number of inventory items issued to agent on this date';
|
| 40 |
+
COMMENT ON COLUMN timesheets.inventory_issued_value IS 'Total value of inventory issued';
|
| 41 |
+
COMMENT ON COLUMN timesheets.inventory_installed_count IS 'Number of items installed/used on this date';
|
| 42 |
+
COMMENT ON COLUMN timesheets.inventory_consumed_count IS 'Number of consumable items used on this date';
|
| 43 |
+
COMMENT ON COLUMN timesheets.inventory_returned_count IS 'Number of items returned to hub on this date';
|
| 44 |
+
COMMENT ON COLUMN timesheets.inventory_returned_value IS 'Total value of inventory returned';
|
| 45 |
+
COMMENT ON COLUMN timesheets.inventory_lost_count IS 'Number of items reported lost on this date';
|
| 46 |
+
COMMENT ON COLUMN timesheets.inventory_damaged_count IS 'Number of items reported damaged on this date';
|
| 47 |
+
COMMENT ON COLUMN timesheets.inventory_loss_value IS 'Total value of lost/damaged inventory';
|
| 48 |
+
COMMENT ON COLUMN timesheets.inventory_on_hand_count IS 'Total items in agent possession at end of day';
|
| 49 |
+
COMMENT ON COLUMN timesheets.inventory_on_hand_value IS 'Total value of inventory on hand at end of day';
|
| 50 |
+
COMMENT ON COLUMN timesheets.inventory_details IS 'Detailed breakdown by equipment type: [{"equipment_type": "ONT", "issued": 2, "installed": 1, "on_hand": 1}]';
|
| 51 |
+
|
| 52 |
+
-- Add indexes for inventory queries
|
| 53 |
+
CREATE INDEX IF NOT EXISTS idx_timesheets_inventory_on_hand
|
| 54 |
+
ON timesheets(user_id, work_date, inventory_on_hand_count)
|
| 55 |
+
WHERE inventory_on_hand_count > 0 AND deleted_at IS NULL;
|
| 56 |
+
|
| 57 |
+
CREATE INDEX IF NOT EXISTS idx_timesheets_inventory_loss
|
| 58 |
+
ON timesheets(work_date, inventory_lost_count, inventory_damaged_count)
|
| 59 |
+
WHERE (inventory_lost_count > 0 OR inventory_damaged_count > 0) AND deleted_at IS NULL;
|
| 60 |
+
|
| 61 |
+
CREATE INDEX IF NOT EXISTS idx_timesheets_inventory_details_gin
|
| 62 |
+
ON timesheets USING gin(inventory_details);
|
| 63 |
+
|
| 64 |
+
-- =====================================================
|
| 65 |
+
-- 2. UPDATE TIMESHEET_UPDATES FOR INVENTORY TRIGGERS
|
| 66 |
+
-- =====================================================
|
| 67 |
+
|
| 68 |
+
-- Add inventory-related trigger types
|
| 69 |
+
ALTER TABLE timesheet_updates
|
| 70 |
+
DROP CONSTRAINT IF EXISTS timesheet_updates_trigger_type_check;
|
| 71 |
+
|
| 72 |
+
ALTER TABLE timesheet_updates
|
| 73 |
+
ADD CONSTRAINT timesheet_updates_trigger_type_check
|
| 74 |
+
CHECK (trigger_type IN (
|
| 75 |
+
'assignment_created',
|
| 76 |
+
'assignment_updated',
|
| 77 |
+
'assignment_completed',
|
| 78 |
+
'ticket_completed',
|
| 79 |
+
'expense_created',
|
| 80 |
+
'expense_approved',
|
| 81 |
+
'expense_rejected',
|
| 82 |
+
'inventory_issued',
|
| 83 |
+
'inventory_installed',
|
| 84 |
+
'inventory_consumed',
|
| 85 |
+
'inventory_returned',
|
| 86 |
+
'inventory_lost',
|
| 87 |
+
'inventory_damaged',
|
| 88 |
+
'scheduled_reconciliation',
|
| 89 |
+
'manual_correction'
|
| 90 |
+
));
|
| 91 |
+
|
| 92 |
+
-- Add inventory entity type
|
| 93 |
+
ALTER TABLE timesheet_updates
|
| 94 |
+
DROP CONSTRAINT IF EXISTS timesheet_updates_trigger_entity_type_check;
|
| 95 |
+
|
| 96 |
+
ALTER TABLE timesheet_updates
|
| 97 |
+
ADD CONSTRAINT timesheet_updates_trigger_entity_type_check
|
| 98 |
+
CHECK (trigger_entity_type IN (
|
| 99 |
+
'ticket_assignment',
|
| 100 |
+
'ticket_expense',
|
| 101 |
+
'ticket',
|
| 102 |
+
'inventory_assignment',
|
| 103 |
+
'reconciliation_run'
|
| 104 |
+
));
|
| 105 |
+
|
| 106 |
+
-- =====================================================
|
| 107 |
+
-- 3. ADD CLOCK IN/OUT TIMES TO TIMESHEETS
|
| 108 |
+
-- =====================================================
|
| 109 |
+
|
| 110 |
+
-- Track when agent started and ended work for the day
|
| 111 |
+
ALTER TABLE timesheets
|
| 112 |
+
ADD COLUMN IF NOT EXISTS clock_in_time TIMESTAMP WITH TIME ZONE,
|
| 113 |
+
ADD COLUMN IF NOT EXISTS clock_out_time TIMESTAMP WITH TIME ZONE;
|
| 114 |
+
|
| 115 |
+
COMMENT ON COLUMN timesheets.clock_in_time IS 'First activity of the day (earliest assignment start or journey start)';
|
| 116 |
+
COMMENT ON COLUMN timesheets.clock_out_time IS 'Last activity of the day (latest assignment completion or last assignment start)';
|
| 117 |
+
|
| 118 |
+
-- Add index for clock time queries
|
| 119 |
+
CREATE INDEX IF NOT EXISTS idx_timesheets_clock_times
|
| 120 |
+
ON timesheets(user_id, work_date, clock_in_time, clock_out_time)
|
| 121 |
+
WHERE deleted_at IS NULL;
|
| 122 |
+
|
| 123 |
+
-- =====================================================
|
| 124 |
+
-- 4. ADD RLS FOR INVENTORY_TRANSFERS TABLE
|
| 125 |
+
-- =====================================================
|
| 126 |
+
|
| 127 |
+
-- Enable RLS on inventory_transfers
|
| 128 |
+
ALTER TABLE inventory_transfers ENABLE ROW LEVEL SECURITY;
|
| 129 |
+
|
| 130 |
+
-- Policy: Users can view their own transfers (as sender or recipient)
|
| 131 |
+
CREATE POLICY "users_view_own_inventory_transfers"
|
| 132 |
+
ON inventory_transfers
|
| 133 |
+
FOR SELECT
|
| 134 |
+
TO authenticated
|
| 135 |
+
USING (
|
| 136 |
+
from_user_id = auth.uid()
|
| 137 |
+
OR to_user_id = auth.uid()
|
| 138 |
+
);
|
| 139 |
+
|
| 140 |
+
-- Policy: Users can view transfers in their project
|
| 141 |
+
CREATE POLICY "users_view_project_inventory_transfers"
|
| 142 |
+
ON inventory_transfers
|
| 143 |
+
FOR SELECT
|
| 144 |
+
TO authenticated
|
| 145 |
+
USING (
|
| 146 |
+
EXISTS (
|
| 147 |
+
SELECT 1 FROM inventory_assignments ia
|
| 148 |
+
JOIN project_inventory_distribution pid ON ia.project_inventory_distribution_id = pid.id
|
| 149 |
+
JOIN project_inventory pi ON pid.project_inventory_id = pi.id
|
| 150 |
+
JOIN project_team pt ON pt.project_id = pi.project_id
|
| 151 |
+
WHERE ia.id = inventory_transfers.from_assignment_id
|
| 152 |
+
AND pt.user_id = auth.uid()
|
| 153 |
+
AND pt.deleted_at IS NULL
|
| 154 |
+
)
|
| 155 |
+
);
|
| 156 |
+
|
| 157 |
+
-- Policy: Users can create transfers from their own assignments
|
| 158 |
+
CREATE POLICY "users_create_own_inventory_transfers"
|
| 159 |
+
ON inventory_transfers
|
| 160 |
+
FOR INSERT
|
| 161 |
+
TO authenticated
|
| 162 |
+
WITH CHECK (
|
| 163 |
+
from_user_id = auth.uid()
|
| 164 |
+
OR EXISTS (
|
| 165 |
+
SELECT 1 FROM users u
|
| 166 |
+
WHERE u.id = auth.uid()
|
| 167 |
+
AND u.role IN ('project_manager', 'dispatcher', 'platform_admin', 'client_admin', 'contractor_admin')
|
| 168 |
+
AND u.deleted_at IS NULL
|
| 169 |
+
)
|
| 170 |
+
);
|
| 171 |
+
|
| 172 |
+
-- Policy: Managers and recipients can update transfers
|
| 173 |
+
CREATE POLICY "users_update_inventory_transfers"
|
| 174 |
+
ON inventory_transfers
|
| 175 |
+
FOR UPDATE
|
| 176 |
+
TO authenticated
|
| 177 |
+
USING (
|
| 178 |
+
to_user_id = auth.uid() -- Recipients can accept
|
| 179 |
+
OR EXISTS (
|
| 180 |
+
SELECT 1 FROM users u
|
| 181 |
+
WHERE u.id = auth.uid()
|
| 182 |
+
AND u.role IN ('project_manager', 'dispatcher', 'platform_admin', 'client_admin', 'contractor_admin')
|
| 183 |
+
AND u.deleted_at IS NULL
|
| 184 |
+
)
|
| 185 |
+
);
|
| 186 |
+
|
| 187 |
+
-- Policy: Service role has full access
|
| 188 |
+
CREATE POLICY "service_role_all_inventory_transfers"
|
| 189 |
+
ON inventory_transfers
|
| 190 |
+
FOR ALL
|
| 191 |
+
TO service_role
|
| 192 |
+
USING (true)
|
| 193 |
+
WITH CHECK (true);
|
| 194 |
+
|
| 195 |
+
GRANT SELECT ON inventory_transfers TO authenticated;
|
| 196 |
+
GRANT INSERT, UPDATE ON inventory_transfers TO authenticated;
|
| 197 |
+
GRANT ALL ON inventory_transfers TO service_role;
|
| 198 |
+
|
| 199 |
+
-- =====================================================
|
| 200 |
+
-- 5. CREATE FUNCTION TO CALCULATE INVENTORY ON HAND
|
| 201 |
+
-- =====================================================
|
| 202 |
+
|
| 203 |
+
CREATE OR REPLACE FUNCTION calculate_inventory_on_hand(
|
| 204 |
+
p_user_id UUID,
|
| 205 |
+
p_date DATE,
|
| 206 |
+
p_project_id UUID
|
| 207 |
+
)
|
| 208 |
+
RETURNS TABLE(
|
| 209 |
+
total_count INTEGER,
|
| 210 |
+
total_value NUMERIC(12,2),
|
| 211 |
+
details JSONB
|
| 212 |
+
) AS $$
|
| 213 |
+
BEGIN
|
| 214 |
+
RETURN QUERY
|
| 215 |
+
WITH inventory_movements AS (
|
| 216 |
+
-- Get all inventory movements up to this date
|
| 217 |
+
SELECT
|
| 218 |
+
ia.id,
|
| 219 |
+
ia.unit_identifier,
|
| 220 |
+
ia.status,
|
| 221 |
+
ia.issued_at,
|
| 222 |
+
ia.returned_at,
|
| 223 |
+
ia.installed_at,
|
| 224 |
+
ia.consumed_at,
|
| 225 |
+
pid.project_inventory_id,
|
| 226 |
+
pi.equipment_type,
|
| 227 |
+
pi.equipment_name,
|
| 228 |
+
pi.unit_cost
|
| 229 |
+
FROM inventory_assignments ia
|
| 230 |
+
JOIN project_inventory_distribution pid ON ia.project_inventory_distribution_id = pid.id
|
| 231 |
+
JOIN project_inventory pi ON pid.project_inventory_id = pi.id
|
| 232 |
+
WHERE ia.user_id = p_user_id
|
| 233 |
+
AND pi.project_id = p_project_id
|
| 234 |
+
AND DATE(ia.issued_at) <= p_date
|
| 235 |
+
AND ia.deleted_at IS NULL
|
| 236 |
+
),
|
| 237 |
+
current_inventory AS (
|
| 238 |
+
-- Calculate what's still on hand
|
| 239 |
+
SELECT
|
| 240 |
+
equipment_type,
|
| 241 |
+
equipment_name,
|
| 242 |
+
COUNT(*) as count,
|
| 243 |
+
SUM(unit_cost) as value
|
| 244 |
+
FROM inventory_movements
|
| 245 |
+
WHERE status IN ('issued', 'in_use')
|
| 246 |
+
AND (returned_at IS NULL OR DATE(returned_at) > p_date)
|
| 247 |
+
AND (installed_at IS NULL OR DATE(installed_at) > p_date)
|
| 248 |
+
AND (consumed_at IS NULL OR DATE(consumed_at) > p_date)
|
| 249 |
+
GROUP BY equipment_type, equipment_name
|
| 250 |
+
)
|
| 251 |
+
SELECT
|
| 252 |
+
COALESCE(SUM(count)::INTEGER, 0) as total_count,
|
| 253 |
+
COALESCE(SUM(value), 0) as total_value,
|
| 254 |
+
COALESCE(
|
| 255 |
+
jsonb_agg(
|
| 256 |
+
jsonb_build_object(
|
| 257 |
+
'equipment_type', equipment_type,
|
| 258 |
+
'equipment_name', equipment_name,
|
| 259 |
+
'count', count,
|
| 260 |
+
'value', value
|
| 261 |
+
)
|
| 262 |
+
),
|
| 263 |
+
'[]'::jsonb
|
| 264 |
+
) as details
|
| 265 |
+
FROM current_inventory;
|
| 266 |
+
END;
|
| 267 |
+
$$ LANGUAGE plpgsql;
|
| 268 |
+
|
| 269 |
+
COMMENT ON FUNCTION calculate_inventory_on_hand IS 'Calculate agent''s inventory on hand at end of specific date';
|
| 270 |
+
|
| 271 |
+
-- =====================================================
|
| 272 |
+
-- 6. VALIDATION QUERIES
|
| 273 |
+
-- =====================================================
|
| 274 |
+
|
| 275 |
+
-- Verify columns added
|
| 276 |
+
DO $$
|
| 277 |
+
BEGIN
|
| 278 |
+
IF EXISTS (
|
| 279 |
+
SELECT 1 FROM information_schema.columns
|
| 280 |
+
WHERE table_name = 'timesheets'
|
| 281 |
+
AND column_name = 'inventory_on_hand_count'
|
| 282 |
+
) THEN
|
| 283 |
+
RAISE NOTICE 'Inventory columns added to timesheets';
|
| 284 |
+
ELSE
|
| 285 |
+
RAISE EXCEPTION 'Inventory columns were not added';
|
| 286 |
+
END IF;
|
| 287 |
+
END $$;
|
| 288 |
+
|
| 289 |
+
-- Verify clock times added
|
| 290 |
+
DO $$
|
| 291 |
+
BEGIN
|
| 292 |
+
IF EXISTS (
|
| 293 |
+
SELECT 1 FROM information_schema.columns
|
| 294 |
+
WHERE table_name = 'timesheets'
|
| 295 |
+
AND column_name = 'clock_in_time'
|
| 296 |
+
) THEN
|
| 297 |
+
RAISE NOTICE 'Clock in/out times added to timesheets';
|
| 298 |
+
ELSE
|
| 299 |
+
RAISE EXCEPTION 'Clock times were not added';
|
| 300 |
+
END IF;
|
| 301 |
+
END $$;
|
| 302 |
+
|
| 303 |
+
-- Verify RLS enabled on inventory_transfers
|
| 304 |
+
DO $$
|
| 305 |
+
DECLARE
|
| 306 |
+
rls_enabled BOOLEAN;
|
| 307 |
+
BEGIN
|
| 308 |
+
SELECT relrowsecurity INTO rls_enabled
|
| 309 |
+
FROM pg_class
|
| 310 |
+
WHERE relname = 'inventory_transfers';
|
| 311 |
+
|
| 312 |
+
IF rls_enabled THEN
|
| 313 |
+
RAISE NOTICE 'RLS enabled on inventory_transfers';
|
| 314 |
+
ELSE
|
| 315 |
+
RAISE NOTICE 'Warning: RLS not enabled on inventory_transfers';
|
| 316 |
+
END IF;
|
| 317 |
+
END $$;
|
| 318 |
+
|
| 319 |
+
-- Verify function created
|
| 320 |
+
DO $$
|
| 321 |
+
BEGIN
|
| 322 |
+
IF EXISTS (
|
| 323 |
+
SELECT 1 FROM pg_proc
|
| 324 |
+
WHERE proname = 'calculate_inventory_on_hand'
|
| 325 |
+
) THEN
|
| 326 |
+
RAISE NOTICE 'Function calculate_inventory_on_hand created';
|
| 327 |
+
ELSE
|
| 328 |
+
RAISE EXCEPTION 'Function was not created';
|
| 329 |
+
END IF;
|
| 330 |
+
END $$;
|
| 331 |
+
|
| 332 |
+
-- =====================================================
|
| 333 |
+
-- MIGRATION COMPLETE
|
| 334 |
+
-- =====================================================
|
| 335 |
+
|
| 336 |
+
DO $$
|
| 337 |
+
BEGIN
|
| 338 |
+
RAISE NOTICE '==============================================';
|
| 339 |
+
RAISE NOTICE 'Migration 20241210_add_inventory_to_timesheets';
|
| 340 |
+
RAISE NOTICE 'Status: COMPLETED';
|
| 341 |
+
RAISE NOTICE 'Timesheets: 11 inventory + 2 clock time columns added';
|
| 342 |
+
RAISE NOTICE 'Function: calculate_inventory_on_hand created';
|
| 343 |
+
RAISE NOTICE 'RLS: 5 policies added for inventory_transfers';
|
| 344 |
+
RAISE NOTICE 'Indexes: 4 new indexes created';
|
| 345 |
+
RAISE NOTICE 'Trigger types: Updated for inventory events';
|
| 346 |
+
RAISE NOTICE '==============================================';
|
| 347 |
+
END $$;
|
supabase/migrations/20241210_add_realtime_reconciliation.sql
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- =====================================================
|
| 2 |
+
-- Real-Time Reconciliation Enhancement
|
| 3 |
+
-- =====================================================
|
| 4 |
+
-- Description: Adds support for hybrid real-time + scheduled reconciliation
|
| 5 |
+
-- Author: System Architecture Team
|
| 6 |
+
-- Date: 2024-12-10
|
| 7 |
+
-- =====================================================
|
| 8 |
+
|
| 9 |
+
-- =====================================================
|
| 10 |
+
-- 1. ENHANCE TIMESHEETS TABLE FOR REAL-TIME UPDATES
|
| 11 |
+
-- =====================================================
|
| 12 |
+
|
| 13 |
+
-- Add real-time tracking columns
|
| 14 |
+
ALTER TABLE timesheets
|
| 15 |
+
ADD COLUMN IF NOT EXISTS update_source TEXT DEFAULT 'realtime'
|
| 16 |
+
CHECK (update_source IN ('realtime', 'scheduled', 'manual')),
|
| 17 |
+
ADD COLUMN IF NOT EXISTS last_realtime_update_at TIMESTAMP WITH TIME ZONE,
|
| 18 |
+
ADD COLUMN IF NOT EXISTS last_validated_at TIMESTAMP WITH TIME ZONE,
|
| 19 |
+
ADD COLUMN IF NOT EXISTS needs_review BOOLEAN DEFAULT FALSE,
|
| 20 |
+
ADD COLUMN IF NOT EXISTS discrepancy_notes TEXT,
|
| 21 |
+
ADD COLUMN IF NOT EXISTS version INTEGER DEFAULT 1 NOT NULL;
|
| 22 |
+
|
| 23 |
+
-- Add column comments
|
| 24 |
+
COMMENT ON COLUMN timesheets.update_source IS 'Source of last update: realtime (event-driven), scheduled (batch), manual (user correction)';
|
| 25 |
+
COMMENT ON COLUMN timesheets.last_realtime_update_at IS 'Timestamp of last real-time update from ticket/expense events';
|
| 26 |
+
COMMENT ON COLUMN timesheets.last_validated_at IS 'Timestamp when scheduled reconciliation last validated this timesheet';
|
| 27 |
+
COMMENT ON COLUMN timesheets.needs_review IS 'TRUE if discrepancy found between real-time and scheduled reconciliation';
|
| 28 |
+
COMMENT ON COLUMN timesheets.discrepancy_notes IS 'Details about discrepancies found during validation';
|
| 29 |
+
COMMENT ON COLUMN timesheets.version IS 'Version number for optimistic locking (prevents race conditions)';
|
| 30 |
+
|
| 31 |
+
-- Add index for real-time queries
|
| 32 |
+
CREATE INDEX IF NOT EXISTS idx_timesheets_realtime_updates
|
| 33 |
+
ON timesheets(user_id, work_date, last_realtime_update_at DESC)
|
| 34 |
+
WHERE deleted_at IS NULL;
|
| 35 |
+
|
| 36 |
+
CREATE INDEX IF NOT EXISTS idx_timesheets_needs_review
|
| 37 |
+
ON timesheets(needs_review, last_validated_at)
|
| 38 |
+
WHERE needs_review = TRUE AND deleted_at IS NULL;
|
| 39 |
+
|
| 40 |
+
-- =====================================================
-- 2. CREATE TIMESHEET_UPDATES AUDIT TABLE
-- =====================================================

-- One row per timesheet mutation (real-time trigger or scheduled
-- reconciliation): what fired it, what changed, and who did it.
CREATE TABLE IF NOT EXISTS timesheet_updates (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    timesheet_id UUID NOT NULL REFERENCES timesheets(id) ON DELETE CASCADE,

    -- Event that triggered this audit entry
    trigger_type TEXT NOT NULL CHECK (trigger_type IN (
        'assignment_created',
        'assignment_updated',
        'assignment_completed',
        'ticket_completed',
        'expense_created',
        'expense_approved',
        'expense_rejected',
        'scheduled_reconciliation',
        'manual_correction'
    )),
    trigger_entity_type TEXT CHECK (trigger_entity_type IN ('ticket_assignment', 'ticket_expense', 'ticket', 'reconciliation_run')),
    trigger_entity_id UUID,

    -- Old/new values per changed field, e.g.
    -- {"tickets_completed": {"old": 5, "new": 6},
    --  "total_expenses": {"old": 1000.00, "new": 1500.00}}
    fields_changed JSONB NOT NULL DEFAULT '{}',

    -- NULL when the update was system-triggered
    updated_by_user_id UUID REFERENCES users(id) ON DELETE SET NULL,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),

    -- Free-form context for debugging
    additional_metadata JSONB DEFAULT '{}',

    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

COMMENT ON TABLE timesheet_updates IS 'Audit trail for all timesheet updates (real-time and scheduled). Tracks what changed, when, and why.';

COMMENT ON COLUMN timesheet_updates.trigger_type IS 'Event that triggered the timesheet update';
COMMENT ON COLUMN timesheet_updates.trigger_entity_type IS 'Type of entity that triggered the update';
COMMENT ON COLUMN timesheet_updates.trigger_entity_id IS 'ID of the entity that triggered the update';
COMMENT ON COLUMN timesheet_updates.fields_changed IS 'JSONB object showing old and new values for each changed field';

-- Per-timesheet history, newest first
CREATE INDEX IF NOT EXISTS idx_timesheet_updates_timesheet
    ON timesheet_updates(timesheet_id, updated_at DESC);

-- Cross-timesheet queries by trigger type
CREATE INDEX IF NOT EXISTS idx_timesheet_updates_trigger
    ON timesheet_updates(trigger_type, updated_at DESC);

-- Reverse lookup from the triggering entity (partial: skips NULL ids)
CREATE INDEX IF NOT EXISTS idx_timesheet_updates_entity
    ON timesheet_updates(trigger_entity_type, trigger_entity_id)
    WHERE trigger_entity_id IS NOT NULL;

-- Containment queries into fields_changed
CREATE INDEX IF NOT EXISTS idx_timesheet_updates_fields_gin
    ON timesheet_updates USING gin(fields_changed);
|
| 102 |
+
|
| 103 |
+
-- =====================================================
-- 3. ENHANCE RECONCILIATION_RUNS FOR VALIDATION
-- =====================================================

-- Additive, idempotent columns tracking what each run found and fixed.
ALTER TABLE reconciliation_runs
    ADD COLUMN IF NOT EXISTS discrepancies_found INTEGER DEFAULT 0 CHECK (discrepancies_found >= 0),
    ADD COLUMN IF NOT EXISTS orphaned_records_found INTEGER DEFAULT 0 CHECK (orphaned_records_found >= 0),
    ADD COLUMN IF NOT EXISTS corrections_made INTEGER DEFAULT 0 CHECK (corrections_made >= 0),
    ADD COLUMN IF NOT EXISTS discrepancy_details JSONB DEFAULT '[]';

COMMENT ON COLUMN reconciliation_runs.discrepancies_found IS 'Number of discrepancies found between real-time and scheduled data';
COMMENT ON COLUMN reconciliation_runs.orphaned_records_found IS 'Number of assignments/expenses without corresponding timesheet updates';
COMMENT ON COLUMN reconciliation_runs.corrections_made IS 'Number of timesheets corrected during this run';
COMMENT ON COLUMN reconciliation_runs.discrepancy_details IS 'Array of discrepancy records with details about what was wrong and how it was fixed';

-- Partial index: only runs that actually found something
CREATE INDEX IF NOT EXISTS idx_reconciliation_runs_discrepancies
    ON reconciliation_runs(discrepancies_found, reconciliation_date DESC)
    WHERE discrepancies_found > 0;
|
| 124 |
+
|
| 125 |
+
-- =====================================================
-- 4. CREATE FUNCTION FOR OPTIMISTIC LOCKING
-- =====================================================

-- Bumps timesheets.version on every row update so concurrent writers
-- can detect stale reads (optimistic locking).
CREATE OR REPLACE FUNCTION increment_timesheet_version()
RETURNS TRIGGER AS $$
BEGIN
    -- COALESCE guards rows whose version is NULL: NULL + 1 would
    -- stay NULL forever and silently defeat the version check.
    NEW.version = COALESCE(OLD.version, 0) + 1;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Recreate the trigger idempotently
DROP TRIGGER IF EXISTS trigger_timesheet_version_increment ON timesheets;
CREATE TRIGGER trigger_timesheet_version_increment
    BEFORE UPDATE ON timesheets
    FOR EACH ROW
    EXECUTE FUNCTION increment_timesheet_version();
|
| 144 |
+
|
| 145 |
+
-- =====================================================
-- 5. CREATE HELPER FUNCTION FOR REAL-TIME UPDATES
-- =====================================================

-- Inserts one audit row into timesheet_updates and returns its id.
-- p_updated_by_user_id is NULL for system-triggered updates.
CREATE OR REPLACE FUNCTION log_timesheet_update(
    p_timesheet_id UUID,
    p_trigger_type TEXT,
    p_trigger_entity_type TEXT,
    p_trigger_entity_id UUID,
    p_fields_changed JSONB,
    p_updated_by_user_id UUID DEFAULT NULL
)
RETURNS UUID AS $$
DECLARE
    v_update_id UUID;
BEGIN
    INSERT INTO timesheet_updates (
        timesheet_id,
        trigger_type,
        trigger_entity_type,
        trigger_entity_id,
        fields_changed,
        updated_by_user_id
    ) VALUES (
        p_timesheet_id,
        p_trigger_type,
        p_trigger_entity_type,
        p_trigger_entity_id,
        -- fields_changed is NOT NULL; map a NULL argument to the
        -- column's '{}' default instead of raising a constraint error
        COALESCE(p_fields_changed, '{}'::jsonb),
        p_updated_by_user_id
    )
    RETURNING id INTO v_update_id;

    RETURN v_update_id;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION log_timesheet_update IS 'Helper function to log timesheet updates with proper audit trail';
|
| 183 |
+
|
| 184 |
+
-- =====================================================
-- 6. ROW LEVEL SECURITY FOR NEW TABLES
-- =====================================================

ALTER TABLE timesheet_updates ENABLE ROW LEVEL SECURITY;

-- CREATE POLICY has no IF NOT EXISTS; drop-then-create keeps the
-- migration re-runnable like the rest of this file.

-- Policy: Users can see updates for their own timesheets
DROP POLICY IF EXISTS "users_view_own_timesheet_updates" ON timesheet_updates;
CREATE POLICY "users_view_own_timesheet_updates"
    ON timesheet_updates
    FOR SELECT
    TO authenticated
    USING (
        EXISTS (
            SELECT 1 FROM timesheets t
            WHERE t.id = timesheet_updates.timesheet_id
            AND t.user_id = auth.uid()
            AND t.deleted_at IS NULL
        )
    );

-- Policy: Managers can see updates for their project team
DROP POLICY IF EXISTS "managers_view_team_timesheet_updates" ON timesheet_updates;
CREATE POLICY "managers_view_team_timesheet_updates"
    ON timesheet_updates
    FOR SELECT
    TO authenticated
    USING (
        EXISTS (
            SELECT 1 FROM timesheets t
            JOIN project_team pt ON pt.user_id = t.user_id
            JOIN users u ON u.id = auth.uid()
            WHERE t.id = timesheet_updates.timesheet_id
            AND pt.project_id = t.project_id
            AND u.role IN ('project_manager', 'dispatcher', 'client_admin', 'contractor_admin')
            AND t.deleted_at IS NULL
            AND pt.deleted_at IS NULL
            AND u.deleted_at IS NULL
        )
    );

-- Policy: Service role has full access
DROP POLICY IF EXISTS "service_role_all_timesheet_updates" ON timesheet_updates;
CREATE POLICY "service_role_all_timesheet_updates"
    ON timesheet_updates
    FOR ALL
    TO service_role
    USING (true)
    WITH CHECK (true);
|
| 231 |
+
|
| 232 |
+
-- =====================================================
-- 7. GRANT PERMISSIONS
-- =====================================================

-- Authenticated users only read (writes go through service-role code);
-- the service role owns the full lifecycle.
GRANT SELECT ON timesheet_updates TO authenticated;
GRANT ALL ON timesheet_updates TO service_role;
|
| 238 |
+
|
| 239 |
+
-- =====================================================
-- 8. VALIDATION QUERIES
-- =====================================================

-- Each block raises an EXCEPTION (rolling back the migration) if the
-- expected object is missing. Lookups are schema-qualified so a
-- same-named object in another schema cannot produce a false positive.

-- Verify columns added to timesheets
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_schema = 'public'
        AND table_name = 'timesheets'
        AND column_name = 'update_source'
    ) THEN
        RAISE NOTICE 'Timesheets table enhanced for real-time updates';
    ELSE
        RAISE EXCEPTION 'Timesheets columns were not added';
    END IF;
END $$;

-- Verify timesheet_updates table created
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM information_schema.tables
        WHERE table_schema = 'public'
        AND table_name = 'timesheet_updates'
    ) THEN
        RAISE NOTICE 'Table timesheet_updates created successfully';
    ELSE
        RAISE EXCEPTION 'Table timesheet_updates was not created';
    END IF;
END $$;

-- Verify function created
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM pg_proc
        WHERE proname = 'log_timesheet_update'
        AND pronamespace = 'public'::regnamespace
    ) THEN
        RAISE NOTICE 'Helper function log_timesheet_update created';
    ELSE
        RAISE EXCEPTION 'Helper function was not created';
    END IF;
END $$;
|
| 279 |
+
|
| 280 |
+
-- =====================================================
-- MIGRATION COMPLETE
-- =====================================================

-- Summary banner emitted to the migration log.
DO $$
BEGIN
    RAISE NOTICE '==============================================';
    RAISE NOTICE 'Migration 20241210_add_realtime_reconciliation';
    RAISE NOTICE 'Status: COMPLETED';
    RAISE NOTICE 'Tables: timesheets (enhanced), timesheet_updates (created)';
    RAISE NOTICE 'Functions: log_timesheet_update, increment_timesheet_version';
    RAISE NOTICE 'Indexes: 5 new indexes created';
    RAISE NOTICE 'RLS Policies: 3 policies created';
    RAISE NOTICE '==============================================';
END $$;
|
supabase/migrations/20241210_cleanup_inventory_view.sql
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- =====================================================
-- Cleanup: Drop agent_inventory_status view
-- =====================================================
-- Reason: Timesheets is the source of truth for summaries
-- This view is redundant and should not exist
-- =====================================================

DROP VIEW IF EXISTS agent_inventory_status CASCADE;

-- Verify view is dropped; schema-qualified so a same-named view in
-- another schema cannot mask a failed drop in public.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.views
        WHERE table_schema = 'public'
        AND table_name = 'agent_inventory_status'
    ) THEN
        RAISE NOTICE 'View agent_inventory_status successfully dropped';
    ELSE
        RAISE EXCEPTION 'View agent_inventory_status still exists';
    END IF;
END $$;
|