stablefusiondance kunal-anthropic commited on
Commit
147754f
·
verified ·
0 Parent(s):

Duplicate from Anthropic/EconomicIndex

Browse files

Co-authored-by: kunalhanda <kunal-anthropic@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +62 -0
  2. .gitignore +1 -0
  3. README.md +79 -0
  4. release_2025_02_10/README.md +66 -0
  5. release_2025_02_10/SOC_Structure.csv +1597 -0
  6. release_2025_02_10/automation_vs_augmentation.csv +7 -0
  7. release_2025_02_10/bls_employment_may_2023.csv +23 -0
  8. release_2025_02_10/onet_task_mappings.csv +0 -0
  9. release_2025_02_10/onet_task_statements.csv +0 -0
  10. release_2025_02_10/plots.ipynb +767 -0
  11. release_2025_02_10/plots/automation_vs_augmentation.png +3 -0
  12. release_2025_02_10/plots/occupational_category_distribution.png +3 -0
  13. release_2025_02_10/plots/occupational_category_distribution_bls.png +3 -0
  14. release_2025_02_10/plots/occupations_distribution.png +3 -0
  15. release_2025_02_10/plots/task_distribution.png +3 -0
  16. release_2025_02_10/plots/wage_distribution.png +3 -0
  17. release_2025_02_10/wage_data.csv +0 -0
  18. release_2025_03_27/README.md +63 -0
  19. release_2025_03_27/SOC_Structure.csv +1597 -0
  20. release_2025_03_27/automation_augmentation_by_occupation.png +3 -0
  21. release_2025_03_27/automation_augmentation_comparison.png +3 -0
  22. release_2025_03_27/automation_vs_augmentation_by_task.csv +0 -0
  23. release_2025_03_27/automation_vs_augmentation_v1.csv +7 -0
  24. release_2025_03_27/automation_vs_augmentation_v2.csv +7 -0
  25. release_2025_03_27/cluster_level_data/README.md +42 -0
  26. release_2025_03_27/cluster_level_data/cluster_level_dataset.tsv +0 -0
  27. release_2025_03_27/cluster_level_data/cluster_level_example_analysis.ipynb +0 -0
  28. release_2025_03_27/normalized_automation_by_category.png +3 -0
  29. release_2025_03_27/onet_task_statements.csv +0 -0
  30. release_2025_03_27/task_pct_v1.csv +0 -0
  31. release_2025_03_27/task_pct_v2.csv +0 -0
  32. release_2025_03_27/task_thinking_fractions.csv +0 -0
  33. release_2025_03_27/v2_report_replication.ipynb +0 -0
  34. release_2025_09_15/README.md +72 -0
  35. release_2025_09_15/code/aei_analysis_functions_1p_api.py +2339 -0
  36. release_2025_09_15/code/aei_analysis_functions_claude_ai.py +2926 -0
  37. release_2025_09_15/code/aei_report_v3_analysis_1p_api.ipynb +315 -0
  38. release_2025_09_15/code/aei_report_v3_analysis_claude_ai.ipynb +868 -0
  39. release_2025_09_15/code/aei_report_v3_change_over_time_claude_ai.py +564 -0
  40. release_2025_09_15/code/aei_report_v3_preprocessing_claude_ai.ipynb +1840 -0
  41. release_2025_09_15/code/preprocess_gdp.py +364 -0
  42. release_2025_09_15/code/preprocess_iso_codes.py +111 -0
  43. release_2025_09_15/code/preprocess_onet.py +179 -0
  44. release_2025_09_15/code/preprocess_population.py +407 -0
  45. release_2025_09_15/data/input/BTOS_National.xlsx +3 -0
  46. release_2025_09_15/data/input/Population by single age _20250903072924.csv +3 -0
  47. release_2025_09_15/data/input/automation_vs_augmentation_v1.csv +3 -0
  48. release_2025_09_15/data/input/automation_vs_augmentation_v2.csv +3 -0
  49. release_2025_09_15/data/input/bea_us_state_gdp_2024.csv +3 -0
  50. release_2025_09_15/data/input/census_state_codes.txt +58 -0
.gitattributes ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
+ *.gif filter=lfs diff=lfs merge=lfs -text
51
+ *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
+ *.jpg filter=lfs diff=lfs merge=lfs -text
55
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
60
+ release_2025_09_15/**/*.csv filter=lfs diff=lfs merge=lfs -text
61
+ release_2025_09_15/**/*.xlsx filter=lfs diff=lfs merge=lfs -text
62
+ release_2025_09_15/**/*.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .DS_Store
README.md ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language: en
3
+ pretty_name: EconomicIndex
4
+ tags:
5
+ - AI
6
+ - LLM
7
+ - Economic Impacts
8
+ - Anthropic
9
+ viewer: true
10
+ license: mit
11
+ configs:
12
+ - config_name: release_2025_09_15
13
+ data_files:
14
+ - split: raw_claude_ai
15
+ path: "release_2025_09_15/data/intermediate/aei_raw_claude_ai_2025-08-04_to_2025-08-11.csv"
16
+ - split: raw_1p_api
17
+ path: "release_2025_09_15/data/intermediate/aei_raw_1p_api_2025-08-04_to_2025-08-11.csv"
18
+ - split: enriched_claude_ai
19
+ path: "release_2025_09_15/data/output/aei_enriched_claude_ai_2025-08-04_to_2025-08-11.csv"
20
+ ---
21
+
22
+ # The Anthropic Economic Index
23
+
24
+ ## Overview
25
+
26
+ The Anthropic Economic Index provides insights into how AI is being incorporated into real-world tasks across the modern economy.
27
+
28
+ ## Data Releases
29
+
30
+ This repository contains multiple data releases, each with its own documentation:
31
+
32
+ - **[2025-09-15 Release](https://huggingface.co/datasets/Anthropic/EconomicIndex/tree/main/release_2025_09_15)**: Updated analysis with geographic and first-party API data using Sonnet 4
33
+ - **[2025-03-27 Release](https://huggingface.co/datasets/Anthropic/EconomicIndex/tree/main/release_2025_03_27)**: Updated analysis with Claude 3.7 Sonnet data and cluster-level insights
34
+ - **[2025-02-10 Release](https://huggingface.co/datasets/Anthropic/EconomicIndex/tree/main/release_2025_02_10)**: Initial release with O*NET task mappings, automation vs. augmentation data, and more
35
+
36
+
37
+ ## Resources
38
+
39
+ - [Index Home Page](https://www.anthropic.com/economic-index)
40
+ - [3rd report](https://www.anthropic.com/research/anthropic-economic-index-september-2025-report)
41
+ - [2nd report](https://www.anthropic.com/news/anthropic-economic-index-insights-from-claude-sonnet-3-7)
42
+ - [1st report](https://www.anthropic.com/news/the-anthropic-economic-index)
43
+
44
+
45
+ ## License
46
+
47
+ Data released under CC-BY, code released under MIT License
48
+
49
+ ## Contact
50
+
51
+ For inquiries, contact econ-research@anthropic.com.
52
+
53
+ ## Citation
54
+
55
+ ### Third release
56
+
57
+ ```
58
+ @online{appelmccrorytamkin2025geoapi,
59
+ author = {Ruth Appel and Peter McCrory and Alex Tamkin and Michael Stern and Miles McCain and Tyler Neylon},
60
+ title = {Anthropic Economic Index Report: Uneven Geographic and Enterprise AI Adoption},
61
+ date = {2025-09-15},
62
+ year = {2025},
63
+ url = {https://www.anthropic.com/research/anthropic-economic-index-september-2025-report},
64
+ }
65
+ ```
66
+
67
+ ### Second release
68
+
69
+ ```
70
+ @misc{handa2025economictasksperformedai,
71
+ title={Which Economic Tasks are Performed with AI? Evidence from Millions of Claude Conversations},
72
+ author={Kunal Handa and Alex Tamkin and Miles McCain and Saffron Huang and Esin Durmus and Sarah Heck and Jared Mueller and Jerry Hong and Stuart Ritchie and Tim Belonax and Kevin K. Troy and Dario Amodei and Jared Kaplan and Jack Clark and Deep Ganguli},
73
+ year={2025},
74
+ eprint={2503.04761},
75
+ archivePrefix={arXiv},
76
+ primaryClass={cs.CY},
77
+ url={https://arxiv.org/abs/2503.04761},
78
+ }
79
+ ```
release_2025_02_10/README.md ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ pretty_name: EconomicIndex
4
+ tags:
5
+ - text
6
+ viewer: true
7
+ configs:
8
+ - config_name: default
9
+ data_files:
10
+ - split: train
11
+ path: "onet_task_mappings.csv"
12
+ ---
13
+ ## Overview
14
+ This directory contains O*NET task mapping and automation vs. augmentation data from "Which Economic Tasks are Performed with AI? Evidence from Millions of Claude Conversations." The data and provided analysis are described below.
15
+
16
+ **Please see our [blog post](https://www.anthropic.com/news/the-anthropic-economic-index) and [paper](https://assets.anthropic.com/m/2e23255f1e84ca97/original/Economic_Tasks_AI_Paper.pdf) for further visualizations and complete analysis.**
17
+
18
+ ## Data
19
+
20
+ - `SOC_Structure.csv` - Standard Occupational Classification (SOC) system hierarchy from the U.S. Department of Labor O*NET database
21
+ - `automation_vs_augmentation.csv` - Data on automation vs augmentation patterns, with columns:
22
+ - interaction_type: Type of human-AI interaction (directive, feedback loop, task iteration, learning, validation)
23
+ - pct: Percentage of conversations showing this interaction pattern
24
+ Data obtained using Clio (Tamkin et al. 2024)
25
+ - `bls_employment_may_2023.csv` - Employment statistics from U.S. Bureau of Labor Statistics, May 2023
26
+ - `onet_task_mappings.csv` - Mappings between tasks and O*NET categories, with columns:
27
+ - task_name: Task description
28
+ - pct: Percentage of conversations involving this task
29
+ Data obtained using Clio (Tamkin et al. 2024)
30
+ - `onet_task_statements.csv` - Task descriptions and metadata from the U.S. Department of Labor O*NET database
31
+ - `wage_data.csv` - Occupational wage data scraped from O*NET website using open source tools from https://github.com/adamkq/onet-dataviz
32
+
33
+ ## Analysis
34
+
35
+ The `plots.ipynb` notebook provides visualizations and analysis including:
36
+
37
+ ### Task Analysis
38
+ - Top tasks by percentage of conversations
39
+ - Task distribution across occupational categories
40
+ - Comparison with BLS employment data
41
+
42
+ ### Occupational Analysis
43
+ - Top occupations by conversation percentage
44
+ - Occupational category distributions
45
+ - Occupational category distributions compared to BLS employment data
46
+
47
+ ### Wage Analysis
48
+ - Occupational usage by wage
49
+
50
+ ### Automation vs Augmentation Analysis
51
+ - Distribution across interaction modes
52
+
53
+ ## Usage
54
+ To generate the analysis:
55
+
56
+ 1. Ensure all data files are present in this directory
57
+ 2. Open `plots.ipynb` in Jupyter
58
+ 3. Run all cells to generate visualizations
59
+ 4. Plots will be saved to the notebook and can be exported
60
+
61
+ The notebook uses pandas for data manipulation and seaborn/matplotlib for visualization. Example outputs are contained in the `plots/` folder.
62
+
63
+ **Data released under CC-BY, code released under MIT License**
64
+
65
+ ## Contact
66
+ You can submit inquiries to kunal@anthropic.com or atamkin@anthropic.com. We invite researchers to provide input on potential future data releases using [this form](https://docs.google.com/forms/d/e/1FAIpQLSfDEdY-mT5lcXPaDSv-0Ci1rSXGlbIJierxkUbNB7_07-kddw/viewform?usp=dialog).
release_2025_02_10/SOC_Structure.csv ADDED
@@ -0,0 +1,1597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Major Group,Minor Group,Broad Occupation,Detailed Occupation,Detailed O*NET-SOC,SOC or O*NET-SOC 2019 Title
2
+ 11-0000,,,,,Management Occupations
3
+ ,11-1000,,,,Top Executives
4
+ ,,11-1010,,,Chief Executives
5
+ ,,,11-1011,,Chief Executives
6
+ ,,,,11-1011.03,Chief Sustainability Officers
7
+ ,,11-1020,,,General and Operations Managers
8
+ ,,,11-1021,,General and Operations Managers
9
+ ,,11-1030,,,Legislators
10
+ ,,,11-1031,,Legislators
11
+ ,11-2000,,,,"Advertising, Marketing, Promotions, Public Relations, and Sales Managers"
12
+ ,,11-2010,,,Advertising and Promotions Managers
13
+ ,,,11-2011,,Advertising and Promotions Managers
14
+ ,,11-2020,,,Marketing and Sales Managers
15
+ ,,,11-2021,,Marketing Managers
16
+ ,,,11-2022,,Sales Managers
17
+ ,,11-2030,,,Public Relations and Fundraising Managers
18
+ ,,,11-2032,,Public Relations Managers
19
+ ,,,11-2033,,Fundraising Managers
20
+ ,11-3000,,,,Operations Specialties Managers
21
+ ,,11-3010,,,Administrative Services and Facilities Managers
22
+ ,,,11-3012,,Administrative Services Managers
23
+ ,,,11-3013,,Facilities Managers
24
+ ,,,,11-3013.01,Security Managers
25
+ ,,11-3020,,,Computer and Information Systems Managers
26
+ ,,,11-3021,,Computer and Information Systems Managers
27
+ ,,11-3030,,,Financial Managers
28
+ ,,,11-3031,,Financial Managers
29
+ ,,,,11-3031.01,Treasurers and Controllers
30
+ ,,,,11-3031.03,Investment Fund Managers
31
+ ,,11-3050,,,Industrial Production Managers
32
+ ,,,11-3051,,Industrial Production Managers
33
+ ,,,,11-3051.01,Quality Control Systems Managers
34
+ ,,,,11-3051.02,Geothermal Production Managers
35
+ ,,,,11-3051.03,Biofuels Production Managers
36
+ ,,,,11-3051.04,Biomass Power Plant Managers
37
+ ,,,,11-3051.06,Hydroelectric Production Managers
38
+ ,,11-3060,,,Purchasing Managers
39
+ ,,,11-3061,,Purchasing Managers
40
+ ,,11-3070,,,"Transportation, Storage, and Distribution Managers"
41
+ ,,,11-3071,,"Transportation, Storage, and Distribution Managers"
42
+ ,,,,11-3071.04,Supply Chain Managers
43
+ ,,11-3110,,,Compensation and Benefits Managers
44
+ ,,,11-3111,,Compensation and Benefits Managers
45
+ ,,11-3120,,,Human Resources Managers
46
+ ,,,11-3121,,Human Resources Managers
47
+ ,,11-3130,,,Training and Development Managers
48
+ ,,,11-3131,,Training and Development Managers
49
+ ,11-9000,,,,Other Management Occupations
50
+ ,,11-9010,,,"Farmers, Ranchers, and Other Agricultural Managers"
51
+ ,,,11-9013,,"Farmers, Ranchers, and Other Agricultural Managers"
52
+ ,,11-9020,,,Construction Managers
53
+ ,,,11-9021,,Construction Managers
54
+ ,,11-9030,,,Education and Childcare Administrators
55
+ ,,,11-9031,,"Education and Childcare Administrators, Preschool and Daycare"
56
+ ,,,11-9032,,"Education Administrators, Kindergarten through Secondary"
57
+ ,,,11-9033,,"Education Administrators, Postsecondary"
58
+ ,,,11-9039,,"Education Administrators, All Other"
59
+ ,,11-9040,,,Architectural and Engineering Managers
60
+ ,,,11-9041,,Architectural and Engineering Managers
61
+ ,,,,11-9041.01,Biofuels/Biodiesel Technology and Product Development Managers
62
+ ,,11-9050,,,Food Service Managers
63
+ ,,,11-9051,,Food Service Managers
64
+ ,,11-9070,,,Entertainment and Recreation Managers
65
+ ,,,11-9071,,Gambling Managers
66
+ ,,,11-9072,,"Entertainment and Recreation Managers, Except Gambling"
67
+ ,,11-9080,,,Lodging Managers
68
+ ,,,11-9081,,Lodging Managers
69
+ ,,11-9110,,,Medical and Health Services Managers
70
+ ,,,11-9111,,Medical and Health Services Managers
71
+ ,,11-9120,,,Natural Sciences Managers
72
+ ,,,11-9121,,Natural Sciences Managers
73
+ ,,,,11-9121.01,Clinical Research Coordinators
74
+ ,,,,11-9121.02,Water Resource Specialists
75
+ ,,11-9130,,,Postmasters and Mail Superintendents
76
+ ,,,11-9131,,Postmasters and Mail Superintendents
77
+ ,,11-9140,,,"Property, Real Estate, and Community Association Managers"
78
+ ,,,11-9141,,"Property, Real Estate, and Community Association Managers"
79
+ ,,11-9150,,,Social and Community Service Managers
80
+ ,,,11-9151,,Social and Community Service Managers
81
+ ,,11-9160,,,Emergency Management Directors
82
+ ,,,11-9161,,Emergency Management Directors
83
+ ,,11-9170,,,Personal Service Managers
84
+ ,,,11-9171,,Funeral Home Managers
85
+ ,,,11-9179,,"Personal Service Managers, All Other"
86
+ ,,,,11-9179.01,Fitness and Wellness Coordinators
87
+ ,,,,11-9179.02,Spa Managers
88
+ ,,11-9190,,,Miscellaneous Managers
89
+ ,,,11-9199,,"Managers, All Other"
90
+ ,,,,11-9199.01,Regulatory Affairs Managers
91
+ ,,,,11-9199.02,Compliance Managers
92
+ ,,,,11-9199.08,Loss Prevention Managers
93
+ ,,,,11-9199.09,Wind Energy Operations Managers
94
+ ,,,,11-9199.10,Wind Energy Development Managers
95
+ ,,,,11-9199.11,Brownfield Redevelopment Specialists and Site Managers
96
+ 13-0000,,,,,Business and Financial Operations Occupations
97
+ ,13-1000,,,,Business Operations Specialists
98
+ ,,13-1010,,,"Agents and Business Managers of Artists, Performers, and Athletes"
99
+ ,,,13-1011,,"Agents and Business Managers of Artists, Performers, and Athletes"
100
+ ,,13-1020,,,Buyers and Purchasing Agents
101
+ ,,,13-1021,,"Buyers and Purchasing Agents, Farm Products"
102
+ ,,,13-1022,,"Wholesale and Retail Buyers, Except Farm Products"
103
+ ,,,13-1023,,"Purchasing Agents, Except Wholesale, Retail, and Farm Products"
104
+ ,,13-1030,,,"Claims Adjusters, Appraisers, Examiners, and Investigators"
105
+ ,,,13-1031,,"Claims Adjusters, Examiners, and Investigators"
106
+ ,,,13-1032,,"Insurance Appraisers, Auto Damage"
107
+ ,,13-1040,,,Compliance Officers
108
+ ,,,13-1041,,Compliance Officers
109
+ ,,,,13-1041.01,Environmental Compliance Inspectors
110
+ ,,,,13-1041.03,Equal Opportunity Representatives and Officers
111
+ ,,,,13-1041.04,Government Property Inspectors and Investigators
112
+ ,,,,13-1041.06,Coroners
113
+ ,,,,13-1041.07,Regulatory Affairs Specialists
114
+ ,,,,13-1041.08,Customs Brokers
115
+ ,,13-1050,,,Cost Estimators
116
+ ,,,13-1051,,Cost Estimators
117
+ ,,13-1070,,,Human Resources Workers
118
+ ,,,13-1071,,Human Resources Specialists
119
+ ,,,13-1074,,Farm Labor Contractors
120
+ ,,,13-1075,,Labor Relations Specialists
121
+ ,,13-1080,,,Logisticians and Project Management Specialists
122
+ ,,,13-1081,,Logisticians
123
+ ,,,,13-1081.01,Logistics Engineers
124
+ ,,,,13-1081.02,Logistics Analysts
125
+ ,,,13-1082,,Project Management Specialists
126
+ ,,13-1110,,,Management Analysts
127
+ ,,,13-1111,,Management Analysts
128
+ ,,13-1120,,,"Meeting, Convention, and Event Planners"
129
+ ,,,13-1121,,"Meeting, Convention, and Event Planners"
130
+ ,,13-1130,,,Fundraisers
131
+ ,,,13-1131,,Fundraisers
132
+ ,,13-1140,,,"Compensation, Benefits, and Job Analysis Specialists"
133
+ ,,,13-1141,,"Compensation, Benefits, and Job Analysis Specialists"
134
+ ,,13-1150,,,Training and Development Specialists
135
+ ,,,13-1151,,Training and Development Specialists
136
+ ,,13-1160,,,Market Research Analysts and Marketing Specialists
137
+ ,,,13-1161,,Market Research Analysts and Marketing Specialists
138
+ ,,,,13-1161.01,Search Marketing Strategists
139
+ ,,13-1190,,,Miscellaneous Business Operations Specialists
140
+ ,,,13-1199,,"Business Operations Specialists, All Other"
141
+ ,,,,13-1199.04,Business Continuity Planners
142
+ ,,,,13-1199.05,Sustainability Specialists
143
+ ,,,,13-1199.06,Online Merchants
144
+ ,,,,13-1199.07,Security Management Specialists
145
+ ,13-2000,,,,Financial Specialists
146
+ ,,13-2010,,,Accountants and Auditors
147
+ ,,,13-2011,,Accountants and Auditors
148
+ ,,13-2020,,,Property Appraisers and Assessors
149
+ ,,,13-2022,,Appraisers of Personal and Business Property
150
+ ,,,13-2023,,Appraisers and Assessors of Real Estate
151
+ ,,13-2030,,,Budget Analysts
152
+ ,,,13-2031,,Budget Analysts
153
+ ,,13-2040,,,Credit Analysts
154
+ ,,,13-2041,,Credit Analysts
155
+ ,,13-2050,,,Financial Analysts and Advisors
156
+ ,,,13-2051,,Financial and Investment Analysts
157
+ ,,,13-2052,,Personal Financial Advisors
158
+ ,,,13-2053,,Insurance Underwriters
159
+ ,,,13-2054,,Financial Risk Specialists
160
+ ,,13-2060,,,Financial Examiners
161
+ ,,,13-2061,,Financial Examiners
162
+ ,,13-2070,,,Credit Counselors and Loan Officers
163
+ ,,,13-2071,,Credit Counselors
164
+ ,,,13-2072,,Loan Officers
165
+ ,,13-2080,,,"Tax Examiners, Collectors and Preparers, and Revenue Agents"
166
+ ,,,13-2081,,"Tax Examiners and Collectors, and Revenue Agents"
167
+ ,,,13-2082,,Tax Preparers
168
+ ,,13-2090,,,Miscellaneous Financial Specialists
169
+ ,,,13-2099,,"Financial Specialists, All Other"
170
+ ,,,,13-2099.01,Financial Quantitative Analysts
171
+ ,,,,13-2099.04,"Fraud Examiners, Investigators and Analysts"
172
+ 15-0000,,,,,Computer and Mathematical Occupations
173
+ ,15-1200,,,,Computer Occupations
174
+ ,,15-1210,,,Computer and Information Analysts
175
+ ,,,15-1211,,Computer Systems Analysts
176
+ ,,,,15-1211.01,Health Informatics Specialists
177
+ ,,,15-1212,,Information Security Analysts
178
+ ,,15-1220,,,Computer and Information Research Scientists
179
+ ,,,15-1221,,Computer and Information Research Scientists
180
+ ,,15-1230,,,Computer Support Specialists
181
+ ,,,15-1231,,Computer Network Support Specialists
182
+ ,,,15-1232,,Computer User Support Specialists
183
+ ,,15-1240,,,Database and Network Administrators and Architects
184
+ ,,,15-1241,,Computer Network Architects
185
+ ,,,,15-1241.01,Telecommunications Engineering Specialists
186
+ ,,,15-1242,,Database Administrators
187
+ ,,,15-1243,,Database Architects
188
+ ,,,,15-1243.01,Data Warehousing Specialists
189
+ ,,,15-1244,,Network and Computer Systems Administrators
190
+ ,,15-1250,,,"Software and Web Developers, Programmers, and Testers"
191
+ ,,,15-1251,,Computer Programmers
192
+ ,,,15-1252,,Software Developers
193
+ ,,,15-1253,,Software Quality Assurance Analysts and Testers
194
+ ,,,15-1254,,Web Developers
195
+ ,,,15-1255,,Web and Digital Interface Designers
196
+ ,,,,15-1255.01,Video Game Designers
197
+ ,,15-1290,,,Miscellaneous Computer Occupations
198
+ ,,,15-1299,,"Computer Occupations, All Other"
199
+ ,,,,15-1299.01,Web Administrators
200
+ ,,,,15-1299.02,Geographic Information Systems Technologists and Technicians
201
+ ,,,,15-1299.03,Document Management Specialists
202
+ ,,,,15-1299.04,Penetration Testers
203
+ ,,,,15-1299.05,Information Security Engineers
204
+ ,,,,15-1299.06,Digital Forensics Analysts
205
+ ,,,,15-1299.07,Blockchain Engineers
206
+ ,,,,15-1299.08,Computer Systems Engineers/Architects
207
+ ,,,,15-1299.09,Information Technology Project Managers
208
+ ,15-2000,,,,Mathematical Science Occupations
209
+ ,,15-2010,,,Actuaries
210
+ ,,,15-2011,,Actuaries
211
+ ,,15-2020,,,Mathematicians
212
+ ,,,15-2021,,Mathematicians
213
+ ,,15-2030,,,Operations Research Analysts
214
+ ,,,15-2031,,Operations Research Analysts
215
+ ,,15-2040,,,Statisticians
216
+ ,,,15-2041,,Statisticians
217
+ ,,,,15-2041.01,Biostatisticians
218
+ ,,15-2050,,,Data Scientists
219
+ ,,,15-2051,,Data Scientists
220
+ ,,,,15-2051.01,Business Intelligence Analysts
221
+ ,,,,15-2051.02,Clinical Data Managers
222
+ ,,15-2090,,,Miscellaneous Mathematical Science Occupations
223
+ ,,,15-2099,,"Mathematical Science Occupations, All Other"
224
+ ,,,,15-2099.01,Bioinformatics Technicians
225
+ 17-0000,,,,,Architecture and Engineering Occupations
226
+ ,17-1000,,,,"Architects, Surveyors, and Cartographers"
227
+ ,,17-1010,,,"Architects, Except Naval"
228
+ ,,,17-1011,,"Architects, Except Landscape and Naval"
229
+ ,,,17-1012,,Landscape Architects
230
+ ,,17-1020,,,"Surveyors, Cartographers, and Photogrammetrists"
231
+ ,,,17-1021,,Cartographers and Photogrammetrists
232
+ ,,,17-1022,,Surveyors
233
+ ,,,,17-1022.01,Geodetic Surveyors
234
+ ,17-2000,,,,Engineers
235
+ ,,17-2010,,,Aerospace Engineers
236
+ ,,,17-2011,,Aerospace Engineers
237
+ ,,17-2020,,,Agricultural Engineers
238
+ ,,,17-2021,,Agricultural Engineers
239
+ ,,17-2030,,,Bioengineers and Biomedical Engineers
240
+ ,,,17-2031,,Bioengineers and Biomedical Engineers
241
+ ,,17-2040,,,Chemical Engineers
242
+ ,,,17-2041,,Chemical Engineers
243
+ ,,17-2050,,,Civil Engineers
244
+ ,,,17-2051,,Civil Engineers
245
+ ,,,,17-2051.01,Transportation Engineers
246
+ ,,,,17-2051.02,Water/Wastewater Engineers
247
+ ,,17-2060,,,Computer Hardware Engineers
248
+ ,,,17-2061,,Computer Hardware Engineers
249
+ ,,17-2070,,,Electrical and Electronics Engineers
250
+ ,,,17-2071,,Electrical Engineers
251
+ ,,,17-2072,,"Electronics Engineers, Except Computer"
252
+ ,,,,17-2072.01,Radio Frequency Identification Device Specialists
253
+ ,,17-2080,,,Environmental Engineers
254
+ ,,,17-2081,,Environmental Engineers
255
+ ,,17-2110,,,"Industrial Engineers, Including Health and Safety"
256
+ ,,,17-2111,,"Health and Safety Engineers, Except Mining Safety Engineers and Inspectors"
257
+ ,,,,17-2111.02,Fire-Prevention and Protection Engineers
258
+ ,,,17-2112,,Industrial Engineers
259
+ ,,,,17-2112.01,Human Factors Engineers and Ergonomists
260
+ ,,,,17-2112.02,Validation Engineers
261
+ ,,,,17-2112.03,Manufacturing Engineers
262
+ ,,17-2120,,,Marine Engineers and Naval Architects
263
+ ,,,17-2121,,Marine Engineers and Naval Architects
264
+ ,,17-2130,,,Materials Engineers
265
+ ,,,17-2131,,Materials Engineers
266
+ ,,17-2140,,,Mechanical Engineers
267
+ ,,,17-2141,,Mechanical Engineers
268
+ ,,,,17-2141.01,Fuel Cell Engineers
269
+ ,,,,17-2141.02,Automotive Engineers
270
+ ,,17-2150,,,"Mining and Geological Engineers, Including Mining Safety Engineers"
271
+ ,,,17-2151,,"Mining and Geological Engineers, Including Mining Safety Engineers"
272
+ ,,17-2160,,,Nuclear Engineers
273
+ ,,,17-2161,,Nuclear Engineers
274
+ ,,17-2170,,,Petroleum Engineers
275
+ ,,,17-2171,,Petroleum Engineers
276
+ ,,17-2190,,,Miscellaneous Engineers
277
+ ,,,17-2199,,"Engineers, All Other"
278
+ ,,,,17-2199.03,"Energy Engineers, Except Wind and Solar"
279
+ ,,,,17-2199.05,Mechatronics Engineers
280
+ ,,,,17-2199.06,Microsystems Engineers
281
+ ,,,,17-2199.07,Photonics Engineers
282
+ ,,,,17-2199.08,Robotics Engineers
283
+ ,,,,17-2199.09,Nanosystems Engineers
284
+ ,,,,17-2199.10,Wind Energy Engineers
285
+ ,,,,17-2199.11,Solar Energy Systems Engineers
286
+ ,17-3000,,,,"Drafters, Engineering Technicians, and Mapping Technicians"
287
+ ,,17-3010,,,Drafters
288
+ ,,,17-3011,,Architectural and Civil Drafters
289
+ ,,,17-3012,,Electrical and Electronics Drafters
290
+ ,,,17-3013,,Mechanical Drafters
291
+ ,,,17-3019,,"Drafters, All Other"
292
+ ,,17-3020,,,"Engineering Technologists and Technicians, Except Drafters"
293
+ ,,,17-3021,,Aerospace Engineering and Operations Technologists and Technicians
294
+ ,,,17-3022,,Civil Engineering Technologists and Technicians
295
+ ,,,17-3023,,Electrical and Electronic Engineering Technologists and Technicians
296
+ ,,,17-3024,,Electro-Mechanical and Mechatronics Technologists and Technicians
297
+ ,,,,17-3024.01,Robotics Technicians
298
+ ,,,17-3025,,Environmental Engineering Technologists and Technicians
299
+ ,,,17-3026,,Industrial Engineering Technologists and Technicians
300
+ ,,,,17-3026.01,Nanotechnology Engineering Technologists and Technicians
301
+ ,,,17-3027,,Mechanical Engineering Technologists and Technicians
302
+ ,,,,17-3027.01,Automotive Engineering Technicians
303
+ ,,,17-3028,,Calibration Technologists and Technicians
304
+ ,,,17-3029,,"Engineering Technologists and Technicians, Except Drafters, All Other"
305
+ ,,,,17-3029.01,Non-Destructive Testing Specialists
306
+ ,,,,17-3029.08,Photonics Technicians
307
+ ,,17-3030,,,Surveying and Mapping Technicians
308
+ ,,,17-3031,,Surveying and Mapping Technicians
309
+ 19-0000,,,,,"Life, Physical, and Social Science Occupations"
310
+ ,19-1000,,,,Life Scientists
311
+ ,,19-1010,,,Agricultural and Food Scientists
312
+ ,,,19-1011,,Animal Scientists
313
+ ,,,19-1012,,Food Scientists and Technologists
314
+ ,,,19-1013,,Soil and Plant Scientists
315
+ ,,19-1020,,,Biological Scientists
316
+ ,,,19-1021,,Biochemists and Biophysicists
317
+ ,,,19-1022,,Microbiologists
318
+ ,,,19-1023,,Zoologists and Wildlife Biologists
319
+ ,,,19-1029,,"Biological Scientists, All Other"
320
+ ,,,,19-1029.01,Bioinformatics Scientists
321
+ ,,,,19-1029.02,Molecular and Cellular Biologists
322
+ ,,,,19-1029.03,Geneticists
323
+ ,,,,19-1029.04,Biologists
324
+ ,,19-1030,,,Conservation Scientists and Foresters
325
+ ,,,19-1031,,Conservation Scientists
326
+ ,,,,19-1031.02,Range Managers
327
+ ,,,,19-1031.03,Park Naturalists
328
+ ,,,19-1032,,Foresters
329
+ ,,19-1040,,,Medical Scientists
330
+ ,,,19-1041,,Epidemiologists
331
+ ,,,19-1042,,"Medical Scientists, Except Epidemiologists"
332
+ ,,19-1090,,,Miscellaneous Life Scientists
333
+ ,,,19-1099,,"Life Scientists, All Other"
334
+ ,19-2000,,,,Physical Scientists
335
+ ,,19-2010,,,Astronomers and Physicists
336
+ ,,,19-2011,,Astronomers
337
+ ,,,19-2012,,Physicists
338
+ ,,19-2020,,,Atmospheric and Space Scientists
339
+ ,,,19-2021,,Atmospheric and Space Scientists
340
+ ,,19-2030,,,Chemists and Materials Scientists
341
+ ,,,19-2031,,Chemists
342
+ ,,,19-2032,,Materials Scientists
343
+ ,,19-2040,,,Environmental Scientists and Geoscientists
344
+ ,,,19-2041,,"Environmental Scientists and Specialists, Including Health"
345
+ ,,,,19-2041.01,Climate Change Policy Analysts
346
+ ,,,,19-2041.02,Environmental Restoration Planners
347
+ ,,,,19-2041.03,Industrial Ecologists
348
+ ,,,19-2042,,"Geoscientists, Except Hydrologists and Geographers"
349
+ ,,,19-2043,,Hydrologists
350
+ ,,19-2090,,,Miscellaneous Physical Scientists
351
+ ,,,19-2099,,"Physical Scientists, All Other"
352
+ ,,,,19-2099.01,Remote Sensing Scientists and Technologists
353
+ ,19-3000,,,,Social Scientists and Related Workers
354
+ ,,19-3010,,,Economists
355
+ ,,,19-3011,,Economists
356
+ ,,,,19-3011.01,Environmental Economists
357
+ ,,19-3020,,,Survey Researchers
358
+ ,,,19-3022,,Survey Researchers
359
+ ,,19-3030,,,Psychologists
360
+ ,,,19-3032,,Industrial-Organizational Psychologists
361
+ ,,,19-3033,,Clinical and Counseling Psychologists
362
+ ,,,19-3034,,School Psychologists
363
+ ,,,19-3039,,"Psychologists, All Other"
364
+ ,,,,19-3039.02,Neuropsychologists
365
+ ,,,,19-3039.03,Clinical Neuropsychologists
366
+ ,,19-3040,,,Sociologists
367
+ ,,,19-3041,,Sociologists
368
+ ,,19-3050,,,Urban and Regional Planners
369
+ ,,,19-3051,,Urban and Regional Planners
370
+ ,,19-3090,,,Miscellaneous Social Scientists and Related Workers
371
+ ,,,19-3091,,Anthropologists and Archeologists
372
+ ,,,19-3092,,Geographers
373
+ ,,,19-3093,,Historians
374
+ ,,,19-3094,,Political Scientists
375
+ ,,,19-3099,,"Social Scientists and Related Workers, All Other"
376
+ ,,,,19-3099.01,Transportation Planners
377
+ ,19-4000,,,,"Life, Physical, and Social Science Technicians"
378
+ ,,19-4010,,,Agricultural and Food Science Technicians
379
+ ,,,19-4012,,Agricultural Technicians
380
+ ,,,,19-4012.01,Precision Agriculture Technicians
381
+ ,,,19-4013,,Food Science Technicians
382
+ ,,19-4020,,,Biological Technicians
383
+ ,,,19-4021,,Biological Technicians
384
+ ,,19-4030,,,Chemical Technicians
385
+ ,,,19-4031,,Chemical Technicians
386
+ ,,19-4040,,,Environmental Science and Geoscience Technicians
387
+ ,,,19-4042,,"Environmental Science and Protection Technicians, Including Health"
388
+ ,,,19-4043,,"Geological Technicians, Except Hydrologic Technicians"
389
+ ,,,19-4044,,Hydrologic Technicians
390
+ ,,19-4050,,,Nuclear Technicians
391
+ ,,,19-4051,,Nuclear Technicians
392
+ ,,,,19-4051.02,Nuclear Monitoring Technicians
393
+ ,,19-4060,,,Social Science Research Assistants
394
+ ,,,19-4061,,Social Science Research Assistants
395
+ ,,19-4070,,,Forest and Conservation Technicians
396
+ ,,,19-4071,,Forest and Conservation Technicians
397
+ ,,19-4090,,,"Miscellaneous Life, Physical, and Social Science Technicians"
398
+ ,,,19-4092,,Forensic Science Technicians
399
+ ,,,19-4099,,"Life, Physical, and Social Science Technicians, All Other"
400
+ ,,,,19-4099.01,Quality Control Analysts
401
+ ,,,,19-4099.03,Remote Sensing Technicians
402
+ ,19-5000,,,,Occupational Health and Safety Specialists and Technicians
403
+ ,,19-5010,,,Occupational Health and Safety Specialists and Technicians
404
+ ,,,19-5011,,Occupational Health and Safety Specialists
405
+ ,,,19-5012,,Occupational Health and Safety Technicians
406
+ 21-0000,,,,,Community and Social Service Occupations
407
+ ,21-1000,,,,"Counselors, Social Workers, and Other Community and Social Service Specialists"
408
+ ,,21-1010,,,Counselors
409
+ ,,,21-1011,,Substance Abuse and Behavioral Disorder Counselors
410
+ ,,,21-1012,,"Educational, Guidance, and Career Counselors and Advisors"
411
+ ,,,21-1013,,Marriage and Family Therapists
412
+ ,,,21-1014,,Mental Health Counselors
413
+ ,,,21-1015,,Rehabilitation Counselors
414
+ ,,,21-1019,,"Counselors, All Other"
415
+ ,,21-1020,,,Social Workers
416
+ ,,,21-1021,,"Child, Family, and School Social Workers"
417
+ ,,,21-1022,,Healthcare Social Workers
418
+ ,,,21-1023,,Mental Health and Substance Abuse Social Workers
419
+ ,,,21-1029,,"Social Workers, All Other"
420
+ ,,21-1090,,,Miscellaneous Community and Social Service Specialists
421
+ ,,,21-1091,,Health Education Specialists
422
+ ,,,21-1092,,Probation Officers and Correctional Treatment Specialists
423
+ ,,,21-1093,,Social and Human Service Assistants
424
+ ,,,21-1094,,Community Health Workers
425
+ ,,,21-1099,,"Community and Social Service Specialists, All Other"
426
+ ,21-2000,,,,Religious Workers
427
+ ,,21-2010,,,Clergy
428
+ ,,,21-2011,,Clergy
429
+ ,,21-2020,,,"Directors, Religious Activities and Education"
430
+ ,,,21-2021,,"Directors, Religious Activities and Education"
431
+ ,,21-2090,,,Miscellaneous Religious Workers
432
+ ,,,21-2099,,"Religious Workers, All Other"
433
+ 23-0000,,,,,Legal Occupations
434
+ ,23-1000,,,,"Lawyers, Judges, and Related Workers"
435
+ ,,23-1010,,,Lawyers and Judicial Law Clerks
436
+ ,,,23-1011,,Lawyers
437
+ ,,,23-1012,,Judicial Law Clerks
438
+ ,,23-1020,,,"Judges, Magistrates, and Other Judicial Workers"
439
+ ,,,23-1021,,"Administrative Law Judges, Adjudicators, and Hearing Officers"
440
+ ,,,23-1022,,"Arbitrators, Mediators, and Conciliators"
441
+ ,,,23-1023,,"Judges, Magistrate Judges, and Magistrates"
442
+ ,23-2000,,,,Legal Support Workers
443
+ ,,23-2010,,,Paralegals and Legal Assistants
444
+ ,,,23-2011,,Paralegals and Legal Assistants
445
+ ,,23-2090,,,Miscellaneous Legal Support Workers
446
+ ,,,23-2093,,"Title Examiners, Abstractors, and Searchers"
447
+ ,,,23-2099,,"Legal Support Workers, All Other"
448
+ 25-0000,,,,,Educational Instruction and Library Occupations
449
+ ,25-1000,,,,Postsecondary Teachers
450
+ ,,25-1010,,,"Business Teachers, Postsecondary"
451
+ ,,,25-1011,,"Business Teachers, Postsecondary"
452
+ ,,25-1020,,,"Math and Computer Science Teachers, Postsecondary"
453
+ ,,,25-1021,,"Computer Science Teachers, Postsecondary"
454
+ ,,,25-1022,,"Mathematical Science Teachers, Postsecondary"
455
+ ,,25-1030,,,"Engineering and Architecture Teachers, Postsecondary"
456
+ ,,,25-1031,,"Architecture Teachers, Postsecondary"
457
+ ,,,25-1032,,"Engineering Teachers, Postsecondary"
458
+ ,,25-1040,,,"Life Sciences Teachers, Postsecondary"
459
+ ,,,25-1041,,"Agricultural Sciences Teachers, Postsecondary"
460
+ ,,,25-1042,,"Biological Science Teachers, Postsecondary"
461
+ ,,,25-1043,,"Forestry and Conservation Science Teachers, Postsecondary"
462
+ ,,25-1050,,,"Physical Sciences Teachers, Postsecondary"
463
+ ,,,25-1051,,"Atmospheric, Earth, Marine, and Space Sciences Teachers, Postsecondary"
464
+ ,,,25-1052,,"Chemistry Teachers, Postsecondary"
465
+ ,,,25-1053,,"Environmental Science Teachers, Postsecondary"
466
+ ,,,25-1054,,"Physics Teachers, Postsecondary"
467
+ ,,25-1060,,,"Social Sciences Teachers, Postsecondary"
468
+ ,,,25-1061,,"Anthropology and Archeology Teachers, Postsecondary"
469
+ ,,,25-1062,,"Area, Ethnic, and Cultural Studies Teachers, Postsecondary"
470
+ ,,,25-1063,,"Economics Teachers, Postsecondary"
471
+ ,,,25-1064,,"Geography Teachers, Postsecondary"
472
+ ,,,25-1065,,"Political Science Teachers, Postsecondary"
473
+ ,,,25-1066,,"Psychology Teachers, Postsecondary"
474
+ ,,,25-1067,,"Sociology Teachers, Postsecondary"
475
+ ,,,25-1069,,"Social Sciences Teachers, Postsecondary, All Other"
476
+ ,,25-1070,,,"Health Teachers, Postsecondary"
477
+ ,,,25-1071,,"Health Specialties Teachers, Postsecondary"
478
+ ,,,25-1072,,"Nursing Instructors and Teachers, Postsecondary"
479
+ ,,25-1080,,,"Education and Library Science Teachers, Postsecondary"
480
+ ,,,25-1081,,"Education Teachers, Postsecondary"
481
+ ,,,25-1082,,"Library Science Teachers, Postsecondary"
482
+ ,,25-1110,,,"Law, Criminal Justice, and Social Work Teachers, Postsecondary"
483
+ ,,,25-1111,,"Criminal Justice and Law Enforcement Teachers, Postsecondary"
484
+ ,,,25-1112,,"Law Teachers, Postsecondary"
485
+ ,,,25-1113,,"Social Work Teachers, Postsecondary"
486
+ ,,25-1120,,,"Arts, Communications, History, and Humanities Teachers, Postsecondary"
487
+ ,,,25-1121,,"Art, Drama, and Music Teachers, Postsecondary"
488
+ ,,,25-1122,,"Communications Teachers, Postsecondary"
489
+ ,,,25-1123,,"English Language and Literature Teachers, Postsecondary"
490
+ ,,,25-1124,,"Foreign Language and Literature Teachers, Postsecondary"
491
+ ,,,25-1125,,"History Teachers, Postsecondary"
492
+ ,,,25-1126,,"Philosophy and Religion Teachers, Postsecondary"
493
+ ,,25-1190,,,Miscellaneous Postsecondary Teachers
494
+ ,,,25-1192,,"Family and Consumer Sciences Teachers, Postsecondary"
495
+ ,,,25-1193,,"Recreation and Fitness Studies Teachers, Postsecondary"
496
+ ,,,25-1194,,"Career/Technical Education Teachers, Postsecondary"
497
+ ,,,25-1199,,"Postsecondary Teachers, All Other"
498
+ ,25-2000,,,,"Preschool, Elementary, Middle, Secondary, and Special Education Teachers"
499
+ ,,25-2010,,,Preschool and Kindergarten Teachers
500
+ ,,,25-2011,,"Preschool Teachers, Except Special Education"
501
+ ,,,25-2012,,"Kindergarten Teachers, Except Special Education"
502
+ ,,25-2020,,,Elementary and Middle School Teachers
503
+ ,,,25-2021,,"Elementary School Teachers, Except Special Education"
504
+ ,,,25-2022,,"Middle School Teachers, Except Special and Career/Technical Education"
505
+ ,,,25-2023,,"Career/Technical Education Teachers, Middle School"
506
+ ,,25-2030,,,Secondary School Teachers
507
+ ,,,25-2031,,"Secondary School Teachers, Except Special and Career/Technical Education"
508
+ ,,,25-2032,,"Career/Technical Education Teachers, Secondary School"
509
+ ,,25-2050,,,Special Education Teachers
510
+ ,,,25-2051,,"Special Education Teachers, Preschool"
511
+ ,,,25-2055,,"Special Education Teachers, Kindergarten"
512
+ ,,,25-2056,,"Special Education Teachers, Elementary School"
513
+ ,,,25-2057,,"Special Education Teachers, Middle School"
514
+ ,,,25-2058,,"Special Education Teachers, Secondary School"
515
+ ,,,25-2059,,"Special Education Teachers, All Other"
516
+ ,,,,25-2059.01,Adapted Physical Education Specialists
517
+ ,25-3000,,,,Other Teachers and Instructors
518
+ ,,25-3010,,,"Adult Basic Education, Adult Secondary Education, and English as a Second Language Instructors"
519
+ ,,,25-3011,,"Adult Basic Education, Adult Secondary Education, and English as a Second Language Instructors"
520
+ ,,25-3020,,,Self-Enrichment Teachers
521
+ ,,,25-3021,,Self-Enrichment Teachers
522
+ ,,25-3030,,,"Substitute Teachers, Short-Term"
523
+ ,,,25-3031,,"Substitute Teachers, Short-Term"
524
+ ,,25-3040,,,Tutors
525
+ ,,,25-3041,,Tutors
526
+ ,,25-3090,,,Miscellaneous Teachers and Instructors
527
+ ,,,25-3099,,"Teachers and Instructors, All Other"
528
+ ,25-4000,,,,"Librarians, Curators, and Archivists"
529
+ ,,25-4010,,,"Archivists, Curators, and Museum Technicians"
530
+ ,,,25-4011,,Archivists
531
+ ,,,25-4012,,Curators
532
+ ,,,25-4013,,Museum Technicians and Conservators
533
+ ,,25-4020,,,Librarians and Media Collections Specialists
534
+ ,,,25-4022,,Librarians and Media Collections Specialists
535
+ ,,25-4030,,,Library Technicians
536
+ ,,,25-4031,,Library Technicians
537
+ ,25-9000,,,,Other Educational Instruction and Library Occupations
538
+ ,,25-9020,,,Farm and Home Management Educators
539
+ ,,,25-9021,,Farm and Home Management Educators
540
+ ,,25-9030,,,Instructional Coordinators
541
+ ,,,25-9031,,Instructional Coordinators
542
+ ,,25-9040,,,Teaching Assistants
543
+ ,,,25-9042,,"Teaching Assistants, Preschool, Elementary, Middle, and Secondary School, Except Special Education"
544
+ ,,,25-9043,,"Teaching Assistants, Special Education"
545
+ ,,,25-9044,,"Teaching Assistants, Postsecondary"
546
+ ,,,25-9049,,"Teaching Assistants, All Other"
547
+ ,,25-9090,,,Miscellaneous Educational Instruction and Library Workers
548
+ ,,,25-9099,,"Educational Instruction and Library Workers, All Other"
549
+ 27-0000,,,,,"Arts, Design, Entertainment, Sports, and Media Occupations"
550
+ ,27-1000,,,,Art and Design Workers
551
+ ,,27-1010,,,Artists and Related Workers
552
+ ,,,27-1011,,Art Directors
553
+ ,,,27-1012,,Craft Artists
554
+ ,,,27-1013,,"Fine Artists, Including Painters, Sculptors, and Illustrators"
555
+ ,,,27-1014,,Special Effects Artists and Animators
556
+ ,,,27-1019,,"Artists and Related Workers, All Other"
557
+ ,,27-1020,,,Designers
558
+ ,,,27-1021,,Commercial and Industrial Designers
559
+ ,,,27-1022,,Fashion Designers
560
+ ,,,27-1023,,Floral Designers
561
+ ,,,27-1024,,Graphic Designers
562
+ ,,,27-1025,,Interior Designers
563
+ ,,,27-1026,,Merchandise Displayers and Window Trimmers
564
+ ,,,27-1027,,Set and Exhibit Designers
565
+ ,,,27-1029,,"Designers, All Other"
566
+ ,27-2000,,,,"Entertainers and Performers, Sports and Related Workers"
567
+ ,,27-2010,,,"Actors, Producers, and Directors"
568
+ ,,,27-2011,,Actors
569
+ ,,,27-2012,,Producers and Directors
570
+ ,,,,27-2012.03,Media Programming Directors
571
+ ,,,,27-2012.04,Talent Directors
572
+ ,,,,27-2012.05,Media Technical Directors/Managers
573
+ ,,27-2020,,,"Athletes, Coaches, Umpires, and Related Workers"
574
+ ,,,27-2021,,Athletes and Sports Competitors
575
+ ,,,27-2022,,Coaches and Scouts
576
+ ,,,27-2023,,"Umpires, Referees, and Other Sports Officials"
577
+ ,,27-2030,,,Dancers and Choreographers
578
+ ,,,27-2031,,Dancers
579
+ ,,,27-2032,,Choreographers
580
+ ,,27-2040,,,"Musicians, Singers, and Related Workers"
581
+ ,,,27-2041,,Music Directors and Composers
582
+ ,,,27-2042,,Musicians and Singers
583
+ ,,27-2090,,,"Miscellaneous Entertainers and Performers, Sports and Related Workers"
584
+ ,,,27-2091,,"Disc Jockeys, Except Radio"
585
+ ,,,27-2099,,"Entertainers and Performers, Sports and Related Workers, All Other"
586
+ ,27-3000,,,,Media and Communication Workers
587
+ ,,27-3010,,,Broadcast Announcers and Radio Disc Jockeys
588
+ ,,,27-3011,,Broadcast Announcers and Radio Disc Jockeys
589
+ ,,27-3020,,,"News Analysts, Reporters and Journalists"
590
+ ,,,27-3023,,"News Analysts, Reporters, and Journalists"
591
+ ,,27-3030,,,Public Relations Specialists
592
+ ,,,27-3031,,Public Relations Specialists
593
+ ,,27-3040,,,Writers and Editors
594
+ ,,,27-3041,,Editors
595
+ ,,,27-3042,,Technical Writers
596
+ ,,,27-3043,,Writers and Authors
597
+ ,,,,27-3043.05,"Poets, Lyricists and Creative Writers"
598
+ ,,27-3090,,,Miscellaneous Media and Communication Workers
599
+ ,,,27-3091,,Interpreters and Translators
600
+ ,,,27-3092,,Court Reporters and Simultaneous Captioners
601
+ ,,,27-3099,,"Media and Communication Workers, All Other"
602
+ ,27-4000,,,,Media and Communication Equipment Workers
603
+ ,,27-4010,,,"Broadcast, Sound, and Lighting Technicians"
604
+ ,,,27-4011,,Audio and Video Technicians
605
+ ,,,27-4012,,Broadcast Technicians
606
+ ,,,27-4014,,Sound Engineering Technicians
607
+ ,,,27-4015,,Lighting Technicians
608
+ ,,27-4020,,,Photographers
609
+ ,,,27-4021,,Photographers
610
+ ,,27-4030,,,"Television, Video, and Film Camera Operators and Editors"
611
+ ,,,27-4031,,"Camera Operators, Television, Video, and Film"
612
+ ,,,27-4032,,Film and Video Editors
613
+ ,,27-4090,,,Miscellaneous Media and Communication Equipment Workers
614
+ ,,,27-4099,,"Media and Communication Equipment Workers, All Other"
615
+ 29-0000,,,,,Healthcare Practitioners and Technical Occupations
616
+ ,29-1000,,,,Healthcare Diagnosing or Treating Practitioners
617
+ ,,29-1010,,,Chiropractors
618
+ ,,,29-1011,,Chiropractors
619
+ ,,29-1020,,,Dentists
620
+ ,,,29-1021,,"Dentists, General"
621
+ ,,,29-1022,,Oral and Maxillofacial Surgeons
622
+ ,,,29-1023,,Orthodontists
623
+ ,,,29-1024,,Prosthodontists
624
+ ,,,29-1029,,"Dentists, All Other Specialists"
625
+ ,,29-1030,,,Dietitians and Nutritionists
626
+ ,,,29-1031,,Dietitians and Nutritionists
627
+ ,,29-1040,,,Optometrists
628
+ ,,,29-1041,,Optometrists
629
+ ,,29-1050,,,Pharmacists
630
+ ,,,29-1051,,Pharmacists
631
+ ,,29-1070,,,Physician Assistants
632
+ ,,,29-1071,,Physician Assistants
633
+ ,,,,29-1071.01,Anesthesiologist Assistants
634
+ ,,29-1080,,,Podiatrists
635
+ ,,,29-1081,,Podiatrists
636
+ ,,29-1120,,,Therapists
637
+ ,,,29-1122,,Occupational Therapists
638
+ ,,,,29-1122.01,"Low Vision Therapists, Orientation and Mobility Specialists, and Vision Rehabilitation Therapists"
639
+ ,,,29-1123,,Physical Therapists
640
+ ,,,29-1124,,Radiation Therapists
641
+ ,,,29-1125,,Recreational Therapists
642
+ ,,,29-1126,,Respiratory Therapists
643
+ ,,,29-1127,,Speech-Language Pathologists
644
+ ,,,29-1128,,Exercise Physiologists
645
+ ,,,29-1129,,"Therapists, All Other"
646
+ ,,,,29-1129.01,Art Therapists
647
+ ,,,,29-1129.02,Music Therapists
648
+ ,,29-1130,,,Veterinarians
649
+ ,,,29-1131,,Veterinarians
650
+ ,,29-1140,,,Registered Nurses
651
+ ,,,29-1141,,Registered Nurses
652
+ ,,,,29-1141.01,Acute Care Nurses
653
+ ,,,,29-1141.02,Advanced Practice Psychiatric Nurses
654
+ ,,,,29-1141.03,Critical Care Nurses
655
+ ,,,,29-1141.04,Clinical Nurse Specialists
656
+ ,,29-1150,,,Nurse Anesthetists
657
+ ,,,29-1151,,Nurse Anesthetists
658
+ ,,29-1160,,,Nurse Midwives
659
+ ,,,29-1161,,Nurse Midwives
660
+ ,,29-1170,,,Nurse Practitioners
661
+ ,,,29-1171,,Nurse Practitioners
662
+ ,,29-1180,,,Audiologists
663
+ ,,,29-1181,,Audiologists
664
+ ,,29-1210,,,Physicians
665
+ ,,,29-1211,,Anesthesiologists
666
+ ,,,29-1212,,Cardiologists
667
+ ,,,29-1213,,Dermatologists
668
+ ,,,29-1214,,Emergency Medicine Physicians
669
+ ,,,29-1215,,Family Medicine Physicians
670
+ ,,,29-1216,,General Internal Medicine Physicians
671
+ ,,,29-1217,,Neurologists
672
+ ,,,29-1218,,Obstetricians and Gynecologists
673
+ ,,,29-1221,,"Pediatricians, General"
674
+ ,,,29-1222,,"Physicians, Pathologists"
675
+ ,,,29-1223,,Psychiatrists
676
+ ,,,29-1224,,Radiologists
677
+ ,,,29-1229,,"Physicians, All Other"
678
+ ,,,,29-1229.01,Allergists and Immunologists
679
+ ,,,,29-1229.02,Hospitalists
680
+ ,,,,29-1229.03,Urologists
681
+ ,,,,29-1229.04,Physical Medicine and Rehabilitation Physicians
682
+ ,,,,29-1229.05,Preventive Medicine Physicians
683
+ ,,,,29-1229.06,Sports Medicine Physicians
684
+ ,,29-1240,,,Surgeons
685
+ ,,,29-1241,,"Ophthalmologists, Except Pediatric"
686
+ ,,,29-1242,,"Orthopedic Surgeons, Except Pediatric"
687
+ ,,,29-1243,,Pediatric Surgeons
688
+ ,,,29-1249,,"Surgeons, All Other"
689
+ ,,29-1290,,,Miscellaneous Healthcare Diagnosing or Treating Practitioners
690
+ ,,,29-1291,,Acupuncturists
691
+ ,,,29-1292,,Dental Hygienists
692
+ ,,,29-1299,,"Healthcare Diagnosing or Treating Practitioners, All Other"
693
+ ,,,,29-1299.01,Naturopathic Physicians
694
+ ,,,,29-1299.02,Orthoptists
695
+ ,29-2000,,,,Health Technologists and Technicians
696
+ ,,29-2010,,,Clinical Laboratory Technologists and Technicians
697
+ ,,,29-2011,,Medical and Clinical Laboratory Technologists
698
+ ,,,,29-2011.01,Cytogenetic Technologists
699
+ ,,,,29-2011.02,Cytotechnologists
700
+ ,,,,29-2011.04,Histotechnologists
701
+ ,,,29-2012,,Medical and Clinical Laboratory Technicians
702
+ ,,,,29-2012.01,Histology Technicians
703
+ ,,29-2030,,,Diagnostic Related Technologists and Technicians
704
+ ,,,29-2031,,Cardiovascular Technologists and Technicians
705
+ ,,,29-2032,,Diagnostic Medical Sonographers
706
+ ,,,29-2033,,Nuclear Medicine Technologists
707
+ ,,,29-2034,,Radiologic Technologists and Technicians
708
+ ,,,29-2035,,Magnetic Resonance Imaging Technologists
709
+ ,,,29-2036,,Medical Dosimetrists
710
+ ,,29-2040,,,Emergency Medical Technicians and Paramedics
711
+ ,,,29-2042,,Emergency Medical Technicians
712
+ ,,,29-2043,,Paramedics
713
+ ,,29-2050,,,Health Practitioner Support Technologists and Technicians
714
+ ,,,29-2051,,Dietetic Technicians
715
+ ,,,29-2052,,Pharmacy Technicians
716
+ ,,,29-2053,,Psychiatric Technicians
717
+ ,,,29-2055,,Surgical Technologists
718
+ ,,,29-2056,,Veterinary Technologists and Technicians
719
+ ,,,29-2057,,Ophthalmic Medical Technicians
720
+ ,,29-2060,,,Licensed Practical and Licensed Vocational Nurses
721
+ ,,,29-2061,,Licensed Practical and Licensed Vocational Nurses
722
+ ,,29-2070,,,Medical Records Specialists
723
+ ,,,29-2072,,Medical Records Specialists
724
+ ,,29-2080,,,"Opticians, Dispensing"
725
+ ,,,29-2081,,"Opticians, Dispensing"
726
+ ,,29-2090,,,Miscellaneous Health Technologists and Technicians
727
+ ,,,29-2091,,Orthotists and Prosthetists
728
+ ,,,29-2092,,Hearing Aid Specialists
729
+ ,,,29-2099,,"Health Technologists and Technicians, All Other"
730
+ ,,,,29-2099.01,Neurodiagnostic Technologists
731
+ ,,,,29-2099.05,Ophthalmic Medical Technologists
732
+ ,,,,29-2099.08,Patient Representatives
733
+ ,29-9000,,,,Other Healthcare Practitioners and Technical Occupations
734
+ ,,29-9020,,,Health Information Technologists and Medical Registrars
735
+ ,,,29-9021,,Health Information Technologists and Medical Registrars
736
+ ,,29-9090,,,Miscellaneous Health Practitioners and Technical Workers
737
+ ,,,29-9091,,Athletic Trainers
738
+ ,,,29-9092,,Genetic Counselors
739
+ ,,,29-9093,,Surgical Assistants
740
+ ,,,29-9099,,"Healthcare Practitioners and Technical Workers, All Other"
741
+ ,,,,29-9099.01,Midwives
742
+ 31-0000,,,,,Healthcare Support Occupations
743
+ ,31-1100,,,,"Home Health and Personal Care Aides; and Nursing Assistants, Orderlies, and Psychiatric Aides"
744
+ ,,31-1120,,,Home Health and Personal Care Aides
745
+ ,,,31-1121,,Home Health Aides
746
+ ,,,31-1122,,Personal Care Aides
747
+ ,,31-1130,,,"Nursing Assistants, Orderlies, and Psychiatric Aides"
748
+ ,,,31-1131,,Nursing Assistants
749
+ ,,,31-1132,,Orderlies
750
+ ,,,31-1133,,Psychiatric Aides
751
+ ,31-2000,,,,Occupational Therapy and Physical Therapist Assistants and Aides
752
+ ,,31-2010,,,Occupational Therapy Assistants and Aides
753
+ ,,,31-2011,,Occupational Therapy Assistants
754
+ ,,,31-2012,,Occupational Therapy Aides
755
+ ,,31-2020,,,Physical Therapist Assistants and Aides
756
+ ,,,31-2021,,Physical Therapist Assistants
757
+ ,,,31-2022,,Physical Therapist Aides
758
+ ,31-9000,,,,Other Healthcare Support Occupations
759
+ ,,31-9010,,,Massage Therapists
760
+ ,,,31-9011,,Massage Therapists
761
+ ,,31-9090,,,Miscellaneous Healthcare Support Occupations
762
+ ,,,31-9091,,Dental Assistants
763
+ ,,,31-9092,,Medical Assistants
764
+ ,,,31-9093,,Medical Equipment Preparers
765
+ ,,,31-9094,,Medical Transcriptionists
766
+ ,,,31-9095,,Pharmacy Aides
767
+ ,,,31-9096,,Veterinary Assistants and Laboratory Animal Caretakers
768
+ ,,,31-9097,,Phlebotomists
769
+ ,,,31-9099,,"Healthcare Support Workers, All Other"
770
+ ,,,,31-9099.01,Speech-Language Pathology Assistants
771
+ ,,,,31-9099.02,Endoscopy Technicians
772
+ 33-0000,,,,,Protective Service Occupations
773
+ ,33-1000,,,,Supervisors of Protective Service Workers
774
+ ,,33-1010,,,First-Line Supervisors of Law Enforcement Workers
775
+ ,,,33-1011,,First-Line Supervisors of Correctional Officers
776
+ ,,,33-1012,,First-Line Supervisors of Police and Detectives
777
+ ,,33-1020,,,First-Line Supervisors of Firefighting and Prevention Workers
778
+ ,,,33-1021,,First-Line Supervisors of Firefighting and Prevention Workers
779
+ ,,33-1090,,,"Miscellaneous First-Line Supervisors, Protective Service Workers"
780
+ ,,,33-1091,,First-Line Supervisors of Security Workers
781
+ ,,,33-1099,,"First-Line Supervisors of Protective Service Workers, All Other"
782
+ ,33-2000,,,,Firefighting and Prevention Workers
783
+ ,,33-2010,,,Firefighters
784
+ ,,,33-2011,,Firefighters
785
+ ,,33-2020,,,Fire Inspectors
786
+ ,,,33-2021,,Fire Inspectors and Investigators
787
+ ,,,33-2022,,Forest Fire Inspectors and Prevention Specialists
788
+ ,33-3000,,,,Law Enforcement Workers
789
+ ,,33-3010,,,"Bailiffs, Correctional Officers, and Jailers"
790
+ ,,,33-3011,,Bailiffs
791
+ ,,,33-3012,,Correctional Officers and Jailers
792
+ ,,33-3020,,,Detectives and Criminal Investigators
793
+ ,,,33-3021,,Detectives and Criminal Investigators
794
+ ,,,,33-3021.02,Police Identification and Records Officers
795
+ ,,,,33-3021.06,Intelligence Analysts
796
+ ,,33-3030,,,Fish and Game Wardens
797
+ ,,,33-3031,,Fish and Game Wardens
798
+ ,,33-3040,,,Parking Enforcement Workers
799
+ ,,,33-3041,,Parking Enforcement Workers
800
+ ,,33-3050,,,Police Officers
801
+ ,,,33-3051,,Police and Sheriff's Patrol Officers
802
+ ,,,,33-3051.04,Customs and Border Protection Officers
803
+ ,,,33-3052,,Transit and Railroad Police
804
+ ,33-9000,,,,Other Protective Service Workers
805
+ ,,33-9010,,,Animal Control Workers
806
+ ,,,33-9011,,Animal Control Workers
807
+ ,,33-9020,,,Private Detectives and Investigators
808
+ ,,,33-9021,,Private Detectives and Investigators
809
+ ,,33-9030,,,Security Guards and Gambling Surveillance Officers
810
+ ,,,33-9031,,Gambling Surveillance Officers and Gambling Investigators
811
+ ,,,33-9032,,Security Guards
812
+ ,,33-9090,,,Miscellaneous Protective Service Workers
813
+ ,,,33-9091,,Crossing Guards and Flaggers
814
+ ,,,33-9092,,"Lifeguards, Ski Patrol, and Other Recreational Protective Service Workers"
815
+ ,,,33-9093,,Transportation Security Screeners
816
+ ,,,33-9094,,School Bus Monitors
817
+ ,,,33-9099,,"Protective Service Workers, All Other"
818
+ ,,,,33-9099.02,Retail Loss Prevention Specialists
819
+ 35-0000,,,,,Food Preparation and Serving Related Occupations
820
+ ,35-1000,,,,Supervisors of Food Preparation and Serving Workers
821
+ ,,35-1010,,,Supervisors of Food Preparation and Serving Workers
822
+ ,,,35-1011,,Chefs and Head Cooks
823
+ ,,,35-1012,,First-Line Supervisors of Food Preparation and Serving Workers
824
+ ,35-2000,,,,Cooks and Food Preparation Workers
825
+ ,,35-2010,,,Cooks
826
+ ,,,35-2011,,"Cooks, Fast Food"
827
+ ,,,35-2012,,"Cooks, Institution and Cafeteria"
828
+ ,,,35-2013,,"Cooks, Private Household"
829
+ ,,,35-2014,,"Cooks, Restaurant"
830
+ ,,,35-2015,,"Cooks, Short Order"
831
+ ,,,35-2019,,"Cooks, All Other"
832
+ ,,35-2020,,,Food Preparation Workers
833
+ ,,,35-2021,,Food Preparation Workers
834
+ ,35-3000,,,,Food and Beverage Serving Workers
835
+ ,,35-3010,,,Bartenders
836
+ ,,,35-3011,,Bartenders
837
+ ,,35-3020,,,Fast Food and Counter Workers
838
+ ,,,35-3023,,Fast Food and Counter Workers
839
+ ,,,,35-3023.01,Baristas
840
+ ,,35-3030,,,Waiters and Waitresses
841
+ ,,,35-3031,,Waiters and Waitresses
842
+ ,,35-3040,,,"Food Servers, Nonrestaurant"
843
+ ,,,35-3041,,"Food Servers, Nonrestaurant"
844
+ ,35-9000,,,,Other Food Preparation and Serving Related Workers
845
+ ,,35-9010,,,Dining Room and Cafeteria Attendants and Bartender Helpers
846
+ ,,,35-9011,,Dining Room and Cafeteria Attendants and Bartender Helpers
847
+ ,,35-9020,,,Dishwashers
848
+ ,,,35-9021,,Dishwashers
849
+ ,,35-9030,,,"Hosts and Hostesses, Restaurant, Lounge, and Coffee Shop"
850
+ ,,,35-9031,,"Hosts and Hostesses, Restaurant, Lounge, and Coffee Shop"
851
+ ,,35-9090,,,Miscellaneous Food Preparation and Serving Related Workers
852
+ ,,,35-9099,,"Food Preparation and Serving Related Workers, All Other"
853
+ 37-0000,,,,,Building and Grounds Cleaning and Maintenance Occupations
854
+ ,37-1000,,,,Supervisors of Building and Grounds Cleaning and Maintenance Workers
855
+ ,,37-1010,,,First-Line Supervisors of Building and Grounds Cleaning and Maintenance Workers
856
+ ,,,37-1011,,First-Line Supervisors of Housekeeping and Janitorial Workers
857
+ ,,,37-1012,,"First-Line Supervisors of Landscaping, Lawn Service, and Groundskeeping Workers"
858
+ ,37-2000,,,,Building Cleaning and Pest Control Workers
859
+ ,,37-2010,,,Building Cleaning Workers
860
+ ,,,37-2011,,"Janitors and Cleaners, Except Maids and Housekeeping Cleaners"
861
+ ,,,37-2012,,Maids and Housekeeping Cleaners
862
+ ,,,37-2019,,"Building Cleaning Workers, All Other"
863
+ ,,37-2020,,,Pest Control Workers
864
+ ,,,37-2021,,Pest Control Workers
865
+ ,37-3000,,,,Grounds Maintenance Workers
866
+ ,,37-3010,,,Grounds Maintenance Workers
867
+ ,,,37-3011,,Landscaping and Groundskeeping Workers
868
+ ,,,37-3012,,"Pesticide Handlers, Sprayers, and Applicators, Vegetation"
869
+ ,,,37-3013,,Tree Trimmers and Pruners
870
+ ,,,37-3019,,"Grounds Maintenance Workers, All Other"
871
+ 39-0000,,,,,Personal Care and Service Occupations
872
+ ,39-1000,,,,Supervisors of Personal Care and Service Workers
873
+ ,,39-1010,,,First-Line Supervisors of Entertainment and Recreation Workers
874
+ ,,,39-1013,,First-Line Supervisors of Gambling Services Workers
875
+ ,,,39-1014,,"First-Line Supervisors of Entertainment and Recreation Workers, Except Gambling Services"
876
+ ,,39-1020,,,First-Line Supervisors of Personal Service Workers
877
+ ,,,39-1022,,First-Line Supervisors of Personal Service Workers
878
+ ,39-2000,,,,Animal Care and Service Workers
879
+ ,,39-2010,,,Animal Trainers
880
+ ,,,39-2011,,Animal Trainers
881
+ ,,39-2020,,,Animal Caretakers
882
+ ,,,39-2021,,Animal Caretakers
883
+ ,39-3000,,,,Entertainment Attendants and Related Workers
884
+ ,,39-3010,,,Gambling Services Workers
885
+ ,,,39-3011,,Gambling Dealers
886
+ ,,,39-3012,,Gambling and Sports Book Writers and Runners
887
+ ,,,39-3019,,"Gambling Service Workers, All Other"
888
+ ,,39-3020,,,Motion Picture Projectionists
889
+ ,,,39-3021,,Motion Picture Projectionists
890
+ ,,39-3030,,,"Ushers, Lobby Attendants, and Ticket Takers"
891
+ ,,,39-3031,,"Ushers, Lobby Attendants, and Ticket Takers"
892
+ ,,39-3090,,,Miscellaneous Entertainment Attendants and Related Workers
893
+ ,,,39-3091,,Amusement and Recreation Attendants
894
+ ,,,39-3092,,Costume Attendants
895
+ ,,,39-3093,,"Locker Room, Coatroom, and Dressing Room Attendants"
896
+ ,,,39-3099,,"Entertainment Attendants and Related Workers, All Other"
897
+ ,39-4000,,,,Funeral Service Workers
898
+ ,,39-4010,,,Embalmers and Crematory Operators
899
+ ,,,39-4011,,Embalmers
900
+ ,,,39-4012,,Crematory Operators
901
+ ,,39-4020,,,Funeral Attendants
902
+ ,,,39-4021,,Funeral Attendants
903
+ ,,39-4030,,,"Morticians, Undertakers, and Funeral Arrangers"
904
+ ,,,39-4031,,"Morticians, Undertakers, and Funeral Arrangers"
905
+ ,39-5000,,,,Personal Appearance Workers
906
+ ,,39-5010,,,"Barbers, Hairdressers, Hairstylists and Cosmetologists"
907
+ ,,,39-5011,,Barbers
908
+ ,,,39-5012,,"Hairdressers, Hairstylists, and Cosmetologists"
909
+ ,,39-5090,,,Miscellaneous Personal Appearance Workers
910
+ ,,,39-5091,,"Makeup Artists, Theatrical and Performance"
911
+ ,,,39-5092,,Manicurists and Pedicurists
912
+ ,,,39-5093,,Shampooers
913
+ ,,,39-5094,,Skincare Specialists
914
+ ,39-6000,,,,"Baggage Porters, Bellhops, and Concierges"
915
+ ,,39-6010,,,"Baggage Porters, Bellhops, and Concierges"
916
+ ,,,39-6011,,Baggage Porters and Bellhops
917
+ ,,,39-6012,,Concierges
918
+ ,39-7000,,,,Tour and Travel Guides
919
+ ,,39-7010,,,Tour and Travel Guides
920
+ ,,,39-7011,,Tour Guides and Escorts
921
+ ,,,39-7012,,Travel Guides
922
+ ,39-9000,,,,Other Personal Care and Service Workers
923
+ ,,39-9010,,,Childcare Workers
924
+ ,,,39-9011,,Childcare Workers
925
+ ,,,,39-9011.01,Nannies
926
+ ,,39-9030,,,Recreation and Fitness Workers
927
+ ,,,39-9031,,Exercise Trainers and Group Fitness Instructors
928
+ ,,,39-9032,,Recreation Workers
929
+ ,,39-9040,,,Residential Advisors
930
+ ,,,39-9041,,Residential Advisors
931
+ ,,39-9090,,,Miscellaneous Personal Care and Service Workers
932
+ ,,,39-9099,,"Personal Care and Service Workers, All Other"
933
+ 41-0000,,,,,Sales and Related Occupations
934
+ ,41-1000,,,,Supervisors of Sales Workers
935
+ ,,41-1010,,,First-Line Supervisors of Sales Workers
936
+ ,,,41-1011,,First-Line Supervisors of Retail Sales Workers
937
+ ,,,41-1012,,First-Line Supervisors of Non-Retail Sales Workers
938
+ ,41-2000,,,,Retail Sales Workers
939
+ ,,41-2010,,,Cashiers
940
+ ,,,41-2011,,Cashiers
941
+ ,,,41-2012,,Gambling Change Persons and Booth Cashiers
942
+ ,,41-2020,,,Counter and Rental Clerks and Parts Salespersons
943
+ ,,,41-2021,,Counter and Rental Clerks
944
+ ,,,41-2022,,Parts Salespersons
945
+ ,,41-2030,,,Retail Salespersons
946
+ ,,,41-2031,,Retail Salespersons
947
+ ,41-3000,,,,"Sales Representatives, Services"
948
+ ,,41-3010,,,Advertising Sales Agents
949
+ ,,,41-3011,,Advertising Sales Agents
950
+ ,,41-3020,,,Insurance Sales Agents
951
+ ,,,41-3021,,Insurance Sales Agents
952
+ ,,41-3030,,,"Securities, Commodities, and Financial Services Sales Agents"
953
+ ,,,41-3031,,"Securities, Commodities, and Financial Services Sales Agents"
954
+ ,,41-3040,,,Travel Agents
955
+ ,,,41-3041,,Travel Agents
956
+ ,,41-3090,,,"Miscellaneous Sales Representatives, Services"
957
+ ,,,41-3091,,"Sales Representatives of Services, Except Advertising, Insurance, Financial Services, and Travel"
958
+ ,41-4000,,,,"Sales Representatives, Wholesale and Manufacturing"
959
+ ,,41-4010,,,"Sales Representatives, Wholesale and Manufacturing"
960
+ ,,,41-4011,,"Sales Representatives, Wholesale and Manufacturing, Technical and Scientific Products"
961
+ ,,,,41-4011.07,Solar Sales Representatives and Assessors
962
+ ,,,41-4012,,"Sales Representatives, Wholesale and Manufacturing, Except Technical and Scientific Products"
963
+ ,41-9000,,,,Other Sales and Related Workers
964
+ ,,41-9010,,,"Models, Demonstrators, and Product Promoters"
965
+ ,,,41-9011,,Demonstrators and Product Promoters
966
+ ,,,41-9012,,Models
967
+ ,,41-9020,,,Real Estate Brokers and Sales Agents
968
+ ,,,41-9021,,Real Estate Brokers
969
+ ,,,41-9022,,Real Estate Sales Agents
970
+ ,,41-9030,,,Sales Engineers
971
+ ,,,41-9031,,Sales Engineers
972
+ ,,41-9040,,,Telemarketers
973
+ ,,,41-9041,,Telemarketers
974
+ ,,41-9090,,,Miscellaneous Sales and Related Workers
975
+ ,,,41-9091,,"Door-to-Door Sales Workers, News and Street Vendors, and Related Workers"
976
+ ,,,41-9099,,"Sales and Related Workers, All Other"
977
+ 43-0000,,,,,Office and Administrative Support Occupations
978
+ ,43-1000,,,,Supervisors of Office and Administrative Support Workers
979
+ ,,43-1010,,,First-Line Supervisors of Office and Administrative Support Workers
980
+ ,,,43-1011,,First-Line Supervisors of Office and Administrative Support Workers
981
+ ,43-2000,,,,Communications Equipment Operators
982
+ ,,43-2010,,,"Switchboard Operators, Including Answering Service"
983
+ ,,,43-2011,,"Switchboard Operators, Including Answering Service"
984
+ ,,43-2020,,,Telephone Operators
985
+ ,,,43-2021,,Telephone Operators
986
+ ,,43-2090,,,Miscellaneous Communications Equipment Operators
987
+ ,,,43-2099,,"Communications Equipment Operators, All Other"
988
+ ,43-3000,,,,Financial Clerks
989
+ ,,43-3010,,,Bill and Account Collectors
990
+ ,,,43-3011,,Bill and Account Collectors
991
+ ,,43-3020,,,Billing and Posting Clerks
992
+ ,,,43-3021,,Billing and Posting Clerks
993
+ ,,43-3030,,,"Bookkeeping, Accounting, and Auditing Clerks"
994
+ ,,,43-3031,,"Bookkeeping, Accounting, and Auditing Clerks"
995
+ ,,43-3040,,,Gambling Cage Workers
996
+ ,,,43-3041,,Gambling Cage Workers
997
+ ,,43-3050,,,Payroll and Timekeeping Clerks
998
+ ,,,43-3051,,Payroll and Timekeeping Clerks
999
+ ,,43-3060,,,Procurement Clerks
1000
+ ,,,43-3061,,Procurement Clerks
1001
+ ,,43-3070,,,Tellers
1002
+ ,,,43-3071,,Tellers
1003
+ ,,43-3090,,,Miscellaneous Financial Clerks
1004
+ ,,,43-3099,,"Financial Clerks, All Other"
1005
+ ,43-4000,,,,Information and Record Clerks
1006
+ ,,43-4010,,,Brokerage Clerks
1007
+ ,,,43-4011,,Brokerage Clerks
1008
+ ,,43-4020,,,Correspondence Clerks
1009
+ ,,,43-4021,,Correspondence Clerks
1010
+ ,,43-4030,,,"Court, Municipal, and License Clerks"
1011
+ ,,,43-4031,,"Court, Municipal, and License Clerks"
1012
+ ,,43-4040,,,"Credit Authorizers, Checkers, and Clerks"
1013
+ ,,,43-4041,,"Credit Authorizers, Checkers, and Clerks"
1014
+ ,,43-4050,,,Customer Service Representatives
1015
+ ,,,43-4051,,Customer Service Representatives
1016
+ ,,43-4060,,,"Eligibility Interviewers, Government Programs"
1017
+ ,,,43-4061,,"Eligibility Interviewers, Government Programs"
1018
+ ,,43-4070,,,File Clerks
1019
+ ,,,43-4071,,File Clerks
1020
+ ,,43-4080,,,"Hotel, Motel, and Resort Desk Clerks"
1021
+ ,,,43-4081,,"Hotel, Motel, and Resort Desk Clerks"
1022
+ ,,43-4110,,,"Interviewers, Except Eligibility and Loan"
1023
+ ,,,43-4111,,"Interviewers, Except Eligibility and Loan"
1024
+ ,,43-4120,,,"Library Assistants, Clerical"
1025
+ ,,,43-4121,,"Library Assistants, Clerical"
1026
+ ,,43-4130,,,Loan Interviewers and Clerks
1027
+ ,,,43-4131,,Loan Interviewers and Clerks
1028
+ ,,43-4140,,,New Accounts Clerks
1029
+ ,,,43-4141,,New Accounts Clerks
1030
+ ,,43-4150,,,Order Clerks
1031
+ ,,,43-4151,,Order Clerks
1032
+ ,,43-4160,,,"Human Resources Assistants, Except Payroll and Timekeeping"
1033
+ ,,,43-4161,,"Human Resources Assistants, Except Payroll and Timekeeping"
1034
+ ,,43-4170,,,Receptionists and Information Clerks
1035
+ ,,,43-4171,,Receptionists and Information Clerks
1036
+ ,,43-4180,,,Reservation and Transportation Ticket Agents and Travel Clerks
1037
+ ,,,43-4181,,Reservation and Transportation Ticket Agents and Travel Clerks
1038
+ ,,43-4190,,,Miscellaneous Information and Record Clerks
1039
+ ,,,43-4199,,"Information and Record Clerks, All Other"
1040
+ ,43-5000,,,,"Material Recording, Scheduling, Dispatching, and Distributing Workers"
1041
+ ,,43-5010,,,Cargo and Freight Agents
1042
+ ,,,43-5011,,Cargo and Freight Agents
1043
+ ,,,,43-5011.01,Freight Forwarders
1044
+ ,,43-5020,,,Couriers and Messengers
1045
+ ,,,43-5021,,Couriers and Messengers
1046
+ ,,43-5030,,,Dispatchers
1047
+ ,,,43-5031,,Public Safety Telecommunicators
1048
+ ,,,43-5032,,"Dispatchers, Except Police, Fire, and Ambulance"
1049
+ ,,43-5040,,,"Meter Readers, Utilities"
1050
+ ,,,43-5041,,"Meter Readers, Utilities"
1051
+ ,,43-5050,,,Postal Service Workers
1052
+ ,,,43-5051,,Postal Service Clerks
1053
+ ,,,43-5052,,Postal Service Mail Carriers
1054
+ ,,,43-5053,,"Postal Service Mail Sorters, Processors, and Processing Machine Operators"
1055
+ ,,43-5060,,,"Production, Planning, and Expediting Clerks"
1056
+ ,,,43-5061,,"Production, Planning, and Expediting Clerks"
1057
+ ,,43-5070,,,"Shipping, Receiving, and Inventory Clerks"
1058
+ ,,,43-5071,,"Shipping, Receiving, and Inventory Clerks"
1059
+ ,,43-5110,,,"Weighers, Measurers, Checkers, and Samplers, Recordkeeping"
1060
+ ,,,43-5111,,"Weighers, Measurers, Checkers, and Samplers, Recordkeeping"
1061
+ ,43-6000,,,,Secretaries and Administrative Assistants
1062
+ ,,43-6010,,,Secretaries and Administrative Assistants
1063
+ ,,,43-6011,,Executive Secretaries and Executive Administrative Assistants
1064
+ ,,,43-6012,,Legal Secretaries and Administrative Assistants
1065
+ ,,,43-6013,,Medical Secretaries and Administrative Assistants
1066
+ ,,,43-6014,,"Secretaries and Administrative Assistants, Except Legal, Medical, and Executive"
1067
+ ,43-9000,,,,Other Office and Administrative Support Workers
1068
+ ,,43-9020,,,Data Entry and Information Processing Workers
1069
+ ,,,43-9021,,Data Entry Keyers
1070
+ ,,,43-9022,,Word Processors and Typists
1071
+ ,,43-9030,,,Desktop Publishers
1072
+ ,,,43-9031,,Desktop Publishers
1073
+ ,,43-9040,,,Insurance Claims and Policy Processing Clerks
1074
+ ,,,43-9041,,Insurance Claims and Policy Processing Clerks
1075
+ ,,43-9050,,,"Mail Clerks and Mail Machine Operators, Except Postal Service"
1076
+ ,,,43-9051,,"Mail Clerks and Mail Machine Operators, Except Postal Service"
1077
+ ,,43-9060,,,"Office Clerks, General"
1078
+ ,,,43-9061,,"Office Clerks, General"
1079
+ ,,43-9070,,,"Office Machine Operators, Except Computer"
1080
+ ,,,43-9071,,"Office Machine Operators, Except Computer"
1081
+ ,,43-9080,,,Proofreaders and Copy Markers
1082
+ ,,,43-9081,,Proofreaders and Copy Markers
1083
+ ,,43-9110,,,Statistical Assistants
1084
+ ,,,43-9111,,Statistical Assistants
1085
+ ,,43-9190,,,Miscellaneous Office and Administrative Support Workers
1086
+ ,,,43-9199,,"Office and Administrative Support Workers, All Other"
1087
+ 45-0000,,,,,"Farming, Fishing, and Forestry Occupations"
1088
+ ,45-1000,,,,"Supervisors of Farming, Fishing, and Forestry Workers"
1089
+ ,,45-1010,,,"First-Line Supervisors of Farming, Fishing, and Forestry Workers"
1090
+ ,,,45-1011,,"First-Line Supervisors of Farming, Fishing, and Forestry Workers"
1091
+ ,45-2000,,,,Agricultural Workers
1092
+ ,,45-2010,,,Agricultural Inspectors
1093
+ ,,,45-2011,,Agricultural Inspectors
1094
+ ,,45-2020,,,Animal Breeders
1095
+ ,,,45-2021,,Animal Breeders
1096
+ ,,45-2040,,,"Graders and Sorters, Agricultural Products"
1097
+ ,,,45-2041,,"Graders and Sorters, Agricultural Products"
1098
+ ,,45-2090,,,Miscellaneous Agricultural Workers
1099
+ ,,,45-2091,,Agricultural Equipment Operators
1100
+ ,,,45-2092,,"Farmworkers and Laborers, Crop, Nursery, and Greenhouse"
1101
+ ,,,45-2093,,"Farmworkers, Farm, Ranch, and Aquacultural Animals"
1102
+ ,,,45-2099,,"Agricultural Workers, All Other"
1103
+ ,45-3000,,,,Fishing and Hunting Workers
1104
+ ,,45-3030,,,Fishing and Hunting Workers
1105
+ ,,,45-3031,,Fishing and Hunting Workers
1106
+ ,45-4000,,,,"Forest, Conservation, and Logging Workers"
1107
+ ,,45-4010,,,Forest and Conservation Workers
1108
+ ,,,45-4011,,Forest and Conservation Workers
1109
+ ,,45-4020,,,Logging Workers
1110
+ ,,,45-4021,,Fallers
1111
+ ,,,45-4022,,Logging Equipment Operators
1112
+ ,,,45-4023,,Log Graders and Scalers
1113
+ ,,,45-4029,,"Logging Workers, All Other"
1114
+ 47-0000,,,,,Construction and Extraction Occupations
1115
+ ,47-1000,,,,Supervisors of Construction and Extraction Workers
1116
+ ,,47-1010,,,First-Line Supervisors of Construction Trades and Extraction Workers
1117
+ ,,,47-1011,,First-Line Supervisors of Construction Trades and Extraction Workers
1118
+ ,,,,47-1011.03,Solar Energy Installation Managers
1119
+ ,47-2000,,,,Construction Trades Workers
1120
+ ,,47-2010,,,Boilermakers
1121
+ ,,,47-2011,,Boilermakers
1122
+ ,,47-2020,,,"Brickmasons, Blockmasons, and Stonemasons"
1123
+ ,,,47-2021,,Brickmasons and Blockmasons
1124
+ ,,,47-2022,,Stonemasons
1125
+ ,,47-2030,,,Carpenters
1126
+ ,,,47-2031,,Carpenters
1127
+ ,,47-2040,,,"Carpet, Floor, and Tile Installers and Finishers"
1128
+ ,,,47-2041,,Carpet Installers
1129
+ ,,,47-2042,,"Floor Layers, Except Carpet, Wood, and Hard Tiles"
1130
+ ,,,47-2043,,Floor Sanders and Finishers
1131
+ ,,,47-2044,,Tile and Stone Setters
1132
+ ,,47-2050,,,"Cement Masons, Concrete Finishers, and Terrazzo Workers"
1133
+ ,,,47-2051,,Cement Masons and Concrete Finishers
1134
+ ,,,47-2053,,Terrazzo Workers and Finishers
1135
+ ,,47-2060,,,Construction Laborers
1136
+ ,,,47-2061,,Construction Laborers
1137
+ ,,47-2070,,,Construction Equipment Operators
1138
+ ,,,47-2071,,"Paving, Surfacing, and Tamping Equipment Operators"
1139
+ ,,,47-2072,,Pile Driver Operators
1140
+ ,,,47-2073,,Operating Engineers and Other Construction Equipment Operators
1141
+ ,,47-2080,,,"Drywall Installers, Ceiling Tile Installers, and Tapers"
1142
+ ,,,47-2081,,Drywall and Ceiling Tile Installers
1143
+ ,,,47-2082,,Tapers
1144
+ ,,47-2110,,,Electricians
1145
+ ,,,47-2111,,Electricians
1146
+ ,,47-2120,,,Glaziers
1147
+ ,,,47-2121,,Glaziers
1148
+ ,,47-2130,,,Insulation Workers
1149
+ ,,,47-2131,,"Insulation Workers, Floor, Ceiling, and Wall"
1150
+ ,,,47-2132,,"Insulation Workers, Mechanical"
1151
+ ,,47-2140,,,Painters and Paperhangers
1152
+ ,,,47-2141,,"Painters, Construction and Maintenance"
1153
+ ,,,47-2142,,Paperhangers
1154
+ ,,47-2150,,,"Pipelayers, Plumbers, Pipefitters, and Steamfitters"
1155
+ ,,,47-2151,,Pipelayers
1156
+ ,,,47-2152,,"Plumbers, Pipefitters, and Steamfitters"
1157
+ ,,,,47-2152.04,Solar Thermal Installers and Technicians
1158
+ ,,47-2160,,,Plasterers and Stucco Masons
1159
+ ,,,47-2161,,Plasterers and Stucco Masons
1160
+ ,,47-2170,,,Reinforcing Iron and Rebar Workers
1161
+ ,,,47-2171,,Reinforcing Iron and Rebar Workers
1162
+ ,,47-2180,,,Roofers
1163
+ ,,,47-2181,,Roofers
1164
+ ,,47-2210,,,Sheet Metal Workers
1165
+ ,,,47-2211,,Sheet Metal Workers
1166
+ ,,47-2220,,,Structural Iron and Steel Workers
1167
+ ,,,47-2221,,Structural Iron and Steel Workers
1168
+ ,,47-2230,,,Solar Photovoltaic Installers
1169
+ ,,,47-2231,,Solar Photovoltaic Installers
1170
+ ,47-3000,,,,"Helpers, Construction Trades"
1171
+ ,,47-3010,,,"Helpers, Construction Trades"
1172
+ ,,,47-3011,,"Helpers--Brickmasons, Blockmasons, Stonemasons, and Tile and Marble Setters"
1173
+ ,,,47-3012,,Helpers--Carpenters
1174
+ ,,,47-3013,,Helpers--Electricians
1175
+ ,,,47-3014,,"Helpers--Painters, Paperhangers, Plasterers, and Stucco Masons"
1176
+ ,,,47-3015,,"Helpers--Pipelayers, Plumbers, Pipefitters, and Steamfitters"
1177
+ ,,,47-3016,,Helpers--Roofers
1178
+ ,,,47-3019,,"Helpers, Construction Trades, All Other"
1179
+ ,47-4000,,,,Other Construction and Related Workers
1180
+ ,,47-4010,,,Construction and Building Inspectors
1181
+ ,,,47-4011,,Construction and Building Inspectors
1182
+ ,,,,47-4011.01,Energy Auditors
1183
+ ,,47-4020,,,Elevator and Escalator Installers and Repairers
1184
+ ,,,47-4021,,Elevator and Escalator Installers and Repairers
1185
+ ,,47-4030,,,Fence Erectors
1186
+ ,,,47-4031,,Fence Erectors
1187
+ ,,47-4040,,,Hazardous Materials Removal Workers
1188
+ ,,,47-4041,,Hazardous Materials Removal Workers
1189
+ ,,47-4050,,,Highway Maintenance Workers
1190
+ ,,,47-4051,,Highway Maintenance Workers
1191
+ ,,47-4060,,,Rail-Track Laying and Maintenance Equipment Operators
1192
+ ,,,47-4061,,Rail-Track Laying and Maintenance Equipment Operators
1193
+ ,,47-4070,,,Septic Tank Servicers and Sewer Pipe Cleaners
1194
+ ,,,47-4071,,Septic Tank Servicers and Sewer Pipe Cleaners
1195
+ ,,47-4090,,,Miscellaneous Construction and Related Workers
1196
+ ,,,47-4091,,Segmental Pavers
1197
+ ,,,47-4099,,"Construction and Related Workers, All Other"
1198
+ ,,,,47-4099.03,Weatherization Installers and Technicians
1199
+ ,47-5000,,,,Extraction Workers
1200
+ ,,47-5010,,,"Derrick, Rotary Drill, and Service Unit Operators, Oil and Gas"
1201
+ ,,,47-5011,,"Derrick Operators, Oil and Gas"
1202
+ ,,,47-5012,,"Rotary Drill Operators, Oil and Gas"
1203
+ ,,,47-5013,,"Service Unit Operators, Oil and Gas"
1204
+ ,,47-5020,,,Surface Mining Machine Operators and Earth Drillers
1205
+ ,,,47-5022,,"Excavating and Loading Machine and Dragline Operators, Surface Mining"
1206
+ ,,,47-5023,,"Earth Drillers, Except Oil and Gas"
1207
+ ,,47-5030,,,"Explosives Workers, Ordnance Handling Experts, and Blasters"
1208
+ ,,,47-5032,,"Explosives Workers, Ordnance Handling Experts, and Blasters"
1209
+ ,,47-5040,,,Underground Mining Machine Operators
1210
+ ,,,47-5041,,Continuous Mining Machine Operators
1211
+ ,,,47-5043,,"Roof Bolters, Mining"
1212
+ ,,,47-5044,,"Loading and Moving Machine Operators, Underground Mining"
1213
+ ,,,47-5049,,"Underground Mining Machine Operators, All Other"
1214
+ ,,47-5050,,,"Rock Splitters, Quarry"
1215
+ ,,,47-5051,,"Rock Splitters, Quarry"
1216
+ ,,47-5070,,,"Roustabouts, Oil and Gas"
1217
+ ,,,47-5071,,"Roustabouts, Oil and Gas"
1218
+ ,,47-5080,,,Helpers--Extraction Workers
1219
+ ,,,47-5081,,Helpers--Extraction Workers
1220
+ ,,47-5090,,,Miscellaneous Extraction Workers
1221
+ ,,,47-5099,,"Extraction Workers, All Other"
1222
+ 49-0000,,,,,"Installation, Maintenance, and Repair Occupations"
1223
+ ,49-1000,,,,"Supervisors of Installation, Maintenance, and Repair Workers"
1224
+ ,,49-1010,,,"First-Line Supervisors of Mechanics, Installers, and Repairers"
1225
+ ,,,49-1011,,"First-Line Supervisors of Mechanics, Installers, and Repairers"
1226
+ ,49-2000,,,,"Electrical and Electronic Equipment Mechanics, Installers, and Repairers"
1227
+ ,,49-2010,,,"Computer, Automated Teller, and Office Machine Repairers"
1228
+ ,,,49-2011,,"Computer, Automated Teller, and Office Machine Repairers"
1229
+ ,,49-2020,,,Radio and Telecommunications Equipment Installers and Repairers
1230
+ ,,,49-2021,,"Radio, Cellular, and Tower Equipment Installers and Repairers"
1231
+ ,,,49-2022,,"Telecommunications Equipment Installers and Repairers, Except Line Installers"
1232
+ ,,49-2090,,,"Miscellaneous Electrical and Electronic Equipment Mechanics, Installers, and Repairers"
1233
+ ,,,49-2091,,Avionics Technicians
1234
+ ,,,49-2092,,"Electric Motor, Power Tool, and Related Repairers"
1235
+ ,,,49-2093,,"Electrical and Electronics Installers and Repairers, Transportation Equipment"
1236
+ ,,,49-2094,,"Electrical and Electronics Repairers, Commercial and Industrial Equipment"
1237
+ ,,,49-2095,,"Electrical and Electronics Repairers, Powerhouse, Substation, and Relay"
1238
+ ,,,49-2096,,"Electronic Equipment Installers and Repairers, Motor Vehicles"
1239
+ ,,,49-2097,,Audiovisual Equipment Installers and Repairers
1240
+ ,,,49-2098,,Security and Fire Alarm Systems Installers
1241
+ ,49-3000,,,,"Vehicle and Mobile Equipment Mechanics, Installers, and Repairers"
1242
+ ,,49-3010,,,Aircraft Mechanics and Service Technicians
1243
+ ,,,49-3011,,Aircraft Mechanics and Service Technicians
1244
+ ,,49-3020,,,Automotive Technicians and Repairers
1245
+ ,,,49-3021,,Automotive Body and Related Repairers
1246
+ ,,,49-3022,,Automotive Glass Installers and Repairers
1247
+ ,,,49-3023,,Automotive Service Technicians and Mechanics
1248
+ ,,49-3030,,,Bus and Truck Mechanics and Diesel Engine Specialists
1249
+ ,,,49-3031,,Bus and Truck Mechanics and Diesel Engine Specialists
1250
+ ,,49-3040,,,Heavy Vehicle and Mobile Equipment Service Technicians and Mechanics
1251
+ ,,,49-3041,,Farm Equipment Mechanics and Service Technicians
1252
+ ,,,49-3042,,"Mobile Heavy Equipment Mechanics, Except Engines"
1253
+ ,,,49-3043,,Rail Car Repairers
1254
+ ,,49-3050,,,Small Engine Mechanics
1255
+ ,,,49-3051,,Motorboat Mechanics and Service Technicians
1256
+ ,,,49-3052,,Motorcycle Mechanics
1257
+ ,,,49-3053,,Outdoor Power Equipment and Other Small Engine Mechanics
1258
+ ,,49-3090,,,"Miscellaneous Vehicle and Mobile Equipment Mechanics, Installers, and Repairers"
1259
+ ,,,49-3091,,Bicycle Repairers
1260
+ ,,,49-3092,,Recreational Vehicle Service Technicians
1261
+ ,,,49-3093,,Tire Repairers and Changers
1262
+ ,49-9000,,,,"Other Installation, Maintenance, and Repair Occupations"
1263
+ ,,49-9010,,,Control and Valve Installers and Repairers
1264
+ ,,,49-9011,,Mechanical Door Repairers
1265
+ ,,,49-9012,,"Control and Valve Installers and Repairers, Except Mechanical Door"
1266
+ ,,49-9020,,,"Heating, Air Conditioning, and Refrigeration Mechanics and Installers"
1267
+ ,,,49-9021,,"Heating, Air Conditioning, and Refrigeration Mechanics and Installers"
1268
+ ,,49-9030,,,Home Appliance Repairers
1269
+ ,,,49-9031,,Home Appliance Repairers
1270
+ ,,49-9040,,,"Industrial Machinery Installation, Repair, and Maintenance Workers"
1271
+ ,,,49-9041,,Industrial Machinery Mechanics
1272
+ ,,,49-9043,,"Maintenance Workers, Machinery"
1273
+ ,,,49-9044,,Millwrights
1274
+ ,,,49-9045,,"Refractory Materials Repairers, Except Brickmasons"
1275
+ ,,49-9050,,,Line Installers and Repairers
1276
+ ,,,49-9051,,Electrical Power-Line Installers and Repairers
1277
+ ,,,49-9052,,Telecommunications Line Installers and Repairers
1278
+ ,,49-9060,,,Precision Instrument and Equipment Repairers
1279
+ ,,,49-9061,,Camera and Photographic Equipment Repairers
1280
+ ,,,49-9062,,Medical Equipment Repairers
1281
+ ,,,49-9063,,Musical Instrument Repairers and Tuners
1282
+ ,,,49-9064,,Watch and Clock Repairers
1283
+ ,,,49-9069,,"Precision Instrument and Equipment Repairers, All Other"
1284
+ ,,49-9070,,,"Maintenance and Repair Workers, General"
1285
+ ,,,49-9071,,"Maintenance and Repair Workers, General"
1286
+ ,,49-9080,,,Wind Turbine Service Technicians
1287
+ ,,,49-9081,,Wind Turbine Service Technicians
1288
+ ,,49-9090,,,"Miscellaneous Installation, Maintenance, and Repair Workers"
1289
+ ,,,49-9091,,"Coin, Vending, and Amusement Machine Servicers and Repairers"
1290
+ ,,,49-9092,,Commercial Divers
1291
+ ,,,49-9094,,Locksmiths and Safe Repairers
1292
+ ,,,49-9095,,Manufactured Building and Mobile Home Installers
1293
+ ,,,49-9096,,Riggers
1294
+ ,,,49-9097,,Signal and Track Switch Repairers
1295
+ ,,,49-9098,,"Helpers--Installation, Maintenance, and Repair Workers"
1296
+ ,,,49-9099,,"Installation, Maintenance, and Repair Workers, All Other"
1297
+ ,,,,49-9099.01,Geothermal Technicians
1298
+ 51-0000,,,,,Production Occupations
1299
+ ,51-1000,,,,Supervisors of Production Workers
1300
+ ,,51-1010,,,First-Line Supervisors of Production and Operating Workers
1301
+ ,,,51-1011,,First-Line Supervisors of Production and Operating Workers
1302
+ ,51-2000,,,,Assemblers and Fabricators
1303
+ ,,51-2010,,,"Aircraft Structure, Surfaces, Rigging, and Systems Assemblers"
1304
+ ,,,51-2011,,"Aircraft Structure, Surfaces, Rigging, and Systems Assemblers"
1305
+ ,,51-2020,,,"Electrical, Electronics, and Electromechanical Assemblers"
1306
+ ,,,51-2021,,"Coil Winders, Tapers, and Finishers"
1307
+ ,,,51-2022,,Electrical and Electronic Equipment Assemblers
1308
+ ,,,51-2023,,Electromechanical Equipment Assemblers
1309
+ ,,51-2030,,,Engine and Other Machine Assemblers
1310
+ ,,,51-2031,,Engine and Other Machine Assemblers
1311
+ ,,51-2040,,,Structural Metal Fabricators and Fitters
1312
+ ,,,51-2041,,Structural Metal Fabricators and Fitters
1313
+ ,,51-2050,,,Fiberglass Laminators and Fabricators
1314
+ ,,,51-2051,,Fiberglass Laminators and Fabricators
1315
+ ,,51-2060,,,Timing Device Assemblers and Adjusters
1316
+ ,,,51-2061,,Timing Device Assemblers and Adjusters
1317
+ ,,51-2090,,,Miscellaneous Assemblers and Fabricators
1318
+ ,,,51-2092,,Team Assemblers
1319
+ ,,,51-2099,,"Assemblers and Fabricators, All Other"
1320
+ ,51-3000,,,,Food Processing Workers
1321
+ ,,51-3010,,,Bakers
1322
+ ,,,51-3011,,Bakers
1323
+ ,,51-3020,,,"Butchers and Other Meat, Poultry, and Fish Processing Workers"
1324
+ ,,,51-3021,,Butchers and Meat Cutters
1325
+ ,,,51-3022,,"Meat, Poultry, and Fish Cutters and Trimmers"
1326
+ ,,,51-3023,,Slaughterers and Meat Packers
1327
+ ,,51-3090,,,Miscellaneous Food Processing Workers
1328
+ ,,,51-3091,,"Food and Tobacco Roasting, Baking, and Drying Machine Operators and Tenders"
1329
+ ,,,51-3092,,Food Batchmakers
1330
+ ,,,51-3093,,Food Cooking Machine Operators and Tenders
1331
+ ,,,51-3099,,"Food Processing Workers, All Other"
1332
+ ,51-4000,,,,Metal Workers and Plastic Workers
1333
+ ,,51-4020,,,"Forming Machine Setters, Operators, and Tenders, Metal and Plastic"
1334
+ ,,,51-4021,,"Extruding and Drawing Machine Setters, Operators, and Tenders, Metal and Plastic"
1335
+ ,,,51-4022,,"Forging Machine Setters, Operators, and Tenders, Metal and Plastic"
1336
+ ,,,51-4023,,"Rolling Machine Setters, Operators, and Tenders, Metal and Plastic"
1337
+ ,,51-4030,,,"Machine Tool Cutting Setters, Operators, and Tenders, Metal and Plastic"
1338
+ ,,,51-4031,,"Cutting, Punching, and Press Machine Setters, Operators, and Tenders, Metal and Plastic"
1339
+ ,,,51-4032,,"Drilling and Boring Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1340
+ ,,,51-4033,,"Grinding, Lapping, Polishing, and Buffing Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1341
+ ,,,51-4034,,"Lathe and Turning Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1342
+ ,,,51-4035,,"Milling and Planing Machine Setters, Operators, and Tenders, Metal and Plastic"
1343
+ ,,51-4040,,,Machinists
1344
+ ,,,51-4041,,Machinists
1345
+ ,,51-4050,,,"Metal Furnace Operators, Tenders, Pourers, and Casters"
1346
+ ,,,51-4051,,Metal-Refining Furnace Operators and Tenders
1347
+ ,,,51-4052,,"Pourers and Casters, Metal"
1348
+ ,,51-4060,,,"Model Makers and Patternmakers, Metal and Plastic"
1349
+ ,,,51-4061,,"Model Makers, Metal and Plastic"
1350
+ ,,,51-4062,,"Patternmakers, Metal and Plastic"
1351
+ ,,51-4070,,,"Molders and Molding Machine Setters, Operators, and Tenders, Metal and Plastic"
1352
+ ,,,51-4071,,Foundry Mold and Coremakers
1353
+ ,,,51-4072,,"Molding, Coremaking, and Casting Machine Setters, Operators, and Tenders, Metal and Plastic"
1354
+ ,,51-4080,,,"Multiple Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1355
+ ,,,51-4081,,"Multiple Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1356
+ ,,51-4110,,,Tool and Die Makers
1357
+ ,,,51-4111,,Tool and Die Makers
1358
+ ,,51-4120,,,"Welding, Soldering, and Brazing Workers"
1359
+ ,,,51-4121,,"Welders, Cutters, Solderers, and Brazers"
1360
+ ,,,51-4122,,"Welding, Soldering, and Brazing Machine Setters, Operators, and Tenders"
1361
+ ,,51-4190,,,Miscellaneous Metal Workers and Plastic Workers
1362
+ ,,,51-4191,,"Heat Treating Equipment Setters, Operators, and Tenders, Metal and Plastic"
1363
+ ,,,51-4192,,"Layout Workers, Metal and Plastic"
1364
+ ,,,51-4193,,"Plating Machine Setters, Operators, and Tenders, Metal and Plastic"
1365
+ ,,,51-4194,,"Tool Grinders, Filers, and Sharpeners"
1366
+ ,,,51-4199,,"Metal Workers and Plastic Workers, All Other"
1367
+ ,51-5100,,,,Printing Workers
1368
+ ,,51-5110,,,Printing Workers
1369
+ ,,,51-5111,,Prepress Technicians and Workers
1370
+ ,,,51-5112,,Printing Press Operators
1371
+ ,,,51-5113,,Print Binding and Finishing Workers
1372
+ ,51-6000,,,,"Textile, Apparel, and Furnishings Workers"
1373
+ ,,51-6010,,,Laundry and Dry-Cleaning Workers
1374
+ ,,,51-6011,,Laundry and Dry-Cleaning Workers
1375
+ ,,51-6020,,,"Pressers, Textile, Garment, and Related Materials"
1376
+ ,,,51-6021,,"Pressers, Textile, Garment, and Related Materials"
1377
+ ,,51-6030,,,Sewing Machine Operators
1378
+ ,,,51-6031,,Sewing Machine Operators
1379
+ ,,51-6040,,,Shoe and Leather Workers
1380
+ ,,,51-6041,,Shoe and Leather Workers and Repairers
1381
+ ,,,51-6042,,Shoe Machine Operators and Tenders
1382
+ ,,51-6050,,,"Tailors, Dressmakers, and Sewers"
1383
+ ,,,51-6051,,"Sewers, Hand"
1384
+ ,,,51-6052,,"Tailors, Dressmakers, and Custom Sewers"
1385
+ ,,51-6060,,,"Textile Machine Setters, Operators, and Tenders"
1386
+ ,,,51-6061,,Textile Bleaching and Dyeing Machine Operators and Tenders
1387
+ ,,,51-6062,,"Textile Cutting Machine Setters, Operators, and Tenders"
1388
+ ,,,51-6063,,"Textile Knitting and Weaving Machine Setters, Operators, and Tenders"
1389
+ ,,,51-6064,,"Textile Winding, Twisting, and Drawing Out Machine Setters, Operators, and Tenders"
1390
+ ,,51-6090,,,"Miscellaneous Textile, Apparel, and Furnishings Workers"
1391
+ ,,,51-6091,,"Extruding and Forming Machine Setters, Operators, and Tenders, Synthetic and Glass Fibers"
1392
+ ,,,51-6092,,Fabric and Apparel Patternmakers
1393
+ ,,,51-6093,,Upholsterers
1394
+ ,,,51-6099,,"Textile, Apparel, and Furnishings Workers, All Other"
1395
+ ,51-7000,,,,Woodworkers
1396
+ ,,51-7010,,,Cabinetmakers and Bench Carpenters
1397
+ ,,,51-7011,,Cabinetmakers and Bench Carpenters
1398
+ ,,51-7020,,,Furniture Finishers
1399
+ ,,,51-7021,,Furniture Finishers
1400
+ ,,51-7030,,,"Model Makers and Patternmakers, Wood"
1401
+ ,,,51-7031,,"Model Makers, Wood"
1402
+ ,,,51-7032,,"Patternmakers, Wood"
1403
+ ,,51-7040,,,"Woodworking Machine Setters, Operators, and Tenders"
1404
+ ,,,51-7041,,"Sawing Machine Setters, Operators, and Tenders, Wood"
1405
+ ,,,51-7042,,"Woodworking Machine Setters, Operators, and Tenders, Except Sawing"
1406
+ ,,51-7090,,,Miscellaneous Woodworkers
1407
+ ,,,51-7099,,"Woodworkers, All Other"
1408
+ ,51-8000,,,,Plant and System Operators
1409
+ ,,51-8010,,,"Power Plant Operators, Distributors, and Dispatchers"
1410
+ ,,,51-8011,,Nuclear Power Reactor Operators
1411
+ ,,,51-8012,,Power Distributors and Dispatchers
1412
+ ,,,51-8013,,Power Plant Operators
1413
+ ,,,,51-8013.03,Biomass Plant Technicians
1414
+ ,,,,51-8013.04,Hydroelectric Plant Technicians
1415
+ ,,51-8020,,,Stationary Engineers and Boiler Operators
1416
+ ,,,51-8021,,Stationary Engineers and Boiler Operators
1417
+ ,,51-8030,,,Water and Wastewater Treatment Plant and System Operators
1418
+ ,,,51-8031,,Water and Wastewater Treatment Plant and System Operators
1419
+ ,,51-8090,,,Miscellaneous Plant and System Operators
1420
+ ,,,51-8091,,Chemical Plant and System Operators
1421
+ ,,,51-8092,,Gas Plant Operators
1422
+ ,,,51-8093,,"Petroleum Pump System Operators, Refinery Operators, and Gaugers"
1423
+ ,,,51-8099,,"Plant and System Operators, All Other"
1424
+ ,,,,51-8099.01,Biofuels Processing Technicians
1425
+ ,51-9000,,,,Other Production Occupations
1426
+ ,,51-9010,,,"Chemical Processing Machine Setters, Operators, and Tenders"
1427
+ ,,,51-9011,,Chemical Equipment Operators and Tenders
1428
+ ,,,51-9012,,"Separating, Filtering, Clarifying, Precipitating, and Still Machine Setters, Operators, and Tenders"
1429
+ ,,51-9020,,,"Crushing, Grinding, Polishing, Mixing, and Blending Workers"
1430
+ ,,,51-9021,,"Crushing, Grinding, and Polishing Machine Setters, Operators, and Tenders"
1431
+ ,,,51-9022,,"Grinding and Polishing Workers, Hand"
1432
+ ,,,51-9023,,"Mixing and Blending Machine Setters, Operators, and Tenders"
1433
+ ,,51-9030,,,Cutting Workers
1434
+ ,,,51-9031,,"Cutters and Trimmers, Hand"
1435
+ ,,,51-9032,,"Cutting and Slicing Machine Setters, Operators, and Tenders"
1436
+ ,,51-9040,,,"Extruding, Forming, Pressing, and Compacting Machine Setters, Operators, and Tenders"
1437
+ ,,,51-9041,,"Extruding, Forming, Pressing, and Compacting Machine Setters, Operators, and Tenders"
1438
+ ,,51-9050,,,"Furnace, Kiln, Oven, Drier, and Kettle Operators and Tenders"
1439
+ ,,,51-9051,,"Furnace, Kiln, Oven, Drier, and Kettle Operators and Tenders"
1440
+ ,,51-9060,,,"Inspectors, Testers, Sorters, Samplers, and Weighers"
1441
+ ,,,51-9061,,"Inspectors, Testers, Sorters, Samplers, and Weighers"
1442
+ ,,51-9070,,,Jewelers and Precious Stone and Metal Workers
1443
+ ,,,51-9071,,Jewelers and Precious Stone and Metal Workers
1444
+ ,,,,51-9071.06,Gem and Diamond Workers
1445
+ ,,51-9080,,,Dental and Ophthalmic Laboratory Technicians and Medical Appliance Technicians
1446
+ ,,,51-9081,,Dental Laboratory Technicians
1447
+ ,,,51-9082,,Medical Appliance Technicians
1448
+ ,,,51-9083,,Ophthalmic Laboratory Technicians
1449
+ ,,51-9110,,,Packaging and Filling Machine Operators and Tenders
1450
+ ,,,51-9111,,Packaging and Filling Machine Operators and Tenders
1451
+ ,,51-9120,,,Painting Workers
1452
+ ,,,51-9123,,"Painting, Coating, and Decorating Workers"
1453
+ ,,,51-9124,,"Coating, Painting, and Spraying Machine Setters, Operators, and Tenders"
1454
+ ,,51-9140,,,Semiconductor Processing Technicians
1455
+ ,,,51-9141,,Semiconductor Processing Technicians
1456
+ ,,51-9150,,,Photographic Process Workers and Processing Machine Operators
1457
+ ,,,51-9151,,Photographic Process Workers and Processing Machine Operators
1458
+ ,,51-9160,,,Computer Numerically Controlled Tool Operators and Programmers
1459
+ ,,,51-9161,,Computer Numerically Controlled Tool Operators
1460
+ ,,,51-9162,,Computer Numerically Controlled Tool Programmers
1461
+ ,,51-9190,,,Miscellaneous Production Workers
1462
+ ,,,51-9191,,Adhesive Bonding Machine Operators and Tenders
1463
+ ,,,51-9192,,"Cleaning, Washing, and Metal Pickling Equipment Operators and Tenders"
1464
+ ,,,51-9193,,Cooling and Freezing Equipment Operators and Tenders
1465
+ ,,,51-9194,,Etchers and Engravers
1466
+ ,,,51-9195,,"Molders, Shapers, and Casters, Except Metal and Plastic"
1467
+ ,,,,51-9195.03,"Stone Cutters and Carvers, Manufacturing"
1468
+ ,,,,51-9195.04,"Glass Blowers, Molders, Benders, and Finishers"
1469
+ ,,,,51-9195.05,"Potters, Manufacturing"
1470
+ ,,,51-9196,,"Paper Goods Machine Setters, Operators, and Tenders"
1471
+ ,,,51-9197,,Tire Builders
1472
+ ,,,51-9198,,Helpers--Production Workers
1473
+ ,,,51-9199,,"Production Workers, All Other"
1474
+ 53-0000,,,,,Transportation and Material Moving Occupations
1475
+ ,53-1000,,,,Supervisors of Transportation and Material Moving Workers
1476
+ ,,53-1040,,,First-Line Supervisors of Transportation and Material Moving Workers
1477
+ ,,,53-1041,,Aircraft Cargo Handling Supervisors
1478
+ ,,,53-1042,,"First-Line Supervisors of Helpers, Laborers, and Material Movers, Hand"
1479
+ ,,,,53-1042.01,Recycling Coordinators
1480
+ ,,,53-1043,,First-Line Supervisors of Material-Moving Machine and Vehicle Operators
1481
+ ,,,53-1044,,First-Line Supervisors of Passenger Attendants
1482
+ ,,,53-1049,,"First-Line Supervisors of Transportation Workers, All Other"
1483
+ ,53-2000,,,,Air Transportation Workers
1484
+ ,,53-2010,,,Aircraft Pilots and Flight Engineers
1485
+ ,,,53-2011,,"Airline Pilots, Copilots, and Flight Engineers"
1486
+ ,,,53-2012,,Commercial Pilots
1487
+ ,,53-2020,,,Air Traffic Controllers and Airfield Operations Specialists
1488
+ ,,,53-2021,,Air Traffic Controllers
1489
+ ,,,53-2022,,Airfield Operations Specialists
1490
+ ,,53-2030,,,Flight Attendants
1491
+ ,,,53-2031,,Flight Attendants
1492
+ ,53-3000,,,,Motor Vehicle Operators
1493
+ ,,53-3010,,,"Ambulance Drivers and Attendants, Except Emergency Medical Technicians"
1494
+ ,,,53-3011,,"Ambulance Drivers and Attendants, Except Emergency Medical Technicians"
1495
+ ,,53-3030,,,Driver/Sales Workers and Truck Drivers
1496
+ ,,,53-3031,,Driver/Sales Workers
1497
+ ,,,53-3032,,Heavy and Tractor-Trailer Truck Drivers
1498
+ ,,,53-3033,,Light Truck Drivers
1499
+ ,,53-3050,,,Passenger Vehicle Drivers
1500
+ ,,,53-3051,,"Bus Drivers, School"
1501
+ ,,,53-3052,,"Bus Drivers, Transit and Intercity"
1502
+ ,,,53-3053,,Shuttle Drivers and Chauffeurs
1503
+ ,,,53-3054,,Taxi Drivers
1504
+ ,,53-3090,,,Miscellaneous Motor Vehicle Operators
1505
+ ,,,53-3099,,"Motor Vehicle Operators, All Other"
1506
+ ,53-4000,,,,Rail Transportation Workers
1507
+ ,,53-4010,,,Locomotive Engineers and Operators
1508
+ ,,,53-4011,,Locomotive Engineers
1509
+ ,,,53-4013,,"Rail Yard Engineers, Dinkey Operators, and Hostlers"
1510
+ ,,53-4020,,,"Railroad Brake, Signal, and Switch Operators and Locomotive Firers"
1511
+ ,,,53-4022,,"Railroad Brake, Signal, and Switch Operators and Locomotive Firers"
1512
+ ,,53-4030,,,Railroad Conductors and Yardmasters
1513
+ ,,,53-4031,,Railroad Conductors and Yardmasters
1514
+ ,,53-4040,,,Subway and Streetcar Operators
1515
+ ,,,53-4041,,Subway and Streetcar Operators
1516
+ ,,53-4090,,,Miscellaneous Rail Transportation Workers
1517
+ ,,,53-4099,,"Rail Transportation Workers, All Other"
1518
+ ,53-5000,,,,Water Transportation Workers
1519
+ ,,53-5010,,,Sailors and Marine Oilers
1520
+ ,,,53-5011,,Sailors and Marine Oilers
1521
+ ,,53-5020,,,Ship and Boat Captains and Operators
1522
+ ,,,53-5021,,"Captains, Mates, and Pilots of Water Vessels"
1523
+ ,,,53-5022,,Motorboat Operators
1524
+ ,,53-5030,,,Ship Engineers
1525
+ ,,,53-5031,,Ship Engineers
1526
+ ,53-6000,,,,Other Transportation Workers
1527
+ ,,53-6010,,,Bridge and Lock Tenders
1528
+ ,,,53-6011,,Bridge and Lock Tenders
1529
+ ,,53-6020,,,Parking Attendants
1530
+ ,,,53-6021,,Parking Attendants
1531
+ ,,53-6030,,,Transportation Service Attendants
1532
+ ,,,53-6031,,Automotive and Watercraft Service Attendants
1533
+ ,,,53-6032,,Aircraft Service Attendants
1534
+ ,,53-6040,,,Traffic Technicians
1535
+ ,,,53-6041,,Traffic Technicians
1536
+ ,,53-6050,,,Transportation Inspectors
1537
+ ,,,53-6051,,Transportation Inspectors
1538
+ ,,,,53-6051.01,Aviation Inspectors
1539
+ ,,,,53-6051.07,"Transportation Vehicle, Equipment and Systems Inspectors, Except Aviation"
1540
+ ,,53-6060,,,Passenger Attendants
1541
+ ,,,53-6061,,Passenger Attendants
1542
+ ,,53-6090,,,Miscellaneous Transportation Workers
1543
+ ,,,53-6099,,"Transportation Workers, All Other"
1544
+ ,53-7000,,,,Material Moving Workers
1545
+ ,,53-7010,,,Conveyor Operators and Tenders
1546
+ ,,,53-7011,,Conveyor Operators and Tenders
1547
+ ,,53-7020,,,Crane and Tower Operators
1548
+ ,,,53-7021,,Crane and Tower Operators
1549
+ ,,53-7030,,,Dredge Operators
1550
+ ,,,53-7031,,Dredge Operators
1551
+ ,,53-7040,,,Hoist and Winch Operators
1552
+ ,,,53-7041,,Hoist and Winch Operators
1553
+ ,,53-7050,,,Industrial Truck and Tractor Operators
1554
+ ,,,53-7051,,Industrial Truck and Tractor Operators
1555
+ ,,53-7060,,,Laborers and Material Movers
1556
+ ,,,53-7061,,Cleaners of Vehicles and Equipment
1557
+ ,,,53-7062,,"Laborers and Freight, Stock, and Material Movers, Hand"
1558
+ ,,,,53-7062.04,Recycling and Reclamation Workers
1559
+ ,,,53-7063,,Machine Feeders and Offbearers
1560
+ ,,,53-7064,,"Packers and Packagers, Hand"
1561
+ ,,,53-7065,,Stockers and Order Fillers
1562
+ ,,53-7070,,,Pumping Station Operators
1563
+ ,,,53-7071,,Gas Compressor and Gas Pumping Station Operators
1564
+ ,,,53-7072,,"Pump Operators, Except Wellhead Pumpers"
1565
+ ,,,53-7073,,Wellhead Pumpers
1566
+ ,,53-7080,,,Refuse and Recyclable Material Collectors
1567
+ ,,,53-7081,,Refuse and Recyclable Material Collectors
1568
+ ,,53-7120,,,"Tank Car, Truck, and Ship Loaders"
1569
+ ,,,53-7121,,"Tank Car, Truck, and Ship Loaders"
1570
+ ,,53-7190,,,Miscellaneous Material Moving Workers
1571
+ ,,,53-7199,,"Material Moving Workers, All Other"
1572
+ 55-0000,,,,,Military Specific Occupations
1573
+ ,55-1000,,,,Military Officer Special and Tactical Operations Leaders
1574
+ ,,55-1010,,,Military Officer Special and Tactical Operations Leaders
1575
+ ,,,55-1011,,Air Crew Officers
1576
+ ,,,55-1012,,Aircraft Launch and Recovery Officers
1577
+ ,,,55-1013,,Armored Assault Vehicle Officers
1578
+ ,,,55-1014,,Artillery and Missile Officers
1579
+ ,,,55-1015,,Command and Control Center Officers
1580
+ ,,,55-1016,,Infantry Officers
1581
+ ,,,55-1017,,Special Forces Officers
1582
+ ,,,55-1019,,"Military Officer Special and Tactical Operations Leaders, All Other"
1583
+ ,55-2000,,,,First-Line Enlisted Military Supervisors
1584
+ ,,55-2010,,,First-Line Enlisted Military Supervisors
1585
+ ,,,55-2011,,First-Line Supervisors of Air Crew Members
1586
+ ,,,55-2012,,First-Line Supervisors of Weapons Specialists/Crew Members
1587
+ ,,,55-2013,,First-Line Supervisors of All Other Tactical Operations Specialists
1588
+ ,55-3000,,,,Military Enlisted Tactical Operations and Air/Weapons Specialists and Crew Members
1589
+ ,,55-3010,,,Military Enlisted Tactical Operations and Air/Weapons Specialists and Crew Members
1590
+ ,,,55-3011,,Air Crew Members
1591
+ ,,,55-3012,,Aircraft Launch and Recovery Specialists
1592
+ ,,,55-3013,,Armored Assault Vehicle Crew Members
1593
+ ,,,55-3014,,Artillery and Missile Crew Members
1594
+ ,,,55-3015,,Command and Control Center Specialists
1595
+ ,,,55-3016,,Infantry
1596
+ ,,,55-3018,,Special Forces
1597
+ ,,,55-3019,,"Military Enlisted Tactical Operations and Air/Weapons Specialists and Crew Members, All Other"
release_2025_02_10/automation_vs_augmentation.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ interaction_type,pct
2
+ directive,22.563272409918948
3
+ feedback loop,12.036303266190515
4
+ learning,18.917648061953294
5
+ none,2.9013020624347967
6
+ task iteration,25.47648663831153
7
+ validation,2.314220367546746
release_2025_02_10/bls_employment_may_2023.csv ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ SOC or O*NET-SOC 2019 Title,bls_distribution
2
+ Management Occupations,10495770
3
+ Business and Financial Operations Occupations,10087830
4
+ Computer and Mathematical Occupations,5177400
5
+ Architecture and Engineering Occupations,2539660
6
+ "Life, Physical, and Social Science Occupations",1389430
7
+ Community and Social Service Occupations,2418130
8
+ Legal Occupations,1240630
9
+ Educational Instruction and Library Occupations,8744560
10
+ "Arts, Design, Entertainment, Sports, and Media Occupations",2106490
11
+ Healthcare Practitioners and Technical Occupations,9284210
12
+ Healthcare Support Occupations,7063530
13
+ Protective Service Occupations,3504330
14
+ Food Preparation and Serving Related Occupations,13247870
15
+ Building and Grounds Cleaning and Maintenance Occupations,4429070
16
+ Personal Care and Service Occupations,3040630
17
+ Sales and Related Occupations,13380660
18
+ Office and Administrative Support Occupations,18533450
19
+ "Farming, Fishing, and Forestry Occupations",432200
20
+ Construction and Extraction Occupations,6225630
21
+ "Installation, Maintenance, and Repair Occupations",5989460
22
+ Production Occupations,8770170
23
+ Transportation and Material Moving Occupations,13752760
release_2025_02_10/onet_task_mappings.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_02_10/onet_task_statements.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_02_10/plots.ipynb ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Which Economic Tasks are Performed with AI? Evidence from Millions of Claude Conversations\n",
8
+ "\n",
9
+ "_Handa et al., 2025_"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 1,
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "import pandas as pd\n",
19
+ "import matplotlib.pyplot as plt\n",
20
+ "import seaborn as sns\n",
21
+ "from textwrap import wrap\n",
22
+ "import numpy as np\n",
23
+ "\n",
24
+ "palette = sns.color_palette(\"colorblind\")"
25
+ ]
26
+ },
27
+ {
28
+ "cell_type": "markdown",
29
+ "metadata": {},
30
+ "source": [
31
+ "### Create O*NET / SOC Merged Dataframe"
32
+ ]
33
+ },
34
+ {
35
+ "cell_type": "code",
36
+ "execution_count": 2,
37
+ "metadata": {},
38
+ "outputs": [],
39
+ "source": [
40
+ "def merge_onet_soc_data() -> pd.DataFrame:\n",
41
+ " \"\"\"\n",
42
+ " Merges O*NET task statements with SOC (Standard Occupational Classification) data\n",
43
+ " based on major group codes.\n",
44
+ " \n",
45
+ " Args:\n",
46
+ " onet_path (str): Path to the O*NET task statements CSV file\n",
47
+ " soc_path (str): Path to the SOC structure CSV file\n",
48
+ " \n",
49
+ " Returns:\n",
50
+ " pd.DataFrame: Merged DataFrame containing O*NET data with SOC major group titles\n",
51
+ " \"\"\"\n",
52
+ "\n",
53
+ " # Read and process O*NET data\n",
54
+ " onet_df = pd.read_csv(\"onet_task_statements.csv\")\n",
55
+ " onet_df[\"soc_major_group\"] = onet_df[\"O*NET-SOC Code\"].str[:2]\n",
56
+ " \n",
57
+ " # Read and process SOC data\n",
58
+ " soc_df = pd.read_csv(\"SOC_Structure.csv\")\n",
59
+ " soc_df = soc_df.dropna(subset=['Major Group'])\n",
60
+ " soc_df[\"soc_major_group\"] = soc_df[\"Major Group\"].str[:2]\n",
61
+ " \n",
62
+ " # Merge datasets\n",
63
+ " merged_df = onet_df.merge(\n",
64
+ " soc_df[['soc_major_group', 'SOC or O*NET-SOC 2019 Title']],\n",
65
+ " on='soc_major_group',\n",
66
+ " how='left'\n",
67
+ " )\n",
68
+ "\n",
69
+ " return merged_df"
70
+ ]
71
+ },
72
+ {
73
+ "cell_type": "code",
74
+ "execution_count": 3,
75
+ "metadata": {},
76
+ "outputs": [],
77
+ "source": [
78
+ "task_occupations_df = merge_onet_soc_data()"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": null,
84
+ "metadata": {},
85
+ "outputs": [],
86
+ "source": [
87
+ "task_occupations_df[\"Title\"].nunique()"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": null,
93
+ "metadata": {},
94
+ "outputs": [],
95
+ "source": [
96
+ "# Update cluster mappings to include data from the merged_df\n",
97
+ "task_occupations_df[\"task_normalized\"] = task_occupations_df[\"Task\"].str.lower().str.strip()\n",
98
+ "# Some tasks are included multiple times, so we need to count the number of occurrences per task\n",
99
+ "task_occupations_df[\"n_occurrences\"] = task_occupations_df.groupby(\"task_normalized\")[\"Title\"].transform(\"nunique\")\n",
100
+ "\n",
101
+ "task_occupations_df"
102
+ ]
103
+ },
104
+ {
105
+ "cell_type": "markdown",
106
+ "metadata": {},
107
+ "source": [
108
+ "### Load Task Mappings and Join"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "code",
113
+ "execution_count": 6,
114
+ "metadata": {},
115
+ "outputs": [],
116
+ "source": [
117
+ "task_mappings_df = pd.read_csv(\"onet_task_mappings.csv\")"
118
+ ]
119
+ },
120
+ {
121
+ "cell_type": "code",
122
+ "execution_count": null,
123
+ "metadata": {},
124
+ "outputs": [],
125
+ "source": [
126
+ "grouped_with_occupations = task_mappings_df.merge(\n",
127
+ " task_occupations_df,\n",
128
+ " left_on=\"task_name\",\n",
129
+ " right_on=\"task_normalized\",\n",
130
+ " how=\"left\"\n",
131
+ ")\n",
132
+ "\n",
133
+ "grouped_with_occupations[\"pct_occ_scaled\"] = 100 * (grouped_with_occupations[\"pct\"] / grouped_with_occupations[\"n_occurrences\"]) / (grouped_with_occupations[\"pct\"] / grouped_with_occupations[\"n_occurrences\"]).sum()\n",
134
+ "grouped_with_occupations[\"pct_occ_scaled\"].sum()"
135
+ ]
136
+ },
137
+ {
138
+ "cell_type": "code",
139
+ "execution_count": null,
140
+ "metadata": {},
141
+ "outputs": [],
142
+ "source": [
143
+ "grouped_with_occupations"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "markdown",
148
+ "metadata": {},
149
+ "source": [
150
+ "## EXPERIMENTS"
151
+ ]
152
+ },
153
+ {
154
+ "cell_type": "markdown",
155
+ "metadata": {},
156
+ "source": [
157
+ "### TASKS"
158
+ ]
159
+ },
160
+ {
161
+ "cell_type": "code",
162
+ "execution_count": 9,
163
+ "metadata": {},
164
+ "outputs": [],
165
+ "source": [
166
+ "# Set style and increase font sizes\n",
167
+ "plt.rcParams['font.size'] = 12 # Base font size\n",
168
+ "plt.rcParams['axes.titlesize'] = 14 # Title font size\n",
169
+ "plt.rcParams['axes.labelsize'] = 12 # Axis labels size\n",
170
+ "plt.rcParams['xtick.labelsize'] = 11 # X-axis tick labels size\n",
171
+ "plt.rcParams['ytick.labelsize'] = 11 # Y-axis tick labels size\n",
172
+ "plt.rcParams['legend.fontsize'] = 11 # Legend font size\n",
173
+ "plt.rcParams['figure.titlesize'] = 16 # Figure title size\n",
174
+ "\n",
175
+ "# If you're using seaborn, you can also set its context\n",
176
+ "sns.set_context(\"notebook\", font_scale=1.2)"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "code",
181
+ "execution_count": null,
182
+ "metadata": {},
183
+ "outputs": [],
184
+ "source": [
185
+ "# Get top 10 tasks overall to ensure consistent comparison\n",
186
+ "total_tasks = (grouped_with_occupations.groupby(\"Task\")[\"pct_occ_scaled\"]\n",
187
+ " .sum()\n",
188
+ " .sort_values(ascending=False))\n",
189
+ "top_10_tasks = total_tasks.head(10).index\n",
190
+ "\n",
191
+ "# Create plot dataframe with all groups\n",
192
+ "plot_df = (grouped_with_occupations[grouped_with_occupations[\"Task\"].isin(top_10_tasks)]\n",
193
+ " .groupby([\"Task\"])[\"pct_occ_scaled\"]\n",
194
+ " .sum()\n",
195
+ " .reset_index())\n",
196
+ "\n",
197
+ "# Sort tasks by overall frequency\n",
198
+ "task_order = (plot_df.groupby(\"Task\")[\"pct_occ_scaled\"]\n",
199
+ " .sum()\n",
200
+ " .sort_values(ascending=False)\n",
201
+ " .index)\n",
202
+ "plot_df[\"Task\"] = pd.Categorical(plot_df[\"Task\"], categories=task_order, ordered=True)\n",
203
+ "\n",
204
+ "# Create the plot\n",
205
+ "plt.figure(figsize=(16, 12))\n",
206
+ "sns.barplot(\n",
207
+ " data=plot_df,\n",
208
+ " x=\"pct_occ_scaled\",\n",
209
+ " y=\"Task\",\n",
210
+ " color=palette[0],\n",
211
+ ")\n",
212
+ "\n",
213
+ "# Wrap task titles\n",
214
+ "ax = plt.gca()\n",
215
+ "ax.set_yticklabels(['\\n'.join(wrap(label.get_text(), width=40)) \n",
216
+ " for label in ax.get_yticklabels()])\n",
217
+ "\n",
218
+ "# Modify legend\n",
219
+ "handles, labels = ax.get_legend_handles_labels()\n",
220
+ "\n",
221
+ "# Wrap task labels\n",
222
+ "ax = plt.gca()\n",
223
+ "ax.set_yticklabels(['\\n'.join(wrap(label.get_text(), width=40)) \n",
224
+ " for label in ax.get_yticklabels()])\n",
225
+ "\n",
226
+ "# Format x-axis as percentages\n",
227
+ "ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))\n",
228
+ "\n",
229
+ "# Customize the plot\n",
230
+ "plt.title('Top Tasks by % of Conversations')\n",
231
+ "plt.xlabel('Percentage of Records')\n",
232
+ "plt.ylabel('O*NET Task')\n",
233
+ "\n",
234
+ "# Adjust layout to prevent label cutoff\n",
235
+ "plt.tight_layout()\n",
236
+ "\n",
237
+ "plt.show()\n"
238
+ ]
239
+ },
240
+ {
241
+ "cell_type": "markdown",
242
+ "metadata": {},
243
+ "source": [
244
+ "### OCCUPATIONS"
245
+ ]
246
+ },
247
+ {
248
+ "cell_type": "code",
249
+ "execution_count": null,
250
+ "metadata": {},
251
+ "outputs": [],
252
+ "source": [
253
+ "grouped_with_occupations.groupby(\"Title\")[\"pct_occ_scaled\"].sum()"
254
+ ]
255
+ },
256
+ {
257
+ "cell_type": "code",
258
+ "execution_count": null,
259
+ "metadata": {},
260
+ "outputs": [],
261
+ "source": [
262
+ "# Calculate percentages per group and occupation\n",
263
+ "plot_df = (grouped_with_occupations.groupby(\"Title\")[\"pct_occ_scaled\"]\n",
264
+ " .sum()\n",
265
+ " .reset_index())\n",
266
+ "\n",
267
+ "# Get top occupations overall\n",
268
+ "total_occs = (plot_df.groupby(\"Title\")[\"pct_occ_scaled\"]\n",
269
+ " .sum()\n",
270
+ " .sort_values(ascending=False))\n",
271
+ "top_occs = total_occs.head(15).index\n",
272
+ "\n",
273
+ "# Filter for top occupations\n",
274
+ "plot_df = plot_df[plot_df[\"Title\"].isin(top_occs)]\n",
275
+ "\n",
276
+ "# Sort occupations by overall frequency\n",
277
+ "occ_order = (plot_df.groupby(\"Title\")[\"pct_occ_scaled\"]\n",
278
+ " .sum()\n",
279
+ " .sort_values(ascending=False)\n",
280
+ " .index)\n",
281
+ "plot_df[\"Title\"] = pd.Categorical(plot_df[\"Title\"], categories=occ_order, ordered=True)\n",
282
+ "\n",
283
+ "# Create the plot\n",
284
+ "plt.figure(figsize=(18, 16))\n",
285
+ "sns.barplot(\n",
286
+ " data=plot_df,\n",
287
+ " x=\"pct_occ_scaled\",\n",
288
+ " y=\"Title\",\n",
289
+ " color=palette[0],\n",
290
+ ")\n",
291
+ "\n",
292
+ "# Wrap occupation titles\n",
293
+ "ax = plt.gca()\n",
294
+ "ax.set_yticklabels(['\\n'.join(wrap(label.get_text(), width=40)) \n",
295
+ " for label in ax.get_yticklabels()])\n",
296
+ "\n",
297
+ "# Format x-axis as percentages\n",
298
+ "ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))\n",
299
+ "\n",
300
+ "# Customize the plot\n",
301
+ "plt.title('Top Occupations by % of Conversations')\n",
302
+ "plt.xlabel('Percentage of Conversations')\n",
303
+ "plt.ylabel('Occupation')\n",
304
+ "\n",
305
+ "# Adjust layout to prevent label cutoff\n",
306
+ "plt.tight_layout()\n",
307
+ "\n",
308
+ "plt.show()"
309
+ ]
310
+ },
311
+ {
312
+ "cell_type": "markdown",
313
+ "metadata": {},
314
+ "source": [
315
+ "### OCCUPATIONAL CATEGORIES"
316
+ ]
317
+ },
318
+ {
319
+ "cell_type": "code",
320
+ "execution_count": null,
321
+ "metadata": {},
322
+ "outputs": [],
323
+ "source": [
324
+ "# Calculate percentages per group and occupational category\n",
325
+ "plot_df = (grouped_with_occupations.groupby(\"SOC or O*NET-SOC 2019 Title\")[\"pct_occ_scaled\"]\n",
326
+ " .sum()\n",
327
+ " .reset_index())\n",
328
+ "\n",
329
+ "# Sort categories by group-1 frequency\n",
330
+ "cat_order = plot_df.sort_values(\"pct_occ_scaled\", ascending=False)[\"SOC or O*NET-SOC 2019 Title\"]\n",
331
+ "plot_df[\"SOC or O*NET-SOC 2019 Title\"] = pd.Categorical(\n",
332
+ " plot_df[\"SOC or O*NET-SOC 2019 Title\"], \n",
333
+ " categories=cat_order, \n",
334
+ " ordered=True\n",
335
+ ")\n",
336
+ "\n",
337
+ "# Create the plot\n",
338
+ "plt.figure(figsize=(18, 16))\n",
339
+ "sns.barplot(\n",
340
+ " data=plot_df,\n",
341
+ " x=\"pct_occ_scaled\",\n",
342
+ " y=\"SOC or O*NET-SOC 2019 Title\",\n",
343
+ " color=palette[0],\n",
344
+ ")\n",
345
+ "\n",
346
+ "# Wrap category labels and remove \" Occupations\" string\n",
347
+ "ax = plt.gca()\n",
348
+ "ax.set_yticklabels(['\\n'.join(wrap(label.get_text().replace(\" Occupations\", \"\"), width=60)) \n",
349
+ " for label in ax.get_yticklabels()])\n",
350
+ "\n",
351
+ "# Format x-axis as percentages\n",
352
+ "ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))\n",
353
+ "\n",
354
+ "# Customize the plot\n",
355
+ "plt.title('Occupational Categories by % of Conversations')\n",
356
+ "plt.xlabel('Percentage of Conversations')\n",
357
+ "plt.ylabel('Occupational Category')\n",
358
+ "\n",
359
+ "# Adjust layout to prevent label cutoff\n",
360
+ "plt.tight_layout()\n",
361
+ "\n",
362
+ "plt.show()"
363
+ ]
364
+ },
365
+ {
366
+ "cell_type": "code",
367
+ "execution_count": null,
368
+ "metadata": {},
369
+ "outputs": [],
370
+ "source": [
371
+ "grouped_with_occupations"
372
+ ]
373
+ },
374
+ {
375
+ "cell_type": "code",
376
+ "execution_count": 15,
377
+ "metadata": {},
378
+ "outputs": [],
379
+ "source": [
380
+ "# Load employment data\n",
381
+ "bls_employment_df = pd.read_csv(\"bls_employment_may_2023.csv\")\n",
382
+ "\n",
383
+ "claude_employment_df = grouped_with_occupations.groupby(\"SOC or O*NET-SOC 2019 Title\")[\"pct_occ_scaled\"].sum().reset_index(name='claude_distribution')\n",
384
+ "\n",
385
+ "employment_df = claude_employment_df.merge(bls_employment_df, \n",
386
+ " on='SOC or O*NET-SOC 2019 Title',\n",
387
+ " how='left')"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": null,
393
+ "metadata": {},
394
+ "outputs": [],
395
+ "source": [
396
+ "# Calculate percentages and setup data\n",
397
+ "plot_df = employment_df.copy()\n",
398
+ "\n",
399
+ "def get_distribution(df, value_column):\n",
400
+ " total = df[value_column].sum()\n",
401
+ " return (df[value_column] / total * 100).round(1)\n",
402
+ "\n",
403
+ "plot_df['bls_pct'] = get_distribution(plot_df, 'bls_distribution')\n",
404
+ "plot_df['claude_pct'] = get_distribution(plot_df, 'claude_distribution')\n",
405
+ "plot_df['clean_label'] = plot_df['SOC or O*NET-SOC 2019 Title'].str.replace(' Occupations', '')\n",
406
+ "plot_df['pct_difference'] = plot_df['claude_pct'] - plot_df['bls_pct']\n",
407
+ "plot_df = plot_df.sort_values('bls_pct', ascending=True)\n",
408
+ "\n",
409
+ "# Create the plot\n",
410
+ "fig, ax = plt.subplots(figsize=(20, 12))\n",
411
+ "\n",
412
+ "# Set colors\n",
413
+ "claude_color = palette[1] \n",
414
+ "bls_color = palette[0] \n",
415
+ "\n",
416
+ "# Create lines and circles\n",
417
+ "y_positions = range(len(plot_df))\n",
418
+ "for i, row in enumerate(plot_df.itertuples()):\n",
419
+ " # Determine color based on which value is larger\n",
420
+ " line_color = claude_color if row.claude_pct > row.bls_pct else bls_color\n",
421
+ " \n",
422
+ " # Draw the line between bls and claude percentages\n",
423
+ " ax.plot([row.bls_pct, row.claude_pct], [i, i], \n",
424
+ " color=line_color, \n",
425
+ " linestyle='-', \n",
426
+ " linewidth=2.5,\n",
427
+ " zorder=1)\n",
428
+ " \n",
429
+ " # Determine label positioning\n",
430
+ " if row.claude_pct > row.bls_pct:\n",
431
+ " bls_ha = 'right'\n",
432
+ " claude_ha = 'left'\n",
433
+ " bls_offset = -0.4\n",
434
+ " claude_offset = 0.4\n",
435
+ " else:\n",
436
+ " bls_ha = 'left'\n",
437
+ " claude_ha = 'right'\n",
438
+ " bls_offset = 0.4\n",
439
+ " claude_offset = -0.4\n",
440
+ "\n",
441
+ " # Plot BLS percentage\n",
442
+ " ax.scatter([row.bls_pct], [i], \n",
443
+ " color=bls_color,\n",
444
+ " s=200,\n",
445
+ " zorder=2,\n",
446
+ " label='% of U.S. workers (BLS)' if i == 0 else \"\")\n",
447
+ " ax.text(row.bls_pct + bls_offset,\n",
448
+ " i,\n",
449
+ " f'{row.bls_pct:.1f}%',\n",
450
+ " ha=bls_ha,\n",
451
+ " va='center',\n",
452
+ " color=bls_color)\n",
453
+ " \n",
454
+ " # Plot Claude's percentage\n",
455
+ " ax.scatter([row.claude_pct], [i], \n",
456
+ " color=claude_color,\n",
457
+ " s=200,\n",
458
+ " zorder=2,\n",
459
+ " label='% of Claude conversations' if i == 0 else \"\")\n",
460
+ " ax.text(row.claude_pct + claude_offset,\n",
461
+ " i,\n",
462
+ " f'{row.claude_pct:.1f}%',\n",
463
+ " ha=claude_ha,\n",
464
+ " va='center',\n",
465
+ " color=claude_color)\n",
466
+ "\n",
467
+ "# Customize the plot\n",
468
+ "ax.set_xlabel('Percentage')\n",
469
+ "ax.set_ylabel('Occupational Category')\n",
470
+ "\n",
471
+ "# Add percentage formatter to x-axis\n",
472
+ "ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))\n",
473
+ "\n",
474
+ "# Set y-axis labels\n",
475
+ "ax.set_yticks(y_positions)\n",
476
+ "ax.set_yticklabels(plot_df['clean_label'])\n",
477
+ "\n",
478
+ "# Add legend\n",
479
+ "handles, labels = ax.get_legend_handles_labels()\n",
480
+ "handles = handles[::-1]\n",
481
+ "labels = labels[::-1]\n",
482
+ "ax.legend(handles, labels, loc='lower right', bbox_to_anchor=(1.0, 0.0))\n",
483
+ "\n",
484
+ "# Adjust grid and layout\n",
485
+ "ax.grid(axis='x', linestyle='--', alpha=0.3)\n",
486
+ "ax.set_axisbelow(True)\n",
487
+ "\n",
488
+ "# Set axis limits with padding\n",
489
+ "max_val = max(plot_df['bls_pct'].max(), plot_df['claude_pct'].max())\n",
490
+ "min_val = min(plot_df['bls_pct'].min(), plot_df['claude_pct'].min())\n",
491
+ "padding = (max_val - min_val) * 0.15\n",
492
+ "ax.set_xlim(min_val - padding, max_val + padding)\n",
493
+ "ax.set_ylim(-1, len(plot_df))\n",
494
+ "\n",
495
+ "# Adjust layout\n",
496
+ "plt.tight_layout()\n",
497
+ "plt.show()"
498
+ ]
499
+ },
500
+ {
501
+ "cell_type": "markdown",
502
+ "metadata": {},
503
+ "source": [
504
+ "### USAGE BY WAGE"
505
+ ]
506
+ },
507
+ {
508
+ "cell_type": "code",
509
+ "execution_count": 17,
510
+ "metadata": {},
511
+ "outputs": [],
512
+ "source": [
513
+ "# Read and process wage data\n",
514
+ "wage_df = pd.read_csv(\"wage_data.csv\")"
515
+ ]
516
+ },
517
+ {
518
+ "cell_type": "code",
519
+ "execution_count": null,
520
+ "metadata": {},
521
+ "outputs": [],
522
+ "source": [
523
+ "wage_df"
524
+ ]
525
+ },
526
+ {
527
+ "cell_type": "code",
528
+ "execution_count": null,
529
+ "metadata": {},
530
+ "outputs": [],
531
+ "source": [
532
+ "# Join wage and occupation data\n",
533
+ "grouped_with_occupations_and_wage = grouped_with_occupations.merge(wage_df, left_on=\"O*NET-SOC Code\", right_on=\"SOCcode\", how=\"left\")\n",
534
+ "grouped_with_occupations_and_wage"
535
+ ]
536
+ },
537
+ {
538
+ "cell_type": "code",
539
+ "execution_count": null,
540
+ "metadata": {},
541
+ "outputs": [],
542
+ "source": [
543
+ "def create_wage_distribution_plot(plot_df):\n",
544
+ " # Create figure\n",
545
+ " plt.figure(figsize=(24, 12))\n",
546
+ " \n",
547
+ " # Create scatter plot\n",
548
+ " sns.scatterplot(data=plot_df,\n",
549
+ " x='MedianSalary',\n",
550
+ " y='pct_occ_scaled',\n",
551
+ " alpha=0.5,\n",
552
+ " size='pct_occ_scaled',\n",
553
+ " sizes=(60, 400),\n",
554
+ " color=palette[0],\n",
555
+ " legend=False)\n",
556
+ " \n",
557
+ " # Style the plot\n",
558
+ " plt.xlabel('Median Wage ($)')\n",
559
+ " plt.ylabel('Percent of Conversations')\n",
560
+ " plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(lambda y, _: '{:.1f}%'.format(y)))\n",
561
+ " \n",
562
+ " # Add title\n",
563
+ " plt.title('Wage Distribution by % of Conversations'), \n",
564
+ " \n",
565
+ " # Annotate points\n",
566
+ " # Top points by percentage\n",
567
+ " top_n = 7\n",
568
+ " for _, row in plot_df.nlargest(top_n, 'pct_occ_scaled').iterrows():\n",
569
+ " plt.annotate('\\n'.join(wrap(row['Title'], width=20)), \n",
570
+ " (row['MedianSalary'], row['pct_occ_scaled']),\n",
571
+ " xytext=(5, 5), \n",
572
+ " textcoords='offset points')\n",
573
+ " \n",
574
+ " # Extreme salary points\n",
575
+ " n_extremes = 2\n",
576
+ " # Annotate lowest and highest salaries\n",
577
+ " for df_subset in [plot_df.nsmallest(n_extremes, 'MedianSalary'),\n",
578
+ " plot_df.nlargest(n_extremes, 'MedianSalary')]:\n",
579
+ " for i, row in enumerate(df_subset.iterrows()):\n",
580
+ " if i != 0: # Skip if already annotated in top_n\n",
581
+ " plt.annotate('\\n'.join(wrap(row[1]['Title'], width=20)), \n",
582
+ " (row[1]['MedianSalary'], row[1]['pct_occ_scaled']),\n",
583
+ " xytext=(5, -15),\n",
584
+ " textcoords='offset points')\n",
585
+ " \n",
586
+ " # Formatting\n",
587
+ " plt.ylim(bottom=0)\n",
588
+ " plt.grid(True, linestyle='--', alpha=0.7)\n",
589
+ " plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'${x:,.0f}'))\n",
590
+ " \n",
591
+ " plt.tight_layout()\n",
592
+ " \n",
593
+ " plt.show()\n",
594
+ " plt.close()\n",
595
+ "\n",
596
+ "# Create aggregation dictionary, excluding groupby columns\n",
597
+ "groupby_cols = [\"Title\"]\n",
598
+ "agg_dict = {col: 'first' for col in grouped_with_occupations_and_wage.columns \n",
599
+ " if col not in groupby_cols}\n",
600
+ "agg_dict['pct_occ_scaled'] = 'sum'\n",
601
+ "\n",
602
+ "\n",
603
+ "plot_df = (grouped_with_occupations_and_wage\n",
604
+ " .groupby(groupby_cols)\n",
605
+ " .agg(agg_dict)\n",
606
+ " .reset_index()\n",
607
+ " .copy())\n",
608
+ " \n",
609
+ "# Filter out null values and very low salaries\n",
610
+ "plot_df = plot_df[plot_df[\"MedianSalary\"].notnull() & \n",
611
+ " (plot_df[\"MedianSalary\"] > 100)]\n",
612
+ " \n",
613
+ "# Create and save plot for current group\n",
614
+ "create_wage_distribution_plot(plot_df)"
615
+ ]
616
+ },
617
+ {
618
+ "cell_type": "markdown",
619
+ "metadata": {},
620
+ "source": [
621
+ "### AUTOMATION VS AUGMENTATION"
622
+ ]
623
+ },
624
+ {
625
+ "cell_type": "code",
626
+ "execution_count": null,
627
+ "metadata": {},
628
+ "outputs": [],
629
+ "source": [
630
+ "automation_vs_augmentation_df = pd.read_csv(\"automation_vs_augmentation.csv\")\n",
631
+ "\n",
632
+ "def adjust_color_brightness(color, factor):\n",
633
+ " \"\"\"Adjust the brightness of a color by a factor\"\"\"\n",
634
+ " # Convert color to RGB if it's not already\n",
635
+ " if isinstance(color, str):\n",
636
+ " color = mcolors.to_rgb(color)\n",
637
+ " # Make brighter by scaling RGB values\n",
638
+ " return tuple(min(1.0, c * factor) for c in color)\n",
639
+ "\n",
640
+ "def plot_interaction_modes(df):\n",
641
+ " # Load in dataframe\n",
642
+ " plot_df = df.copy()\n",
643
+ " \n",
644
+ " # Convert cluster_name to lowercase first, then filter and normalize\n",
645
+ " plot_df['interaction_type'] = plot_df['interaction_type'].str.lower()\n",
646
+ " plot_df = plot_df[plot_df['interaction_type'] != 'none']\n",
647
+ " total = plot_df['pct'].sum()\n",
648
+ " plot_df['pct'] = plot_df['pct'] / total\n",
649
+ " \n",
650
+ " # Create category mapping\n",
651
+ " category_map = {\n",
652
+ " 'directive': 'Automation',\n",
653
+ " 'feedback loop': 'Automation',\n",
654
+ " 'task iteration': 'Augmentation',\n",
655
+ " 'learning': 'Augmentation',\n",
656
+ " 'validation': 'Augmentation'\n",
657
+ " }\n",
658
+ " \n",
659
+ " # Add category column\n",
660
+ " plot_df['category'] = plot_df['interaction_type'].map(category_map)\n",
661
+ " \n",
662
+ " # Convert to title case for plotting\n",
663
+ " plot_df['interaction_type'] = plot_df['interaction_type'].str.title()\n",
664
+ " \n",
665
+ " # Create color variants\n",
666
+ " colors_a = [\n",
667
+ " palette[1],\n",
668
+ " adjust_color_brightness(palette[1], 1.3)\n",
669
+ " ]\n",
670
+ " \n",
671
+ " colors_b = [\n",
672
+ " palette[2],\n",
673
+ " adjust_color_brightness(palette[2], 1.3),\n",
674
+ " adjust_color_brightness(palette[2], 1.6)\n",
675
+ " ]\n",
676
+ " \n",
677
+ " # Create the stacked bar plot\n",
678
+ " plt.figure(figsize=(16, 6))\n",
679
+ " \n",
680
+ " # Create separate dataframes for each category and sort them to match visual order\n",
681
+ " automation_df = plot_df[plot_df['category'] == 'Automation'].sort_values('interaction_type', ascending=False)\n",
682
+ " augmentation_df = plot_df[plot_df['category'] == 'Augmentation'].sort_values('interaction_type', ascending=False)\n",
683
+ " \n",
684
+ " # Calculate positions for the bars\n",
685
+ " bar_positions = [0, 1]\n",
686
+ " bar_width = 0.8\n",
687
+ " \n",
688
+ " # Create the stacked bars for each category\n",
689
+ " left_auto = 0\n",
690
+ " handles, labels = [], [] # Initialize empty lists for legend ordering\n",
691
+ " \n",
692
+ " # First plot automation bars but save their handles/labels\n",
693
+ " auto_handles, auto_labels = [], []\n",
694
+ " for i, (_, row) in enumerate(automation_df.iterrows()):\n",
695
+ " bar = plt.barh(0, row['pct'], left=left_auto, height=bar_width, \n",
696
+ " color=colors_a[i])\n",
697
+ " auto_handles.append(bar)\n",
698
+ " auto_labels.append(row['interaction_type'])\n",
699
+ " plt.text(left_auto + row['pct']/2, 0, \n",
700
+ " f'{row[\"pct\"]*100:.1f}%', \n",
701
+ " ha='center', va='center',\n",
702
+ " color='white')\n",
703
+ " left_auto += row['pct']\n",
704
+ " \n",
705
+ " # Plot augmentation bars and save handles/labels\n",
706
+ " left_aug = 0\n",
707
+ " aug_handles, aug_labels = [], []\n",
708
+ " for i, (_, row) in enumerate(augmentation_df.iterrows()):\n",
709
+ " bar = plt.barh(1, row['pct'], left=left_aug, height=bar_width,\n",
710
+ " color=colors_b[i])\n",
711
+ " aug_handles.append(bar)\n",
712
+ " aug_labels.append(row['interaction_type'])\n",
713
+ " plt.text(left_aug + row['pct']/2, 1, \n",
714
+ " f'{row[\"pct\"]*100:.1f}%', \n",
715
+ " ha='center', va='center',\n",
716
+ " color='white')\n",
717
+ " left_aug += row['pct']\n",
718
+ " \n",
719
+ " # Customize the plot\n",
720
+ " plt.yticks(bar_positions, ['Automation', 'Augmentation'])\n",
721
+ " plt.xlabel('Percentage of Conversations')\n",
722
+ " \n",
723
+ " # Create legend with custom order\n",
724
+ " # Combine handles and labels in the desired order\n",
725
+ " all_handles = aug_handles + auto_handles\n",
726
+ " all_labels = aug_labels + auto_labels\n",
727
+ " \n",
728
+ " # Create legend with specified order\n",
729
+ " desired_order = ['Validation', 'Task Iteration', 'Learning', 'Feedback Loop', 'Directive'] \n",
730
+ " ordered_handles = []\n",
731
+ " ordered_labels = []\n",
732
+ " \n",
733
+ " for label in desired_order:\n",
734
+ " idx = all_labels.index(label)\n",
735
+ " ordered_handles.append(all_handles[idx])\n",
736
+ " ordered_labels.append(all_labels[idx])\n",
737
+ " \n",
738
+ " plt.legend(ordered_handles, ordered_labels, loc='lower right')\n",
739
+ " \n",
740
+ " plt.tight_layout()\n",
741
+ "\n",
742
+ "plot_interaction_modes(automation_vs_augmentation_df)"
743
+ ]
744
+ }
745
+ ],
746
+ "metadata": {
747
+ "kernelspec": {
748
+ "display_name": "py311",
749
+ "language": "python",
750
+ "name": "python3"
751
+ },
752
+ "language_info": {
753
+ "codemirror_mode": {
754
+ "name": "ipython",
755
+ "version": 3
756
+ },
757
+ "file_extension": ".py",
758
+ "mimetype": "text/x-python",
759
+ "name": "python",
760
+ "nbconvert_exporter": "python",
761
+ "pygments_lexer": "ipython3",
762
+ "version": "3.11.11"
763
+ }
764
+ },
765
+ "nbformat": 4,
766
+ "nbformat_minor": 2
767
+ }
release_2025_02_10/plots/automation_vs_augmentation.png ADDED

Git LFS Details

  • SHA256: fd534bbf2b60f501dd093abd1e486e561d647ef0c4c7f0d6b896adab85f89024
  • Pointer size: 130 Bytes
  • Size of remote file: 38 kB
release_2025_02_10/plots/occupational_category_distribution.png ADDED

Git LFS Details

  • SHA256: e1f5c0a500075a17ca506b58e328eb266c55210e40eda5389f6aa0fec1a1ccee
  • Pointer size: 131 Bytes
  • Size of remote file: 142 kB
release_2025_02_10/plots/occupational_category_distribution_bls.png ADDED

Git LFS Details

  • SHA256: 5a0398fcb5542576c245234f48263ed39f89b5ed6b6f042256886aa19eba3b20
  • Pointer size: 131 Bytes
  • Size of remote file: 230 kB
release_2025_02_10/plots/occupations_distribution.png ADDED

Git LFS Details

  • SHA256: 899ec11727036272bcedff7b49984ca24c9885203698097c1bfab14de2edbd52
  • Pointer size: 131 Bytes
  • Size of remote file: 105 kB
release_2025_02_10/plots/task_distribution.png ADDED

Git LFS Details

  • SHA256: 489664776ce00dca17f1ff48ee53117ad49b6f88ee8728651251ce59ece7a7ad
  • Pointer size: 131 Bytes
  • Size of remote file: 222 kB
release_2025_02_10/plots/wage_distribution.png ADDED

Git LFS Details

  • SHA256: b4bd0f32c587dbf68d412830aa8de1a31c8aba6c827fd7ae1436ebb57b2506d1
  • Pointer size: 131 Bytes
  • Size of remote file: 214 kB
release_2025_02_10/wage_data.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/README.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Anthropic Economic Index: Insights from Claude 3.7 Sonnet
2
+ ## Analysis Replication Notebook
3
+
4
+ This notebook contains the code used to produce the visualizations and analysis for the Anthropic Economic Index report based on Claude 3.7 Sonnet data. It analyzes how different occupations interact with AI systems through automation and augmentation patterns derived from real-world usage data.
5
+
6
+ ## Data Files in this Directory
7
+
8
+ - **cluster_level_dataset**: A folder containing data released at the cluster level, including mappings to O*NET tasks, automation vs. augmentation, and "extended thinking" mode fraction
9
+ - **onet_task_statements.csv**: Contains O*NET task statements with their associated occupational codes
10
+ - **SOC_Structure.csv**: Standard Occupational Classification (SOC) structure data with major group codes and titles
11
+ - **task_pct_v1.csv**: Version 1 of task percentage data
12
+ - **task_pct_v2.csv**: Version 2 of task percentage data (current)
13
+ - **automation_vs_augmentation_by_task.csv**: Data on automation vs. augmentation classifications by task
14
+ - **automation_vs_augmentation_v1.csv**: Version 1 of automation vs. augmentation interaction type data
15
+ - **automation_vs_augmentation_v2.csv**: Version 2 of automation vs. augmentation interaction type data
16
+ - **task_thinking_fractions.csv**: Fraction of each O*NET task with its associated "extended thinking" mode fraction
17
+
18
+ ## Data Dictionary
19
+
20
+ ### onet_task_statements.csv
21
+ | Field | Description |
22
+ |-------|-------------|
23
+ | O*NET-SOC Code | Occupational code from the O*NET-SOC system |
24
+ | Title | Occupational title |
25
+ | Task | Description of specific occupational task |
26
+
27
+ ### SOC_Structure.csv
28
+ | Field | Description |
29
+ |-------|-------------|
30
+ | Major Group | Two-digit code identifying major occupational group |
31
+ | SOC or O*NET-SOC 2019 Title | Title of the major occupational group |
32
+
33
+ ### task_pct_v1.csv and task_pct_v2.csv
34
+ | Field | Description |
35
+ |-------|-------------|
36
+ | task_name | Normalized name of the task |
37
+ | pct | Percentage of task prevalence in dataset |
38
+
39
+ ### automation_vs_augmentation_by_task.csv
40
+ | Field | Description |
41
+ |-------|-------------|
42
+ | task_name | Normalized name of the task |
43
+ | directive | Ratio indicating directive automation pattern (0-1) |
44
+ | feedback_loop | Ratio indicating feedback loop automation pattern (0-1) |
45
+ | validation | Ratio indicating validation augmentation pattern (0-1) |
46
+ | task_iteration | Ratio indicating task iteration augmentation pattern (0-1) |
47
+ | learning | Ratio indicating learning augmentation pattern (0-1) |
48
+ | filtered | Ratio indicating filtered (excluded) tasks (0-1) |
49
+
50
+ ### automation_vs_augmentation_v1.csv and automation_vs_augmentation_v2.csv
51
+ | Field | Description |
52
+ |-------|-------------|
53
+ | interaction_type | Type of interaction (directive, feedback loop, validation, task iteration, learning, none) |
54
+ | pct | Percentage of this interaction type in the dataset |
55
+
56
+ ### task_thinking_fractions.csv
57
+ | Field | Description |
58
+ |-------|-------------|
59
+ | task_name | Normalized name of the task |
60
+ | thinking_fraction | Ratio of this task that used extended thinking mode |
61
+
62
+ ## Running the analysis
63
+ Open `v2_report_replication.ipynb` in a notebook editor and run the cells in order.
release_2025_03_27/SOC_Structure.csv ADDED
@@ -0,0 +1,1597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Major Group,Minor Group,Broad Occupation,Detailed Occupation,Detailed O*NET-SOC,SOC or O*NET-SOC 2019 Title
2
+ 11-0000,,,,,Management Occupations
3
+ ,11-1000,,,,Top Executives
4
+ ,,11-1010,,,Chief Executives
5
+ ,,,11-1011,,Chief Executives
6
+ ,,,,11-1011.03,Chief Sustainability Officers
7
+ ,,11-1020,,,General and Operations Managers
8
+ ,,,11-1021,,General and Operations Managers
9
+ ,,11-1030,,,Legislators
10
+ ,,,11-1031,,Legislators
11
+ ,11-2000,,,,"Advertising, Marketing, Promotions, Public Relations, and Sales Managers"
12
+ ,,11-2010,,,Advertising and Promotions Managers
13
+ ,,,11-2011,,Advertising and Promotions Managers
14
+ ,,11-2020,,,Marketing and Sales Managers
15
+ ,,,11-2021,,Marketing Managers
16
+ ,,,11-2022,,Sales Managers
17
+ ,,11-2030,,,Public Relations and Fundraising Managers
18
+ ,,,11-2032,,Public Relations Managers
19
+ ,,,11-2033,,Fundraising Managers
20
+ ,11-3000,,,,Operations Specialties Managers
21
+ ,,11-3010,,,Administrative Services and Facilities Managers
22
+ ,,,11-3012,,Administrative Services Managers
23
+ ,,,11-3013,,Facilities Managers
24
+ ,,,,11-3013.01,Security Managers
25
+ ,,11-3020,,,Computer and Information Systems Managers
26
+ ,,,11-3021,,Computer and Information Systems Managers
27
+ ,,11-3030,,,Financial Managers
28
+ ,,,11-3031,,Financial Managers
29
+ ,,,,11-3031.01,Treasurers and Controllers
30
+ ,,,,11-3031.03,Investment Fund Managers
31
+ ,,11-3050,,,Industrial Production Managers
32
+ ,,,11-3051,,Industrial Production Managers
33
+ ,,,,11-3051.01,Quality Control Systems Managers
34
+ ,,,,11-3051.02,Geothermal Production Managers
35
+ ,,,,11-3051.03,Biofuels Production Managers
36
+ ,,,,11-3051.04,Biomass Power Plant Managers
37
+ ,,,,11-3051.06,Hydroelectric Production Managers
38
+ ,,11-3060,,,Purchasing Managers
39
+ ,,,11-3061,,Purchasing Managers
40
+ ,,11-3070,,,"Transportation, Storage, and Distribution Managers"
41
+ ,,,11-3071,,"Transportation, Storage, and Distribution Managers"
42
+ ,,,,11-3071.04,Supply Chain Managers
43
+ ,,11-3110,,,Compensation and Benefits Managers
44
+ ,,,11-3111,,Compensation and Benefits Managers
45
+ ,,11-3120,,,Human Resources Managers
46
+ ,,,11-3121,,Human Resources Managers
47
+ ,,11-3130,,,Training and Development Managers
48
+ ,,,11-3131,,Training and Development Managers
49
+ ,11-9000,,,,Other Management Occupations
50
+ ,,11-9010,,,"Farmers, Ranchers, and Other Agricultural Managers"
51
+ ,,,11-9013,,"Farmers, Ranchers, and Other Agricultural Managers"
52
+ ,,11-9020,,,Construction Managers
53
+ ,,,11-9021,,Construction Managers
54
+ ,,11-9030,,,Education and Childcare Administrators
55
+ ,,,11-9031,,"Education and Childcare Administrators, Preschool and Daycare"
56
+ ,,,11-9032,,"Education Administrators, Kindergarten through Secondary"
57
+ ,,,11-9033,,"Education Administrators, Postsecondary"
58
+ ,,,11-9039,,"Education Administrators, All Other"
59
+ ,,11-9040,,,Architectural and Engineering Managers
60
+ ,,,11-9041,,Architectural and Engineering Managers
61
+ ,,,,11-9041.01,Biofuels/Biodiesel Technology and Product Development Managers
62
+ ,,11-9050,,,Food Service Managers
63
+ ,,,11-9051,,Food Service Managers
64
+ ,,11-9070,,,Entertainment and Recreation Managers
65
+ ,,,11-9071,,Gambling Managers
66
+ ,,,11-9072,,"Entertainment and Recreation Managers, Except Gambling"
67
+ ,,11-9080,,,Lodging Managers
68
+ ,,,11-9081,,Lodging Managers
69
+ ,,11-9110,,,Medical and Health Services Managers
70
+ ,,,11-9111,,Medical and Health Services Managers
71
+ ,,11-9120,,,Natural Sciences Managers
72
+ ,,,11-9121,,Natural Sciences Managers
73
+ ,,,,11-9121.01,Clinical Research Coordinators
74
+ ,,,,11-9121.02,Water Resource Specialists
75
+ ,,11-9130,,,Postmasters and Mail Superintendents
76
+ ,,,11-9131,,Postmasters and Mail Superintendents
77
+ ,,11-9140,,,"Property, Real Estate, and Community Association Managers"
78
+ ,,,11-9141,,"Property, Real Estate, and Community Association Managers"
79
+ ,,11-9150,,,Social and Community Service Managers
80
+ ,,,11-9151,,Social and Community Service Managers
81
+ ,,11-9160,,,Emergency Management Directors
82
+ ,,,11-9161,,Emergency Management Directors
83
+ ,,11-9170,,,Personal Service Managers
84
+ ,,,11-9171,,Funeral Home Managers
85
+ ,,,11-9179,,"Personal Service Managers, All Other"
86
+ ,,,,11-9179.01,Fitness and Wellness Coordinators
87
+ ,,,,11-9179.02,Spa Managers
88
+ ,,11-9190,,,Miscellaneous Managers
89
+ ,,,11-9199,,"Managers, All Other"
90
+ ,,,,11-9199.01,Regulatory Affairs Managers
91
+ ,,,,11-9199.02,Compliance Managers
92
+ ,,,,11-9199.08,Loss Prevention Managers
93
+ ,,,,11-9199.09,Wind Energy Operations Managers
94
+ ,,,,11-9199.10,Wind Energy Development Managers
95
+ ,,,,11-9199.11,Brownfield Redevelopment Specialists and Site Managers
96
+ 13-0000,,,,,Business and Financial Operations Occupations
97
+ ,13-1000,,,,Business Operations Specialists
98
+ ,,13-1010,,,"Agents and Business Managers of Artists, Performers, and Athletes"
99
+ ,,,13-1011,,"Agents and Business Managers of Artists, Performers, and Athletes"
100
+ ,,13-1020,,,Buyers and Purchasing Agents
101
+ ,,,13-1021,,"Buyers and Purchasing Agents, Farm Products"
102
+ ,,,13-1022,,"Wholesale and Retail Buyers, Except Farm Products"
103
+ ,,,13-1023,,"Purchasing Agents, Except Wholesale, Retail, and Farm Products"
104
+ ,,13-1030,,,"Claims Adjusters, Appraisers, Examiners, and Investigators"
105
+ ,,,13-1031,,"Claims Adjusters, Examiners, and Investigators"
106
+ ,,,13-1032,,"Insurance Appraisers, Auto Damage"
107
+ ,,13-1040,,,Compliance Officers
108
+ ,,,13-1041,,Compliance Officers
109
+ ,,,,13-1041.01,Environmental Compliance Inspectors
110
+ ,,,,13-1041.03,Equal Opportunity Representatives and Officers
111
+ ,,,,13-1041.04,Government Property Inspectors and Investigators
112
+ ,,,,13-1041.06,Coroners
113
+ ,,,,13-1041.07,Regulatory Affairs Specialists
114
+ ,,,,13-1041.08,Customs Brokers
115
+ ,,13-1050,,,Cost Estimators
116
+ ,,,13-1051,,Cost Estimators
117
+ ,,13-1070,,,Human Resources Workers
118
+ ,,,13-1071,,Human Resources Specialists
119
+ ,,,13-1074,,Farm Labor Contractors
120
+ ,,,13-1075,,Labor Relations Specialists
121
+ ,,13-1080,,,Logisticians and Project Management Specialists
122
+ ,,,13-1081,,Logisticians
123
+ ,,,,13-1081.01,Logistics Engineers
124
+ ,,,,13-1081.02,Logistics Analysts
125
+ ,,,13-1082,,Project Management Specialists
126
+ ,,13-1110,,,Management Analysts
127
+ ,,,13-1111,,Management Analysts
128
+ ,,13-1120,,,"Meeting, Convention, and Event Planners"
129
+ ,,,13-1121,,"Meeting, Convention, and Event Planners"
130
+ ,,13-1130,,,Fundraisers
131
+ ,,,13-1131,,Fundraisers
132
+ ,,13-1140,,,"Compensation, Benefits, and Job Analysis Specialists"
133
+ ,,,13-1141,,"Compensation, Benefits, and Job Analysis Specialists"
134
+ ,,13-1150,,,Training and Development Specialists
135
+ ,,,13-1151,,Training and Development Specialists
136
+ ,,13-1160,,,Market Research Analysts and Marketing Specialists
137
+ ,,,13-1161,,Market Research Analysts and Marketing Specialists
138
+ ,,,,13-1161.01,Search Marketing Strategists
139
+ ,,13-1190,,,Miscellaneous Business Operations Specialists
140
+ ,,,13-1199,,"Business Operations Specialists, All Other"
141
+ ,,,,13-1199.04,Business Continuity Planners
142
+ ,,,,13-1199.05,Sustainability Specialists
143
+ ,,,,13-1199.06,Online Merchants
144
+ ,,,,13-1199.07,Security Management Specialists
145
+ ,13-2000,,,,Financial Specialists
146
+ ,,13-2010,,,Accountants and Auditors
147
+ ,,,13-2011,,Accountants and Auditors
148
+ ,,13-2020,,,Property Appraisers and Assessors
149
+ ,,,13-2022,,Appraisers of Personal and Business Property
150
+ ,,,13-2023,,Appraisers and Assessors of Real Estate
151
+ ,,13-2030,,,Budget Analysts
152
+ ,,,13-2031,,Budget Analysts
153
+ ,,13-2040,,,Credit Analysts
154
+ ,,,13-2041,,Credit Analysts
155
+ ,,13-2050,,,Financial Analysts and Advisors
156
+ ,,,13-2051,,Financial and Investment Analysts
157
+ ,,,13-2052,,Personal Financial Advisors
158
+ ,,,13-2053,,Insurance Underwriters
159
+ ,,,13-2054,,Financial Risk Specialists
160
+ ,,13-2060,,,Financial Examiners
161
+ ,,,13-2061,,Financial Examiners
162
+ ,,13-2070,,,Credit Counselors and Loan Officers
163
+ ,,,13-2071,,Credit Counselors
164
+ ,,,13-2072,,Loan Officers
165
+ ,,13-2080,,,"Tax Examiners, Collectors and Preparers, and Revenue Agents"
166
+ ,,,13-2081,,"Tax Examiners and Collectors, and Revenue Agents"
167
+ ,,,13-2082,,Tax Preparers
168
+ ,,13-2090,,,Miscellaneous Financial Specialists
169
+ ,,,13-2099,,"Financial Specialists, All Other"
170
+ ,,,,13-2099.01,Financial Quantitative Analysts
171
+ ,,,,13-2099.04,"Fraud Examiners, Investigators and Analysts"
172
+ 15-0000,,,,,Computer and Mathematical Occupations
173
+ ,15-1200,,,,Computer Occupations
174
+ ,,15-1210,,,Computer and Information Analysts
175
+ ,,,15-1211,,Computer Systems Analysts
176
+ ,,,,15-1211.01,Health Informatics Specialists
177
+ ,,,15-1212,,Information Security Analysts
178
+ ,,15-1220,,,Computer and Information Research Scientists
179
+ ,,,15-1221,,Computer and Information Research Scientists
180
+ ,,15-1230,,,Computer Support Specialists
181
+ ,,,15-1231,,Computer Network Support Specialists
182
+ ,,,15-1232,,Computer User Support Specialists
183
+ ,,15-1240,,,Database and Network Administrators and Architects
184
+ ,,,15-1241,,Computer Network Architects
185
+ ,,,,15-1241.01,Telecommunications Engineering Specialists
186
+ ,,,15-1242,,Database Administrators
187
+ ,,,15-1243,,Database Architects
188
+ ,,,,15-1243.01,Data Warehousing Specialists
189
+ ,,,15-1244,,Network and Computer Systems Administrators
190
+ ,,15-1250,,,"Software and Web Developers, Programmers, and Testers"
191
+ ,,,15-1251,,Computer Programmers
192
+ ,,,15-1252,,Software Developers
193
+ ,,,15-1253,,Software Quality Assurance Analysts and Testers
194
+ ,,,15-1254,,Web Developers
195
+ ,,,15-1255,,Web and Digital Interface Designers
196
+ ,,,,15-1255.01,Video Game Designers
197
+ ,,15-1290,,,Miscellaneous Computer Occupations
198
+ ,,,15-1299,,"Computer Occupations, All Other"
199
+ ,,,,15-1299.01,Web Administrators
200
+ ,,,,15-1299.02,Geographic Information Systems Technologists and Technicians
201
+ ,,,,15-1299.03,Document Management Specialists
202
+ ,,,,15-1299.04,Penetration Testers
203
+ ,,,,15-1299.05,Information Security Engineers
204
+ ,,,,15-1299.06,Digital Forensics Analysts
205
+ ,,,,15-1299.07,Blockchain Engineers
206
+ ,,,,15-1299.08,Computer Systems Engineers/Architects
207
+ ,,,,15-1299.09,Information Technology Project Managers
208
+ ,15-2000,,,,Mathematical Science Occupations
209
+ ,,15-2010,,,Actuaries
210
+ ,,,15-2011,,Actuaries
211
+ ,,15-2020,,,Mathematicians
212
+ ,,,15-2021,,Mathematicians
213
+ ,,15-2030,,,Operations Research Analysts
214
+ ,,,15-2031,,Operations Research Analysts
215
+ ,,15-2040,,,Statisticians
216
+ ,,,15-2041,,Statisticians
217
+ ,,,,15-2041.01,Biostatisticians
218
+ ,,15-2050,,,Data Scientists
219
+ ,,,15-2051,,Data Scientists
220
+ ,,,,15-2051.01,Business Intelligence Analysts
221
+ ,,,,15-2051.02,Clinical Data Managers
222
+ ,,15-2090,,,Miscellaneous Mathematical Science Occupations
223
+ ,,,15-2099,,"Mathematical Science Occupations, All Other"
224
+ ,,,,15-2099.01,Bioinformatics Technicians
225
+ 17-0000,,,,,Architecture and Engineering Occupations
226
+ ,17-1000,,,,"Architects, Surveyors, and Cartographers"
227
+ ,,17-1010,,,"Architects, Except Naval"
228
+ ,,,17-1011,,"Architects, Except Landscape and Naval"
229
+ ,,,17-1012,,Landscape Architects
230
+ ,,17-1020,,,"Surveyors, Cartographers, and Photogrammetrists"
231
+ ,,,17-1021,,Cartographers and Photogrammetrists
232
+ ,,,17-1022,,Surveyors
233
+ ,,,,17-1022.01,Geodetic Surveyors
234
+ ,17-2000,,,,Engineers
235
+ ,,17-2010,,,Aerospace Engineers
236
+ ,,,17-2011,,Aerospace Engineers
237
+ ,,17-2020,,,Agricultural Engineers
238
+ ,,,17-2021,,Agricultural Engineers
239
+ ,,17-2030,,,Bioengineers and Biomedical Engineers
240
+ ,,,17-2031,,Bioengineers and Biomedical Engineers
241
+ ,,17-2040,,,Chemical Engineers
242
+ ,,,17-2041,,Chemical Engineers
243
+ ,,17-2050,,,Civil Engineers
244
+ ,,,17-2051,,Civil Engineers
245
+ ,,,,17-2051.01,Transportation Engineers
246
+ ,,,,17-2051.02,Water/Wastewater Engineers
247
+ ,,17-2060,,,Computer Hardware Engineers
248
+ ,,,17-2061,,Computer Hardware Engineers
249
+ ,,17-2070,,,Electrical and Electronics Engineers
250
+ ,,,17-2071,,Electrical Engineers
251
+ ,,,17-2072,,"Electronics Engineers, Except Computer"
252
+ ,,,,17-2072.01,Radio Frequency Identification Device Specialists
253
+ ,,17-2080,,,Environmental Engineers
254
+ ,,,17-2081,,Environmental Engineers
255
+ ,,17-2110,,,"Industrial Engineers, Including Health and Safety"
256
+ ,,,17-2111,,"Health and Safety Engineers, Except Mining Safety Engineers and Inspectors"
257
+ ,,,,17-2111.02,Fire-Prevention and Protection Engineers
258
+ ,,,17-2112,,Industrial Engineers
259
+ ,,,,17-2112.01,Human Factors Engineers and Ergonomists
260
+ ,,,,17-2112.02,Validation Engineers
261
+ ,,,,17-2112.03,Manufacturing Engineers
262
+ ,,17-2120,,,Marine Engineers and Naval Architects
263
+ ,,,17-2121,,Marine Engineers and Naval Architects
264
+ ,,17-2130,,,Materials Engineers
265
+ ,,,17-2131,,Materials Engineers
266
+ ,,17-2140,,,Mechanical Engineers
267
+ ,,,17-2141,,Mechanical Engineers
268
+ ,,,,17-2141.01,Fuel Cell Engineers
269
+ ,,,,17-2141.02,Automotive Engineers
270
+ ,,17-2150,,,"Mining and Geological Engineers, Including Mining Safety Engineers"
271
+ ,,,17-2151,,"Mining and Geological Engineers, Including Mining Safety Engineers"
272
+ ,,17-2160,,,Nuclear Engineers
273
+ ,,,17-2161,,Nuclear Engineers
274
+ ,,17-2170,,,Petroleum Engineers
275
+ ,,,17-2171,,Petroleum Engineers
276
+ ,,17-2190,,,Miscellaneous Engineers
277
+ ,,,17-2199,,"Engineers, All Other"
278
+ ,,,,17-2199.03,"Energy Engineers, Except Wind and Solar"
279
+ ,,,,17-2199.05,Mechatronics Engineers
280
+ ,,,,17-2199.06,Microsystems Engineers
281
+ ,,,,17-2199.07,Photonics Engineers
282
+ ,,,,17-2199.08,Robotics Engineers
283
+ ,,,,17-2199.09,Nanosystems Engineers
284
+ ,,,,17-2199.10,Wind Energy Engineers
285
+ ,,,,17-2199.11,Solar Energy Systems Engineers
286
+ ,17-3000,,,,"Drafters, Engineering Technicians, and Mapping Technicians"
287
+ ,,17-3010,,,Drafters
288
+ ,,,17-3011,,Architectural and Civil Drafters
289
+ ,,,17-3012,,Electrical and Electronics Drafters
290
+ ,,,17-3013,,Mechanical Drafters
291
+ ,,,17-3019,,"Drafters, All Other"
292
+ ,,17-3020,,,"Engineering Technologists and Technicians, Except Drafters"
293
+ ,,,17-3021,,Aerospace Engineering and Operations Technologists and Technicians
294
+ ,,,17-3022,,Civil Engineering Technologists and Technicians
295
+ ,,,17-3023,,Electrical and Electronic Engineering Technologists and Technicians
296
+ ,,,17-3024,,Electro-Mechanical and Mechatronics Technologists and Technicians
297
+ ,,,,17-3024.01,Robotics Technicians
298
+ ,,,17-3025,,Environmental Engineering Technologists and Technicians
299
+ ,,,17-3026,,Industrial Engineering Technologists and Technicians
300
+ ,,,,17-3026.01,Nanotechnology Engineering Technologists and Technicians
301
+ ,,,17-3027,,Mechanical Engineering Technologists and Technicians
302
+ ,,,,17-3027.01,Automotive Engineering Technicians
303
+ ,,,17-3028,,Calibration Technologists and Technicians
304
+ ,,,17-3029,,"Engineering Technologists and Technicians, Except Drafters, All Other"
305
+ ,,,,17-3029.01,Non-Destructive Testing Specialists
306
+ ,,,,17-3029.08,Photonics Technicians
307
+ ,,17-3030,,,Surveying and Mapping Technicians
308
+ ,,,17-3031,,Surveying and Mapping Technicians
309
+ 19-0000,,,,,"Life, Physical, and Social Science Occupations"
310
+ ,19-1000,,,,Life Scientists
311
+ ,,19-1010,,,Agricultural and Food Scientists
312
+ ,,,19-1011,,Animal Scientists
313
+ ,,,19-1012,,Food Scientists and Technologists
314
+ ,,,19-1013,,Soil and Plant Scientists
315
+ ,,19-1020,,,Biological Scientists
316
+ ,,,19-1021,,Biochemists and Biophysicists
317
+ ,,,19-1022,,Microbiologists
318
+ ,,,19-1023,,Zoologists and Wildlife Biologists
319
+ ,,,19-1029,,"Biological Scientists, All Other"
320
+ ,,,,19-1029.01,Bioinformatics Scientists
321
+ ,,,,19-1029.02,Molecular and Cellular Biologists
322
+ ,,,,19-1029.03,Geneticists
323
+ ,,,,19-1029.04,Biologists
324
+ ,,19-1030,,,Conservation Scientists and Foresters
325
+ ,,,19-1031,,Conservation Scientists
326
+ ,,,,19-1031.02,Range Managers
327
+ ,,,,19-1031.03,Park Naturalists
328
+ ,,,19-1032,,Foresters
329
+ ,,19-1040,,,Medical Scientists
330
+ ,,,19-1041,,Epidemiologists
331
+ ,,,19-1042,,"Medical Scientists, Except Epidemiologists"
332
+ ,,19-1090,,,Miscellaneous Life Scientists
333
+ ,,,19-1099,,"Life Scientists, All Other"
334
+ ,19-2000,,,,Physical Scientists
335
+ ,,19-2010,,,Astronomers and Physicists
336
+ ,,,19-2011,,Astronomers
337
+ ,,,19-2012,,Physicists
338
+ ,,19-2020,,,Atmospheric and Space Scientists
339
+ ,,,19-2021,,Atmospheric and Space Scientists
340
+ ,,19-2030,,,Chemists and Materials Scientists
341
+ ,,,19-2031,,Chemists
342
+ ,,,19-2032,,Materials Scientists
343
+ ,,19-2040,,,Environmental Scientists and Geoscientists
344
+ ,,,19-2041,,"Environmental Scientists and Specialists, Including Health"
345
+ ,,,,19-2041.01,Climate Change Policy Analysts
346
+ ,,,,19-2041.02,Environmental Restoration Planners
347
+ ,,,,19-2041.03,Industrial Ecologists
348
+ ,,,19-2042,,"Geoscientists, Except Hydrologists and Geographers"
349
+ ,,,19-2043,,Hydrologists
350
+ ,,19-2090,,,Miscellaneous Physical Scientists
351
+ ,,,19-2099,,"Physical Scientists, All Other"
352
+ ,,,,19-2099.01,Remote Sensing Scientists and Technologists
353
+ ,19-3000,,,,Social Scientists and Related Workers
354
+ ,,19-3010,,,Economists
355
+ ,,,19-3011,,Economists
356
+ ,,,,19-3011.01,Environmental Economists
357
+ ,,19-3020,,,Survey Researchers
358
+ ,,,19-3022,,Survey Researchers
359
+ ,,19-3030,,,Psychologists
360
+ ,,,19-3032,,Industrial-Organizational Psychologists
361
+ ,,,19-3033,,Clinical and Counseling Psychologists
362
+ ,,,19-3034,,School Psychologists
363
+ ,,,19-3039,,"Psychologists, All Other"
364
+ ,,,,19-3039.02,Neuropsychologists
365
+ ,,,,19-3039.03,Clinical Neuropsychologists
366
+ ,,19-3040,,,Sociologists
367
+ ,,,19-3041,,Sociologists
368
+ ,,19-3050,,,Urban and Regional Planners
369
+ ,,,19-3051,,Urban and Regional Planners
370
+ ,,19-3090,,,Miscellaneous Social Scientists and Related Workers
371
+ ,,,19-3091,,Anthropologists and Archeologists
372
+ ,,,19-3092,,Geographers
373
+ ,,,19-3093,,Historians
374
+ ,,,19-3094,,Political Scientists
375
+ ,,,19-3099,,"Social Scientists and Related Workers, All Other"
376
+ ,,,,19-3099.01,Transportation Planners
377
+ ,19-4000,,,,"Life, Physical, and Social Science Technicians"
378
+ ,,19-4010,,,Agricultural and Food Science Technicians
379
+ ,,,19-4012,,Agricultural Technicians
380
+ ,,,,19-4012.01,Precision Agriculture Technicians
381
+ ,,,19-4013,,Food Science Technicians
382
+ ,,19-4020,,,Biological Technicians
383
+ ,,,19-4021,,Biological Technicians
384
+ ,,19-4030,,,Chemical Technicians
385
+ ,,,19-4031,,Chemical Technicians
386
+ ,,19-4040,,,Environmental Science and Geoscience Technicians
387
+ ,,,19-4042,,"Environmental Science and Protection Technicians, Including Health"
388
+ ,,,19-4043,,"Geological Technicians, Except Hydrologic Technicians"
389
+ ,,,19-4044,,Hydrologic Technicians
390
+ ,,19-4050,,,Nuclear Technicians
391
+ ,,,19-4051,,Nuclear Technicians
392
+ ,,,,19-4051.02,Nuclear Monitoring Technicians
393
+ ,,19-4060,,,Social Science Research Assistants
394
+ ,,,19-4061,,Social Science Research Assistants
395
+ ,,19-4070,,,Forest and Conservation Technicians
396
+ ,,,19-4071,,Forest and Conservation Technicians
397
+ ,,19-4090,,,"Miscellaneous Life, Physical, and Social Science Technicians"
398
+ ,,,19-4092,,Forensic Science Technicians
399
+ ,,,19-4099,,"Life, Physical, and Social Science Technicians, All Other"
400
+ ,,,,19-4099.01,Quality Control Analysts
401
+ ,,,,19-4099.03,Remote Sensing Technicians
402
+ ,19-5000,,,,Occupational Health and Safety Specialists and Technicians
403
+ ,,19-5010,,,Occupational Health and Safety Specialists and Technicians
404
+ ,,,19-5011,,Occupational Health and Safety Specialists
405
+ ,,,19-5012,,Occupational Health and Safety Technicians
406
+ 21-0000,,,,,Community and Social Service Occupations
407
+ ,21-1000,,,,"Counselors, Social Workers, and Other Community and Social Service Specialists"
408
+ ,,21-1010,,,Counselors
409
+ ,,,21-1011,,Substance Abuse and Behavioral Disorder Counselors
410
+ ,,,21-1012,,"Educational, Guidance, and Career Counselors and Advisors"
411
+ ,,,21-1013,,Marriage and Family Therapists
412
+ ,,,21-1014,,Mental Health Counselors
413
+ ,,,21-1015,,Rehabilitation Counselors
414
+ ,,,21-1019,,"Counselors, All Other"
415
+ ,,21-1020,,,Social Workers
416
+ ,,,21-1021,,"Child, Family, and School Social Workers"
417
+ ,,,21-1022,,Healthcare Social Workers
418
+ ,,,21-1023,,Mental Health and Substance Abuse Social Workers
419
+ ,,,21-1029,,"Social Workers, All Other"
420
+ ,,21-1090,,,Miscellaneous Community and Social Service Specialists
421
+ ,,,21-1091,,Health Education Specialists
422
+ ,,,21-1092,,Probation Officers and Correctional Treatment Specialists
423
+ ,,,21-1093,,Social and Human Service Assistants
424
+ ,,,21-1094,,Community Health Workers
425
+ ,,,21-1099,,"Community and Social Service Specialists, All Other"
426
+ ,21-2000,,,,Religious Workers
427
+ ,,21-2010,,,Clergy
428
+ ,,,21-2011,,Clergy
429
+ ,,21-2020,,,"Directors, Religious Activities and Education"
430
+ ,,,21-2021,,"Directors, Religious Activities and Education"
431
+ ,,21-2090,,,Miscellaneous Religious Workers
432
+ ,,,21-2099,,"Religious Workers, All Other"
433
+ 23-0000,,,,,Legal Occupations
434
+ ,23-1000,,,,"Lawyers, Judges, and Related Workers"
435
+ ,,23-1010,,,Lawyers and Judicial Law Clerks
436
+ ,,,23-1011,,Lawyers
437
+ ,,,23-1012,,Judicial Law Clerks
438
+ ,,23-1020,,,"Judges, Magistrates, and Other Judicial Workers"
439
+ ,,,23-1021,,"Administrative Law Judges, Adjudicators, and Hearing Officers"
440
+ ,,,23-1022,,"Arbitrators, Mediators, and Conciliators"
441
+ ,,,23-1023,,"Judges, Magistrate Judges, and Magistrates"
442
+ ,23-2000,,,,Legal Support Workers
443
+ ,,23-2010,,,Paralegals and Legal Assistants
444
+ ,,,23-2011,,Paralegals and Legal Assistants
445
+ ,,23-2090,,,Miscellaneous Legal Support Workers
446
+ ,,,23-2093,,"Title Examiners, Abstractors, and Searchers"
447
+ ,,,23-2099,,"Legal Support Workers, All Other"
448
+ 25-0000,,,,,Educational Instruction and Library Occupations
449
+ ,25-1000,,,,Postsecondary Teachers
450
+ ,,25-1010,,,"Business Teachers, Postsecondary"
451
+ ,,,25-1011,,"Business Teachers, Postsecondary"
452
+ ,,25-1020,,,"Math and Computer Science Teachers, Postsecondary"
453
+ ,,,25-1021,,"Computer Science Teachers, Postsecondary"
454
+ ,,,25-1022,,"Mathematical Science Teachers, Postsecondary"
455
+ ,,25-1030,,,"Engineering and Architecture Teachers, Postsecondary"
456
+ ,,,25-1031,,"Architecture Teachers, Postsecondary"
457
+ ,,,25-1032,,"Engineering Teachers, Postsecondary"
458
+ ,,25-1040,,,"Life Sciences Teachers, Postsecondary"
459
+ ,,,25-1041,,"Agricultural Sciences Teachers, Postsecondary"
460
+ ,,,25-1042,,"Biological Science Teachers, Postsecondary"
461
+ ,,,25-1043,,"Forestry and Conservation Science Teachers, Postsecondary"
462
+ ,,25-1050,,,"Physical Sciences Teachers, Postsecondary"
463
+ ,,,25-1051,,"Atmospheric, Earth, Marine, and Space Sciences Teachers, Postsecondary"
464
+ ,,,25-1052,,"Chemistry Teachers, Postsecondary"
465
+ ,,,25-1053,,"Environmental Science Teachers, Postsecondary"
466
+ ,,,25-1054,,"Physics Teachers, Postsecondary"
467
+ ,,25-1060,,,"Social Sciences Teachers, Postsecondary"
468
+ ,,,25-1061,,"Anthropology and Archeology Teachers, Postsecondary"
469
+ ,,,25-1062,,"Area, Ethnic, and Cultural Studies Teachers, Postsecondary"
470
+ ,,,25-1063,,"Economics Teachers, Postsecondary"
471
+ ,,,25-1064,,"Geography Teachers, Postsecondary"
472
+ ,,,25-1065,,"Political Science Teachers, Postsecondary"
473
+ ,,,25-1066,,"Psychology Teachers, Postsecondary"
474
+ ,,,25-1067,,"Sociology Teachers, Postsecondary"
475
+ ,,,25-1069,,"Social Sciences Teachers, Postsecondary, All Other"
476
+ ,,25-1070,,,"Health Teachers, Postsecondary"
477
+ ,,,25-1071,,"Health Specialties Teachers, Postsecondary"
478
+ ,,,25-1072,,"Nursing Instructors and Teachers, Postsecondary"
479
+ ,,25-1080,,,"Education and Library Science Teachers, Postsecondary"
480
+ ,,,25-1081,,"Education Teachers, Postsecondary"
481
+ ,,,25-1082,,"Library Science Teachers, Postsecondary"
482
+ ,,25-1110,,,"Law, Criminal Justice, and Social Work Teachers, Postsecondary"
483
+ ,,,25-1111,,"Criminal Justice and Law Enforcement Teachers, Postsecondary"
484
+ ,,,25-1112,,"Law Teachers, Postsecondary"
485
+ ,,,25-1113,,"Social Work Teachers, Postsecondary"
486
+ ,,25-1120,,,"Arts, Communications, History, and Humanities Teachers, Postsecondary"
487
+ ,,,25-1121,,"Art, Drama, and Music Teachers, Postsecondary"
488
+ ,,,25-1122,,"Communications Teachers, Postsecondary"
489
+ ,,,25-1123,,"English Language and Literature Teachers, Postsecondary"
490
+ ,,,25-1124,,"Foreign Language and Literature Teachers, Postsecondary"
491
+ ,,,25-1125,,"History Teachers, Postsecondary"
492
+ ,,,25-1126,,"Philosophy and Religion Teachers, Postsecondary"
493
+ ,,25-1190,,,Miscellaneous Postsecondary Teachers
494
+ ,,,25-1192,,"Family and Consumer Sciences Teachers, Postsecondary"
495
+ ,,,25-1193,,"Recreation and Fitness Studies Teachers, Postsecondary"
496
+ ,,,25-1194,,"Career/Technical Education Teachers, Postsecondary"
497
+ ,,,25-1199,,"Postsecondary Teachers, All Other"
498
+ ,25-2000,,,,"Preschool, Elementary, Middle, Secondary, and Special Education Teachers"
499
+ ,,25-2010,,,Preschool and Kindergarten Teachers
500
+ ,,,25-2011,,"Preschool Teachers, Except Special Education"
501
+ ,,,25-2012,,"Kindergarten Teachers, Except Special Education"
502
+ ,,25-2020,,,Elementary and Middle School Teachers
503
+ ,,,25-2021,,"Elementary School Teachers, Except Special Education"
504
+ ,,,25-2022,,"Middle School Teachers, Except Special and Career/Technical Education"
505
+ ,,,25-2023,,"Career/Technical Education Teachers, Middle School"
506
+ ,,25-2030,,,Secondary School Teachers
507
+ ,,,25-2031,,"Secondary School Teachers, Except Special and Career/Technical Education"
508
+ ,,,25-2032,,"Career/Technical Education Teachers, Secondary School"
509
+ ,,25-2050,,,Special Education Teachers
510
+ ,,,25-2051,,"Special Education Teachers, Preschool"
511
+ ,,,25-2055,,"Special Education Teachers, Kindergarten"
512
+ ,,,25-2056,,"Special Education Teachers, Elementary School"
513
+ ,,,25-2057,,"Special Education Teachers, Middle School"
514
+ ,,,25-2058,,"Special Education Teachers, Secondary School"
515
+ ,,,25-2059,,"Special Education Teachers, All Other"
516
+ ,,,,25-2059.01,Adapted Physical Education Specialists
517
+ ,25-3000,,,,Other Teachers and Instructors
518
+ ,,25-3010,,,"Adult Basic Education, Adult Secondary Education, and English as a Second Language Instructors"
519
+ ,,,25-3011,,"Adult Basic Education, Adult Secondary Education, and English as a Second Language Instructors"
520
+ ,,25-3020,,,Self-Enrichment Teachers
521
+ ,,,25-3021,,Self-Enrichment Teachers
522
+ ,,25-3030,,,"Substitute Teachers, Short-Term"
523
+ ,,,25-3031,,"Substitute Teachers, Short-Term"
524
+ ,,25-3040,,,Tutors
525
+ ,,,25-3041,,Tutors
526
+ ,,25-3090,,,Miscellaneous Teachers and Instructors
527
+ ,,,25-3099,,"Teachers and Instructors, All Other"
528
+ ,25-4000,,,,"Librarians, Curators, and Archivists"
529
+ ,,25-4010,,,"Archivists, Curators, and Museum Technicians"
530
+ ,,,25-4011,,Archivists
531
+ ,,,25-4012,,Curators
532
+ ,,,25-4013,,Museum Technicians and Conservators
533
+ ,,25-4020,,,Librarians and Media Collections Specialists
534
+ ,,,25-4022,,Librarians and Media Collections Specialists
535
+ ,,25-4030,,,Library Technicians
536
+ ,,,25-4031,,Library Technicians
537
+ ,25-9000,,,,Other Educational Instruction and Library Occupations
538
+ ,,25-9020,,,Farm and Home Management Educators
539
+ ,,,25-9021,,Farm and Home Management Educators
540
+ ,,25-9030,,,Instructional Coordinators
541
+ ,,,25-9031,,Instructional Coordinators
542
+ ,,25-9040,,,Teaching Assistants
543
+ ,,,25-9042,,"Teaching Assistants, Preschool, Elementary, Middle, and Secondary School, Except Special Education"
544
+ ,,,25-9043,,"Teaching Assistants, Special Education"
545
+ ,,,25-9044,,"Teaching Assistants, Postsecondary"
546
+ ,,,25-9049,,"Teaching Assistants, All Other"
547
+ ,,25-9090,,,Miscellaneous Educational Instruction and Library Workers
548
+ ,,,25-9099,,"Educational Instruction and Library Workers, All Other"
549
+ 27-0000,,,,,"Arts, Design, Entertainment, Sports, and Media Occupations"
550
+ ,27-1000,,,,Art and Design Workers
551
+ ,,27-1010,,,Artists and Related Workers
552
+ ,,,27-1011,,Art Directors
553
+ ,,,27-1012,,Craft Artists
554
+ ,,,27-1013,,"Fine Artists, Including Painters, Sculptors, and Illustrators"
555
+ ,,,27-1014,,Special Effects Artists and Animators
556
+ ,,,27-1019,,"Artists and Related Workers, All Other"
557
+ ,,27-1020,,,Designers
558
+ ,,,27-1021,,Commercial and Industrial Designers
559
+ ,,,27-1022,,Fashion Designers
560
+ ,,,27-1023,,Floral Designers
561
+ ,,,27-1024,,Graphic Designers
562
+ ,,,27-1025,,Interior Designers
563
+ ,,,27-1026,,Merchandise Displayers and Window Trimmers
564
+ ,,,27-1027,,Set and Exhibit Designers
565
+ ,,,27-1029,,"Designers, All Other"
566
+ ,27-2000,,,,"Entertainers and Performers, Sports and Related Workers"
567
+ ,,27-2010,,,"Actors, Producers, and Directors"
568
+ ,,,27-2011,,Actors
569
+ ,,,27-2012,,Producers and Directors
570
+ ,,,,27-2012.03,Media Programming Directors
571
+ ,,,,27-2012.04,Talent Directors
572
+ ,,,,27-2012.05,Media Technical Directors/Managers
573
+ ,,27-2020,,,"Athletes, Coaches, Umpires, and Related Workers"
574
+ ,,,27-2021,,Athletes and Sports Competitors
575
+ ,,,27-2022,,Coaches and Scouts
576
+ ,,,27-2023,,"Umpires, Referees, and Other Sports Officials"
577
+ ,,27-2030,,,Dancers and Choreographers
578
+ ,,,27-2031,,Dancers
579
+ ,,,27-2032,,Choreographers
580
+ ,,27-2040,,,"Musicians, Singers, and Related Workers"
581
+ ,,,27-2041,,Music Directors and Composers
582
+ ,,,27-2042,,Musicians and Singers
583
+ ,,27-2090,,,"Miscellaneous Entertainers and Performers, Sports and Related Workers"
584
+ ,,,27-2091,,"Disc Jockeys, Except Radio"
585
+ ,,,27-2099,,"Entertainers and Performers, Sports and Related Workers, All Other"
586
+ ,27-3000,,,,Media and Communication Workers
587
+ ,,27-3010,,,Broadcast Announcers and Radio Disc Jockeys
588
+ ,,,27-3011,,Broadcast Announcers and Radio Disc Jockeys
589
+ ,,27-3020,,,"News Analysts, Reporters and Journalists"
590
+ ,,,27-3023,,"News Analysts, Reporters, and Journalists"
591
+ ,,27-3030,,,Public Relations Specialists
592
+ ,,,27-3031,,Public Relations Specialists
593
+ ,,27-3040,,,Writers and Editors
594
+ ,,,27-3041,,Editors
595
+ ,,,27-3042,,Technical Writers
596
+ ,,,27-3043,,Writers and Authors
597
+ ,,,,27-3043.05,"Poets, Lyricists and Creative Writers"
598
+ ,,27-3090,,,Miscellaneous Media and Communication Workers
599
+ ,,,27-3091,,Interpreters and Translators
600
+ ,,,27-3092,,Court Reporters and Simultaneous Captioners
601
+ ,,,27-3099,,"Media and Communication Workers, All Other"
602
+ ,27-4000,,,,Media and Communication Equipment Workers
603
+ ,,27-4010,,,"Broadcast, Sound, and Lighting Technicians"
604
+ ,,,27-4011,,Audio and Video Technicians
605
+ ,,,27-4012,,Broadcast Technicians
606
+ ,,,27-4014,,Sound Engineering Technicians
607
+ ,,,27-4015,,Lighting Technicians
608
+ ,,27-4020,,,Photographers
609
+ ,,,27-4021,,Photographers
610
+ ,,27-4030,,,"Television, Video, and Film Camera Operators and Editors"
611
+ ,,,27-4031,,"Camera Operators, Television, Video, and Film"
612
+ ,,,27-4032,,Film and Video Editors
613
+ ,,27-4090,,,Miscellaneous Media and Communication Equipment Workers
614
+ ,,,27-4099,,"Media and Communication Equipment Workers, All Other"
615
+ 29-0000,,,,,Healthcare Practitioners and Technical Occupations
616
+ ,29-1000,,,,Healthcare Diagnosing or Treating Practitioners
617
+ ,,29-1010,,,Chiropractors
618
+ ,,,29-1011,,Chiropractors
619
+ ,,29-1020,,,Dentists
620
+ ,,,29-1021,,"Dentists, General"
621
+ ,,,29-1022,,Oral and Maxillofacial Surgeons
622
+ ,,,29-1023,,Orthodontists
623
+ ,,,29-1024,,Prosthodontists
624
+ ,,,29-1029,,"Dentists, All Other Specialists"
625
+ ,,29-1030,,,Dietitians and Nutritionists
626
+ ,,,29-1031,,Dietitians and Nutritionists
627
+ ,,29-1040,,,Optometrists
628
+ ,,,29-1041,,Optometrists
629
+ ,,29-1050,,,Pharmacists
630
+ ,,,29-1051,,Pharmacists
631
+ ,,29-1070,,,Physician Assistants
632
+ ,,,29-1071,,Physician Assistants
633
+ ,,,,29-1071.01,Anesthesiologist Assistants
634
+ ,,29-1080,,,Podiatrists
635
+ ,,,29-1081,,Podiatrists
636
+ ,,29-1120,,,Therapists
637
+ ,,,29-1122,,Occupational Therapists
638
+ ,,,,29-1122.01,"Low Vision Therapists, Orientation and Mobility Specialists, and Vision Rehabilitation Therapists"
639
+ ,,,29-1123,,Physical Therapists
640
+ ,,,29-1124,,Radiation Therapists
641
+ ,,,29-1125,,Recreational Therapists
642
+ ,,,29-1126,,Respiratory Therapists
643
+ ,,,29-1127,,Speech-Language Pathologists
644
+ ,,,29-1128,,Exercise Physiologists
645
+ ,,,29-1129,,"Therapists, All Other"
646
+ ,,,,29-1129.01,Art Therapists
647
+ ,,,,29-1129.02,Music Therapists
648
+ ,,29-1130,,,Veterinarians
649
+ ,,,29-1131,,Veterinarians
650
+ ,,29-1140,,,Registered Nurses
651
+ ,,,29-1141,,Registered Nurses
652
+ ,,,,29-1141.01,Acute Care Nurses
653
+ ,,,,29-1141.02,Advanced Practice Psychiatric Nurses
654
+ ,,,,29-1141.03,Critical Care Nurses
655
+ ,,,,29-1141.04,Clinical Nurse Specialists
656
+ ,,29-1150,,,Nurse Anesthetists
657
+ ,,,29-1151,,Nurse Anesthetists
658
+ ,,29-1160,,,Nurse Midwives
659
+ ,,,29-1161,,Nurse Midwives
660
+ ,,29-1170,,,Nurse Practitioners
661
+ ,,,29-1171,,Nurse Practitioners
662
+ ,,29-1180,,,Audiologists
663
+ ,,,29-1181,,Audiologists
664
+ ,,29-1210,,,Physicians
665
+ ,,,29-1211,,Anesthesiologists
666
+ ,,,29-1212,,Cardiologists
667
+ ,,,29-1213,,Dermatologists
668
+ ,,,29-1214,,Emergency Medicine Physicians
669
+ ,,,29-1215,,Family Medicine Physicians
670
+ ,,,29-1216,,General Internal Medicine Physicians
671
+ ,,,29-1217,,Neurologists
672
+ ,,,29-1218,,Obstetricians and Gynecologists
673
+ ,,,29-1221,,"Pediatricians, General"
674
+ ,,,29-1222,,"Physicians, Pathologists"
675
+ ,,,29-1223,,Psychiatrists
676
+ ,,,29-1224,,Radiologists
677
+ ,,,29-1229,,"Physicians, All Other"
678
+ ,,,,29-1229.01,Allergists and Immunologists
679
+ ,,,,29-1229.02,Hospitalists
680
+ ,,,,29-1229.03,Urologists
681
+ ,,,,29-1229.04,Physical Medicine and Rehabilitation Physicians
682
+ ,,,,29-1229.05,Preventive Medicine Physicians
683
+ ,,,,29-1229.06,Sports Medicine Physicians
684
+ ,,29-1240,,,Surgeons
685
+ ,,,29-1241,,"Ophthalmologists, Except Pediatric"
686
+ ,,,29-1242,,"Orthopedic Surgeons, Except Pediatric"
687
+ ,,,29-1243,,Pediatric Surgeons
688
+ ,,,29-1249,,"Surgeons, All Other"
689
+ ,,29-1290,,,Miscellaneous Healthcare Diagnosing or Treating Practitioners
690
+ ,,,29-1291,,Acupuncturists
691
+ ,,,29-1292,,Dental Hygienists
692
+ ,,,29-1299,,"Healthcare Diagnosing or Treating Practitioners, All Other"
693
+ ,,,,29-1299.01,Naturopathic Physicians
694
+ ,,,,29-1299.02,Orthoptists
695
+ ,29-2000,,,,Health Technologists and Technicians
696
+ ,,29-2010,,,Clinical Laboratory Technologists and Technicians
697
+ ,,,29-2011,,Medical and Clinical Laboratory Technologists
698
+ ,,,,29-2011.01,Cytogenetic Technologists
699
+ ,,,,29-2011.02,Cytotechnologists
700
+ ,,,,29-2011.04,Histotechnologists
701
+ ,,,29-2012,,Medical and Clinical Laboratory Technicians
702
+ ,,,,29-2012.01,Histology Technicians
703
+ ,,29-2030,,,Diagnostic Related Technologists and Technicians
704
+ ,,,29-2031,,Cardiovascular Technologists and Technicians
705
+ ,,,29-2032,,Diagnostic Medical Sonographers
706
+ ,,,29-2033,,Nuclear Medicine Technologists
707
+ ,,,29-2034,,Radiologic Technologists and Technicians
708
+ ,,,29-2035,,Magnetic Resonance Imaging Technologists
709
+ ,,,29-2036,,Medical Dosimetrists
710
+ ,,29-2040,,,Emergency Medical Technicians and Paramedics
711
+ ,,,29-2042,,Emergency Medical Technicians
712
+ ,,,29-2043,,Paramedics
713
+ ,,29-2050,,,Health Practitioner Support Technologists and Technicians
714
+ ,,,29-2051,,Dietetic Technicians
715
+ ,,,29-2052,,Pharmacy Technicians
716
+ ,,,29-2053,,Psychiatric Technicians
717
+ ,,,29-2055,,Surgical Technologists
718
+ ,,,29-2056,,Veterinary Technologists and Technicians
719
+ ,,,29-2057,,Ophthalmic Medical Technicians
720
+ ,,29-2060,,,Licensed Practical and Licensed Vocational Nurses
721
+ ,,,29-2061,,Licensed Practical and Licensed Vocational Nurses
722
+ ,,29-2070,,,Medical Records Specialists
723
+ ,,,29-2072,,Medical Records Specialists
724
+ ,,29-2080,,,"Opticians, Dispensing"
725
+ ,,,29-2081,,"Opticians, Dispensing"
726
+ ,,29-2090,,,Miscellaneous Health Technologists and Technicians
727
+ ,,,29-2091,,Orthotists and Prosthetists
728
+ ,,,29-2092,,Hearing Aid Specialists
729
+ ,,,29-2099,,"Health Technologists and Technicians, All Other"
730
+ ,,,,29-2099.01,Neurodiagnostic Technologists
731
+ ,,,,29-2099.05,Ophthalmic Medical Technologists
732
+ ,,,,29-2099.08,Patient Representatives
733
+ ,29-9000,,,,Other Healthcare Practitioners and Technical Occupations
734
+ ,,29-9020,,,Health Information Technologists and Medical Registrars
735
+ ,,,29-9021,,Health Information Technologists and Medical Registrars
736
+ ,,29-9090,,,Miscellaneous Health Practitioners and Technical Workers
737
+ ,,,29-9091,,Athletic Trainers
738
+ ,,,29-9092,,Genetic Counselors
739
+ ,,,29-9093,,Surgical Assistants
740
+ ,,,29-9099,,"Healthcare Practitioners and Technical Workers, All Other"
741
+ ,,,,29-9099.01,Midwives
742
+ 31-0000,,,,,Healthcare Support Occupations
743
+ ,31-1100,,,,"Home Health and Personal Care Aides; and Nursing Assistants, Orderlies, and Psychiatric Aides"
744
+ ,,31-1120,,,Home Health and Personal Care Aides
745
+ ,,,31-1121,,Home Health Aides
746
+ ,,,31-1122,,Personal Care Aides
747
+ ,,31-1130,,,"Nursing Assistants, Orderlies, and Psychiatric Aides"
748
+ ,,,31-1131,,Nursing Assistants
749
+ ,,,31-1132,,Orderlies
750
+ ,,,31-1133,,Psychiatric Aides
751
+ ,31-2000,,,,Occupational Therapy and Physical Therapist Assistants and Aides
752
+ ,,31-2010,,,Occupational Therapy Assistants and Aides
753
+ ,,,31-2011,,Occupational Therapy Assistants
754
+ ,,,31-2012,,Occupational Therapy Aides
755
+ ,,31-2020,,,Physical Therapist Assistants and Aides
756
+ ,,,31-2021,,Physical Therapist Assistants
757
+ ,,,31-2022,,Physical Therapist Aides
758
+ ,31-9000,,,,Other Healthcare Support Occupations
759
+ ,,31-9010,,,Massage Therapists
760
+ ,,,31-9011,,Massage Therapists
761
+ ,,31-9090,,,Miscellaneous Healthcare Support Occupations
762
+ ,,,31-9091,,Dental Assistants
763
+ ,,,31-9092,,Medical Assistants
764
+ ,,,31-9093,,Medical Equipment Preparers
765
+ ,,,31-9094,,Medical Transcriptionists
766
+ ,,,31-9095,,Pharmacy Aides
767
+ ,,,31-9096,,Veterinary Assistants and Laboratory Animal Caretakers
768
+ ,,,31-9097,,Phlebotomists
769
+ ,,,31-9099,,"Healthcare Support Workers, All Other"
770
+ ,,,,31-9099.01,Speech-Language Pathology Assistants
771
+ ,,,,31-9099.02,Endoscopy Technicians
772
+ 33-0000,,,,,Protective Service Occupations
773
+ ,33-1000,,,,Supervisors of Protective Service Workers
774
+ ,,33-1010,,,First-Line Supervisors of Law Enforcement Workers
775
+ ,,,33-1011,,First-Line Supervisors of Correctional Officers
776
+ ,,,33-1012,,First-Line Supervisors of Police and Detectives
777
+ ,,33-1020,,,First-Line Supervisors of Firefighting and Prevention Workers
778
+ ,,,33-1021,,First-Line Supervisors of Firefighting and Prevention Workers
779
+ ,,33-1090,,,"Miscellaneous First-Line Supervisors, Protective Service Workers"
780
+ ,,,33-1091,,First-Line Supervisors of Security Workers
781
+ ,,,33-1099,,"First-Line Supervisors of Protective Service Workers, All Other"
782
+ ,33-2000,,,,Firefighting and Prevention Workers
783
+ ,,33-2010,,,Firefighters
784
+ ,,,33-2011,,Firefighters
785
+ ,,33-2020,,,Fire Inspectors
786
+ ,,,33-2021,,Fire Inspectors and Investigators
787
+ ,,,33-2022,,Forest Fire Inspectors and Prevention Specialists
788
+ ,33-3000,,,,Law Enforcement Workers
789
+ ,,33-3010,,,"Bailiffs, Correctional Officers, and Jailers"
790
+ ,,,33-3011,,Bailiffs
791
+ ,,,33-3012,,Correctional Officers and Jailers
792
+ ,,33-3020,,,Detectives and Criminal Investigators
793
+ ,,,33-3021,,Detectives and Criminal Investigators
794
+ ,,,,33-3021.02,Police Identification and Records Officers
795
+ ,,,,33-3021.06,Intelligence Analysts
796
+ ,,33-3030,,,Fish and Game Wardens
797
+ ,,,33-3031,,Fish and Game Wardens
798
+ ,,33-3040,,,Parking Enforcement Workers
799
+ ,,,33-3041,,Parking Enforcement Workers
800
+ ,,33-3050,,,Police Officers
801
+ ,,,33-3051,,Police and Sheriff's Patrol Officers
802
+ ,,,,33-3051.04,Customs and Border Protection Officers
803
+ ,,,33-3052,,Transit and Railroad Police
804
+ ,33-9000,,,,Other Protective Service Workers
805
+ ,,33-9010,,,Animal Control Workers
806
+ ,,,33-9011,,Animal Control Workers
807
+ ,,33-9020,,,Private Detectives and Investigators
808
+ ,,,33-9021,,Private Detectives and Investigators
809
+ ,,33-9030,,,Security Guards and Gambling Surveillance Officers
810
+ ,,,33-9031,,Gambling Surveillance Officers and Gambling Investigators
811
+ ,,,33-9032,,Security Guards
812
+ ,,33-9090,,,Miscellaneous Protective Service Workers
813
+ ,,,33-9091,,Crossing Guards and Flaggers
814
+ ,,,33-9092,,"Lifeguards, Ski Patrol, and Other Recreational Protective Service Workers"
815
+ ,,,33-9093,,Transportation Security Screeners
816
+ ,,,33-9094,,School Bus Monitors
817
+ ,,,33-9099,,"Protective Service Workers, All Other"
818
+ ,,,,33-9099.02,Retail Loss Prevention Specialists
819
+ 35-0000,,,,,Food Preparation and Serving Related Occupations
820
+ ,35-1000,,,,Supervisors of Food Preparation and Serving Workers
821
+ ,,35-1010,,,Supervisors of Food Preparation and Serving Workers
822
+ ,,,35-1011,,Chefs and Head Cooks
823
+ ,,,35-1012,,First-Line Supervisors of Food Preparation and Serving Workers
824
+ ,35-2000,,,,Cooks and Food Preparation Workers
825
+ ,,35-2010,,,Cooks
826
+ ,,,35-2011,,"Cooks, Fast Food"
827
+ ,,,35-2012,,"Cooks, Institution and Cafeteria"
828
+ ,,,35-2013,,"Cooks, Private Household"
829
+ ,,,35-2014,,"Cooks, Restaurant"
830
+ ,,,35-2015,,"Cooks, Short Order"
831
+ ,,,35-2019,,"Cooks, All Other"
832
+ ,,35-2020,,,Food Preparation Workers
833
+ ,,,35-2021,,Food Preparation Workers
834
+ ,35-3000,,,,Food and Beverage Serving Workers
835
+ ,,35-3010,,,Bartenders
836
+ ,,,35-3011,,Bartenders
837
+ ,,35-3020,,,Fast Food and Counter Workers
838
+ ,,,35-3023,,Fast Food and Counter Workers
839
+ ,,,,35-3023.01,Baristas
840
+ ,,35-3030,,,Waiters and Waitresses
841
+ ,,,35-3031,,Waiters and Waitresses
842
+ ,,35-3040,,,"Food Servers, Nonrestaurant"
843
+ ,,,35-3041,,"Food Servers, Nonrestaurant"
844
+ ,35-9000,,,,Other Food Preparation and Serving Related Workers
845
+ ,,35-9010,,,Dining Room and Cafeteria Attendants and Bartender Helpers
846
+ ,,,35-9011,,Dining Room and Cafeteria Attendants and Bartender Helpers
847
+ ,,35-9020,,,Dishwashers
848
+ ,,,35-9021,,Dishwashers
849
+ ,,35-9030,,,"Hosts and Hostesses, Restaurant, Lounge, and Coffee Shop"
850
+ ,,,35-9031,,"Hosts and Hostesses, Restaurant, Lounge, and Coffee Shop"
851
+ ,,35-9090,,,Miscellaneous Food Preparation and Serving Related Workers
852
+ ,,,35-9099,,"Food Preparation and Serving Related Workers, All Other"
853
+ 37-0000,,,,,Building and Grounds Cleaning and Maintenance Occupations
854
+ ,37-1000,,,,Supervisors of Building and Grounds Cleaning and Maintenance Workers
855
+ ,,37-1010,,,First-Line Supervisors of Building and Grounds Cleaning and Maintenance Workers
856
+ ,,,37-1011,,First-Line Supervisors of Housekeeping and Janitorial Workers
857
+ ,,,37-1012,,"First-Line Supervisors of Landscaping, Lawn Service, and Groundskeeping Workers"
858
+ ,37-2000,,,,Building Cleaning and Pest Control Workers
859
+ ,,37-2010,,,Building Cleaning Workers
860
+ ,,,37-2011,,"Janitors and Cleaners, Except Maids and Housekeeping Cleaners"
861
+ ,,,37-2012,,Maids and Housekeeping Cleaners
862
+ ,,,37-2019,,"Building Cleaning Workers, All Other"
863
+ ,,37-2020,,,Pest Control Workers
864
+ ,,,37-2021,,Pest Control Workers
865
+ ,37-3000,,,,Grounds Maintenance Workers
866
+ ,,37-3010,,,Grounds Maintenance Workers
867
+ ,,,37-3011,,Landscaping and Groundskeeping Workers
868
+ ,,,37-3012,,"Pesticide Handlers, Sprayers, and Applicators, Vegetation"
869
+ ,,,37-3013,,Tree Trimmers and Pruners
870
+ ,,,37-3019,,"Grounds Maintenance Workers, All Other"
871
+ 39-0000,,,,,Personal Care and Service Occupations
872
+ ,39-1000,,,,Supervisors of Personal Care and Service Workers
873
+ ,,39-1010,,,First-Line Supervisors of Entertainment and Recreation Workers
874
+ ,,,39-1013,,First-Line Supervisors of Gambling Services Workers
875
+ ,,,39-1014,,"First-Line Supervisors of Entertainment and Recreation Workers, Except Gambling Services"
876
+ ,,39-1020,,,First-Line Supervisors of Personal Service Workers
877
+ ,,,39-1022,,First-Line Supervisors of Personal Service Workers
878
+ ,39-2000,,,,Animal Care and Service Workers
879
+ ,,39-2010,,,Animal Trainers
880
+ ,,,39-2011,,Animal Trainers
881
+ ,,39-2020,,,Animal Caretakers
882
+ ,,,39-2021,,Animal Caretakers
883
+ ,39-3000,,,,Entertainment Attendants and Related Workers
884
+ ,,39-3010,,,Gambling Services Workers
885
+ ,,,39-3011,,Gambling Dealers
886
+ ,,,39-3012,,Gambling and Sports Book Writers and Runners
887
+ ,,,39-3019,,"Gambling Service Workers, All Other"
888
+ ,,39-3020,,,Motion Picture Projectionists
889
+ ,,,39-3021,,Motion Picture Projectionists
890
+ ,,39-3030,,,"Ushers, Lobby Attendants, and Ticket Takers"
891
+ ,,,39-3031,,"Ushers, Lobby Attendants, and Ticket Takers"
892
+ ,,39-3090,,,Miscellaneous Entertainment Attendants and Related Workers
893
+ ,,,39-3091,,Amusement and Recreation Attendants
894
+ ,,,39-3092,,Costume Attendants
895
+ ,,,39-3093,,"Locker Room, Coatroom, and Dressing Room Attendants"
896
+ ,,,39-3099,,"Entertainment Attendants and Related Workers, All Other"
897
+ ,39-4000,,,,Funeral Service Workers
898
+ ,,39-4010,,,Embalmers and Crematory Operators
899
+ ,,,39-4011,,Embalmers
900
+ ,,,39-4012,,Crematory Operators
901
+ ,,39-4020,,,Funeral Attendants
902
+ ,,,39-4021,,Funeral Attendants
903
+ ,,39-4030,,,"Morticians, Undertakers, and Funeral Arrangers"
904
+ ,,,39-4031,,"Morticians, Undertakers, and Funeral Arrangers"
905
+ ,39-5000,,,,Personal Appearance Workers
906
+ ,,39-5010,,,"Barbers, Hairdressers, Hairstylists and Cosmetologists"
907
+ ,,,39-5011,,Barbers
908
+ ,,,39-5012,,"Hairdressers, Hairstylists, and Cosmetologists"
909
+ ,,39-5090,,,Miscellaneous Personal Appearance Workers
910
+ ,,,39-5091,,"Makeup Artists, Theatrical and Performance"
911
+ ,,,39-5092,,Manicurists and Pedicurists
912
+ ,,,39-5093,,Shampooers
913
+ ,,,39-5094,,Skincare Specialists
914
+ ,39-6000,,,,"Baggage Porters, Bellhops, and Concierges"
915
+ ,,39-6010,,,"Baggage Porters, Bellhops, and Concierges"
916
+ ,,,39-6011,,Baggage Porters and Bellhops
917
+ ,,,39-6012,,Concierges
918
+ ,39-7000,,,,Tour and Travel Guides
919
+ ,,39-7010,,,Tour and Travel Guides
920
+ ,,,39-7011,,Tour Guides and Escorts
921
+ ,,,39-7012,,Travel Guides
922
+ ,39-9000,,,,Other Personal Care and Service Workers
923
+ ,,39-9010,,,Childcare Workers
924
+ ,,,39-9011,,Childcare Workers
925
+ ,,,,39-9011.01,Nannies
926
+ ,,39-9030,,,Recreation and Fitness Workers
927
+ ,,,39-9031,,Exercise Trainers and Group Fitness Instructors
928
+ ,,,39-9032,,Recreation Workers
929
+ ,,39-9040,,,Residential Advisors
930
+ ,,,39-9041,,Residential Advisors
931
+ ,,39-9090,,,Miscellaneous Personal Care and Service Workers
932
+ ,,,39-9099,,"Personal Care and Service Workers, All Other"
933
+ 41-0000,,,,,Sales and Related Occupations
934
+ ,41-1000,,,,Supervisors of Sales Workers
935
+ ,,41-1010,,,First-Line Supervisors of Sales Workers
936
+ ,,,41-1011,,First-Line Supervisors of Retail Sales Workers
937
+ ,,,41-1012,,First-Line Supervisors of Non-Retail Sales Workers
938
+ ,41-2000,,,,Retail Sales Workers
939
+ ,,41-2010,,,Cashiers
940
+ ,,,41-2011,,Cashiers
941
+ ,,,41-2012,,Gambling Change Persons and Booth Cashiers
942
+ ,,41-2020,,,Counter and Rental Clerks and Parts Salespersons
943
+ ,,,41-2021,,Counter and Rental Clerks
944
+ ,,,41-2022,,Parts Salespersons
945
+ ,,41-2030,,,Retail Salespersons
946
+ ,,,41-2031,,Retail Salespersons
947
+ ,41-3000,,,,"Sales Representatives, Services"
948
+ ,,41-3010,,,Advertising Sales Agents
949
+ ,,,41-3011,,Advertising Sales Agents
950
+ ,,41-3020,,,Insurance Sales Agents
951
+ ,,,41-3021,,Insurance Sales Agents
952
+ ,,41-3030,,,"Securities, Commodities, and Financial Services Sales Agents"
953
+ ,,,41-3031,,"Securities, Commodities, and Financial Services Sales Agents"
954
+ ,,41-3040,,,Travel Agents
955
+ ,,,41-3041,,Travel Agents
956
+ ,,41-3090,,,"Miscellaneous Sales Representatives, Services"
957
+ ,,,41-3091,,"Sales Representatives of Services, Except Advertising, Insurance, Financial Services, and Travel"
958
+ ,41-4000,,,,"Sales Representatives, Wholesale and Manufacturing"
959
+ ,,41-4010,,,"Sales Representatives, Wholesale and Manufacturing"
960
+ ,,,41-4011,,"Sales Representatives, Wholesale and Manufacturing, Technical and Scientific Products"
961
+ ,,,,41-4011.07,Solar Sales Representatives and Assessors
962
+ ,,,41-4012,,"Sales Representatives, Wholesale and Manufacturing, Except Technical and Scientific Products"
963
+ ,41-9000,,,,Other Sales and Related Workers
964
+ ,,41-9010,,,"Models, Demonstrators, and Product Promoters"
965
+ ,,,41-9011,,Demonstrators and Product Promoters
966
+ ,,,41-9012,,Models
967
+ ,,41-9020,,,Real Estate Brokers and Sales Agents
968
+ ,,,41-9021,,Real Estate Brokers
969
+ ,,,41-9022,,Real Estate Sales Agents
970
+ ,,41-9030,,,Sales Engineers
971
+ ,,,41-9031,,Sales Engineers
972
+ ,,41-9040,,,Telemarketers
973
+ ,,,41-9041,,Telemarketers
974
+ ,,41-9090,,,Miscellaneous Sales and Related Workers
975
+ ,,,41-9091,,"Door-to-Door Sales Workers, News and Street Vendors, and Related Workers"
976
+ ,,,41-9099,,"Sales and Related Workers, All Other"
977
+ 43-0000,,,,,Office and Administrative Support Occupations
978
+ ,43-1000,,,,Supervisors of Office and Administrative Support Workers
979
+ ,,43-1010,,,First-Line Supervisors of Office and Administrative Support Workers
980
+ ,,,43-1011,,First-Line Supervisors of Office and Administrative Support Workers
981
+ ,43-2000,,,,Communications Equipment Operators
982
+ ,,43-2010,,,"Switchboard Operators, Including Answering Service"
983
+ ,,,43-2011,,"Switchboard Operators, Including Answering Service"
984
+ ,,43-2020,,,Telephone Operators
985
+ ,,,43-2021,,Telephone Operators
986
+ ,,43-2090,,,Miscellaneous Communications Equipment Operators
987
+ ,,,43-2099,,"Communications Equipment Operators, All Other"
988
+ ,43-3000,,,,Financial Clerks
989
+ ,,43-3010,,,Bill and Account Collectors
990
+ ,,,43-3011,,Bill and Account Collectors
991
+ ,,43-3020,,,Billing and Posting Clerks
992
+ ,,,43-3021,,Billing and Posting Clerks
993
+ ,,43-3030,,,"Bookkeeping, Accounting, and Auditing Clerks"
994
+ ,,,43-3031,,"Bookkeeping, Accounting, and Auditing Clerks"
995
+ ,,43-3040,,,Gambling Cage Workers
996
+ ,,,43-3041,,Gambling Cage Workers
997
+ ,,43-3050,,,Payroll and Timekeeping Clerks
998
+ ,,,43-3051,,Payroll and Timekeeping Clerks
999
+ ,,43-3060,,,Procurement Clerks
1000
+ ,,,43-3061,,Procurement Clerks
1001
+ ,,43-3070,,,Tellers
1002
+ ,,,43-3071,,Tellers
1003
+ ,,43-3090,,,Miscellaneous Financial Clerks
1004
+ ,,,43-3099,,"Financial Clerks, All Other"
1005
+ ,43-4000,,,,Information and Record Clerks
1006
+ ,,43-4010,,,Brokerage Clerks
1007
+ ,,,43-4011,,Brokerage Clerks
1008
+ ,,43-4020,,,Correspondence Clerks
1009
+ ,,,43-4021,,Correspondence Clerks
1010
+ ,,43-4030,,,"Court, Municipal, and License Clerks"
1011
+ ,,,43-4031,,"Court, Municipal, and License Clerks"
1012
+ ,,43-4040,,,"Credit Authorizers, Checkers, and Clerks"
1013
+ ,,,43-4041,,"Credit Authorizers, Checkers, and Clerks"
1014
+ ,,43-4050,,,Customer Service Representatives
1015
+ ,,,43-4051,,Customer Service Representatives
1016
+ ,,43-4060,,,"Eligibility Interviewers, Government Programs"
1017
+ ,,,43-4061,,"Eligibility Interviewers, Government Programs"
1018
+ ,,43-4070,,,File Clerks
1019
+ ,,,43-4071,,File Clerks
1020
+ ,,43-4080,,,"Hotel, Motel, and Resort Desk Clerks"
1021
+ ,,,43-4081,,"Hotel, Motel, and Resort Desk Clerks"
1022
+ ,,43-4110,,,"Interviewers, Except Eligibility and Loan"
1023
+ ,,,43-4111,,"Interviewers, Except Eligibility and Loan"
1024
+ ,,43-4120,,,"Library Assistants, Clerical"
1025
+ ,,,43-4121,,"Library Assistants, Clerical"
1026
+ ,,43-4130,,,Loan Interviewers and Clerks
1027
+ ,,,43-4131,,Loan Interviewers and Clerks
1028
+ ,,43-4140,,,New Accounts Clerks
1029
+ ,,,43-4141,,New Accounts Clerks
1030
+ ,,43-4150,,,Order Clerks
1031
+ ,,,43-4151,,Order Clerks
1032
+ ,,43-4160,,,"Human Resources Assistants, Except Payroll and Timekeeping"
1033
+ ,,,43-4161,,"Human Resources Assistants, Except Payroll and Timekeeping"
1034
+ ,,43-4170,,,Receptionists and Information Clerks
1035
+ ,,,43-4171,,Receptionists and Information Clerks
1036
+ ,,43-4180,,,Reservation and Transportation Ticket Agents and Travel Clerks
1037
+ ,,,43-4181,,Reservation and Transportation Ticket Agents and Travel Clerks
1038
+ ,,43-4190,,,Miscellaneous Information and Record Clerks
1039
+ ,,,43-4199,,"Information and Record Clerks, All Other"
1040
+ ,43-5000,,,,"Material Recording, Scheduling, Dispatching, and Distributing Workers"
1041
+ ,,43-5010,,,Cargo and Freight Agents
1042
+ ,,,43-5011,,Cargo and Freight Agents
1043
+ ,,,,43-5011.01,Freight Forwarders
1044
+ ,,43-5020,,,Couriers and Messengers
1045
+ ,,,43-5021,,Couriers and Messengers
1046
+ ,,43-5030,,,Dispatchers
1047
+ ,,,43-5031,,Public Safety Telecommunicators
1048
+ ,,,43-5032,,"Dispatchers, Except Police, Fire, and Ambulance"
1049
+ ,,43-5040,,,"Meter Readers, Utilities"
1050
+ ,,,43-5041,,"Meter Readers, Utilities"
1051
+ ,,43-5050,,,Postal Service Workers
1052
+ ,,,43-5051,,Postal Service Clerks
1053
+ ,,,43-5052,,Postal Service Mail Carriers
1054
+ ,,,43-5053,,"Postal Service Mail Sorters, Processors, and Processing Machine Operators"
1055
+ ,,43-5060,,,"Production, Planning, and Expediting Clerks"
1056
+ ,,,43-5061,,"Production, Planning, and Expediting Clerks"
1057
+ ,,43-5070,,,"Shipping, Receiving, and Inventory Clerks"
1058
+ ,,,43-5071,,"Shipping, Receiving, and Inventory Clerks"
1059
+ ,,43-5110,,,"Weighers, Measurers, Checkers, and Samplers, Recordkeeping"
1060
+ ,,,43-5111,,"Weighers, Measurers, Checkers, and Samplers, Recordkeeping"
1061
+ ,43-6000,,,,Secretaries and Administrative Assistants
1062
+ ,,43-6010,,,Secretaries and Administrative Assistants
1063
+ ,,,43-6011,,Executive Secretaries and Executive Administrative Assistants
1064
+ ,,,43-6012,,Legal Secretaries and Administrative Assistants
1065
+ ,,,43-6013,,Medical Secretaries and Administrative Assistants
1066
+ ,,,43-6014,,"Secretaries and Administrative Assistants, Except Legal, Medical, and Executive"
1067
+ ,43-9000,,,,Other Office and Administrative Support Workers
1068
+ ,,43-9020,,,Data Entry and Information Processing Workers
1069
+ ,,,43-9021,,Data Entry Keyers
1070
+ ,,,43-9022,,Word Processors and Typists
1071
+ ,,43-9030,,,Desktop Publishers
1072
+ ,,,43-9031,,Desktop Publishers
1073
+ ,,43-9040,,,Insurance Claims and Policy Processing Clerks
1074
+ ,,,43-9041,,Insurance Claims and Policy Processing Clerks
1075
+ ,,43-9050,,,"Mail Clerks and Mail Machine Operators, Except Postal Service"
1076
+ ,,,43-9051,,"Mail Clerks and Mail Machine Operators, Except Postal Service"
1077
+ ,,43-9060,,,"Office Clerks, General"
1078
+ ,,,43-9061,,"Office Clerks, General"
1079
+ ,,43-9070,,,"Office Machine Operators, Except Computer"
1080
+ ,,,43-9071,,"Office Machine Operators, Except Computer"
1081
+ ,,43-9080,,,Proofreaders and Copy Markers
1082
+ ,,,43-9081,,Proofreaders and Copy Markers
1083
+ ,,43-9110,,,Statistical Assistants
1084
+ ,,,43-9111,,Statistical Assistants
1085
+ ,,43-9190,,,Miscellaneous Office and Administrative Support Workers
1086
+ ,,,43-9199,,"Office and Administrative Support Workers, All Other"
1087
+ 45-0000,,,,,"Farming, Fishing, and Forestry Occupations"
1088
+ ,45-1000,,,,"Supervisors of Farming, Fishing, and Forestry Workers"
1089
+ ,,45-1010,,,"First-Line Supervisors of Farming, Fishing, and Forestry Workers"
1090
+ ,,,45-1011,,"First-Line Supervisors of Farming, Fishing, and Forestry Workers"
1091
+ ,45-2000,,,,Agricultural Workers
1092
+ ,,45-2010,,,Agricultural Inspectors
1093
+ ,,,45-2011,,Agricultural Inspectors
1094
+ ,,45-2020,,,Animal Breeders
1095
+ ,,,45-2021,,Animal Breeders
1096
+ ,,45-2040,,,"Graders and Sorters, Agricultural Products"
1097
+ ,,,45-2041,,"Graders and Sorters, Agricultural Products"
1098
+ ,,45-2090,,,Miscellaneous Agricultural Workers
1099
+ ,,,45-2091,,Agricultural Equipment Operators
1100
+ ,,,45-2092,,"Farmworkers and Laborers, Crop, Nursery, and Greenhouse"
1101
+ ,,,45-2093,,"Farmworkers, Farm, Ranch, and Aquacultural Animals"
1102
+ ,,,45-2099,,"Agricultural Workers, All Other"
1103
+ ,45-3000,,,,Fishing and Hunting Workers
1104
+ ,,45-3030,,,Fishing and Hunting Workers
1105
+ ,,,45-3031,,Fishing and Hunting Workers
1106
+ ,45-4000,,,,"Forest, Conservation, and Logging Workers"
1107
+ ,,45-4010,,,Forest and Conservation Workers
1108
+ ,,,45-4011,,Forest and Conservation Workers
1109
+ ,,45-4020,,,Logging Workers
1110
+ ,,,45-4021,,Fallers
1111
+ ,,,45-4022,,Logging Equipment Operators
1112
+ ,,,45-4023,,Log Graders and Scalers
1113
+ ,,,45-4029,,"Logging Workers, All Other"
1114
+ 47-0000,,,,,Construction and Extraction Occupations
1115
+ ,47-1000,,,,Supervisors of Construction and Extraction Workers
1116
+ ,,47-1010,,,First-Line Supervisors of Construction Trades and Extraction Workers
1117
+ ,,,47-1011,,First-Line Supervisors of Construction Trades and Extraction Workers
1118
+ ,,,,47-1011.03,Solar Energy Installation Managers
1119
+ ,47-2000,,,,Construction Trades Workers
1120
+ ,,47-2010,,,Boilermakers
1121
+ ,,,47-2011,,Boilermakers
1122
+ ,,47-2020,,,"Brickmasons, Blockmasons, and Stonemasons"
1123
+ ,,,47-2021,,Brickmasons and Blockmasons
1124
+ ,,,47-2022,,Stonemasons
1125
+ ,,47-2030,,,Carpenters
1126
+ ,,,47-2031,,Carpenters
1127
+ ,,47-2040,,,"Carpet, Floor, and Tile Installers and Finishers"
1128
+ ,,,47-2041,,Carpet Installers
1129
+ ,,,47-2042,,"Floor Layers, Except Carpet, Wood, and Hard Tiles"
1130
+ ,,,47-2043,,Floor Sanders and Finishers
1131
+ ,,,47-2044,,Tile and Stone Setters
1132
+ ,,47-2050,,,"Cement Masons, Concrete Finishers, and Terrazzo Workers"
1133
+ ,,,47-2051,,Cement Masons and Concrete Finishers
1134
+ ,,,47-2053,,Terrazzo Workers and Finishers
1135
+ ,,47-2060,,,Construction Laborers
1136
+ ,,,47-2061,,Construction Laborers
1137
+ ,,47-2070,,,Construction Equipment Operators
1138
+ ,,,47-2071,,"Paving, Surfacing, and Tamping Equipment Operators"
1139
+ ,,,47-2072,,Pile Driver Operators
1140
+ ,,,47-2073,,Operating Engineers and Other Construction Equipment Operators
1141
+ ,,47-2080,,,"Drywall Installers, Ceiling Tile Installers, and Tapers"
1142
+ ,,,47-2081,,Drywall and Ceiling Tile Installers
1143
+ ,,,47-2082,,Tapers
1144
+ ,,47-2110,,,Electricians
1145
+ ,,,47-2111,,Electricians
1146
+ ,,47-2120,,,Glaziers
1147
+ ,,,47-2121,,Glaziers
1148
+ ,,47-2130,,,Insulation Workers
1149
+ ,,,47-2131,,"Insulation Workers, Floor, Ceiling, and Wall"
1150
+ ,,,47-2132,,"Insulation Workers, Mechanical"
1151
+ ,,47-2140,,,Painters and Paperhangers
1152
+ ,,,47-2141,,"Painters, Construction and Maintenance"
1153
+ ,,,47-2142,,Paperhangers
1154
+ ,,47-2150,,,"Pipelayers, Plumbers, Pipefitters, and Steamfitters"
1155
+ ,,,47-2151,,Pipelayers
1156
+ ,,,47-2152,,"Plumbers, Pipefitters, and Steamfitters"
1157
+ ,,,,47-2152.04,Solar Thermal Installers and Technicians
1158
+ ,,47-2160,,,Plasterers and Stucco Masons
1159
+ ,,,47-2161,,Plasterers and Stucco Masons
1160
+ ,,47-2170,,,Reinforcing Iron and Rebar Workers
1161
+ ,,,47-2171,,Reinforcing Iron and Rebar Workers
1162
+ ,,47-2180,,,Roofers
1163
+ ,,,47-2181,,Roofers
1164
+ ,,47-2210,,,Sheet Metal Workers
1165
+ ,,,47-2211,,Sheet Metal Workers
1166
+ ,,47-2220,,,Structural Iron and Steel Workers
1167
+ ,,,47-2221,,Structural Iron and Steel Workers
1168
+ ,,47-2230,,,Solar Photovoltaic Installers
1169
+ ,,,47-2231,,Solar Photovoltaic Installers
1170
+ ,47-3000,,,,"Helpers, Construction Trades"
1171
+ ,,47-3010,,,"Helpers, Construction Trades"
1172
+ ,,,47-3011,,"Helpers--Brickmasons, Blockmasons, Stonemasons, and Tile and Marble Setters"
1173
+ ,,,47-3012,,Helpers--Carpenters
1174
+ ,,,47-3013,,Helpers--Electricians
1175
+ ,,,47-3014,,"Helpers--Painters, Paperhangers, Plasterers, and Stucco Masons"
1176
+ ,,,47-3015,,"Helpers--Pipelayers, Plumbers, Pipefitters, and Steamfitters"
1177
+ ,,,47-3016,,Helpers--Roofers
1178
+ ,,,47-3019,,"Helpers, Construction Trades, All Other"
1179
+ ,47-4000,,,,Other Construction and Related Workers
1180
+ ,,47-4010,,,Construction and Building Inspectors
1181
+ ,,,47-4011,,Construction and Building Inspectors
1182
+ ,,,,47-4011.01,Energy Auditors
1183
+ ,,47-4020,,,Elevator and Escalator Installers and Repairers
1184
+ ,,,47-4021,,Elevator and Escalator Installers and Repairers
1185
+ ,,47-4030,,,Fence Erectors
1186
+ ,,,47-4031,,Fence Erectors
1187
+ ,,47-4040,,,Hazardous Materials Removal Workers
1188
+ ,,,47-4041,,Hazardous Materials Removal Workers
1189
+ ,,47-4050,,,Highway Maintenance Workers
1190
+ ,,,47-4051,,Highway Maintenance Workers
1191
+ ,,47-4060,,,Rail-Track Laying and Maintenance Equipment Operators
1192
+ ,,,47-4061,,Rail-Track Laying and Maintenance Equipment Operators
1193
+ ,,47-4070,,,Septic Tank Servicers and Sewer Pipe Cleaners
1194
+ ,,,47-4071,,Septic Tank Servicers and Sewer Pipe Cleaners
1195
+ ,,47-4090,,,Miscellaneous Construction and Related Workers
1196
+ ,,,47-4091,,Segmental Pavers
1197
+ ,,,47-4099,,"Construction and Related Workers, All Other"
1198
+ ,,,,47-4099.03,Weatherization Installers and Technicians
1199
+ ,47-5000,,,,Extraction Workers
1200
+ ,,47-5010,,,"Derrick, Rotary Drill, and Service Unit Operators, Oil and Gas"
1201
+ ,,,47-5011,,"Derrick Operators, Oil and Gas"
1202
+ ,,,47-5012,,"Rotary Drill Operators, Oil and Gas"
1203
+ ,,,47-5013,,"Service Unit Operators, Oil and Gas"
1204
+ ,,47-5020,,,Surface Mining Machine Operators and Earth Drillers
1205
+ ,,,47-5022,,"Excavating and Loading Machine and Dragline Operators, Surface Mining"
1206
+ ,,,47-5023,,"Earth Drillers, Except Oil and Gas"
1207
+ ,,47-5030,,,"Explosives Workers, Ordnance Handling Experts, and Blasters"
1208
+ ,,,47-5032,,"Explosives Workers, Ordnance Handling Experts, and Blasters"
1209
+ ,,47-5040,,,Underground Mining Machine Operators
1210
+ ,,,47-5041,,Continuous Mining Machine Operators
1211
+ ,,,47-5043,,"Roof Bolters, Mining"
1212
+ ,,,47-5044,,"Loading and Moving Machine Operators, Underground Mining"
1213
+ ,,,47-5049,,"Underground Mining Machine Operators, All Other"
1214
+ ,,47-5050,,,"Rock Splitters, Quarry"
1215
+ ,,,47-5051,,"Rock Splitters, Quarry"
1216
+ ,,47-5070,,,"Roustabouts, Oil and Gas"
1217
+ ,,,47-5071,,"Roustabouts, Oil and Gas"
1218
+ ,,47-5080,,,Helpers--Extraction Workers
1219
+ ,,,47-5081,,Helpers--Extraction Workers
1220
+ ,,47-5090,,,Miscellaneous Extraction Workers
1221
+ ,,,47-5099,,"Extraction Workers, All Other"
1222
+ 49-0000,,,,,"Installation, Maintenance, and Repair Occupations"
1223
+ ,49-1000,,,,"Supervisors of Installation, Maintenance, and Repair Workers"
1224
+ ,,49-1010,,,"First-Line Supervisors of Mechanics, Installers, and Repairers"
1225
+ ,,,49-1011,,"First-Line Supervisors of Mechanics, Installers, and Repairers"
1226
+ ,49-2000,,,,"Electrical and Electronic Equipment Mechanics, Installers, and Repairers"
1227
+ ,,49-2010,,,"Computer, Automated Teller, and Office Machine Repairers"
1228
+ ,,,49-2011,,"Computer, Automated Teller, and Office Machine Repairers"
1229
+ ,,49-2020,,,Radio and Telecommunications Equipment Installers and Repairers
1230
+ ,,,49-2021,,"Radio, Cellular, and Tower Equipment Installers and Repairers"
1231
+ ,,,49-2022,,"Telecommunications Equipment Installers and Repairers, Except Line Installers"
1232
+ ,,49-2090,,,"Miscellaneous Electrical and Electronic Equipment Mechanics, Installers, and Repairers"
1233
+ ,,,49-2091,,Avionics Technicians
1234
+ ,,,49-2092,,"Electric Motor, Power Tool, and Related Repairers"
1235
+ ,,,49-2093,,"Electrical and Electronics Installers and Repairers, Transportation Equipment"
1236
+ ,,,49-2094,,"Electrical and Electronics Repairers, Commercial and Industrial Equipment"
1237
+ ,,,49-2095,,"Electrical and Electronics Repairers, Powerhouse, Substation, and Relay"
1238
+ ,,,49-2096,,"Electronic Equipment Installers and Repairers, Motor Vehicles"
1239
+ ,,,49-2097,,Audiovisual Equipment Installers and Repairers
1240
+ ,,,49-2098,,Security and Fire Alarm Systems Installers
1241
+ ,49-3000,,,,"Vehicle and Mobile Equipment Mechanics, Installers, and Repairers"
1242
+ ,,49-3010,,,Aircraft Mechanics and Service Technicians
1243
+ ,,,49-3011,,Aircraft Mechanics and Service Technicians
1244
+ ,,49-3020,,,Automotive Technicians and Repairers
1245
+ ,,,49-3021,,Automotive Body and Related Repairers
1246
+ ,,,49-3022,,Automotive Glass Installers and Repairers
1247
+ ,,,49-3023,,Automotive Service Technicians and Mechanics
1248
+ ,,49-3030,,,Bus and Truck Mechanics and Diesel Engine Specialists
1249
+ ,,,49-3031,,Bus and Truck Mechanics and Diesel Engine Specialists
1250
+ ,,49-3040,,,Heavy Vehicle and Mobile Equipment Service Technicians and Mechanics
1251
+ ,,,49-3041,,Farm Equipment Mechanics and Service Technicians
1252
+ ,,,49-3042,,"Mobile Heavy Equipment Mechanics, Except Engines"
1253
+ ,,,49-3043,,Rail Car Repairers
1254
+ ,,49-3050,,,Small Engine Mechanics
1255
+ ,,,49-3051,,Motorboat Mechanics and Service Technicians
1256
+ ,,,49-3052,,Motorcycle Mechanics
1257
+ ,,,49-3053,,Outdoor Power Equipment and Other Small Engine Mechanics
1258
+ ,,49-3090,,,"Miscellaneous Vehicle and Mobile Equipment Mechanics, Installers, and Repairers"
1259
+ ,,,49-3091,,Bicycle Repairers
1260
+ ,,,49-3092,,Recreational Vehicle Service Technicians
1261
+ ,,,49-3093,,Tire Repairers and Changers
1262
+ ,49-9000,,,,"Other Installation, Maintenance, and Repair Occupations"
1263
+ ,,49-9010,,,Control and Valve Installers and Repairers
1264
+ ,,,49-9011,,Mechanical Door Repairers
1265
+ ,,,49-9012,,"Control and Valve Installers and Repairers, Except Mechanical Door"
1266
+ ,,49-9020,,,"Heating, Air Conditioning, and Refrigeration Mechanics and Installers"
1267
+ ,,,49-9021,,"Heating, Air Conditioning, and Refrigeration Mechanics and Installers"
1268
+ ,,49-9030,,,Home Appliance Repairers
1269
+ ,,,49-9031,,Home Appliance Repairers
1270
+ ,,49-9040,,,"Industrial Machinery Installation, Repair, and Maintenance Workers"
1271
+ ,,,49-9041,,Industrial Machinery Mechanics
1272
+ ,,,49-9043,,"Maintenance Workers, Machinery"
1273
+ ,,,49-9044,,Millwrights
1274
+ ,,,49-9045,,"Refractory Materials Repairers, Except Brickmasons"
1275
+ ,,49-9050,,,Line Installers and Repairers
1276
+ ,,,49-9051,,Electrical Power-Line Installers and Repairers
1277
+ ,,,49-9052,,Telecommunications Line Installers and Repairers
1278
+ ,,49-9060,,,Precision Instrument and Equipment Repairers
1279
+ ,,,49-9061,,Camera and Photographic Equipment Repairers
1280
+ ,,,49-9062,,Medical Equipment Repairers
1281
+ ,,,49-9063,,Musical Instrument Repairers and Tuners
1282
+ ,,,49-9064,,Watch and Clock Repairers
1283
+ ,,,49-9069,,"Precision Instrument and Equipment Repairers, All Other"
1284
+ ,,49-9070,,,"Maintenance and Repair Workers, General"
1285
+ ,,,49-9071,,"Maintenance and Repair Workers, General"
1286
+ ,,49-9080,,,Wind Turbine Service Technicians
1287
+ ,,,49-9081,,Wind Turbine Service Technicians
1288
+ ,,49-9090,,,"Miscellaneous Installation, Maintenance, and Repair Workers"
1289
+ ,,,49-9091,,"Coin, Vending, and Amusement Machine Servicers and Repairers"
1290
+ ,,,49-9092,,Commercial Divers
1291
+ ,,,49-9094,,Locksmiths and Safe Repairers
1292
+ ,,,49-9095,,Manufactured Building and Mobile Home Installers
1293
+ ,,,49-9096,,Riggers
1294
+ ,,,49-9097,,Signal and Track Switch Repairers
1295
+ ,,,49-9098,,"Helpers--Installation, Maintenance, and Repair Workers"
1296
+ ,,,49-9099,,"Installation, Maintenance, and Repair Workers, All Other"
1297
+ ,,,,49-9099.01,Geothermal Technicians
1298
+ 51-0000,,,,,Production Occupations
1299
+ ,51-1000,,,,Supervisors of Production Workers
1300
+ ,,51-1010,,,First-Line Supervisors of Production and Operating Workers
1301
+ ,,,51-1011,,First-Line Supervisors of Production and Operating Workers
1302
+ ,51-2000,,,,Assemblers and Fabricators
1303
+ ,,51-2010,,,"Aircraft Structure, Surfaces, Rigging, and Systems Assemblers"
1304
+ ,,,51-2011,,"Aircraft Structure, Surfaces, Rigging, and Systems Assemblers"
1305
+ ,,51-2020,,,"Electrical, Electronics, and Electromechanical Assemblers"
1306
+ ,,,51-2021,,"Coil Winders, Tapers, and Finishers"
1307
+ ,,,51-2022,,Electrical and Electronic Equipment Assemblers
1308
+ ,,,51-2023,,Electromechanical Equipment Assemblers
1309
+ ,,51-2030,,,Engine and Other Machine Assemblers
1310
+ ,,,51-2031,,Engine and Other Machine Assemblers
1311
+ ,,51-2040,,,Structural Metal Fabricators and Fitters
1312
+ ,,,51-2041,,Structural Metal Fabricators and Fitters
1313
+ ,,51-2050,,,Fiberglass Laminators and Fabricators
1314
+ ,,,51-2051,,Fiberglass Laminators and Fabricators
1315
+ ,,51-2060,,,Timing Device Assemblers and Adjusters
1316
+ ,,,51-2061,,Timing Device Assemblers and Adjusters
1317
+ ,,51-2090,,,Miscellaneous Assemblers and Fabricators
1318
+ ,,,51-2092,,Team Assemblers
1319
+ ,,,51-2099,,"Assemblers and Fabricators, All Other"
1320
+ ,51-3000,,,,Food Processing Workers
1321
+ ,,51-3010,,,Bakers
1322
+ ,,,51-3011,,Bakers
1323
+ ,,51-3020,,,"Butchers and Other Meat, Poultry, and Fish Processing Workers"
1324
+ ,,,51-3021,,Butchers and Meat Cutters
1325
+ ,,,51-3022,,"Meat, Poultry, and Fish Cutters and Trimmers"
1326
+ ,,,51-3023,,Slaughterers and Meat Packers
1327
+ ,,51-3090,,,Miscellaneous Food Processing Workers
1328
+ ,,,51-3091,,"Food and Tobacco Roasting, Baking, and Drying Machine Operators and Tenders"
1329
+ ,,,51-3092,,Food Batchmakers
1330
+ ,,,51-3093,,Food Cooking Machine Operators and Tenders
1331
+ ,,,51-3099,,"Food Processing Workers, All Other"
1332
+ ,51-4000,,,,Metal Workers and Plastic Workers
1333
+ ,,51-4020,,,"Forming Machine Setters, Operators, and Tenders, Metal and Plastic"
1334
+ ,,,51-4021,,"Extruding and Drawing Machine Setters, Operators, and Tenders, Metal and Plastic"
1335
+ ,,,51-4022,,"Forging Machine Setters, Operators, and Tenders, Metal and Plastic"
1336
+ ,,,51-4023,,"Rolling Machine Setters, Operators, and Tenders, Metal and Plastic"
1337
+ ,,51-4030,,,"Machine Tool Cutting Setters, Operators, and Tenders, Metal and Plastic"
1338
+ ,,,51-4031,,"Cutting, Punching, and Press Machine Setters, Operators, and Tenders, Metal and Plastic"
1339
+ ,,,51-4032,,"Drilling and Boring Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1340
+ ,,,51-4033,,"Grinding, Lapping, Polishing, and Buffing Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1341
+ ,,,51-4034,,"Lathe and Turning Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1342
+ ,,,51-4035,,"Milling and Planing Machine Setters, Operators, and Tenders, Metal and Plastic"
1343
+ ,,51-4040,,,Machinists
1344
+ ,,,51-4041,,Machinists
1345
+ ,,51-4050,,,"Metal Furnace Operators, Tenders, Pourers, and Casters"
1346
+ ,,,51-4051,,Metal-Refining Furnace Operators and Tenders
1347
+ ,,,51-4052,,"Pourers and Casters, Metal"
1348
+ ,,51-4060,,,"Model Makers and Patternmakers, Metal and Plastic"
1349
+ ,,,51-4061,,"Model Makers, Metal and Plastic"
1350
+ ,,,51-4062,,"Patternmakers, Metal and Plastic"
1351
+ ,,51-4070,,,"Molders and Molding Machine Setters, Operators, and Tenders, Metal and Plastic"
1352
+ ,,,51-4071,,Foundry Mold and Coremakers
1353
+ ,,,51-4072,,"Molding, Coremaking, and Casting Machine Setters, Operators, and Tenders, Metal and Plastic"
1354
+ ,,51-4080,,,"Multiple Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1355
+ ,,,51-4081,,"Multiple Machine Tool Setters, Operators, and Tenders, Metal and Plastic"
1356
+ ,,51-4110,,,Tool and Die Makers
1357
+ ,,,51-4111,,Tool and Die Makers
1358
+ ,,51-4120,,,"Welding, Soldering, and Brazing Workers"
1359
+ ,,,51-4121,,"Welders, Cutters, Solderers, and Brazers"
1360
+ ,,,51-4122,,"Welding, Soldering, and Brazing Machine Setters, Operators, and Tenders"
1361
+ ,,51-4190,,,Miscellaneous Metal Workers and Plastic Workers
1362
+ ,,,51-4191,,"Heat Treating Equipment Setters, Operators, and Tenders, Metal and Plastic"
1363
+ ,,,51-4192,,"Layout Workers, Metal and Plastic"
1364
+ ,,,51-4193,,"Plating Machine Setters, Operators, and Tenders, Metal and Plastic"
1365
+ ,,,51-4194,,"Tool Grinders, Filers, and Sharpeners"
1366
+ ,,,51-4199,,"Metal Workers and Plastic Workers, All Other"
1367
+ ,51-5100,,,,Printing Workers
1368
+ ,,51-5110,,,Printing Workers
1369
+ ,,,51-5111,,Prepress Technicians and Workers
1370
+ ,,,51-5112,,Printing Press Operators
1371
+ ,,,51-5113,,Print Binding and Finishing Workers
1372
+ ,51-6000,,,,"Textile, Apparel, and Furnishings Workers"
1373
+ ,,51-6010,,,Laundry and Dry-Cleaning Workers
1374
+ ,,,51-6011,,Laundry and Dry-Cleaning Workers
1375
+ ,,51-6020,,,"Pressers, Textile, Garment, and Related Materials"
1376
+ ,,,51-6021,,"Pressers, Textile, Garment, and Related Materials"
1377
+ ,,51-6030,,,Sewing Machine Operators
1378
+ ,,,51-6031,,Sewing Machine Operators
1379
+ ,,51-6040,,,Shoe and Leather Workers
1380
+ ,,,51-6041,,Shoe and Leather Workers and Repairers
1381
+ ,,,51-6042,,Shoe Machine Operators and Tenders
1382
+ ,,51-6050,,,"Tailors, Dressmakers, and Sewers"
1383
+ ,,,51-6051,,"Sewers, Hand"
1384
+ ,,,51-6052,,"Tailors, Dressmakers, and Custom Sewers"
1385
+ ,,51-6060,,,"Textile Machine Setters, Operators, and Tenders"
1386
+ ,,,51-6061,,Textile Bleaching and Dyeing Machine Operators and Tenders
1387
+ ,,,51-6062,,"Textile Cutting Machine Setters, Operators, and Tenders"
1388
+ ,,,51-6063,,"Textile Knitting and Weaving Machine Setters, Operators, and Tenders"
1389
+ ,,,51-6064,,"Textile Winding, Twisting, and Drawing Out Machine Setters, Operators, and Tenders"
1390
+ ,,51-6090,,,"Miscellaneous Textile, Apparel, and Furnishings Workers"
1391
+ ,,,51-6091,,"Extruding and Forming Machine Setters, Operators, and Tenders, Synthetic and Glass Fibers"
1392
+ ,,,51-6092,,Fabric and Apparel Patternmakers
1393
+ ,,,51-6093,,Upholsterers
1394
+ ,,,51-6099,,"Textile, Apparel, and Furnishings Workers, All Other"
1395
+ ,51-7000,,,,Woodworkers
1396
+ ,,51-7010,,,Cabinetmakers and Bench Carpenters
1397
+ ,,,51-7011,,Cabinetmakers and Bench Carpenters
1398
+ ,,51-7020,,,Furniture Finishers
1399
+ ,,,51-7021,,Furniture Finishers
1400
+ ,,51-7030,,,"Model Makers and Patternmakers, Wood"
1401
+ ,,,51-7031,,"Model Makers, Wood"
1402
+ ,,,51-7032,,"Patternmakers, Wood"
1403
+ ,,51-7040,,,"Woodworking Machine Setters, Operators, and Tenders"
1404
+ ,,,51-7041,,"Sawing Machine Setters, Operators, and Tenders, Wood"
1405
+ ,,,51-7042,,"Woodworking Machine Setters, Operators, and Tenders, Except Sawing"
1406
+ ,,51-7090,,,Miscellaneous Woodworkers
1407
+ ,,,51-7099,,"Woodworkers, All Other"
1408
+ ,51-8000,,,,Plant and System Operators
1409
+ ,,51-8010,,,"Power Plant Operators, Distributors, and Dispatchers"
1410
+ ,,,51-8011,,Nuclear Power Reactor Operators
1411
+ ,,,51-8012,,Power Distributors and Dispatchers
1412
+ ,,,51-8013,,Power Plant Operators
1413
+ ,,,,51-8013.03,Biomass Plant Technicians
1414
+ ,,,,51-8013.04,Hydroelectric Plant Technicians
1415
+ ,,51-8020,,,Stationary Engineers and Boiler Operators
1416
+ ,,,51-8021,,Stationary Engineers and Boiler Operators
1417
+ ,,51-8030,,,Water and Wastewater Treatment Plant and System Operators
1418
+ ,,,51-8031,,Water and Wastewater Treatment Plant and System Operators
1419
+ ,,51-8090,,,Miscellaneous Plant and System Operators
1420
+ ,,,51-8091,,Chemical Plant and System Operators
1421
+ ,,,51-8092,,Gas Plant Operators
1422
+ ,,,51-8093,,"Petroleum Pump System Operators, Refinery Operators, and Gaugers"
1423
+ ,,,51-8099,,"Plant and System Operators, All Other"
1424
+ ,,,,51-8099.01,Biofuels Processing Technicians
1425
+ ,51-9000,,,,Other Production Occupations
1426
+ ,,51-9010,,,"Chemical Processing Machine Setters, Operators, and Tenders"
1427
+ ,,,51-9011,,Chemical Equipment Operators and Tenders
1428
+ ,,,51-9012,,"Separating, Filtering, Clarifying, Precipitating, and Still Machine Setters, Operators, and Tenders"
1429
+ ,,51-9020,,,"Crushing, Grinding, Polishing, Mixing, and Blending Workers"
1430
+ ,,,51-9021,,"Crushing, Grinding, and Polishing Machine Setters, Operators, and Tenders"
1431
+ ,,,51-9022,,"Grinding and Polishing Workers, Hand"
1432
+ ,,,51-9023,,"Mixing and Blending Machine Setters, Operators, and Tenders"
1433
+ ,,51-9030,,,Cutting Workers
1434
+ ,,,51-9031,,"Cutters and Trimmers, Hand"
1435
+ ,,,51-9032,,"Cutting and Slicing Machine Setters, Operators, and Tenders"
1436
+ ,,51-9040,,,"Extruding, Forming, Pressing, and Compacting Machine Setters, Operators, and Tenders"
1437
+ ,,,51-9041,,"Extruding, Forming, Pressing, and Compacting Machine Setters, Operators, and Tenders"
1438
+ ,,51-9050,,,"Furnace, Kiln, Oven, Drier, and Kettle Operators and Tenders"
1439
+ ,,,51-9051,,"Furnace, Kiln, Oven, Drier, and Kettle Operators and Tenders"
1440
+ ,,51-9060,,,"Inspectors, Testers, Sorters, Samplers, and Weighers"
1441
+ ,,,51-9061,,"Inspectors, Testers, Sorters, Samplers, and Weighers"
1442
+ ,,51-9070,,,Jewelers and Precious Stone and Metal Workers
1443
+ ,,,51-9071,,Jewelers and Precious Stone and Metal Workers
1444
+ ,,,,51-9071.06,Gem and Diamond Workers
1445
+ ,,51-9080,,,Dental and Ophthalmic Laboratory Technicians and Medical Appliance Technicians
1446
+ ,,,51-9081,,Dental Laboratory Technicians
1447
+ ,,,51-9082,,Medical Appliance Technicians
1448
+ ,,,51-9083,,Ophthalmic Laboratory Technicians
1449
+ ,,51-9110,,,Packaging and Filling Machine Operators and Tenders
1450
+ ,,,51-9111,,Packaging and Filling Machine Operators and Tenders
1451
+ ,,51-9120,,,Painting Workers
1452
+ ,,,51-9123,,"Painting, Coating, and Decorating Workers"
1453
+ ,,,51-9124,,"Coating, Painting, and Spraying Machine Setters, Operators, and Tenders"
1454
+ ,,51-9140,,,Semiconductor Processing Technicians
1455
+ ,,,51-9141,,Semiconductor Processing Technicians
1456
+ ,,51-9150,,,Photographic Process Workers and Processing Machine Operators
1457
+ ,,,51-9151,,Photographic Process Workers and Processing Machine Operators
1458
+ ,,51-9160,,,Computer Numerically Controlled Tool Operators and Programmers
1459
+ ,,,51-9161,,Computer Numerically Controlled Tool Operators
1460
+ ,,,51-9162,,Computer Numerically Controlled Tool Programmers
1461
+ ,,51-9190,,,Miscellaneous Production Workers
1462
+ ,,,51-9191,,Adhesive Bonding Machine Operators and Tenders
1463
+ ,,,51-9192,,"Cleaning, Washing, and Metal Pickling Equipment Operators and Tenders"
1464
+ ,,,51-9193,,Cooling and Freezing Equipment Operators and Tenders
1465
+ ,,,51-9194,,Etchers and Engravers
1466
+ ,,,51-9195,,"Molders, Shapers, and Casters, Except Metal and Plastic"
1467
+ ,,,,51-9195.03,"Stone Cutters and Carvers, Manufacturing"
1468
+ ,,,,51-9195.04,"Glass Blowers, Molders, Benders, and Finishers"
1469
+ ,,,,51-9195.05,"Potters, Manufacturing"
1470
+ ,,,51-9196,,"Paper Goods Machine Setters, Operators, and Tenders"
1471
+ ,,,51-9197,,Tire Builders
1472
+ ,,,51-9198,,Helpers--Production Workers
1473
+ ,,,51-9199,,"Production Workers, All Other"
1474
+ 53-0000,,,,,Transportation and Material Moving Occupations
1475
+ ,53-1000,,,,Supervisors of Transportation and Material Moving Workers
1476
+ ,,53-1040,,,First-Line Supervisors of Transportation and Material Moving Workers
1477
+ ,,,53-1041,,Aircraft Cargo Handling Supervisors
1478
+ ,,,53-1042,,"First-Line Supervisors of Helpers, Laborers, and Material Movers, Hand"
1479
+ ,,,,53-1042.01,Recycling Coordinators
1480
+ ,,,53-1043,,First-Line Supervisors of Material-Moving Machine and Vehicle Operators
1481
+ ,,,53-1044,,First-Line Supervisors of Passenger Attendants
1482
+ ,,,53-1049,,"First-Line Supervisors of Transportation Workers, All Other"
1483
+ ,53-2000,,,,Air Transportation Workers
1484
+ ,,53-2010,,,Aircraft Pilots and Flight Engineers
1485
+ ,,,53-2011,,"Airline Pilots, Copilots, and Flight Engineers"
1486
+ ,,,53-2012,,Commercial Pilots
1487
+ ,,53-2020,,,Air Traffic Controllers and Airfield Operations Specialists
1488
+ ,,,53-2021,,Air Traffic Controllers
1489
+ ,,,53-2022,,Airfield Operations Specialists
1490
+ ,,53-2030,,,Flight Attendants
1491
+ ,,,53-2031,,Flight Attendants
1492
+ ,53-3000,,,,Motor Vehicle Operators
1493
+ ,,53-3010,,,"Ambulance Drivers and Attendants, Except Emergency Medical Technicians"
1494
+ ,,,53-3011,,"Ambulance Drivers and Attendants, Except Emergency Medical Technicians"
1495
+ ,,53-3030,,,Driver/Sales Workers and Truck Drivers
1496
+ ,,,53-3031,,Driver/Sales Workers
1497
+ ,,,53-3032,,Heavy and Tractor-Trailer Truck Drivers
1498
+ ,,,53-3033,,Light Truck Drivers
1499
+ ,,53-3050,,,Passenger Vehicle Drivers
1500
+ ,,,53-3051,,"Bus Drivers, School"
1501
+ ,,,53-3052,,"Bus Drivers, Transit and Intercity"
1502
+ ,,,53-3053,,Shuttle Drivers and Chauffeurs
1503
+ ,,,53-3054,,Taxi Drivers
1504
+ ,,53-3090,,,Miscellaneous Motor Vehicle Operators
1505
+ ,,,53-3099,,"Motor Vehicle Operators, All Other"
1506
+ ,53-4000,,,,Rail Transportation Workers
1507
+ ,,53-4010,,,Locomotive Engineers and Operators
1508
+ ,,,53-4011,,Locomotive Engineers
1509
+ ,,,53-4013,,"Rail Yard Engineers, Dinkey Operators, and Hostlers"
1510
+ ,,53-4020,,,"Railroad Brake, Signal, and Switch Operators and Locomotive Firers"
1511
+ ,,,53-4022,,"Railroad Brake, Signal, and Switch Operators and Locomotive Firers"
1512
+ ,,53-4030,,,Railroad Conductors and Yardmasters
1513
+ ,,,53-4031,,Railroad Conductors and Yardmasters
1514
+ ,,53-4040,,,Subway and Streetcar Operators
1515
+ ,,,53-4041,,Subway and Streetcar Operators
1516
+ ,,53-4090,,,Miscellaneous Rail Transportation Workers
1517
+ ,,,53-4099,,"Rail Transportation Workers, All Other"
1518
+ ,53-5000,,,,Water Transportation Workers
1519
+ ,,53-5010,,,Sailors and Marine Oilers
1520
+ ,,,53-5011,,Sailors and Marine Oilers
1521
+ ,,53-5020,,,Ship and Boat Captains and Operators
1522
+ ,,,53-5021,,"Captains, Mates, and Pilots of Water Vessels"
1523
+ ,,,53-5022,,Motorboat Operators
1524
+ ,,53-5030,,,Ship Engineers
1525
+ ,,,53-5031,,Ship Engineers
1526
+ ,53-6000,,,,Other Transportation Workers
1527
+ ,,53-6010,,,Bridge and Lock Tenders
1528
+ ,,,53-6011,,Bridge and Lock Tenders
1529
+ ,,53-6020,,,Parking Attendants
1530
+ ,,,53-6021,,Parking Attendants
1531
+ ,,53-6030,,,Transportation Service Attendants
1532
+ ,,,53-6031,,Automotive and Watercraft Service Attendants
1533
+ ,,,53-6032,,Aircraft Service Attendants
1534
+ ,,53-6040,,,Traffic Technicians
1535
+ ,,,53-6041,,Traffic Technicians
1536
+ ,,53-6050,,,Transportation Inspectors
1537
+ ,,,53-6051,,Transportation Inspectors
1538
+ ,,,,53-6051.01,Aviation Inspectors
1539
+ ,,,,53-6051.07,"Transportation Vehicle, Equipment and Systems Inspectors, Except Aviation"
1540
+ ,,53-6060,,,Passenger Attendants
1541
+ ,,,53-6061,,Passenger Attendants
1542
+ ,,53-6090,,,Miscellaneous Transportation Workers
1543
+ ,,,53-6099,,"Transportation Workers, All Other"
1544
+ ,53-7000,,,,Material Moving Workers
1545
+ ,,53-7010,,,Conveyor Operators and Tenders
1546
+ ,,,53-7011,,Conveyor Operators and Tenders
1547
+ ,,53-7020,,,Crane and Tower Operators
1548
+ ,,,53-7021,,Crane and Tower Operators
1549
+ ,,53-7030,,,Dredge Operators
1550
+ ,,,53-7031,,Dredge Operators
1551
+ ,,53-7040,,,Hoist and Winch Operators
1552
+ ,,,53-7041,,Hoist and Winch Operators
1553
+ ,,53-7050,,,Industrial Truck and Tractor Operators
1554
+ ,,,53-7051,,Industrial Truck and Tractor Operators
1555
+ ,,53-7060,,,Laborers and Material Movers
1556
+ ,,,53-7061,,Cleaners of Vehicles and Equipment
1557
+ ,,,53-7062,,"Laborers and Freight, Stock, and Material Movers, Hand"
1558
+ ,,,,53-7062.04,Recycling and Reclamation Workers
1559
+ ,,,53-7063,,Machine Feeders and Offbearers
1560
+ ,,,53-7064,,"Packers and Packagers, Hand"
1561
+ ,,,53-7065,,Stockers and Order Fillers
1562
+ ,,53-7070,,,Pumping Station Operators
1563
+ ,,,53-7071,,Gas Compressor and Gas Pumping Station Operators
1564
+ ,,,53-7072,,"Pump Operators, Except Wellhead Pumpers"
1565
+ ,,,53-7073,,Wellhead Pumpers
1566
+ ,,53-7080,,,Refuse and Recyclable Material Collectors
1567
+ ,,,53-7081,,Refuse and Recyclable Material Collectors
1568
+ ,,53-7120,,,"Tank Car, Truck, and Ship Loaders"
1569
+ ,,,53-7121,,"Tank Car, Truck, and Ship Loaders"
1570
+ ,,53-7190,,,Miscellaneous Material Moving Workers
1571
+ ,,,53-7199,,"Material Moving Workers, All Other"
1572
+ 55-0000,,,,,Military Specific Occupations
1573
+ ,55-1000,,,,Military Officer Special and Tactical Operations Leaders
1574
+ ,,55-1010,,,Military Officer Special and Tactical Operations Leaders
1575
+ ,,,55-1011,,Air Crew Officers
1576
+ ,,,55-1012,,Aircraft Launch and Recovery Officers
1577
+ ,,,55-1013,,Armored Assault Vehicle Officers
1578
+ ,,,55-1014,,Artillery and Missile Officers
1579
+ ,,,55-1015,,Command and Control Center Officers
1580
+ ,,,55-1016,,Infantry Officers
1581
+ ,,,55-1017,,Special Forces Officers
1582
+ ,,,55-1019,,"Military Officer Special and Tactical Operations Leaders, All Other"
1583
+ ,55-2000,,,,First-Line Enlisted Military Supervisors
1584
+ ,,55-2010,,,First-Line Enlisted Military Supervisors
1585
+ ,,,55-2011,,First-Line Supervisors of Air Crew Members
1586
+ ,,,55-2012,,First-Line Supervisors of Weapons Specialists/Crew Members
1587
+ ,,,55-2013,,First-Line Supervisors of All Other Tactical Operations Specialists
1588
+ ,55-3000,,,,Military Enlisted Tactical Operations and Air/Weapons Specialists and Crew Members
1589
+ ,,55-3010,,,Military Enlisted Tactical Operations and Air/Weapons Specialists and Crew Members
1590
+ ,,,55-3011,,Air Crew Members
1591
+ ,,,55-3012,,Aircraft Launch and Recovery Specialists
1592
+ ,,,55-3013,,Armored Assault Vehicle Crew Members
1593
+ ,,,55-3014,,Artillery and Missile Crew Members
1594
+ ,,,55-3015,,Command and Control Center Specialists
1595
+ ,,,55-3016,,Infantry
1596
+ ,,,55-3018,,Special Forces
1597
+ ,,,55-3019,,"Military Enlisted Tactical Operations and Air/Weapons Specialists and Crew Members, All Other"
release_2025_03_27/automation_augmentation_by_occupation.png ADDED

Git LFS Details

  • SHA256: 5c3aeba2d117968c0276be83cdc864db9b677f79a34f37b097545e256dc2e822
  • Pointer size: 131 Bytes
  • Size of remote file: 672 kB
release_2025_03_27/automation_augmentation_comparison.png ADDED

Git LFS Details

  • SHA256: 3410655893206268a5e0492a6c2b8a7850f24088301c0ed3a23aaf0f0d1e4576
  • Pointer size: 131 Bytes
  • Size of remote file: 150 kB
release_2025_03_27/automation_vs_augmentation_by_task.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/automation_vs_augmentation_v1.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ interaction_type,pct
2
+ directive,22.563272409918948
3
+ feedback loop,12.036303266190515
4
+ learning,18.917648061953294
5
+ none,2.9013020624347967
6
+ task iteration,25.47648663831153
7
+ validation,2.314220367546746
release_2025_03_27/automation_vs_augmentation_v2.csv ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ interaction_type,pct
2
+ directive,29.420541308119624
3
+ feedback loop,12.245083676255144
4
+ learning,27.080406206093095
5
+ none,3.2389485842287633
6
+ task iteration,24.038560578408678
7
+ validation,3.972959594393916
release_2025_03_27/cluster_level_data/README.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Cluster Level Data
2
+
3
+ This folder contains cluster-level data for the second Economic Index release, based on Claude 3.7 Sonnet data. It contains hierarchical cluster descriptions, as well as associated prevalence metrics for each cluster (% of records, % of users). It also includes mappings to [O*NET Tasks](https://www.onetonline.org/), collaboration pattern ratios, and ratios indicating whether or not Claude 3.7 Sonnet's "Thinking" feature was used during the conversation.
4
+
5
+ ## Files in this Directory
6
+
7
+ - **cluster_level_dataset.tsv**: Tab-separated values file containing the cluster data with all fields described in the data dictionary below. This is the primary dataset file for analysis.
8
+
9
+ - **cluster_level_example_analysis.ipynb**: Jupyter notebook demonstrating example analyses you can perform with the cluster level dataset. This notebook includes code for loading the data, basic exploratory analysis, and visualization techniques to help understand the cluster patterns and their relationships to O*NET tasks.
10
+
11
+ ## Data Dictionary
12
+
13
+ | Field | Description |
14
+ |-------|-------------|
15
+ | cluster_name_0 | Name of the level 0 (most granular) cluster |
16
+ | cluster_description_0 | Detailed description of the level 0 cluster |
17
+ | cluster_name_1 | Name of the level 1 (intermediate) cluster |
18
+ | cluster_description_1 | Detailed description of the level 1 cluster |
19
+ | cluster_name_2 | Name of the level 2 (broadest) cluster |
20
+ | cluster_description_2 | Detailed description of the level 2 cluster |
21
+ | percent_records | Percentage of total records that belong to this Level 0 cluster |
22
+ | percent_users | Percentage of total users who have used this Level 0 cluster |
23
+ | onet_task | Description of the associated O*NET task |
24
+ | collaboration:directive_ratio | Ratio of conversations with directive collaboration patterns |
25
+ | collaboration:feedback loop_ratio | Ratio of conversations with feedback loop collaboration patterns |
26
+ | collaboration:learning_ratio | Ratio of conversations with learning collaboration patterns |
27
+ | collaboration:none_ratio | Ratio of conversations with no collaboration patterns |
28
+ | collaboration:task iteration_ratio | Ratio of conversations with task iteration collaboration patterns |
29
+ | collaboration:validation_ratio | Ratio of conversations with validation collaboration patterns |
30
+ | has_thinking_ratio | Ratio of conversations where the "Thinking" feature was used |
31
+
32
+ ## Bucketing Adjustment
33
+
34
+ For percent_records and percent_users fields, we applied a bucketing adjustment to enhance privacy while preserving the overall distribution:
35
+
36
+ 1. Clusters were sorted by their prevalence metrics (percent_records and percent_users).
37
+ 2. 100 buckets were created.
38
+ 3. Clusters were assigned to buckets and the average prevalence within each bucket was calculated.
39
+ 4. The original values were replaced with the bucket averages to reduce precision while maintaining the distribution.
40
+
41
+
42
+
release_2025_03_27/cluster_level_data/cluster_level_dataset.tsv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/cluster_level_data/cluster_level_example_analysis.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/normalized_automation_by_category.png ADDED

Git LFS Details

  • SHA256: 7090a9d2db3a728fa4cfbe2685bf3b0f4804b4d155980d53b019cea605ec1466
  • Pointer size: 131 Bytes
  • Size of remote file: 854 kB
release_2025_03_27/onet_task_statements.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/task_pct_v1.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/task_pct_v2.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/task_thinking_fractions.csv ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_03_27/v2_report_replication.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
release_2025_09_15/README.md ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Anthropic Economic Index September 2025 Report Replication
2
+
3
+ ## Folder Structure
4
+
5
+ ```
6
+ .
7
+ ├── code/ # Analysis scripts
8
+ ├── data/
9
+ │ ├── input/ # Raw data files (from external sources or prior releases)
10
+ │ ├── intermediate/ # Processed data files
11
+ │ └── output/ # Final outputs (plots, tables, etc.)
12
+ ├── data_documentation.md # Documentation of all data sources and datasets
13
+ └── README.md
14
+ ```
15
+
16
+ ## Data Processing Pipeline
17
+
18
+ **Note:** Since all preprocessed data files are provided, you can skip directly to the Analysis section (Section 2) if you want to replicate the results without re-running the preprocessing steps. Please refer to `data_documentation.md` for details on the different data used.
19
+
20
+ Run the following scripts in order from the `code/` directory:
21
+
22
+ ### 1. Data Preprocessing
23
+
24
+ 1. **`preprocess_iso_codes.py`**
25
+ - Processes ISO country codes
26
+ - Creates standardized country code mappings
27
+
28
+ 2. **`preprocess_population.py`**
29
+ - Processes country-level population data
30
+ - Processes US state-level population data
31
+ - Outputs working age population statistics
32
+
33
+ 3. **`preprocess_gdp.py`**
34
+ - Downloads and processes IMF country GDP data
35
+ - Processes BEA US state GDP data
36
+ - Creates standardized GDP datasets
37
+
38
+ 4. **`preprocess_onet.py`**
39
+ - Processes O*NET occupation and task data
40
+ - Creates SOC occupation mappings
41
+
42
+ 5. **`aei_report_v3_preprocessing_1p_api.ipynb`**
43
+ - Jupyter notebook for preprocessing API and Claude.ai usage data
44
+ - Prepares data for analysis
45
+
46
+ ### 2. Analysis
47
+
48
+ #### Analysis Scripts
49
+
50
+ 1. **`aei_report_v3_change_over_time_claude_ai.py`**
51
+ - Analyzes automation trends across report versions (V1, V2, V3)
52
+ - Generates comparison figures showing evolution of automation estimates
53
+
54
+ 2. **`aei_report_v3_analysis_claude_ai.ipynb`**
55
+ - Analysis notebook for Claude.ai usage patterns
56
+ - Generates figures specific to Claude.ai usage
57
+ - Uses functions from `aei_analysis_functions_claude_ai.py`
58
+
59
+ 3. **`aei_report_v3_analysis_1p_api.ipynb`**
60
+ - Main analysis notebook for API usage patterns
61
+ - Generates figures for occupational usage, collaboration patterns, and regression analyses
62
+ - Uses functions from `aei_analysis_functions_1p_api.py`
63
+
64
+ #### Supporting Function Files
65
+
66
+ - **`aei_analysis_functions_claude_ai.py`**
67
+ - Core analysis functions for Claude.ai data
68
+ - Platform-specific analysis and visualization functions
69
+
70
+ - **`aei_analysis_functions_1p_api.py`**
71
+ - Core analysis functions for API data
72
+ - Includes regression models, plotting functions, and data transformations
release_2025_09_15/code/aei_analysis_functions_1p_api.py ADDED
@@ -0,0 +1,2339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AEI 1P API Analysis Functions
2
+ # This module contains the core analysis functions for the AEI report API chapter
3
+
4
+ from pathlib import Path
5
+ from textwrap import wrap
6
+
7
+ import matplotlib.pyplot as plt
8
+ import numpy as np
9
+ import pandas as pd
10
+ import plotly.graph_objects as go
11
+ import statsmodels.api as sm
12
+ from plotly.subplots import make_subplots
13
+
14
+ # Define the tier colors
15
+ CUSTOM_COLORS_LIST = ["#E6DBD0", "#E5C5AB", "#E4AF86", "#E39961", "#D97757"]
16
+
17
+ # Define the color cycle for charts
18
+ COLOR_CYCLE = [
19
+ "#D97757",
20
+ "#656565",
21
+ "#40668C",
22
+ "#E39961",
23
+ "#E4AF86",
24
+ "#C65A3F",
25
+ "#8778AB",
26
+ "#E5C5AB",
27
+ "#B04F35",
28
+ ]
29
+
30
+
31
def setup_plot_style():
    """Configure matplotlib rcParams for publication-quality figures.

    Resets to the default style sheet, then overlays the report's sizing,
    grid, and color conventions (white backgrounds, muted grid, the brand
    title color #B86046).
    """
    plt.style.use("default")

    # Named override table: grouped roughly as resolution, fonts, colors,
    # grid, and text rendering. Applied in a single rcParams.update call.
    rc_overrides = {
        "figure.dpi": 100,
        "savefig.dpi": 300,
        "font.size": 10,
        "axes.labelsize": 11,
        "axes.titlesize": 12,
        "xtick.labelsize": 9,
        "ytick.labelsize": 9,
        "legend.fontsize": 9,
        "figure.facecolor": "white",
        "axes.facecolor": "white",
        "savefig.facecolor": "white",
        "axes.edgecolor": "#333333",
        "axes.linewidth": 0.8,
        "axes.grid": True,
        "grid.alpha": 0.3,
        "grid.linestyle": "-",
        "grid.linewidth": 0.5,
        "axes.axisbelow": True,
        "text.usetex": False,
        "mathtext.default": "regular",
        "axes.titlecolor": "#B86046",
        "figure.titlesize": 16,
    }
    plt.rcParams.update(rc_overrides)
60
+
61
+
62
+ # Initialize style
63
+ setup_plot_style()
64
+
65
+
66
def load_preprocessed_data(input_file):
    """
    Load preprocessed API data from a CSV or Parquet file.

    The reader is chosen from the file extension: ``.parquet`` files are
    read with ``pd.read_parquet``; anything else is treated as CSV.
    (The original implementation read every file as CSV despite the
    docstring promising Parquet support.)

    Args:
        input_file: Path to the preprocessed data file (.csv or .parquet).

    Returns:
        DataFrame with preprocessed API data.

    Raises:
        FileNotFoundError: If input_file does not exist.
    """
    input_path = Path(input_file)

    if not input_path.exists():
        raise FileNotFoundError(f"Input file not found: {input_path}")

    # Dispatch on the (case-insensitive) extension so the Parquet promise
    # in the docstring actually holds; CSV remains the default path.
    if input_path.suffix.lower() == ".parquet":
        return pd.read_parquet(input_path)
    return pd.read_csv(input_path)
83
+
84
+
85
def create_top_requests_bar_chart(df, output_dir):
    """
    Create a horizontal bar chart of the top 15 request categories (level 2)
    by share of total request count and save it as a PNG.

    Args:
        df: Preprocessed long-format DataFrame (columns read: facet, geo_id,
            level, variable, cluster_name, value).
        output_dir: Directory to save the figure.

    Returns:
        str: Path of the saved PNG file.
    """
    # Get request data at level 2 (global only) using percentages
    request_data = df[
        (df["facet"] == "request")
        & (df["geo_id"] == "GLOBAL")
        & (df["level"] == 2)
        & (df["variable"] == "request_pct")
    ].copy()

    # Filter out not_classified (but don't renormalize)
    request_data = request_data[request_data["cluster_name"] != "not_classified"]

    # Use the percentage values directly (already calculated in preprocessing)
    request_data["request_pct"] = request_data["value"]

    # Get top 15 requests by percentage share
    top_requests = request_data.nlargest(15, "request_pct").sort_values(
        "request_pct", ascending=True
    )

    # Create figure
    fig, ax = plt.subplots(figsize=(14, 10))

    # Create horizontal bar chart with tier color gradient
    y_pos = np.arange(len(top_requests))

    # Use tier colors based on ranking (top categories get darker colors).
    # Guard the denominator: with a single surviving category the original
    # expression i / (len - 1) divided by zero.
    colors = []
    denom = max(len(top_requests) - 1, 1)
    for i in range(len(top_requests)):
        # Map position to tier color (top bars = darker, bottom bars = lighter).
        # Since bars are sorted ascending, higher index = higher value = darker color.
        rank_position = i / denom
        tier_index = int(rank_position * (len(CUSTOM_COLORS_LIST) - 1))
        colors.append(CUSTOM_COLORS_LIST[tier_index])

    ax.barh(
        y_pos,
        top_requests["request_pct"],
        color=colors,
        alpha=0.9,
        edgecolor="#333333",
        linewidth=0.5,
    )

    # Add value labels on bars
    for i, (idx, row) in enumerate(top_requests.iterrows()):
        ax.text(
            row["request_pct"] + 0.1,
            i,
            f"{row['request_pct']:.1f}%",
            va="center",
            fontsize=11,
            fontweight="bold",
        )

    # Clean up request names for y-axis labels
    labels = []
    for name in top_requests["cluster_name"]:
        # Truncate long names and add line breaks
        if len(name) > 60:
            # Find good break point around middle
            mid = len(name) // 2
            break_point = name.find(" ", mid)
            if break_point == -1:  # No space found, just break at middle
                break_point = mid
            clean_name = name[:break_point] + "\n" + name[break_point:].strip()
        else:
            clean_name = name
        labels.append(clean_name)

    ax.set_yticks(y_pos)
    ax.set_yticklabels(labels, fontsize=10)

    # Formatting
    ax.set_xlabel("Percentage of total request count", fontsize=14)
    ax.set_title(
        "Top use cases among 1P API transcripts by usage share \n (broad grouping, bottom-up classification)",
        fontsize=14,
        fontweight="bold",
        pad=20,
    )

    # Add grid
    ax.grid(True, alpha=0.3, axis="x")
    ax.set_axisbelow(True)

    # Remove top and right spines
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)

    # Increase tick label font size
    ax.tick_params(axis="x", which="major", labelsize=12)

    plt.tight_layout()

    # Save plot
    output_path = Path(output_dir) / "top_requests_level2_bar_chart.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
192
+
193
+
194
def load_onet_mappings():
    """
    Load O*NET task statements and the SOC structure table used for
    occupational category mapping.

    Both CSV files are read from the local ``../data/intermediate`` folder.

    Returns:
        Tuple of (task_statements_df, soc_structure_df)
    """
    # Both inputs live in the same intermediate-data directory.
    data_dir = Path("../data/intermediate")

    task_statements = pd.read_csv(data_dir / "onet_task_statements.csv")
    soc_structure = pd.read_csv(data_dir / "soc_structure.csv")

    return task_statements, soc_structure
210
+
211
+
212
def map_to_occupational_categories(df, task_statements, soc_structure):
    """
    Map O*NET task data to major (2-digit SOC) occupational categories.

    Tasks named ``not_classified`` or ``none`` are routed to a synthetic
    "Not Classified" category (SOC major "99"). A task that matches several
    major groups is duplicated — one row per group, each carrying the full
    value — and all values are renormalized to sum to 100 at the end.

    Args:
        df: Preprocessed data DataFrame; only rows with facet == "onet_task"
            are used (columns read: facet, cluster_name, value).
        task_statements: O*NET task statements DataFrame (columns read: Task,
            soc_major_group). Not modified.
        soc_structure: SOC structure DataFrame (columns read: "Major Group",
            "SOC or O*NET-SOC 2019 Title").

    Returns:
        DataFrame with soc_major and occupational_category columns added and
        value renormalized to percentages summing to 100.
    """
    # Work on a copy so we do not mutate the caller's DataFrame when adding
    # the task_standardized helper column (the original implementation wrote
    # the column into the argument in place).
    task_statements = task_statements.copy()

    # Filter for ONET task data
    onet_data = df[df["facet"] == "onet_task"].copy()

    # Handle not_classified and none tasks first
    not_classified_mask = onet_data["cluster_name"].isin(["not_classified", "none"])
    not_classified_data = onet_data[not_classified_mask].copy()
    not_classified_data["soc_major"] = "99"
    not_classified_data["occupational_category"] = "Not Classified"

    # Process regular tasks
    regular_data = onet_data[~not_classified_mask].copy()

    # Standardize task descriptions for matching
    # Create standardized task mapping from ONET statements
    task_statements["task_standardized"] = (
        task_statements["Task"].str.strip().str.lower()
    )
    regular_data["cluster_name_standardized"] = (
        regular_data["cluster_name"].str.strip().str.lower()
    )

    # Create mapping from standardized task to major groups (allowing multiple)
    task_to_major_groups = {}
    for _, row in task_statements.iterrows():
        if pd.notna(row["Task"]) and pd.notna(row["soc_major_group"]):
            std_task = row["task_standardized"]
            major_group = str(int(row["soc_major_group"]))
            if std_task not in task_to_major_groups:
                task_to_major_groups[std_task] = []
            if major_group not in task_to_major_groups[std_task]:
                task_to_major_groups[std_task].append(major_group)

    # Expand rows for tasks that belong to multiple groups
    expanded_rows = []
    for _, row in regular_data.iterrows():
        std_task = row["cluster_name_standardized"]
        if std_task in task_to_major_groups:
            groups = task_to_major_groups[std_task]
            # Assign full value to each group (creates duplicates)
            for group in groups:
                new_row = row.copy()
                new_row["soc_major"] = group
                new_row["value"] = row["value"]  # Keep full value for each group
                expanded_rows.append(new_row)

    # Create new dataframe from expanded rows
    if expanded_rows:
        regular_data = pd.DataFrame(expanded_rows)
    else:
        # No task matched: add an empty soc_major column so downstream
        # .map() produces all-NaN categories and every row is dropped.
        regular_data["soc_major"] = None

    # Get major occupational groups from SOC structure
    # Filter for rows where 'Major Group' is not null (these are the major groups)
    major_groups = soc_structure[soc_structure["Major Group"].notna()].copy()

    # Extract major group code (first two digits of e.g. "11-0000") and title
    major_groups["soc_major"] = major_groups["Major Group"].astype(str).str[:2]
    major_groups["title"] = major_groups["SOC or O*NET-SOC 2019 Title"]

    # Create a clean mapping from major group code to title
    major_group_mapping = (
        major_groups[["soc_major", "title"]]
        .drop_duplicates()
        .set_index("soc_major")["title"]
        .to_dict()
    )

    # Map major group codes to titles for regular data
    regular_data["occupational_category"] = regular_data["soc_major"].map(
        major_group_mapping
    )

    # Keep only successfully mapped regular data
    regular_mapped = regular_data[regular_data["occupational_category"].notna()].copy()

    # Combine regular mapped data with not_classified data
    onet_mapped = pd.concat([regular_mapped, not_classified_data], ignore_index=True)

    # Renormalize percentages to sum to 100 since we may have created duplicates
    total = onet_mapped["value"].sum()

    onet_mapped["value"] = (onet_mapped["value"] / total) * 100

    return onet_mapped
308
+
309
+
310
def create_platform_occupational_comparison(api_df, cai_df, output_dir):
    """
    Create horizontal bar chart comparing occupational categories between Claude.ai and 1P API.

    Both datasets are mapped to major SOC occupational categories, merged,
    filtered to categories with more than 0.5% share on either platform, and
    the top 8 by combined share are drawn as paired horizontal bars.

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Path of the saved PNG file.
    """
    # Load ONET mappings for occupational categories
    task_statements, soc_structure = load_onet_mappings()

    # Process both datasets to get occupational categories.
    # Nested helper so both platforms go through the identical pipeline;
    # it closes over task_statements and soc_structure loaded above.
    def get_occupational_data(df, platform_name):
        # Get ONET task percentage data (global level only)
        onet_data = df[
            (df["facet"] == "onet_task")
            & (df["geo_id"] == "GLOBAL")
            & (df["variable"] == "onet_task_pct")
        ].copy()

        # Map to occupational categories using existing function
        onet_mapped = map_to_occupational_categories(
            onet_data, task_statements, soc_structure
        )

        # Sum percentages by occupational category
        category_percentages = (
            onet_mapped.groupby("occupational_category")["value"].sum().reset_index()
        )

        # Exclude "Not Classified" category from visualization
        category_percentages = category_percentages[
            category_percentages["occupational_category"] != "Not Classified"
        ]

        # Column name carries the platform, e.g. "api_pct" / "claude_pct"
        category_percentages.columns = ["category", f"{platform_name.lower()}_pct"]

        return category_percentages

    # Get data for both platforms
    api_categories = get_occupational_data(api_df, "API")
    claude_categories = get_occupational_data(cai_df, "Claude")

    # Merge the datasets (outer join so categories present on only one
    # platform are kept; the missing side is filled with 0)
    category_comparison = pd.merge(
        claude_categories, api_categories, on="category", how="outer"
    ).fillna(0)

    # Filter to substantial categories (>0.5% in either platform)
    category_comparison = category_comparison[
        (category_comparison["claude_pct"] > 0.5)
        | (category_comparison["api_pct"] > 0.5)
    ].copy()

    # Calculate difference and total
    category_comparison["difference"] = (
        category_comparison["api_pct"] - category_comparison["claude_pct"]
    )
    category_comparison["total_pct"] = (
        category_comparison["claude_pct"] + category_comparison["api_pct"]
    )

    # Get top 8 categories by total usage (ascending so the largest ends up
    # at the top of the horizontal bar chart)
    top_categories = category_comparison.nlargest(8, "total_pct").sort_values(
        "total_pct", ascending=True
    )

    # Create figure
    fig, ax = plt.subplots(figsize=(12, 8))

    y_pos = np.arange(len(top_categories))
    bar_height = 0.35

    # Create side-by-side bars
    ax.barh(
        y_pos - bar_height / 2,
        top_categories["claude_pct"],
        bar_height,
        label="Claude.ai",
        color=COLOR_CYCLE[2],
        alpha=0.8,
    )
    ax.barh(
        y_pos + bar_height / 2,
        top_categories["api_pct"],
        bar_height,
        label="1P API",
        color=COLOR_CYCLE[0],
        alpha=0.8,
    )

    # Add value labels with difference percentages (labels only drawn for
    # bars large enough to matter, > 0.1%)
    for i, (idx, row) in enumerate(top_categories.iterrows()):
        # Claude.ai label
        if row["claude_pct"] > 0.1:
            ax.text(
                row["claude_pct"] + 0.2,
                i - bar_height / 2,
                f"{row['claude_pct']:.0f}%",
                va="center",
                fontsize=9,
            )

        # 1P API label with difference (colored by which platform leads)
        if row["api_pct"] > 0.1:
            ax.text(
                row["api_pct"] + 0.2,
                i + bar_height / 2,
                f"{row['api_pct']:.0f}%",
                va="center",
                fontsize=9,
                color=COLOR_CYCLE[0] if row["difference"] > 0 else COLOR_CYCLE[2],
            )

    # Clean up category labels
    labels = []
    for cat in top_categories["category"]:
        # Remove "Occupations" suffix and wrap long names
        clean_cat = cat.replace(" Occupations", "").replace(", and ", " & ")
        wrapped = "\n".join(wrap(clean_cat, 40))
        labels.append(wrapped)

    ax.set_yticks(y_pos)
    ax.set_yticklabels(labels, fontsize=10)

    ax.set_xlabel("Percentage of usage", fontsize=12)
    ax.set_title(
        "Usage shares across top occupational categories: Claude.ai vs 1P API",
        fontsize=14,
        fontweight="bold",
        pad=20,
    )
    ax.legend(loc="lower right", fontsize=11)
    ax.grid(True, alpha=0.3, axis="x")
    ax.set_axisbelow(True)

    # Remove top and right spines
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)

    plt.tight_layout()

    # Save plot
    output_path = Path(output_dir) / "platform_occupational_comparison.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
458
+
459
+
460
def create_platform_lorenz_curves(api_df, cai_df, output_dir):
    """
    Create Lorenz curves showing task usage concentration by platform.

    Builds a two-panel figure: the left panel draws Lorenz curves of
    cumulative usage share vs. cumulative task share for each platform
    (annotated with Gini coefficients and the usage captured by the bottom
    80% of tasks); the right panel is a log-log rank-vs-share scatter with
    OLS fits compared against a slope of -1 (Zipf's law).

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Path of the saved "platform_lorenz_zipf_panel.png" figure.
    """

    def gini_coefficient(values):
        """Calculate Gini coefficient for a series of values."""
        # Closed-form Gini for an ascending-sorted sample:
        #   G = 2 * sum(i * x_i) / (n * sum(x)) - (n + 1) / n,  i = 1..n
        # NOTE(review): divides by n * sum(x) — raises/returns nan for an
        # empty or all-zero input; callers pass non-empty percentage arrays.
        sorted_values = np.sort(values)
        n = len(sorted_values)
        cumulative = np.cumsum(sorted_values)
        gini = (2 * np.sum(np.arange(1, n + 1) * sorted_values)) / (
            n * cumulative[-1]
        ) - (n + 1) / n
        return gini

    def get_task_usage_data(df, platform_name):
        # Get ONET task percentage data (global level only).
        # NOTE(review): platform_name is currently unused by this helper.
        onet_data = df[
            (df["facet"] == "onet_task")
            & (df["geo_id"] == "GLOBAL")
            & (df["variable"] == "onet_task_pct")
        ].copy()

        # Filter out none and not_classified
        onet_data = onet_data[
            ~onet_data["cluster_name"].isin(["none", "not_classified"])
        ]

        # Use the percentage values directly
        onet_data["percentage"] = onet_data["value"]

        return onet_data[["cluster_name", "percentage"]].copy()

    api_tasks = get_task_usage_data(api_df, "1P API")
    claude_tasks = get_task_usage_data(cai_df, "Claude.ai")

    # Sort by percentage for each platform (ascending, as a Lorenz curve requires)
    api_tasks = api_tasks.sort_values("percentage")
    claude_tasks = claude_tasks.sort_values("percentage")

    # Calculate cumulative percentages of usage
    api_cumulative = np.cumsum(api_tasks["percentage"])
    claude_cumulative = np.cumsum(claude_tasks["percentage"])

    # Calculate cumulative percentage of tasks
    api_task_cumulative = np.arange(1, len(api_tasks) + 1) / len(api_tasks) * 100
    claude_task_cumulative = (
        np.arange(1, len(claude_tasks) + 1) / len(claude_tasks) * 100
    )

    # Interpolate to ensure curves reach 100%
    # Add final points to reach (100, 100) — the filtered percentages may sum
    # to slightly less than 100 after dropping none/not_classified.
    api_cumulative = np.append(api_cumulative, 100)
    claude_cumulative = np.append(claude_cumulative, 100)
    api_task_cumulative = np.append(api_task_cumulative, 100)
    claude_task_cumulative = np.append(claude_task_cumulative, 100)

    # Calculate Gini coefficients (on the raw shares, before the appended endpoint)
    api_gini = gini_coefficient(api_tasks["percentage"].values)
    claude_gini = gini_coefficient(claude_tasks["percentage"].values)

    # Create panel figure
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))

    # LEFT PANEL: Lorenz Curves
    # Plot Lorenz curves
    ax1.plot(
        api_task_cumulative,
        api_cumulative,
        color=COLOR_CYCLE[1],
        linewidth=2.5,
        label=f"1P API (Gini = {api_gini:.3f})",
    )

    ax1.plot(
        claude_task_cumulative,
        claude_cumulative,
        color=COLOR_CYCLE[0],
        linewidth=2.5,
        label=f"Claude.ai (Gini = {claude_gini:.3f})",
    )

    # Add perfect equality line (diagonal)
    ax1.plot(
        [0, 100],
        [0, 100],
        "k--",
        linewidth=1.5,
        alpha=0.7,
        label="Perfect Equality",
    )

    # Calculate 80th percentile values (usage share captured by bottom 80% of tasks)
    api_80th_usage = np.interp(80, api_task_cumulative, api_cumulative)
    claude_80th_usage = np.interp(80, claude_task_cumulative, claude_cumulative)

    # Add markers at 80th percentile
    ax1.scatter(
        80,
        api_80th_usage,
        alpha=0.5,
        s=100,
        color=COLOR_CYCLE[1],
        edgecolors="white",
        linewidth=1,
        zorder=5,
    )
    ax1.scatter(
        80,
        claude_80th_usage,
        alpha=0.5,
        s=100,
        color=COLOR_CYCLE[0],
        edgecolors="white",
        linewidth=1,
        zorder=5,
    )

    # Add annotations next to the 80th-percentile markers
    ax1.text(
        82,
        api_80th_usage - 2,
        f"{api_80th_usage:.1f}% of usage",
        ha="left",
        va="center",
        fontsize=10,
        color=COLOR_CYCLE[1],
    )

    ax1.text(
        78.5,
        claude_80th_usage + 1,
        f"{claude_80th_usage:.1f}% of usage",
        ha="right",
        va="center",
        fontsize=10,
        color=COLOR_CYCLE[0],
    )

    # Add text box summarizing the 80/20 concentration
    ax1.text(
        0.05,
        0.95,
        f"The bottom 80% of tasks account for:\n• 1P API: {api_80th_usage:.1f}% of usage\n• Claude.ai: {claude_80th_usage:.1f}% of usage",
        transform=ax1.transAxes,
        va="top",
        ha="left",
        bbox=dict(
            boxstyle="round,pad=0.3",
            facecolor="white",
            alpha=0.8,
            edgecolor="black",
            linewidth=1,
        ),
        fontsize=10,
    )

    # Styling for Lorenz curves
    ax1.set_xlabel("Cumulative percentage of tasks", fontsize=12)
    ax1.set_ylabel("Cumulative percentage of usage", fontsize=12)
    ax1.set_title("Lorenz curves", fontsize=14, fontweight="bold", pad=20)
    ax1.set_xlim(0, 100)
    ax1.set_ylim(0, 100)
    ax1.grid(True, alpha=0.3, linestyle="--")
    ax1.set_axisbelow(True)
    ax1.legend(loc=(0.05, 0.65), fontsize=11, frameon=True, facecolor="white")
    ax1.spines["top"].set_visible(False)
    ax1.spines["right"].set_visible(False)

    # RIGHT PANEL: Zipf's Law Analysis
    # Minimum usage share (%) for a task to enter the log-log regression;
    # avoids log of near-zero shares dominating the fit.
    min_share = 0.1

    # Filter for minimum share
    api_filtered = api_tasks[api_tasks["percentage"] > min_share]["percentage"].copy()
    claude_filtered = claude_tasks[claude_tasks["percentage"] > min_share][
        "percentage"
    ].copy()

    # Calculate ranks and log transforms (rank 1 = largest share)
    ln_rank_api = np.log(api_filtered.rank(ascending=False))
    ln_share_api = np.log(api_filtered)

    ln_rank_claude = np.log(claude_filtered.rank(ascending=False))
    ln_share_claude = np.log(claude_filtered)

    # Fit regressions of ln(rank) on ln(share).
    # NOTE(review): `sm` is presumably statsmodels.api imported at file top — confirm.
    api_model = sm.OLS(ln_rank_api, sm.add_constant(ln_share_api)).fit()
    api_slope = api_model.params.iloc[1]
    api_intercept = api_model.params.iloc[0]

    claude_model = sm.OLS(ln_rank_claude, sm.add_constant(ln_share_claude)).fit()
    claude_slope = claude_model.params.iloc[1]
    claude_intercept = claude_model.params.iloc[0]

    # Plot scatter points
    ax2.scatter(
        ln_share_api,
        ln_rank_api,
        alpha=0.5,
        s=100,
        color=COLOR_CYCLE[1],
        label=f"1P API: y = {api_slope:.2f}x + {api_intercept:.2f}",
    )

    ax2.scatter(
        ln_share_claude,
        ln_rank_claude,
        alpha=0.5,
        s=100,
        color=COLOR_CYCLE[0],
        label=f"Claude.ai: y = {claude_slope:.2f}x + {claude_intercept:.2f}",
    )

    # Add Zipf's law reference line (slope = -1), anchored at the mean of the
    # two fitted intercepts so it sits between the platform clouds.
    x_range = np.linspace(
        min(ln_share_api.min(), ln_share_claude.min()),
        max(ln_share_api.max(), ln_share_claude.max()),
        100,
    )
    avg_intercept = (api_intercept + claude_intercept) / 2
    y_line = -1 * x_range + avg_intercept

    ax2.plot(
        x_range,
        y_line,
        color="black",
        linestyle="--",
        linewidth=2,
        label=f"Zipf's Law: y = -1.00x + {avg_intercept:.2f}",
        zorder=0,
    )

    # Styling for Zipf's law plot
    ax2.set_xlabel("ln(Share of usage)", fontsize=12)
    ax2.set_ylabel("ln(Rank by usage)", fontsize=12)
    ax2.set_title(
        "Task rank versus usage share", fontsize=14, fontweight="bold", pad=20
    )
    ax2.grid(True, alpha=0.3, linestyle="--")
    ax2.set_axisbelow(True)
    ax2.legend(fontsize=11)
    ax2.spines["top"].set_visible(False)
    ax2.spines["right"].set_visible(False)

    # Overall title
    fig.suptitle(
        "Lorenz curves and power law analysis across tasks: 1P API vs Claude.ai",
        fontsize=16,
        fontweight="bold",
        y=0.95,
        color="#B86046",
    )

    plt.tight_layout()
    plt.subplots_adjust(top=0.85)  # More room for suptitle

    # Save plot
    output_path = Path(output_dir) / "platform_lorenz_zipf_panel.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
726
+
727
+
728
def create_collaboration_alluvial(api_df, cai_df, output_dir):
    """
    Create alluvial diagram showing collaboration pattern flows between platforms.

    Draws two stacked alluvial panels (augmentation patterns on top,
    automation patterns below), each connecting Claude.ai pattern shares on
    the left to 1P API shares on the right with color-coded flow bands.

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Path of the saved "collaboration_alluvial.png" figure.
    """

    def get_collaboration_data(df, platform_name):
        """Return a DataFrame of (pattern, percentage, platform) rows for one platform."""
        # Get collaboration facet data (global level only)
        collab_data = df[
            (df["facet"] == "collaboration")
            & (df["geo_id"] == "GLOBAL")
            & (df["variable"] == "collaboration_pct")
        ].copy()

        # Use cluster_name directly as the collaboration pattern
        collab_data["pattern"] = collab_data["cluster_name"]

        # Filter out not_classified
        collab_data = collab_data[collab_data["pattern"] != "not_classified"]

        # Use the percentage values directly
        result = collab_data[["pattern", "value"]].copy()
        result.columns = ["pattern", "percentage"]
        result["platform"] = platform_name

        return result

    api_collab = get_collaboration_data(api_df, "1P API")
    claude_collab = get_collaboration_data(cai_df, "Claude.ai")

    # Combine collaboration data
    collab_df = pd.concat([claude_collab, api_collab], ignore_index=True)

    # Define categories
    augmentation_types = ["learning", "task iteration", "validation"]
    automation_types = ["directive", "feedback loop"]

    # Colors matching the original
    pattern_colors = {
        "validation": "#2c3e67",
        "task iteration": "#4f76c7",
        "learning": "#79a7e0",
        "feedback loop": "#614980",
        "directive": "#8e6bb1",
    }

    # Extract per-pattern shares into plain dicts, one per platform
    flows_claude = {}
    flows_api = {}

    for pattern in augmentation_types + automation_types:
        claude_mask = (collab_df["pattern"] == pattern) & (
            collab_df["platform"] == "Claude.ai"
        )
        if claude_mask.any():
            flows_claude[pattern] = collab_df.loc[claude_mask, "percentage"].values[0]

        api_mask = (collab_df["pattern"] == pattern) & (
            collab_df["platform"] == "1P API"
        )
        if api_mask.any():
            flows_api[pattern] = collab_df.loc[api_mask, "percentage"].values[0]

    # Create figure with subplots
    fig = make_subplots(
        rows=2,
        cols=1,
        row_heights=[0.5, 0.5],
        vertical_spacing=0.15,
        subplot_titles=("<b>Augmentation Patterns</b>", "<b>Automation Patterns</b>"),
    )

    # Update subplot title colors and font
    for annotation in fig.layout.annotations:
        annotation.update(font=dict(color="#B86046", size=14, family="Styrene B LC"))

    def create_alluvial_traces(patterns, row):
        """Create flow shapes and label annotations for one alluvial panel.

        Returns (shapes, annotations, panel_height) for the given patterns.
        NOTE: `row` is kept for call-site symmetry but is not used here; the
        caller attaches the returned shapes/annotations to the right subplot.
        """
        # Horizontal extent of every flow band; also anchors the side labels.
        # BUG FIX: these were previously assigned inside the flow loop below,
        # so the annotation code raised NameError whenever no pattern had
        # data on both platforms. Hoisted so they are always defined.
        x_left = 0.2
        x_right = 0.8

        # Sort by size on Claude.ai side
        patterns_sorted = sorted(
            [p for p in patterns if p in flows_claude],
            key=lambda p: flows_claude.get(p, 0),
            reverse=True,
        )

        # Calculate total heights first to determine centering
        total_claude = sum(
            flows_claude.get(p, 0) for p in patterns if p in flows_claude
        )
        total_api = sum(flows_api.get(p, 0) for p in patterns if p in flows_api)
        gap_count = max(
            len([p for p in patterns if p in flows_claude and flows_claude[p] > 0]) - 1,
            0,
        )
        gap_count_api = max(
            len([p for p in patterns if p in flows_api and flows_api[p] > 0]) - 1, 0
        )

        # Each gap between stacked bands is 2 units tall
        total_height_claude = total_claude + (gap_count * 2)
        total_height_api = total_api + (gap_count_api * 2)

        # Calculate offset to center the smaller side
        offset_claude = 0
        offset_api = 0
        if total_height_claude < total_height_api:
            offset_claude = (total_height_api - total_height_claude) / 2
        else:
            offset_api = (total_height_claude - total_height_api) / 2

        # Calculate positions for Claude.ai (left side)
        y_pos_claude = offset_claude
        claude_positions = {}
        for pattern in patterns_sorted:
            if pattern in flows_claude and flows_claude[pattern] > 0:
                height = flows_claude[pattern]
                claude_positions[pattern] = {
                    "bottom": y_pos_claude,
                    "top": y_pos_claude + height,
                    "center": y_pos_claude + height / 2,
                }
                y_pos_claude += height + 2  # Add gap

        # Calculate positions for 1P API (right side)
        patterns_sorted_api = sorted(
            [p for p in patterns if p in flows_api],
            key=lambda p: flows_api.get(p, 0),
            reverse=True,
        )
        y_pos_api = offset_api
        api_positions = {}
        for pattern in patterns_sorted_api:
            if pattern in flows_api and flows_api[pattern] > 0:
                height = flows_api[pattern]
                api_positions[pattern] = {
                    "bottom": y_pos_api,
                    "top": y_pos_api + height,
                    "center": y_pos_api + height / 2,
                }
                y_pos_api += height + 2  # Add gap

        # Create shapes for flows (only patterns present on BOTH sides get a band)
        shapes = []
        for pattern in patterns:
            if pattern in claude_positions and pattern in api_positions:
                claude_bottom = claude_positions[pattern]["bottom"]
                claude_top = claude_positions[pattern]["top"]
                api_bottom = api_positions[pattern]["bottom"]
                api_top = api_positions[pattern]["top"]

                # Create path for the flow (a quadrilateral connecting the sides)
                path = f"M {x_left} {claude_bottom} L {x_left} {claude_top} L {x_right} {api_top} L {x_right} {api_bottom} Z"

                # Expand "#rrggbb" into components for rgba fill/stroke strings
                hex_color = pattern_colors[pattern]
                r = int(hex_color[1:3], 16)
                g = int(hex_color[3:5], 16)
                b = int(hex_color[5:7], 16)

                shapes.append(
                    dict(
                        type="path",
                        path=path,
                        fillcolor=f"rgba({r},{g},{b},0.5)",
                        line=dict(color=f"rgba({r},{g},{b},1)", width=1),
                    )
                )

        # Create text annotations
        annotations = []

        # Claude.ai labels
        for pattern in patterns_sorted:
            if pattern in claude_positions:
                annotations.append(
                    dict(
                        x=x_left - 0.02,
                        y=claude_positions[pattern]["center"],
                        text=f"{pattern.replace('_', ' ').title()}<br>{flows_claude[pattern]:.1f}%",
                        showarrow=False,
                        xanchor="right",
                        yanchor="middle",
                        font=dict(size=10),
                    )
                )

        # 1P API labels
        for pattern in patterns_sorted_api:
            if pattern in api_positions:
                annotations.append(
                    dict(
                        x=x_right + 0.02,
                        y=api_positions[pattern]["center"],
                        text=f"{pattern.replace('_', ' ').title()}<br>{flows_api[pattern]:.1f}%",
                        showarrow=False,
                        xanchor="left",
                        yanchor="middle",
                        font=dict(size=10),
                    )
                )

        # Platform labels above each column
        annotations.extend(
            [
                dict(
                    x=x_left,
                    y=max(y_pos_claude, y_pos_api) + 5,
                    text="Claude.ai",
                    showarrow=False,
                    xanchor="center",
                    font=dict(size=14, color="black"),
                ),
                dict(
                    x=x_right,
                    y=max(y_pos_claude, y_pos_api) + 5,
                    text="1P API",
                    showarrow=False,
                    xanchor="center",
                    font=dict(size=14, color="black"),
                ),
            ]
        )

        return shapes, annotations, max(y_pos_claude, y_pos_api)

    # Create augmentation diagram
    aug_shapes, aug_annotations, aug_height = create_alluvial_traces(
        augmentation_types, 1
    )

    # Create automation diagram
    auto_shapes, auto_annotations, auto_height = create_alluvial_traces(
        automation_types, 2
    )

    # Add invisible traces to create subplots
    fig.add_trace(
        go.Scatter(x=[0], y=[0], mode="markers", marker=dict(size=0)), row=1, col=1
    )
    fig.add_trace(
        go.Scatter(x=[0], y=[0], mode="markers", marker=dict(size=0)), row=2, col=1
    )

    # Update layout with shapes and annotations
    fig.update_layout(
        title=dict(
            text="<b>Collaboration Modes: Claude.ai Conversations vs 1P API Transcripts</b>",
            font=dict(size=16, family="Styrene B LC", color="#B86046"),
            x=0.5,
            xanchor="center",
        ),
        height=800,
        width=1200,
        paper_bgcolor="white",
        plot_bgcolor="white",
        showlegend=False,
    )

    # Ensure white background for both subplots
    fig.update_xaxes(showgrid=False, zeroline=False, showticklabels=False, row=1, col=1)
    fig.update_xaxes(showgrid=False, zeroline=False, showticklabels=False, row=2, col=1)
    fig.update_yaxes(showgrid=False, zeroline=False, showticklabels=False, row=1, col=1)
    fig.update_yaxes(showgrid=False, zeroline=False, showticklabels=False, row=2, col=1)

    # Add shapes and annotations to each subplot
    for shape in aug_shapes:
        fig.add_shape(shape, row=1, col=1)
    for shape in auto_shapes:
        fig.add_shape(shape, row=2, col=1)

    for ann in aug_annotations:
        fig.add_annotation(ann, row=1, col=1)
    for ann in auto_annotations:
        fig.add_annotation(ann, row=2, col=1)

    # Set axis ranges and ensure white background
    fig.update_xaxes(
        range=[0, 1],
        showgrid=False,
        zeroline=False,
        showticklabels=False,
        row=1,
        col=1,
    )
    fig.update_xaxes(
        range=[0, 1],
        showgrid=False,
        zeroline=False,
        showticklabels=False,
        row=2,
        col=1,
    )

    fig.update_yaxes(
        range=[0, aug_height + 10],
        showgrid=False,
        zeroline=False,
        showticklabels=False,
        row=1,
        col=1,
    )
    fig.update_yaxes(
        range=[0, auto_height + 10],
        showgrid=False,
        zeroline=False,
        showticklabels=False,
        row=2,
        col=1,
    )

    # Save plot
    output_path = Path(output_dir) / "collaboration_alluvial.png"
    fig.write_image(str(output_path), width=1200, height=800, scale=2)
    fig.show()
    return str(output_path)
1049
+
1050
+
1051
def get_collaboration_shares(df):
    """
    Extract collaboration mode shares for each ONET task from intersection data.

    Args:
        df: Preprocessed data DataFrame

    Returns:
        dict: {task_name: {mode: percentage}}
    """
    # Recognized collaboration modes; anything else is ignored.
    known_modes = {
        "directive",
        "feedback loop",
        "learning",
        "task iteration",
        "validation",
    }

    # Restrict to the global, pre-computed task::collaboration percentages.
    row_mask = (
        (df["geo_id"] == "GLOBAL")
        & (df["facet"] == "onet_task::collaboration")
        & (df["variable"] == "onet_task_collaboration_pct")
    )
    subset = df[row_mask].copy()

    # cluster_name encodes "<task>::<mode>"; split on the final "::" only so
    # task descriptions that themselves contain "::" stay intact.
    subset[["task", "mode"]] = subset["cluster_name"].str.rsplit(
        "::", n=1, expand=True
    )

    # Drop rows whose mode carries no collaboration signal.
    subset = subset[~subset["mode"].isin(["none", "not_classified"])]

    # Accumulate {task: {mode: pct}} from the pre-calculated percentages.
    shares = {}
    for task, mode, value in zip(subset["task"], subset["mode"], subset["value"]):
        if mode in known_modes:
            shares.setdefault(task, {})[mode] = float(value)

    return shares
1096
+
1097
+
1098
def create_automation_augmentation_panel(api_df, cai_df, output_dir):
    """
    Create combined panel figure showing automation vs augmentation for both platforms.

    Each panel is a bubble scatter: x = automation share (directive +
    feedback loop), y = augmentation share (learning + task iteration +
    validation) per ONET task, with bubbles sized by task usage counts.

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Path of the saved "automation_vs_augmentation_panel.png" figure.
    """
    # Create figure with subplots
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))

    def create_automation_augmentation_subplot(df, ax, title, platform_name):
        """Helper function to create one automation vs augmentation subplot"""
        # NOTE(review): platform_name is currently unused in this helper.
        # Get collaboration shares for each task (sibling helper in this module)
        collab_shares = get_collaboration_shares(df)

        # Get task usage counts for bubble sizing
        df_global = df[df["geo_id"] == "GLOBAL"]
        task_counts = (
            df_global[
                (df_global["facet"] == "onet_task")
                & (df_global["variable"] == "onet_task_count")
                & (~df_global["cluster_name"].isin(["none", "not_classified"]))
            ]
            .set_index("cluster_name")["value"]
            .to_dict()
        )

        # Prepare data for plotting
        tasks = []
        automation_scores = []
        augmentation_scores = []
        bubble_sizes = []

        for task_name, shares in collab_shares.items():
            if task_name in task_counts:
                # Calculate automation score (directive + feedback loop)
                automation = shares.get("directive", 0) + shares.get("feedback loop", 0)

                # Calculate augmentation score (learning + task iteration + validation)
                augmentation = (
                    shares.get("learning", 0)
                    + shares.get("task iteration", 0)
                    + shares.get("validation", 0)
                )

                # Only include tasks with some collaboration data
                if automation + augmentation > 0:
                    tasks.append(task_name)
                    automation_scores.append(automation)
                    augmentation_scores.append(augmentation)
                    bubble_sizes.append(task_counts[task_name])

        # Convert to numpy arrays for plotting
        automation_scores = np.array(automation_scores)
        augmentation_scores = np.array(augmentation_scores)
        bubble_sizes = np.array(bubble_sizes)

        # Scale bubble sizes to the [40, 840] point range.
        # NOTE(review): bubble_sizes.max() (and the total_tasks division
        # below) raise if no task survives the filters — assumes non-empty
        # input data.
        bubble_sizes_scaled = (bubble_sizes / bubble_sizes.max()) * 800 + 40

        # Color points based on whether automation or augmentation dominates
        colors = []
        for auto, aug in zip(automation_scores, augmentation_scores, strict=True):
            if auto > aug:
                colors.append("#8e6bb1")  # Automation dominant
            else:
                colors.append("#4f76c7")  # Augmentation dominant

        # Create scatter plot
        ax.scatter(
            automation_scores,
            augmentation_scores,
            s=bubble_sizes_scaled,
            c=colors,
            alpha=0.7,
            edgecolors="black",
            linewidth=0.5,
        )

        # Add diagonal line (automation = augmentation)
        max_val = max(automation_scores.max(), augmentation_scores.max())
        ax.plot([0, max_val], [0, max_val], "--", color="gray", alpha=0.5, linewidth=2)

        # Labels and formatting (increased font sizes)
        ax.set_xlabel("Automation Share (%)", fontsize=14)
        ax.set_ylabel(
            "Augmentation Score (%)",
            fontsize=14,
        )
        ax.set_title(title, fontsize=14, fontweight="bold", pad=15)

        # Calculate percentages for legend
        automation_dominant_count = sum(
            1
            for auto, aug in zip(automation_scores, augmentation_scores, strict=True)
            if auto > aug
        )
        augmentation_dominant_count = len(automation_scores) - automation_dominant_count
        total_tasks = len(automation_scores)

        automation_pct = (automation_dominant_count / total_tasks) * 100
        augmentation_pct = (augmentation_dominant_count / total_tasks) * 100

        # Add legend with percentages centered at top.
        # NOTE(review): empty plt.scatter calls are used purely to fabricate
        # legend handles; they draw on the *current* pyplot axes, not `ax`.
        automation_patch = plt.scatter(
            [],
            [],
            c="#8e6bb1",
            alpha=0.7,
            s=100,
            label=f"Automation dominant ({automation_pct:.1f}% of Tasks)",
        )
        augmentation_patch = plt.scatter(
            [],
            [],
            c="#4f76c7",
            alpha=0.7,
            s=100,
            label=f"Augmentation dominant ({augmentation_pct:.1f}% of Tasks)",
        )
        ax.legend(
            handles=[automation_patch, augmentation_patch],
            loc="upper center",
            bbox_to_anchor=(0.5, 0.95),
            fontsize=12,
            frameon=True,
            facecolor="white",
        )

        # Grid and styling
        ax.grid(True, alpha=0.3)
        ax.set_axisbelow(True)
        ax.tick_params(axis="both", which="major", labelsize=12)

        return len(tasks), automation_pct, augmentation_pct

    # Create API subplot
    create_automation_augmentation_subplot(api_df, ax1, "1P API", "1P API")

    # Create Claude.ai subplot
    create_automation_augmentation_subplot(cai_df, ax2, "Claude.ai", "Claude.ai")

    # Add overall title
    fig.suptitle(
        "Automation and augmentation dominance across tasks: Claude.ai vs. 1P API",
        fontsize=16,
        fontweight="bold",
        y=0.95,
        color="#B86046",
    )

    plt.tight_layout()
    plt.subplots_adjust(top=0.85)  # More room for suptitle

    # Save plot
    output_path = Path(output_dir) / "automation_vs_augmentation_panel.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
1259
+
1260
+
1261
def extract_token_metrics_from_intersections(df):
    """
    Extract token metrics from preprocessed intersection data.

    Args:
        df: Preprocessed dataframe with intersection facets

    Returns:
        DataFrame with token metrics for analysis
    """

    def _indexed_series(facet, variable, suffix):
        """Pull one (facet, variable) slice as a Series keyed by base task name."""
        rows = df[(df.facet == facet) & (df.variable == variable)].copy()
        # Strip the metric suffix from cluster_name to recover the task key.
        rows["base_task"] = rows["cluster_name"].str.replace(suffix, "")
        return rows.set_index("base_task")["value"].copy()

    # Mean-value intersection metrics — already re-indexed during
    # preprocessing, so 1.0 means "average across tasks".
    onet_cost = _indexed_series("onet_task::cost", "cost_index", "::index")
    onet_prompt = _indexed_series(
        "onet_task::prompt_tokens", "prompt_tokens_index", "::index"
    )
    onet_completion = _indexed_series(
        "onet_task::completion_tokens", "completion_tokens_index", "::index"
    )

    # API call counts, used for bubble sizing and WLS weights downstream.
    onet_api_records = _indexed_series(
        "onet_task::prompt_tokens", "prompt_tokens_count", "::count"
    )

    # Task usage percentages (cluster_name is already the bare task name here).
    usage_rows = df[(df.facet == "onet_task") & (df.variable == "onet_task_pct")].copy()
    usage_rows["base_task"] = usage_rows["cluster_name"]
    onet_usage_pct = usage_rows.set_index("base_task")["value"].copy()

    # Assemble one row per task, aligned on the cost index.
    metrics = pd.DataFrame(
        {
            "cluster_name": onet_cost.index,
            "cost_per_record": onet_cost,  # Already indexed (1.0 = average)
            "avg_prompt_tokens": onet_prompt.reindex(onet_cost.index),
            "avg_completion_tokens": onet_completion.reindex(onet_cost.index),
        }
    )
    metrics["api_records"] = onet_api_records.reindex(onet_cost.index)
    metrics["usage_pct"] = onet_usage_pct.reindex(onet_cost.index)

    # Derived metrics
    metrics["output_input_ratio"] = (
        metrics["avg_completion_tokens"] / metrics["avg_prompt_tokens"]
    )
    metrics["total_tokens"] = (
        metrics["avg_prompt_tokens"] + metrics["avg_completion_tokens"]
    )

    return metrics
1338
+
1339
+
1340
def add_occupational_categories_to_metrics(
    task_metrics, task_statements, soc_structure
):
    """
    Add occupational categories to task metrics based on ONET mappings.

    Args:
        task_metrics: DataFrame with task metrics
        task_statements: ONET task statements DataFrame
        soc_structure: SOC structure DataFrame

    Returns:
        DataFrame with occupational categories added
    """
    # Normalize both sides of the task-description join key.
    task_statements["task_standardized"] = (
        task_statements["Task"].str.strip().str.lower()
    )
    task_metrics["cluster_name_standardized"] = (
        task_metrics["cluster_name"].str.strip().str.lower()
    )

    # Build {standardized task -> SOC major group code} from the statements.
    task_to_major_group = {}
    for _, rec in task_statements.iterrows():
        if pd.notna(rec["Task"]) and pd.notna(rec["soc_major_group"]):
            task_to_major_group[rec["task_standardized"]] = str(
                int(rec["soc_major_group"])
            )

    # Map cluster names to major group codes.
    task_metrics["soc_major"] = task_metrics["cluster_name_standardized"].map(
        task_to_major_group
    )

    # Map major-group codes to human-readable titles via the SOC structure
    # (first two characters of "Major Group", e.g. "15-0000" -> "15").
    major_groups = soc_structure[soc_structure["Major Group"].notna()].copy()
    major_groups["soc_major"] = major_groups["Major Group"].astype(str).str[:2]
    major_groups["title"] = major_groups["SOC or O*NET-SOC 2019 Title"]
    code_to_title = (
        major_groups[["soc_major", "title"]]
        .drop_duplicates()
        .set_index("soc_major")["title"]
        .to_dict()
    )
    task_metrics["occupational_category"] = task_metrics["soc_major"].map(
        code_to_title
    )

    # Drop tasks that could not be mapped to any category.
    task_metrics = task_metrics[task_metrics["occupational_category"].notna()].copy()

    # Keep the six busiest categories (by total API calls); bucket the rest.
    usage_by_category = (
        task_metrics.groupby("occupational_category")["api_records"]
        .sum()
        .sort_values(ascending=False)
    )
    keep = set(usage_by_category.head(6).index)
    task_metrics["occupational_category"] = task_metrics["occupational_category"].apply(
        lambda cat: cat if cat in keep else "All Other"
    )

    return task_metrics
1410
+
1411
+
1412
def create_token_output_bar_chart(df, output_dir):
    """
    Create bar chart showing average output (completion) tokens by occupational category.

    Builds a horizontal bar chart of mean `avg_completion_tokens` per
    occupational category (top 6 categories by total API records, remainder
    collapsed into "All Other"), saves it as a PNG, and shows it.

    Args:
        df: Preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Filesystem path of the saved PNG.
    """
    # Load ONET mappings for occupational categories
    task_statements, soc_structure = load_onet_mappings()

    # Use preprocessed intersection data
    task_metrics = extract_token_metrics_from_intersections(df)

    # Add occupational categories
    # NOTE(review): add_occupational_categories_to_metrics appears to already
    # collapse categories into top-6 + "All Other"; the regrouping below then
    # re-ranks those collapsed groups. If "All Other" itself lands in the top 6
    # by api_records, one real category gets folded into "All Other" here —
    # confirm this double grouping is intentional.
    task_metrics = add_occupational_categories_to_metrics(
        task_metrics, task_statements, soc_structure
    )

    # Calculate average output tokens by occupational category
    category_stats = (
        task_metrics.groupby("occupational_category")
        .agg(
            {
                "avg_completion_tokens": "mean",  # Average across tasks
                "api_records": "sum",  # Total API calls for ranking
            }
        )
        .reset_index()
    )

    # Find top 6 categories by total API calls
    top_6_categories = category_stats.nlargest(6, "api_records")[
        "occupational_category"
    ].tolist()

    # Group smaller categories as "All Other"
    def categorize(cat):
        return cat if cat in top_6_categories else "All Other"

    task_metrics["category_group"] = task_metrics["occupational_category"].apply(
        categorize
    )

    # Recalculate stats with grouped categories
    final_stats = (
        task_metrics.groupby("category_group")
        .agg(
            {
                "avg_completion_tokens": "mean",  # Average output tokens across tasks
                "api_records": "sum",  # Total usage for reference
            }
        )
        .reset_index()
    )

    # Ascending sort so the largest bar renders at the top of the barh chart.
    final_stats = final_stats.sort_values("avg_completion_tokens", ascending=True)

    # Create figure
    fig, ax = plt.subplots(figsize=(12, 8))

    # Create horizontal bar chart; one color per bar, cycling through COLOR_CYCLE.
    y_pos = np.arange(len(final_stats))
    colors = [COLOR_CYCLE[i % len(COLOR_CYCLE)] for i in range(len(final_stats))]

    ax.barh(
        y_pos,
        final_stats["avg_completion_tokens"],
        color=colors,
        alpha=0.8,
        edgecolor="#333333",
        linewidth=0.5,
    )

    # Add value labels just past the end of each bar.
    # NOTE(review): the fixed +0.02 offset assumes the token *index* is on a
    # roughly 0–1 scale; with raw token counts the labels would overlap the
    # bars — confirm the scale of avg_completion_tokens.
    for i, (idx, row) in enumerate(final_stats.iterrows()):
        ax.text(
            row["avg_completion_tokens"] + 0.02,
            i,
            f"{row['avg_completion_tokens']:.2f}",
            va="center",
            fontsize=11,
            fontweight="bold",
        )

    # Clean up category labels (drop the " Occupations" suffix, compact "and").
    labels = []
    for cat in final_stats["category_group"]:
        clean_cat = cat.replace(" Occupations", "").replace(", and ", " & ")
        labels.append(clean_cat)

    ax.set_yticks(y_pos)
    ax.set_yticklabels(labels, fontsize=10)

    # Formatting
    ax.set_xlabel(
        "Average output token index for observed tasks in a given category",
        fontsize=12,
    )
    ax.set_title(
        "Average output token index across leading occupational categories",
        fontsize=14,
        fontweight="bold",
        pad=20,
    )

    # Grid and styling
    ax.grid(True, alpha=0.3, axis="x")
    ax.set_axisbelow(True)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.tick_params(axis="x", which="major", labelsize=11)

    plt.tight_layout()

    # Save plot
    output_path = Path(output_dir) / "token_output_bar_chart.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
1533
+
1534
+
1535
def create_completion_vs_input_tokens_scatter(df, output_dir):
    """
    Create scatter plot of ln(completion tokens) vs ln(input tokens) by occupational category.

    Each point is a task; bubble area scales with the task's API record count
    and color encodes its occupational category (top 6 by usage + "All Other").
    An OLS best-fit line over the log-log points is overlaid.

    Args:
        df: Preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Filesystem path of the saved PNG.
    """
    # Use preprocessed intersection data
    task_metrics = extract_token_metrics_from_intersections(df)

    # Create figure
    fig, ax = plt.subplots(figsize=(12, 8))

    # Load ONET mappings for occupational categories
    task_statements, soc_structure = load_onet_mappings()

    # Standardize task descriptions for matching (case/whitespace-insensitive join)
    task_statements["task_standardized"] = (
        task_statements["Task"].str.strip().str.lower()
    )
    task_metrics["cluster_name_standardized"] = (
        task_metrics.index.str.strip().str.lower()
    )

    # Create mapping from standardized task to 2-digit SOC major group code
    task_to_major_group = {}
    for _, row in task_statements.iterrows():
        if pd.notna(row["Task"]) and pd.notna(row["soc_major_group"]):
            std_task = row["task_standardized"]
            major_group = str(int(row["soc_major_group"]))[:2]
            task_to_major_group[std_task] = major_group

    # Map cluster names to major groups
    task_metrics["soc_major"] = task_metrics["cluster_name_standardized"].map(
        task_to_major_group
    )

    # Get major occupational groups from SOC structure
    major_groups = soc_structure[soc_structure["Major Group"].notna()].copy()
    major_groups["soc_major"] = major_groups["Major Group"].astype(str).str[:2]
    major_groups["title"] = major_groups["SOC or O*NET-SOC 2019 Title"]

    # Create mapping from major group code to title
    major_group_mapping = (
        major_groups[["soc_major", "title"]]
        .drop_duplicates()
        .set_index("soc_major")["title"]
        .to_dict()
    )

    # Map major group codes to titles
    task_metrics["occupational_category"] = task_metrics["soc_major"].map(
        major_group_mapping
    )

    # Remove unmapped tasks
    task_metrics = task_metrics[task_metrics["occupational_category"].notna()].copy()

    # Find top 6 categories by total API calls and group others as "All Other"
    category_usage = (
        task_metrics.groupby("occupational_category")["api_records"]
        .sum()
        .sort_values(ascending=False)
    )
    top_6_categories = list(category_usage.head(6).index)

    # Group smaller categories as "All Other"
    task_metrics["occupational_category"] = task_metrics["occupational_category"].apply(
        lambda x: x if x in top_6_categories else "All Other"
    )

    # Transform to natural log. Computed once, AFTER filtering, so the best-fit
    # regression below runs on exactly the plotted tasks. (A duplicate
    # pre-filter computation of these two series was dead code and removed.)
    ln_input = np.log(task_metrics["avg_prompt_tokens"])
    ln_output = np.log(task_metrics["avg_completion_tokens"])

    # Re-rank the (already collapsed) categories so the color assignment below
    # matches the token output bar chart exactly.
    category_stats = (
        task_metrics.groupby("occupational_category")
        .agg(
            {
                "avg_completion_tokens": "mean",
                "api_records": "sum",
            }
        )
        .reset_index()
    )

    # Find top 6 categories by total API calls
    top_6_categories = category_stats.nlargest(6, "api_records")[
        "occupational_category"
    ].tolist()

    # Group smaller categories as "All Other"
    def categorize(cat):
        return cat if cat in top_6_categories else "All Other"

    task_metrics["category_group"] = task_metrics["occupational_category"].apply(
        categorize
    )

    # Order categories by mean output tokens (same ordering as the bar chart)
    # so colors line up across figures.
    final_stats = (
        task_metrics.groupby("category_group")
        .agg({"avg_completion_tokens": "mean"})
        .reset_index()
        .sort_values("avg_completion_tokens", ascending=True)
    )

    categories_ordered = final_stats["category_group"].tolist()
    category_colors = {}
    for i, category in enumerate(categories_ordered):
        category_colors[category] = COLOR_CYCLE[i % len(COLOR_CYCLE)]

    # One scatter call per category; bubble area ~ sqrt(api_records).
    for category in categories_ordered:
        category_data = task_metrics[task_metrics["category_group"] == category]
        if not category_data.empty:
            ln_input_cat = np.log(category_data["avg_prompt_tokens"])
            ln_output_cat = np.log(category_data["avg_completion_tokens"])
            bubble_sizes_cat = np.sqrt(category_data["api_records"]) * 2

            ax.scatter(
                ln_input_cat,
                ln_output_cat,
                s=bubble_sizes_cat,
                alpha=0.8,
                c=category_colors[category],
                edgecolors="black",
                linewidth=0.2,
            )

    # Create uniform-size, labeled dummy scatter handles so the legend markers
    # are all the same size regardless of bubble area.
    legend_elements = []
    for category in categories_ordered:
        clean_name = category.replace(" Occupations", "").replace(", and ", " & ")
        # Get count for this category
        category_count = len(task_metrics[task_metrics["category_group"] == category])
        legend_elements.append(
            plt.scatter(
                [],
                [],
                s=100,
                alpha=0.8,
                c=category_colors[category],
                edgecolors="black",
                linewidth=0.2,
                label=f"{clean_name} (N={category_count})",
            )
        )

    # Add line of best fit over all plotted points.
    model = sm.OLS(ln_output, sm.add_constant(ln_input)).fit()
    slope = model.params.iloc[1]
    intercept = model.params.iloc[0]
    r_squared = model.rsquared

    line_x = np.linspace(ln_input.min(), ln_input.max(), 100)
    line_y = slope * line_x + intercept
    ax.plot(
        line_x,
        line_y,
        "k--",
        alpha=0.7,
        linewidth=2,
        label=f"Best fit (R² = {r_squared:.3f}, $\\beta$ = {slope:.3f})",
    )
    # Single legend built from the labeled dummy handles + the fit line.
    # (An earlier bbox_to_anchor legend call was unconditionally replaced by
    # this one and has been removed.)
    ax.legend()

    # Customize plot
    ax.set_xlabel("ln(Input Token Index)", fontsize=12)
    ax.set_ylabel("ln(Output Token Index)", fontsize=12)
    ax.set_title(
        "Output Token Index vs Input Token Index across tasks",
        fontsize=14,
        fontweight="bold",
        pad=20,
    )
    ax.grid(True, alpha=0.3)

    plt.tight_layout()

    # Save plot
    output_path = Path(output_dir) / "completion_vs_input_tokens_scatter.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
1736
+
1737
+
1738
def create_occupational_usage_cost_scatter(df, output_dir):
    """
    Create aggregated scatter plot of usage share vs average cost per API call by occupational category.

    One labeled point per occupational category (no "All Other" grouping),
    plotted in log-log space with an OLS best-fit line. Points are rendered as
    text labels only; the scatter call is invisible and exists to fix axis limits.

    Args:
        df: Preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        str: Filesystem path of the saved PNG.
    """
    # Load ONET mappings for occupational categories
    task_statements, soc_structure = load_onet_mappings()

    # Use preprocessed intersection data
    task_metrics = extract_token_metrics_from_intersections(df)

    # Standardize task descriptions for matching (case/whitespace-insensitive join)
    # NOTE(review): this accesses a "cluster_name" column while the sibling
    # scatter uses task_metrics.index — confirm extract_token_metrics_from_intersections
    # exposes the task name both ways.
    task_statements["task_standardized"] = (
        task_statements["Task"].str.strip().str.lower()
    )
    task_metrics["cluster_name_standardized"] = (
        task_metrics["cluster_name"].str.strip().str.lower()
    )

    # Create mapping from standardized task to major group
    # NOTE(review): no [:2] truncation here, unlike the other mapping builders
    # in this module — works only if soc_major_group is already a 2-digit code;
    # confirm against the ONET data.
    task_to_major_group = {}
    for _, row in task_statements.iterrows():
        if pd.notna(row["Task"]) and pd.notna(row["soc_major_group"]):
            std_task = row["task_standardized"]
            major_group = str(int(row["soc_major_group"]))
            task_to_major_group[std_task] = major_group

    # Map cluster names to major groups
    task_metrics["soc_major"] = task_metrics["cluster_name_standardized"].map(
        task_to_major_group
    )

    # Get major occupational groups from SOC structure
    major_groups = soc_structure[soc_structure["Major Group"].notna()].copy()
    major_groups["soc_major"] = major_groups["Major Group"].astype(str).str[:2]
    major_groups["title"] = major_groups["SOC or O*NET-SOC 2019 Title"]

    # Create a clean mapping from major group code to title
    major_group_mapping = (
        major_groups[["soc_major", "title"]]
        .drop_duplicates()
        .set_index("soc_major")["title"]
        .to_dict()
    )

    # Map major group codes to titles
    task_metrics["occupational_category"] = task_metrics["soc_major"].map(
        major_group_mapping
    )

    # Remove unmapped/not classified tasks from analysis
    task_metrics = task_metrics[task_metrics["occupational_category"].notna()].copy()

    # Aggregate by occupational category using pre-calculated percentages
    category_aggregates = (
        task_metrics.groupby("occupational_category")
        .agg(
            {
                "usage_pct": "sum",  # Sum of pre-calculated task percentages within category
                "cost_per_record": "mean",  # Average cost per API call for this category
            }
        )
        .reset_index()
    )

    # Usage share is already calculated from preprocessing
    category_aggregates["usage_share"] = category_aggregates["usage_pct"]

    # Create figure
    fig, ax = plt.subplots(figsize=(12, 8))

    # Transform variables to natural log
    ln_cost = np.log(category_aggregates["cost_per_record"])
    ln_usage = np.log(category_aggregates["usage_share"])

    # (A per-category color table was computed here previously but never used —
    # the points are drawn as plain text labels below; dead code removed.)

    # Invisible scatter: establishes data limits so the text labels and the
    # fit line are framed correctly.
    ax.scatter(
        ln_cost,
        ln_usage,
        s=0,  # Invisible markers
        alpha=0,
    )

    # Add line of best fit
    model = sm.OLS(ln_usage, sm.add_constant(ln_cost)).fit()
    slope = model.params.iloc[1]
    intercept = model.params.iloc[0]
    r_squared = model.rsquared

    # Generate line points
    x_line = np.linspace(ln_cost.min(), ln_cost.max(), 50)
    y_line = slope * x_line + intercept

    # Plot the line of best fit
    ax.plot(
        x_line,
        y_line,
        "--",
        color="black",
        linewidth=2,
        alpha=0.8,
        label=f"Best fit (R² = {r_squared:.3f}, $\\beta$ = {slope:.3f})",
    )

    # Add legend
    legend = ax.legend(loc="best", frameon=True, facecolor="white")
    legend.get_frame().set_alpha(0.9)

    # Add category labels centered at data points with text wrapping
    for i, row in category_aggregates.iterrows():
        # Clean up and wrap category names
        clean_name = (
            row["occupational_category"]
            .replace(" Occupations", "")
            .replace(", and ", " & ")
        )
        # Wrap long category names to multiple lines (20 chars per line)
        wrapped_name = "\n".join(wrap(clean_name, 20))

        ax.text(
            ln_cost.iloc[i],
            ln_usage.iloc[i],
            wrapped_name,
            ha="center",
            va="center",
            fontsize=8,
            alpha=0.9,
        )

    # Set labels and title
    ax.set_xlabel("ln(Average API Cost Index across tasks)", fontsize=12)
    ax.set_ylabel("ln(Usage share (%))", fontsize=12)
    ax.set_title(
        "Usage share and average API cost index by occupational category",
        fontsize=14,
        fontweight="bold",
        pad=20,
    )

    # Add grid
    ax.grid(True, alpha=0.3)

    # Adjust layout and save
    plt.tight_layout()

    output_path = Path(output_dir) / "occupational_usage_cost_scatter.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
1901
+
1902
+
1903
def get_merged_api_claude_task_data(api_df, cai_df):
    """
    Create merged dataset with API cost/usage data and Claude.ai collaboration modes.

    Only tasks observed on BOTH platforms are kept (inner join on task name).

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame

    Returns:
        DataFrame with API cost data + Claude.ai collaboration patterns for common tasks
    """
    # Per-task API token/cost metrics, indexed by task (cluster) name.
    api_metrics = extract_token_metrics_from_intersections(api_df)

    # Per-task collaboration-mode shares observed on Claude.ai.
    claude_collab_shares = get_collaboration_shares(cai_df)

    # Tasks present on both platforms.
    common_tasks = set(api_metrics.index) & set(claude_collab_shares)

    # API columns copied through verbatim, in output column order.
    api_columns = (
        "cost_per_record",
        "avg_prompt_tokens",
        "avg_completion_tokens",
        "api_records",
        "output_input_ratio",
        "total_tokens",
    )
    # Claude.ai collaboration modes; spaces become underscores in column names.
    collab_modes = (
        "directive",
        "feedback loop",
        "learning",
        "task iteration",
        "validation",
    )

    records = []
    for task_name in common_tasks:
        api_row = api_metrics.loc[task_name]
        shares = claude_collab_shares[task_name]

        record = {"cluster_name": task_name}
        for column in api_columns:
            record[column] = api_row[column]
        # Modes absent from the Claude.ai shares default to 0.
        for mode in collab_modes:
            record[f"collab_{mode.replace(' ', '_')}"] = shares.get(mode, 0)
        records.append(record)

    return pd.DataFrame(records).set_index("cluster_name")
1957
+
1958
+
1959
def reg_build_df(api_df, cai_df):
    """
    Build complete regression dataset for partial regression and full regression analysis.
    Each row is an ONET task with all variables needed for figures and regression.

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame

    Returns:
        DataFrame with complete regression dataset
    """
    # ONET crosswalks used to attach occupational categories.
    task_statements, soc_structure = load_onet_mappings()

    # API metrics merged with Claude.ai collaboration shares, indexed by task.
    merged = get_merged_api_claude_task_data(api_df, cai_df)

    # Attach occupational categories (the helper also applies the "All Other"
    # grouping), then restore the task-name index.
    metrics = add_occupational_categories_to_metrics(
        merged.reset_index(), task_statements, soc_structure
    ).set_index("cluster_name")

    # One missing-data dummy per collaboration mode: 1 when the share column is
    # absent entirely, or when the observed share is exactly zero.
    for mode in ("directive", "feedback_loop", "learning", "task_iteration", "validation"):
        share_col = f"collab_{mode}"
        flag_col = f"collab_{mode}_missing"
        if share_col in metrics.columns:
            metrics[flag_col] = (metrics[share_col] == 0).astype(int)
        else:
            metrics[flag_col] = 1

    # Usage share (percent of all API records) plus log-transformed regressors.
    metrics["usage_share"] = (
        metrics["api_records"] / metrics["api_records"].sum()
    ) * 100
    metrics["ln_usage_share"] = np.log(metrics["usage_share"])
    metrics["ln_cost_per_task"] = np.log(metrics["cost_per_record"])

    # One-hot encode the category (get_dummies drops the original column)...
    encoded = pd.get_dummies(metrics, columns=["occupational_category"], prefix="occ")

    # ...then rebuild the plain category column from the dummies so grouping
    # operations downstream still work.
    dummy_cols = [c for c in encoded.columns if c.startswith("occ_")]
    encoded["occupational_category"] = ""
    for dummy in dummy_cols:
        encoded.loc[encoded[dummy] == 1, "occupational_category"] = dummy.replace(
            "occ_", ""
        )

    return encoded
2027
+
2028
+
2029
def create_partial_regression_plot(api_df, cai_df, output_dir):
    """
    Create partial regression scatter plot of usage share vs cost, controlling for occupational categories.

    Implements the Frisch–Waugh–Lovell two-step: residualize both
    ln(usage share) and ln(cost) on the controls (occupational dummies +
    collaboration-mode shares and their missing flags), then scatter and fit
    the two residual series against each other.

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame
        output_dir: Directory to save the figure

    Returns:
        Tuple of (output_path, regression_results_dict)
    """
    # Use centralized data preparation (includes occupational dummies)
    valid_metrics = reg_build_df(api_df, cai_df)

    # Extract occupational dummies and collaboration variables
    occ_cols = [col for col in valid_metrics.columns if col.startswith("occ_")]
    collab_vars = [
        "collab_directive",
        "collab_feedback_loop",
        "collab_learning",
        "collab_task_iteration",
        "collab_validation",
    ]
    collab_missing_vars = [
        "collab_directive_missing",
        "collab_feedback_loop_missing",
        "collab_learning_missing",
        "collab_task_iteration_missing",
        "collab_validation_missing",
    ]

    # Control variables (all occupational dummies + collaboration modes)
    control_vars = valid_metrics[occ_cols + collab_vars + collab_missing_vars].astype(
        float
    )

    # Ensure dependent variables are float
    y_usage = valid_metrics["ln_usage_share"].astype(float)
    y_cost = valid_metrics["ln_cost_per_task"].astype(float)

    # Step 1: Regress ln(usage_share) on controls. No explicit constant: the
    # full set of occupational dummies spans the intercept.
    usage_model = sm.OLS(y_usage, control_vars).fit()
    usage_residuals = usage_model.resid

    # Step 2: Regress ln(cost) on the same controls (no constant)
    cost_model = sm.OLS(y_cost, control_vars).fit()
    cost_residuals = cost_model.resid

    # Create figure
    fig, ax = plt.subplots(figsize=(14, 10))

    # Single-color scatter of the residuals. (A per-category color table was
    # built here previously but never applied to the plot; that dead code —
    # top-6 ranking, category_group, group_colors — has been removed.)
    ax.scatter(
        cost_residuals,
        usage_residuals,
        s=100,
        alpha=0.8,
        color=COLOR_CYCLE[0],
        edgecolors="black",
        linewidth=0.2,
    )

    # Trend line through the residual cloud; its slope is the partial effect.
    model = sm.OLS(usage_residuals, sm.add_constant(cost_residuals)).fit()
    slope = model.params.iloc[1]
    intercept = model.params.iloc[0]
    r_squared = model.rsquared

    line_x = np.linspace(cost_residuals.min(), cost_residuals.max(), 100)
    line_y = slope * line_x + intercept
    ax.plot(
        line_x,
        line_y,
        "k--",
        alpha=0.8,
        linewidth=2,
        label=f"Partial relationship (R² = {r_squared:.3f})",
    )

    # Customize plot
    ax.set_xlabel("Residual ln(API Cost Index)")
    ax.set_ylabel("Residual ln(Usage share (%))")
    ax.set_title(
        "Task usage share vs API Cost Index \n(partial regression after controlling for task characteristics)",
        fontsize=16,
        fontweight="bold",
        pad=20,
    )
    ax.grid(True, alpha=0.3)

    # Simple legend with just the trend line
    ax.legend(loc="best", frameon=True, facecolor="white", framealpha=0.9, fontsize=11)

    plt.tight_layout()

    # Save plot
    output_path = Path(output_dir) / "partial_regression_plot.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()

    # Collect regression results.
    # NOTE(review): sqrt(R²) drops the sign of the partial correlation; a
    # negative slope still reports a positive "partial_correlation".
    regression_results = {
        "partial_correlation": np.sqrt(r_squared),
        "partial_r_squared": r_squared,
        "slope": slope,
        "intercept": intercept,
        "n_observations": len(valid_metrics),
        "usage_model_summary": str(usage_model.summary()),
        "cost_model_summary": str(cost_model.summary()),
    }

    # Print regression results instead of saving to file
    print("Partial Regression Analysis Results")
    print("=" * 50)
    print(f"Partial correlation: {np.sqrt(r_squared):.4f}")
    print(f"Partial R-squared: {r_squared:.4f}")
    print(f"Slope: {slope:.4f}")
    print(f"Intercept: {intercept:.4f}")
    print(f"Number of observations: {len(valid_metrics)}")
    print("\nUsage Model Summary:")
    print("-" * 30)
    print(usage_model.summary())
    print("\nCost Model Summary:")
    print("-" * 30)
    print(cost_model.summary())

    return str(output_path), regression_results
2185
+
2186
+
2187
def perform_usage_share_regression_unweighted(api_df, cai_df, output_dir):
    """
    Perform unweighted usage share regression analysis using Claude.ai collaboration modes.

    Args:
        api_df: API preprocessed data DataFrame
        cai_df: Claude.ai preprocessed data DataFrame
        output_dir: Directory to save regression results

    Returns:
        OLS model results
    """
    # Centralized data preparation (adds collaboration + occupational dummies).
    data = reg_build_df(api_df, cai_df)

    modes = ("directive", "feedback_loop", "learning", "task_iteration", "validation")

    # Regressors: log cost, collaboration shares, their missing-data flags, and
    # every occupational dummy. No constant — the full dummy set spans the
    # intercept.
    predictors = ["ln_cost_per_task"]
    predictors += [f"collab_{mode}" for mode in modes]
    predictors += [f"collab_{mode}_missing" for mode in modes]
    predictors += [col for col in data.columns if col.startswith("occ_")]

    # Ensure all columns are numeric
    X = data[predictors].astype(float)
    y = data["ln_usage_share"].astype(float)

    # Unweighted OLS, reported with heteroskedasticity-robust (HC1) errors.
    return sm.OLS(y, X).fit().get_robustcov_results(cov_type="HC1")
2241
+
2242
+
2243
def create_btos_ai_adoption_chart(btos_df, ref_dates_df, output_dir):
    """
    Create BTOS AI adoption time series chart.

    Plots the Census BTOS "uses AI" yes-share over time with a 3-period moving
    average, saves the figure as a PNG, and shows it.

    Args:
        btos_df: BTOS response estimates DataFrame
        ref_dates_df: Collection and reference dates DataFrame.
            NOTE: its "Ref End" column is converted to datetime IN PLACE
            (caller's frame is mutated).
        output_dir: Directory to save the figure

    Returns:
        str: Filesystem path of the saved PNG.
    """
    # Filter for Question ID 7, Answer ID 1 (Yes response to AI usage)
    btos_filtered = btos_df[(btos_df["Question ID"] == 7) & (btos_df["Answer ID"] == 1)]

    # Get date columns (string columns that look like YYYYWW)
    # assumes the first 4 columns are metadata, not periods — TODO confirm
    date_columns = [
        col for col in btos_df.columns[4:] if str(col).isdigit() and len(str(col)) == 6
    ]

    # Extract time series: transpose so periods become rows, one value column.
    btos_ts = btos_filtered[date_columns].T
    btos_ts.columns = ["percentage"]

    # Map each sample period (Smpdt) to its reference-period end date.
    ref_dates_df["Ref End"] = pd.to_datetime(ref_dates_df["Ref End"])
    btos_ts = btos_ts.reset_index()
    btos_ts["smpdt"] = btos_ts["index"].astype(int)
    btos_ts = btos_ts.merge(
        ref_dates_df[["Smpdt", "Ref End"]],
        left_on="smpdt",
        right_on="Smpdt",
        how="left",
    )
    btos_ts = btos_ts.set_index("Ref End")[["percentage"]]

    # Convert percentage strings (e.g. "3.8%") to numeric
    btos_ts["percentage"] = btos_ts["percentage"].str.rstrip("%").astype(float)
    # Chronological order; dropna discards periods with no matched date or value.
    btos_ts = btos_ts.sort_index().dropna()

    # Calculate 3-period moving average (first two rows are NaN by design)
    btos_ts["moving_avg"] = btos_ts["percentage"].rolling(window=3).mean()

    # Create figure
    fig, ax = plt.subplots(figsize=(14, 8))

    # Plot main line
    ax.plot(
        btos_ts.index,
        btos_ts["percentage"],
        linewidth=3,
        marker="o",
        markersize=6,
        label="AI Adoption Rate Among US Businesses",
        zorder=3,
    )

    # Plot moving average
    ax.plot(
        btos_ts.index,
        btos_ts["moving_avg"],
        linewidth=2,
        linestyle="--",
        alpha=0.8,
        label="3-Period Moving Average",
        zorder=2,
    )

    # Styling
    ax.set_xlabel("Date", fontsize=14)
    ax.set_ylabel("AI adoption rate (%)", fontsize=14)
    ax.set_title(
        "Census reported AI adoption rates among US businesses from the Business Trends and Outlook Survey",
        fontsize=16,
        fontweight="bold",
        pad=20,
    )

    # Y axis from zero with 10% headroom above the peak.
    ax.set_ylim(0, max(btos_ts["percentage"]) * 1.1)

    # Rotate x-axis labels
    ax.tick_params(axis="x", rotation=45)

    # Grid and styling
    ax.grid(True, alpha=0.3, linestyle="--")
    ax.set_axisbelow(True)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)

    # Legend
    ax.legend(loc="upper left", fontsize=11, frameon=True, facecolor="white")

    plt.tight_layout()

    # Save plot
    output_path = Path(output_dir) / "btos_ai_adoption_chart.png"
    plt.savefig(output_path, dpi=300, bbox_inches="tight")
    plt.show()
    return str(output_path)
release_2025_09_15/code/aei_analysis_functions_claude_ai.py ADDED
@@ -0,0 +1,2926 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Analysis functions for AEI Report v3 Claude.ai chapter
4
+ """
5
+
6
+ import textwrap
7
+
8
+ import geopandas as gpd
9
+ import matplotlib.colors as mcolors
10
+ import matplotlib.patches as mpatches
11
+ import matplotlib.pyplot as plt
12
+ import numpy as np
13
+ import pandas as pd
14
+ import statsmodels.api as sm
15
+ from matplotlib.colors import LinearSegmentedColormap, Normalize, TwoSlopeNorm
16
+ from matplotlib.lines import Line2D
17
+ from matplotlib.patches import FancyBboxPatch, Patch
18
+ from mpl_toolkits.axes_grid1 import make_axes_locatable
19
+
20
# Global list of excluded countries (ISO-3 codes) — markets where Claude is
# not available; maps render these in grey and they are dropped from indices.
EXCLUDED_COUNTRIES = [
    "AFG",
    "BLR",
    "COD",
    "CAF",
    "CHN",
    "CUB",
    "ERI",
    "ETH",
    "HKG",
    "IRN",
    "PRK",
    "LBY",
    "MLI",
    "MMR",
    "MAC",
    "NIC",
    "RUS",
    "SDN",
    "SOM",
    "SSD",
    "SYR",
    "VEN",
    "YEM",
]

# Minimum observation thresholds: geographies with fewer usage observations
# than these are excluded from per-geography statistics (see
# get_filtered_geographies).
MIN_OBSERVATIONS_COUNTRY = 200  # Threshold for countries
MIN_OBSERVATIONS_US_STATE = 100  # Threshold for US states

# Define the tier colors, ordered lightest (lowest tier) to darkest (highest).
TIER_COLORS_LIST = ["#E6DBD0", "#E5C5AB", "#E4AF86", "#E39961", "#D97757"]

# Anthropic brand color for borders
ANTHROPIC_OAT = "#E3DACC"
AUGMENTATION_COLOR = "#00A078"
AUTOMATION_COLOR = "#FF9940"

# Standard tier color mapping used throughout (tier label -> hex color).
TIER_COLORS_DICT = {
    "Minimal": TIER_COLORS_LIST[0],  # Lightest
    "Emerging (bottom 25%)": TIER_COLORS_LIST[1],
    "Lower middle (25-50%)": TIER_COLORS_LIST[2],
    "Upper middle (50-75%)": TIER_COLORS_LIST[3],
    "Leading (top 25%)": TIER_COLORS_LIST[4],  # Darkest
}

# Standard tier ordering (highest tier first) used for legend construction.
TIER_ORDER = [
    "Leading (top 25%)",
    "Upper middle (50-75%)",
    "Lower middle (25-50%)",
    "Emerging (bottom 25%)",
    "Minimal",
]

# Numeric tier color mapping (for tier values 0-4)
TIER_COLORS_NUMERIC = {i: color for i, color in enumerate(TIER_COLORS_LIST)}

# Numeric tier name mapping (for tier values 1-4 in actual data)
TIER_NAMES_NUMERIC = {
    1: "Emerging (bottom 25%)",
    2: "Lower middle (25-50%)",
    3: "Upper middle (50-75%)",
    4: "Leading (top 25%)",
}

# Create a custom colormap that can be used for continuous variables,
# interpolating across the five tier colors.
CUSTOM_CMAP = LinearSegmentedColormap.from_list("custom_tier", TIER_COLORS_LIST, N=256)

# Map layout constants (axes-fraction units for figure insets).
MAP_PADDING_X = 0.25  # Horizontal padding for legend space
MAP_PADDING_Y = 0.05  # Vertical padding
ALASKA_INSET_BOUNDS = [0.26, 0.18, 0.15, 0.15]  # [left, bottom, width, height]
HAWAII_INSET_BOUNDS = [0.40, 0.18, 0.11, 0.11]  # [left, bottom, width, height]
+
97
+
98
+ # Figure style and setup
99
def setup_plot_style():
    """Configure matplotlib with the report's shared figure defaults."""
    plt.style.use("default")
    # White backgrounds, subtle grid, 150 dpi — applied process-wide.
    overrides = {
        "figure.dpi": 150,
        "savefig.dpi": 150,
        "font.size": 10,
        "axes.labelsize": 11,
        "axes.titlesize": 12,
        "xtick.labelsize": 9,
        "ytick.labelsize": 9,
        "legend.fontsize": 9,
        "figure.facecolor": "white",
        "axes.facecolor": "white",
        "savefig.facecolor": "white",
        "axes.edgecolor": "#333333",
        "axes.linewidth": 0.8,
        "axes.grid": True,
        "grid.alpha": 0.3,
        "grid.linestyle": "-",
        "grid.linewidth": 0.5,
        "axes.axisbelow": True,
    }
    plt.rcParams.update(overrides)
124
+
125
+
126
def create_figure(figsize=(12, 8), tight_layout=True, nrows=1, ncols=1):
    """Create a figure with consistent settings.

    Args:
        figsize: Figure size tuple
        tight_layout: Whether to use tight layout
        nrows: Number of subplot rows
        ncols: Number of subplot columns

    Returns:
        fig, ax or fig, axes depending on subplot configuration
    """
    fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
    if tight_layout:
        fig.tight_layout()
    else:
        # Explicitly disable the layout engine to prevent warnings
        fig.set_layout_engine(layout="none")
    return fig, axes
145
+
146
+
147
def format_axis(
    ax,
    xlabel=None,
    ylabel=None,
    title=None,
    xlabel_size=11,
    ylabel_size=11,
    title_size=13,
    grid=True,
    grid_alpha=0.3,
):
    """Apply consistent axis formatting; returns the axis for chaining."""
    # Only set the pieces the caller actually supplied.
    if xlabel:
        ax.set_xlabel(xlabel, fontsize=xlabel_size)
    if ylabel:
        ax.set_ylabel(ylabel, fontsize=ylabel_size)
    if title:
        ax.set_title(title, fontsize=title_size, fontweight="bold", pad=15)
    if grid:
        ax.grid(True, alpha=grid_alpha)
    return ax
168
+
169
+
170
def get_color_normalizer(values, center_at_one=False, vmin=None, vmax=None):
    """Create an appropriate color normalizer for the data.

    When center_at_one is True, returns a TwoSlopeNorm diverging around 1.0
    (range widened to at least [0.1, 2.0]); otherwise a plain Normalize over
    the observed (or supplied) range.
    """
    if center_at_one:
        lo = min(values.min(), 0.1) if vmin is None else vmin
        hi = max(values.max(), 2.0) if vmax is None else vmax
        return TwoSlopeNorm(vmin=lo, vcenter=1.0, vmax=hi)
    lo = values.min() if vmin is None else vmin
    hi = values.max() if vmax is None else vmax
    return Normalize(vmin=lo, vmax=hi)
186
+
187
+
188
def create_tier_legend(
    ax,
    tier_colors,
    tiers_in_data,
    excluded_countries=False,
    no_data=False,
    loc="lower left",
    title="Anthropic AI Usage Index tier",
):
    """Create a consistent tier legend for maps; returns the axis."""
    # Tier swatches, in canonical order, limited to tiers present in the data.
    handles = [
        mpatches.Patch(facecolor=tier_colors[tier], edgecolor="none", label=tier)
        for tier in TIER_ORDER
        if tier in tiers_in_data
    ]

    # Optional extra swatches for the two special map fills.
    if excluded_countries:
        handles.append(
            mpatches.Patch(
                facecolor="#c0c0c0", edgecolor="white", label="Claude not available"
            )
        )
    if no_data:
        handles.append(
            mpatches.Patch(facecolor="#f0f0f0", edgecolor="white", label="No data")
        )

    if handles:
        ax.legend(
            handles=handles,
            loc=loc,
            fontsize=10,
            bbox_to_anchor=(0, 0) if loc == "lower left" else None,
            title=title,
            title_fontsize=11,
            frameon=True,
            fancybox=True,
            shadow=True,
        )

    return ax
233
+
234
+
235
+ # Data wrangling helpers
236
+
237
+
238
def filter_df(df, **kwargs):
    """Universal filter helper for dataframes.

    Args:
        df: DataFrame to filter
        **kwargs: Column-value pairs to filter on
            Lists are handled with .isin()

    Returns:
        Filtered DataFrame
    """
    keep = pd.Series(True, index=df.index)

    for column, target in kwargs.items():
        # None means "don't filter on this column"; unknown columns are ignored.
        if target is None or column not in df.columns:
            continue
        if isinstance(target, list):
            keep &= df[column].isin(target)
        else:
            keep &= df[column] == target

    return df[keep]
261
+
262
+
263
def get_filtered_geographies(df, min_obs_country=None, min_obs_state=None):
    """
    Get lists of countries and states that meet MIN_OBSERVATIONS thresholds.

    This function does NOT filter the dataframe - it only identifies which
    geographies meet the thresholds. The full dataframe is preserved
    so we can still report statistics for all geographies.

    Args:
        df: Input dataframe
        min_obs_country: Minimum observations for countries (default: MIN_OBSERVATIONS_COUNTRY)
        min_obs_state: Minimum observations for states (default: MIN_OBSERVATIONS_US_STATE)

    Returns:
        Tuple of (filtered_countries list, filtered_states list)
    """
    # Fall back to module-level defaults when thresholds are not supplied.
    if min_obs_country is None:
        min_obs_country = MIN_OBSERVATIONS_COUNTRY
    if min_obs_state is None:
        min_obs_state = MIN_OBSERVATIONS_US_STATE

    def _passing(facet, threshold):
        # geo_ids whose usage_count meets the threshold, minus the
        # catch-all "not_classified" bucket.
        counts = filter_df(df, facet=facet, variable="usage_count").set_index(
            "geo_id"
        )["value"]
        return [g for g in counts[counts >= threshold].index if g != "not_classified"]

    return _passing("country", min_obs_country), _passing("state_us", min_obs_state)
304
+
305
+
306
def filter_requests_by_threshold(df, geography, geo_id, level=1, threshold=1.0):
    """
    Filter requests to only include requests at a specific level that meet threshold requirements.

    Args:
        df: Long format dataframe with request data
        geography: Current geography level ('country' or 'state_us')
        geo_id: Current geography ID (e.g., 'USA', 'CA')
        level: Request level to filter (default=1 for middle aggregated)
        threshold: Minimum percentage threshold (default=1.0%)

    Returns:
        List of valid cluster_names that:
        1. Are at the specified level (default level 1)
        2. Have >= threshold % in the current geography
        3. Have >= threshold % in the parent geography (USA for states, GLOBAL for countries)
    """

    def _request_pcts(geo_level, gid):
        # Slice request_pct rows for one geography at the requested level.
        return filter_df(
            df,
            geography=geo_level,
            geo_id=gid,
            facet="request",
            level=level,
            variable="request_pct",
        )

    # Resolve the parent geography; global has no parent.
    if geography == "state_us":
        parent_geography, parent_geo = "country", "USA"
    elif geography == "country":
        parent_geography, parent_geo = "global", "GLOBAL"
    else:  # global: only the local threshold applies
        local = _request_pcts(geography, geo_id)
        return local[local["value"] >= threshold]["cluster_name"].tolist()

    local = _request_pcts(geography, geo_id)
    parent = _request_pcts(parent_geography, parent_geo)

    # A cluster must clear the threshold both locally and in the parent.
    local_valid = set(local[local["value"] >= threshold]["cluster_name"])
    parent_valid = set(parent[parent["value"] >= threshold]["cluster_name"])
    return list(local_valid & parent_valid)
370
+
371
+
372
+ # Data loading
373
+
374
+
375
def load_world_shapefile():
    """Load and prepare the Natural Earth world shapefile for mapping."""
    url = "https://naciscdn.org/naturalearth/10m/cultural/ne_10m_admin_0_countries_iso.zip"
    world = gpd.read_file(url)

    # Drop Antarctica entirely, then reproject to Robinson for a nicer
    # world-map appearance.
    world = world[world["ISO_A3_EH"] != "ATA"]
    world = world.to_crs("+proj=robin")

    # Flag countries where Claude is unavailable (global EXCLUDED_COUNTRIES).
    world["is_excluded"] = world["ISO_A3_EH"].isin(EXCLUDED_COUNTRIES)

    return world
390
+
391
+
392
def load_us_states_shapefile():
    """Load and prepare US states shapefile for mapping.

    Downloads the Census Bureau 20m-resolution state boundaries and drops
    the territories, keeping the 50 states plus DC.
    """
    import ssl

    # Create unverified SSL context to handle Census Bureau cert issues.
    # NOTE(review): this monkeypatches urllib's default HTTPS context
    # process-wide, disabling certificate verification for every subsequent
    # download in this process, not just this one — consider scoping the
    # workaround or verifying it is still required.
    ssl._create_default_https_context = ssl._create_unverified_context

    states_url = (
        "https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_state_20m.zip"
    )
    states = gpd.read_file(states_url)

    # Filter out territories but keep all 50 states and DC
    states = states[~states["STUSPS"].isin(["PR", "VI", "MP", "GU", "AS"])]

    return states
408
+
409
+
410
def merge_geo_data(shapefile, df_data, geo_column, columns_to_merge, is_tier=False):
    """Merge data with geographic shapefile.

    Args:
        shapefile: GeoDataFrame (world or states)
        df_data: DataFrame with data to merge
        geo_column: Column in shapefile to join on (e.g., 'ISO_A3_EH', 'STUSPS')
        columns_to_merge: List of columns to merge from df_data
        is_tier: Whether this is tier data (includes cluster_name)

    Returns:
        Merged GeoDataFrame
    """
    # Copy so the caller's list is never mutated; tier data always carries
    # its cluster_name label along.
    cols = list(columns_to_merge)
    if is_tier and "cluster_name" not in cols:
        cols.append("cluster_name")

    return shapefile.merge(
        df_data[cols], left_on=geo_column, right_on="geo_id", how="left"
    )
429
+
430
+
431
def prepare_map_data(
    geo_df,
    value_column="value",
    center_at_one=False,
    excluded_mask=None,
):
    """Prepare data and normalization for map plotting.

    Args:
        geo_df: GeoDataFrame with geographic data and values to plot
        value_column: Name of column containing values to plot (default: "value")
        center_at_one: If True, center color scale at 1.0 for diverging colormap (default: False)
        excluded_mask: Boolean Series indicating which rows to exclude from normalization
            (e.g., countries where service isn't available). If None, no exclusions.

    Returns:
        tuple: (plot_column_name, norm) where norm is the matplotlib Normalize object
    """
    if excluded_mask is None:
        excluded_mask = pd.Series(False, index=geo_df.index)

    # Normalize only over rows with data that are not excluded.
    usable = geo_df.loc[geo_df[value_column].notna() & ~excluded_mask, value_column]

    # Fall back to a [0, 1] range when nothing is plottable.
    vmin = usable.min() if len(usable) > 0 else 0
    vmax = usable.max() if len(usable) > 0 else 1
    norm = get_color_normalizer(
        usable, center_at_one=center_at_one, vmin=vmin, vmax=vmax
    )

    return value_column, norm
461
+
462
+
463
+ # Main visualization functions
464
+
465
+
466
def plot_world_map(
    ax, world, data_column="value", tier_colors=None, cmap=None, norm=None
):
    """Plot world map with data.

    Draws layers in order: valued countries, then excluded countries (grey),
    then no-data countries (light grey), so special fills sit on top.

    Args:
        ax: matplotlib axis
        world: GeoDataFrame with world data (already merged with values)
        data_column: column name containing data to plot
        tier_colors: dict mapping tier names to colors (for categorical)
        cmap: colormap (for continuous)
        norm: normalization (for continuous)
    """
    if tier_colors:
        # Categorical mode: plot each tier with its color, skipping
        # excluded countries (they get the grey fill below).
        for tier, color in tier_colors.items():
            tier_countries = world[
                (world["cluster_name"] == tier) & (~world["is_excluded"])
            ]
            tier_countries.plot(ax=ax, color=color, edgecolor="white", linewidth=0.5)
    else:
        # Continuous mode: colormap over non-null, non-excluded values.
        world_with_data = world[
            world[data_column].notna() & (world["is_excluded"] == False)
        ]
        world_with_data.plot(
            column=data_column, ax=ax, cmap=cmap, norm=norm, legend=False
        )

    # Plot excluded countries (Claude not available) in grey.
    excluded = world[world["is_excluded"] == True]
    if not excluded.empty:
        excluded.plot(ax=ax, color="#c0c0c0", edgecolor="white", linewidth=0.5)

    # Plot no-data countries; in tier mode "no data" means a missing
    # cluster_name rather than a missing value.
    no_data = world[
        (world[data_column if not tier_colors else "cluster_name"].isna())
        & (~world["is_excluded"])
    ]
    if not no_data.empty:
        no_data.plot(ax=ax, color="#f0f0f0", edgecolor="white", linewidth=0.5)

    # Set appropriate bounds for the Robinson projection (meters).
    ax.set_xlim(-17000000, 17000000)
    ax.set_ylim(-8500000, 8500000)
511
+
512
+
513
def plot_us_states_map(
    fig, ax, states, data_column="value", tier_colors=None, cmap=None, norm=None
):
    """Plot US states map with Alaska and Hawaii insets.

    The continental US is drawn on `ax`; Alaska and Hawaii are drawn on two
    extra figure-level inset axes so they don't distort the main extent.

    Args:
        fig: matplotlib figure
        ax: main axis for continental US
        states: GeoDataFrame with state data (already merged with values)
        data_column: column name containing data to plot
        tier_colors: dict mapping tier names to colors (for categorical)
        cmap: colormap (for continuous)
        norm: normalization (for continuous)
    """
    # Project to EPSG:2163 for US Albers Equal Area
    states = states.to_crs("EPSG:2163")

    # Plot continental US (everything except AK and HI)
    continental = states[~states["STUSPS"].isin(["AK", "HI"])]

    # First plot all continental states as a no-data background so states
    # without values still appear.
    continental.plot(ax=ax, color="#f0f0f0", edgecolor="white", linewidth=0.5)

    # Plot continental states with data
    if tier_colors:
        # Categorical mode: plot each tier with its color.
        for tier, color in tier_colors.items():
            tier_states = continental[continental["cluster_name"] == tier]
            if not tier_states.empty:
                tier_states.plot(ax=ax, color=color, edgecolor="white", linewidth=0.5)
    else:
        # Continuous mode: colormap over states that have values.
        continental_with_data = continental[continental[data_column].notna()]
        if not continental_with_data.empty:
            continental_with_data.plot(
                column=data_column, ax=ax, cmap=cmap, norm=norm, legend=False
            )

    # Widen axis limits with padding so the legend has room.
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    x_padding = (xlim[1] - xlim[0]) * MAP_PADDING_X
    y_padding = (ylim[1] - ylim[0]) * MAP_PADDING_Y
    ax.set_xlim(xlim[0] - x_padding, xlim[1] + x_padding)
    ax.set_ylim(ylim[0] - y_padding, ylim[1] + y_padding)

    # Add Alaska inset (figure-fraction bounds from module constant).
    akax = fig.add_axes(ALASKA_INSET_BOUNDS)
    akax.axis("off")

    alaska = states[states["STUSPS"] == "AK"]
    if not alaska.empty:
        # Background fill first, then recolor if a value/tier exists.
        alaska.plot(ax=akax, color="#f0f0f0", edgecolor="white", linewidth=0.5)

        if tier_colors and alaska["cluster_name"].notna().any():
            tier_name = alaska["cluster_name"].iloc[0]
            if tier_name in tier_colors:
                alaska.plot(
                    ax=akax,
                    color=tier_colors[tier_name],
                    edgecolor="white",
                    linewidth=0.5,
                )
        elif not tier_colors and alaska[data_column].notna().any():
            alaska.plot(column=data_column, ax=akax, cmap=cmap, norm=norm, legend=False)

    # Add Hawaii inset (same pattern as Alaska).
    hiax = fig.add_axes(HAWAII_INSET_BOUNDS)
    hiax.axis("off")

    hawaii = states[states["STUSPS"] == "HI"]
    if not hawaii.empty:
        hawaii.plot(ax=hiax, color="#f0f0f0", edgecolor="white", linewidth=0.5)

        if tier_colors and hawaii["cluster_name"].notna().any():
            tier_name = hawaii["cluster_name"].iloc[0]
            if tier_name in tier_colors:
                hawaii.plot(
                    ax=hiax,
                    color=tier_colors[tier_name],
                    edgecolor="white",
                    linewidth=0.5,
                )
        elif not tier_colors and hawaii[data_column].notna().any():
            hawaii.plot(column=data_column, ax=hiax, cmap=cmap, norm=norm, legend=False)
598
+
599
+
600
def plot_usage_index_bars(
    df,
    geography="country",
    top_n=None,
    figsize=(12, 8),
    title=None,
    filtered_entities=None,
    show_usage_counts=True,
    cmap=CUSTOM_CMAP,
):
    """
    Create horizontal bar chart of Anthropic AI Usage Index.

    Args:
        df: Long format dataframe
        geography: 'country' or 'state_us'
        top_n: Number of top entities to show (None for all)
        figsize: Figure size
        title: Chart title
        filtered_entities: List of geo_id values to include (if None, include all)
        show_usage_counts: If True, show usage counts in labels (default: True)
        cmap: Colormap used to shade bars by value

    Returns:
        The matplotlib Figure containing the chart.
    """
    # Get the per-capita usage index for the requested geography level.
    df_metric = filter_df(
        df, geography=geography, facet=geography, variable="usage_per_capita_index"
    )

    # Apply entity filtering if provided.
    if filtered_entities is not None:
        df_metric = df_metric[df_metric["geo_id"].isin(filtered_entities)]

    # Merge in usage counts (as "value_usage") only when they will be shown.
    if show_usage_counts:
        df_usage = filter_df(
            df, geography=geography, facet=geography, variable="usage_count"
        )
        df_metric = df_metric.merge(
            df_usage[["geo_id", "value"]],
            on="geo_id",
            suffixes=("", "_usage"),
            how="left",
        )

    # Select entities to display (ascending so the largest ends up on top).
    if top_n is None or top_n >= len(df_metric):
        df_top = df_metric.sort_values("value", ascending=True)
        # Adjust figure height for many entities.
        if len(df_top) > 20:
            figsize = (figsize[0], max(10, len(df_top) * 0.3))
    else:
        df_top = df_metric.nlargest(top_n, "value")
        df_top = df_top.sort_values("value", ascending=True)

    fig, ax = create_figure(figsize=figsize)

    values = df_top["value"].values
    min_val = values.min()
    max_val = values.max()

    # Symmetric color scaling around the baseline value of 1.
    max_distance = max(abs(min_val - 1), abs(max_val - 1))
    if max_distance > 0:
        # Normalize to 0-1 centered at 0.5 for value 1, then truncate the
        # colormap ends to avoid overly light/dark extremes.
        normalized = 0.5 + (values - 1) / (2 * max_distance)
        truncate_low = 0.2
        truncate_high = 0.8
        normalized = truncate_low + normalized * (truncate_high - truncate_low)
        normalized = np.clip(normalized, truncate_low, truncate_high)
    else:
        normalized = np.ones_like(values) * 0.5

    colors = cmap(normalized)

    # Horizontal bars with entity names on the y axis.
    y_positions = range(len(df_top))
    bars = ax.barh(y_positions, values, color=colors, height=0.7)
    ax.set_yticks(y_positions)
    ax.set_yticklabels(df_top["geo_name"].values)
    ax.set_ylim(-0.5, len(df_top) - 0.5)

    # Baseline reference at parity (usage share == population share).
    ax.axvline(x=1.0, color="black", linestyle="--", alpha=0.5, linewidth=1)

    # Leave extra room on the right for the value labels.
    if max_val > 2:
        ax.set_xlim(0, max_val * 1.25)
    else:
        ax.set_xlim(0, max_val * 1.2)

    # Annotate each bar with its value (and, optionally, its usage count).
    for i, bar in enumerate(bars):
        width = bar.get_width()
        label = f"{width:.2f}"

        # BUG FIX: usage counts were previously read unconditionally, which
        # raised KeyError when show_usage_counts=False (the "value_usage"
        # column only exists after the merge above). Only format them when
        # they were merged in.
        usage_str = None
        if show_usage_counts:
            usage_count = df_top.iloc[i]["value_usage"]
            if usage_count >= 1000:
                usage_str = f"{usage_count / 1000:.1f}k"
            else:
                usage_str = f"{int(usage_count)}"

        if usage_str is not None and (not top_n or top_n > 20):
            # Tall charts: combine value with count to avoid overlapping text.
            ax.text(
                width + 0.03,
                bar.get_y() + bar.get_height() / 2.0,
                f"{label} (N={usage_str})",
                ha="left",
                va="center",
                fontsize=8,
            )
        else:
            # Value label to the right of the bar.
            ax.text(
                width + 0.03,
                bar.get_y() + bar.get_height() / 2.0,
                label,
                ha="left",
                va="center",
                fontsize=9,
            )
            if usage_str is not None:
                # Usage count inside the bar.
                ax.text(
                    0.05,
                    bar.get_y() + bar.get_height() / 2.0,
                    f"N = {usage_str}",
                    ha="left",
                    va="center",
                    fontsize=8,
                    color="white",
                )

    # Title falls back to a description of what was plotted.
    if top_n:
        default_title = f"Top {top_n} {'countries' if geography == 'country' else 'US states'} by Anthropic AI Usage Index"
    else:
        default_title = f"Anthropic AI Usage Index by {'country' if geography == 'country' else 'US state'}"

    format_axis(
        ax,
        xlabel="Anthropic AI Usage Index (usage % / working-age population %)",
        title=title or default_title,
        grid=True,
        grid_alpha=0.3,
    )

    return fig
763
+
764
+
765
def plot_variable_bars(
    df,
    variable,
    facet,
    geography="country",
    geo_id=None,
    top_n=None,
    figsize=(12, 8),
    title=None,
    xlabel=None,
    filtered_entities=None,
    cmap=CUSTOM_CMAP,
    normalize=False,
    exclude_not_classified=False,
):
    """
    Create horizontal bar chart for any variable.

    Args:
        df: Long format dataframe
        variable: Variable name to plot (e.g., 'soc_pct', 'gdp_per_capita')
        facet: Facet to use
        geography: 'country' or 'state_us'
        geo_id: Optional specific geo_id to filter (e.g., 'USA' for SOC data)
        top_n: Number of top entities to show (None for all)
        figsize: Figure size
        title: Chart title
        xlabel: x-axis label
        filtered_entities: List of cluster_name or geo_id values to include
        cmap: Colormap to use
        normalize: If True, rescale values to sum to 100% (useful for percentages)
        exclude_not_classified: If True, exclude 'not_classified' entries before normalizing

    Returns:
        The matplotlib Figure containing the bar chart.

    Raises:
        ValueError: If no rows remain to plot after filtering.
    """
    # Get data
    df_metric = filter_df(
        df, geography=geography, facet=facet, variable=variable, geo_id=geo_id
    )

    # Exclude not_classified if requested (before normalization)
    if exclude_not_classified:
        # Check both cluster_name and geo_id columns
        if "cluster_name" in df_metric.columns:
            df_metric = df_metric[
                ~df_metric["cluster_name"].isin(["not_classified", "none"])
            ]
        if "geo_id" in df_metric.columns:
            df_metric = df_metric[~df_metric["geo_id"].isin(["not_classified", "none"])]

    # Normalize if requested (after filtering not_classified).
    # Copy first: df_metric is a filtered view at this point, and assigning
    # into it would trigger pandas' SettingWithCopyWarning (and could fail
    # to update, or mutate the caller's frame).
    if normalize:
        total_sum = df_metric["value"].sum()
        if total_sum > 0:
            df_metric = df_metric.copy()
            df_metric["value"] = (df_metric["value"] / total_sum) * 100

    # Apply entity filtering if provided
    if filtered_entities is not None:
        # Check if we're filtering by cluster_name or geo_id
        if "cluster_name" in df_metric.columns:
            df_metric = df_metric[df_metric["cluster_name"].isin(filtered_entities)]
        else:
            df_metric = df_metric[df_metric["geo_id"].isin(filtered_entities)]

    # Guard: an empty selection would otherwise crash below on values.min()
    # with an opaque "zero-size array to reduction" numpy error.
    if df_metric.empty:
        raise ValueError(f"No data to plot for variable={variable!r} after filtering")

    # Select entities to display
    if top_n is None or top_n >= len(df_metric):
        # Show all entities, sorted by lowest value first
        df_top = df_metric.sort_values("value", ascending=True)
        # Adjust figure height for many entities
        if len(df_top) > 20:
            figsize = (figsize[0], max(10, len(df_top) * 0.3))
    else:
        # Select top N entities
        df_top = df_metric.nlargest(top_n, "value")
        df_top = df_top.sort_values("value", ascending=True)

    # Create figure
    fig, ax = create_figure(figsize=figsize)

    # Get colormap and colors
    values = df_top["value"].values
    min_val = values.min()
    max_val = values.max()

    # Linear color mapping
    if max_val > min_val:
        normalized = (values - min_val) / (max_val - min_val)
        # Truncate to [0.2, 0.8] to avoid the colormap's washed-out extremes
        normalized = 0.2 + normalized * 0.6
    else:
        # All values identical: use the colormap midpoint everywhere
        normalized = np.ones_like(values) * 0.5

    colors = cmap(normalized)

    # Create horizontal bars
    y_positions = range(len(df_top))
    bars = ax.barh(y_positions, values, color=colors, height=0.7)

    # Set y-tick labels
    ax.set_yticks(y_positions)
    # Use cluster_name or geo_name depending on what's available
    if "cluster_name" in df_top.columns:
        labels = df_top["cluster_name"].values
    elif "geo_name" in df_top.columns:
        labels = df_top["geo_name"].values
    else:
        labels = df_top["geo_id"].values
    ax.set_yticklabels(labels)

    # Set y-axis limits to reduce white space
    ax.set_ylim(-0.5, len(df_top) - 0.5)

    # Calculate and set x-axis limits
    x_range = max_val - min_val
    if min_val < 0:
        # Include negative values with some padding
        ax.set_xlim(min_val - x_range * 0.1, max_val + x_range * 0.2)
    else:
        # Positive values only
        ax.set_xlim(0, max_val * 1.2)

    # Add value labels (one per bar; was a pointless enumerate before)
    for bar in bars:
        width = bar.get_width()
        # Format based on value magnitude
        if abs(width) >= 1000:
            label = f"{width:.0f}"
        elif abs(width) >= 10:
            label = f"{width:.1f}"
        else:
            label = f"{width:.2f}"

        # Position label just past the end of the bar, on the side it points to
        if width < 0:
            ha = "right"
            x_offset = -0.01 * (max_val - min_val)
        else:
            ha = "left"
            x_offset = 0.01 * (max_val - min_val)

        ax.text(
            width + x_offset,
            bar.get_y() + bar.get_height() / 2.0,
            label,
            ha=ha,
            va="center",
            fontsize=8 if len(df_top) > 20 else 9,
        )

    # Set labels and title
    if not title:
        if top_n:
            title = f"Top {top_n} by {variable}"
        else:
            title = f"{variable} distribution"

    format_axis(
        ax,
        xlabel=xlabel or variable,
        title=title,
        grid=True,
        grid_alpha=0.3,
    )

    return fig
928
+
929
+
930
def plot_usage_share_bars(
    df,
    geography="country",
    top_n=20,
    figsize=(12, 8),
    title=None,
    filtered_entities=None,
    cmap=CUSTOM_CMAP,
):
    """
    Create bar chart showing share of global usage.

    Args:
        df: Long format dataframe
        geography: Geographic level
        top_n: Number of top entities
        figsize: Figure size
        title: Chart title
        filtered_entities: List of geo_id values to include (if None, include all)

    """
    # Pull the usage-share metric for the requested geographic level
    usage_df = filter_df(
        df, geography=geography, facet=geography, variable="usage_pct"
    )

    # Drop the synthetic "not_classified" bucket
    usage_df = usage_df[usage_df["geo_id"] != "not_classified"]

    # Optionally restrict to an explicit set of geographies
    if filtered_entities is not None:
        usage_df = usage_df[usage_df["geo_id"].isin(filtered_entities)]

    # Keep only the top_n largest shares
    top_rows = usage_df.nlargest(top_n, "value")

    fig, ax = create_figure(figsize=figsize)

    shares = top_rows["value"].values
    geo_names = top_rows["geo_name"].values
    x_positions = range(len(top_rows))

    # Color each bar by its value through the custom colormap
    norm = get_color_normalizer(shares, center_at_one=False)
    bar_colors = [cmap(norm(share)) for share in shares]

    bars = ax.bar(x_positions, shares, color=bar_colors, alpha=0.8)

    # One rotated name per bar
    ax.set_xticks(x_positions)
    ax.set_xticklabels(geo_names, rotation=45, ha="right")
    # Reduce horizontal margins to bring bars closer to plot borders
    ax.margins(x=0.01)

    default_title = f"Top {top_n} {'countries' if geography == 'country' else 'US states'} by share of global Claude usage"
    format_axis(
        ax, ylabel="Share of global usage (%)", title=title or default_title, grid=False
    )

    # Annotate each bar with its percentage just above the bar top
    for bar, share in zip(bars, shares, strict=True):
        ax.text(
            bar.get_x() + bar.get_width() / 2,
            share + 0.1,
            f"{share:.1f}%",
            ha="center",
            fontsize=8,
        )

    # Horizontal gridlines only
    ax.grid(True, axis="y", alpha=0.3)

    return fig
1008
+
1009
+
1010
def plot_usage_index_histogram(
    df, geography="country", bins=30, figsize=(10, 6), title=None, cmap=CUSTOM_CMAP
):
    """
    Create histogram of Anthropic AI Usage Index distribution.

    Args:
        df: Long format dataframe
        geography: Geographic level
        bins: Number of histogram bins
        figsize: Figure size
        title: Chart title
    """
    # Per-geography usage index values
    index_df = filter_df(
        df, geography=geography, facet=geography, variable="usage_per_capita_index"
    )

    fig, ax = create_figure(figsize=figsize)

    # Draw the histogram, keeping the bin edges and patches for coloring below
    index_values = index_df["value"].values
    _, edge_array, patches = ax.hist(
        index_values, bins=bins, edgecolor="white", linewidth=0.5
    )

    # Color bars with the custom gradient, keyed on each bin's midpoint
    norm = get_color_normalizer(
        index_values,
        center_at_one=False,
        vmin=min(edge_array[0], 0),
        vmax=max(edge_array[-1], 2),
    )

    for i, patch in enumerate(patches):
        midpoint = (edge_array[i] + edge_array[i + 1]) / 2
        patch.set_facecolor(cmap(norm(midpoint)))

    # Reference line at 1.0 (where usage and population shares match)
    ax.axvline(x=1.0, color="black", linestyle="--", alpha=0.5, linewidth=1)

    # Summary statistics box in the upper-right corner
    mean_val = index_values.mean()
    median_val = np.median(index_values)
    stats_text = (
        f"Mean: {mean_val:.2f}\nMedian: {median_val:.2f}\nN = {len(index_values)}"
    )
    ax.text(
        0.98,
        0.97,
        stats_text,
        transform=ax.transAxes,
        ha="right",
        va="top",
        fontsize=9,
        bbox=dict(boxstyle="round", facecolor="white", alpha=0.8),
    )

    # Axis labels and title
    geo_label = "countries" if geography == "country" else "US states"
    default_title = f"Distribution of Anthropic AI Usage Index ({geo_label})"

    format_axis(
        ax,
        xlabel="Anthropic AI Usage Index (usage % / working-age population %)",
        ylabel=f"Number of {geo_label}",
        title=title or default_title,
    )

    return fig
1084
+
1085
+
1086
def plot_gdp_scatter(
    df,
    geography="country",
    figsize=(10, 8),
    title=None,
    cmap=CUSTOM_CMAP,
    filtered_entities=None,
):
    """
    Create log-log scatter plot of GDP vs Anthropic AI Usage Index.

    Each geography is drawn as a bold text label (its geo_id) at its data
    point, colored by ln(usage index); a power-law OLS fit and its
    statistics are overlaid.

    Args:
        df: Long format dataframe
        geography: Geographic level
        figsize: Figure size
        title: Chart title
        cmap: Colormap to use
        filtered_entities: List of geo_id values that meet MIN_OBSERVATIONS threshold (optional)

    Returns:
        The matplotlib Figure containing the scatter plot.
    """
    # Get usage data
    df_usage = filter_df(
        df, geography=geography, facet=geography, variable="usage_per_capita_index"
    )

    # Apply filtering if provided
    if filtered_entities is not None:
        df_usage = df_usage[df_usage["geo_id"].isin(filtered_entities)]

    df_usage = df_usage[["geo_id", "cluster_name", "value"]].rename(
        columns={"value": "usage_index"}
    )

    # Get GDP data
    df_gdp = filter_df(
        df, geography=geography, facet=geography, variable="gdp_per_working_age_capita"
    )

    # Apply same filtering to GDP data so both sides of the merge agree
    if filtered_entities is not None:
        df_gdp = df_gdp[df_gdp["geo_id"].isin(filtered_entities)]

    df_gdp = df_gdp[["geo_id", "value"]].rename(columns={"value": "gdp_per_capita"})

    # Inner merge: keep only geographies with both usage and GDP data
    df_plot = df_usage.merge(df_gdp, on="geo_id", how="inner")

    # Filter out zeros and negative values for log scale
    # Explicitly check both GDP and usage are positive (will be true for filtered geos)
    mask = (df_plot["gdp_per_capita"] > 0) & (df_plot["usage_index"] > 0)
    df_plot = df_plot[mask]

    # Create figure
    fig, ax = create_figure(figsize=figsize)

    # Create scatter plot with geo_id values as labels
    x = df_plot["gdp_per_capita"].values
    y = df_plot["usage_index"].values

    # Transform to log space for plotting (natural log, matching the OLS below)
    log_x = np.log(x)
    log_y = np.log(y)

    # Shared color normalization over ln(usage index); reused for the colorbar
    norm = plt.Normalize(vmin=log_y.min(), vmax=log_y.max())

    # First, plot invisible points so matplotlib's autoscaling includes all
    # data points — text labels alone do not trigger autoscaling.
    ax.scatter(log_x, log_y, s=0, alpha=0)  # Size 0, invisible points for autoscaling

    # Plot the geo_id values as text at the exact data points in log space
    for ln_x, ln_y, geo_id in zip(log_x, log_y, df_plot["geo_id"].values, strict=True):
        # Get color from colormap based on ln(usage_index)
        color_val = norm(ln_y)
        text_color = cmap(color_val)

        ax.text(
            ln_x,
            ln_y,
            geo_id,
            fontsize=7,
            ha="center",
            va="center",
            color=text_color,
            alpha=0.9,
            weight="bold",
        )

    # Add constant column so the OLS fit has an intercept term
    X_with_const = sm.add_constant(log_x)

    # Fit OLS in log-log space: ln(AUI) = intercept + slope * ln(GDP),
    # i.e. a power law AUI ~ GDP^slope in the original units
    model = sm.OLS(log_y, X_with_const)
    results = model.fit()

    # Extract statistics
    intercept = results.params[0]
    slope = results.params[1]
    r_squared = results.rsquared
    p_value = results.pvalues[1]  # p-value for slope

    # Create fit line (we're already in log space)
    x_fit = np.linspace(log_x.min(), log_x.max(), 100)
    y_fit = intercept + slope * x_fit
    ax.plot(
        x_fit,
        y_fit,
        "gray",
        linestyle="--",
        alpha=0.7,
        linewidth=2,
        label=f"Power law: AUI ~ GDP^{slope:.2f}",
    )

    # Add regression statistics
    # Format p-value display (cap the displayed precision at 0.001)
    if p_value < 0.001:
        p_str = "p < 0.001"
    else:
        p_str = f"p = {p_value:.3f}"

    ax.text(
        0.05,
        0.95,
        f"$\\beta = {slope:.3f}\\ ({p_str})$\n$R^2 = {r_squared:.3f}$",
        transform=ax.transAxes,
        fontsize=10,
        bbox=dict(boxstyle="round", facecolor="white", alpha=0.8),
        verticalalignment="top",
    )

    # Customize labels for log-transformed values
    xlabel = "ln(GDP per working-age capita in USD)"
    ylabel = "ln(Anthropic AI Usage Index)"
    default_title = f"Income and Anthropic AI Usage Index by {'country' if geography == 'country' else 'US state'}"

    format_axis(
        ax, xlabel=xlabel, ylabel=ylabel, title=title or default_title, grid=False
    )

    # Grid drawn here (not via format_axis) to get both major and minor lines
    ax.grid(True, alpha=0.3, which="both", linestyle="-", linewidth=0.5)

    # Legend for the fit line
    ax.legend(loc="best")

    # Colorbar via a ScalarMappable sharing the label-color normalization
    scalar_mappable = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    scalar_mappable.set_array([])
    cbar = plt.colorbar(scalar_mappable, ax=ax)
    cbar.set_label(
        "ln(Anthropic AI Usage Index)", fontsize=9, rotation=270, labelpad=15
    )

    return fig
1239
+
1240
+
1241
def plot_request_comparison_cards(
    df,
    geo_ids,
    title,
    geography,
    top_n=5,
    figsize=(10, 6),
    exclude_not_classified=True,
    request_level=1,
    request_threshold=1.0,
):
    """
    Create a condensed card visualization showing top overrepresented request categories
    for multiple geographies (countries or states).

    Cards are laid out on a two-column grid with as many rows as needed for
    len(geo_ids); unused grid cells are hidden. (Previously the grid was
    hard-coded to 2x2, which broke for any number of geographies other than 4.)

    Args:
        df: Long format dataframe
        geo_ids: List of geography IDs to compare (e.g., ['USA', 'BRA', 'VNM', 'IND'])
        title: Title for the figure (required)
        geography: Geographic level ('country' or 'state_us')
        top_n: Number of top requests to show per geography (default 5)
        figsize: Figure size as tuple
        exclude_not_classified: Whether to exclude "not_classified" entries
        request_level: Request hierarchy level to use (default 1)
        request_threshold: Minimum percentage threshold for requests (default 1.0%)

    Returns:
        The matplotlib Figure with one card per geography.
    """
    # Get data for specified geography
    data_subset = filter_df(df, facet="request", geo_id=geo_ids, geography=geography)

    # Filter for request_pct_index variable and specified level
    data_subset = filter_df(
        data_subset, variable="request_pct_index", level=request_level
    )

    # Exclude not_classified if requested
    if exclude_not_classified:
        data_subset = data_subset[
            ~data_subset["cluster_name"].str.contains("not_classified", na=False)
        ]

    # Get tier and geo_name information
    geo_info = filter_df(
        df, geography=geography, variable="usage_tier", geo_id=geo_ids
    )[["geo_id", "geo_name", "value"]].drop_duplicates()
    tier_map = dict(zip(geo_info["geo_id"], geo_info["value"], strict=True))
    name_map = dict(zip(geo_info["geo_id"], geo_info["geo_name"], strict=True))

    # Set up figure: two columns, enough rows for all requested geographies
    n_cols = 2
    n_rows = (len(geo_ids) + n_cols - 1) // n_cols  # ceil division
    fig, axes = create_figure(figsize=figsize, nrows=n_rows, ncols=n_cols)
    axes = axes.flatten()

    # Use global tier colors
    tier_colors = TIER_COLORS_NUMERIC

    # Process each geography
    for idx, geo_id in enumerate(geo_ids):
        ax = axes[idx]

        # Apply request threshold filtering to get valid requests for this geography
        valid_requests = filter_requests_by_threshold(
            df, geography, geo_id, level=request_level, threshold=request_threshold
        )

        # Get data for this geography, filtered by valid requests
        geo_data = data_subset[
            (data_subset["geo_id"] == geo_id)
            & (data_subset["cluster_name"].isin(valid_requests))
            & (data_subset["value"] > 1.0)  # Only show overrepresented requests
        ].copy()

        # Get top n from the filtered requests
        geo_data = geo_data.nlargest(top_n, "value")

        # Get tier color
        tier = tier_map[geo_id]
        base_color = tier_colors[tier]

        # Create a lighter version of the tier color for the card background
        rgb = mcolors.to_rgb(base_color)
        # Mix with white (85% white, 15% color for very subtle background)
        pastel_rgb = tuple(0.85 + 0.15 * c for c in rgb)
        card_bg_color = mcolors.to_hex(pastel_rgb)

        # Fill entire axis with background color
        ax.set_facecolor(card_bg_color)

        # Create card with requests
        card_height = 0.9  # Fixed height for all cards
        card_bottom = 0.965 - card_height  # Consistent positioning

        card_rect = FancyBboxPatch(
            (0.10, card_bottom),
            0.80,
            card_height,
            transform=ax.transAxes,
            boxstyle="round,pad=0.02,rounding_size=0.035",
            facecolor=card_bg_color,
            edgecolor="none",
            linewidth=2,
            clip_on=False,
        )
        ax.add_patch(card_rect)

        # Header bar
        header_top = 0.965 - 0.10
        header_rect = FancyBboxPatch(
            (0.14, header_top),
            0.72,
            0.08,
            transform=ax.transAxes,
            boxstyle="round,pad=0.01,rounding_size=0.03",
            facecolor=base_color,
            edgecolor="none",
            alpha=0.7,
            clip_on=False,
        )
        ax.add_patch(header_rect)

        # Add geography name
        geo_name = name_map[geo_id]

        ax.text(
            0.5,
            header_top + 0.04,
            geo_name,
            transform=ax.transAxes,
            ha="center",
            va="center",
            fontsize=12,
            fontweight="bold",
            color="#1C1C1C",
        )

        # Start position for list items, just below the header
        y_pos = header_top - 0.05

        for _, row in geo_data.iterrows():
            request = row["cluster_name"]
            value = row["value"]

            # Format ratio: fewer decimals for larger multipliers
            if value >= 10:
                ratio_str = f"{value:.0f}x"
            elif value >= 2:
                ratio_str = f"{value:.1f}x"
            else:
                ratio_str = f"{value:.2f}x"

            # Wrap text
            wrapped_text = textwrap.fill(request, width=46, break_long_words=False)
            lines = wrapped_text.split("\n")

            # Display text lines with sufficient line spacing
            line_spacing = 0.045
            for j, line in enumerate(lines):
                ax.text(
                    0.13,  # Adjust text position for wider card
                    y_pos - j * line_spacing,
                    line,
                    transform=ax.transAxes,
                    ha="left",
                    va="top",
                    fontsize=9,
                    color="#1C1C1C",
                    rasterized=False,
                )

            # Position ratio, vertically centered against the wrapped text block
            text_height = len(lines) * line_spacing
            ax.text(
                0.85,
                y_pos - (text_height - line_spacing) / 2,
                ratio_str,
                transform=ax.transAxes,
                ha="right",
                va="center",
                fontsize=10,
                fontweight="bold",
                color="#B85450",
                rasterized=False,
            )

            # Add space between different requests
            y_pos -= text_height + 0.05

        # Remove axes
        ax.axis("off")

    # Hide any grid cells beyond the last geography
    for ax in axes[len(geo_ids):]:
        ax.axis("off")

    # Add title
    fig.suptitle(title, fontsize=14, fontweight="bold", y=0.98)

    plt.tight_layout()
    plt.subplots_adjust(
        top=0.92, bottom=0.02, left=0.01, right=0.99, hspace=0.02, wspace=0.02
    )

    return fig
1439
+
1440
+
1441
def plot_dc_task_request_cards(
    df,
    title,
    figsize=(10, 5),
):
    """
    Create professional card visualizations showing top overrepresented O*NET tasks and requests for Washington, DC.

    Draws two side-by-side cards: the left lists DC's top 5 overrepresented
    O*NET tasks, the right its top 5 overrepresented request clusters, both
    colored by DC's usage tier.

    Args:
        df: Long format dataframe
        title: Title for the figure (shown as the suptitle; required)
        figsize: Figure size as tuple

    Returns:
        The matplotlib Figure containing the two cards.
    """
    # Fixed parameters for DC
    geo_id = "DC"
    geography = "state_us"
    top_n = 5

    # Get tier for color
    tier_data = filter_df(
        df, geography=geography, variable="usage_tier", geo_id=[geo_id]
    )
    tier = tier_data["value"].iloc[0]

    # Use tier color
    tier_colors = TIER_COLORS_NUMERIC
    base_color = tier_colors[tier]

    # Create lighter version for card background (85% white, 15% tier color)
    rgb = mcolors.to_rgb(base_color)
    pastel_rgb = tuple(0.85 + 0.15 * c for c in rgb)
    card_bg_color = mcolors.to_hex(pastel_rgb)

    # Create figure with 2 subplots (cards)
    fig, axes = create_figure(figsize=figsize, ncols=2)

    # Card 1: Top O*NET Tasks
    ax1 = axes[0]
    ax1.set_facecolor(card_bg_color)

    # Get O*NET task data
    df_tasks = filter_df(
        df,
        geography=geography,
        geo_id=[geo_id],
        facet="onet_task",
        variable="onet_task_pct_index",
    )

    # Exclude not_classified and none
    df_tasks = df_tasks[~df_tasks["cluster_name"].isin(["not_classified", "none"])]

    # Get top n overrepresented tasks (index > 1 means overrepresented)
    df_tasks = df_tasks[df_tasks["value"] > 1.0].nlargest(top_n, "value")

    # Use fixed card heights (task card is taller: task text wraps to more lines)
    card_height_tasks = 0.955
    card_bottom_tasks = 0.965 - card_height_tasks

    # Draw card for O*NET tasks
    card_rect1 = FancyBboxPatch(
        (0.10, card_bottom_tasks),
        0.80,
        card_height_tasks,
        transform=ax1.transAxes,
        boxstyle="round,pad=0.02,rounding_size=0.035",
        facecolor=card_bg_color,
        edgecolor="none",
        linewidth=2,
        clip_on=False,
    )
    ax1.add_patch(card_rect1)

    # Header for O*NET tasks
    header_top = 0.965 - 0.10
    header_rect1 = FancyBboxPatch(
        (0.12, header_top),
        0.76,
        0.08,
        transform=ax1.transAxes,
        boxstyle="round,pad=0.01,rounding_size=0.03",
        facecolor=base_color,
        edgecolor="none",
        alpha=0.7,
        clip_on=False,
    )
    ax1.add_patch(header_rect1)

    ax1.text(
        0.5,
        header_top + 0.04,
        "Top 5 overrepresented O*NET tasks in DC",
        transform=ax1.transAxes,
        ha="center",
        va="center",
        fontsize=11,
        fontweight="bold",
        color="#1C1C1C",
    )

    # Add task items, starting just below the header
    y_pos = header_top - 0.05

    for _, row in df_tasks.iterrows():
        task = row["cluster_name"]
        value = row["value"]

        # Convert to sentence case and remove trailing period
        task = task[0].upper() + task[1:].lower() if task else task
        task = task.rstrip(".")  # Remove trailing period

        # Format ratio - always with 2 decimal places
        ratio_str = f"{value:.2f}x"

        # Wrap text
        wrapped_text = textwrap.fill(task, width=46, break_long_words=False)
        lines = wrapped_text.split("\n")

        # Display text lines
        line_spacing = 0.045
        for j, line in enumerate(lines):
            ax1.text(
                0.13,
                y_pos - j * line_spacing,
                line,
                transform=ax1.transAxes,
                ha="left",
                va="top",
                fontsize=9,
                color="#1C1C1C",
                rasterized=False,
            )

        # Add ratio at the right, vertically centered on the wrapped text block
        ax1.text(
            0.87,
            y_pos - (len(lines) - 1) * line_spacing / 2,
            ratio_str,
            transform=ax1.transAxes,
            ha="right",
            va="center",
            fontsize=10,
            color="#B85450",
            fontweight="bold",
        )

        # Move to next item position
        y_pos -= len(lines) * line_spacing + 0.025

    ax1.axis("off")

    # Card 2: Top Requests
    ax2 = axes[1]
    ax2.set_facecolor(card_bg_color)

    # Get valid requests using threshold (only clusters with >= 1% of DC usage)
    valid_requests = filter_requests_by_threshold(
        df, geography, geo_id, level=1, threshold=1.0
    )

    # Get request data
    df_requests = filter_df(
        df,
        geography=geography,
        geo_id=[geo_id],
        facet="request",
        variable="request_pct_index",
        level=1,
    )

    # Filter by valid requests and overrepresented (index > 1)
    df_requests = df_requests[
        (df_requests["cluster_name"].isin(valid_requests))
        & (df_requests["value"] > 1.0)
        & (~df_requests["cluster_name"].str.contains("not_classified", na=False))
    ]

    # Get top n
    df_requests = df_requests.nlargest(top_n, "value")

    # Draw card for requests with fixed height (shorter: request names are short)
    card_height_requests = 0.72
    card_bottom_requests = 0.965 - card_height_requests

    card_rect2 = FancyBboxPatch(
        (0.10, card_bottom_requests),
        0.80,
        card_height_requests,
        transform=ax2.transAxes,
        boxstyle="round,pad=0.02,rounding_size=0.035",
        facecolor=card_bg_color,
        edgecolor="none",
        linewidth=2,
        clip_on=False,
    )
    ax2.add_patch(card_rect2)

    # Header for requests (same header_top as card 1 so headers align)
    header_rect2 = FancyBboxPatch(
        (0.12, header_top),
        0.76,
        0.08,
        transform=ax2.transAxes,
        boxstyle="round,pad=0.01,rounding_size=0.03",
        facecolor=base_color,
        edgecolor="none",
        alpha=0.7,
        clip_on=False,
    )
    ax2.add_patch(header_rect2)

    ax2.text(
        0.5,
        header_top + 0.04,
        "Top 5 overrepresented request clusters in DC",
        transform=ax2.transAxes,
        ha="center",
        va="center",
        fontsize=11,
        fontweight="bold",
        color="#1C1C1C",
    )

    # Add request items, starting just below the header
    y_pos = header_top - 0.05

    for _, row in df_requests.iterrows():
        request = row["cluster_name"]
        value = row["value"]

        # Format ratio always with 2 decimal places
        ratio_str = f"{value:.2f}x"

        # Wrap text
        wrapped_text = textwrap.fill(request, width=46, break_long_words=False)
        lines = wrapped_text.split("\n")

        # Display text lines
        line_spacing = 0.045
        for j, line in enumerate(lines):
            ax2.text(
                0.13,
                y_pos - j * line_spacing,
                line,
                transform=ax2.transAxes,
                ha="left",
                va="top",
                fontsize=9,
                color="#1C1C1C",
                rasterized=False,
            )

        # Add ratio at the right, vertically centered on the wrapped text block
        ax2.text(
            0.87,
            y_pos - (len(lines) - 1) * line_spacing / 2,
            ratio_str,
            transform=ax2.transAxes,
            ha="right",
            va="center",
            fontsize=10,
            color="#B85450",
            fontweight="bold",
        )

        # Move to next item position
        y_pos -= len(lines) * line_spacing + 0.025

    ax2.axis("off")

    # Figure-level title
    fig.suptitle(title, fontsize=13, fontweight="bold", y=0.98)

    plt.tight_layout()
    return fig
1716
+
1717
+
1718
+ # Summary statistics function
1719
def plot_tier_summary_table(df, geography="country", figsize=(12, 6)):
    """
    Create a visual table showing entities per tier and example members.

    Rows are the usage tiers (in TIER_ORDER); columns show the tier name,
    its AUI range, entity count, and up to five example members.

    Args:
        df: Long format dataframe
        geography: 'country' or 'state_us'
        figsize: Figure size; only the width is used — the height is
            recomputed from the number of tiers present in the data.

    Returns:
        The matplotlib Figure containing the styled table.
    """
    # Get tier data
    df_tier = filter_df(df, geography=geography, variable="usage_tier")

    # Exclude US territories that appear as countries (may be confusing to readers)
    if geography == "country":
        us_territories_as_countries = [
            "PRI",
            "VIR",
            "GUM",
            "ASM",
            "MNP",
        ]  # Puerto Rico, Virgin Islands, Guam, American Samoa, Northern Mariana Islands
        df_tier = df_tier[~df_tier["geo_id"].isin(us_territories_as_countries)]

    # Get usage per capita index for sorting entities within tiers
    df_usage_index = filter_df(
        df, geography=geography, variable="usage_per_capita_index"
    )

    # Apply same territory filter to usage index data
    if geography == "country":
        df_usage_index = df_usage_index[
            ~df_usage_index["geo_id"].isin(us_territories_as_countries)
        ]

    # Merge tier with usage index; left join keeps every tiered entity even
    # if it lacks an index value
    df_tier_full = df_tier[["geo_id", "geo_name", "cluster_name"]].merge(
        df_usage_index[["geo_id", "value"]],
        on="geo_id",
        how="left",
        suffixes=("", "_index"),
    )

    # Use global tier colors
    tier_colors = TIER_COLORS_DICT

    # Calculate appropriate figure height based on number of tiers
    n_tiers = sum(
        1 for tier in TIER_ORDER if tier in df_tier_full["cluster_name"].values
    )
    # Adjust height: minimal padding for compact display
    fig_height = 0.5 + n_tiers * 0.3  # Much more compact

    # Create figure with calculated size (caller's figsize height is ignored)
    fig, ax = create_figure(figsize=(figsize[0], fig_height))
    ax.axis("tight")
    ax.axis("off")

    # Make background transparent
    fig.patch.set_alpha(0.0)
    ax.patch.set_alpha(0.0)

    # Prepare table data
    table_data = []
    entity_type = "countries" if geography == "country" else "states"
    col_labels = [
        "Tier",
        "AUI range",
        f"# of {entity_type}",
        f"Example {entity_type}",
    ]

    # Build one table row per tier, in TIER_ORDER
    for tier in TIER_ORDER:
        if tier in df_tier_full["cluster_name"].values:
            # Get entities in this tier
            tier_entities = filter_df(df_tier_full, cluster_name=tier)
            count = len(tier_entities)

            # Calculate usage index range for this tier
            min_index = tier_entities["value"].min()
            max_index = tier_entities["value"].max()
            index_range = f"{min_index:.2f} - {max_index:.2f}"

            # For Minimal tier where all have 0 index, pick shortest names
            # (sorting by value would be arbitrary when every value ties at 0)
            if tier == "Minimal" and tier_entities["value"].max() == 0:
                tier_entities = tier_entities.copy()
                tier_entities["name_length"] = tier_entities["geo_name"].str.len()
                top_entities = tier_entities.nsmallest(5, "name_length")[
                    "geo_name"
                ].tolist()
            else:
                # Get top 5 entities by usage index in this tier
                top_entities = tier_entities.nlargest(5, "value")["geo_name"].tolist()

            # Format the example entities as a comma-separated string
            examples = ", ".join(top_entities[:5])

            table_data.append([tier, index_range, str(count), examples])

    # Create table with better column widths
    table = ax.table(
        cellText=table_data,
        colLabels=col_labels,
        cellLoc="left",
        loc="center",
        colWidths=[0.20, 0.18, 0.12, 0.50],
        colColours=[ANTHROPIC_OAT] * 4,
    )

    # Style the table
    table.auto_set_font_size(False)
    table.set_fontsize(11)
    table.scale(1, 2.2)

    # Set all cell edges to Anthropic oat color
    for _, cell in table.get_celld().items():
        cell.set_edgecolor(ANTHROPIC_OAT)
        cell.set_linewidth(1.5)

    # Color code the rows with consistent black text
    # (table row 0 is the header, hence the i + 1 offsets below)
    for i, row_data in enumerate(table_data):
        tier_name = row_data[0]
        if tier_name in tier_colors:
            # Color the tier name cell with full opacity
            table[(i + 1, 0)].set_facecolor(tier_colors[tier_name])
            table[(i + 1, 0)].set_text_props(color="black", weight="bold")

            # Light background for usage index range column
            table[(i + 1, 1)].set_facecolor(tier_colors[tier_name])
            table[(i + 1, 1)].set_alpha(0.3)
            table[(i + 1, 1)].set_text_props(ha="center", color="black")

            # Light background for count column
            table[(i + 1, 2)].set_facecolor(tier_colors[tier_name])
            table[(i + 1, 2)].set_alpha(0.2)
            table[(i + 1, 2)].set_text_props(ha="center", color="black")

            # Even lighter background for examples column
            table[(i + 1, 3)].set_facecolor(tier_colors[tier_name])
            table[(i + 1, 3)].set_alpha(0.1)
            table[(i + 1, 3)].set_text_props(color="black")

    # Style header row with Anthropic oat and black text
    for j in range(4):
        table[(0, j)].set_facecolor(ANTHROPIC_OAT)
        table[(0, j)].set_text_props(color="black", weight="bold")

    # Re-center column 1 (the AUI range column; it was already centered in the
    # loop above, so this is a redundant safety pass — NOTE(review): the
    # original comment called this "the count column", which is column 2)
    for i in range(len(table_data)):
        table[(i + 1, 1)].set_text_props(ha="center")

    return fig
1869
+ return fig
1870
+
1871
+
1872
+ def plot_tier_map(
1873
+ df,
1874
+ title,
1875
+ geography,
1876
+ figsize=(16, 10),
1877
+ show_labels=True,
1878
+ ):
1879
+ """
1880
+ Create a map showing per Anthropic AI Usage Tiers.
1881
+
1882
+ Args:
1883
+ df: Long format dataframe with usage_tier variable
1884
+ geography: 'country' or 'state_us'
1885
+ figsize: Figure size
1886
+ title: Map title
1887
+ show_labels: whether to show title and legend (False for clean export)
1888
+ """
1889
+ # Filter for tier data
1890
+ df_tier = filter_df(df, geography=geography, variable="usage_tier").copy()
1891
+
1892
+ # Use global tier colors definition
1893
+ tier_colors = TIER_COLORS_DICT
1894
+
1895
+ # Map tiers to colors
1896
+ df_tier["color"] = df_tier["cluster_name"].map(tier_colors)
1897
+
1898
+ # Set up figure
1899
+ # Create figure with tight_layout disabled
1900
+ fig, ax = create_figure(figsize=figsize, tight_layout=False)
1901
+
1902
+ if geography == "country":
1903
+ # Load world shapefile function
1904
+ world = load_world_shapefile()
1905
+
1906
+ # Merge with world data using geo_id (which contains ISO-3 codes)
1907
+ # Use ISO_A3_EH for merging as it's complete (ISO_A3 has -99 for France)
1908
+ world = merge_geo_data(
1909
+ world,
1910
+ df_tier,
1911
+ "ISO_A3_EH",
1912
+ ["geo_id", "color", "cluster_name"],
1913
+ is_tier=True,
1914
+ )
1915
+
1916
+ # Plot world map
1917
+ plot_world_map(ax, world, data_column="cluster_name", tier_colors=tier_colors)
1918
+
1919
+ else: # state_us
1920
+ # Load US states shapefile function
1921
+ states = load_us_states_shapefile()
1922
+
1923
+ # Merge with tier data BEFORE projection
1924
+ states = merge_geo_data(
1925
+ states, df_tier, "STUSPS", ["geo_id", "color", "cluster_name"], is_tier=True
1926
+ )
1927
+
1928
+ # Pot states with insets
1929
+ plot_us_states_map(
1930
+ fig, ax, states, data_column="cluster_name", tier_colors=tier_colors
1931
+ )
1932
+
1933
+ # Remove axes
1934
+ ax.set_axis_off()
1935
+
1936
+ # Add title only if show_labels=True
1937
+ if show_labels:
1938
+ format_axis(ax, title=title, title_size=14, grid=False)
1939
+
1940
+ # Check which tiers actually appear in the data
1941
+ tiers_in_data = df_tier["cluster_name"].unique()
1942
+
1943
+ # Add legend only if show_labels=True
1944
+ if show_labels:
1945
+ # Check for excluded countries and no data
1946
+ excluded = False
1947
+ no_data = False
1948
+ if geography == "country":
1949
+ if "world" in locals() and "is_excluded" in world.columns:
1950
+ excluded = world["is_excluded"].any()
1951
+ if "world" in locals():
1952
+ no_data = world["cluster_name"].isna().any()
1953
+ else: # state_us
1954
+ if "states" in locals():
1955
+ no_data = states["cluster_name"].isna().any()
1956
+
1957
+ create_tier_legend(
1958
+ ax, tier_colors, tiers_in_data, excluded_countries=excluded, no_data=no_data
1959
+ )
1960
+
1961
+ return fig
1962
+
1963
+
1964
+ def plot_variable_map(
1965
+ df,
1966
+ variable,
1967
+ geography="country",
1968
+ figsize=(16, 10),
1969
+ title=None,
1970
+ cmap=CUSTOM_CMAP,
1971
+ center_at_one=None,
1972
+ ):
1973
+ """
1974
+ Create static map for any variable.
1975
+
1976
+ Args:
1977
+ df: Long format dataframe
1978
+ variable: Variable to plot (e.g., 'usage_pct')
1979
+ geography: 'country' or 'state_us'
1980
+ figsize: Figure size (width, height) in inches
1981
+ title: Map title
1982
+ cmap: Matplotlib colormap or name (default uses custom colormap)
1983
+ center_at_one: Whether to center the color scale at 1.0 (default True for usage_per_capita_index)
1984
+ """
1985
+ # Get data for the specified variable
1986
+ df_data = filter_df(df, geography=geography, facet=geography, variable=variable)
1987
+
1988
+ # Create figure
1989
+ fig = plt.figure(figsize=figsize, dpi=150)
1990
+ fig.set_layout_engine(layout="none") # Disable layout engine for custom axes
1991
+ ax = fig.add_subplot(111)
1992
+
1993
+ if geography == "country":
1994
+ # Load world shapefile function (automatically marks excluded countries)
1995
+ world = load_world_shapefile()
1996
+
1997
+ # Merge using geo_id (which contains ISO-3 codes)
1998
+ world = merge_geo_data(
1999
+ world, df_data, "ISO_A3_EH", ["geo_id", "value"], is_tier=False
2000
+ )
2001
+
2002
+ # Prepare data and normalization
2003
+ plot_column, norm = prepare_map_data(
2004
+ world, "value", center_at_one, world["is_excluded"]
2005
+ )
2006
+
2007
+ # Plot world map
2008
+ plot_world_map(ax, world, data_column=plot_column, cmap=cmap, norm=norm)
2009
+
2010
+ else: # state_us
2011
+ # Load US states shapefile function
2012
+ states = load_us_states_shapefile()
2013
+
2014
+ # Merge our data with the states shapefile
2015
+ states = merge_geo_data(
2016
+ states, df_data, "STUSPS", ["geo_id", "value"], is_tier=False
2017
+ )
2018
+
2019
+ # Prepare data and normalization
2020
+ plot_column, norm = prepare_map_data(states, "value", center_at_one)
2021
+
2022
+ # Plot states with insets
2023
+ plot_us_states_map(
2024
+ fig, ax, states, data_column=plot_column, cmap=cmap, norm=norm
2025
+ )
2026
+
2027
+ # Remove axes
2028
+ ax.set_axis_off()
2029
+
2030
+ # Add colorbar with proper size and positioning
2031
+ divider = make_axes_locatable(ax)
2032
+ cax = divider.append_axes("right", size="3%", pad=0.1)
2033
+
2034
+ # Create colorbar
2035
+ scalar_mappable = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
2036
+ scalar_mappable.set_array([])
2037
+ cbar = plt.colorbar(scalar_mappable, cax=cax)
2038
+
2039
+ # Set colorbar label based on variable
2040
+ if variable == "usage_pct":
2041
+ cbar.set_label("Usage share (%)", fontsize=10, rotation=270, labelpad=15)
2042
+ elif variable == "usage_per_capita_index":
2043
+ cbar.set_label(
2044
+ "Anthropic AI Usage Index", fontsize=10, rotation=270, labelpad=15
2045
+ )
2046
+ else:
2047
+ cbar.set_label(variable, fontsize=10, rotation=270, labelpad=15)
2048
+
2049
+ # Set title
2050
+ if variable == "usage_pct":
2051
+ default_title = "Share of Claude usage by " + (
2052
+ "country" if geography == "country" else "US state"
2053
+ )
2054
+ else:
2055
+ default_title = f"{variable} by " + (
2056
+ "country" if geography == "country" else "US state"
2057
+ )
2058
+
2059
+ format_axis(ax, title=title or default_title, title_size=14, grid=False)
2060
+
2061
+ # Add legend for excluded countries and no data
2062
+ legend_elements = []
2063
+
2064
+ # Check if we have excluded countries or no data regions
2065
+ if geography == "country":
2066
+ # Check for excluded countries (world['is_excluded'] == True)
2067
+ if "is_excluded" in world.columns:
2068
+ excluded_countries = world[world["is_excluded"] == True]
2069
+ if not excluded_countries.empty:
2070
+ legend_elements.append(
2071
+ Patch(
2072
+ facecolor="#c0c0c0",
2073
+ edgecolor="white",
2074
+ label="Claude not available",
2075
+ )
2076
+ )
2077
+
2078
+ # Check for countries with no data
2079
+ no_data_countries = world[
2080
+ (world["value"].isna()) & (world["is_excluded"] != True)
2081
+ ]
2082
+ if not no_data_countries.empty:
2083
+ legend_elements.append(
2084
+ Patch(facecolor="#f0f0f0", edgecolor="white", label="No data")
2085
+ )
2086
+
2087
+ if legend_elements:
2088
+ ax.legend(
2089
+ handles=legend_elements,
2090
+ loc="lower left",
2091
+ fontsize=9,
2092
+ frameon=True,
2093
+ fancybox=True,
2094
+ shadow=True,
2095
+ bbox_to_anchor=(0, 0),
2096
+ )
2097
+
2098
+ return fig
2099
+
2100
+
2101
+ def plot_soc_usage_scatter(
2102
+ df,
2103
+ geography,
2104
+ filtered_entities=None,
2105
+ ):
2106
+ """
2107
+ Create faceted scatterplot of SOC percentages vs Anthropic AI Usage Index.
2108
+ Always creates a 2x2 grid of square subplots showing the top 4 SOC groups.
2109
+
2110
+ Args:
2111
+ df: Long format dataframe with enriched data
2112
+ geography: 'country' or 'state_us'
2113
+ filtered_entities: List of geo_id values that meet MIN_OBSERVATIONS threshold
2114
+ """
2115
+ # Fixed configuration for 2x2 grid
2116
+ n_cols = 2
2117
+ n_rows = 2
2118
+ n_top_groups = 4
2119
+
2120
+ # Apply MIN_OBSERVATIONS filtering if not provided
2121
+ if filtered_entities is None:
2122
+ filtered_countries, filtered_states = get_filtered_geographies(df)
2123
+ filtered_entities = (
2124
+ filtered_countries if geography == "country" else filtered_states
2125
+ )
2126
+
2127
+ # Get Anthropic AI Usage Index data
2128
+ df_usage_index = filter_df(
2129
+ df,
2130
+ geography=geography,
2131
+ variable="usage_per_capita_index",
2132
+ geo_id=filtered_entities,
2133
+ )[["geo_id", "value"]].rename(columns={"value": "ai_usage_index"})
2134
+
2135
+ # Get usage counts for bubble sizes
2136
+ df_usage = filter_df(
2137
+ df, geography=geography, variable="usage_count", geo_id=filtered_entities
2138
+ )[["geo_id", "value"]].rename(columns={"value": "usage_count"})
2139
+
2140
+ # Get tier data for colors
2141
+ df_tier = filter_df(
2142
+ df, geography=geography, variable="usage_tier", geo_id=filtered_entities
2143
+ )[["geo_id", "cluster_name", "value"]].rename(
2144
+ columns={"cluster_name": "tier_name", "value": "tier_value"}
2145
+ )
2146
+
2147
+ # Get SOC percentages
2148
+ df_soc = filter_df(
2149
+ df,
2150
+ geography=geography,
2151
+ facet="soc_occupation",
2152
+ variable="soc_pct",
2153
+ geo_id=filtered_entities,
2154
+ )[["geo_id", "cluster_name", "value"]].rename(
2155
+ columns={"cluster_name": "soc_group", "value": "soc_pct"}
2156
+ )
2157
+
2158
+ # Merge all data
2159
+ df_plot = df_soc.merge(
2160
+ df_usage_index, on="geo_id", how="inner"
2161
+ ) # inner join because some geographies don't have data for all SOC groups
2162
+ df_plot = df_plot.merge(df_usage, on="geo_id", how="left")
2163
+ df_plot = df_plot.merge(
2164
+ df_tier[["geo_id", "tier_name", "tier_value"]], on="geo_id", how="left"
2165
+ )
2166
+
2167
+ # Use parent geography reference for consistent SOC selection
2168
+ if geography == "country":
2169
+ # Use global reference for countries
2170
+ reference_soc = filter_df(
2171
+ df,
2172
+ geography="global",
2173
+ geo_id="GLOBAL",
2174
+ facet="soc_occupation",
2175
+ variable="soc_pct",
2176
+ )
2177
+ else: # state_us
2178
+ # Use US reference for states
2179
+ reference_soc = filter_df(
2180
+ df,
2181
+ geography="country",
2182
+ geo_id="USA",
2183
+ facet="soc_occupation",
2184
+ variable="soc_pct",
2185
+ )
2186
+
2187
+ # Get top SOC groups from reference (excluding not_classified)
2188
+ reference_filtered = reference_soc[
2189
+ ~reference_soc["cluster_name"].str.contains("not_classified", na=False)
2190
+ ]
2191
+ plot_soc_groups = reference_filtered.nlargest(n_top_groups, "value")[
2192
+ "cluster_name"
2193
+ ].tolist()
2194
+
2195
+ # Filter to selected SOC groups
2196
+ df_plot = df_plot[df_plot["soc_group"].isin(plot_soc_groups)]
2197
+
2198
+ tier_colors = TIER_COLORS_DICT
2199
+
2200
+ # Fixed square subplot size for 2x2 grid
2201
+ subplot_size = 6 # Each subplot is 6x6 inches
2202
+ figsize = (subplot_size * n_cols, subplot_size * n_rows)
2203
+
2204
+ # Create figure
2205
+ fig, axes = create_figure(figsize=figsize, nrows=n_rows, ncols=n_cols)
2206
+ fig.suptitle(
2207
+ "Occupation group shares vs Anthropic AI Usage Index",
2208
+ fontsize=16,
2209
+ fontweight="bold",
2210
+ y=0.98,
2211
+ )
2212
+
2213
+ # Flatten axes for easier iteration (always 2x2 grid)
2214
+ axes_flat = axes.flatten()
2215
+
2216
+ # Plot each SOC group
2217
+ for idx, soc_group in enumerate(plot_soc_groups):
2218
+ ax = axes_flat[idx]
2219
+
2220
+ # Get data for this SOC group
2221
+ soc_data = filter_df(df_plot, soc_group=soc_group)
2222
+
2223
+ # Create scatter plot for each tier
2224
+ for tier_name in tier_colors.keys():
2225
+ tier_data = filter_df(soc_data, tier_name=tier_name)
2226
+
2227
+ # Scale bubble sizes using sqrt for better visibility
2228
+ sizes = np.sqrt(tier_data["usage_count"]) * 2
2229
+
2230
+ ax.scatter(
2231
+ tier_data["ai_usage_index"],
2232
+ tier_data["soc_pct"],
2233
+ s=sizes,
2234
+ c=tier_colors[tier_name],
2235
+ alpha=0.6,
2236
+ edgecolors="black",
2237
+ linewidth=0.5,
2238
+ label=tier_name,
2239
+ )
2240
+
2241
+ # Add trend line and regression statistics
2242
+ X = sm.add_constant(soc_data["ai_usage_index"].values)
2243
+ y = soc_data["soc_pct"].values
2244
+
2245
+ model = sm.OLS(y, X)
2246
+ results = model.fit()
2247
+
2248
+ intercept = results.params[0]
2249
+ slope = results.params[1]
2250
+ r_squared = results.rsquared
2251
+ p_value = results.pvalues[1] # p-value for slope
2252
+
2253
+ # Plot trend line
2254
+ x_line = np.linspace(
2255
+ soc_data["ai_usage_index"].min(), soc_data["ai_usage_index"].max(), 100
2256
+ )
2257
+ y_line = intercept + slope * x_line
2258
+ ax.plot(x_line, y_line, "--", color="gray", alpha=0.5, linewidth=1)
2259
+
2260
+ # Format p-value display
2261
+ if p_value < 0.001:
2262
+ p_str = "p < 0.001"
2263
+ else:
2264
+ p_str = f"p = {p_value:.3f}"
2265
+
2266
+ # Add regression statistics
2267
+ ax.text(
2268
+ 0.95,
2269
+ 0.95,
2270
+ f"$\\beta = {slope:.3f}\\ ({p_str})$\n$R^2 = {r_squared:.3f}$",
2271
+ transform=ax.transAxes,
2272
+ ha="right",
2273
+ va="top",
2274
+ fontsize=9,
2275
+ bbox=dict(boxstyle="round", facecolor="white", alpha=0.8),
2276
+ )
2277
+
2278
+ # Format axes
2279
+ format_axis(
2280
+ ax,
2281
+ xlabel="Anthropic AI Usage Index (usage % / working-age population %)",
2282
+ ylabel="Occupation group share (%)",
2283
+ title=soc_group,
2284
+ xlabel_size=10,
2285
+ ylabel_size=10,
2286
+ grid=False,
2287
+ )
2288
+ ax.grid(True, alpha=0.3)
2289
+
2290
+ # Add legend
2291
+ handles, labels = axes_flat[0].get_legend_handles_labels()
2292
+ if handles:
2293
+ # Create new handles with consistent size for legend only
2294
+ # This doesn't modify the actual plot markers
2295
+ legend_handles = []
2296
+ for handle in handles:
2297
+ # Get the color from the original handle
2298
+ color = (
2299
+ handle.get_facecolor()[0]
2300
+ if hasattr(handle, "get_facecolor")
2301
+ else "gray"
2302
+ )
2303
+ # Create a Line2D object with circle marker for legend
2304
+ new_handle = Line2D(
2305
+ [0],
2306
+ [0],
2307
+ marker="o",
2308
+ color="w",
2309
+ markerfacecolor=color,
2310
+ markersize=8,
2311
+ markeredgecolor="black",
2312
+ markeredgewidth=0.5,
2313
+ alpha=0.6,
2314
+ )
2315
+ legend_handles.append(new_handle)
2316
+
2317
+ # Position tier legend centered under the left column with vertical layout
2318
+ fig.legend(
2319
+ legend_handles,
2320
+ labels,
2321
+ title="Anthropic AI Usage Index tier",
2322
+ loc="upper center",
2323
+ bbox_to_anchor=(0.25, -0.03),
2324
+ frameon=True,
2325
+ fancybox=True,
2326
+ shadow=True,
2327
+ ncol=2,
2328
+ borderpad=0.6,
2329
+ )
2330
+
2331
+ # Add size legend using actual scatter points for perfect matching
2332
+ reference_counts = [100, 1000, 10000]
2333
+
2334
+ # Create invisible scatter points with the exact same size formula as the plot
2335
+ size_legend_elements = []
2336
+ for count in reference_counts:
2337
+ # Use exact same formula as in the plot
2338
+ size = np.sqrt(count) * 2
2339
+ # Create scatter on first axis (will be invisible) just for legend
2340
+ scatter = axes_flat[0].scatter(
2341
+ [],
2342
+ [], # Empty data
2343
+ s=size,
2344
+ c="gray",
2345
+ alpha=0.6,
2346
+ edgecolors="black",
2347
+ linewidth=0.5,
2348
+ label=f"{count:,}",
2349
+ )
2350
+ size_legend_elements.append(scatter)
2351
+
2352
+ # Add size legend centered under the right column with vertical layout
2353
+ fig.legend(
2354
+ handles=size_legend_elements,
2355
+ title="Claude usage count",
2356
+ loc="upper center",
2357
+ bbox_to_anchor=(0.75, -0.03),
2358
+ frameon=True,
2359
+ fancybox=True,
2360
+ shadow=True,
2361
+ ncol=1,
2362
+ borderpad=0.6,
2363
+ )
2364
+
2365
+ plt.tight_layout(rect=[0, -0.03, 1, 0.98])
2366
+ return fig
2367
+
2368
+
2369
+ def collaboration_task_regression(df, geography="country"):
2370
+ """
2371
+ Analyze automation vs augmentation patterns controlling for task mix for
2372
+ geographies that meet the minimum observation threshold.
2373
+
2374
+ Uses global task weights to calculate expected automation for each geography,
2375
+ then compares actual vs expected automation.
2376
+
2377
+ Note: Includes "none" tasks in calculations since they have automation/augmentation
2378
+ patterns in the data. Excludes "not_classified" tasks which lack collaboration data.
2379
+
2380
+ Args:
2381
+ df: Input dataframe
2382
+ geography: "country" or "state_us"
2383
+ """
2384
+ # Filter to geographies that meet min observation threshold
2385
+ filtered_countries, filtered_states = get_filtered_geographies(df)
2386
+ filtered_geos = filtered_countries if geography == "country" else filtered_states
2387
+
2388
+ # Get collaboration automation data
2389
+ df_automation = filter_df(
2390
+ df,
2391
+ facet="collaboration_automation_augmentation",
2392
+ geography=geography,
2393
+ variable="automation_pct",
2394
+ geo_id=filtered_geos,
2395
+ )[["geo_id", "value"]].rename(columns={"value": "automation_pct"})
2396
+
2397
+ # Get Anthropic AI Usage Index data
2398
+ df_usage = filter_df(
2399
+ df,
2400
+ geography=geography,
2401
+ facet=geography,
2402
+ variable="usage_per_capita_index",
2403
+ geo_id=filtered_geos,
2404
+ )[["geo_id", "geo_name", "value"]].copy()
2405
+ df_usage.rename(columns={"value": "usage_per_capita_index"}, inplace=True)
2406
+
2407
+ # Get geography-specific task weights (percentages)
2408
+ df_geo_tasks = filter_df(
2409
+ df,
2410
+ facet="onet_task",
2411
+ geography=geography,
2412
+ variable="onet_task_pct",
2413
+ geo_id=filtered_geos,
2414
+ ).copy()
2415
+
2416
+ # Exclude not_classified and none tasks
2417
+ df_geo_tasks = df_geo_tasks[
2418
+ ~df_geo_tasks["cluster_name"].isin(["not_classified", "none"])
2419
+ ]
2420
+
2421
+ # Get global task-specific collaboration patterns (only available at global level)
2422
+ df_task_collab = filter_df(
2423
+ df,
2424
+ facet="onet_task::collaboration",
2425
+ geography="global",
2426
+ geo_id="GLOBAL",
2427
+ variable="onet_task_collaboration_pct",
2428
+ ).copy()
2429
+
2430
+ # Parse task name and collaboration type from cluster_name
2431
+ df_task_collab["task_name"] = df_task_collab["cluster_name"].str.split("::").str[0]
2432
+ df_task_collab["collab_type"] = (
2433
+ df_task_collab["cluster_name"].str.split("::").str[1]
2434
+ )
2435
+
2436
+ # Map collaboration types to automation/augmentation
2437
+ # Automation: directive, feedback loop
2438
+ # Augmentation: validation, task iteration, learning
2439
+ # Excluded: none, not_classified
2440
+ def is_automation(collab_type):
2441
+ if collab_type in ["directive", "feedback loop"]:
2442
+ return True
2443
+ elif collab_type in [
2444
+ "validation",
2445
+ "task iteration",
2446
+ "learning",
2447
+ ]:
2448
+ return False
2449
+ else: # none, not_classified
2450
+ return None
2451
+
2452
+ df_task_collab["is_automation"] = df_task_collab["collab_type"].apply(is_automation)
2453
+
2454
+ # Exclude not_classified tasks upfront
2455
+ df_task_collab_valid = df_task_collab[
2456
+ df_task_collab["task_name"] != "not_classified"
2457
+ ]
2458
+
2459
+ # Calculate automation percentage for each task
2460
+ task_automation_rates = {}
2461
+ for task_name in df_task_collab_valid["task_name"].unique():
2462
+ task_data = df_task_collab_valid[
2463
+ (df_task_collab_valid["task_name"] == task_name)
2464
+ & (df_task_collab_valid["is_automation"].notna())
2465
+ ]
2466
+
2467
+ # Skip tasks that only have "not_classified" collaboration types
2468
+ if task_data.empty or task_data["value"].sum() == 0:
2469
+ continue
2470
+
2471
+ automation_sum = task_data[task_data["is_automation"]]["value"].sum()
2472
+ total_sum = task_data["value"].sum()
2473
+ task_automation_rates[task_name] = (automation_sum / total_sum) * 100
2474
+
2475
+ # Calculate expected automation for each country using its own task weights
2476
+ expected_automation = []
2477
+ geo_ids = []
2478
+
2479
+ for geo_id in filtered_geos:
2480
+ # Get this geography's task distribution (excluding not_classified)
2481
+ geo_tasks = df_geo_tasks[
2482
+ (df_geo_tasks["geo_id"] == geo_id)
2483
+ & (df_geo_tasks["cluster_name"] != "not_classified")
2484
+ ]
2485
+
2486
+ # Skip geographies with no task data
2487
+ if geo_tasks.empty:
2488
+ continue
2489
+
2490
+ # Calculate weighted automation using geography's task weights
2491
+ weighted_auto = 0.0
2492
+ total_weight = 0.0
2493
+
2494
+ for _, row in geo_tasks.iterrows():
2495
+ task = row["cluster_name"]
2496
+ weight = row["value"] # Already in percentage
2497
+
2498
+ # Get automation rate for this task (from global data)
2499
+ if task in task_automation_rates:
2500
+ auto_rate = task_automation_rates[task]
2501
+ weighted_auto += weight * auto_rate
2502
+ total_weight += weight
2503
+
2504
+ # Calculate expected automation
2505
+ expected_auto = weighted_auto / total_weight
2506
+ expected_automation.append(expected_auto)
2507
+ geo_ids.append(geo_id)
2508
+
2509
+ # Create dataframe with expected automation
2510
+ df_expected = pd.DataFrame(
2511
+ {"geo_id": geo_ids, "expected_automation_pct": expected_automation}
2512
+ )
2513
+
2514
+ # Merge all data
2515
+ df_regression = df_automation.merge(df_expected, on="geo_id", how="inner")
2516
+ df_regression = df_regression.merge(df_usage, on="geo_id", how="inner")
2517
+
2518
+ # Count unique tasks for reporting
2519
+ n_tasks = len(task_automation_rates)
2520
+
2521
+ # Calculate residuals from regressions for proper partial correlation
2522
+ # For automation, regress actual on expected to get residuals
2523
+ X_expected = sm.add_constant(df_regression["expected_automation_pct"])
2524
+ model_automation = sm.OLS(df_regression["automation_pct"], X_expected)
2525
+ results_automation = model_automation.fit()
2526
+ df_regression["automation_residuals"] = results_automation.resid
2527
+
2528
+ # For usage, regress on expected automation to get residuals
2529
+ model_usage = sm.OLS(df_regression["usage_per_capita_index"], X_expected)
2530
+ results_usage = model_usage.fit()
2531
+ df_regression["usage_residuals"] = results_usage.resid
2532
+
2533
+ # Partial regression is regression of residuals
2534
+ # We want usage (X) to explain automation (Y)
2535
+ X_partial = sm.add_constant(df_regression["usage_residuals"])
2536
+ model_partial = sm.OLS(df_regression["automation_residuals"], X_partial)
2537
+ results_partial = model_partial.fit()
2538
+ partial_slope = results_partial.params.iloc[1]
2539
+ partial_r2 = results_partial.rsquared
2540
+ partial_p = results_partial.pvalues.iloc[1]
2541
+
2542
+ # Create visualization - only show partial correlation
2543
+ fig, ax = create_figure(figsize=(10, 8))
2544
+
2545
+ # Define colormap for automation residuals
2546
+ colors_automation = [AUGMENTATION_COLOR, AUTOMATION_COLOR]
2547
+ cmap_automation = LinearSegmentedColormap.from_list(
2548
+ "automation", colors_automation, N=100
2549
+ )
2550
+
2551
+ # Plot partial correlation
2552
+ # Create colormap normalization for automation residuals
2553
+ norm = plt.Normalize(
2554
+ vmin=df_regression["automation_residuals"].min(),
2555
+ vmax=df_regression["automation_residuals"].max(),
2556
+ )
2557
+
2558
+ # Plot invisible points to ensure matplotlib's autoscaling includes all data points
2559
+ ax.scatter(
2560
+ df_regression["usage_residuals"],
2561
+ df_regression["automation_residuals"],
2562
+ s=0, # invisible points for autoscaling
2563
+ alpha=0,
2564
+ )
2565
+
2566
+ # Plot country geo_id values as text instead of scatter points
2567
+ for _, row in df_regression.iterrows():
2568
+ color_val = norm(row["automation_residuals"])
2569
+ text_color = cmap_automation(color_val)
2570
+
2571
+ ax.text(
2572
+ row["usage_residuals"],
2573
+ row["automation_residuals"],
2574
+ row["geo_id"],
2575
+ fontsize=7,
2576
+ ha="center",
2577
+ va="center",
2578
+ color=text_color,
2579
+ alpha=0.9,
2580
+ weight="bold",
2581
+ )
2582
+
2583
+ # Create a ScalarMappable for the colorbar
2584
+ scalar_mappable = plt.cm.ScalarMappable(cmap=cmap_automation, norm=norm)
2585
+ scalar_mappable.set_array([])
2586
+
2587
+ # Add regression line using actual regression results
2588
+ # OLS model: automation_residuals = intercept + slope * usage_residuals
2589
+ x_resid_line = np.linspace(
2590
+ df_regression["usage_residuals"].min(),
2591
+ df_regression["usage_residuals"].max(),
2592
+ 100,
2593
+ )
2594
+ intercept = results_partial.params.iloc[0]
2595
+ y_resid_line = intercept + partial_slope * x_resid_line
2596
+ ax.plot(
2597
+ x_resid_line,
2598
+ y_resid_line,
2599
+ "grey",
2600
+ linestyle="--",
2601
+ linewidth=2,
2602
+ alpha=0.7,
2603
+ )
2604
+
2605
+ # Set axis labels and title
2606
+ format_axis(
2607
+ ax,
2608
+ xlabel="Anthropic AI Usage Index residuals\n(per capita usage not explained by task mix)",
2609
+ ylabel="Automation % residuals\n(automation not explained by task mix)",
2610
+ title="Relationship between Anthropic AI Usage Index and automation",
2611
+ grid=False,
2612
+ )
2613
+
2614
+ # Add correlation info inside the plot
2615
+ if partial_p < 0.001:
2616
+ p_str = "p < 0.001"
2617
+ else:
2618
+ p_str = f"p = {partial_p:.3f}"
2619
+
2620
+ ax.text(
2621
+ 0.08,
2622
+ 0.975,
2623
+ f"Partial regression (controlling for task mix): $\\beta = {partial_slope:.3f}, R^2 = {partial_r2:.3f}\\ ({p_str})$",
2624
+ transform=ax.transAxes,
2625
+ fontsize=10,
2626
+ bbox=dict(boxstyle="round", facecolor="white", alpha=0.8),
2627
+ verticalalignment="top",
2628
+ )
2629
+
2630
+ ax.axhline(y=0, color="gray", linestyle=":", linewidth=1, alpha=0.3)
2631
+ ax.axvline(x=0, color="gray", linestyle=":", linewidth=1, alpha=0.3)
2632
+ ax.grid(True, alpha=0.3, linestyle="--")
2633
+
2634
+ # Add colorbar
2635
+ fig.subplots_adjust(right=0.92)
2636
+ cbar_ax = fig.add_axes([0.94, 0.2, 0.02, 0.6])
2637
+ cbar = plt.colorbar(scalar_mappable, cax=cbar_ax)
2638
+ cbar.set_label("Automation % residuals", fontsize=10, rotation=270, labelpad=15)
2639
+
2640
+ # Adjust plot to make room for titles and ensure all data is visible
2641
+ plt.subplots_adjust(top=0.92, right=0.92, left=0.12, bottom=0.12)
2642
+
2643
+ # Return results
2644
+ return {
2645
+ "figure": fig,
2646
+ "partial_slope": partial_slope,
2647
+ "partial_r2": partial_r2,
2648
+ "partial_pvalue": partial_p,
2649
+ "n_countries": len(df_regression),
2650
+ "n_tasks": n_tasks,
2651
+ "df_residuals": df_regression,
2652
+ }
2653
+
2654
+
2655
+ def plot_automation_preference_residuals(df, geography="country", figsize=(14, 12)):
2656
+ """Plot automation vs augmentation preference after controlling for task mix.
2657
+
2658
+ For geographies meeting minimum observation threshold only.
2659
+
2660
+ Args:
2661
+ df: Input dataframe
2662
+ geography: "country" or "state_us"
2663
+ figsize: Figure size
2664
+ """
2665
+ # First run the collaboration analysis to get residuals
2666
+ results = collaboration_task_regression(df, geography=geography)
2667
+
2668
+ # Suppress figure created by collaboration_task_regression
2669
+ plt.close(results["figure"])
2670
+
2671
+ # Get the dataframe with residuals
2672
+ df_residuals = results["df_residuals"]
2673
+
2674
+ # Sort by automation residuals (most augmentation to most automation)
2675
+ df_plot = df_residuals.sort_values("automation_residuals", ascending=True)
2676
+
2677
+ # Adjust figure size based on number of geographies
2678
+ n_geos = len(df_plot)
2679
+ fig_height = max(8, n_geos * 0.25)
2680
+ fig, ax = create_figure(figsize=(figsize[0], fig_height))
2681
+
2682
+ # Create color map
2683
+ colors = [
2684
+ AUGMENTATION_COLOR if x < 0 else AUTOMATION_COLOR
2685
+ for x in df_plot["automation_residuals"]
2686
+ ]
2687
+
2688
+ # Create horizontal bar chart
2689
+ ax.barh(
2690
+ range(len(df_plot)),
2691
+ df_plot["automation_residuals"].values,
2692
+ color=colors,
2693
+ alpha=0.8,
2694
+ )
2695
+
2696
+ # Set y-axis labels with geography names only
2697
+ y_labels = [row["geo_name"] for _, row in df_plot.iterrows()]
2698
+ ax.set_yticks(range(len(df_plot)))
2699
+ ax.set_yticklabels(y_labels, fontsize=7)
2700
+
2701
+ # Reduce white space at top and bottom
2702
+ ax.set_ylim(-0.5, len(df_plot) - 0.5)
2703
+
2704
+ # Add vertical line at zero
2705
+ ax.axvline(x=0, color="black", linestyle="-", linewidth=1, alpha=0.7)
2706
+
2707
+ # Labels and title
2708
+ geo_label = "Countries'" if geography == "country" else "States'"
2709
+ format_axis(
2710
+ ax,
2711
+ xlabel="Automation % residual (after controlling for task mix)",
2712
+ ylabel="",
2713
+ title=f"{geo_label} automation vs augmentation preference\n(after controlling for task composition)",
2714
+ grid=False,
2715
+ )
2716
+
2717
+ # Add grid
2718
+ ax.grid(True, axis="x", alpha=0.3, linestyle="--")
2719
+
2720
+ # Add value labels on the bars
2721
+ for i, (_, row) in enumerate(df_plot.iterrows()):
2722
+ value = row["automation_residuals"]
2723
+ x_offset = 0.2 if abs(value) < 5 else 0.3
2724
+ x_pos = value + (x_offset if value > 0 else -x_offset)
2725
+ ax.text(
2726
+ x_pos,
2727
+ i,
2728
+ f"{value:.1f}",
2729
+ ha="left" if value > 0 else "right",
2730
+ va="center",
2731
+ fontsize=8,
2732
+ )
2733
+
2734
+ # Add annotations
2735
+ y_range = ax.get_ylim()
2736
+ annotation_y = y_range[1] * 0.85
2737
+
2738
+ # Left annotation for augmentation
2739
+ ax.text(
2740
+ ax.get_xlim()[0] * 0.7,
2741
+ annotation_y,
2742
+ "Prefer augmentation",
2743
+ fontsize=9,
2744
+ color=AUGMENTATION_COLOR,
2745
+ fontweight="bold",
2746
+ ha="left",
2747
+ va="center",
2748
+ )
2749
+
2750
+ # Right annotation for automation
2751
+ ax.text(
2752
+ ax.get_xlim()[1] * 0.7,
2753
+ annotation_y,
2754
+ "Prefer automation",
2755
+ fontsize=9,
2756
+ color=AUTOMATION_COLOR,
2757
+ fontweight="bold",
2758
+ ha="right",
2759
+ va="center",
2760
+ )
2761
+
2762
+ plt.tight_layout()
2763
+
2764
+ return fig
2765
+
2766
+
2767
+ def plot_soc_distribution(
2768
+ df, geo_list, geography, figsize=(14, 10), title=None, exclude_not_classified=True
2769
+ ):
2770
+ """
2771
+ Plot SOC occupation distribution for multiple geographies (countries or states) with horizontal bars, colored by tier.
2772
+
2773
+ Args:
2774
+ df: Long format dataframe
2775
+ geo_list: List of geo_id values to compare (e.g., ['USA', 'BRA'] for countries or ['CA', 'TX'] for states)
2776
+ geography: Geographic level ('country' or 'state_us')
2777
+ figsize: Figure size
2778
+ title: Chart title
2779
+ exclude_not_classified: If True, excludes 'not_classified' from the chart
2780
+ """
2781
+ # Use global tier colors and names
2782
+ tier_colors = TIER_COLORS_NUMERIC
2783
+ tier_names = TIER_NAMES_NUMERIC
2784
+
2785
+ # Get usage tier and geo_name for each geography
2786
+ tier_data = filter_df(
2787
+ df, geography=geography, variable="usage_tier", facet=geography, geo_id=geo_list
2788
+ )[["geo_id", "geo_name", "value"]].rename(columns={"value": "tier"})
2789
+
2790
+ # Collect SOC data for all geographies first to determine consistent ordering
2791
+ all_soc_data = []
2792
+ for geo_id in geo_list:
2793
+ geo_soc = filter_df(
2794
+ df,
2795
+ geography=geography,
2796
+ geo_id=geo_id,
2797
+ facet="soc_occupation",
2798
+ variable="soc_pct",
2799
+ ).copy()
2800
+
2801
+ if not geo_soc.empty:
2802
+ # Optionally filter out not_classified
2803
+ if exclude_not_classified:
2804
+ geo_soc = geo_soc[geo_soc["cluster_name"] != "not_classified"].copy()
2805
+
2806
+ geo_soc["geo"] = geo_id
2807
+ all_soc_data.append(geo_soc)
2808
+
2809
+ combined_data = pd.concat(all_soc_data)
2810
+
2811
+ # Use global SOC distribution for countries, USA distribution for states
2812
+ if geography == "country":
2813
+ reference_data = filter_df(
2814
+ df,
2815
+ geography="global",
2816
+ geo_id="GLOBAL",
2817
+ facet="soc_occupation",
2818
+ variable="soc_pct",
2819
+ )
2820
+ else: # state_us
2821
+ reference_data = filter_df(
2822
+ df,
2823
+ geography="country",
2824
+ geo_id="USA",
2825
+ facet="soc_occupation",
2826
+ variable="soc_pct",
2827
+ )
2828
+
2829
+ # Filter out not_classified from reference data if needed
2830
+ if exclude_not_classified:
2831
+ reference_data = reference_data[
2832
+ reference_data["cluster_name"] != "not_classified"
2833
+ ]
2834
+
2835
+ # Sort by reference values ascending so highest appears at top when plotted
2836
+ soc_order = reference_data.sort_values("value", ascending=True)[
2837
+ "cluster_name"
2838
+ ].tolist()
2839
+
2840
+ # Create figure
2841
+ fig, ax = create_figure(figsize=figsize)
2842
+
2843
+ # Width of bars and positions
2844
+ n_geos = len(geo_list)
2845
+ bar_width = 0.95 / n_geos # Wider bars, less spacing within groups
2846
+ y_positions = (
2847
+ np.arange(len(soc_order)) * 1.05
2848
+ ) # Reduce spacing between SOC groups to 5%
2849
+
2850
+ # Sort geo_list to ensure highest tier appears at top within each group
2851
+ # Reverse the order so tier 4 is plotted first and appears on top
2852
+ geo_tier_map = dict(zip(tier_data["geo_id"], tier_data["tier"], strict=True))
2853
+ geo_list_sorted = sorted(geo_list, key=lambda x: geo_tier_map[x])
2854
+
2855
+ # Plot bars for each geography
2856
+ for i, geo_id in enumerate(geo_list_sorted):
2857
+ geo_data = filter_df(combined_data, geo=geo_id)
2858
+ geo_name = filter_df(tier_data, geo_id=geo_id)["geo_name"].iloc[0]
2859
+ geo_tier = filter_df(tier_data, geo_id=geo_id)["tier"].iloc[0]
2860
+
2861
+ # Get values in the right order
2862
+ values = []
2863
+ for soc in soc_order:
2864
+ val_data = filter_df(geo_data, cluster_name=soc)["value"]
2865
+ # Use NaN for missing data
2866
+ values.append(val_data.iloc[0] if not val_data.empty else float("nan"))
2867
+
2868
+ # Determine color based on tier
2869
+ color = tier_colors[int(geo_tier)]
2870
+
2871
+ # Create bars with offset for multiple geographies
2872
+ # Reverse the offset calculation so first geo (lowest tier) goes to bottom
2873
+ offset = ((n_geos - 1 - i) - n_geos / 2 + 0.5) * bar_width
2874
+
2875
+ # Get tier name for label
2876
+ tier_label = tier_names[int(geo_tier)]
2877
+ label_text = f"{geo_name} ({tier_label})"
2878
+
2879
+ bars = ax.barh(
2880
+ y_positions + offset,
2881
+ values,
2882
+ bar_width,
2883
+ label=label_text,
2884
+ color=color,
2885
+ alpha=0.8,
2886
+ )
2887
+
2888
+ # Add value labels for bars with data
2889
+ for bar, value in zip(bars, values, strict=True):
2890
+ if not pd.isna(value):
2891
+ ax.text(
2892
+ value + 0.1,
2893
+ bar.get_y() + bar.get_height() / 2,
2894
+ f"{value:.1f}%",
2895
+ va="center",
2896
+ fontsize=5,
2897
+ )
2898
+
2899
+ # Set y-axis labels - position them at the center of each SOC group
2900
+ ax.set_yticks(y_positions)
2901
+ ax.set_yticklabels(soc_order, fontsize=9, va="center")
2902
+
2903
+ # Reduce white space at top and bottom
2904
+ ax.set_ylim(y_positions[0] - 0.5, y_positions[-1] + 0.5)
2905
+
2906
+ # Customize plot
2907
+ format_axis(
2908
+ ax,
2909
+ xlabel="Share of Claude task usage (%)",
2910
+ ylabel="Standard Occupation Classification group",
2911
+ grid=False,
2912
+ )
2913
+
2914
+ if title is None:
2915
+ title = "Claude task usage by occupation: Comparison by AI usage tier"
2916
+ format_axis(ax, title=title, title_size=14, grid=False)
2917
+
2918
+ # Add legend
2919
+ ax.legend(loc="lower right", fontsize=10, framealpha=0.95)
2920
+
2921
+ # Grid
2922
+ ax.grid(True, axis="x", alpha=0.3, linestyle="--")
2923
+ ax.set_xlim(0, max(combined_data["value"]) * 1.15)
2924
+
2925
+ plt.tight_layout()
2926
+ return fig
release_2025_09_15/code/aei_report_v3_analysis_1p_api.ipynb ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# AEI Report v3 API Analysis\n",
8
+ "This notebook produces the streamlined analysis for the AEI report API chapter"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": null,
14
+ "metadata": {
15
+ "vscode": {
16
+ "languageId": "python"
17
+ }
18
+ },
19
+ "outputs": [],
20
+ "source": [
21
+ "from pathlib import Path\n",
22
+ "import pandas as pd"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": null,
28
+ "metadata": {
29
+ "vscode": {
30
+ "languageId": "python"
31
+ }
32
+ },
33
+ "outputs": [],
34
+ "source": [
35
+ "# Import the analysis functions\n",
36
+ "from aei_analysis_functions_1p_api import (\n",
37
+ " setup_plot_style,\n",
38
+ " load_preprocessed_data,\n",
39
+ " create_top_requests_bar_chart,\n",
40
+ " create_platform_occupational_comparison,\n",
41
+ " create_platform_lorenz_curves,\n",
42
+ " create_collaboration_alluvial,\n",
43
+ " create_automation_augmentation_panel,\n",
44
+ " create_token_output_bar_chart,\n",
45
+ " create_completion_vs_input_tokens_scatter,\n",
46
+ " create_occupational_usage_cost_scatter,\n",
47
+ " create_partial_regression_plot,\n",
48
+ " perform_usage_share_regression_unweighted,\n",
49
+ " create_btos_ai_adoption_chart,\n",
50
+ ")"
51
+ ]
52
+ },
53
+ {
54
+ "cell_type": "code",
55
+ "execution_count": null,
56
+ "metadata": {
57
+ "vscode": {
58
+ "languageId": "python"
59
+ }
60
+ },
61
+ "outputs": [],
62
+ "source": [
63
+ "# Set matplotlib to use the correct backend and style\n",
64
+ "setup_plot_style()"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": null,
70
+ "metadata": {
71
+ "vscode": {
72
+ "languageId": "python"
73
+ }
74
+ },
75
+ "outputs": [],
76
+ "source": [
77
+ "# Set up output directory for saving figures\n",
78
+ "output_dir = Path(\"../data/output/figures/\")\n",
79
+ "btos_data_path = Path(\"../data/input/BTOS_National.xlsx\")\n",
80
+ "api_data_path = Path(\"../data/intermediate/aei_raw_1p_api_2025-08-04_to_2025-08-11.csv\")\n",
81
+ "cai_data_path = Path(\n",
82
+ " \"../data/intermediate/aei_raw_claude_ai_2025-08-04_to_2025-08-11.csv\"\n",
83
+ ")\n",
84
+ "\n",
85
+ "# Create output directory\n",
86
+ "output_dir.mkdir(parents=True, exist_ok=True)"
87
+ ]
88
+ },
89
+ {
90
+ "cell_type": "code",
91
+ "execution_count": null,
92
+ "metadata": {
93
+ "vscode": {
94
+ "languageId": "python"
95
+ }
96
+ },
97
+ "outputs": [],
98
+ "source": [
99
+ "# Load BTOS Data\n",
100
+ "print(\"Loading BTOS data...\")\n",
101
+ "btos_df = pd.read_excel(btos_data_path, sheet_name=\"Response Estimates\")\n",
102
+ "btos_df_ref_dates_df = pd.read_excel(\n",
103
+ " btos_data_path, sheet_name=\"Collection and Reference Dates\"\n",
104
+ ")\n",
105
+ "\n",
106
+ "# Load the API data\n",
107
+ "print(\"Loading API data...\")\n",
108
+ "api_df = load_preprocessed_data(api_data_path)\n",
109
+ "\n",
110
+ "# Load the Claude.ai data\n",
111
+ "print(\"Loading Claude.ai data...\")\n",
112
+ "cai_df = load_preprocessed_data(cai_data_path)"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "code",
117
+ "execution_count": null,
118
+ "metadata": {
119
+ "vscode": {
120
+ "languageId": "python"
121
+ }
122
+ },
123
+ "outputs": [],
124
+ "source": [
125
+ "create_btos_ai_adoption_chart(btos_df, btos_df_ref_dates_df, output_dir)"
126
+ ]
127
+ },
128
+ {
129
+ "cell_type": "code",
130
+ "execution_count": null,
131
+ "metadata": {
132
+ "vscode": {
133
+ "languageId": "python"
134
+ }
135
+ },
136
+ "outputs": [],
137
+ "source": [
138
+ "# Create the top requests bar chart\n",
139
+ "print(\"Creating top requests bar chart...\")\n",
140
+ "top_requests_chart = create_top_requests_bar_chart(api_df, output_dir)\n",
141
+ "print(f\"Chart saved to: {top_requests_chart}\")"
142
+ ]
143
+ },
144
+ {
145
+ "cell_type": "code",
146
+ "execution_count": null,
147
+ "metadata": {
148
+ "vscode": {
149
+ "languageId": "python"
150
+ }
151
+ },
152
+ "outputs": [],
153
+ "source": [
154
+ "# Create the platform occupational comparison chart\n",
155
+ "print(\"Creating platform occupational comparison chart...\")\n",
156
+ "occupational_comparison_chart = create_platform_occupational_comparison(\n",
157
+ " api_df, cai_df, output_dir\n",
158
+ ")\n",
159
+ "print(f\"Chart saved to: {occupational_comparison_chart}\")"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "code",
164
+ "execution_count": null,
165
+ "metadata": {
166
+ "vscode": {
167
+ "languageId": "python"
168
+ }
169
+ },
170
+ "outputs": [],
171
+ "source": [
172
+ "# Create the platform Lorenz curves\n",
173
+ "print(\"Creating platform Lorenz curves...\")\n",
174
+ "lorenz_curves_chart = create_platform_lorenz_curves(api_df, cai_df, output_dir)\n",
175
+ "print(f\"Chart saved to: {lorenz_curves_chart}\")"
176
+ ]
177
+ },
178
+ {
179
+ "cell_type": "code",
180
+ "execution_count": null,
181
+ "metadata": {
182
+ "vscode": {
183
+ "languageId": "python"
184
+ }
185
+ },
186
+ "outputs": [],
187
+ "source": [
188
+ "# Create the collaboration alluvial diagram\n",
189
+ "print(\"Creating collaboration alluvial diagram...\")\n",
190
+ "alluvial_chart = create_collaboration_alluvial(api_df, cai_df, output_dir)\n",
191
+ "print(f\"Chart saved to: {alluvial_chart}\")"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": null,
197
+ "metadata": {
198
+ "vscode": {
199
+ "languageId": "python"
200
+ }
201
+ },
202
+ "outputs": [],
203
+ "source": [
204
+ "# Create the automation vs augmentation panel\n",
205
+ "print(\"Creating automation vs augmentation panel...\")\n",
206
+ "automation_panel_chart = create_automation_augmentation_panel(\n",
207
+ " api_df, cai_df, output_dir\n",
208
+ ")\n",
209
+ "print(f\"Chart saved to: {automation_panel_chart}\")"
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ "execution_count": null,
215
+ "metadata": {
216
+ "vscode": {
217
+ "languageId": "python"
218
+ }
219
+ },
220
+ "outputs": [],
221
+ "source": [
222
+ "# Create the token output bar chart\n",
223
+ "print(\"Creating token output bar chart...\")\n",
224
+ "token_output_chart = create_token_output_bar_chart(api_df, output_dir)\n",
225
+ "print(f\"Chart saved to: {token_output_chart}\")"
226
+ ]
227
+ },
228
+ {
229
+ "cell_type": "code",
230
+ "execution_count": null,
231
+ "metadata": {
232
+ "vscode": {
233
+ "languageId": "python"
234
+ }
235
+ },
236
+ "outputs": [],
237
+ "source": [
238
+ "# Create the completion vs input tokens scatter plot\n",
239
+ "print(\"Creating completion vs input tokens scatter plot...\")\n",
240
+ "completion_input_scatter = create_completion_vs_input_tokens_scatter(api_df, output_dir)\n",
241
+ "print(f\"Chart saved to: {completion_input_scatter}\")"
242
+ ]
243
+ },
244
+ {
245
+ "cell_type": "code",
246
+ "execution_count": null,
247
+ "metadata": {
248
+ "vscode": {
249
+ "languageId": "python"
250
+ }
251
+ },
252
+ "outputs": [],
253
+ "source": [
254
+ "# Create the occupational usage vs cost scatter plot\n",
255
+ "print(\"Creating occupational usage vs cost scatter plot...\")\n",
256
+ "usage_cost_scatter = create_occupational_usage_cost_scatter(api_df, output_dir)\n",
257
+ "print(f\"Chart saved to: {usage_cost_scatter}\")"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "code",
262
+ "execution_count": null,
263
+ "metadata": {
264
+ "vscode": {
265
+ "languageId": "python"
266
+ }
267
+ },
268
+ "outputs": [],
269
+ "source": [
270
+ "# Create the partial regression plot\n",
271
+ "print(\"Creating partial regression plot...\")\n",
272
+ "partial_plot, regression_results = create_partial_regression_plot(\n",
273
+ " api_df, cai_df, output_dir\n",
274
+ ")\n",
275
+ "print(f\"Chart saved to: {partial_plot}\")"
276
+ ]
277
+ },
278
+ {
279
+ "cell_type": "code",
280
+ "execution_count": null,
281
+ "metadata": {
282
+ "vscode": {
283
+ "languageId": "python"
284
+ }
285
+ },
286
+ "outputs": [],
287
+ "source": [
288
+ "# Perform the unweighted usage share regression analysis\n",
289
+ "print(\"Performing unweighted usage share regression analysis...\")\n",
290
+ "regression_model = perform_usage_share_regression_unweighted(api_df, cai_df, output_dir)\n",
291
+ "regression_model.summary()"
292
+ ]
293
+ }
294
+ ],
295
+ "metadata": {
296
+ "kernelspec": {
297
+ "display_name": "Coconut",
298
+ "language": "coconut",
299
+ "name": "coconut"
300
+ },
301
+ "language_info": {
302
+ "codemirror_mode": {
303
+ "name": "python",
304
+ "version": 3
305
+ },
306
+ "file_extension": ".coco",
307
+ "mimetype": "text/x-python3",
308
+ "name": "coconut",
309
+ "pygments_lexer": "coconut",
310
+ "version": "3.0.2"
311
+ }
312
+ },
313
+ "nbformat": 4,
314
+ "nbformat_minor": 4
315
+ }
release_2025_09_15/code/aei_report_v3_analysis_claude_ai.ipynb ADDED
@@ -0,0 +1,868 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# AEI Report v3 Claude.ai Analysis\n",
8
+ "\n",
9
+ "This notebook performs statistical analysis and creates visualizations from enriched Clio data.\n",
10
+ "It works directly with long format data from the preprocessing pipeline.\n",
11
+ "\n",
12
+ "**Input**: `aei_enriched_claude_ai_2025-08-04_to_2025-08-11.csv`\n",
13
+ "\n",
14
+ "**Output**: Visualizations"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "markdown",
19
+ "metadata": {},
20
+ "source": [
21
+ "## 1. Setup and Data Loading"
22
+ ]
23
+ },
24
+ {
25
+ "cell_type": "code",
26
+ "execution_count": null,
27
+ "metadata": {
28
+ "vscode": {
29
+ "languageId": "python"
30
+ }
31
+ },
32
+ "outputs": [],
33
+ "source": [
34
+ "from pathlib import Path\n",
35
+ "import pandas as pd\n",
36
+ "import matplotlib.pyplot as plt\n",
37
+ "\n",
38
+ "# Import all analysis functions\n",
39
+ "from aei_analysis_functions_claude_ai import (\n",
40
+ " setup_plot_style,\n",
41
+ " get_filtered_geographies,\n",
42
+ " plot_usage_index_bars,\n",
43
+ " plot_tier_map,\n",
44
+ " plot_usage_share_bars,\n",
45
+ " plot_tier_summary_table,\n",
46
+ " plot_gdp_scatter,\n",
47
+ " plot_request_comparison_cards,\n",
48
+ " plot_soc_usage_scatter,\n",
49
+ " plot_dc_task_request_cards,\n",
50
+ " collaboration_task_regression,\n",
51
+ " plot_usage_index_histogram,\n",
52
+ " plot_variable_map,\n",
53
+ " plot_soc_distribution,\n",
54
+ " plot_automation_preference_residuals,\n",
55
+ " plot_variable_bars,\n",
56
+ ")"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "metadata": {
63
+ "vscode": {
64
+ "languageId": "python"
65
+ }
66
+ },
67
+ "outputs": [],
68
+ "source": [
69
+ "# Set matplotlib to use the correct backend and style\n",
70
+ "setup_plot_style()"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "code",
75
+ "execution_count": null,
76
+ "metadata": {
77
+ "vscode": {
78
+ "languageId": "python"
79
+ }
80
+ },
81
+ "outputs": [],
82
+ "source": [
83
+ "# Set up output directory for saving figures\n",
84
+ "output_dir = Path(\"../data/output/figures/\")\n",
85
+ "output_dir.mkdir(parents=True, exist_ok=True)\n",
86
+ "output_dir_app = Path(\"../data/output/figures/appendix/\")\n",
87
+ "output_dir_app.mkdir(parents=True, exist_ok=True)\n",
88
+ "\n",
89
+ "# Load enriched data\n",
90
+ "data_path = \"../data/output/aei_enriched_claude_ai_2025-08-04_to_2025-08-11.csv\"\n",
91
+ "\n",
92
+ "# Load the data - use keep_default_na=False to preserve \"NA\" (Namibia) as string\n",
93
+ "df = pd.read_csv(data_path, keep_default_na=False, na_values=[\"\"])"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": null,
99
+ "metadata": {
100
+ "vscode": {
101
+ "languageId": "python"
102
+ }
103
+ },
104
+ "outputs": [],
105
+ "source": [
106
+ "# Filter countries to those with at least 200 observations\n",
107
+ "# Filter US states to those with at least 100 observations\n",
108
+ "filtered_countries, filtered_states = get_filtered_geographies(df)"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "markdown",
113
+ "metadata": {},
114
+ "source": [
115
+ "## 2.2 Global"
116
+ ]
117
+ },
118
+ {
119
+ "cell_type": "code",
120
+ "execution_count": null,
121
+ "metadata": {
122
+ "vscode": {
123
+ "languageId": "python"
124
+ }
125
+ },
126
+ "outputs": [],
127
+ "source": [
128
+ "# Top countries by share of global usage\n",
129
+ "plot_usage_share_bars(\n",
130
+ " df,\n",
131
+ " geography=\"country\",\n",
132
+ " top_n=30,\n",
133
+ ")\n",
134
+ "plt.savefig(\n",
135
+ " output_dir / \"usage_pct_bar_country_top30.png\", dpi=300, bbox_inches=\"tight\"\n",
136
+ ")"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "code",
141
+ "execution_count": null,
142
+ "metadata": {
143
+ "vscode": {
144
+ "languageId": "python"
145
+ }
146
+ },
147
+ "outputs": [],
148
+ "source": [
149
+ "# Create world map showing usage tiers\n",
150
+ "plot_tier_map(\n",
151
+ " df,\n",
152
+ " geography=\"country\",\n",
153
+ " title=\"Anthropic AI Usage Index tiers by country\",\n",
154
+ " figsize=(16, 10),\n",
155
+ ")\n",
156
+ "plt.savefig(\n",
157
+ " output_dir / \"ai_usage_index_tier_map_country_all.png\", dpi=300, bbox_inches=\"tight\"\n",
158
+ ")"
159
+ ]
160
+ },
161
+ {
162
+ "cell_type": "code",
163
+ "execution_count": null,
164
+ "metadata": {
165
+ "vscode": {
166
+ "languageId": "python"
167
+ }
168
+ },
169
+ "outputs": [],
170
+ "source": [
171
+ "# Create tier summary table for countries\n",
172
+ "plot_tier_summary_table(df, geography=\"country\")\n",
173
+ "plt.savefig(\n",
174
+ " output_dir / \"tier_summary_table_country.png\",\n",
175
+ " dpi=300,\n",
176
+ " bbox_inches=\"tight\",\n",
177
+ " transparent=True,\n",
178
+ ")"
179
+ ]
180
+ },
181
+ {
182
+ "cell_type": "code",
183
+ "execution_count": null,
184
+ "metadata": {
185
+ "vscode": {
186
+ "languageId": "python"
187
+ }
188
+ },
189
+ "outputs": [],
190
+ "source": [
191
+ "# Top countries by usage per capita\n",
192
+ "plot_usage_index_bars(\n",
193
+ " df, geography=\"country\", top_n=20, filtered_entities=filtered_countries\n",
194
+ ")\n",
195
+ "plt.savefig(\n",
196
+ " output_dir / \"ai_usage_index_bar_country_top20.png\", dpi=300, bbox_inches=\"tight\"\n",
197
+ ")"
198
+ ]
199
+ },
200
+ {
201
+ "cell_type": "code",
202
+ "execution_count": null,
203
+ "metadata": {
204
+ "vscode": {
205
+ "languageId": "python"
206
+ }
207
+ },
208
+ "outputs": [],
209
+ "source": [
210
+ "# GDP vs usage regression for countries\n",
211
+ "plot_gdp_scatter(df, geography=\"country\", filtered_entities=filtered_countries)\n",
212
+ "plt.savefig(\n",
213
+ " output_dir / \"ai_usage_index_gdp_reg_country_min_obs.png\",\n",
214
+ " dpi=300,\n",
215
+ " bbox_inches=\"tight\",\n",
216
+ ")"
217
+ ]
218
+ },
219
+ {
220
+ "cell_type": "code",
221
+ "execution_count": null,
222
+ "metadata": {
223
+ "vscode": {
224
+ "languageId": "python"
225
+ }
226
+ },
227
+ "outputs": [],
228
+ "source": [
229
+ "# GDP vs usage regression for countries\n",
230
+ "plot_gdp_scatter(\n",
231
+ " df, geography=\"country\", filtered_entities=filtered_countries, figsize=(13.2, 8.25)\n",
232
+ ")\n",
233
+ "plt.savefig(\n",
234
+ " output_dir / \"ai_usage_index_gdp_reg_country_min_obs_wide.png\",\n",
235
+ " dpi=300,\n",
236
+ " bbox_inches=\"tight\",\n",
237
+ ")"
238
+ ]
239
+ },
240
+ {
241
+ "cell_type": "code",
242
+ "execution_count": null,
243
+ "metadata": {
244
+ "vscode": {
245
+ "languageId": "python"
246
+ }
247
+ },
248
+ "outputs": [],
249
+ "source": [
250
+ "# Create SOC diffusion scatter plot with top 4 classified SOC groups (2x2 grid)\n",
251
+ "plot_soc_usage_scatter(df, geography=\"country\")\n",
252
+ "plt.savefig(\n",
253
+ " output_dir / \"soc_usage_scatter_top4_country_min.png\", dpi=300, bbox_inches=\"tight\"\n",
254
+ ")"
255
+ ]
256
+ },
257
+ {
258
+ "cell_type": "code",
259
+ "execution_count": null,
260
+ "metadata": {
261
+ "vscode": {
262
+ "languageId": "python"
263
+ }
264
+ },
265
+ "outputs": [],
266
+ "source": [
267
+ "# Find the highest usage country in each tier (1-4)\n",
268
+ "\n",
269
+ "# Get usage tier and usage count data for all countries\n",
270
+ "tier_data = df[\n",
271
+ " (df[\"geography\"] == \"country\")\n",
272
+ " & (df[\"variable\"] == \"usage_tier\")\n",
273
+ " & (df[\"facet\"] == \"country\")\n",
274
+ "][[\"geo_id\", \"value\"]].rename(columns={\"value\": \"tier\"})\n",
275
+ "\n",
276
+ "usage_data = df[\n",
277
+ " (df[\"geography\"] == \"country\")\n",
278
+ " & (df[\"variable\"] == \"usage_count\")\n",
279
+ " & (df[\"facet\"] == \"country\")\n",
280
+ "][[\"geo_id\", \"geo_name\", \"value\"]].rename(columns={\"value\": \"usage_count\"})\n",
281
+ "\n",
282
+ "# Merge tier and usage data\n",
283
+ "country_data = usage_data.merge(tier_data, on=\"geo_id\")\n",
284
+ "\n",
285
+ "selected_countries = [\n",
286
+ " country_data[country_data[\"tier\"] == tier]\n",
287
+ " .sort_values(\"usage_count\", ascending=False)\n",
288
+ " .iloc[0][\"geo_id\"]\n",
289
+ " for tier in [4, 3, 2, 1]\n",
290
+ "]"
291
+ ]
292
+ },
293
+ {
294
+ "cell_type": "code",
295
+ "execution_count": null,
296
+ "metadata": {
297
+ "vscode": {
298
+ "languageId": "python"
299
+ }
300
+ },
301
+ "outputs": [],
302
+ "source": [
303
+ "# Compare top overrepresented requests for 4 highest usage countries in each tier\n",
304
+ "plot_request_comparison_cards(\n",
305
+ " df,\n",
306
+ " geo_ids=selected_countries,\n",
307
+ " top_n=5,\n",
308
+ " title=\"Top overrepresented requests for the United States, Brazil, Vietnam and India\",\n",
309
+ " geography=\"country\",\n",
310
+ ")\n",
311
+ "\n",
312
+ "plt.savefig(\n",
313
+ " output_dir / \"request_comparison_cards_by_tier_country_selected4.png\",\n",
314
+ " dpi=300,\n",
315
+ " bbox_inches=\"tight\",\n",
316
+ ")"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "markdown",
321
+ "metadata": {},
322
+ "source": [
323
+ "## 3. United States"
324
+ ]
325
+ },
326
+ {
327
+ "cell_type": "code",
328
+ "execution_count": null,
329
+ "metadata": {
330
+ "vscode": {
331
+ "languageId": "python"
332
+ }
333
+ },
334
+ "outputs": [],
335
+ "source": [
336
+ "# State tier map\n",
337
+ "plot_tier_map(\n",
338
+ " df, geography=\"state_us\", title=\"Anthropic AI Usage Index tier by US state\"\n",
339
+ ")\n",
340
+ "plt.savefig(\n",
341
+ " output_dir / \"ai_usage_index_tier_map_state_all.png\", dpi=300, bbox_inches=\"tight\"\n",
342
+ ")"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "code",
347
+ "execution_count": null,
348
+ "metadata": {
349
+ "vscode": {
350
+ "languageId": "python"
351
+ }
352
+ },
353
+ "outputs": [],
354
+ "source": [
355
+ "# Top 20 US states\n",
356
+ "plot_usage_index_bars(\n",
357
+ " df,\n",
358
+ " geography=\"state_us\",\n",
359
+ " top_n=20,\n",
360
+ ")\n",
361
+ "plt.savefig(\n",
362
+ " output_dir / \"ai_usage_index_bar_state_top20.png\", dpi=300, bbox_inches=\"tight\"\n",
363
+ ")"
364
+ ]
365
+ },
366
+ {
367
+ "cell_type": "code",
368
+ "execution_count": null,
369
+ "metadata": {
370
+ "vscode": {
371
+ "languageId": "python"
372
+ }
373
+ },
374
+ "outputs": [],
375
+ "source": [
376
+ "# Create tier summary table for US states\n",
377
+ "plot_tier_summary_table(df, geography=\"state_us\")\n",
378
+ "plt.savefig(\n",
379
+ " output_dir / \"tier_summary_table_state.png\",\n",
380
+ " dpi=300,\n",
381
+ " bbox_inches=\"tight\",\n",
382
+ " transparent=True,\n",
383
+ ")"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": null,
389
+ "metadata": {
390
+ "vscode": {
391
+ "languageId": "python"
392
+ }
393
+ },
394
+ "outputs": [],
395
+ "source": [
396
+ "# Find the highest usage US state in each tier (1-4)\n",
397
+ "\n",
398
+ "# Get usage tier and usage count data for US states\n",
399
+ "tier_data_states = df[\n",
400
+ " (df[\"geography\"] == \"state_us\")\n",
401
+ " & (df[\"variable\"] == \"usage_tier\")\n",
402
+ " & (df[\"facet\"] == \"state_us\")\n",
403
+ "][[\"geo_id\", \"value\"]].rename(columns={\"value\": \"tier\"})\n",
404
+ "\n",
405
+ "usage_data_states = df[\n",
406
+ " (df[\"geography\"] == \"state_us\")\n",
407
+ " & (df[\"variable\"] == \"usage_count\")\n",
408
+ " & (df[\"facet\"] == \"state_us\")\n",
409
+ "][[\"geo_id\", \"geo_name\", \"value\"]].rename(columns={\"value\": \"usage_count\"})\n",
410
+ "\n",
411
+ "# Merge tier and usage data\n",
412
+ "state_data = usage_data_states.merge(tier_data_states, on=\"geo_id\")\n",
413
+ "\n",
414
+ "# Find the highest usage state in each tier\n",
415
+ "selected_states = [\n",
416
+ " state_data[state_data[\"tier\"] == tier]\n",
417
+ " .sort_values(\"usage_count\", ascending=False)\n",
418
+ " .iloc[0][\"geo_id\"]\n",
419
+ " for tier in [4, 3, 2, 1]\n",
420
+ "]"
421
+ ]
422
+ },
423
+ {
424
+ "cell_type": "code",
425
+ "execution_count": null,
426
+ "metadata": {
427
+ "vscode": {
428
+ "languageId": "python"
429
+ }
430
+ },
431
+ "outputs": [],
432
+ "source": [
433
+ "# Compare top overrepresented requests for US states representing each tier\n",
434
+ "# CA (Tier 4), TX (Tier 3), FL (Tier 2), SC (Tier 1)\n",
435
+ "states_to_compare = [\"CA\", \"TX\", \"FL\", \"SC\"]\n",
436
+ "\n",
437
+ "plot_request_comparison_cards(\n",
438
+ " df,\n",
439
+ " geo_ids=states_to_compare,\n",
440
+ " top_n=5,\n",
441
+ " title=\"Top overrepresented high-level requests for California, Texas, Florida and South Carolina\",\n",
442
+ " geography=\"state_us\",\n",
443
+ ")\n",
444
+ "\n",
445
+ "plt.savefig(\n",
446
+ " output_dir / \"request_comparison_cards_by_tier_state_selected4.png\",\n",
447
+ " dpi=300,\n",
448
+ " bbox_inches=\"tight\",\n",
449
+ ")"
450
+ ]
451
+ },
452
+ {
453
+ "cell_type": "code",
454
+ "execution_count": null,
455
+ "metadata": {
456
+ "vscode": {
457
+ "languageId": "python"
458
+ }
459
+ },
460
+ "outputs": [],
461
+ "source": [
462
+ "# Create card-style visualization for Washington DC\n",
463
+ "# Shows top O*NET tasks and top request categories\n",
464
+ "plot_dc_task_request_cards(\n",
465
+ " df, title=\"Washington, DC: Highest Anthropic AI Usage Index in the US\"\n",
466
+ ")\n",
467
+ "\n",
468
+ "plt.savefig(\n",
469
+ " output_dir / \"task_request_comparison_state_dc.png\", dpi=300, bbox_inches=\"tight\"\n",
470
+ ")"
471
+ ]
472
+ },
473
+ {
474
+ "cell_type": "code",
475
+ "execution_count": null,
476
+ "metadata": {
477
+ "vscode": {
478
+ "languageId": "python"
479
+ }
480
+ },
481
+ "outputs": [],
482
+ "source": [
483
+ "# Collaboration pattern analysis with task mix control\n",
484
+ "# This analysis determines whether the relationship between AUI\n",
485
+ "# and automation preference persists after controlling for task composition\n",
486
+ "collaboration_task_regression(df, geography=\"country\")\n",
487
+ "plt.savefig(\n",
488
+ " output_dir / \"collaboration_task_control_partial_corr_country.png\",\n",
489
+ " dpi=300,\n",
490
+ " bbox_inches=\"tight\",\n",
491
+ ")"
492
+ ]
493
+ },
494
+ {
495
+ "cell_type": "markdown",
496
+ "metadata": {},
497
+ "source": [
498
+ "# Appendix"
499
+ ]
500
+ },
501
+ {
502
+ "cell_type": "markdown",
503
+ "metadata": {},
504
+ "source": [
505
+ "## Global"
506
+ ]
507
+ },
508
+ {
509
+ "cell_type": "code",
510
+ "execution_count": null,
511
+ "metadata": {
512
+ "vscode": {
513
+ "languageId": "python"
514
+ }
515
+ },
516
+ "outputs": [],
517
+ "source": [
518
+ "# Distribution histogram\n",
519
+ "plot_usage_index_histogram(\n",
520
+ " df, geography=\"country\", title=\"Distribution of Anthropic AI Usage Index\"\n",
521
+ ")\n",
522
+ "plt.savefig(\n",
523
+ " output_dir_app / \"ai_usage_index_histogram_country_all.png\",\n",
524
+ " dpi=300,\n",
525
+ " bbox_inches=\"tight\",\n",
526
+ ")"
527
+ ]
528
+ },
529
+ {
530
+ "cell_type": "code",
531
+ "execution_count": null,
532
+ "metadata": {
533
+ "vscode": {
534
+ "languageId": "python"
535
+ }
536
+ },
537
+ "outputs": [],
538
+ "source": [
539
+ "# Create map showing share of usage\n",
540
+ "plot_variable_map(\n",
541
+ " df,\n",
542
+ " variable=\"usage_pct\",\n",
543
+ " geography=\"country\",\n",
544
+ " title=\"Share of global Claude usage by country\",\n",
545
+ " figsize=(14, 8),\n",
546
+ ")\n",
547
+ "plt.savefig(\n",
548
+ " output_dir_app / \"usage_pct_map_country_all.png\", dpi=300, bbox_inches=\"tight\"\n",
549
+ ")"
550
+ ]
551
+ },
552
+ {
553
+ "cell_type": "code",
554
+ "execution_count": null,
555
+ "metadata": {
556
+ "vscode": {
557
+ "languageId": "python"
558
+ }
559
+ },
560
+ "outputs": [],
561
+ "source": [
562
+ "# Create world map showing usage per capita\n",
563
+ "plot_variable_map(\n",
564
+ " df,\n",
565
+ " variable=\"usage_per_capita_index\",\n",
566
+ " geography=\"country\",\n",
567
+ " title=\"Anthropic AI Usage Index by country\",\n",
568
+ " center_at_one=True,\n",
569
+ " figsize=(14, 8),\n",
570
+ ")\n",
571
+ "plt.savefig(\n",
572
+ " output_dir_app / \"ai_usage_index_map_country_all.png\", dpi=300, bbox_inches=\"tight\"\n",
573
+ ")"
574
+ ]
575
+ },
576
+ {
577
+ "cell_type": "code",
578
+ "execution_count": null,
579
+ "metadata": {
580
+ "vscode": {
581
+ "languageId": "python"
582
+ }
583
+ },
584
+ "outputs": [],
585
+ "source": [
586
+ "# AUI for all countries\n",
587
+ "plot_usage_index_bars(\n",
588
+ " df,\n",
589
+ " geography=\"country\",\n",
590
+ " filtered_entities=filtered_countries,\n",
591
+ ")\n",
592
+ "plt.savefig(\n",
593
+ " output_dir_app / \"ai_usage_index_country_all.png\", dpi=300, bbox_inches=\"tight\"\n",
594
+ ")"
595
+ ]
596
+ },
597
+ {
598
+ "cell_type": "code",
599
+ "execution_count": null,
600
+ "metadata": {
601
+ "vscode": {
602
+ "languageId": "python"
603
+ }
604
+ },
605
+ "outputs": [],
606
+ "source": [
607
+ "# SOC distribution comparison for countries by usage tier\n",
608
+ "plot_soc_distribution(\n",
609
+ " df,\n",
610
+ " selected_countries,\n",
611
+ " \"country\",\n",
612
+ " title=\"Occupation groups by Claude task usage in the United States, Brazil, Vietnam and India\",\n",
613
+ ")\n",
614
+ "plt.savefig(\n",
615
+ " output_dir_app / \"soc_distribution_by_tier_country_selected4.png\",\n",
616
+ " dpi=300,\n",
617
+ " bbox_inches=\"tight\",\n",
618
+ ")"
619
+ ]
620
+ },
621
+ {
622
+ "cell_type": "code",
623
+ "execution_count": null,
624
+ "metadata": {
625
+ "vscode": {
626
+ "languageId": "python"
627
+ }
628
+ },
629
+ "outputs": [],
630
+ "source": [
631
+ "# Plot automation preference residuals after controlling for task mix\n",
632
+ "# This shows which countries prefer more automation vs augmentation\n",
633
+ "# than would be expected given their task composition\n",
634
+ "plot_automation_preference_residuals(df)\n",
635
+ "plt.savefig(\n",
636
+ " output_dir_app / \"automation_preference_residuals.png\", dpi=300, bbox_inches=\"tight\"\n",
637
+ ")"
638
+ ]
639
+ },
640
+ {
641
+ "cell_type": "markdown",
642
+ "metadata": {},
643
+ "source": [
644
+ "## United States"
645
+ ]
646
+ },
647
+ {
648
+ "cell_type": "code",
649
+ "execution_count": null,
650
+ "metadata": {
651
+ "vscode": {
652
+ "languageId": "python"
653
+ }
654
+ },
655
+ "outputs": [],
656
+ "source": [
657
+ "# Top countries by share of global usage\n",
658
+ "plot_usage_share_bars(\n",
659
+ " df,\n",
660
+ " geography=\"state_us\",\n",
661
+ " top_n=30,\n",
662
+ " title=\"Top 30 US states by share of US Claude usage\",\n",
663
+ ")\n",
664
+ "plt.savefig(\n",
665
+ " output_dir_app / \"usage_pct_bar_state_top30.png\", dpi=300, bbox_inches=\"tight\"\n",
666
+ ")"
667
+ ]
668
+ },
669
+ {
670
+ "cell_type": "code",
671
+ "execution_count": null,
672
+ "metadata": {
673
+ "vscode": {
674
+ "languageId": "python"
675
+ }
676
+ },
677
+ "outputs": [],
678
+ "source": [
679
+ "# Distribution histogram\n",
680
+ "plot_usage_index_histogram(\n",
681
+ " df, geography=\"state_us\", title=\"Distribution of Anthropic AI Usage Index\"\n",
682
+ ")\n",
683
+ "plt.savefig(\n",
684
+ " output_dir_app / \"ai_usage_index_histogram_state_all.png\",\n",
685
+ " dpi=300,\n",
686
+ " bbox_inches=\"tight\",\n",
687
+ ")"
688
+ ]
689
+ },
690
+ {
691
+ "cell_type": "code",
692
+ "execution_count": null,
693
+ "metadata": {
694
+ "vscode": {
695
+ "languageId": "python"
696
+ }
697
+ },
698
+ "outputs": [],
699
+ "source": [
700
+ "# Create map showing share of usage\n",
701
+ "plot_variable_map(\n",
702
+ " df,\n",
703
+ " variable=\"usage_pct\",\n",
704
+ " geography=\"state_us\",\n",
705
+ " title=\"Share of global Claude usage by US state\",\n",
706
+ " figsize=(14, 8),\n",
707
+ ")\n",
708
+ "plt.savefig(\n",
709
+ " output_dir_app / \"usage_pct_map_state_all.png\", dpi=300, bbox_inches=\"tight\"\n",
710
+ ")"
711
+ ]
712
+ },
713
+ {
714
+ "cell_type": "code",
715
+ "execution_count": null,
716
+ "metadata": {
717
+ "vscode": {
718
+ "languageId": "python"
719
+ }
720
+ },
721
+ "outputs": [],
722
+ "source": [
723
+ "# Create map showing per capita usage\n",
724
+ "plot_variable_map(\n",
725
+ " df,\n",
726
+ " variable=\"usage_per_capita_index\",\n",
727
+ " geography=\"state_us\",\n",
728
+ " title=\"Anthropic AI Usage Index by US state\",\n",
729
+ " center_at_one=True,\n",
730
+ " figsize=(14, 8),\n",
731
+ ")\n",
732
+ "plt.savefig(\n",
733
+ " output_dir_app / \"ai_usage_index_map_state_all.png\", dpi=300, bbox_inches=\"tight\"\n",
734
+ ")"
735
+ ]
736
+ },
737
+ {
738
+ "cell_type": "code",
739
+ "execution_count": null,
740
+ "metadata": {
741
+ "vscode": {
742
+ "languageId": "python"
743
+ }
744
+ },
745
+ "outputs": [],
746
+ "source": [
747
+ "plot_usage_index_bars(\n",
748
+ " df,\n",
749
+ " geography=\"state_us\",\n",
750
+ ")\n",
751
+ "plt.savefig(\n",
752
+ " output_dir_app / \"ai_usage_index_bar_state_all.png\", dpi=300, bbox_inches=\"tight\"\n",
753
+ ")"
754
+ ]
755
+ },
756
+ {
757
+ "cell_type": "code",
758
+ "execution_count": null,
759
+ "metadata": {
760
+ "vscode": {
761
+ "languageId": "python"
762
+ }
763
+ },
764
+ "outputs": [],
765
+ "source": [
766
+ "# GDP vs usage regression for US states\n",
767
+ "plot_gdp_scatter(df, geography=\"state_us\", filtered_entities=filtered_states)\n",
768
+ "plt.savefig(\n",
769
+ " output_dir_app / \"ai_usage_index_gdp_reg_state_min_obs.png\",\n",
770
+ " dpi=300,\n",
771
+ " bbox_inches=\"tight\",\n",
772
+ ")"
773
+ ]
774
+ },
775
+ {
776
+ "cell_type": "code",
777
+ "execution_count": null,
778
+ "metadata": {
779
+ "vscode": {
780
+ "languageId": "python"
781
+ }
782
+ },
783
+ "outputs": [],
784
+ "source": [
785
+ "# SOC distribution comparison for US states by usage tier\n",
786
+ "plot_soc_distribution(\n",
787
+ " df,\n",
788
+ " selected_states,\n",
789
+ " \"state_us\",\n",
790
+ " title=\"Occupation groups by Claude task usage in California, Texas, Florida and South Carolina\",\n",
791
+ ")\n",
792
+ "plt.savefig(\n",
793
+ " output_dir_app / \"soc_distribution_by_tier_state_selected4.png\",\n",
794
+ " dpi=300,\n",
795
+ " bbox_inches=\"tight\",\n",
796
+ ")"
797
+ ]
798
+ },
799
+ {
800
+ "cell_type": "code",
801
+ "execution_count": null,
802
+ "metadata": {
803
+ "vscode": {
804
+ "languageId": "python"
805
+ }
806
+ },
807
+ "outputs": [],
808
+ "source": [
809
+ "# Top SOC chart\n",
810
+ "plot_variable_bars(\n",
811
+ " df,\n",
812
+ " variable=\"soc_pct\",\n",
813
+ " geography=\"country\",\n",
814
+ " facet=\"soc_occupation\",\n",
815
+ " geo_id=\"USA\",\n",
816
+ " title=\"Occupation groups in the US by Claude use for associated tasks\",\n",
817
+ " xlabel=\"Share of total usage (%)\",\n",
818
+ " exclude_not_classified=True,\n",
819
+ ")\n",
820
+ "\n",
821
+ "# Save the figure\n",
822
+ "plt.savefig(output_dir_app / \"soc_bar_country_us.png\", dpi=300, bbox_inches=\"tight\")"
823
+ ]
824
+ },
825
+ {
826
+ "cell_type": "code",
827
+ "execution_count": null,
828
+ "metadata": {
829
+ "vscode": {
830
+ "languageId": "python"
831
+ }
832
+ },
833
+ "outputs": [],
834
+ "source": [
835
+ "# Create SOC diffusion scatter plot with top 4 classified SOC groups\n",
836
+ "plot_soc_usage_scatter(\n",
837
+ " df,\n",
838
+ " geography=\"state_us\",\n",
839
+ ")\n",
840
+ "plt.savefig(\n",
841
+ " output_dir_app / \"soc_usage_scatter_top4_state_min.png\",\n",
842
+ " dpi=300,\n",
843
+ " bbox_inches=\"tight\",\n",
844
+ ")"
845
+ ]
846
+ }
847
+ ],
848
+ "metadata": {
849
+ "kernelspec": {
850
+ "display_name": "Coconut",
851
+ "language": "coconut",
852
+ "name": "coconut"
853
+ },
854
+ "language_info": {
855
+ "codemirror_mode": {
856
+ "name": "python",
857
+ "version": 3
858
+ },
859
+ "file_extension": ".coco",
860
+ "mimetype": "text/x-python3",
861
+ "name": "coconut",
862
+ "pygments_lexer": "coconut",
863
+ "version": "3.0.2"
864
+ }
865
+ },
866
+ "nbformat": 4,
867
+ "nbformat_minor": 4
868
+ }
release_2025_09_15/code/aei_report_v3_change_over_time_claude_ai.py ADDED
@@ -0,0 +1,564 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Clean Economic Analysis Figure Generator
4
+ ======================================
5
+ Generates three key figures for V1→V2→V3 economic analysis:
6
+ 1. Usage Share Trends Across Economic Index Reports
7
+ 2. Notable Task Changes (Growing/Declining Tasks)
8
+ 3. Automation vs Augmentation Evolution
9
+
10
+ ASSUMPTIONS:
11
+ - V1/V2/V3 use same task taxonomy
12
+ - GLOBAL geo_id is representative
13
+ - Missing values = 0% usage
14
+ - Percentages don't need renormalization
15
+ """
16
+
17
+ import os
18
+ import warnings
19
+ from pathlib import Path
20
+
21
+ import matplotlib.pyplot as plt
22
+ import numpy as np
23
+ import pandas as pd
24
+ import seaborn as sns
25
+
26
# Use default matplotlib styling
plt.style.use("default")

# Configuration: input/intermediate file locations keyed by a short identifier.
FILES = {
    "v1_tasks": "../data/input/task_pct_v1.csv",
    "v2_tasks": "../data/input/task_pct_v2.csv",
    "v3_data": "../data/intermediate/aei_raw_claude_ai_2025-08-04_to_2025-08-11.csv",
    "v1_auto": "../data/input/automation_vs_augmentation_v1.csv",
    "v2_auto": "../data/input/automation_vs_augmentation_v2.csv",
    "onet": "../data/intermediate/onet_task_statements.csv",
    "soc": "../data/intermediate/soc_structure.csv",
}

# Interaction types bucketed into automation vs augmentation groups.
AUTOMATION_TYPES = ["directive", "feedback_loop"]
AUGMENTATION_TYPES = ["validation", "task_iteration", "learning"]
# Minimum share (percentage points) for a category to count as significant.
MIN_THRESHOLD = 1.0
# Shared color palette used across the figures.
COLORS = {
    "increase": "#2E8B57",
    "decrease": "#CD5C5C",
    "automation": "#FF6B6B",
    "augmentation": "#4ECDC4",
}
49
+
50
+ # ============================================================================
51
+ # DATA LOADING
52
+ # ============================================================================
53
+
54
+
55
def load_task_data(filepath, version_name):
    """Load and validate task percentage data for any version.

    Args:
        filepath: Path to the CSV file holding task usage shares.
        version_name: Release label ("V1", "V2", or "V3"). "V3" triggers
            filtering of the raw long-format data and renormalization.

    Returns:
        DataFrame with two columns: lowercased/stripped ``task_name``
        and ``pct`` (share of usage, roughly summing to 100).

    Raises:
        FileNotFoundError: If ``filepath`` does not exist.
        ValueError: If the required columns are missing after loading.
    """
    if not Path(filepath).exists():
        raise FileNotFoundError(f"Missing {version_name} data: {filepath}")

    df = pd.read_csv(filepath)

    if version_name == "V3":
        # V3 ships as long-format raw data: keep only global O*NET task shares.
        df = df[
            (df["geo_id"] == "GLOBAL")
            & (df["facet"] == "onet_task")
            & (df["variable"] == "onet_task_pct")
        ].copy()
        df = df.rename(columns={"cluster_name": "task_name", "value": "pct"})

        # Remove "not_classified" from V3 for fair comparison with V1/V2.
        # Keep "none" as it represents legitimate unclassifiable tasks across all versions.
        not_classified_pct = df[df["task_name"] == "not_classified"]["pct"].sum()
        df = df[df["task_name"] != "not_classified"].copy()

        # Renormalize V3 to 100% after removing not_classified.
        # Guard remaining_total > 0: if every row was not_classified the
        # remainder is empty and the old 100/remaining_total divided by zero.
        remaining_total = df["pct"].sum()
        if not_classified_pct > 0 and remaining_total > 0:
            normalization_factor = 100 / remaining_total
            df["pct"] = df["pct"] * normalization_factor
            print(
                f" → Removed {not_classified_pct:.1f}% not_classified, renormalized by {normalization_factor:.3f}x"
            )

    # Validate structure
    if "task_name" not in df.columns or "pct" not in df.columns:
        raise ValueError(f"{version_name} data missing required columns")

    # Normalize task names (lowercase, stripped) and sanity-check the total.
    df["task_name"] = df["task_name"].str.lower().str.strip()
    total = df["pct"].sum()

    if not (80 <= total <= 120):
        warnings.warn(
            f"{version_name} percentages sum to {total:.1f}% (expected ~100%)",
            stacklevel=2,
        )

    print(f"✓ {version_name}: {len(df)} tasks, {total:.1f}% coverage")
    return df[["task_name", "pct"]]
101
+
102
+
103
def load_automation_data():
    """Load automation/collaboration data for all versions.

    V1/V2 come from pre-aggregated CSVs and are always renormalized to
    100%. V3 is extracted from the raw processed data (global, top-level
    collaboration shares), stripped of the "not_classified" bucket, and
    renormalized so all three releases are comparable.

    Returns:
        Dict mapping "v1"/"v2"/"v3" to DataFrames with
        ``interaction_type`` and ``pct`` columns.
    """
    result = {}

    # V1 and V2 - always renormalize to 100%
    for version in ["v1", "v2"]:
        df = pd.read_csv(FILES[f"{version}_auto"])

        total = df["pct"].sum()
        normalization_factor = 100 / total
        df["pct"] = df["pct"] * normalization_factor
        print(
            f" → {version.upper()} automation: renormalized from {total:.1f}% to 100.0%"
        )

        result[version] = df

    # V3 from processed data: global, top-level collaboration shares only.
    df = pd.read_csv(FILES["v3_data"])
    v3_collab = df[
        (df["geo_id"] == "GLOBAL")
        & (df["facet"] == "collaboration")
        & (df["level"] == 0)
        & (df["variable"] == "collaboration_pct")
    ].copy()
    v3_collab = v3_collab.rename(
        columns={"cluster_name": "interaction_type", "value": "pct"}
    )

    # Remove "not_classified" from V3 collaboration data for fair comparison
    not_classified_pct = v3_collab[v3_collab["interaction_type"] == "not_classified"][
        "pct"
    ].sum()
    v3_collab = v3_collab[v3_collab["interaction_type"] != "not_classified"].copy()

    # Renormalize V3 collaboration to 100% after removing not_classified.
    # Guard remaining_total > 0 to avoid dividing by zero if every row was
    # not_classified.
    remaining_total = v3_collab["pct"].sum()
    if not_classified_pct > 0 and remaining_total > 0:
        normalization_factor = 100 / remaining_total
        v3_collab["pct"] = v3_collab["pct"] * normalization_factor
        print(
            f" → V3 collaboration: removed {not_classified_pct:.1f}% not_classified, renormalized by {normalization_factor:.3f}x"
        )

    result["v3"] = v3_collab[["interaction_type", "pct"]]

    # Plain string: the original used an f-string with no placeholders (F541).
    print("✓ Automation data loaded for all versions")
    return result
152
+
153
+
154
def load_occupational_mapping():
    """Load the O*NET task to SOC major-group mapping.

    Joins the O*NET task statements to the SOC structure on the
    two-digit SOC major group and adds a ``task_normalized`` column
    (lowercased, stripped task text) for matching against Clio data.

    Returns:
        DataFrame of O*NET tasks annotated with their SOC group title.
    """
    tasks = pd.read_csv(FILES["onet"])
    soc_structure = pd.read_csv(FILES["soc"]).dropna(subset=["Major Group"])

    # The first two digits of a SOC code identify its major group.
    tasks["soc_major_group"] = tasks["O*NET-SOC Code"].str[:2]
    soc_structure["soc_major_group"] = soc_structure["Major Group"].str[:2]

    mapping = tasks.merge(
        soc_structure[["soc_major_group", "SOC or O*NET-SOC 2019 Title"]],
        on="soc_major_group",
    )
    mapping["task_normalized"] = mapping["Task"].str.lower().str.strip()

    print(f"✓ Occupational mapping: {mapping['soc_major_group'].nunique()} SOC groups")
    return mapping
169
+
170
+
171
+ # ============================================================================
172
+ # ANALYSIS
173
+ # ============================================================================
174
+
175
+
176
def analyze_occupational_trends(task_data, onet_soc_data):
    """Analyze occupational category trends across versions.

    Aggregates each release's task shares up to SOC major-group titles
    and keeps only categories that reach MIN_THRESHOLD in at least one
    release.

    Args:
        task_data: Dict with "v1"/"v2"/"v3" DataFrames (task_name, pct).
        onet_soc_data: Mapping with ``task_normalized`` and
            ``SOC or O*NET-SOC 2019 Title`` columns.

    Returns:
        DataFrame indexed by SOC title with v1/v2/v3 shares and a
        ``v3_v1_diff`` column, sorted by v1 share descending.
    """

    def _occupation_shares(version_df):
        """Sum task shares by SOC major-group title for one release."""
        joined = version_df.merge(
            onet_soc_data[
                ["task_normalized", "SOC or O*NET-SOC 2019 Title"]
            ].drop_duplicates(),
            left_on="task_name",
            right_on="task_normalized",
            how="left",
        )

        missing = joined[joined["SOC or O*NET-SOC 2019 Title"].isna()]
        # Only warn about genuinely unmapped tasks; "none" and
        # "not_classified" are expected to lack a SOC mapping.
        truly_missing = missing[~missing["task_name"].isin(["none", "not_classified"])]
        if len(truly_missing) > 0:
            missing_share = truly_missing["pct"].sum()
            warnings.warn(
                f"{missing_share:.1f}% of tasks unmapped to occupational categories",
                stacklevel=2,
            )

        return joined.groupby("SOC or O*NET-SOC 2019 Title")["pct"].sum()

    # One column per release; categories absent in a release get 0.
    comparison_df = pd.DataFrame(
        {version: _occupation_shares(task_data[version]) for version in ("v1", "v2", "v3")}
    ).fillna(0)

    # Absolute change from the first to the latest release, then keep only
    # categories that matter economically in at least one release.
    comparison_df["v3_v1_diff"] = comparison_df["v3"] - comparison_df["v1"]
    significant = comparison_df[
        (comparison_df[["v1", "v2", "v3"]] >= MIN_THRESHOLD).any(axis=1)
    ]

    print(
        f"✓ Occupational analysis: {len(significant)} economically significant categories"
    )
    return significant.sort_values("v1", ascending=False)
222
+
223
+
224
def analyze_task_changes(task_data, onet_soc_data, top_n=12):
    """Identify most notable task changes V1→V3.

    Args:
        task_data: Dict holding at least "v1" and "v3" DataFrames with
            ``task_name`` and ``pct`` columns.
        onet_soc_data: Mapping DataFrame with ``task_normalized`` and
            ``SOC or O*NET-SOC 2019 Title`` columns for SOC context.
        top_n: Unused; retained for backward compatibility with callers.

    Returns:
        DataFrame of tasks whose share moved by >= 0.2 percentage points,
        sorted by ``change`` descending, with ``soc_group`` context and a
        formatted ``category`` column ("+N%" / "-N%" / "new").
    """
    v1, v3 = task_data["v1"], task_data["v3"]

    # Outer-join the two releases; a task absent from one side has 0% share.
    comparison = (
        v1[["task_name", "pct"]]
        .rename(columns={"pct": "v1_pct"})
        .merge(
            v3[["task_name", "pct"]].rename(columns={"pct": "v3_pct"}),
            on="task_name",
            how="outer",
        )
        .fillna(0)
    )

    comparison["change"] = comparison["v3_pct"] - comparison["v1_pct"]
    # Relative change in percent; inf marks tasks that are new in V3
    # (no V1 baseline to divide by).
    comparison["rel_change"] = np.where(
        comparison["v1_pct"] > 0,
        (comparison["v3_pct"] - comparison["v1_pct"]) / comparison["v1_pct"] * 100,
        np.inf,
    )

    # Add SOC context
    with_context = comparison.merge(
        onet_soc_data[
            ["task_normalized", "SOC or O*NET-SOC 2019 Title"]
        ].drop_duplicates(),
        left_on="task_name",
        right_on="task_normalized",
        how="left",
    )

    # Keep tasks with economically significant changes (>= 0.2pp either way).
    significant_changes = with_context[abs(with_context["change"]) >= 0.2].copy()

    # Format the already-computed rel_change column instead of recomputing
    # the ratio row by row (the original duplicated the arithmetic here).
    significant_changes["category"] = significant_changes["rel_change"].map(
        lambda rel: "new" if np.isinf(rel) else f"{rel:+.0f}%"
    )

    # Rename column and sort by change descending
    significant_changes = significant_changes.rename(
        columns={"SOC or O*NET-SOC 2019 Title": "soc_group"}
    )
    significant_changes = significant_changes.sort_values("change", ascending=False)

    # Round to 3 decimals
    significant_changes[["v1_pct", "v3_pct", "change"]] = significant_changes[
        ["v1_pct", "v3_pct", "change"]
    ].round(3)

    print(f"✓ Task changes: {len(significant_changes)} notable changes identified")
    return significant_changes
285
+
286
+
287
def analyze_automation_trends(automation_data):
    """Analyze automation vs augmentation trends across versions.

    Note: mutates the input frames in place to standardize interaction
    spellings (earlier releases used spaces instead of underscores).

    Args:
        automation_data: Dict of per-version DataFrames with
            ``interaction_type`` and ``pct`` columns.

    Returns:
        Dict mapping each version to its automation/augmentation totals
        plus the share of every individual interaction type.
    """
    # Standardize interaction names across releases.
    spelling_fixes = {"task iteration": "task_iteration", "feedback loop": "feedback_loop"}
    for frame in automation_data.values():
        frame["interaction_type"] = frame["interaction_type"].replace(spelling_fixes)

    results = {}
    for version, frame in automation_data.items():
        shares = dict(zip(frame["interaction_type"], frame["pct"], strict=True))
        results[version] = {
            "automation_total": frame[frame["interaction_type"].isin(AUTOMATION_TYPES)][
                "pct"
            ].sum(),
            "augmentation_total": frame[
                frame["interaction_type"].isin(AUGMENTATION_TYPES)
            ]["pct"].sum(),
            "directive": shares["directive"],
            "feedback_loop": shares["feedback_loop"],
            "validation": shares["validation"],
            "task_iteration": shares["task_iteration"],
            "learning": shares["learning"],
        }

    print("✓ Automation trends analysis complete")
    return results
313
+
314
+
315
+ # ============================================================================
316
+ # VISUALIZATION
317
+ # ============================================================================
318
+
319
+
320
def setup_plot_style():
    """Configure consistent plot styling."""
    # Shared font sizing for all figures, plus seaborn's notebook context.
    rc_overrides = {"font.size": 12, "axes.titlesize": 16, "axes.labelsize": 14}
    plt.rcParams.update(rc_overrides)
    sns.set_context("notebook", font_scale=1.1)
324
+
325
+
326
def create_usage_trends_figure(comparison_df):
    """Create Usage Share Trends subplot figure.

    Draws one small-multiple per top occupational category showing its
    usage share across the three report releases.

    Args:
        comparison_df: DataFrame indexed by SOC title with v1/v2/v3
            share columns.

    Returns:
        The matplotlib Figure (caller is responsible for saving).
    """
    setup_plot_style()

    # Top categories: anything crossing MIN_THRESHOLD in at least one release.
    leaders = comparison_df[
        (comparison_df[["v1", "v2", "v3"]] >= MIN_THRESHOLD).any(axis=1)
    ].head(8)
    leaders.index = leaders.index.str.replace(" Occupations", "")

    fig, axes = plt.subplots(2, 4, figsize=(20, 15))
    axes = axes.flatten()

    line_color = "#FF8E53"
    fill_color = "#DEB887"

    # Simplified date labels (actual periods: Dec 2024-Jan 2025, Feb-Mar 2025, Aug 2025)
    versions, version_labels = [1, 2, 3], ["Jan 2025", "Mar 2025", "Aug 2025"]

    # zip truncates at the shorter sequence, so at most 8 panels are drawn.
    for panel, (category, row) in zip(axes, leaders.iterrows()):
        shares = [row["v1"], row["v2"], row["v3"]]

        panel.plot(
            versions,
            shares,
            "o-",
            color=line_color,
            linewidth=3,
            markersize=8,
            markerfacecolor=line_color,
            markeredgecolor="white",
            markeredgewidth=2,
        )
        panel.fill_between(versions, shares, alpha=0.3, color=fill_color)

        # Annotate each data point with its percentage value.
        for x, y in zip(versions, shares, strict=True):
            panel.text(
                x,
                y + max(shares) * 0.02,
                f"{y:.1f}%",
                ha="center",
                va="bottom",
                fontsize=12,
                fontweight="bold",
            )

        panel.set_title(category, fontsize=14, fontweight="bold", pad=10)
        panel.set_xticks(versions)
        panel.set_xticklabels(version_labels)
        panel.set_ylabel("Percentage", fontsize=12)
        panel.set_ylim(0, max(shares) * 1.15)
        panel.grid(True, alpha=0.3)
        panel.spines["top"].set_visible(False)
        panel.spines["right"].set_visible(False)

    fig.suptitle(
        "Usage share trends across economic index reports (V1 to V3)",
        fontsize=18,
        fontweight="bold",
        y=0.98,
    )
    plt.tight_layout()
    plt.subplots_adjust(top=0.88)
    return fig
394
+
395
+
396
def create_automation_figure(trends):
    """Create Automation vs Augmentation Evolution figure.

    Left panel: aggregate automation vs augmentation shares per release.
    Right panel: share of each individual interaction type.

    Args:
        trends: Dict from analyze_automation_trends, keyed by version.

    Returns:
        The matplotlib Figure (caller is responsible for saving).
    """
    setup_plot_style()

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))

    # Simplified date labels (actual periods: Dec 2024-Jan 2025, Feb-Mar 2025, Aug 2025)
    version_labels = ["Jan 2025", "Mar 2025", "Aug 2025"]
    x_pos = [1, 2, 3]
    release_keys = ["v1", "v2", "v3"]

    # Left: overall trends (no fill under the lines).
    auto_vals = [trends[v]["automation_total"] for v in release_keys]
    aug_vals = [trends[v]["augmentation_total"] for v in release_keys]

    # Plot automation first so the legend lists it before augmentation.
    for series, color_key, label in (
        (auto_vals, "automation", "Automation"),
        (aug_vals, "augmentation", "Augmentation"),
    ):
        ax1.plot(
            x_pos,
            series,
            "o-",
            color=COLORS[color_key],
            linewidth=3,
            markersize=8,
            label=label,
            markeredgecolor="white",
            markeredgewidth=2,
        )

    # Value labels: automation above each dot, augmentation below.
    y_max = max(max(auto_vals), max(aug_vals))
    for x, auto, aug in zip(x_pos, auto_vals, aug_vals, strict=True):
        ax1.text(
            x,
            auto + 1.2,
            f"{auto:.1f}%",
            ha="center",
            va="bottom",
            fontweight="bold",
            color=COLORS["automation"],
        )
        ax1.text(
            x,
            aug - 1.5,
            f"{aug:.1f}%",
            ha="center",
            va="top",
            fontweight="bold",
            color=COLORS["augmentation"],
        )

    ax1.set_xticks(x_pos)
    ax1.set_xticklabels(version_labels)
    ax1.set_ylabel("Percentage")
    ax1.set_title("Automation vs augmentation trends")
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    ax1.spines[["top", "right"]].set_visible(False)
    ax1.set_ylim(0, y_max * 1.15)

    # Right: individual interaction types. Automation types get warm reds,
    # augmentation types cool blues.
    interaction_colors = {
        "directive": "#DC143C",
        "feedback_loop": "#FF6B6B",
        "validation": "#4682B4",
        "task_iteration": "#5F9EA0",
        "learning": "#4169E1",
    }

    for interaction, color in interaction_colors.items():
        series = [trends[v][interaction] for v in release_keys]
        ax2.plot(
            x_pos,
            series,
            "o-",
            color=color,
            linewidth=2.5,
            markersize=6,
            label=interaction.replace("_", " ").title(),
            alpha=0.8,
        )

    ax2.set_xticks(x_pos)
    ax2.set_xticklabels(version_labels)
    ax2.set_ylabel("Percentage")
    ax2.set_title("Individual interaction types")
    ax2.legend(bbox_to_anchor=(1.05, 1), loc="upper left")
    ax2.grid(True, alpha=0.3)
    ax2.spines[["top", "right"]].set_visible(False)

    plt.suptitle(
        "Automation vs augmentation evolution (V1 to V3)",
        fontsize=16,
        fontweight="bold",
    )
    plt.tight_layout()
    return fig
505
+
506
+
507
+ # ============================================================================
508
+ # MAIN
509
+ # ============================================================================
510
+
511
+
512
def main():
    """Generate all three economic analysis figures.

    Loads task, automation, and occupational-mapping data, runs the
    trend analyses, and writes the figures as PNGs under
    ``../data/output/figures``.

    Returns:
        Tuple of (occupational_trends, task_changes, automation_trends).
    """
    print("=" * 80)
    print("ECONOMIC ANALYSIS FIGURE GENERATION")
    print("=" * 80)

    # Use consistent output directory for all economic research scripts
    output_dir = "../data/output/figures"
    os.makedirs(output_dir, exist_ok=True)

    # Load all data
    print("\nLoading data...")
    task_data = {
        "v1": load_task_data(FILES["v1_tasks"], "V1"),
        "v2": load_task_data(FILES["v2_tasks"], "V2"),
        "v3": load_task_data(FILES["v3_data"], "V3"),
    }
    automation_data = load_automation_data()
    onet_soc_data = load_occupational_mapping()

    # Analysis
    print("\nAnalyzing trends...")
    occupational_trends = analyze_occupational_trends(task_data, onet_soc_data)
    task_changes = analyze_task_changes(task_data, onet_soc_data)
    automation_trends = analyze_automation_trends(automation_data)

    # Generate figures
    print("\nGenerating figures...")

    fig1 = create_usage_trends_figure(occupational_trends)
    fig1.savefig(
        f"{output_dir}/main_occupational_categories.png",
        dpi=300,
        bbox_inches="tight",
        facecolor="white",
    )
    print("✓ Saved: main_occupational_categories.png")

    fig3 = create_automation_figure(automation_trends)
    fig3.savefig(
        f"{output_dir}/automation_trends_v1_v2_v3.png",
        dpi=300,
        bbox_inches="tight",
        facecolor="white",
    )
    print("✓ Saved: automation_trends_v1_v2_v3.png")

    # Plain string: the original used an f-string with no placeholders (F541).
    print("\n✅ All figures generated successfully!")
    return occupational_trends, task_changes, automation_trends


if __name__ == "__main__":
    results = main()
release_2025_09_15/code/aei_report_v3_preprocessing_claude_ai.ipynb ADDED
@@ -0,0 +1,1840 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# AEI Report v3 Claude.ai Preprocessing\n",
8
+ "\n",
9
+ "This notebook takes processed Clio data and enriches it with external sources:\n",
10
+ "1. Merges with population data for per capita calculations\n",
11
+ "2. Merges with GDP data for economic analysis\n",
12
+ "3. Merges with SOC/O*NET data for occupational analysis\n",
13
+ "4. Applies MIN_OBSERVATIONS filtering\n",
14
+ "5. Calculates derived metrics (per capita, indices, tiers)\n",
15
+ "6. Categorizes collaboration patterns\n",
16
+ "\n",
17
+ "**Input**: `aei_raw_claude_ai_2025-08-04_to_2025-08-11.csv`\n",
18
+ "\n",
19
+ "**Output**: `aei_enriched_claude_ai_2025-08-04_to_2025-08-11.csv`"
20
+ ]
21
+ },
22
+ {
23
+ "cell_type": "markdown",
24
+ "metadata": {},
25
+ "source": [
26
+ "## Configuration and Setup"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": null,
32
+ "metadata": {},
33
+ "outputs": [],
34
+ "source": [
35
+ "from pathlib import Path\n",
36
+ "\n",
37
+ "import numpy as np\n",
38
+ "import pandas as pd"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": null,
44
+ "metadata": {},
45
+ "outputs": [],
46
+ "source": [
47
+ "# Year for external data\n",
48
+ "YEAR = 2024\n",
49
+ "\n",
50
+ "# Data paths - using local directories\n",
51
+ "DATA_INPUT_DIR = \"../data/input\" # Raw external data\n",
52
+ "DATA_INTERMEDIATE_DIR = (\n",
53
+ " \"../data/intermediate\" # Processed external data and Clio output\n",
54
+ ")\n",
55
+ "DATA_OUTPUT_DIR = \"../data/output\" # Final enriched data\n",
56
+ "\n",
57
+ "# Minimum observation thresholds\n",
58
+ "MIN_OBSERVATIONS_COUNTRY = 200 # Threshold for countries\n",
59
+ "MIN_OBSERVATIONS_US_STATE = 100 # Threshold for US states"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "code",
64
+ "execution_count": null,
65
+ "metadata": {},
66
+ "outputs": [],
67
+ "source": [
68
+ "# Countries where Claude doesn't operate (23 countries)\n",
69
+ "EXCLUDED_COUNTRIES = [\n",
70
+ " \"AF\", # Afghanistan\n",
71
+ " \"BY\", # Belarus\n",
72
+ " \"CD\", # Democratic Republic of the Congo\n",
73
+ " \"CF\", # Central African Republic\n",
74
+ " \"CN\", # China\n",
75
+ " \"CU\", # Cuba\n",
76
+ " \"ER\", # Eritrea\n",
77
+ " \"ET\", # Ethiopia\n",
78
+ " \"HK\", # Hong Kong\n",
79
+ " \"IR\", # Iran\n",
80
+ " \"KP\", # North Korea\n",
81
+ " \"LY\", # Libya\n",
82
+ " \"ML\", # Mali\n",
83
+ " \"MM\", # Myanmar\n",
84
+ " \"MO\", # Macau\n",
85
+ " \"NI\", # Nicaragua\n",
86
+ " \"RU\", # Russia\n",
87
+ " \"SD\", # Sudan\n",
88
+ " \"SO\", # Somalia\n",
89
+ " \"SS\", # South Sudan\n",
90
+ " \"SY\", # Syria\n",
91
+ " \"VE\", # Venezuela\n",
92
+ " \"YE\", # Yemen\n",
93
+ "]"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "markdown",
98
+ "metadata": {},
99
+ "source": [
100
+ "## Data Loading Functions"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": null,
106
+ "metadata": {},
107
+ "outputs": [],
108
+ "source": [
109
+ "def load_population_data():\n",
110
+ " \"\"\"\n",
111
+ " Load population data for countries and US states.\n",
112
+ "\n",
113
+ " Args:\n",
114
+ " verbose: Whether to print progress\n",
115
+ "\n",
116
+ " Returns:\n",
117
+ " Dict with country and state_us population dataframes\n",
118
+ " \"\"\"\n",
119
+ " pop_country_path = (\n",
120
+ " Path(DATA_INTERMEDIATE_DIR) / f\"working_age_pop_{YEAR}_country.csv\"\n",
121
+ " )\n",
122
+ " pop_state_path = (\n",
123
+ " Path(DATA_INTERMEDIATE_DIR) / f\"working_age_pop_{YEAR}_us_state.csv\"\n",
124
+ " )\n",
125
+ "\n",
126
+ " if not pop_country_path.exists() or not pop_state_path.exists():\n",
127
+ " raise FileNotFoundError(\n",
128
+ " f\"Population data is required but not found.\\n\"\n",
129
+ " f\" Expected files:\\n\"\n",
130
+ " f\" - {pop_country_path}\\n\"\n",
131
+ " f\" - {pop_state_path}\\n\"\n",
132
+ " f\" Run preprocess_population.py first to generate these files.\"\n",
133
+ " )\n",
134
+ "\n",
135
+ " # Use keep_default_na=False to preserve any \"NA\" values as strings\n",
136
+ " df_pop_country = pd.read_csv(\n",
137
+ " pop_country_path, keep_default_na=False, na_values=[\"\"]\n",
138
+ " )\n",
139
+ " df_pop_state = pd.read_csv(pop_state_path, keep_default_na=False, na_values=[\"\"])\n",
140
+ "\n",
141
+ " return {\"country\": df_pop_country, \"state_us\": df_pop_state}\n",
142
+ "\n",
143
+ "\n",
144
+ "def load_gdp_data():\n",
145
+ " \"\"\"\n",
146
+ " Load GDP data for countries and US states.\n",
147
+ "\n",
148
+ " Returns:\n",
149
+ " Dict with country and state_us GDP dataframes\n",
150
+ " \"\"\"\n",
151
+ " gdp_country_path = Path(DATA_INTERMEDIATE_DIR) / f\"gdp_{YEAR}_country.csv\"\n",
152
+ " gdp_state_path = Path(DATA_INTERMEDIATE_DIR) / f\"gdp_{YEAR}_us_state.csv\"\n",
153
+ "\n",
154
+ " if not gdp_country_path.exists() or not gdp_state_path.exists():\n",
155
+ " raise FileNotFoundError(\n",
156
+ " f\"GDP data is required but not found.\\n\"\n",
157
+ " f\" Expected files:\\n\"\n",
158
+ " f\" - {gdp_country_path}\\n\"\n",
159
+ " f\" - {gdp_state_path}\\n\"\n",
160
+ " f\" Run preprocess_gdp.py first to generate these files.\"\n",
161
+ " )\n",
162
+ "\n",
163
+ " # Use keep_default_na=False to preserve any \"NA\" values as strings\n",
164
+ " df_gdp_country = pd.read_csv(\n",
165
+ " gdp_country_path, keep_default_na=False, na_values=[\"\"]\n",
166
+ " )\n",
167
+ " df_gdp_state = pd.read_csv(gdp_state_path, keep_default_na=False, na_values=[\"\"])\n",
168
+ "\n",
169
+ " return {\"country\": df_gdp_country, \"state_us\": df_gdp_state}\n",
170
+ "\n",
171
+ "\n",
172
+ "def load_task_data():\n",
173
+ " \"\"\"\n",
174
+ " Load O*NET task statements with SOC codes.\n",
175
+ "\n",
176
+ " Returns:\n",
177
+ " DataFrame with O*NET tasks and SOC major groups\n",
178
+ " \"\"\"\n",
179
+ " onet_path = Path(DATA_INTERMEDIATE_DIR) / \"onet_task_statements.csv\"\n",
180
+ "\n",
181
+ " if not onet_path.exists():\n",
182
+ " raise FileNotFoundError(\n",
183
+ " f\"O*NET data is required but not found.\\n\"\n",
184
+ " f\" Expected file:\\n\"\n",
185
+ " f\" - {onet_path}\\n\"\n",
186
+ " f\" Run preprocess_onet.py first to generate this file.\"\n",
187
+ " )\n",
188
+ "\n",
189
+ " # Use keep_default_na=False to preserve any \"NA\" values as strings\n",
190
+ " df_onet = pd.read_csv(onet_path, keep_default_na=False, na_values=[\"\"])\n",
191
+ "\n",
192
+ " # Normalize task names for matching with Clio data\n",
193
+ " df_onet[\"task_normalized\"] = df_onet[\"Task\"].str.lower().str.strip()\n",
194
+ "\n",
195
+ " return df_onet\n",
196
+ "\n",
197
+ "\n",
198
+ "def load_soc_data():\n",
199
+ " \"\"\"\n",
200
+ " Load SOC structure data for occupation names.\n",
201
+ "\n",
202
+ " Returns:\n",
203
+ " DataFrame with SOC major groups and their titles\n",
204
+ " \"\"\"\n",
205
+ " soc_path = Path(DATA_INTERMEDIATE_DIR) / \"soc_structure.csv\"\n",
206
+ "\n",
207
+ " if not soc_path.exists():\n",
208
+ " raise FileNotFoundError(\n",
209
+ " f\"SOC structure data is required but not found.\\n\"\n",
210
+ " f\" Expected file:\\n\"\n",
211
+ " f\" - {soc_path}\\n\"\n",
212
+ " f\" Run preprocess_onet.py first to generate this file.\"\n",
213
+ " )\n",
214
+ "\n",
215
+ " # Use keep_default_na=False to preserve any \"NA\" values as strings\n",
216
+ " df_soc = pd.read_csv(soc_path, keep_default_na=False, na_values=[\"\"])\n",
217
+ "\n",
218
+ " # Get unique major groups with their titles for SOC name mapping\n",
219
+ " df_major_groups = df_soc[df_soc[\"soc_major_group\"].notna()][\n",
220
+ " [\"soc_major_group\", \"SOC or O*NET-SOC 2019 Title\"]\n",
221
+ " ].drop_duplicates(subset=[\"soc_major_group\"])\n",
222
+ "\n",
223
+ " return df_major_groups\n",
224
+ "\n",
225
+ "\n",
226
+ "def load_external_data():\n",
227
+ " \"\"\"\n",
228
+ " Load all external data sources from local files.\n",
229
+ "\n",
230
+ " Returns:\n",
231
+ " Dict with population, gdp, task_statements, and soc_structure dataframes\n",
232
+ " \"\"\"\n",
233
+ "\n",
234
+ " external_data = {}\n",
235
+ "\n",
236
+ " # Load each data source with its specific function\n",
237
+ " external_data[\"population\"] = load_population_data()\n",
238
+ " external_data[\"gdp\"] = load_gdp_data()\n",
239
+ " external_data[\"task_statements\"] = load_task_data()\n",
240
+ " external_data[\"soc_structure\"] = load_soc_data()\n",
241
+ "\n",
242
+ " return external_data"
243
+ ]
244
+ },
245
+ {
246
+ "cell_type": "markdown",
247
+ "metadata": {},
248
+ "source": [
249
+ "## Filtering Functions"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "execution_count": null,
255
+ "metadata": {},
256
+ "outputs": [],
257
+ "source": [
258
+ "def get_filtered_geographies(df):\n",
259
+ " \"\"\"\n",
260
+ " Get lists of countries and states that meet MIN_OBSERVATIONS thresholds.\n",
261
+ "\n",
262
+ " This function does NOT filter the dataframe - it only identifies which\n",
263
+ " geographies meet the thresholds. The full dataframe is preserved\n",
264
+ " so we can still report statistics for all geographies.\n",
265
+ "\n",
266
+ " Args:\n",
267
+ " df: Input dataframe\n",
268
+ "\n",
269
+ " Returns:\n",
270
+ " Tuple of (filtered_countries list, filtered_states list)\n",
271
+ " \"\"\"\n",
272
+ " # Get country usage counts\n",
273
+ " country_usage = df[\n",
274
+ " (df[\"facet\"] == \"country\") & (df[\"variable\"] == \"usage_count\")\n",
275
+ " ].set_index(\"geo_id\")[\"value\"]\n",
276
+ "\n",
277
+ " # Get state usage counts\n",
278
+ " state_usage = df[\n",
279
+ " (df[\"facet\"] == \"state_us\") & (df[\"variable\"] == \"usage_count\")\n",
280
+ " ].set_index(\"geo_id\")[\"value\"]\n",
281
+ "\n",
282
+ " # Get countries that meet MIN_OBSERVATIONS threshold\n",
283
+ " filtered_countries = country_usage[\n",
284
+ " country_usage >= MIN_OBSERVATIONS_COUNTRY\n",
285
+ " ].index.tolist()\n",
286
+ "\n",
287
+ " # Get states that meet MIN_OBSERVATIONS threshold\n",
288
+ " filtered_states = state_usage[\n",
289
+ " state_usage >= MIN_OBSERVATIONS_US_STATE\n",
290
+ " ].index.tolist()\n",
291
+ "\n",
292
+ " return filtered_countries, filtered_states"
293
+ ]
294
+ },
295
+ {
296
+ "cell_type": "markdown",
297
+ "metadata": {},
298
+ "source": [
299
+ "## Data Merge Functions"
300
+ ]
301
+ },
302
+ {
303
+ "cell_type": "code",
304
+ "execution_count": null,
305
+ "metadata": {},
306
+ "outputs": [],
307
+ "source": [
308
+ "def merge_population_data(df, population_data):\n",
309
+ " \"\"\"\n",
310
+ " Merge population data in long format.\n",
311
+ "\n",
312
+ " This function:\n",
313
+ " 1. Adds countries/states that have population but no usage (with 0 usage values)\n",
314
+ " 2. Adds population as new rows with variable=\"working_age_pop\"\n",
315
+ "\n",
316
+ " Args:\n",
317
+ " df: Input dataframe in long format\n",
318
+ " population_data: Dict with country and state_us population dataframes\n",
319
+ "\n",
320
+ " Returns:\n",
321
+ " Dataframe with all geographies and population added as rows\n",
322
+ " \"\"\"\n",
323
+ " df_result = df.copy()\n",
324
+ " new_rows = []\n",
325
+ "\n",
326
+ " # Get unique date/platform combinations to replicate for new data\n",
327
+ " date_platform_combos = df_result[\n",
328
+ " [\"date_start\", \"date_end\", \"platform_and_product\"]\n",
329
+ " ].drop_duplicates()\n",
330
+ "\n",
331
+ " # Process countries\n",
332
+ " if \"country\" in population_data and not population_data[\"country\"].empty:\n",
333
+ " pop_country = population_data[\"country\"]\n",
334
+ "\n",
335
+ " # Get existing countries in our data\n",
336
+ " existing_countries = df_result[\n",
337
+ " (df_result[\"geography\"] == \"country\")\n",
338
+ " & (df_result[\"variable\"] == \"usage_count\")\n",
339
+ " ][\"geo_id\"].unique()\n",
340
+ "\n",
341
+ " # Add missing countries with 0 usage (excluding excluded countries)\n",
342
+ " missing_countries = (\n",
343
+ " set(pop_country[\"country_code\"])\n",
344
+ " - set(existing_countries)\n",
345
+ " - set(EXCLUDED_COUNTRIES)\n",
346
+ " )\n",
347
+ "\n",
348
+ " for _, combo in date_platform_combos.iterrows():\n",
349
+ " # Add missing countries with 0 usage (both count and percentage)\n",
350
+ " for country_code in missing_countries:\n",
351
+ " # Add usage_count = 0\n",
352
+ " new_rows.append(\n",
353
+ " {\n",
354
+ " \"geo_id\": country_code,\n",
355
+ " \"geography\": \"country\",\n",
356
+ " \"date_start\": combo[\"date_start\"],\n",
357
+ " \"date_end\": combo[\"date_end\"],\n",
358
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
359
+ " \"facet\": \"country\",\n",
360
+ " \"level\": 0,\n",
361
+ " \"variable\": \"usage_count\",\n",
362
+ " \"cluster_name\": \"\",\n",
363
+ " \"value\": 0.0,\n",
364
+ " }\n",
365
+ " )\n",
366
+ " # Add usage_pct = 0\n",
367
+ " new_rows.append(\n",
368
+ " {\n",
369
+ " \"geo_id\": country_code,\n",
370
+ " \"geography\": \"country\",\n",
371
+ " \"date_start\": combo[\"date_start\"],\n",
372
+ " \"date_end\": combo[\"date_end\"],\n",
373
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
374
+ " \"facet\": \"country\",\n",
375
+ " \"level\": 0,\n",
376
+ " \"variable\": \"usage_pct\",\n",
377
+ " \"cluster_name\": \"\",\n",
378
+ " \"value\": 0.0,\n",
379
+ " }\n",
380
+ " )\n",
381
+ "\n",
382
+ " # Add population data for all countries (that are not excluded)\n",
383
+ " for _, pop_row in pop_country.iterrows():\n",
384
+ " new_rows.append(\n",
385
+ " {\n",
386
+ " \"geo_id\": pop_row[\"country_code\"],\n",
387
+ " \"geography\": \"country\",\n",
388
+ " \"date_start\": combo[\"date_start\"],\n",
389
+ " \"date_end\": combo[\"date_end\"],\n",
390
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
391
+ " \"facet\": \"country\",\n",
392
+ " \"level\": 0,\n",
393
+ " \"variable\": \"working_age_pop\",\n",
394
+ " \"cluster_name\": \"\",\n",
395
+ " \"value\": float(pop_row[\"working_age_pop\"]),\n",
396
+ " }\n",
397
+ " )\n",
398
+ "\n",
399
+ " # Process US states\n",
400
+ " if \"state_us\" in population_data and not population_data[\"state_us\"].empty:\n",
401
+ " pop_state = population_data[\"state_us\"]\n",
402
+ "\n",
403
+ " # Get existing states in our data\n",
404
+ " existing_states = df_result[\n",
405
+ " (df_result[\"geography\"] == \"state_us\")\n",
406
+ " & (df_result[\"variable\"] == \"usage_count\")\n",
407
+ " ][\"geo_id\"].unique()\n",
408
+ "\n",
409
+ " # Add missing states with 0 usage\n",
410
+ " missing_states = set(pop_state[\"state_code\"]) - set(existing_states)\n",
411
+ "\n",
412
+ " for _, combo in date_platform_combos.iterrows():\n",
413
+ " # Add missing states with 0 usage (both count and percentage)\n",
414
+ " for state_code in missing_states:\n",
415
+ " # Add usage_count = 0\n",
416
+ " new_rows.append(\n",
417
+ " {\n",
418
+ " \"geo_id\": state_code,\n",
419
+ " \"geography\": \"state_us\",\n",
420
+ " \"date_start\": combo[\"date_start\"],\n",
421
+ " \"date_end\": combo[\"date_end\"],\n",
422
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
423
+ " \"facet\": \"state_us\",\n",
424
+ " \"level\": 0,\n",
425
+ " \"variable\": \"usage_count\",\n",
426
+ " \"cluster_name\": \"\",\n",
427
+ " \"value\": 0.0,\n",
428
+ " }\n",
429
+ " )\n",
430
+ " # Add usage_pct = 0\n",
431
+ " new_rows.append(\n",
432
+ " {\n",
433
+ " \"geo_id\": state_code,\n",
434
+ " \"geography\": \"state_us\",\n",
435
+ " \"date_start\": combo[\"date_start\"],\n",
436
+ " \"date_end\": combo[\"date_end\"],\n",
437
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
438
+ " \"facet\": \"state_us\",\n",
439
+ " \"level\": 0,\n",
440
+ " \"variable\": \"usage_pct\",\n",
441
+ " \"cluster_name\": \"\",\n",
442
+ " \"value\": 0.0,\n",
443
+ " }\n",
444
+ " )\n",
445
+ "\n",
446
+ " # Add population data for all states\n",
447
+ " for _, pop_row in pop_state.iterrows():\n",
448
+ " new_rows.append(\n",
449
+ " {\n",
450
+ " \"geo_id\": pop_row[\"state_code\"],\n",
451
+ " \"geography\": \"state_us\",\n",
452
+ " \"date_start\": combo[\"date_start\"],\n",
453
+ " \"date_end\": combo[\"date_end\"],\n",
454
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
455
+ " \"facet\": \"state_us\",\n",
456
+ " \"level\": 0,\n",
457
+ " \"variable\": \"working_age_pop\",\n",
458
+ " \"cluster_name\": \"\",\n",
459
+ " \"value\": float(pop_row[\"working_age_pop\"]),\n",
460
+ " }\n",
461
+ " )\n",
462
+ "\n",
463
+ " # Add all new rows to the dataframe\n",
464
+ " if new_rows:\n",
465
+ " df_new = pd.DataFrame(new_rows)\n",
466
+ " df_result = pd.concat([df_result, df_new], ignore_index=True)\n",
467
+ "\n",
468
+ " return df_result\n",
469
+ "\n",
470
+ "\n",
471
+ "def merge_gdp_data(df, gdp_data, population_data):\n",
472
+ " \"\"\"\n",
473
+ " Merge GDP data and calculate GDP per working age capita.\n",
474
+ "\n",
475
+ " Since we have total GDP in actual dollars, we divide by population to get per capita.\n",
476
+ "\n",
477
+ " Args:\n",
478
+ " df: Input dataframe in long format\n",
479
+ " gdp_data: Dict with country and state_us GDP dataframes (total GDP in dollars)\n",
480
+ " population_data: Dict with country and state_us population dataframes\n",
481
+ "\n",
482
+ " Returns:\n",
483
+ " Dataframe with GDP per capita data added as rows\n",
484
+ " \"\"\"\n",
485
+ " df_result = df.copy()\n",
486
+ " new_rows = []\n",
487
+ "\n",
488
+ " # Get unique date/platform combinations\n",
489
+ " date_platform_combos = df_result[\n",
490
+ " [\"date_start\", \"date_end\", \"platform_and_product\"]\n",
491
+ " ].drop_duplicates()\n",
492
+ "\n",
493
+ " # Process country GDP\n",
494
+ " if \"country\" in gdp_data and \"country\" in population_data:\n",
495
+ " gdp_country = gdp_data[\"country\"]\n",
496
+ " pop_country = population_data[\"country\"]\n",
497
+ "\n",
498
+ " # Merge GDP with population to calculate per capita\n",
499
+ " gdp_pop = gdp_country.merge(pop_country, on=\"iso_alpha_3\", how=\"inner\")\n",
500
+ "\n",
501
+ " # Calculate GDP per working age capita\n",
502
+ " gdp_pop[\"gdp_per_working_age_capita\"] = (\n",
503
+ " gdp_pop[\"gdp_total\"] / gdp_pop[\"working_age_pop\"]\n",
504
+ " )\n",
505
+ "\n",
506
+ " for _, combo in date_platform_combos.iterrows():\n",
507
+ " for _, gdp_row in gdp_pop.iterrows():\n",
508
+ " new_rows.append(\n",
509
+ " {\n",
510
+ " \"geo_id\": gdp_row[\"country_code\"], # Use 2-letter code\n",
511
+ " \"geography\": \"country\",\n",
512
+ " \"date_start\": combo[\"date_start\"],\n",
513
+ " \"date_end\": combo[\"date_end\"],\n",
514
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
515
+ " \"facet\": \"country\",\n",
516
+ " \"level\": 0,\n",
517
+ " \"variable\": \"gdp_per_working_age_capita\",\n",
518
+ " \"cluster_name\": \"\",\n",
519
+ " \"value\": float(gdp_row[\"gdp_per_working_age_capita\"]),\n",
520
+ " }\n",
521
+ " )\n",
522
+ "\n",
523
+ " # Process state GDP\n",
524
+ " if \"state_us\" in gdp_data and \"state_us\" in population_data:\n",
525
+ " gdp_state = gdp_data[\"state_us\"]\n",
526
+ " pop_state = population_data[\"state_us\"]\n",
527
+ "\n",
528
+ " # Merge GDP with population\n",
529
+ " # Column names from preprocess_gdp.py: state_code, gdp_total (in actual dollars)\n",
530
+ " gdp_pop = gdp_state.merge(pop_state, on=\"state_code\", how=\"inner\")\n",
531
+ "\n",
532
+ " # Calculate GDP per working age capita\n",
533
+ " gdp_pop[\"gdp_per_working_age_capita\"] = (\n",
534
+ " gdp_pop[\"gdp_total\"] / gdp_pop[\"working_age_pop\"]\n",
535
+ " )\n",
536
+ "\n",
537
+ " for _, combo in date_platform_combos.iterrows():\n",
538
+ " for _, gdp_row in gdp_pop.iterrows():\n",
539
+ " new_rows.append(\n",
540
+ " {\n",
541
+ " \"geo_id\": gdp_row[\"state_code\"],\n",
542
+ " \"geography\": \"state_us\",\n",
543
+ " \"date_start\": combo[\"date_start\"],\n",
544
+ " \"date_end\": combo[\"date_end\"],\n",
545
+ " \"platform_and_product\": combo[\"platform_and_product\"],\n",
546
+ " \"facet\": \"state_us\",\n",
547
+ " \"level\": 0,\n",
548
+ " \"variable\": \"gdp_per_working_age_capita\",\n",
549
+ " \"cluster_name\": \"\",\n",
550
+ " \"value\": float(gdp_row[\"gdp_per_working_age_capita\"]),\n",
551
+ " }\n",
552
+ " )\n",
553
+ "\n",
554
+ " # Add all new rows to the dataframe\n",
555
+ " if new_rows:\n",
556
+ " df_new = pd.DataFrame(new_rows)\n",
557
+ " df_result = pd.concat([df_result, df_new], ignore_index=True)\n",
558
+ "\n",
559
+ " return df_result\n",
560
+ "\n",
561
+ "\n",
562
+ "def calculate_soc_distribution(\n",
563
+ " df, df_onet, df_soc_structure, filtered_countries=None, filtered_states=None\n",
564
+ "):\n",
565
+ " \"\"\"\n",
566
+ " Calculate SOC occupation distribution from O*NET task usage.\n",
567
+ "\n",
568
+ " This uses the following approach:\n",
569
+ " 1. Map tasks directly to SOC major groups (with minimal double counting)\n",
570
+ " 2. Combine \"none\" and \"not_classified\" tasks into a single \"not_classified\" SOC group\n",
571
+ " 3. Sum percentages by SOC group\n",
572
+ " 4. Normalize to 100% for each geography\n",
573
+ " 5. Calculate for countries, US states, and global that meet MIN_OBSERVATIONS threshold\n",
574
+ "\n",
575
+ " NOTE: For US states, only ~449 O*NET tasks have state-level data (those with sufficient\n",
576
+ " observations), but these tasks still map to SOC groups the same way as for countries.\n",
577
+ "\n",
578
+ " Args:\n",
579
+ " df: DataFrame with O*NET task percentages\n",
580
+ " df_onet: O*NET task data with SOC codes\n",
581
+ " df_soc_structure: SOC structure with major group names\n",
582
+ " filtered_countries: List of countries that meet MIN_OBSERVATIONS (optional)\n",
583
+ " filtered_states: List of states that meet MIN_OBSERVATIONS (optional)\n",
584
+ "\n",
585
+ " Returns:\n",
586
+ " DataFrame with SOC distribution rows added\n",
587
+ " \"\"\"\n",
588
+ " df_result = df.copy()\n",
589
+ " soc_rows = []\n",
590
+ "\n",
591
+ " # Get all O*NET task percentage data (including not_classified and \"none\")\n",
592
+ " df_task_pct_all = df_result[\n",
593
+ " (df_result[\"facet\"] == \"onet_task\") & (df_result[\"variable\"] == \"onet_task_pct\")\n",
594
+ " ].copy()\n",
595
+ "\n",
596
+ " if df_task_pct_all.empty:\n",
597
+ " return df_result\n",
598
+ "\n",
599
+ " # Build masks for each geography type\n",
600
+ " # Always include global\n",
601
+ " global_mask = df_task_pct_all[\"geography\"] == \"global\"\n",
602
+ "\n",
603
+ " # Apply filtering for countries\n",
604
+ " if filtered_countries is not None:\n",
605
+ " country_mask = (df_task_pct_all[\"geography\"] == \"country\") & (\n",
606
+ " df_task_pct_all[\"geo_id\"].isin(filtered_countries)\n",
607
+ " )\n",
608
+ " else:\n",
609
+ " # If no filter, keep all countries\n",
610
+ " country_mask = df_task_pct_all[\"geography\"] == \"country\"\n",
611
+ "\n",
612
+ " # Apply filtering for states\n",
613
+ " if filtered_states is not None:\n",
614
+ " state_mask = (df_task_pct_all[\"geography\"] == \"state_us\") & (\n",
615
+ " df_task_pct_all[\"geo_id\"].isin(filtered_states)\n",
616
+ " )\n",
617
+ " else:\n",
618
+ " # If no filter, keep all states\n",
619
+ " state_mask = df_task_pct_all[\"geography\"] == \"state_us\"\n",
620
+ "\n",
621
+ " # Combine masks to keep relevant geographies\n",
622
+ " combined_mask = global_mask | country_mask | state_mask\n",
623
+ " df_task_pct_all = df_task_pct_all[combined_mask].copy()\n",
624
+ "\n",
625
+ " if df_task_pct_all.empty:\n",
626
+ " return df_result\n",
627
+ "\n",
628
+ " # Separate not_classified and none tasks from real O*NET tasks\n",
629
+ " df_not_classified = df_task_pct_all[\n",
630
+ " (df_task_pct_all[\"cluster_name\"].str.contains(\"not_classified\", na=False))\n",
631
+ " | (df_task_pct_all[\"cluster_name\"] == \"none\")\n",
632
+ " ].copy()\n",
633
+ "\n",
634
+ " # Get real O*NET tasks (excluding not_classified and none)\n",
635
+ " df_task_pct = df_task_pct_all[\n",
636
+ " (~df_task_pct_all[\"cluster_name\"].str.contains(\"not_classified\", na=False))\n",
637
+ " & (df_task_pct_all[\"cluster_name\"] != \"none\")\n",
638
+ " ].copy()\n",
639
+ "\n",
640
+ " # Normalize task names for matching\n",
641
+ " df_task_pct[\"task_normalized\"] = df_task_pct[\"cluster_name\"].str.lower().str.strip()\n",
642
+ "\n",
643
+ " # Get unique task-SOC pairs from O*NET data\n",
644
+ " # This keeps tasks that map to multiple SOC groups (different rows)\n",
645
+ " df_task_soc = df_onet[[\"task_normalized\", \"soc_major_group\"]].drop_duplicates()\n",
646
+ "\n",
647
+ " # Merge tasks with their SOC codes\n",
648
+ " df_with_soc = df_task_pct.merge(df_task_soc, on=\"task_normalized\", how=\"left\")\n",
649
+ "\n",
650
+ " # Check for unmapped tasks and raise error if found (same as for countries)\n",
651
+ " unmapped_tasks = df_with_soc[df_with_soc[\"soc_major_group\"].isna()]\n",
652
+ " if not unmapped_tasks.empty:\n",
653
+ " unmapped_list = unmapped_tasks[\"cluster_name\"].unique()[:10] # Show first 10\n",
654
+ " n_unmapped = len(unmapped_tasks[\"cluster_name\"].unique())\n",
655
+ "\n",
656
+ " # Check which geographies have unmapped tasks\n",
657
+ " unmapped_geos = unmapped_tasks[\"geography\"].unique()\n",
658
+ "\n",
659
+ " raise ValueError(\n",
660
+ " f\"Found {n_unmapped} O*NET tasks that could not be mapped to SOC codes.\\n\"\n",
661
+ " f\"Geographies with unmapped tasks: {unmapped_geos.tolist()}\\n\"\n",
662
+ " f\"First 10 unmapped tasks:\\n\"\n",
663
+ " + \"\\n\".join(f\" - {task}\" for task in unmapped_list)\n",
664
+ " + f\"\\n\\nThis likely means the O*NET data is out of sync with the Clio task data.\\n\"\n",
665
+ " f\"Please verify that preprocess_onet.py has been run with the correct O*NET version.\"\n",
666
+ " )\n",
667
+ "\n",
668
+ " # Create SOC name mapping if SOC structure is available\n",
669
+ " soc_names = {}\n",
670
+ " if not df_soc_structure.empty:\n",
671
+ " for _, row in df_soc_structure.iterrows():\n",
672
+ " soc_code = row[\"soc_major_group\"]\n",
673
+ " title = row[\"SOC or O*NET-SOC 2019 Title\"]\n",
674
+ " # Clean up title (remove \"Occupations\" suffix)\n",
675
+ " clean_title = title.replace(\" Occupations\", \"\").replace(\" Occupation\", \"\")\n",
676
+ " soc_names[soc_code] = clean_title\n",
677
+ "\n",
678
+ " # Group by geography and process each group\n",
679
+ " geo_groups = df_with_soc.groupby(\n",
680
+ " [\"geo_id\", \"geography\", \"date_start\", \"date_end\", \"platform_and_product\"]\n",
681
+ " )\n",
682
+ "\n",
683
+ " # Also group not_classified data by geography\n",
684
+ " not_classified_groups = df_not_classified.groupby(\n",
685
+ " [\"geo_id\", \"geography\", \"date_start\", \"date_end\", \"platform_and_product\"]\n",
686
+ " )\n",
687
+ "\n",
688
+ " # Track statistics\n",
689
+ " states_with_soc = set()\n",
690
+ " countries_with_soc = set()\n",
691
+ "\n",
692
+ " # Process all geographies\n",
693
+ " all_geos = set()\n",
694
+ " for (geo_id, geography, date_start, date_end, platform), _ in geo_groups:\n",
695
+ " all_geos.add((geo_id, geography, date_start, date_end, platform))\n",
696
+ " for (geo_id, geography, date_start, date_end, platform), _ in not_classified_groups:\n",
697
+ " all_geos.add((geo_id, geography, date_start, date_end, platform))\n",
698
+ "\n",
699
+ " for geo_id, geography, date_start, date_end, platform in all_geos:\n",
700
+ " # Get mapped SOC data for this geography\n",
701
+ " try:\n",
702
+ " geo_data = geo_groups.get_group(\n",
703
+ " (geo_id, geography, date_start, date_end, platform)\n",
704
+ " )\n",
705
+ " # Sum percentages by SOC major group\n",
706
+ " # If a task maps to multiple SOC groups, its percentage is added to each\n",
707
+ " soc_totals = geo_data.groupby(\"soc_major_group\")[\"value\"].sum()\n",
708
+ " except KeyError:\n",
709
+ " # No mapped tasks for this geography\n",
710
+ " soc_totals = pd.Series(dtype=float)\n",
711
+ "\n",
712
+ " # Get not_classified/none data for this geography\n",
713
+ " try:\n",
714
+ " not_classified_data = not_classified_groups.get_group(\n",
715
+ " (geo_id, geography, date_start, date_end, platform)\n",
716
+ " )\n",
717
+ " # Sum all not_classified and none percentages\n",
718
+ " not_classified_total = not_classified_data[\"value\"].sum()\n",
719
+ " except KeyError:\n",
720
+ " # No not_classified/none for this geography\n",
721
+ " not_classified_total = 0\n",
722
+ "\n",
723
+ " # Combine and normalize to 100%\n",
724
+ " total_pct = soc_totals.sum() + not_classified_total\n",
725
+ "\n",
726
+ " if total_pct > 0:\n",
727
+ " # Normalize mapped SOC groups\n",
728
+ " if len(soc_totals) > 0:\n",
729
+ " soc_normalized = (soc_totals / total_pct) * 100\n",
730
+ " else:\n",
731
+ " soc_normalized = pd.Series(dtype=float)\n",
732
+ "\n",
733
+ " # Calculate normalized not_classified percentage\n",
734
+ " not_classified_normalized = (not_classified_total / total_pct) * 100\n",
735
+ "\n",
736
+ " # Track geographies that have SOC data\n",
737
+ " if geography == \"state_us\":\n",
738
+ " states_with_soc.add(geo_id)\n",
739
+ " elif geography == \"country\":\n",
740
+ " countries_with_soc.add(geo_id)\n",
741
+ "\n",
742
+ " # Create rows for each SOC group\n",
743
+ " for soc_group, pct_value in soc_normalized.items():\n",
744
+ " # Get SOC name if available, otherwise use code\n",
745
+ " soc_name = soc_names.get(soc_group, f\"SOC {soc_group}\")\n",
746
+ "\n",
747
+ " soc_row = {\n",
748
+ " \"geo_id\": geo_id,\n",
749
+ " \"geography\": geography,\n",
750
+ " \"date_start\": date_start,\n",
751
+ " \"date_end\": date_end,\n",
752
+ " \"platform_and_product\": platform,\n",
753
+ " \"facet\": \"soc_occupation\",\n",
754
+ " \"level\": 0,\n",
755
+ " \"variable\": \"soc_pct\",\n",
756
+ " \"cluster_name\": soc_name,\n",
757
+ " \"value\": pct_value,\n",
758
+ " }\n",
759
+ " soc_rows.append(soc_row)\n",
760
+ "\n",
761
+ " # Add not_classified SOC row if there's any not_classified/none percentage\n",
762
+ " if not_classified_normalized > 0:\n",
763
+ " soc_row = {\n",
764
+ " \"geo_id\": geo_id,\n",
765
+ " \"geography\": geography,\n",
766
+ " \"date_start\": date_start,\n",
767
+ " \"date_end\": date_end,\n",
768
+ " \"platform_and_product\": platform,\n",
769
+ " \"facet\": \"soc_occupation\",\n",
770
+ " \"level\": 0,\n",
771
+ " \"variable\": \"soc_pct\",\n",
772
+ " \"cluster_name\": \"not_classified\",\n",
773
+ " \"value\": not_classified_normalized,\n",
774
+ " }\n",
775
+ " soc_rows.append(soc_row)\n",
776
+ "\n",
777
+ " # Print summary\n",
778
+ " if countries_with_soc:\n",
779
+ " print(\n",
780
+ " f\"Calculated SOC distributions for {len(countries_with_soc)} countries + global\"\n",
781
+ " )\n",
782
+ " if states_with_soc:\n",
783
+ " print(f\"Calculated SOC distributions for {len(states_with_soc)} US states\")\n",
784
+ "\n",
785
+ " # Add all SOC rows to result\n",
786
+ " if soc_rows:\n",
787
+ " df_soc = pd.DataFrame(soc_rows)\n",
788
+ " df_result = pd.concat([df_result, df_soc], ignore_index=True)\n",
789
+ "\n",
790
+ " return df_result"
791
+ ]
792
+ },
793
+ {
794
+ "cell_type": "markdown",
795
+ "metadata": {},
796
+ "source": [
797
+ "## Metric Calculation Functions"
798
+ ]
799
+ },
800
+ {
801
+ "cell_type": "code",
802
+ "execution_count": null,
803
+ "metadata": {},
804
+ "outputs": [],
805
+ "source": [
806
+ "def calculate_per_capita_metrics(df):\n",
807
+ " \"\"\"\n",
808
+ " Calculate per capita metrics by joining usage and population data.\n",
809
+ "\n",
810
+ " Since data is in long format, this function:\n",
811
+ " 1. Extracts usage count rows\n",
812
+ " 2. Extracts population rows\n",
813
+ " 3. Joins them and calculates per capita\n",
814
+ " 4. Adds results as new rows\n",
815
+ "\n",
816
+ " Args:\n",
817
+ " df: Dataframe in long format with usage and population as rows\n",
818
+ "\n",
819
+ " Returns:\n",
820
+ " Dataframe with per capita metrics added as new rows\n",
821
+ " \"\"\"\n",
822
+ " df_result = df.copy()\n",
823
+ "\n",
824
+ " # Define which metrics should have per capita calculations\n",
825
+ " count_metrics = [\"usage_count\"]\n",
826
+ "\n",
827
+ " # Get population data\n",
828
+ " df_pop = df_result[df_result[\"variable\"] == \"working_age_pop\"][\n",
829
+ " [\n",
830
+ " \"geo_id\",\n",
831
+ " \"geography\",\n",
832
+ " \"date_start\",\n",
833
+ " \"date_end\",\n",
834
+ " \"platform_and_product\",\n",
835
+ " \"value\",\n",
836
+ " ]\n",
837
+ " ].rename(columns={\"value\": \"population\"})\n",
838
+ "\n",
839
+ " # Calculate per capita for each count metric\n",
840
+ " per_capita_rows = []\n",
841
+ "\n",
842
+ " for metric in count_metrics:\n",
843
+ " # Get the count data for this metric\n",
844
+ " df_metric = df_result[df_result[\"variable\"] == metric].copy()\n",
845
+ "\n",
846
+ " # Join with population data\n",
847
+ " df_joined = df_metric.merge(\n",
848
+ " df_pop,\n",
849
+ " on=[\n",
850
+ " \"geo_id\",\n",
851
+ " \"geography\",\n",
852
+ " \"date_start\",\n",
853
+ " \"date_end\",\n",
854
+ " \"platform_and_product\",\n",
855
+ " ],\n",
856
+ " how=\"left\",\n",
857
+ " )\n",
858
+ "\n",
859
+ " # Calculate per capita where population exists and is > 0\n",
860
+ " df_joined = df_joined[\n",
861
+ " df_joined[\"population\"].notna() & (df_joined[\"population\"] > 0)\n",
862
+ " ]\n",
863
+ "\n",
864
+ " if not df_joined.empty:\n",
865
+ " # Create per capita rows\n",
866
+ " for _, row in df_joined.iterrows():\n",
867
+ " per_capita_row = {\n",
868
+ " \"geo_id\": row[\"geo_id\"],\n",
869
+ " \"geography\": row[\"geography\"],\n",
870
+ " \"date_start\": row[\"date_start\"],\n",
871
+ " \"date_end\": row[\"date_end\"],\n",
872
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
873
+ " \"facet\": row[\"facet\"],\n",
874
+ " \"level\": row[\"level\"],\n",
875
+ " \"variable\": metric.replace(\"_count\", \"_per_capita\"),\n",
876
+ " \"cluster_name\": row[\"cluster_name\"],\n",
877
+ " \"value\": row[\"value\"] / row[\"population\"],\n",
878
+ " }\n",
879
+ " per_capita_rows.append(per_capita_row)\n",
880
+ "\n",
881
+ " # Add all per capita rows to the result\n",
882
+ " if per_capita_rows:\n",
883
+ " df_per_capita = pd.DataFrame(per_capita_rows)\n",
884
+ " df_result = pd.concat([df_result, df_per_capita], ignore_index=True)\n",
885
+ "\n",
886
+ " return df_result\n",
887
+ "\n",
888
+ "\n",
889
+ "def calculate_usage_per_capita_index(df, filtered_countries=None, filtered_states=None):\n",
890
+ " \"\"\"\n",
891
+ " Calculate usage concentration index: (% of usage) / (% of population).\n",
892
+ "\n",
893
+ " This shows whether a geography has more or less usage than expected based on its population.\n",
894
+ " - Index = 1.0: Usage proportional to population\n",
895
+ " - Index > 1.0: Over-representation (more usage than expected)\n",
896
+ " - Index < 1.0: Under-representation (less usage than expected)\n",
897
+ " - Index = 0.0: No usage at all\n",
898
+ "\n",
899
+ " The function calculates the index for all countries/states that have usage data.\n",
900
+ " Excluded countries don't have usage data, so they're automatically excluded.\n",
901
+ " Countries with zero usage get index=0 naturally from the calculation.\n",
902
+ "\n",
903
+ " Args:\n",
904
+ " df: Dataframe with usage and population data\n",
905
+ " filtered_countries: List of countries that meet MIN_OBSERVATIONS threshold (used for baseline calculation)\n",
906
+ " filtered_states: List of states that meet MIN_OBSERVATIONS threshold (used for baseline calculation)\n",
907
+ "\n",
908
+ " Returns:\n",
909
+ " Dataframe with usage concentration index added as new rows\n",
910
+ " \"\"\"\n",
911
+ " df_result = df.copy()\n",
912
+ "\n",
913
+ " index_rows = []\n",
914
+ "\n",
915
+ " # Process countries\n",
916
+ " # Get all countries with usage data (excluded countries won't be here)\n",
917
+ " df_usage_country = df_result[\n",
918
+ " (df_result[\"geography\"] == \"country\") & (df_result[\"variable\"] == \"usage_count\")\n",
919
+ " ].copy()\n",
920
+ "\n",
921
+ " # Get population data for the same countries\n",
922
+ " df_pop_country = df_result[\n",
923
+ " (df_result[\"geography\"] == \"country\")\n",
924
+ " & (df_result[\"variable\"] == \"working_age_pop\")\n",
925
+ " ].copy()\n",
926
+ "\n",
927
+ " if not df_usage_country.empty and not df_pop_country.empty:\n",
928
+ " # For baseline calculation, use filtered countries if provided, otherwise use all\n",
929
+ " if filtered_countries is not None:\n",
930
+ " # Calculate totals using only filtered countries for the baseline\n",
931
+ " usage_for_baseline = df_usage_country[\n",
932
+ " df_usage_country[\"geo_id\"].isin(filtered_countries)\n",
933
+ " ]\n",
934
+ " pop_for_baseline = df_pop_country[\n",
935
+ " df_pop_country[\"geo_id\"].isin(filtered_countries)\n",
936
+ " ]\n",
937
+ " total_usage = usage_for_baseline[\"value\"].sum()\n",
938
+ " total_pop = pop_for_baseline[\"value\"].sum()\n",
939
+ " else:\n",
940
+ " # Use all countries for baseline\n",
941
+ " total_usage = df_usage_country[\"value\"].sum()\n",
942
+ " total_pop = df_pop_country[\"value\"].sum()\n",
943
+ "\n",
944
+ " if total_usage > 0 and total_pop > 0:\n",
945
+ " # Calculate index for all countries (not just filtered)\n",
946
+ " for _, usage_row in df_usage_country.iterrows():\n",
947
+ " # Find corresponding population\n",
948
+ " pop_value = df_pop_country[\n",
949
+ " df_pop_country[\"geo_id\"] == usage_row[\"geo_id\"]\n",
950
+ " ][\"value\"].values\n",
951
+ "\n",
952
+ " if len(pop_value) > 0 and pop_value[0] > 0:\n",
953
+ " # Calculate shares\n",
954
+ " usage_share = (\n",
955
+ " usage_row[\"value\"] / total_usage\n",
956
+ " if usage_row[\"value\"] > 0\n",
957
+ " else 0\n",
958
+ " )\n",
959
+ " pop_share = pop_value[0] / total_pop\n",
960
+ "\n",
961
+ " # Calculate index (will be 0 if usage is 0)\n",
962
+ " index_value = usage_share / pop_share if pop_share > 0 else 0\n",
963
+ "\n",
964
+ " index_row = {\n",
965
+ " \"geo_id\": usage_row[\"geo_id\"],\n",
966
+ " \"geography\": usage_row[\"geography\"],\n",
967
+ " \"date_start\": usage_row[\"date_start\"],\n",
968
+ " \"date_end\": usage_row[\"date_end\"],\n",
969
+ " \"platform_and_product\": usage_row[\"platform_and_product\"],\n",
970
+ " \"facet\": usage_row[\"facet\"],\n",
971
+ " \"level\": usage_row[\"level\"],\n",
972
+ " \"variable\": \"usage_per_capita_index\",\n",
973
+ " \"cluster_name\": usage_row[\"cluster_name\"],\n",
974
+ " \"value\": index_value,\n",
975
+ " }\n",
976
+ " index_rows.append(index_row)\n",
977
+ "\n",
978
+ " # Process states\n",
979
+ " # Get all states with usage data\n",
980
+ " df_usage_state = df_result[\n",
981
+ " (df_result[\"geography\"] == \"state_us\")\n",
982
+ " & (df_result[\"variable\"] == \"usage_count\")\n",
983
+ " ].copy()\n",
984
+ "\n",
985
+ " # Get population data for the same states\n",
986
+ " df_pop_state = df_result[\n",
987
+ " (df_result[\"geography\"] == \"state_us\")\n",
988
+ " & (df_result[\"variable\"] == \"working_age_pop\")\n",
989
+ " ].copy()\n",
990
+ "\n",
991
+ " if not df_usage_state.empty and not df_pop_state.empty:\n",
992
+ " # For baseline calculation, use filtered states if provided, otherwise use all\n",
993
+ " if filtered_states is not None:\n",
994
+ " # Calculate totals using only filtered states for the baseline\n",
995
+ " usage_for_baseline = df_usage_state[\n",
996
+ " df_usage_state[\"geo_id\"].isin(filtered_states)\n",
997
+ " ]\n",
998
+ " pop_for_baseline = df_pop_state[\n",
999
+ " df_pop_state[\"geo_id\"].isin(filtered_states)\n",
1000
+ " ]\n",
1001
+ " total_usage = usage_for_baseline[\"value\"].sum()\n",
1002
+ " total_pop = pop_for_baseline[\"value\"].sum()\n",
1003
+ " else:\n",
1004
+ " # Use all states for baseline\n",
1005
+ " total_usage = df_usage_state[\"value\"].sum()\n",
1006
+ " total_pop = df_pop_state[\"value\"].sum()\n",
1007
+ "\n",
1008
+ " if total_usage > 0 and total_pop > 0:\n",
1009
+ " # Calculate index for all states (not just filtered)\n",
1010
+ " for _, usage_row in df_usage_state.iterrows():\n",
1011
+ " # Find corresponding population\n",
1012
+ " pop_value = df_pop_state[df_pop_state[\"geo_id\"] == usage_row[\"geo_id\"]][\n",
1013
+ " \"value\"\n",
1014
+ " ].values\n",
1015
+ "\n",
1016
+ " if len(pop_value) > 0 and pop_value[0] > 0:\n",
1017
+ " # Calculate shares\n",
1018
+ " usage_share = (\n",
1019
+ " usage_row[\"value\"] / total_usage\n",
1020
+ " if usage_row[\"value\"] > 0\n",
1021
+ " else 0\n",
1022
+ " )\n",
1023
+ " pop_share = pop_value[0] / total_pop\n",
1024
+ "\n",
1025
+ " # Calculate index (will be 0 if usage is 0)\n",
1026
+ " index_value = usage_share / pop_share if pop_share > 0 else 0\n",
1027
+ "\n",
1028
+ " index_row = {\n",
1029
+ " \"geo_id\": usage_row[\"geo_id\"],\n",
1030
+ " \"geography\": usage_row[\"geography\"],\n",
1031
+ " \"date_start\": usage_row[\"date_start\"],\n",
1032
+ " \"date_end\": usage_row[\"date_end\"],\n",
1033
+ " \"platform_and_product\": usage_row[\"platform_and_product\"],\n",
1034
+ " \"facet\": usage_row[\"facet\"],\n",
1035
+ " \"level\": usage_row[\"level\"],\n",
1036
+ " \"variable\": \"usage_per_capita_index\",\n",
1037
+ " \"cluster_name\": usage_row[\"cluster_name\"],\n",
1038
+ " \"value\": index_value,\n",
1039
+ " }\n",
1040
+ " index_rows.append(index_row)\n",
1041
+ "\n",
1042
+ " # Add all index rows to result\n",
1043
+ " if index_rows:\n",
1044
+ " df_index = pd.DataFrame(index_rows)\n",
1045
+ " df_result = pd.concat([df_result, df_index], ignore_index=True)\n",
1046
+ "\n",
1047
+ " return df_result\n",
1048
+ "\n",
1049
+ "\n",
1050
+ "def calculate_category_percentage_index(\n",
1051
+ " df, filtered_countries=None, filtered_states=None\n",
1052
+ "):\n",
1053
+ " \"\"\"\n",
1054
+ " Calculate category percentage index for facet specialization.\n",
1055
+ "\n",
1056
+ " For countries: Compare to global percentage for that cluster\n",
1057
+ " For US states: Compare to US country percentage for that cluster\n",
1058
+ "\n",
1059
+ " Only calculates for countries/states that meet MIN_OBSERVATIONS.\n",
1060
+ " Excludes \"not_classified\" and \"none\" categories as these are catch-alls.\n",
1061
+ "\n",
1062
+ " Args:\n",
1063
+ " df: Dataframe with percentage metrics as rows\n",
1064
+ " filtered_countries: List of countries that meet MIN_OBSERVATIONS threshold\n",
1065
+ " filtered_states: List of states that meet MIN_OBSERVATIONS threshold\n",
1066
+ "\n",
1067
+ " Returns:\n",
1068
+ " Dataframe with category percentage index added as new rows (only for filtered geographies)\n",
1069
+ " \"\"\"\n",
1070
+ " df_result = df.copy()\n",
1071
+ "\n",
1072
+ " # Process percentage metrics for content facets\n",
1073
+ " pct_vars = [\"onet_task_pct\", \"collaboration_pct\", \"request_pct\"]\n",
1074
+ "\n",
1075
+ " index_rows = []\n",
1076
+ "\n",
1077
+ " for pct_var in pct_vars:\n",
1078
+ " # Get the base facet name\n",
1079
+ " facet_name = pct_var.replace(\"_pct\", \"\")\n",
1080
+ "\n",
1081
+ " # Get percentage data for this variable\n",
1082
+ " df_pct = df_result[\n",
1083
+ " (df_result[\"variable\"] == pct_var) & (df_result[\"facet\"] == facet_name)\n",
1084
+ " ].copy()\n",
1085
+ "\n",
1086
+ " # Exclude not_classified and none categories from index calculation\n",
1087
+ " # These are catch-all/no-pattern categories that don't provide meaningful comparisons\n",
1088
+ " df_pct = df_pct[~df_pct[\"cluster_name\"].isin([\"not_classified\", \"none\"])]\n",
1089
+ "\n",
1090
+ " if not df_pct.empty and \"cluster_name\" in df_pct.columns:\n",
1091
+ " # Check if this facet has levels (like request)\n",
1092
+ " has_levels = df_pct[\"level\"].notna().any() and (df_pct[\"level\"] != 0).any()\n",
1093
+ "\n",
1094
+ " if has_levels:\n",
1095
+ " # Process each level separately\n",
1096
+ " levels = df_pct[\"level\"].dropna().unique()\n",
1097
+ "\n",
1098
+ " for level in levels:\n",
1099
+ " df_level = df_pct[df_pct[\"level\"] == level].copy()\n",
1100
+ "\n",
1101
+ " # Get global baselines for this level\n",
1102
+ " global_baselines = (\n",
1103
+ " df_level[\n",
1104
+ " (df_level[\"geography\"] == \"global\")\n",
1105
+ " & (df_level[\"geo_id\"] == \"GLOBAL\")\n",
1106
+ " ]\n",
1107
+ " .set_index(\"cluster_name\")[\"value\"]\n",
1108
+ " .to_dict()\n",
1109
+ " )\n",
1110
+ "\n",
1111
+ " # Get US baselines for this level\n",
1112
+ " us_baselines = (\n",
1113
+ " df_level[\n",
1114
+ " (df_level[\"geography\"] == \"country\")\n",
1115
+ " & (df_level[\"geo_id\"] == \"US\")\n",
1116
+ " ]\n",
1117
+ " .set_index(\"cluster_name\")[\"value\"]\n",
1118
+ " .to_dict()\n",
1119
+ " )\n",
1120
+ "\n",
1121
+ " # Process countries for this level\n",
1122
+ " if filtered_countries is not None and global_baselines:\n",
1123
+ " df_countries = df_level[\n",
1124
+ " (df_level[\"geography\"] == \"country\")\n",
1125
+ " & (df_level[\"geo_id\"].isin(filtered_countries))\n",
1126
+ " ].copy()\n",
1127
+ "\n",
1128
+ " for _, row in df_countries.iterrows():\n",
1129
+ " baseline = global_baselines.get(row[\"cluster_name\"])\n",
1130
+ "\n",
1131
+ " if baseline and baseline > 0:\n",
1132
+ " index_row = {\n",
1133
+ " \"geo_id\": row[\"geo_id\"],\n",
1134
+ " \"geography\": row[\"geography\"],\n",
1135
+ " \"date_start\": row[\"date_start\"],\n",
1136
+ " \"date_end\": row[\"date_end\"],\n",
1137
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1138
+ " \"facet\": row[\"facet\"],\n",
1139
+ " \"level\": row[\"level\"],\n",
1140
+ " \"variable\": f\"{facet_name}_pct_index\",\n",
1141
+ " \"cluster_name\": row[\"cluster_name\"],\n",
1142
+ " \"value\": row[\"value\"] / baseline,\n",
1143
+ " }\n",
1144
+ " index_rows.append(index_row)\n",
1145
+ "\n",
1146
+ " # Process states for this level\n",
1147
+ " if filtered_states is not None and us_baselines:\n",
1148
+ " df_states = df_level[\n",
1149
+ " (df_level[\"geography\"] == \"state_us\")\n",
1150
+ " & (df_level[\"geo_id\"].isin(filtered_states))\n",
1151
+ " ].copy()\n",
1152
+ "\n",
1153
+ " for _, row in df_states.iterrows():\n",
1154
+ " baseline = us_baselines.get(row[\"cluster_name\"])\n",
1155
+ "\n",
1156
+ " if baseline and baseline > 0:\n",
1157
+ " index_row = {\n",
1158
+ " \"geo_id\": row[\"geo_id\"],\n",
1159
+ " \"geography\": row[\"geography\"],\n",
1160
+ " \"date_start\": row[\"date_start\"],\n",
1161
+ " \"date_end\": row[\"date_end\"],\n",
1162
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1163
+ " \"facet\": row[\"facet\"],\n",
1164
+ " \"level\": row[\"level\"],\n",
1165
+ " \"variable\": f\"{facet_name}_pct_index\",\n",
1166
+ " \"cluster_name\": row[\"cluster_name\"],\n",
1167
+ " \"value\": row[\"value\"] / baseline,\n",
1168
+ " }\n",
1169
+ " index_rows.append(index_row)\n",
1170
+ " else:\n",
1171
+ " # No levels (onet_task, collaboration)\n",
1172
+ " # Get global baselines\n",
1173
+ " global_baselines = (\n",
1174
+ " df_pct[\n",
1175
+ " (df_pct[\"geography\"] == \"global\")\n",
1176
+ " & (df_pct[\"geo_id\"] == \"GLOBAL\")\n",
1177
+ " ]\n",
1178
+ " .set_index(\"cluster_name\")[\"value\"]\n",
1179
+ " .to_dict()\n",
1180
+ " )\n",
1181
+ "\n",
1182
+ " # Get US baselines\n",
1183
+ " us_baselines = (\n",
1184
+ " df_pct[\n",
1185
+ " (df_pct[\"geography\"] == \"country\") & (df_pct[\"geo_id\"] == \"US\")\n",
1186
+ " ]\n",
1187
+ " .set_index(\"cluster_name\")[\"value\"]\n",
1188
+ " .to_dict()\n",
1189
+ " )\n",
1190
+ "\n",
1191
+ " # Process countries\n",
1192
+ " if filtered_countries is not None and global_baselines:\n",
1193
+ " df_countries = df_pct[\n",
1194
+ " (df_pct[\"geography\"] == \"country\")\n",
1195
+ " & (df_pct[\"geo_id\"].isin(filtered_countries))\n",
1196
+ " ].copy()\n",
1197
+ "\n",
1198
+ " for _, row in df_countries.iterrows():\n",
1199
+ " baseline = global_baselines.get(row[\"cluster_name\"])\n",
1200
+ "\n",
1201
+ " if baseline and baseline > 0:\n",
1202
+ " index_row = {\n",
1203
+ " \"geo_id\": row[\"geo_id\"],\n",
1204
+ " \"geography\": row[\"geography\"],\n",
1205
+ " \"date_start\": row[\"date_start\"],\n",
1206
+ " \"date_end\": row[\"date_end\"],\n",
1207
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1208
+ " \"facet\": row[\"facet\"],\n",
1209
+ " \"level\": row[\"level\"],\n",
1210
+ " \"variable\": f\"{facet_name}_pct_index\",\n",
1211
+ " \"cluster_name\": row[\"cluster_name\"],\n",
1212
+ " \"value\": row[\"value\"] / baseline,\n",
1213
+ " }\n",
1214
+ " index_rows.append(index_row)\n",
1215
+ "\n",
1216
+ " # Process states\n",
1217
+ " if filtered_states is not None and us_baselines:\n",
1218
+ " df_states = df_pct[\n",
1219
+ " (df_pct[\"geography\"] == \"state_us\")\n",
1220
+ " & (df_pct[\"geo_id\"].isin(filtered_states))\n",
1221
+ " ].copy()\n",
1222
+ "\n",
1223
+ " for _, row in df_states.iterrows():\n",
1224
+ " baseline = us_baselines.get(row[\"cluster_name\"])\n",
1225
+ "\n",
1226
+ " if baseline and baseline > 0:\n",
1227
+ " index_row = {\n",
1228
+ " \"geo_id\": row[\"geo_id\"],\n",
1229
+ " \"geography\": row[\"geography\"],\n",
1230
+ " \"date_start\": row[\"date_start\"],\n",
1231
+ " \"date_end\": row[\"date_end\"],\n",
1232
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1233
+ " \"facet\": row[\"facet\"],\n",
1234
+ " \"level\": row[\"level\"],\n",
1235
+ " \"variable\": f\"{facet_name}_pct_index\",\n",
1236
+ " \"cluster_name\": row[\"cluster_name\"],\n",
1237
+ " \"value\": row[\"value\"] / baseline,\n",
1238
+ " }\n",
1239
+ " index_rows.append(index_row)\n",
1240
+ "\n",
1241
+ " # Add all index rows to result\n",
1242
+ " if index_rows:\n",
1243
+ " df_index = pd.DataFrame(index_rows)\n",
1244
+ " df_result = pd.concat([df_result, df_index], ignore_index=True)\n",
1245
+ "\n",
1246
+ " return df_result"
1247
+ ]
1248
+ },
1249
+ {
1250
+ "cell_type": "code",
1251
+ "execution_count": null,
1252
+ "metadata": {},
1253
+ "outputs": [],
1254
+ "source": [
1255
+ "def calculate_usage_tiers(df, n_tiers=4, filtered_countries=None, filtered_states=None):\n",
1256
+ " \"\"\"\n",
1257
+ " Calculate usage tiers based on indexed per capita usage.\n",
1258
+ " - Tier 0: Zero adoption (index = 0)\n",
1259
+ " - Tiers 1-4: Quartiles based on thresholds from filtered countries/states\n",
1260
+ "\n",
1261
+ " Quartile thresholds are calculated using only countries/states with ≥MIN_OBSERVATIONS,\n",
1262
+ " but applied to all countries/states to ensure complete visualization.\n",
1263
+ "\n",
1264
+ " Note: Tier assignments for countries/states with <MIN_OBSERVATIONS should be\n",
1265
+ " interpreted with caution due to sample size limitations.\n",
1266
+ "\n",
1267
+ " Args:\n",
1268
+ " df: Input dataframe\n",
1269
+ " n_tiers: Number of quartiles to create for non-zero usage (default 4)\n",
1270
+ " filtered_countries: List of countries that meet MIN_OBSERVATIONS threshold\n",
1271
+ " filtered_states: List of states that meet MIN_OBSERVATIONS threshold\n",
1272
+ "\n",
1273
+ " Returns:\n",
1274
+ " Dataframe with usage tier rows added\n",
1275
+ " \"\"\"\n",
1276
+ " df_result = df.copy()\n",
1277
+ "\n",
1278
+ " # Calculate tiers for indexed per capita metrics\n",
1279
+ " if \"variable\" in df_result.columns and \"value\" in df_result.columns:\n",
1280
+ " index_vars = [\"usage_per_capita_index\"]\n",
1281
+ "\n",
1282
+ " quartile_labels = [\n",
1283
+ " \"Emerging (bottom 25%)\",\n",
1284
+ " \"Lower middle (25-50%)\",\n",
1285
+ " \"Upper middle (50-75%)\",\n",
1286
+ " \"Leading (top 25%)\",\n",
1287
+ " ]\n",
1288
+ "\n",
1289
+ " tier_rows = []\n",
1290
+ "\n",
1291
+ " for var in index_vars:\n",
1292
+ " # Process countries\n",
1293
+ " # Get all countries with the index variable\n",
1294
+ " all_country_data = df_result[\n",
1295
+ " (df_result[\"variable\"] == var) & (df_result[\"geography\"] == \"country\")\n",
1296
+ " ].copy()\n",
1297
+ "\n",
1298
+ " if not all_country_data.empty:\n",
1299
+ " # Separate zero and non-zero usage\n",
1300
+ " zero_usage = all_country_data[all_country_data[\"value\"] == 0].copy()\n",
1301
+ " nonzero_usage = all_country_data[all_country_data[\"value\"] > 0].copy()\n",
1302
+ "\n",
1303
+ " # Calculate quartile thresholds using ONLY filtered countries\n",
1304
+ " if filtered_countries is not None and not nonzero_usage.empty:\n",
1305
+ " # Get only filtered countries for quartile calculation\n",
1306
+ " filtered_for_quartiles = nonzero_usage[\n",
1307
+ " nonzero_usage[\"geo_id\"].isin(filtered_countries)\n",
1308
+ " ].copy()\n",
1309
+ "\n",
1310
+ " if not filtered_for_quartiles.empty:\n",
1311
+ " # Calculate quartile thresholds from filtered countries\n",
1312
+ " quartiles = (\n",
1313
+ " filtered_for_quartiles[\"value\"]\n",
1314
+ " .quantile([0.25, 0.5, 0.75])\n",
1315
+ " .values\n",
1316
+ " )\n",
1317
+ "\n",
1318
+ " # Apply thresholds to all non-zero countries\n",
1319
+ " for _, row in nonzero_usage.iterrows():\n",
1320
+ " value = row[\"value\"]\n",
1321
+ "\n",
1322
+ " # Assign tier based on thresholds\n",
1323
+ " if value <= quartiles[0]:\n",
1324
+ " tier_label = quartile_labels[0] # Bottom 25%\n",
1325
+ " tier_value = 1\n",
1326
+ " elif value <= quartiles[1]:\n",
1327
+ " tier_label = quartile_labels[1] # 25-50%\n",
1328
+ " tier_value = 2\n",
1329
+ " elif value <= quartiles[2]:\n",
1330
+ " tier_label = quartile_labels[2] # 50-75%\n",
1331
+ " tier_value = 3\n",
1332
+ " else:\n",
1333
+ " tier_label = quartile_labels[3] # Top 25%\n",
1334
+ " tier_value = 4\n",
1335
+ "\n",
1336
+ " tier_row = {\n",
1337
+ " \"geo_id\": row[\"geo_id\"],\n",
1338
+ " \"geography\": row[\"geography\"],\n",
1339
+ " \"date_start\": row[\"date_start\"],\n",
1340
+ " \"date_end\": row[\"date_end\"],\n",
1341
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1342
+ " \"facet\": row[\"facet\"],\n",
1343
+ " \"level\": row[\"level\"],\n",
1344
+ " \"variable\": \"usage_tier\",\n",
1345
+ " \"cluster_name\": tier_label,\n",
1346
+ " \"value\": tier_value,\n",
1347
+ " }\n",
1348
+ " tier_rows.append(tier_row)\n",
1349
+ "\n",
1350
+ " # Add tier 0 for all zero usage countries\n",
1351
+ " for _, row in zero_usage.iterrows():\n",
1352
+ " tier_row = {\n",
1353
+ " \"geo_id\": row[\"geo_id\"],\n",
1354
+ " \"geography\": row[\"geography\"],\n",
1355
+ " \"date_start\": row[\"date_start\"],\n",
1356
+ " \"date_end\": row[\"date_end\"],\n",
1357
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1358
+ " \"facet\": row[\"facet\"],\n",
1359
+ " \"level\": row[\"level\"],\n",
1360
+ " \"variable\": \"usage_tier\",\n",
1361
+ " \"cluster_name\": \"Minimal\",\n",
1362
+ " \"value\": 0,\n",
1363
+ " }\n",
1364
+ " tier_rows.append(tier_row)\n",
1365
+ "\n",
1366
+ " # Process states\n",
1367
+ " # Get all states with the index variable\n",
1368
+ " all_state_data = df_result[\n",
1369
+ " (df_result[\"variable\"] == var) & (df_result[\"geography\"] == \"state_us\")\n",
1370
+ " ].copy()\n",
1371
+ "\n",
1372
+ " if not all_state_data.empty:\n",
1373
+ " # Separate zero and non-zero usage\n",
1374
+ " zero_usage = all_state_data[all_state_data[\"value\"] == 0].copy()\n",
1375
+ " nonzero_usage = all_state_data[all_state_data[\"value\"] > 0].copy()\n",
1376
+ "\n",
1377
+ " # Calculate quartile thresholds using ONLY filtered states\n",
1378
+ " if filtered_states is not None and not nonzero_usage.empty:\n",
1379
+ " # Get only filtered states for quartile calculation\n",
1380
+ " filtered_for_quartiles = nonzero_usage[\n",
1381
+ " nonzero_usage[\"geo_id\"].isin(filtered_states)\n",
1382
+ " ].copy()\n",
1383
+ "\n",
1384
+ " if not filtered_for_quartiles.empty:\n",
1385
+ " # Calculate quartile thresholds from filtered states\n",
1386
+ " quartiles = (\n",
1387
+ " filtered_for_quartiles[\"value\"]\n",
1388
+ " .quantile([0.25, 0.5, 0.75])\n",
1389
+ " .values\n",
1390
+ " )\n",
1391
+ "\n",
1392
+ " # Apply thresholds to all non-zero states\n",
1393
+ " for _, row in nonzero_usage.iterrows():\n",
1394
+ " value = row[\"value\"]\n",
1395
+ "\n",
1396
+ " # Assign tier based on thresholds\n",
1397
+ " if value <= quartiles[0]:\n",
1398
+ " tier_label = quartile_labels[0] # Bottom 25%\n",
1399
+ " tier_value = 1\n",
1400
+ " elif value <= quartiles[1]:\n",
1401
+ " tier_label = quartile_labels[1] # 25-50%\n",
1402
+ " tier_value = 2\n",
1403
+ " elif value <= quartiles[2]:\n",
1404
+ " tier_label = quartile_labels[2] # 50-75%\n",
1405
+ " tier_value = 3\n",
1406
+ " else:\n",
1407
+ " tier_label = quartile_labels[3] # Top 25%\n",
1408
+ " tier_value = 4\n",
1409
+ "\n",
1410
+ " tier_row = {\n",
1411
+ " \"geo_id\": row[\"geo_id\"],\n",
1412
+ " \"geography\": row[\"geography\"],\n",
1413
+ " \"date_start\": row[\"date_start\"],\n",
1414
+ " \"date_end\": row[\"date_end\"],\n",
1415
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1416
+ " \"facet\": row[\"facet\"],\n",
1417
+ " \"level\": row[\"level\"],\n",
1418
+ " \"variable\": \"usage_tier\",\n",
1419
+ " \"cluster_name\": tier_label,\n",
1420
+ " \"value\": tier_value,\n",
1421
+ " }\n",
1422
+ " tier_rows.append(tier_row)\n",
1423
+ "\n",
1424
+ " # Add tier 0 for all zero usage states\n",
1425
+ " for _, row in zero_usage.iterrows():\n",
1426
+ " tier_row = {\n",
1427
+ " \"geo_id\": row[\"geo_id\"],\n",
1428
+ " \"geography\": row[\"geography\"],\n",
1429
+ " \"date_start\": row[\"date_start\"],\n",
1430
+ " \"date_end\": row[\"date_end\"],\n",
1431
+ " \"platform_and_product\": row[\"platform_and_product\"],\n",
1432
+ " \"facet\": row[\"facet\"],\n",
1433
+ " \"level\": row[\"level\"],\n",
1434
+ " \"variable\": \"usage_tier\",\n",
1435
+ " \"cluster_name\": \"Minimal\",\n",
1436
+ " \"value\": 0,\n",
1437
+ " }\n",
1438
+ " tier_rows.append(tier_row)\n",
1439
+ "\n",
1440
+ " if tier_rows:\n",
1441
+ " df_result = pd.concat(\n",
1442
+ " [df_result, pd.DataFrame(tier_rows)], ignore_index=True\n",
1443
+ " )\n",
1444
+ "\n",
1445
+ " return df_result"
1446
+ ]
1447
+ },
1448
+ {
1449
+ "cell_type": "code",
1450
+ "execution_count": null,
1451
+ "metadata": {},
1452
+ "outputs": [],
1453
+ "source": [
1454
+ "def calculate_automation_augmentation_metrics(\n",
1455
+ " df, filtered_countries=None, filtered_states=None\n",
1456
+ "):\n",
1457
+ " \"\"\"\n",
1458
+ " Calculate automation vs augmentation percentages for collaboration patterns.\n",
1459
+ "\n",
1460
+ " This function:\n",
1461
+ " 1. Categorizes collaboration patterns as automation or augmentation\n",
1462
+ " 2. Calculates percentages excluding 'none' and 'not_classified'\n",
1463
+ " 3. Only calculates for filtered geographies at country/state level\n",
1464
+ "\n",
1465
+ " Categorization:\n",
1466
+ " - Automation: directive, feedback loop (AI-centric patterns)\n",
1467
+ " - Augmentation: validation, task iteration, learning (human-centric patterns)\n",
1468
+ " - Excluded: none (no collaboration), not_classified (unknown)\n",
1469
+ "\n",
1470
+ " Args:\n",
1471
+ " df: Dataframe with collaboration data\n",
1472
+ " filtered_countries: List of countries that meet MIN_OBSERVATIONS\n",
1473
+ " filtered_states: List of states that meet MIN_OBSERVATIONS\n",
1474
+ "\n",
1475
+ " Returns:\n",
1476
+ " Dataframe with automation/augmentation percentage rows added\n",
1477
+ " \"\"\"\n",
1478
+ " if \"facet\" not in df.columns or \"cluster_name\" not in df.columns:\n",
1479
+ " return df\n",
1480
+ "\n",
1481
+ " df_result = df.copy()\n",
1482
+ "\n",
1483
+ " # Get collaboration data\n",
1484
+ " collab_data = df_result[\n",
1485
+ " (df_result[\"facet\"] == \"collaboration\")\n",
1486
+ " & (df_result[\"variable\"] == \"collaboration_count\")\n",
1487
+ " ].copy()\n",
1488
+ "\n",
1489
+ " if collab_data.empty:\n",
1490
+ " return df_result\n",
1491
+ "\n",
1492
+ " # Define pattern categorization\n",
1493
+ " def categorize_pattern(pattern_name):\n",
1494
+ " if pd.isna(pattern_name):\n",
1495
+ " return None\n",
1496
+ "\n",
1497
+ " pattern_clean = pattern_name.lower().replace(\"_\", \" \").replace(\"-\", \" \")\n",
1498
+ "\n",
1499
+ " # Augmentation patterns (human-centric)\n",
1500
+ " if \"validation\" in pattern_clean:\n",
1501
+ " return \"augmentation\"\n",
1502
+ " elif \"task iteration\" in pattern_clean or \"task_iteration\" in pattern_clean:\n",
1503
+ " return \"augmentation\"\n",
1504
+ " elif \"learning\" in pattern_clean:\n",
1505
+ " return \"augmentation\"\n",
1506
+ " # Automation patterns (AI-centric)\n",
1507
+ " elif \"directive\" in pattern_clean:\n",
1508
+ " return \"automation\"\n",
1509
+ " elif \"feedback loop\" in pattern_clean or \"feedback_loop\" in pattern_clean:\n",
1510
+ " return \"automation\"\n",
1511
+ " # Excluded patterns - return None to exclude from calculations\n",
1512
+ " elif \"none\" in pattern_clean or \"not_classified\" in pattern_clean:\n",
1513
+ " return None\n",
1514
+ " else:\n",
1515
+ " return None # Unknown patterns also excluded\n",
1516
+ "\n",
1517
+ " # Add category column\n",
1518
+ " collab_data[\"category\"] = collab_data[\"cluster_name\"].apply(categorize_pattern)\n",
1519
+ "\n",
1520
+ " # Filter to only patterns that have a category (excludes none, not_classified, etc.)\n",
1521
+ " collab_categorized = collab_data[collab_data[\"category\"].notna()].copy()\n",
1522
+ "\n",
1523
+ " if collab_categorized.empty:\n",
1524
+ " return df_result\n",
1525
+ "\n",
1526
+ " # Process by geography\n",
1527
+ " new_rows = []\n",
1528
+ "\n",
1529
+ " # Group by geography and geo_id\n",
1530
+ " for (geography, geo_id), geo_data in collab_categorized.groupby(\n",
1531
+ " [\"geography\", \"geo_id\"]\n",
1532
+ " ):\n",
1533
+ " # Apply filtering based on geography level\n",
1534
+ " if geography == \"country\" and filtered_countries is not None:\n",
1535
+ " if geo_id not in filtered_countries:\n",
1536
+ " continue # Skip countries that don't meet threshold\n",
1537
+ " elif geography == \"state_us\" and filtered_states is not None:\n",
1538
+ " if geo_id not in filtered_states:\n",
1539
+ " continue # Skip states that don't meet threshold\n",
1540
+ " # global is always included (no filtering)\n",
1541
+ "\n",
1542
+ " # Calculate totals by category\n",
1543
+ " automation_total = geo_data[geo_data[\"category\"] == \"automation\"][\"value\"].sum()\n",
1544
+ " augmentation_total = geo_data[geo_data[\"category\"] == \"augmentation\"][\n",
1545
+ " \"value\"\n",
1546
+ " ].sum()\n",
1547
+ "\n",
1548
+ " # Total of categorized patterns (excluding none and not_classified)\n",
1549
+ " total_categorized = automation_total + augmentation_total\n",
1550
+ "\n",
1551
+ " if total_categorized > 0:\n",
1552
+ " # Get a sample row for metadata\n",
1553
+ " sample_row = geo_data.iloc[0]\n",
1554
+ "\n",
1555
+ " # Create automation percentage row\n",
1556
+ " automation_row = {\n",
1557
+ " \"geo_id\": geo_id,\n",
1558
+ " \"geography\": geography,\n",
1559
+ " \"date_start\": sample_row[\"date_start\"],\n",
1560
+ " \"date_end\": sample_row[\"date_end\"],\n",
1561
+ " \"platform_and_product\": sample_row[\"platform_and_product\"],\n",
1562
+ " \"facet\": \"collaboration_automation_augmentation\",\n",
1563
+ " \"level\": 0,\n",
1564
+ " \"variable\": \"automation_pct\",\n",
1565
+ " \"cluster_name\": \"automation\",\n",
1566
+ " \"value\": (automation_total / total_categorized) * 100,\n",
1567
+ " }\n",
1568
+ " new_rows.append(automation_row)\n",
1569
+ "\n",
1570
+ " # Create augmentation percentage row\n",
1571
+ " augmentation_row = {\n",
1572
+ " \"geo_id\": geo_id,\n",
1573
+ " \"geography\": geography,\n",
1574
+ " \"date_start\": sample_row[\"date_start\"],\n",
1575
+ " \"date_end\": sample_row[\"date_end\"],\n",
1576
+ " \"platform_and_product\": sample_row[\"platform_and_product\"],\n",
1577
+ " \"facet\": \"collaboration_automation_augmentation\",\n",
1578
+ " \"level\": 0,\n",
1579
+ " \"variable\": \"augmentation_pct\",\n",
1580
+ " \"cluster_name\": \"augmentation\",\n",
1581
+ " \"value\": (augmentation_total / total_categorized) * 100,\n",
1582
+ " }\n",
1583
+ " new_rows.append(augmentation_row)\n",
1584
+ "\n",
1585
+ " # Add all new rows to result\n",
1586
+ " if new_rows:\n",
1587
+ " df_new = pd.DataFrame(new_rows)\n",
1588
+ " df_result = pd.concat([df_result, df_new], ignore_index=True)\n",
1589
+ "\n",
1590
+ " return df_result"
1591
+ ]
1592
+ },
1593
+ {
1594
+ "cell_type": "code",
1595
+ "execution_count": null,
1596
+ "metadata": {},
1597
+ "outputs": [],
1598
+ "source": [
1599
+ "def add_iso3_and_names(df):\n",
1600
+ " \"\"\"\n",
1601
+ " Replace ISO-2 codes with ISO-3 codes and add geographic names.\n",
1602
+ "\n",
1603
+ " This function:\n",
1604
+ " 1. Replaces geo_id from ISO-2 to ISO-3 for countries\n",
1605
+ " 2. Adds geo_name column with human-readable names for all geographies\n",
1606
+ " 3. Preserves special geo_ids (like 'not_classified') that aren't in ISO mapping\n",
1607
+ "\n",
1608
+ " Args:\n",
1609
+ " df: Enriched dataframe with geo_id (ISO-2 for countries, state codes for US states)\n",
1610
+ "\n",
1611
+ " Returns:\n",
1612
+ " Dataframe with ISO-3 codes in geo_id and geo_name column added\n",
1613
+ " \"\"\"\n",
1614
+ " df_result = df.copy()\n",
1615
+ "\n",
1616
+ " # Initialize geo_name column\n",
1617
+ " df_result[\"geo_name\"] = \"\"\n",
1618
+ "\n",
1619
+ " # Load ISO mapping data for countries\n",
1620
+ " iso_path = Path(DATA_INTERMEDIATE_DIR) / \"iso_country_codes.csv\"\n",
1621
+ " if iso_path.exists():\n",
1622
+ " df_iso = pd.read_csv(iso_path, keep_default_na=False, na_values=[\"\"])\n",
1623
+ "\n",
1624
+ " # Create ISO-2 to ISO-3 mapping\n",
1625
+ " iso2_to_iso3 = dict(zip(df_iso[\"iso_alpha_2\"], df_iso[\"iso_alpha_3\"]))\n",
1626
+ "\n",
1627
+ " # Create ISO-2 to country name mapping\n",
1628
+ " iso2_to_name = dict(zip(df_iso[\"iso_alpha_2\"], df_iso[\"country_name\"]))\n",
1629
+ "\n",
1630
+ " # For all rows where geography is 'country', add country names and convert codes\n",
1631
+ " # This includes content facets that are broken down by country\n",
1632
+ " country_mask = df_result[\"geography\"] == \"country\"\n",
1633
+ "\n",
1634
+ " # First, identify which geo_ids don't have ISO mappings\n",
1635
+ " country_geo_ids = df_result.loc[country_mask, \"geo_id\"].unique()\n",
1636
+ " unmapped_geo_ids = [\n",
1637
+ " g for g in country_geo_ids if g not in iso2_to_iso3 and pd.notna(g)\n",
1638
+ " ]\n",
1639
+ "\n",
1640
+ " if unmapped_geo_ids:\n",
1641
+ " print(\n",
1642
+ " f\"\\nWarning: The following geo_ids are not in ISO-2 mapping and will be kept as-is:\"\n",
1643
+ " )\n",
1644
+ " for geo_id in unmapped_geo_ids:\n",
1645
+ " # Count rows and usage for this geo_id\n",
1646
+ " geo_mask = (df_result[\"geography\"] == \"country\") & (\n",
1647
+ " df_result[\"geo_id\"] == geo_id\n",
1648
+ " )\n",
1649
+ " row_count = geo_mask.sum()\n",
1650
+ " usage_mask = geo_mask & (df_result[\"variable\"] == \"usage_count\")\n",
1651
+ " usage_sum = (\n",
1652
+ " df_result.loc[usage_mask, \"value\"].sum() if usage_mask.any() else 0\n",
1653
+ " )\n",
1654
+ " print(f\" - '{geo_id}': {row_count} rows, {usage_sum:,.0f} usage count\")\n",
1655
+ "\n",
1656
+ " # Check for geo_ids without country names\n",
1657
+ " unmapped_names = [g for g in unmapped_geo_ids if g not in iso2_to_name]\n",
1658
+ " if unmapped_names:\n",
1659
+ " print(\n",
1660
+ " f\"\\nWarning: The following geo_ids don't have country names and will use geo_id as name:\"\n",
1661
+ " )\n",
1662
+ " for geo_id in unmapped_names:\n",
1663
+ " print(f\" - '{geo_id}'\")\n",
1664
+ "\n",
1665
+ " # Apply country names BEFORE converting ISO-2 to ISO-3\n",
1666
+ " # The iso2_to_name dictionary uses ISO-2 codes as keys\n",
1667
+ " df_result.loc[country_mask, \"geo_name\"] = (\n",
1668
+ " df_result.loc[country_mask, \"geo_id\"]\n",
1669
+ " .map(iso2_to_name)\n",
1670
+ " .fillna(df_result.loc[country_mask, \"geo_id\"])\n",
1671
+ " )\n",
1672
+ "\n",
1673
+ " # Convert ISO-2 to ISO-3 codes\n",
1674
+ " df_result.loc[country_mask, \"geo_id\"] = (\n",
1675
+ " df_result.loc[country_mask, \"geo_id\"]\n",
1676
+ " .map(iso2_to_iso3)\n",
1677
+ " .fillna(df_result.loc[country_mask, \"geo_id\"])\n",
1678
+ " )\n",
1679
+ " else:\n",
1680
+ " print(f\"Warning: ISO mapping file not found at {iso_path}\")\n",
1681
+ "\n",
1682
+ " # Load state names from census data\n",
1683
+ " state_codes_path = Path(DATA_INPUT_DIR) / \"census_state_codes.txt\"\n",
1684
+ " if state_codes_path.exists():\n",
1685
+ " df_state_codes = pd.read_csv(state_codes_path, sep=\"|\")\n",
1686
+ " # Create state code to name mapping (STUSAB is the 2-letter code, STATE_NAME is the full name)\n",
1687
+ " state_code_to_name = dict(\n",
1688
+ " zip(df_state_codes[\"STUSAB\"], df_state_codes[\"STATE_NAME\"])\n",
1689
+ " )\n",
1690
+ "\n",
1691
+ " # For all rows where geography is 'state_us', add state names\n",
1692
+ " state_mask = df_result[\"geography\"] == \"state_us\"\n",
1693
+ " df_result.loc[state_mask, \"geo_name\"] = df_result.loc[state_mask, \"geo_id\"].map(\n",
1694
+ " state_code_to_name\n",
1695
+ " )\n",
1696
+ " else:\n",
1697
+ " print(f\"Warning: State census file not found at {state_codes_path}\")\n",
1698
+ "\n",
1699
+ " # For global entries\n",
1700
+ " global_mask = df_result[\"geography\"] == \"global\"\n",
1701
+ " df_result.loc[global_mask, \"geo_name\"] = \"global\"\n",
1702
+ "\n",
1703
+ " # Fill any missing geo_names with geo_id as fallback\n",
1704
+ " df_result.loc[df_result[\"geo_name\"] == \"\", \"geo_name\"] = df_result.loc[\n",
1705
+ " df_result[\"geo_name\"] == \"\", \"geo_id\"\n",
1706
+ " ]\n",
1707
+ " df_result[\"geo_name\"] = df_result[\"geo_name\"].fillna(df_result[\"geo_id\"])\n",
1708
+ "\n",
1709
+ " return df_result"
1710
+ ]
1711
+ },
1712
+ {
1713
+ "cell_type": "markdown",
1714
+ "metadata": {},
1715
+ "source": [
1716
+ "## Main Processing Function"
1717
+ ]
1718
+ },
1719
+ {
1720
+ "cell_type": "code",
1721
+ "execution_count": null,
1722
+ "metadata": {},
1723
+ "outputs": [],
1724
+ "source": [
1725
+ "def enrich_clio_data(input_path, output_path, external_data=None):\n",
1726
+ " \"\"\"\n",
1727
+ " Enrich processed Clio data with external sources.\n",
1728
+ "\n",
1729
+ " Args:\n",
1730
+ " input_path: Path to processed Clio data\n",
1731
+ " output_path: Path for enriched CSV output\n",
1732
+ " external_data: Pre-loaded external data (optional)\n",
1733
+ "\n",
1734
+ " Returns:\n",
1735
+ " Path to enriched data file\n",
1736
+ " \"\"\"\n",
1737
+ " # Load processed Clio data - use keep_default_na=False to preserve \"NA\" (Namibia)\n",
1738
+ " df = pd.read_csv(input_path, keep_default_na=False, na_values=[\"\"])\n",
1739
+ "\n",
1740
+ " # Load external data if not provided\n",
1741
+ " if external_data is None:\n",
1742
+ " external_data = load_external_data()\n",
1743
+ "\n",
1744
+ " # Get filtered geographies (but keep all data in the dataframe)\n",
1745
+ " filtered_countries, filtered_states = get_filtered_geographies(df)\n",
1746
+ "\n",
1747
+ " # Merge with population data\n",
1748
+ " df = merge_population_data(df, external_data[\"population\"])\n",
1749
+ "\n",
1750
+ " # Merge with GDP data (pass population data for per capita calculation)\n",
1751
+ " df = merge_gdp_data(df, external_data[\"gdp\"], external_data[\"population\"])\n",
1752
+ "\n",
1753
+ " # Calculate SOC occupation distribution from O*NET tasks\n",
1754
+ " # Only for geographies that meet MIN_OBSERVATIONS threshold\n",
1755
+ " df = calculate_soc_distribution(\n",
1756
+ " df,\n",
1757
+ " external_data[\"task_statements\"],\n",
1758
+ " external_data[\"soc_structure\"],\n",
1759
+ " filtered_countries=filtered_countries,\n",
1760
+ " filtered_states=filtered_states,\n",
1761
+ " )\n",
1762
+ "\n",
1763
+ " # Calculate per capita metrics\n",
1764
+ " df = calculate_per_capita_metrics(df)\n",
1765
+ "\n",
1766
+ " # Calculate usage index - pass filtered countries/states to only use them for baseline\n",
1767
+ " df = calculate_usage_per_capita_index(\n",
1768
+ " df, filtered_countries=filtered_countries, filtered_states=filtered_states\n",
1769
+ " )\n",
1770
+ "\n",
1771
+ " # Calculate category percentage index - pass filtered countries/states\n",
1772
+ " df = calculate_category_percentage_index(\n",
1773
+ " df, filtered_countries=filtered_countries, filtered_states=filtered_states\n",
1774
+ " )\n",
1775
+ "\n",
1776
+ " # Calculate usage tiers - pass filtered countries/states to only use them\n",
1777
+ " df = calculate_usage_tiers(\n",
1778
+ " df, filtered_countries=filtered_countries, filtered_states=filtered_states\n",
1779
+ " )\n",
1780
+ "\n",
1781
+ " # Add collaboration categorization\n",
1782
+ " df = calculate_automation_augmentation_metrics(df)\n",
1783
+ "\n",
1784
+ " # Add ISO-3 codes and geographic names\n",
1785
+ " df = add_iso3_and_names(df)\n",
1786
+ "\n",
1787
+ " # Sort for consistent output ordering\n",
1788
+ " df = df.sort_values(\n",
1789
+ " [\"geography\", \"geo_id\", \"facet\", \"level\", \"cluster_name\", \"variable\"]\n",
1790
+ " )\n",
1791
+ "\n",
1792
+ " # Save enriched data as CSV\n",
1793
+ " df.to_csv(output_path, index=False)\n",
1794
+ "\n",
1795
+ " return str(output_path)"
1796
+ ]
1797
+ },
1798
+ {
1799
+ "cell_type": "markdown",
1800
+ "metadata": {},
1801
+ "source": [
1802
+ "## Merge External Data"
1803
+ ]
1804
+ },
1805
+ {
1806
+ "cell_type": "code",
1807
+ "execution_count": null,
1808
+ "metadata": {},
1809
+ "outputs": [],
1810
+ "source": [
1811
+ "input_path = \"../data/intermediate/aei_raw_claude_ai_2025-08-04_to_2025-08-11.csv\"\n",
1812
+ "output_path = \"../data/output/aei_enriched_claude_ai_2025-08-04_to_2025-08-11.csv\"\n",
1813
+ "\n",
1814
+ "enrich_clio_data(input_path, output_path)\n",
1815
+ "print(f\"\\n✅ Enrichment complete! Output: {output_path}\")"
1816
+ ]
1817
+ }
1818
+ ],
1819
+ "metadata": {
1820
+ "kernelspec": {
1821
+ "display_name": "py311",
1822
+ "language": "python",
1823
+ "name": "python3"
1824
+ },
1825
+ "language_info": {
1826
+ "codemirror_mode": {
1827
+ "name": "ipython",
1828
+ "version": 3
1829
+ },
1830
+ "file_extension": ".py",
1831
+ "mimetype": "text/x-python",
1832
+ "name": "python",
1833
+ "nbconvert_exporter": "python",
1834
+ "pygments_lexer": "ipython3",
1835
+ "version": "3.11.13"
1836
+ }
1837
+ },
1838
+ "nbformat": 4,
1839
+ "nbformat_minor": 4
1840
+ }
release_2025_09_15/code/preprocess_gdp.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Preprocess GDP data for economic analysis.
3
+
4
+ This script downloads and processes GDP data from:
5
+ 1. IMF API for country-level GDP data
6
+ 2. BEA (Bureau of Economic Analysis) for US state-level GDP data
7
+
8
+ Output files:
9
+ - gdp_YYYY_country.csv (e.g., gdp_2024_country.csv): Country-level total GDP
10
+ - gdp_YYYY_us_state.csv (e.g., gdp_2024_us_state.csv): US state-level total GDP
11
+ """
12
+
13
+ import io
14
+ import json
15
+ import warnings
16
+ from pathlib import Path
17
+
18
+ import httpx
19
+ import pandas as pd
20
+
21
# Global configuration
YEAR = 2024  # Target year for all GDP data
DATA_INPUT_DIR = Path("../data/input")  # Raw downloads / manually placed files
DATA_INTERMEDIATE_DIR = Path("../data/intermediate")  # Processed CSV outputs


# Countries where Claude AI service is not available
# These will be excluded from all GDP data
# NOTE: ISO-3 codes here; the companion population script lists the same
# countries as ISO-2 codes.
EXCLUDED_COUNTRIES = [
    "AFG",  # Afghanistan
    "BLR",  # Belarus
    "COD",  # Democratic Republic of the Congo
    "CAF",  # Central African Republic
    "CHN",  # China
    "CUB",  # Cuba
    "ERI",  # Eritrea
    "ETH",  # Ethiopia
    "HKG",  # Hong Kong
    "IRN",  # Iran
    "PRK",  # North Korea
    "LBY",  # Libya
    "MLI",  # Mali
    "MMR",  # Myanmar
    "MAC",  # Macau
    "NIC",  # Nicaragua
    "RUS",  # Russia
    "SDN",  # Sudan
    "SOM",  # Somalia
    "SSD",  # South Sudan
    "SYR",  # Syria
    "VEN",  # Venezuela
    "YEM",  # Yemen
]
54
+
55
+
56
def check_existing_files():
    """Return True (and skip processing) when both GDP output CSVs already exist."""
    country_csv = DATA_INTERMEDIATE_DIR / f"gdp_{YEAR}_country.csv"
    state_csv = DATA_INTERMEDIATE_DIR / f"gdp_{YEAR}_us_state.csv"

    # Either file missing means the pipeline still has work to do
    if not (country_csv.exists() and state_csv.exists()):
        return False

    print("✅ GDP files already exist:")
    print(f" - {country_csv}")
    print(f" - {state_csv}")
    print("Skipping GDP preprocessing. Delete these files if you want to re-run.")
    return True
68
+
69
+
70
def load_country_gdp_data():
    """
    Load country-level GDP data from cache or IMF API.

    Returns:
        dict: Raw GDP data from IMF API, or None if fetch fails
    """
    cache_path = DATA_INPUT_DIR / f"imf_gdp_raw_{YEAR}.json"

    # Serve from the local cache when a previous run already downloaded it
    if cache_path.exists():
        print("Loading cached IMF GDP data...")
        with open(cache_path) as f:
            return json.load(f)

    # IMF returns GDP in billions USD
    imf_total_gdp_url = "https://www.imf.org/external/datamapper/api/v1/NGDPD"

    print("Fetching GDP data from IMF API...")
    try:
        with httpx.Client() as client:
            resp = client.get(imf_total_gdp_url, timeout=30)
            resp.raise_for_status()
            payload = resp.json()
        print("✓ Successfully fetched total GDP data from IMF API")

        # Cache the raw payload so future runs skip the network call
        with open(cache_path, "w") as f:
            json.dump(payload, f, indent=2)
        print(f"✓ Saved raw GDP data to {cache_path}")

        return payload
    except Exception as e:
        raise ConnectionError(f"Failed to fetch data from IMF API: {e}") from e
103
+
104
+
105
def process_country_gdp_data(gdp_data):
    """
    Process IMF GDP data into standardized format.

    Args:
        gdp_data: Raw IMF API response

    Returns:
        pd.DataFrame: Processed country GDP data (excluding countries where service is not available)

    Raises:
        ValueError: If no country has GDP data for the target YEAR.

    Side effects:
        Writes DATA_INTERMEDIATE_DIR/gdp_{YEAR}_country.csv, reads
        iso_country_codes.csv, emits a UserWarning for countries with no
        data, and prints summary statistics.
    """
    # Extract GDP data for target year
    # Structure: {"values": {"NGDPD": {"countryiso3code": {"year": value}}}}
    gdp_values = gdp_data.get("values", {}).get("NGDPD", {})

    # Build records for target year data only
    gdp_records = []
    target_year = str(YEAR)  # IMF payload keys years as strings
    missing_countries = []

    for countryiso3code, years_data in gdp_values.items():
        if isinstance(years_data, dict):
            # Truthiness test also skips None/0/empty values for the year
            if target_year in years_data and years_data[target_year]:
                gdp_value = years_data[target_year]
                # Convert from billions to actual dollars
                gdp_records.append(
                    {
                        "iso_alpha_3": countryiso3code,
                        "gdp_total": float(gdp_value)
                        * 1e9,  # Convert billions to dollars
                        "year": YEAR,
                    }
                )
            else:
                missing_countries.append(countryiso3code)

    # Surface (but do not fail on) countries lacking data for YEAR
    if missing_countries:
        warnings.warn(
            f"{len(missing_countries)} countries missing {YEAR} GDP data. "
            f"Examples: {missing_countries[:5]}",
            UserWarning,
            stacklevel=2,
        )

    df_gdp = pd.DataFrame(gdp_records)

    if df_gdp.empty:
        raise ValueError(f"No GDP data available for year {YEAR}")

    # Apply country code mappings for mismatches between IMF and ISO3
    country_code_mappings = {
        "UVK": "XKX",  # Kosovo
        # Add more mappings as needed
    }

    for imf_code, iso3_code in country_code_mappings.items():
        df_gdp.loc[df_gdp["iso_alpha_3"] == imf_code, "iso_alpha_3"] = iso3_code

    # Filter to only keep countries with valid ISO-3 codes
    # This removes regional aggregates like ADVEC, AFQ, etc.
    # NOTE: depends on iso_country_codes.csv produced by preprocess_iso_codes.py
    iso_codes_path = DATA_INTERMEDIATE_DIR / "iso_country_codes.csv"
    # keep_default_na=False so the code "NA" (Namibia) is not parsed as NaN
    df_iso = pd.read_csv(iso_codes_path, keep_default_na=False, na_values=[""])
    valid_iso3_codes = set(df_iso["iso_alpha_3"].unique())

    initial_aggregate_count = len(df_gdp)
    df_gdp = df_gdp[df_gdp["iso_alpha_3"].isin(valid_iso3_codes)]
    filtered_aggregates = initial_aggregate_count - len(df_gdp)

    if filtered_aggregates > 0:
        print(
            f" Filtered out {filtered_aggregates} non-country codes (regional aggregates)"
        )

    # Filter out excluded countries (now using 3-letter codes directly)
    initial_count = len(df_gdp)
    df_gdp = df_gdp[~df_gdp["iso_alpha_3"].isin(EXCLUDED_COUNTRIES)]
    excluded_count = initial_count - len(df_gdp)

    if excluded_count > 0:
        print(f" Excluded {excluded_count} countries where service is not available")

    # Save processed GDP data
    processed_gdp_path = DATA_INTERMEDIATE_DIR / f"gdp_{YEAR}_country.csv"
    df_gdp.to_csv(processed_gdp_path, index=False)

    print(f"✓ Saved processed GDP data to {processed_gdp_path}")
    print(f" Countries with {YEAR} GDP data: {len(df_gdp)}")
    print(f" Countries excluded (service not available): {len(EXCLUDED_COUNTRIES)}")
    print(f" Total global GDP: ${df_gdp['gdp_total'].sum() / 1e12:.2f} trillion")

    return df_gdp
195
+
196
+
197
def load_state_gdp_data():
    """
    Load US state GDP data from BEA file.

    Returns:
        pd.DataFrame: Raw state GDP data

    Raises:
        FileNotFoundError: If the manually downloaded BEA CSV is missing;
            the message explains how to obtain it.
    """
    bea_path = DATA_INPUT_DIR / f"bea_us_state_gdp_{YEAR}.csv"

    if not bea_path.exists():
        instructions = f"""
    State GDP data not found at: {bea_path}

    To obtain this data:
    1. Go to: https://apps.bea.gov/itable/?ReqID=70&step=1
    2. Select: SASUMMARY State annual summary statistics (area = "United States", statistic = Gross domestic product (GDP), unit of measure = "Levels")
    3. Download the CSV file for year {YEAR}
    4. Save it as: bea_us_state_gdp_{YEAR}.csv
    5. Place it in your data input directory
    """
        raise FileNotFoundError(instructions)

    print("Loading US state GDP data...")
    # The first 3 rows of the BEA export are metadata, not data
    frame = pd.read_csv(bea_path, skiprows=3)
    frame.columns = ["GeoFips", "State", f"gdp_{YEAR}_millions"]

    return frame
225
+
226
+
227
def process_state_gdp_data(df_state_gdp_raw):
    """
    Process BEA state GDP data into standardized format.

    Args:
        df_state_gdp_raw: Raw BEA data

    Returns:
        pd.DataFrame: Processed state GDP data

    Raises:
        ValueError: If any BEA state name cannot be mapped to a Census state code.

    Side effects:
        Writes DATA_INTERMEDIATE_DIR/gdp_{YEAR}_us_state.csv and prints summaries.
    """

    # Remove the US total row (GeoFips = "00000")
    df_state_gdp = df_state_gdp_raw[df_state_gdp_raw["GeoFips"] != "00000"].copy()

    # Remove all rows starting from empty line before "Legend/Footnotes" marker
    # BEA files have footer information after the data, with an empty line before
    # NOTE(review): `.index[0]` returns an index *label*, which is then used with
    # positional `.iloc`. After dropping the "00000" row above, labels and
    # positions differ by one; the slice appears to rely on that offset to also
    # drop the blank row before the legend — confirm against a real BEA file
    # before changing this.
    legend_index = (
        df_state_gdp[
            df_state_gdp["GeoFips"].str.contains("Legend", case=False, na=False)
        ].index[0]
        - 1
    )
    df_state_gdp = df_state_gdp.iloc[:legend_index].copy()
    print(f" Removed footer rows starting from 'Legend/Footnotes'")

    # Convert GDP from millions to actual dollars
    df_state_gdp["gdp_total"] = df_state_gdp[f"gdp_{YEAR}_millions"] * 1e6

    # Clean state names
    df_state_gdp["State"] = df_state_gdp["State"].str.strip()

    # Get state codes (full name -> two-letter abbreviation, from Census data)
    state_code_dict = get_state_codes()
    df_state_gdp["state_code"] = df_state_gdp["State"].map(state_code_dict)

    # Check for missing state codes
    missing_codes = df_state_gdp[df_state_gdp["state_code"].isna()]
    if not missing_codes.empty:
        raise ValueError(
            f"Could not find state codes for: {missing_codes['State'].tolist()}\n"
            f"All BEA state names should match Census state codes after filtering."
        )

    # Select and rename columns
    df_state_gdp_final = df_state_gdp[
        ["state_code", "State", "gdp_total", f"gdp_{YEAR}_millions"]
    ].copy()
    df_state_gdp_final.columns = [
        "state_code",
        "state_name",
        "gdp_total",
        "gdp_millions",
    ]
    df_state_gdp_final["year"] = YEAR

    # Save processed state GDP data
    processed_state_gdp_path = DATA_INTERMEDIATE_DIR / f"gdp_{YEAR}_us_state.csv"
    df_state_gdp_final.to_csv(processed_state_gdp_path, index=False)

    print(
        f"✓ Processed state GDP data for {len(df_state_gdp_final)} states/territories"
    )
    print(
        f" Total US GDP: ${df_state_gdp_final['gdp_total'].sum() / 1e12:.2f} trillion"
    )
    print(f"✓ Saved to {processed_state_gdp_path}")

    return df_state_gdp_final
295
+
296
+
297
def get_state_codes():
    """
    Get US state codes from Census Bureau.

    Returns:
        dict: Mapping of state names to abbreviations
    """
    cache_file = DATA_INPUT_DIR / "census_state_codes.txt"

    if cache_file.exists():
        print(" Loading cached state codes...")
        df_codes = pd.read_csv(cache_file, sep="|")
    else:
        print(" Downloading state codes from Census Bureau...")
        resp = httpx.get("https://www2.census.gov/geo/docs/reference/state.txt")
        resp.raise_for_status()

        # Keep a copy so the next run can skip the download
        with open(cache_file, "w") as f:
            f.write(resp.text)
        print(f" Cached state codes to {cache_file}")

        df_codes = pd.read_csv(io.StringIO(resp.text), sep="|")

    # strict=True: fail loudly if the Census file ever has ragged columns
    return dict(zip(df_codes["STATE_NAME"], df_codes["STUSAB"], strict=True))
327
+
328
+
329
def main():
    """Main function to run GDP preprocessing."""
    # Nothing to do when both output CSVs already exist
    if check_existing_files():
        return

    banner = "=" * 60
    print(banner)
    print(f"PROCESSING {YEAR} GDP DATA")
    print(banner)

    # Country-level GDP from the IMF API
    print(f"\n=== Country-Level GDP (IMF) - Year {YEAR} ===")
    df_gdp_country = process_country_gdp_data(load_country_gdp_data())

    # US state-level GDP from the BEA file
    print(f"\n=== US State-Level GDP (BEA) - Year {YEAR} ===")
    df_gdp_state = process_state_gdp_data(load_state_gdp_data())

    # Final status
    print(f"\n✅ {YEAR} GDP data preprocessing complete!")
    print("\n=== Summary Statistics ===")
    if df_gdp_country is not None:
        print(f"Countries processed: {len(df_gdp_country)}")
        print(f"Countries excluded (service not available): {len(EXCLUDED_COUNTRIES)}")
        print(
            f"Total global GDP: ${df_gdp_country['gdp_total'].sum() / 1e12:.2f} trillion"
        )
    if df_gdp_state is not None:
        print(f"US states processed: {len(df_gdp_state)}")
        print(f"Total US GDP: ${df_gdp_state['gdp_total'].sum() / 1e12:.2f} trillion")
361
+
362
+
363
if __name__ == "__main__":
    # Run the full GDP preprocessing pipeline when executed as a script
    main()
release_2025_09_15/code/preprocess_iso_codes.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Fetch ISO country code mappings from GeoNames.
3
+
4
+ This script fetches comprehensive country data from GeoNames countryInfo.txt
5
+ and saves it as a CSV file for use in data preprocessing pipelines.
6
+ """
7
+
8
+ import io
9
+ from pathlib import Path
10
+
11
+ import httpx
12
+ import pandas as pd
13
+
14
+
15
def fetch_country_mappings(save_raw=True):
    """
    Fetch country code mappings from GeoNames.

    Args:
        save_raw: Whether to save raw data file to data/input

    Returns:
        pd.DataFrame: DataFrame with country information from GeoNames
    """
    geonames_url = "https://download.geonames.org/export/dump/countryInfo.txt"

    # Fetch countryInfo.txt from GeoNames
    with httpx.Client() as client:
        resp = client.get(geonames_url)
        resp.raise_for_status()
        content = resp.text

    # Optionally keep a copy of the raw file for reference
    if save_raw:
        raw_dir = Path("../data/input")
        raw_dir.mkdir(parents=True, exist_ok=True)

        raw_path = raw_dir / "geonames_countryInfo.txt"
        with open(raw_path, "w", encoding="utf-8") as f:
            f.write(content)

    # The column names live in the file's final comment line
    comment_lines = [ln for ln in content.split("\n") if ln.startswith("#")]
    column_names = comment_lines[-1][1:].split("\t")  # drop '#', split on tabs

    # Parse the tab-separated body.
    # keep_default_na=False prevents the ISO code "NA" (Namibia) from becoming NaN.
    parsed = pd.read_csv(
        io.StringIO(content),
        sep="\t",
        comment="#",
        header=None,  # the data rows carry no header of their own
        keep_default_na=False,
        na_values=[""],  # only empty strings count as missing
        names=column_names,
    )

    # Standardize the column names used downstream
    return parsed.rename(
        columns={"ISO": "iso_alpha_2", "ISO3": "iso_alpha_3", "Country": "country_name"}
    )
65
+
66
+
67
def create_country_dataframe(geonames_df):
    """
    Create a cleaned DataFrame with country codes and names.

    Args:
        geonames_df: DataFrame from GeoNames with all country information

    Returns:
        pd.DataFrame: DataFrame with columns [iso_alpha_2, iso_alpha_3, country_name]
    """
    wanted = ["iso_alpha_2", "iso_alpha_3", "country_name"]

    # Keep only the identifier columns, alphabetized by name with a fresh index
    cleaned = geonames_df[wanted].copy()
    cleaned = cleaned.sort_values("country_name").reset_index(drop=True)

    return cleaned
84
+
85
+
86
def save_country_codes(output_path="../data/intermediate/iso_country_codes.csv"):
    """
    Fetch country codes from GeoNames and save to CSV.

    Args:
        output_path: Path to save the CSV file
    """
    # Download the full GeoNames table, then trim it to the columns we keep
    df = create_country_dataframe(fetch_country_mappings())

    # Write the CSV, creating parent directories as needed
    target = Path(output_path)
    target.parent.mkdir(parents=True, exist_ok=True)
    df.to_csv(target, index=False)

    return df
107
+
108
+
109
if __name__ == "__main__":
    # Fetch the GeoNames table and write iso_country_codes.csv
    df = save_country_codes()
release_2025_09_15/code/preprocess_onet.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Preprocess O*NET and SOC data for economic analysis.
3
+
4
+ This script downloads and processes occupational data from:
5
+ 1. O*NET Resource Center for task statements
6
+ 2. O*NET Resource Center for SOC structure
7
+
8
+ Output files:
9
+ - onet_task_statements.csv: O*NET task statements with SOC major groups
10
+ - soc_structure.csv: SOC occupational classification structure
11
+ """
12
+
13
+ import io
14
+ import os
15
+ import tempfile
16
+ from pathlib import Path
17
+
18
+ import httpx
19
+ import pandas as pd
20
+
21
# Global configuration
DATA_INPUT_DIR = Path("../data/input")  # Cached raw downloads live here
DATA_INTERMEDIATE_DIR = Path("../data/intermediate")  # Processed CSV outputs
24
+
25
+
26
def check_existing_files():
    """Return True (and skip processing) when both O*NET/SOC output CSVs exist."""
    tasks_csv = DATA_INTERMEDIATE_DIR / "onet_task_statements.csv"
    soc_csv = DATA_INTERMEDIATE_DIR / "soc_structure.csv"

    # Either file missing means the pipeline still has work to do
    if not (tasks_csv.exists() and soc_csv.exists()):
        return False

    print("✅ SOC/O*NET files already exist:")
    print(f" - {tasks_csv}")
    print(f" - {soc_csv}")
    print("Skipping SOC preprocessing. Delete these files if you want to re-run.")
    return True
38
+
39
+
40
def load_task_data():
    """
    Load O*NET Task Statements from cache or O*NET Resource Center.

    Returns:
        pd.DataFrame: O*NET task statements data

    Raises:
        ConnectionError: If the download from the O*NET Resource Center fails.
    """
    # Check if raw data already exists
    raw_onet_path = DATA_INPUT_DIR / "onet_task_statements_raw.xlsx"
    if raw_onet_path.exists():
        return pd.read_excel(raw_onet_path)

    # Download if not cached
    # O*NET Database version 20.1
    onet_url = "https://www.onetcenter.org/dl_files/database/db_20_1_excel/Task%20Statements.xlsx"

    print("Downloading O*NET task statements...")
    try:
        with httpx.Client(follow_redirects=True) as client:
            response = client.get(onet_url, timeout=60)
            response.raise_for_status()
            excel_content = response.content

        # Save raw data for future use
        with open(raw_onet_path, "wb") as f:
            f.write(excel_content)

        # Read straight from the in-memory bytes — the previous tempfile
        # round-trip (NamedTemporaryFile + os.unlink) was unnecessary since
        # pandas accepts any file-like buffer.
        return pd.read_excel(io.BytesIO(excel_content))
    except Exception as e:
        raise ConnectionError(f"Failed to download O*NET data: {e}") from e
80
+
81
+
82
def process_task_data(df_tasks):
    """
    Process task statements data.

    Args:
        df_tasks: Raw task data

    Returns:
        pd.DataFrame: Processed O*NET data with SOC major groups
    """
    # The first two digits of the O*NET-SOC code identify the SOC major group
    df_tasks["soc_major_group"] = df_tasks["O*NET-SOC Code"].str[:2]

    # Persist for downstream steps
    out_path = DATA_INTERMEDIATE_DIR / "onet_task_statements.csv"
    df_tasks.to_csv(out_path, index=False)

    n_occupations = df_tasks["O*NET-SOC Code"].nunique()
    print(
        f"✓ Processed {len(df_tasks):,} task statements from {n_occupations} occupations"
    )

    return df_tasks
104
+
105
+
106
def load_soc_data():
    """
    Load SOC Structure from cache or O*NET Resource Center.

    Returns:
        pd.DataFrame: SOC structure data

    Raises:
        ConnectionError: If the download fails.
    """
    raw_soc_path = DATA_INPUT_DIR / "soc_structure_raw.csv"

    # Prefer the local cache if a previous run saved it
    if raw_soc_path.exists():
        return pd.read_csv(raw_soc_path)

    soc_url = "https://www.onetcenter.org/taxonomy/2019/structure/?fmt=csv"

    print("Downloading SOC structure...")
    try:
        with httpx.Client(follow_redirects=True) as client:
            resp = client.get(soc_url, timeout=30)
            resp.raise_for_status()
            soc_content = resp.text

        # Cache the raw CSV for future runs
        with open(raw_soc_path, "w") as f:
            f.write(soc_content)

        # Parse the CSV
        return pd.read_csv(io.StringIO(soc_content))
    except Exception as e:
        raise ConnectionError(f"Failed to download SOC structure: {e}") from e
137
+
138
+
139
def process_soc_data(df_soc):
    """
    Process SOC structure data.

    Args:
        df_soc: Raw SOC structure data

    Returns:
        pd.DataFrame: Processed SOC structure
    """
    # "11-0000" -> "11": the 2-digit SOC major group prefix
    df_soc["soc_major_group"] = df_soc["Major Group"].str[:2]

    # Persist for downstream steps
    out_path = DATA_INTERMEDIATE_DIR / "soc_structure.csv"
    df_soc.to_csv(out_path, index=False)

    print(f"✓ Processed {len(df_soc):,} SOC entries")

    return df_soc
159
+
160
+
161
def main():
    """Main function to run O*NET/SOC preprocessing."""
    # Nothing to do when both outputs already exist
    if check_existing_files():
        return

    # Task statements first, then the SOC occupational structure
    process_task_data(load_task_data())
    process_soc_data(load_soc_data())

    print("\n✅ O*NET/SOC data preprocessing complete!")
176
+
177
+
178
+ if __name__ == "__main__":
179
+ main()
release_2025_09_15/code/preprocess_population.py ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Preprocess population data for economic analysis.
3
+
4
+ This script downloads and processes working-age population data (ages 15-64) from:
5
+ 1. World Bank API for country-level data
6
+ 2. Taiwan National Development Council for Taiwan data (not in World Bank)
7
+ 3. US Census Bureau for US state-level data
8
+
9
+ Output files:
10
+ - working_age_pop_YYYY_country.csv (e.g., working_age_pop_2024_country.csv): Country-level working age population
11
+ - working_age_pop_YYYY_us_state.csv (e.g., working_age_pop_2024_us_state.csv): US state-level working age population
12
+ """
13
+
14
+ import io
15
+ import warnings
16
+ from pathlib import Path
17
+
18
+ import httpx
19
+ import pandas as pd
20
+
21
# Global configuration
YEAR = 2024  # Target year for all population data
DATA_INPUT_DIR = Path("../data/input")  # Raw downloads / manually placed files
DATA_INTERMEDIATE_DIR = Path("../data/intermediate")  # Processed CSV outputs

# Countries where Claude AI service is not available
# These will be excluded from all population data
# NOTE: ISO-2 codes here (matching World Bank "country.id"); the companion GDP
# script lists the same countries as ISO-3 codes.
EXCLUDED_COUNTRIES = [
    "AF",  # Afghanistan
    "BY",  # Belarus
    "CD",  # Democratic Republic of the Congo
    "CF",  # Central African Republic
    "CN",  # China
    "CU",  # Cuba
    "ER",  # Eritrea
    "ET",  # Ethiopia
    "HK",  # Hong Kong
    "IR",  # Iran
    "KP",  # North Korea
    "LY",  # Libya
    "ML",  # Mali
    "MM",  # Myanmar
    "MO",  # Macau
    "NI",  # Nicaragua
    "RU",  # Russia
    "SD",  # Sudan
    "SO",  # Somalia
    "SS",  # South Sudan
    "SY",  # Syria
    "VE",  # Venezuela
    "YE",  # Yemen
]
53
+
54
+
55
def check_existing_files():
    """Return True (and skip processing) when both population output CSVs exist."""
    country_csv = DATA_INTERMEDIATE_DIR / f"working_age_pop_{YEAR}_country.csv"
    state_csv = DATA_INTERMEDIATE_DIR / f"working_age_pop_{YEAR}_us_state.csv"

    # Either file missing means the pipeline still has work to do
    if not (country_csv.exists() and state_csv.exists()):
        return False

    print("✅ Population files already exist:")
    print(f" - {country_csv}")
    print(f" - {state_csv}")
    print(
        "Skipping population preprocessing. Delete these files if you want to re-run."
    )
    return True
73
+
74
+
75
def load_world_bank_population_data():
    """
    Load country-level working age population data from cache or World Bank API.

    Returns:
        pd.DataFrame: Raw population data from World Bank
    """
    # Check if raw data already exists
    raw_country_pop_path = DATA_INPUT_DIR / f"working_age_pop_{YEAR}_country_raw.csv"
    if raw_country_pop_path.exists():
        print("Loading cached country population data...")
        # keep_default_na=False so the ISO-2 code "NA" (Namibia) survives as a string
        return pd.read_csv(raw_country_pop_path, keep_default_na=False, na_values=[""])

    # Download if not cached
    url = "https://api.worldbank.org/v2/country/all/indicator/SP.POP.1564.TO"
    params = {"format": "json", "date": str(YEAR), "per_page": "1000"}

    print("Downloading country population data from World Bank API...")
    response = httpx.get(url, params=params)
    response.raise_for_status()

    # World Bank API returns [metadata, data] structure
    data = response.json()[1]
    df_raw = pd.json_normalize(data)

    # Bug fix: persist the raw download so the cache branch above can hit on
    # future runs (previously the cache path was checked but never written,
    # matching none of the other loaders in this pipeline).
    DATA_INPUT_DIR.mkdir(parents=True, exist_ok=True)
    df_raw.to_csv(raw_country_pop_path, index=False)

    return df_raw
101
+
102
+
103
def filter_to_country_level_data(df_raw):
    """
    Filter World Bank data to exclude regional aggregates and keep only countries.

    The World Bank data starts with regional aggregates (Arab World, Caribbean small states, etc.)
    followed by actual countries starting with Afghanistan (AFG).

    Args:
        df_raw: Raw World Bank data

    Returns:
        pd.DataFrame: Filtered data with only country-level records

    Raises:
        ValueError: If no AFG row is present (unexpected API response shape).
    """
    # Find Afghanistan (AFG) - the first real country after aggregates.
    # Use a *positional* match rather than `.index[0]` (an index label) so the
    # subsequent `.iloc` slice is correct even when df_raw carries a
    # non-default index (e.g. after upstream filtering).
    matches = (df_raw["countryiso3code"] == "AFG").to_numpy().nonzero()[0]
    if len(matches) == 0:
        raise ValueError("Could not find Afghanistan (AFG) in World Bank data")
    afg_pos = int(matches[0])

    # Keep everything from AFG onwards
    df_filtered = df_raw.iloc[afg_pos:].copy()
    print(f"Filtered to {len(df_filtered)} countries (excluding regional aggregates)")

    return df_filtered
124
+
125
+
126
def process_country_population_data(df_raw):
    """
    Process raw World Bank population data.

    Args:
        df_raw: Raw data from World Bank API

    Returns:
        pd.DataFrame: Processed country population data (excluding countries where service is not available)
    """
    # Drop the leading regional-aggregate rows, keeping real countries only
    countries = filter_to_country_level_data(df_raw)

    # Keep just the columns we need, under standardized names
    renamed = countries[
        ["countryiso3code", "date", "value", "country.id", "country.value"]
    ].copy()
    renamed.columns = [
        "iso_alpha_3",
        "year",
        "working_age_pop",
        "country_code",
        "country_name",
    ]

    # Year arrives as a string; rows without a population value are unusable
    renamed["year"] = pd.to_numeric(renamed["year"])
    renamed = renamed.dropna(subset=["working_age_pop"])

    # Channel Islands carries the non-standard code "JG"; drop it
    jg_rows = renamed["country_code"] == "JG"
    if jg_rows.any():
        print("Removing Channel Islands entry with invalid code 'JG'")
        renamed = renamed[~jg_rows].copy()

    # Drop countries where the service is unavailable
    before = len(renamed)
    renamed = renamed[~renamed["country_code"].isin(EXCLUDED_COUNTRIES)]
    dropped = before - len(renamed)
    if dropped > 0:
        print(f"Excluded {dropped} countries where service is not available")

    return renamed
171
+
172
+
173
def add_taiwan_population(df_country):
    """
    Add Taiwan population data from National Development Council.

    The World Bank API excludes Taiwan, so we use data directly from Taiwan's NDC.
    Source: https://pop-proj.ndc.gov.tw/main_en/Custom_Detail_Statistics_Search.aspx

    Args:
        df_country: Country population dataframe

    Returns:
        pd.DataFrame: Country data with Taiwan added

    Raises:
        FileNotFoundError: If the manually downloaded NDC CSV is missing;
            the message explains how to obtain it.
    """
    taiwan_file = DATA_INPUT_DIR / "Population by single age _20250903072924.csv"

    if not taiwan_file.exists():
        error_msg = f"""
    Taiwan population data not found at: {taiwan_file}

    To obtain this data:
    1. Go to: https://pop-proj.ndc.gov.tw/main_en/Custom_Detail_Statistics_Search.aspx?n=175&_Query=258170a1-1394-49fe-8d21-dc80562b72fb&amp;page=1&amp;PageSize=10&amp;ToggleType=
    2. The following options should have been selected:
       - Estimate type: Medium variant
       - Gender: Total
       - Year: {YEAR}
       - Age: Single age (ages 15-64)
       - Data attribute: data value
    3. Download the CSV file
    4. Save it as: "Population by single age _20250903072924.csv"
    5. Place it in your data input directory

    Note: Taiwan data is not available from World Bank API and must be obtained separately.
    """
        raise FileNotFoundError(error_msg)

    print("Adding Taiwan population data from NDC...")

    # Load the NDC data (skip metadata rows)
    # NOTE(review): skiprows=10 is tied to the exact NDC export layout — verify
    # if the download format ever changes.
    df_taiwan = pd.read_csv(taiwan_file, skiprows=10)

    # Clean the age column (values are prefixed with an apostrophe) and coerce
    # to numeric; then sum the population values
    df_taiwan["Age"] = df_taiwan["Age"].str.replace("'", "")
    df_taiwan["Age"] = pd.to_numeric(df_taiwan["Age"])

    # The data is pre-filtered to ages 15-64, so sum all values
    taiwan_working_age_pop = df_taiwan["Data value (persons)"].sum()

    # Create Taiwan row matching the World Bank dataframe's column schema
    taiwan_row = pd.DataFrame(
        {
            "iso_alpha_3": ["TWN"],
            "year": [YEAR],
            "working_age_pop": [taiwan_working_age_pop],
            "country_code": ["TW"],
            "country_name": ["Taiwan"],
        }
    )

    # Add Taiwan to the country data
    df_with_taiwan = pd.concat([df_country, taiwan_row], ignore_index=True)
    print(f"Added Taiwan: {taiwan_working_age_pop:,.0f} working age population")

    return df_with_taiwan
236
+
237
+
238
def load_us_state_population_data():
    """
    Load US state population data from cache or Census Bureau.

    Returns:
        pd.DataFrame: Raw US state population data
    """
    # Check if raw data already exists
    raw_state_pop_path = DATA_INPUT_DIR / f"sc-est{YEAR}-agesex-civ.csv"
    if raw_state_pop_path.exists():
        print("Loading cached state population data...")
        return pd.read_csv(raw_state_pop_path)

    # Download if not cached
    url = f"https://www2.census.gov/programs-surveys/popest/datasets/2020-{YEAR}/state/asrh/sc-est{YEAR}-agesex-civ.csv"

    print("Downloading US state population data from Census Bureau...")
    response = httpx.get(url)
    response.raise_for_status()

    # Bug fix: persist the raw CSV so the cache check above succeeds on future
    # runs (previously the cache path was checked but never written).
    DATA_INPUT_DIR.mkdir(parents=True, exist_ok=True)
    raw_state_pop_path.write_text(response.text)

    return pd.read_csv(io.StringIO(response.text))
260
+
261
+
262
def process_state_population_data(df_raw):
    """
    Process US state population data to get working age population.

    Args:
        df_raw: Raw Census Bureau data

    Returns:
        pd.DataFrame: Processed state population data with state codes
    """
    # Restrict to ages 15-64 and the "Both sexes" rows (SEX == 0) so that
    # male/female breakdown rows are not double counted.
    working_age_mask = (
        (df_raw["AGE"] >= 15) & (df_raw["AGE"] <= 64) & (df_raw["SEX"] == 0)
    )
    pop_col = f"POPEST{YEAR}_CIV"

    # Aggregate the single-age rows into one working-age total per state.
    df_states = (
        df_raw[working_age_mask]
        .groupby("NAME")[pop_col]
        .sum()
        .reset_index()
        .rename(columns={"NAME": "state", pop_col: "working_age_pop"})
    )

    # Name -> USPS abbreviation lookup.
    state_code_dict = get_state_codes()

    # The file also carries a national total row; it is not a state.
    df_states = df_states[df_states["state"] != "United States"]

    df_states["state_code"] = df_states["state"].map(state_code_dict)

    # After dropping "United States" every remaining name should map; warn
    # rather than fail if the Census naming ever drifts.
    unmapped = df_states[df_states["state_code"].isna()]
    if not unmapped.empty:
        warnings.warn(
            f"Could not find state codes for: {unmapped['state'].tolist()}",
            UserWarning,
            stacklevel=2,
        )

    return df_states
307
+
308
+
309
def get_state_codes():
    """
    Get US state codes from Census Bureau.

    Returns:
        dict: Mapping of state names to abbreviations
    """
    state_codes_path = DATA_INPUT_DIR / "census_state_codes.txt"

    if state_codes_path.exists():
        print("Loading cached state codes...")
        df_codes = pd.read_csv(state_codes_path, sep="|")
    else:
        print("Downloading state codes from Census Bureau...")
        response = httpx.get("https://www2.census.gov/geo/docs/reference/state.txt")
        response.raise_for_status()

        # Persist the raw text so subsequent runs can skip the download.
        with open(state_codes_path, "w") as f:
            f.write(response.text)
        print(f"Cached state codes to {state_codes_path}")

        df_codes = pd.read_csv(io.StringIO(response.text), sep="|")

    # strict=True raises if the two columns ever disagree in length,
    # which would indicate a malformed source file.
    return dict(zip(df_codes["STATE_NAME"], df_codes["STUSAB"], strict=True))
339
+
340
+
341
def save_data(df_country, df_state, df_world_bank_raw, df_state_raw):
    """
    Save raw and processed population data.

    Args:
        df_country: Processed country population data
        df_state: Processed state population data
        df_world_bank_raw: Raw World Bank data
        df_state_raw: Raw Census Bureau data
    """
    # Raw snapshots act as download caches: written once, never overwritten.
    raw_country_path = DATA_INPUT_DIR / f"working_age_pop_{YEAR}_country_raw.csv"
    if not raw_country_path.exists():
        df_world_bank_raw.to_csv(raw_country_path, index=False)
        print(f"Saved raw country data to {raw_country_path}")

    raw_state_path = DATA_INPUT_DIR / f"sc-est{YEAR}-agesex-civ.csv"
    if not raw_state_path.exists():
        df_state_raw.to_csv(raw_state_path, index=False)
        print(f"Saved raw state data to {raw_state_path}")

    # Processed outputs are always rewritten so they reflect this run.
    country_output_path = (
        DATA_INTERMEDIATE_DIR / f"working_age_pop_{YEAR}_country.csv"
    )
    df_country.to_csv(country_output_path, index=False)
    print(f"Saved processed country population data to {country_output_path}")

    state_output_path = (
        DATA_INTERMEDIATE_DIR / f"working_age_pop_{YEAR}_us_state.csv"
    )
    df_state.to_csv(state_output_path, index=False)
    print(f"Saved processed US state population data to {state_output_path}")
370
+
371
+
372
def main():
    """Main function to run population preprocessing."""
    # Nothing to do when the processed outputs already exist.
    if check_existing_files():
        return

    # Country-level: World Bank data plus Taiwan (absent from the API).
    print("\n=== Processing Country-Level Population Data ===")
    df_world_bank_raw = load_world_bank_population_data()
    df_country = add_taiwan_population(
        process_country_population_data(df_world_bank_raw)
    )

    # US state-level: Census Bureau civilian estimates.
    print("\n=== Processing US State-Level Population Data ===")
    df_state_raw = load_us_state_population_data()
    df_state = process_state_population_data(df_state_raw)

    # Persist both the raw downloads and the processed tables.
    print("\n=== Saving Data ===")
    save_data(df_country, df_state, df_world_bank_raw, df_state_raw)

    print("\n✅ Population data preprocessing complete!")

    print("\n=== Summary Statistics ===")
    print(f"Countries processed: {len(df_country)}")
    print(f"Countries excluded (service not available): {len(EXCLUDED_COUNTRIES)}")
    print(
        f"Total global working age population: {df_country['working_age_pop'].sum():,.0f}"
    )
    print(f"US states processed: {len(df_state)}")
    print(f"Total US working age population: {df_state['working_age_pop'].sum():,.0f}")
404
+
405
+
406
+ if __name__ == "__main__":
407
+ main()
release_2025_09_15/data/input/BTOS_National.xlsx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c55e2fc6892941a1a536e87445de0bee8ea327526081389b93b85c54a8d69761
3
+ size 63052
release_2025_09_15/data/input/Population by single age _20250903072924.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89c1e953dbab481760a966c40bdb121ed4e301b4cd0cbaea8a44990caa91ce8e
3
+ size 2176
release_2025_09_15/data/input/automation_vs_augmentation_v1.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1e264882b17618db4f4a00b6f87f48134222bc5c15eefb3d46aae9519e89d11
3
+ size 197
release_2025_09_15/data/input/automation_vs_augmentation_v2.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d7d8b1666f3d942d728f9b2177681ca6756edfe01fb8fc130e29264d41a391e
3
+ size 198
release_2025_09_15/data/input/bea_us_state_gdp_2024.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:913bc0d017570e711c71bc838016b3b52cbf49e717d0cabc4cc2d70306acfa5a
3
+ size 1663
release_2025_09_15/data/input/census_state_codes.txt ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ STATE|STUSAB|STATE_NAME|STATENS
2
+ 01|AL|Alabama|01779775
3
+ 02|AK|Alaska|01785533
4
+ 04|AZ|Arizona|01779777
5
+ 05|AR|Arkansas|00068085
6
+ 06|CA|California|01779778
7
+ 08|CO|Colorado|01779779
8
+ 09|CT|Connecticut|01779780
9
+ 10|DE|Delaware|01779781
10
+ 11|DC|District of Columbia|01702382
11
+ 12|FL|Florida|00294478
12
+ 13|GA|Georgia|01705317
13
+ 15|HI|Hawaii|01779782
14
+ 16|ID|Idaho|01779783
15
+ 17|IL|Illinois|01779784
16
+ 18|IN|Indiana|00448508
17
+ 19|IA|Iowa|01779785
18
+ 20|KS|Kansas|00481813
19
+ 21|KY|Kentucky|01779786
20
+ 22|LA|Louisiana|01629543
21
+ 23|ME|Maine|01779787
22
+ 24|MD|Maryland|01714934
23
+ 25|MA|Massachusetts|00606926
24
+ 26|MI|Michigan|01779789
25
+ 27|MN|Minnesota|00662849
26
+ 28|MS|Mississippi|01779790
27
+ 29|MO|Missouri|01779791
28
+ 30|MT|Montana|00767982
29
+ 31|NE|Nebraska|01779792
30
+ 32|NV|Nevada|01779793
31
+ 33|NH|New Hampshire|01779794
32
+ 34|NJ|New Jersey|01779795
33
+ 35|NM|New Mexico|00897535
34
+ 36|NY|New York|01779796
35
+ 37|NC|North Carolina|01027616
36
+ 38|ND|North Dakota|01779797
37
+ 39|OH|Ohio|01085497
38
+ 40|OK|Oklahoma|01102857
39
+ 41|OR|Oregon|01155107
40
+ 42|PA|Pennsylvania|01779798
41
+ 44|RI|Rhode Island|01219835
42
+ 45|SC|South Carolina|01779799
43
+ 46|SD|South Dakota|01785534
44
+ 47|TN|Tennessee|01325873
45
+ 48|TX|Texas|01779801
46
+ 49|UT|Utah|01455989
47
+ 50|VT|Vermont|01779802
48
+ 51|VA|Virginia|01779803
49
+ 53|WA|Washington|01779804
50
+ 54|WV|West Virginia|01779805
51
+ 55|WI|Wisconsin|01779806
52
+ 56|WY|Wyoming|01779807
53
+ 60|AS|American Samoa|01802701
54
+ 66|GU|Guam|01802705
55
+ 69|MP|Northern Mariana Islands|01779809
56
+ 72|PR|Puerto Rico|01779808
57
+ 74|UM|U.S. Minor Outlying Islands|01878752
58
+ 78|VI|U.S. Virgin Islands|01802710