jbarrow committed
Commit 42ae373 · verified · 1 Parent(s): 1ef33d6

Upload LOCUS-Function weights, tokenizer, and model card

Files changed (5)
  1. README.md +80 -0
  2. config.json +91 -0
  3. model.safetensors +3 -0
  4. tokenizer.json +0 -0
  5. tokenizer_config.json +17 -0
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ base_model: answerdotai/ModernBERT-base
+ library_name: transformers
+ pipeline_tag: text-classification
+ tags:
+ - text-classification
+ - legal
+ - locus
+ - modernbert
+ license: apache-2.0
+ datasets:
+ - LocalLaws/LOCUS-v1.0
+ ---
+
+ # LocalLaws/LOCUS-Function
+
+ A ModernBERT classifier for the **Primary Function** axis of the LOCUS
+ (Local Ordinances Corpus, United States) dataset.
+
+ Fine-tuned from [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) on
+ [LocalLaws/LOCUS-v1.0](https://huggingface.co/datasets/LocalLaws/LOCUS-v1.0).
+
+ ## Labels
+
+ - `Context`
+ - `Enforcement`
+ - `Process`
+ - `Rules`
+ - `Structural`
+
+ ## Training
+
+ | | |
+ |---|---|
+ | Base model | `answerdotai/ModernBERT-base` |
+ | Max length | 1024 |
+ | Classifier pooling | `mean` |
+ | Train / val / test | 79106 / 10447 / 10447 |
+
+ ## Evaluation
+
+ | | |
+ |---|---|
+ | Metric | macro-F1 |
+ | Validation macro-F1 | 0.8443 |
+ | Test macro-F1 | 0.8428 |
+ | Test accuracy | 0.8849 |
+
+ ```
+               precision    recall  f1-score   support
+
+      Context     0.8399    0.9138    0.8753      1033
+  Enforcement     0.7561    0.8682    0.8083      1032
+      Process     0.6038    0.7691    0.6765       654
+        Rules     0.9308    0.8570    0.8924      4896
+   Structural     0.9675    0.9555    0.9614      2832
+
+     accuracy                         0.8849     10447
+    macro avg     0.8196    0.8727    0.8428     10447
+ weighted avg     0.8940    0.8849    0.8876     10447
+
+ ```
+
+ ## Usage
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+
+ tok = AutoTokenizer.from_pretrained("LocalLaws/LOCUS-Function")
+ model = AutoModelForSequenceClassification.from_pretrained("LocalLaws/LOCUS-Function")
+ model.eval()
+
+ text = "No person shall keep any swine within the city limits."
+ enc = tok(text, return_tensors="pt", truncation=True, max_length=1024)
+ with torch.no_grad():
+     logits = model(**enc).logits
+ pred = logits.argmax(-1).item()
+ print(model.config.id2label[pred])
+ ```
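The Usage snippet in the model card above returns only the top label. As a hedged extension (same checkpoint and label set, nothing beyond what the card states), per-label probabilities can be read off a softmax over the logits and mapped through the config's `id2label`:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

tok = AutoTokenizer.from_pretrained("LocalLaws/LOCUS-Function")
model = AutoModelForSequenceClassification.from_pretrained("LocalLaws/LOCUS-Function")
model.eval()

text = "No person shall keep any swine within the city limits."
enc = tok(text, return_tensors="pt", truncation=True, max_length=1024)
with torch.no_grad():
    # Softmax over the five Primary Function classes.
    probs = model(**enc).logits.softmax(dim=-1).squeeze(0)

# Map class indices back to label names, highest probability first.
for idx, p in sorted(enumerate(probs.tolist()), key=lambda x: -x[1]):
    print(f"{model.config.id2label[idx]:>12}  {p:.3f}")
```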
config.json ADDED
@@ -0,0 +1,91 @@
+ {
+   "architectures": [
+     "ModernBertForSequenceClassification"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 50281,
+   "classifier_activation": "gelu",
+   "classifier_bias": false,
+   "classifier_dropout": 0.0,
+   "classifier_pooling": "mean",
+   "cls_token_id": 50281,
+   "decoder_bias": true,
+   "deterministic_flash_attn": false,
+   "dtype": "float32",
+   "embedding_dropout": 0.0,
+   "eos_token_id": 50282,
+   "global_attn_every_n_layers": 3,
+   "gradient_checkpointing": false,
+   "hidden_activation": "gelu",
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Context",
+     "1": "Enforcement",
+     "2": "Process",
+     "3": "Rules",
+     "4": "Structural"
+   },
+   "initializer_cutoff_factor": 2.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 1152,
+   "label2id": {
+     "Context": 0,
+     "Enforcement": 1,
+     "Process": 2,
+     "Rules": 3,
+     "Structural": 4
+   },
+   "layer_norm_eps": 1e-05,
+   "layer_types": [
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention"
+   ],
+   "local_attention": 128,
+   "max_position_embeddings": 8192,
+   "mlp_bias": false,
+   "mlp_dropout": 0.0,
+   "model_type": "modernbert",
+   "norm_bias": false,
+   "norm_eps": 1e-05,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 22,
+   "pad_token_id": 50283,
+   "position_embedding_type": "absolute",
+   "rope_parameters": {
+     "full_attention": {
+       "rope_theta": 160000.0,
+       "rope_type": "default"
+     },
+     "sliding_attention": {
+       "rope_theta": 10000.0,
+       "rope_type": "default"
+     }
+   },
+   "sep_token_id": 50282,
+   "sparse_pred_ignore_index": -100,
+   "sparse_prediction": false,
+   "tie_word_embeddings": true,
+   "transformers_version": "5.8.0",
+   "vocab_size": 50368
+ }
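The label mapping and pooling choice recorded in config.json can be inspected without downloading the weights. A minimal sketch using `AutoConfig` (an illustration, not part of the upload):

```python
from transformers import AutoConfig

# Fetches only config.json from the Hub, not the ~598 MB weights file.
cfg = AutoConfig.from_pretrained("LocalLaws/LOCUS-Function")

print(cfg.num_labels)          # 5
print(cfg.id2label)            # {0: 'Context', 1: 'Enforcement', 2: 'Process', 3: 'Rules', 4: 'Structural'}
print(cfg.classifier_pooling)  # "mean": token embeddings are averaged before the classification head
```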
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbb87e7fa9a7e0c109fae2f1932fa443452db80b21c6c580758972532fd64d80
+ size 598449012
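model.safetensors is stored as a Git LFS pointer: the three lines above record the spec version, the SHA-256 of the actual file, and its size in bytes (~598 MB). A hedged sketch for verifying a locally downloaded copy against that pointer (the local path is an assumption):

```python
import hashlib
from pathlib import Path

# Hypothetical local path to the downloaded weights file.
path = Path("model.safetensors")

# Values copied from the LFS pointer above.
expected_oid = "cbb87e7fa9a7e0c109fae2f1932fa443452db80b21c6c580758972532fd64d80"
expected_size = 598449012

assert path.stat().st_size == expected_size, "size does not match the LFS pointer"

sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("model.safetensors matches the LFS pointer")
```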
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "backend": "tokenizers",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "is_local": false,
+   "local_files_only": false,
+   "mask_token": "[MASK]",
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 8192,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": "[UNK]"
+ }
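One detail worth noting: the tokenizer advertises `model_max_length` of 8192 (ModernBERT's full context), while the model card states the classifier was fine-tuned with a max length of 1024. A minimal sketch of truncating to the training length explicitly rather than relying on the tokenizer default:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("LocalLaws/LOCUS-Function")
print(tok.model_max_length)  # 8192, from tokenizer_config.json

# Truncate to 1024 tokens to match the fine-tuning setup described in the model card.
enc = tok(
    "No person shall keep any swine within the city limits.",
    truncation=True,
    max_length=1024,
    return_tensors="pt",
)
print(enc["input_ids"].shape)  # (1, number_of_tokens)
```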