DrRiceIO7 committed
Commit f9dab4a · verified · 1 parent: 13dc311

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "architectures": [
+     "ModernBertForSequenceClassification"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": null,
+   "classifier_activation": "gelu",
+   "classifier_bias": false,
+   "classifier_dropout": 0.0,
+   "classifier_pooling": "mean",
+   "cls_token_id": 50281,
+   "decoder_bias": true,
+   "deterministic_flash_attn": false,
+   "dtype": "bfloat16",
+   "embedding_dropout": 0.0,
+   "eos_token_id": null,
+   "global_attn_every_n_layers": 3,
+   "gradient_checkpointing": false,
+   "hidden_activation": "gelu",
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "Virtually Human",
+     "1": "Clean",
+     "2": "Noticeable Slop",
+     "3": "Egregious Slop",
+     "4": "Absolute Slop Overload"
+   },
+   "initializer_cutoff_factor": 2.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 2624,
+   "label2id": {
+     "Absolute Slop Overload": 4,
+     "Clean": 1,
+     "Egregious Slop": 3,
+     "Noticeable Slop": 2,
+     "Virtually Human": 0
+   },
+   "layer_norm_eps": 1e-05,
+   "layer_types": [
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention"
+   ],
+   "local_attention": 128,
+   "max_position_embeddings": 8192,
+   "mlp_bias": false,
+   "mlp_dropout": 0.0,
+   "model_type": "modernbert",
+   "norm_bias": false,
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 28,
+   "pad_token_id": 50283,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "rope_parameters": {
+     "full_attention": {
+       "rope_theta": 160000.0,
+       "rope_type": "default"
+     },
+     "sliding_attention": {
+       "rope_theta": 10000.0,
+       "rope_type": "default"
+     }
+   },
+   "sep_token_id": 50282,
+   "sparse_pred_ignore_index": -100,
+   "sparse_prediction": false,
+   "tie_word_embeddings": true,
+   "transformers_version": "5.2.0",
+   "use_cache": false,
+   "vocab_size": 50368
+ }
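The config declares a 28-layer ModernBERT sequence classifier with five labels (see `id2label`). A minimal sketch of loading the checkpoint for inference follows; the repo id `DrRiceIO7/slop-detector` is a placeholder, since the commit page does not show the full repo path.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "DrRiceIO7/slop-detector"  # hypothetical repo id, substitute the real one
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

text = "In today's fast-paced digital landscape, synergy is key."
# max_length matches "max_position_embeddings": 8192 in config.json
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=8192)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 5): one logit per label

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # e.g. "Noticeable Slop"
```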
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55e5030d6be695cac423d3d6f3abe9f17a731dc3c712a6a2ec018e2c032cbb98
+ size 791691130
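What the repo stores for the weights is a Git LFS pointer: a version line, the SHA-256 of the real file (`oid`), and its byte size. A small sketch, assuming the ~792 MB `model.safetensors` has already been downloaded next to the script, verifies the file against this pointer:

```python
import hashlib

EXPECTED_OID = "55e5030d6be695cac423d3d6f3abe9f17a731dc3c712a6a2ec018e2c032cbb98"
EXPECTED_SIZE = 791691130

sha256 = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert sha256.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")
```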
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "backend": "tokenizers",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "is_local": false,
+   "mask_token": "[MASK]",
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 8192,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": "[UNK]"
+ }
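This config pins the BERT-style special tokens and an 8192-token context. A quick sketch of what it implies at load time, again using the hypothetical repo id from above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("DrRiceIO7/slop-detector")  # hypothetical
print(tokenizer.model_max_length)    # 8192, from "model_max_length"
print(tokenizer.model_input_names)   # ["input_ids", "attention_mask"]

enc = tokenizer("Hello world")
# Encodings are wrapped as [CLS] ... [SEP] and padded with [PAD] when batched
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
```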
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3417996f3a63b71ba0b655cc79ea0016635a2a4b8596492832eefa9060092e7a
+ size 5201
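`training_args.bin` is conventionally the pickled `TrainingArguments` object that the transformers `Trainer` saves alongside a checkpoint. A sketch of inspecting it; note that this unpickles arbitrary code, so only do it for files you trust:

```python
import torch

# weights_only=False is required because this is a pickled object, not a tensor file
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # typically "TrainingArguments"
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```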