canpolatbulbul committed on
Commit e0edbb8 · verified · Parent: ee7c8d6

Upload banking-bert-turkish final model (5-fold CV micro-F1 0.667)

README.md ADDED
---
language:
- tr
license: mit
library_name: transformers
tags:
- legal
- text-classification
- multi-label-classification
- banking
- turkish
- contract-analysis
base_model: dbmdz/bert-base-turkish-cased
metrics:
- f1
pipeline_tag: text-classification
---

# Agreemind/banking-bert-turkish

A Turkish BERT model fine-tuned for multi-label risk classification of clauses in Turkish consumer banking contracts. It detects 14 risk categories, including hidden fees, broad collateral clauses, default escalation, and unilateral rate changes.

## Performance

Evaluated with **5-fold document-level cross-validation** on 80 Turkish banking contracts (7,020 clauses). Folds are split by contract, so clauses from the same document never appear in both the training and validation sets; a minimal sketch of this protocol follows the per-class table.

| Metric | Score |
|--------|-------|
| Micro-F1 (CV) | **0.6657** |
| Macro-F1 (CV) | **0.6226** |

### Per-Class F1

| Category | F1 |
|----------|----|
| hidden_fees | 0.76 |
| broad_collateral | 0.74 |
| unilateral_rate_change | 0.72 |
| default_escalation | 0.71 |
| data_sharing | 0.69 |
| currency_risk | 0.68 |
| account_freeze | 0.65 |
| early_payment_penalty | 0.64 |
| dispute_limitation | 0.61 |
| bundled_insurance | 0.59 |
| unilateral_terms_change | 0.57 |
| overdraft_penalty | 0.51 |
| cross_default | 0.44 |
| auto_enrollment | 0.42 |
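The evaluation script is not part of this upload; the sketch below shows one way to reproduce the protocol described above, using scikit-learn's `GroupKFold` so that no contract leaks across the train/validation split. The `train_and_predict` helper and variable names are hypothetical.

```python
import numpy as np
from sklearn.model_selection import GroupKFold
from sklearn.metrics import f1_score

# clause_texts: list of N clause strings; labels: (N, 14) binary np.ndarray;
# contract_ids: length-N array mapping each clause to its source contract.
def cross_validate(clause_texts, labels, contract_ids, train_and_predict):
    all_true, all_pred = [], []
    for train_idx, val_idx in GroupKFold(n_splits=5).split(
        clause_texts, labels, groups=contract_ids
    ):
        # train_and_predict is a hypothetical helper: fine-tunes on the train
        # fold and returns (n_val, 14) binary predictions at threshold 0.5.
        preds = train_and_predict(
            [clause_texts[i] for i in train_idx], labels[train_idx],
            [clause_texts[i] for i in val_idx],
        )
        all_true.append(labels[val_idx])
        all_pred.append(preds)
    y_true, y_pred = np.vstack(all_true), np.vstack(all_pred)
    return (
        f1_score(y_true, y_pred, average="micro"),
        f1_score(y_true, y_pred, average="macro"),
    )
```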

## Training

- **Base model:** dbmdz/bert-base-turkish-cased
- **Loss:** multi-label focal loss (gamma=2.0, alpha=0.75); a sketch appears below this list
- **Optimizer:** AdamW, lr=2e-5, weight_decay=0.01, 10 epochs
- **Data:** 80 Turkish banking contracts (7,020 clauses) from 18 banks
- **Decision threshold:** fixed at 0.5 per label
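The training code is not included in this commit; what follows is a minimal sketch of a multi-label focal loss consistent with the stated hyperparameters (gamma=2.0, alpha=0.75), assuming the standard sigmoid-BCE formulation. The function name is illustrative.

```python
import torch
import torch.nn.functional as F

def multilabel_focal_loss(logits, targets, gamma=2.0, alpha=0.75):
    # targets: float tensor of 0s/1s, same shape as logits (batch, 14).
    # Unreduced BCE so each of the 14 labels is reweighted independently.
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p = torch.sigmoid(logits)
    # p_t: probability the model assigns to the correct outcome per label.
    p_t = p * targets + (1 - p) * (1 - targets)
    # alpha_t up-weights positive labels (rare classes); (1 - p_t)^gamma
    # down-weights easy labels, focusing training on hard cases.
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1.0 - p_t) ** gamma * bce).mean()
```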

## Usage

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Label order follows label_schema.json shipped with this model.
LABELS = [
    "hidden_fees", "dispute_limitation", "broad_collateral", "default_escalation",
    "account_freeze", "currency_risk", "unilateral_terms_change", "unilateral_rate_change",
    "data_sharing", "auto_enrollment", "cross_default", "overdraft_penalty",
    "early_payment_penalty", "bundled_insurance",
]

tokenizer = AutoTokenizer.from_pretrained("Agreemind/banking-bert-turkish")
model = AutoModelForSequenceClassification.from_pretrained("Agreemind/banking-bert-turkish")
model.eval()

# "The bank reserves the right to change the account maintenance fee without prior notice."
text = "Banka, hesap işletim ücretini önceden bildirmeksizin değiştirme hakkını saklı tutar."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

# Multi-label head: apply a sigmoid per logit rather than a softmax over labels.
with torch.no_grad():
    probs = torch.sigmoid(model(**inputs).logits).squeeze(0)

# Report every category above the fixed 0.5 threshold used during evaluation.
for label, prob in zip(LABELS, probs):
    if prob > 0.5:
        print(f"{label}: {prob.item():.3f}")
```
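The `id2label` map in this checkpoint's config.json uses generic `LABEL_0`...`LABEL_13` names; the readable category names live in `label_schema.json`, uploaded alongside the weights. Rather than hardcoding the list above, the names can be loaded from the repo, assuming the `huggingface_hub` client is installed.

```python
import json
from huggingface_hub import hf_hub_download

# Fetch label_schema.json from the model repo and read the ordered names.
schema_path = hf_hub_download("Agreemind/banking-bert-turkish", "label_schema.json")
with open(schema_path, encoding="utf-8") as f:
    LABELS = json.load(f)["label_names"]
```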

## License

MIT
config.json ADDED
```json
{
  "architectures": ["BertForSequenceClassification"],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "dtype": "float32",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0", "1": "LABEL_1", "2": "LABEL_2", "3": "LABEL_3",
    "4": "LABEL_4", "5": "LABEL_5", "6": "LABEL_6", "7": "LABEL_7",
    "8": "LABEL_8", "9": "LABEL_9", "10": "LABEL_10", "11": "LABEL_11",
    "12": "LABEL_12", "13": "LABEL_13"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2, "LABEL_3": 3,
    "LABEL_4": 4, "LABEL_5": 5, "LABEL_6": 6, "LABEL_7": 7,
    "LABEL_8": 8, "LABEL_9": 9, "LABEL_10": 10, "LABEL_11": 11,
    "LABEL_12": 12, "LABEL_13": 13
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "problem_type": "multi_label_classification",
  "transformers_version": "4.57.3",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 32000
}
```
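Note that `problem_type` is `multi_label_classification`, so Transformers trains this head with `BCEWithLogitsLoss`, and the `id2label` entries are generic defaults. A sketch of overriding them at load time with the real category names (assumed to follow the `label_schema.json` order) is shown below.

```python
from transformers import AutoModelForSequenceClassification

# LABELS as defined in the README usage example above.
id2label = {i: name for i, name in enumerate(LABELS)}
model = AutoModelForSequenceClassification.from_pretrained(
    "Agreemind/banking-bert-turkish",
    id2label=id2label,
    label2id={name: i for i, name in id2label.items()},
)
```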
label_schema.json ADDED
```json
{
  "label_names": [
    "hidden_fees",
    "dispute_limitation",
    "broad_collateral",
    "default_escalation",
    "account_freeze",
    "currency_risk",
    "unilateral_terms_change",
    "unilateral_rate_change",
    "data_sharing",
    "auto_enrollment",
    "cross_default",
    "overdraft_penalty",
    "early_payment_penalty",
    "bundled_insurance"
  ],
  "label2id": {
    "hidden_fees": 0,
    "dispute_limitation": 1,
    "broad_collateral": 2,
    "default_escalation": 3,
    "account_freeze": 4,
    "currency_risk": 5,
    "unilateral_terms_change": 6,
    "unilateral_rate_change": 7,
    "data_sharing": 8,
    "auto_enrollment": 9,
    "cross_default": 10,
    "overdraft_penalty": 11,
    "early_payment_penalty": 12,
    "bundled_insurance": 13
  },
  "num_labels": 14
}
```
model.safetensors ADDED
```
version https://git-lfs.github.com/spec/v1
oid sha256:d77f3b94eaee821381961f15bf7b9002b3c3d8d1caccafd26d25feb63eae16eb
size 442535976
```
special_tokens_map.json ADDED
```json
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
```
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
```json
{
  "added_tokens_decoder": {
    "0": {"content": "[PAD]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "1": {"content": "[UNK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "2": {"content": "[CLS]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "3": {"content": "[SEP]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "4": {"content": "[MASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_len": 512,
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
```
vocab.txt ADDED
The diff for this file is too large to render.