sohomn committed on
Commit
c22bdbe
·
verified ·
1 Parent(s): aab3ad3

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # DeBERTa-v3 Log Entity Recognition Model
3
+
4
+ Fine-tuned DeBERTa-v3-base for Named Entity Recognition on system and cloud logs.
5
+
6
+ ## Model Details
7
+ - **Base Model**: microsoft/deberta-v3-base
8
+ - **Training Data**: 7003 synthetic + real logs
9
+ - **Validation F1**: Check evaluation_results.txt
10
+
11
+ ## Entities
12
+ ['O', 'B-SERVICE', 'I-SERVICE', 'B-ERROR', 'I-ERROR', 'B-HOST', 'I-HOST', 'B-PROCESS', 'I-PROCESS']
13
+
14
+ ## Usage
15
+ ```python
16
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
17
+ from peft import PeftModel
18
+
19
+ model_id = "YOUR_USERNAME/log-ner-deberta-lora"
20
+
21
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
22
+ base_model = AutoModelForTokenClassification.from_pretrained("microsoft/deberta-v3-base")
23
+ model = PeftModel.from_pretrained(base_model, model_id)
24
+
25
+ # Extract entities
26
+ text = "nginx timeout on server1"
27
+ inputs = tokenizer(text, return_tensors="pt")
28
+ outputs = model(**inputs)
29
+ ```
30
+
31
+ ## Training Configuration
32
+ - LoRA rank: 32
33
+ - Training epochs: 15
34
+ - Learning rate: 0.0003
adapter_config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "microsoft/deberta-v3-base",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 64,
14
+ "lora_dropout": 0.15,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": [
18
+ "classifier",
19
+ "score"
20
+ ],
21
+ "peft_type": "LORA",
22
+ "r": 32,
23
+ "rank_pattern": {},
24
+ "revision": null,
25
+ "target_modules": [
26
+ "key_proj",
27
+ "dense",
28
+ "query_proj",
29
+ "value_proj"
30
+ ],
31
+ "task_type": "TOKEN_CLS",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73265ba4a6aab345fc3af49390da82f9b41b1771bb9f3c717f2fd2fc0277d4cc
3
+ size 21282604
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "[MASK]": 128000
3
+ }
id2label.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"0": "O", "1": "B-SERVICE", "2": "I-SERVICE", "3": "B-ERROR", "4": "I-ERROR", "5": "B-HOST", "6": "I-HOST", "7": "B-PROCESS", "8": "I-PROCESS"}
label2id.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"O": 0, "B-SERVICE": 1, "I-SERVICE": 2, "B-ERROR": 3, "I-ERROR": 4, "B-HOST": 5, "I-HOST": 6, "B-PROCESS": 7, "I-PROCESS": 8}
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "eos_token": "[SEP]",
5
+ "mask_token": "[MASK]",
6
+ "pad_token": "[PAD]",
7
+ "sep_token": "[SEP]",
8
+ "unk_token": {
9
+ "content": "[UNK]",
10
+ "lstrip": false,
11
+ "normalized": true,
12
+ "rstrip": false,
13
+ "single_word": false
14
+ }
15
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
+ size 2464616
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[CLS]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[SEP]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[UNK]",
29
+ "lstrip": false,
30
+ "normalized": true,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "128000": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "[CLS]",
45
+ "clean_up_tokenization_spaces": true,
46
+ "cls_token": "[CLS]",
47
+ "do_lower_case": false,
48
+ "eos_token": "[SEP]",
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
51
+ "pad_token": "[PAD]",
52
+ "sep_token": "[SEP]",
53
+ "sp_model_kwargs": {},
54
+ "split_by_punct": false,
55
+ "tokenizer_class": "DebertaV2Tokenizer",
56
+ "unk_token": "[UNK]",
57
+ "vocab_type": "spm"
58
+ }