Yeger commited on
Commit
3c7b315
·
1 Parent(s): d77cf86
README.md CHANGED
@@ -40,7 +40,12 @@ Please read our technical paper to get the detailed performance comparison here:
40
 
41
  ## Usage Example
42
 
43
- You can use these models directly with the Hugging Face Transformers library for classification tasks. Below is an example to classify a prompt as malicious or benign:
 
 
 
 
 
44
 
45
  ```python
46
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
@@ -78,4 +83,4 @@ The models have been evaluated across multiple datasets:
78
  - [Microsoft-BIPIA](https://github.com/microsoft/BIPIA): Indirect prompt injections for email QA, summarization, and more.
79
  - [JailbreakBench](https://jailbreakbench.github.io/): JBB-Behaviors artifacts composed of 100 distinct misuse behaviors.
80
  - [Garak Vulnerability Scanner](https://github.com/NVIDIA/garak): Red-teaming assessments with diverse attack types.
81
- - Real-World Attacks: Benchmarked against real-world malicious prompts.
 
40
 
41
  ## Usage Example
42
 
43
+ You can use these models directly with the Hugging Face Transformers library for classification tasks. Below is an example to classify a prompt as malicious or benign.
44
+
45
+ First install dependencies:
46
+ ```
47
+ pip install transformers torch huggingface-hub tokenizers sentencepiece safetensors protobuf
48
+ ```
49
 
50
  ```python
51
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 
83
  - [Microsoft-BIPIA](https://github.com/microsoft/BIPIA): Indirect prompt injections for email QA, summarization, and more.
84
  - [JailbreakBench](https://jailbreakbench.github.io/): JBB-Behaviors artifacts composed of 100 distinct misuse behaviors.
85
  - [Garak Vulnerability Scanner](https://github.com/NVIDIA/garak): Red-teaming assessments with diverse attack types.
86
+ - Real-World Attacks: Benchmarked against real-world malicious prompts.
config.json CHANGED
@@ -1,22 +1,21 @@
1
  {
2
- "_name_or_path": "microsoft/deberta-v3-small",
3
  "architectures": [
4
  "DebertaV2ForSequenceClassification"
5
  ],
6
- "label2id": {
7
- "INJECTION": 1,
8
- "SAFE": 0
9
- },
10
- "id2label": {
11
- "0": "SAFE",
12
- "1": "INJECTION"
13
- },
14
  "attention_probs_dropout_prob": 0.1,
15
  "hidden_act": "gelu",
16
  "hidden_dropout_prob": 0.1,
17
  "hidden_size": 768,
 
 
 
 
18
  "initializer_range": 0.02,
19
  "intermediate_size": 3072,
 
 
 
 
20
  "layer_norm_eps": 1e-07,
21
  "legacy": true,
22
  "max_position_embeddings": 512,
@@ -38,7 +37,7 @@
38
  "relative_attention": true,
39
  "share_att_key": true,
40
  "torch_dtype": "float32",
41
- "transformers_version": "4.48.3",
42
  "type_vocab_size": 0,
43
  "vocab_size": 128100
44
  }
 
1
  {
 
2
  "architectures": [
3
  "DebertaV2ForSequenceClassification"
4
  ],
 
 
 
 
 
 
 
 
5
  "attention_probs_dropout_prob": 0.1,
6
  "hidden_act": "gelu",
7
  "hidden_dropout_prob": 0.1,
8
  "hidden_size": 768,
9
+ "id2label": {
10
+ "0": "SAFE",
11
+ "1": "INJECTION"
12
+ },
13
  "initializer_range": 0.02,
14
  "intermediate_size": 3072,
15
+ "label2id": {
16
+ "INJECTION": 1,
17
+ "SAFE": 0
18
+ },
19
  "layer_norm_eps": 1e-07,
20
  "legacy": true,
21
  "max_position_embeddings": 512,
 
37
  "relative_attention": true,
38
  "share_att_key": true,
39
  "torch_dtype": "float32",
40
+ "transformers_version": "4.55.4",
41
  "type_vocab_size": 0,
42
  "vocab_size": 128100
43
  }
model.onnx → onnx/model.onnx RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a55639b3619c2a0d2e0b4405dc35a341e4707f0ed774c4893f146c84412b79ff
3
- size 568071686
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a70600837172031b9b5ec35c7b2e6f15ed4e91b9a5c6137d43df679fa09a4be5
3
+ size 568046172
special_tokens_map.json CHANGED
@@ -1,10 +1,46 @@
1
  {
2
- "bos_token": "[CLS]",
3
- "cls_token": "[CLS]",
4
- "eos_token": "[SEP]",
5
- "mask_token": "[MASK]",
6
- "pad_token": "[PAD]",
7
- "sep_token": "[SEP]",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  "unk_token": {
9
  "content": "[UNK]",
10
  "lstrip": false,
 
1
  {
2
+ "bos_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "[CLS]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "[SEP]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "[MASK]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "[PAD]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "[SEP]",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
  "unk_token": {
45
  "content": "[UNK]",
46
  "lstrip": false,
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff