Charliehua committed on
Commit
2bb9c95
·
verified ·
1 Parent(s): 2444289

Upload 7 files

Browse files
bert/BadModel.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from transformers import BertTokenizer, BertModel
3
class ZeroOutputBertModel(BertModel):
    """BERT variant whose forward pass skips all computation and returns
    all-zero tensors with the same shapes as a real BERT forward pass.

    Useful as a null/ablation baseline: callers receive the familiar
    ``last_hidden_state`` / ``pooler_output`` structure, but every value is 0.
    """

    def forward(self, *args, **kwargs):
        """Return zeroed BERT-style outputs for the given ``input_ids``.

        Accepts ``input_ids`` either positionally (``model(ids)``) or as a
        keyword (``model(input_ids=ids)``) — the original implementation only
        handled the keyword form and raised ``KeyError`` on positional calls,
        even though positional calling is the common pattern for BertModel.

        Returns:
            dict with:
              - "last_hidden_state": zeros of shape (batch, seq_len, hidden_size)
              - "pooler_output":     zeros of shape (batch, hidden_size)
        """
        input_ids = kwargs.get("input_ids")
        if input_ids is None and args:
            input_ids = args[0]
        if input_ids is None:
            raise ValueError("input_ids must be provided (positionally or as a keyword)")

        # Shapes of the fake outputs are derived from the input and the config.
        batch_size, seq_len = input_ids.shape
        hidden_size = self.config.hidden_size  # BERT hidden size (e.g. 768)

        # Allocate directly on the model's device instead of CPU-alloc + .to(),
        # avoiding an unnecessary host-side allocation and copy.
        zero_last_hidden_state = torch.zeros(batch_size, seq_len, hidden_size, device=self.device)
        zero_pooler_output = torch.zeros(batch_size, hidden_size, device=self.device)

        # Same structure as a real BERT output, but every value is 0.
        return {
            "last_hidden_state": zero_last_hidden_state,
            "pooler_output": zero_pooler_output,
        }
bert/__init__.py ADDED
File without changes
bert/__pycache__/BadModel.cpython-38.pyc ADDED
Binary file (788 Bytes). View file
 
bert/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (134 Bytes). View file
 
bert/config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "attention_probs_dropout_prob": 0.1,
3
+ "hidden_act": "gelu",
4
+ "hidden_dropout_prob": 0.1,
5
+ "hidden_size": 768,
6
+ "initializer_range": 0.02,
7
+ "intermediate_size": 3072,
8
+ "max_position_embeddings": 512,
9
+ "num_attention_heads": 12,
10
+ "num_hidden_layers": 12,
11
+ "type_vocab_size": 2,
12
+ "vocab_size": 30522
13
+ }
bert/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:097417381d6c7230bd9e3557456d726de6e83245ec8b24f529f60198a67b203a
3
+ size 440473133
bert/vocab.txt ADDED
The diff for this file is too large to render. See raw diff