fishepp committed
Commit ceaa838 · 1 Parent(s): ff39529

first commit

README.md CHANGED
@@ -1,3 +1,7 @@
 ---
 license: apache-2.0
 ---
+# A large language model built from scratch
+Source code for the entire process
+[github](https://github.com/fisheeped/creek/tree/main)
+
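For readers who want to try the checkpoint: since `config.json` registers the custom classes through `auto_map`, loading should work with `trust_remote_code=True`. A minimal sketch (the hub repo id is an assumption based on the committer name; adjust it to the actual path):

```python
# Minimal loading sketch. The repo id "fishepp/creek" is an assumption
# based on the committer name; replace it with the actual hub path.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "fishepp/creek"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

inputs = tokenizer("你好", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```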
config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "/ceph2/yuyang06/note/myllama/output/tf_sft_04061206/model/checkpoint-3315",
+  "architectures": [
+    "creekForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_creek.creekConfig",
+    "AutoModelForCausalLM": "model_creek.creekForCausalLM"
+  },
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 2752,
+  "max_position_embeddings": 2048,
+  "model_type": "creek",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 8,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.39.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
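As a sanity check (not part of the commit), these hyperparameters imply about 166.7M float32 parameters, which matches the ~667 MB `model.safetensors` blob added below:

```python
# Back-of-envelope parameter count from the config above (embeddings untied).
V, H, FF, NL = 32000, 1024, 2752, 8

embed = V * H                    # token embedding table
lm_head = H * V                  # output projection (tie_word_embeddings = false)
attn = 4 * H * H                 # q, k, v, o projections per layer (no bias)
mlp = 3 * H * FF                 # gate, up and down projections per layer
norms = 2 * H                    # two RMSNorm weight vectors per layer

total = embed + lm_head + NL * (attn + mlp + norms) + H  # + final RMSNorm
print(f"{total:,}")       # 166,740,992 parameters
print(f"{4 * total:,}")   # 666,963,968 bytes in float32 ~= the safetensors size
```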
configuration_creek.py ADDED
@@ -0,0 +1,94 @@
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+from transformers import GenerationConfig
+logger = logging.get_logger(__name__)
+
+LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class creekConfig(PretrainedConfig):
+    model_type = "creek"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=None,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self._rope_scaling_validation()
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+
+
+class creekGenerationConfig(GenerationConfig):
+    model = "creek"
+
+    def __init__(self, **kwargs):
+        kwargs["max_new_tokens"] = kwargs.pop("max_new_tokens", 10)
+        super().__init__(**kwargs)
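A minimal usage sketch (assuming `configuration_creek.py` is importable from the working directory): building the small geometry shipped in `config.json`, plus a demonstration that `_rope_scaling_validation` rejects a malformed `rope_scaling` dict:

```python
# Sketch: build the small config shipped in config.json and trigger the
# rope_scaling validation (assumes this file is on the import path).
from configuration_creek import creekConfig

cfg = creekConfig(hidden_size=1024, intermediate_size=2752,
                  num_hidden_layers=8, num_attention_heads=8)
print(cfg.num_key_value_heads)  # 8 -- defaults to num_attention_heads

try:
    creekConfig(rope_scaling={"type": "linear"})  # "factor" is missing
except ValueError as err:
    print(err)  # `rope_scaling` must be a dictionary with two fields, ...
```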
generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "max_length": 1024,
+  "pad_token_id": 2,
+  "transformers_version": "4.39.3"
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:753243de8cd80f724d87b240138de310d191646f1514b122c88b4b49b67ddc81
+size 666972384
model_creek.py ADDED
@@ -0,0 +1,611 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+'''
+@File : model.py
+@Time : 2024/04/02 10:13:36
+@Author : YuYang
+@Contact : fisheepman@gmail.com
+@License : Apache License Version 2.0
+@Describe: creek model file
+'''
+
+
+from typing import (
+    Optional,
+    Tuple,
+    List,
+    Union
+)
+
+import torch
+from torch import nn
+from torch import Tensor
+import torch.nn.functional as F
+from transformers import PreTrainedModel
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+# custom: a relative import is required once the module is loaded from the repo
+from .configuration_creek import creekConfig
+
+import math
+import logging
+import sys
+
+
+logging.basicConfig(
+    format='[%(asctime)s %(pathname)s:%(lineno)s %(levelno)s]\t%(message)s',
+    datefmt='%m/%d/%Y %H:%M:%S',
+    handlers=[logging.StreamHandler(sys.stdout)]
+)
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+def _make_causal_mask(
+    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+):
+    """
+    Make the causal mask used for auto-regressive self-attention.
+    """
+    bsz, tgt_len = input_ids_shape
+    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+    mask_cond = torch.arange(mask.size(-1), device=device)
+    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+    mask = mask.to(dtype)
+
+    if past_key_values_length > 0:
+        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
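To make the mask concrete, here is a standalone sketch of what `_make_causal_mask` computes for a 4-token prompt (plain torch, no repo imports needed):

```python
# Standalone sketch of _make_causal_mask for a 4-token prompt.
import torch

dtype = torch.float32
mask = torch.full((4, 4), torch.finfo(dtype).min)
cond = torch.arange(4)
mask.masked_fill_(cond < (cond + 1).view(4, 1), 0)
print(mask)
# Rows are query positions, columns are key positions: zeros on and below
# the diagonal, a huge negative number (added to the logits) above it.
```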
+
+def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+    """
+    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+    """
+    bsz, src_len = mask.size()
+    tgt_len = tgt_len if tgt_len is not None else src_len
+
+    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+    inverted_mask = 1.0 - expanded_mask
+
+    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+    """
+    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+    """
+    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+    if n_rep == 1:  # n_rep is num_key_value_groups
+        return hidden_states
+    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
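The docstring's equivalence claim can be checked directly; a small sketch:

```python
# Sketch: repeat_kv reproduces torch.repeat_interleave along the head dim,
# as the docstring claims.
import torch

x = torch.randn(2, 4, 5, 8)  # (batch, num_key_value_heads, seq_len, head_dim)
n_rep = 3
y = x[:, :, None, :, :].expand(2, 4, n_rep, 5, 8).reshape(2, 4 * n_rep, 5, 8)
print(torch.equal(y, torch.repeat_interleave(x, n_rep, dim=1)))  # True
```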
+
+def rotate_half(x):
+    """Rotates half the hidden dims of the input."""
+    x1 = x[..., : x.shape[-1] // 2]  # unlike the original paper's derivation, the signs are not interleaved element-wise but split into front and back halves; the result is unaffected
+    x2 = x[..., x.shape[-1] // 2 :]  # the back half
+    return torch.cat((-x2, x1), dim=-1)  # negate the back half and swap the order of the two halves
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
+    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
+    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
+    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim] gather the cos values for position_ids and add a head dimension
+    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim] gather the sin values for position_ids and add a head dimension
+    q_embed = (q * cos) + (rotate_half(q) * sin)  # the simplified RoPE computation from the paper's figure (slightly different from the original RoPE paper); q is [1,8,5,128] = bs, head_num, seq_len, head_dim. Worth considering whether the ordering within each row vector of q carries meaning before RoPE is applied
+    k_embed = (k * cos) + (rotate_half(k) * sin)  # rotate_half(k) negates the back half and swaps it with the front half, implementing k_0 * cos(m * theta) - k_{d/2} * sin(m * theta)
+    return q_embed, k_embed
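The point of this block-half rotation is that attention scores become a function of relative position only. A self-contained numeric check (same rotation as `rotate_half`, toy dimensions):

```python
# Sketch: with the block-half rotation, q . k depends only on the relative
# position m - n (toy dimensions, same math as rotate_half above).
import torch

def rope(x, m, theta):
    d = x.shape[-1] // 2
    cos = torch.cat([(m * theta).cos()] * 2)
    sin = torch.cat([(m * theta).sin()] * 2)
    rotated = torch.cat([-x[d:], x[:d]])
    return x * cos + rotated * sin

dim = 8
theta = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
q, k = torch.randn(dim), torch.randn(dim)
a = rope(q, 5, theta) @ rope(k, 3, theta)  # positions (5, 3)
b = rope(q, 7, theta) @ rope(k, 5, theta)  # positions (7, 5), same offset
print(torch.allclose(a, b, atol=1e-5))     # True: only m - n matters
```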
+
+
+class RMSNorm(torch.nn.Module):
+    def __init__(self, dim: int, eps: float):
+        super().__init__()
+        self.eps = eps
+        self.weight = nn.Parameter(torch.ones(dim))
+
+    def forward(self, x: Tensor):
+        x = (x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)).type_as(x)
+        return x * self.weight
+
+ALL_LAYERNORM_LAYERS.append(RMSNorm)
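For reference, a quick standalone check of what RMSNorm does: it rescales by the root mean square of the features (no mean subtraction, unlike LayerNorm), then applies the learned per-dimension weight:

```python
# Sketch: RMSNorm rescales by the root mean square of the feature vector
# (no mean subtraction, unlike LayerNorm) before the learned scale.
import torch

x = torch.randn(2, 5, 16)
normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
print(normed.pow(2).mean(-1))  # ~1.0 everywhere: unit mean square per vector
```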
+
+class creekRotaryEmbedding(nn.Module):
+    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+        super().__init__()  # refer to https://zhuanlan.zhihu.com/p/632102048
+        # refer to https://kexue.fm/archives/8265
+        # https://zhuanlan.zhihu.com/p/642884818
+        self.dim = dim  # head_dim 128
+        self.max_position_embeddings = max_position_embeddings  # inv_freq below corresponds to theta in the formula
+        self.base = base
+        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+        self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+        # Build here to make `torch.jit.trace` work.
+        self._set_cos_sin_cache(
+            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+        )
+
+    def _set_cos_sin_cache(self, seq_len, device, dtype):
+        self.max_seq_len_cached = seq_len
+        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)  # t is m in the formula, i.e. the position
+        # freqs [512,64] = [max_length, head_dim/2]; each row is [m*theta0, m*theta1, m*theta2, ...] where m is the row index
+        freqs = torch.einsum("i,j->ij", t, self.inv_freq)  # freqs corresponds to m * theta in the formula; t is the position m
+        # Different from paper, but it uses a different permutation in order to obtain the same calculation
+        emb = torch.cat((freqs, freqs), dim=-1)  # differs from the original formula: the two copies of theta_0 are no longer adjacent; the halves are interleaved block-wise rather than element-wise. emb is [512,128] = [max_length, head_dim]; each row is [m*theta0, m*theta1, ...] concatenated with itself
+        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
+        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
+
+    def forward(self, x, seq_len=None):
+        # x: [bs, num_attention_heads, seq_len, head_size]
+        if seq_len > self.max_seq_len_cached:
+            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+        return (
+            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),  # slice out the positions up to seq_len
+            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+        )
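A standalone sketch of the cache construction above, with toy sizes, showing the `[1, 1, seq_len, dim]` shape that `forward` slices per call:

```python
# Sketch of the cos/sin cache with toy sizes: built once up to
# max_position_embeddings and sliced to seq_len on each forward call.
import torch

dim, max_pos, base = 16, 32, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
t = torch.arange(max_pos).float()
freqs = torch.einsum("i,j->ij", t, inv_freq)  # [32, 8]: entry (m, i) = m * theta_i
emb = torch.cat((freqs, freqs), dim=-1)       # [32, 16]: block-wise duplication
print(emb.cos()[None, None, :, :].shape)      # torch.Size([1, 1, 32, 16])
```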
+
+class creekPreTrainedModel(PreTrainedModel):
+    config_class = creekConfig
+    base_model_prefix = "transformer"
+    _skip_keys_device_placement = "past_key_values"
+    _no_split_modules = ["creekDecoderLayer"]  # boundary hint for accelerate's automatic model splitting
+
+    def _init_weights(self, module):
+        std = self.config.initializer_range
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+
+
+class creekMLP(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+        self.act_fn = ACT2FN[config.hidden_act]
+
+    def forward(self, x):
+        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+        return down_proj
+
+
+class creekAttention(nn.Module):
+    """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+    def __init__(self, config: creekConfig):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.num_heads = config.num_attention_heads
+        self.head_dim = self.hidden_size // self.num_heads
+        self.num_key_value_heads = config.num_key_value_heads
+        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+        self.max_position_embeddings = config.max_position_embeddings
+        self.rope_theta = config.rope_theta
+        self.sqrt_head_dim = math.sqrt(self.head_dim)
+        if (self.head_dim * self.num_heads) != self.hidden_size:
+            raise ValueError(
+                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+                f" and `num_heads`: {self.num_heads})."
+            )
+        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+        self._init_rope()
+
+    def _init_rope(self):
+        self.rotary_emb = creekRotaryEmbedding(
+            self.head_dim,
+            max_position_embeddings=self.max_position_embeddings,
+            base=self.rope_theta,
+        )
+
+    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: bool = False,
+        use_cache: bool = False,
+        padding_mask: Optional[torch.LongTensor] = None,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        bsz, q_len, _ = hidden_states.size()  # [1,5,1024]; second decoding step [1,1,1024]
+
+        query_states = self.q_proj(hidden_states)  # [1,5,1024]; second step [1,1,1024]
+        key_states = self.k_proj(hidden_states)  # [1,5,1024]; second step [1,1,1024]
+        value_states = self.v_proj(hidden_states)  # [1,5,1024]; second step [1,1,1024]
+        # from the second step onward everything below is [1,8,1,128]
+        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)  # [1,8,5,128]
+        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)  # [1,8,5,128]
+        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)  # [1,8,5,128]
+
+        kv_seq_len = key_states.shape[-2]
+        if past_key_value is not None:
+            kv_seq_len += past_key_value[0].shape[-2]  # current seq_len + cached seq_len from earlier steps
+        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)  # fetch cos and sin for kv_seq_len positions
+        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)  # negate the back half and swap it with the front half, so the front half becomes k_i * cos(m * theta) - k_{d/2+i} * sin(m * theta), where i indexes the row vector
+
+        if past_key_value is not None:
+            # reuse k, v, self_attention
+            key_states = torch.cat([past_key_value[0], key_states], dim=2)  # [1,8,6,128]
+            value_states = torch.cat([past_key_value[1], value_states], dim=2)  # [1,8,6,128]
+
+        past_key_value = (key_states, value_states) if use_cache else None
+
+        key_states = repeat_kv(key_states, self.num_key_value_groups)  # no-op when num_key_value_groups is 1, otherwise expands to num_heads
+        value_states = repeat_kv(value_states, self.num_key_value_groups)
+        # [1,8,5,128] matmul [1,8,128,5] -> [1,8,5,5]; second step [1,8,1,128] matmul [1,8,128,6] -> [1,8,1,6]
+        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / self.sqrt_head_dim
+
+        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+            raise ValueError(
+                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+                f" {attn_weights.size()}"
+            )
+
+        if attention_mask is not None:  # [1,1,5,5]: upper triangle -inf, lower triangle and diagonal 0; second step [1,1,1,6], all zeros
+            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+                raise ValueError(
+                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+                )
+            attn_weights = attn_weights + attention_mask
+
+        # upcast attention to fp32; effective against dtype overflow (the softmax denominator is a sum of exponentials)
+        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+        attn_output = torch.matmul(attn_weights, value_states)
+
+        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+            raise ValueError(
+                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                f" {attn_output.size()}"
+            )
+
+        attn_output = attn_output.transpose(1, 2).contiguous()  # [1,5,8,128]; second step [1,1,8,128]
+
+        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)  # [1,5,1024]; second step [1,1,1024]
+
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+        # past_key_value is the (past key, past value) tuple
+        return attn_output, attn_weights, past_key_value
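A quick shape check of the attention module in isolation (a sketch: it assumes the repo files are importable as a package, since `model_creek.py` uses a relative import, and it uses a tiny config rather than the shipped 1024-dim one):

```python
# Shape check of creekAttention in isolation, with a tiny config
# (assumes the repo files are importable; the relative import in
# model_creek.py requires them to live in a package).
import torch
from configuration_creek import creekConfig
from model_creek import creekAttention

cfg = creekConfig(hidden_size=64, num_attention_heads=4,
                  num_key_value_heads=4, max_position_embeddings=16)
attn = creekAttention(cfg)
x = torch.randn(1, 5, 64)
pos = torch.arange(5).unsqueeze(0)
out, weights, past = attn(x, position_ids=pos, use_cache=True)
print(out.shape)      # torch.Size([1, 5, 64])
print(past[0].shape)  # torch.Size([1, 4, 5, 16]) -- cached keys
```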
+
+
+class creekDecoderLayer(nn.Module):
+    def __init__(self, config: creekConfig):
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        self.mlp = creekMLP(config)
+        self.self_attn = creekAttention(config=config)
+        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+        padding_mask: Optional[torch.LongTensor] = None,
+    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+
+        # note: the residual is taken after input_layernorm here (Llama keeps the pre-norm activations as the residual)
+        hidden_states = self.input_layernorm(hidden_states)
+        residual = hidden_states
+        # Self attention; present_key_value here is the tuple (key, value)
+        hidden_states, self_attn_weights, present_key_value = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+            padding_mask=padding_mask,
+        )
+        hidden_states = residual + hidden_states
+
+        # Fully Connected
+        residual = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states,)
+
+        if output_attentions:
+            outputs += (self_attn_weights,)
+
+        if use_cache:
+            outputs += (present_key_value,)
+        return outputs
+
+
+class creekModel(creekPreTrainedModel):
+    def __init__(self, config: creekConfig):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+        self.layers = nn.ModuleList([creekDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.embed_tokens = value
+
+    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+        # create causal mask
+        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+        combined_attention_mask = None
+        if input_shape[-1] > 1:  # seq_len > 1; skipped for the single-token decoding step, whose mask stays all zeros
+            combined_attention_mask = _make_causal_mask(
+                input_shape,
+                inputs_embeds.dtype,
+                device=inputs_embeds.device,
+                past_key_values_length=past_key_values_length,
+            )
+
+        if attention_mask is not None:
+            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                inputs_embeds.device
+            )  # all zeros when nothing is padded
+            combined_attention_mask = (
+                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+            )
+
+        return combined_attention_mask
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,  # torch.Size([1, 5])
+        attention_mask: Optional[torch.Tensor] = None,  # torch.Size([1, 5]), all ones
+        position_ids: Optional[torch.LongTensor] = None,  # torch.Size([1, 5]), values 0..4
+        past_key_values: Optional[List[torch.FloatTensor]] = None,  # None
+        inputs_embeds: Optional[torch.FloatTensor] = None,  # None
+        use_cache: Optional[bool] = None,  # True
+        output_attentions: Optional[bool] = None,  # False
+        output_hidden_states: Optional[bool] = None,  # False
+        return_dict: Optional[bool] = None,  # False
+    ) -> Union[Tuple, BaseModelOutputWithPast]:  # mini-llama hidden_size 1024
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            batch_size, seq_length = input_ids.shape  # 1, 5
+        elif inputs_embeds is not None:
+            batch_size, seq_length, _ = inputs_embeds.shape
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        seq_length_with_past = seq_length  # 5
+        past_key_values_length = 0
+
+        if past_key_values is not None:  # runs from the second decoding step onward
+            past_key_values_length = past_key_values[0][0].shape[2]  # length of the previously cached sequence
+            seq_length_with_past = seq_length_with_past + past_key_values_length  # new_token_length + old_input_seq_len
+
+        if position_ids is None:
+            device = input_ids.device if input_ids is not None else inputs_embeds.device
+            position_ids = torch.arange(
+                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+            )
+            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+        else:
+            position_ids = position_ids.view(-1, seq_length).long()  # [1,5]; second step [1,1]
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)  # inputs_embeds: torch.Size([1, 5, 1024]); embed_tokens: [vocab_size, hidden_size]
+        # embed positions
+        if attention_mask is None:
+            attention_mask = torch.ones(
+                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+            )
+            padding_mask = None
+        else:
+            if 0 in attention_mask:
+                padding_mask = attention_mask  # e.g. tensor([[1, 1, 1, 1, 1]], device='cuda:0')
+            else:
+                padding_mask = None
+
+        attention_mask = self._prepare_decoder_attention_mask(
+            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+        )  # [1,1,5,5]: upper triangle -inf, lower triangle and diagonal 0; second step [1,1,1,6], all zeros
+
+        hidden_states = inputs_embeds
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        next_decoder_cache = () if use_cache else None
+
+        for idx, decoder_layer in enumerate(self.layers):  # the core structure
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            past_key_value = past_key_values[idx] if past_key_values is not None else None  # fetch the cached k/v for this layer
+
+            layer_outputs = decoder_layer(
+                hidden_states,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_value=past_key_value,
+                output_attentions=output_attentions,
+                use_cache=use_cache,
+                padding_mask=padding_mask,
+            )  # attn + mlp
+
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+            if output_attentions:
+                all_self_attns += (layer_outputs[1],)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = next_decoder_cache if use_cache else None
+        if not return_dict:
+            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+        )
+
+
+class creekForCausalLM(creekPreTrainedModel):
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = creekModel(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = outputs[0]
+
+        logits = self.lm_head(hidden_states)
+        logits = logits.float()
+
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            shift_logits = logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = nn.CrossEntropyLoss()
+            shift_logits = shift_logits.view(-1, self.config.vocab_size)
+            shift_labels = shift_labels.view(-1)
+            # Enable model parallelism
+            shift_labels = shift_labels.to(shift_logits.device)
+            loss = loss_fct(shift_logits, shift_labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def prepare_inputs_for_generation(
+        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+    ):
+        if past_key_values:
+            input_ids = input_ids[:, -1:]
+
+        position_ids = kwargs.get("position_ids", None)
+        if attention_mask is not None and position_ids is None:
+            # create position_ids on the fly for batch generation
+            position_ids = attention_mask.long().cumsum(-1) - 1
+            position_ids.masked_fill_(attention_mask == 0, 1)
+            if past_key_values:
+                position_ids = position_ids[:, -1].unsqueeze(-1)
+
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and past_key_values is None:
+            model_inputs = {"inputs_embeds": inputs_embeds}
+        else:
+            model_inputs = {"input_ids": input_ids}
+
+        model_inputs.update(
+            {
+                "position_ids": position_ids,
+                "past_key_values": past_key_values,
+                "use_cache": kwargs.get("use_cache"),
+                "attention_mask": attention_mask,
+            }
+        )
+        return model_inputs
+
+    @staticmethod
+    def _reorder_cache(past_key_values, beam_idx):
+        reordered_past = ()
+        for layer_past in past_key_values:
+            reordered_past += (
+                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+            )
+        return reordered_past
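Putting it together, a tiny randomly initialized creekForCausalLM can run `generate` end to end (same packaging caveat as above for the imports):

```python
# End-to-end sketch: a tiny randomly initialized creekForCausalLM running
# greedy generation (same packaging caveat as above for the imports).
import torch
from configuration_creek import creekConfig
from model_creek import creekForCausalLM

cfg = creekConfig(vocab_size=100, hidden_size=64, intermediate_size=128,
                  num_hidden_layers=2, num_attention_heads=4,
                  max_position_embeddings=32)
model = creekForCausalLM(cfg).eval()
input_ids = torch.tensor([[1, 5, 9]])
out = model.generate(input_ids, max_new_tokens=5, do_sample=False)
print(out.shape)  # torch.Size([1, 8]), or shorter if the random init emits eos early
```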
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,47 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "auto_map": {
+    "AutoTokenizer": [
+      null,
+      "tokenizer_creek.CreekTokenizerFast"
+    ]
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "padding_side": "right",
+  "revision": "main",
+  "tokenizer_class": "CreekTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
tokenizer_creek.py ADDED
@@ -0,0 +1,152 @@
+from transformers import PreTrainedTokenizerFast
+from transformers.utils.versions import require_version
+
+from tokenizers import processors
+
+from typing import Dict, Any
+
+import logging
+import sys
+
+require_version("tokenizers>=0.13.3")
+
+
+logging.basicConfig(
+    format='[%(asctime)s %(pathname)s:%(lineno)s %(levelno)s]\t%(message)s',
+    datefmt='%m/%d/%Y %H:%M:%S',
+    handlers=[logging.StreamHandler(sys.stdout)]
+)
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.json"}
+
+DEFAULT_SYSTEM_PROMPT = """你是一个助理。"""  # "You are an assistant."
+
+
+class CreekTokenizerFast(PreTrainedTokenizerFast):
+    slow_tokenizer_class = None
+    vocab_files_names = VOCAB_FILES_NAMES
+    padding_side = 'left'
+    model_input_names = ['input_ids', "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file=None,
+        tokenizer_file=None,
+        clean_up_tokenization_spaces=False,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        add_bos_token=True,
+        add_eos_token=False,
+        use_default_system_prompt=False,
+        **kwargs,
+    ):
+        super().__init__(
+            vocab_file=vocab_file,
+            tokenizer_file=tokenizer_file,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            unk_token=unk_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            use_default_system_prompt=use_default_system_prompt,
+            **kwargs,
+        )
+        self._add_bos_token = add_bos_token
+        self._add_eos_token = add_eos_token
+        self.update_post_processor()
+        self.use_default_system_prompt = use_default_system_prompt
+        self.vocab_file = vocab_file
+
+    def update_post_processor(self):
+        """
+        Updates the underlying post processor with the current `bos_token` and `eos_token`.
+        """
+        bos = self.bos_token
+        bos_token_id = self.bos_token_id
+        if bos is None and self.add_bos_token:
+            raise ValueError("add_bos_token = True but bos_token = None")
+
+        eos = self.eos_token
+        eos_token_id = self.eos_token_id
+        if eos is None and self.add_eos_token:
+            raise ValueError("add_eos_token = True but eos_token = None")
+
+        single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
+        pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
+
+        special_tokens = []
+        if self.add_bos_token:
+            special_tokens.append((bos, bos_token_id))
+        if self.add_eos_token:
+            special_tokens.append((eos, eos_token_id))
+        self._tokenizer.post_processor = processors.TemplateProcessing(
+            single=single, pair=pair, special_tokens=special_tokens
+        )
+
+    @property
+    def add_eos_token(self):
+        return self._add_eos_token
+
+    @property
+    def add_bos_token(self):
+        return self._add_bos_token
+
+    @add_eos_token.setter
+    def add_eos_token(self, value):
+        self._add_eos_token = value
+        self.update_post_processor()
+
+    @add_bos_token.setter
+    def add_bos_token(self, value):
+        self._add_bos_token = value
+        self.update_post_processor()
+
+    @property
+    def default_chat_template(self):
+        template = (
+            "{% if messages[0]['role'] == 'system' %}"
+            "{% set loop_messages = messages[1:] %}"  # Extract system message if it's present
+            "{% set system_message = messages[0]['content'] %}"
+            "{% elif USE_DEFAULT_PROMPT == true and not '<`q>' in messages[0]['content'] %}"
+            "{% set loop_messages = messages %}"
+            "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
+            "{% else %}"
+            "{% set loop_messages = messages %}"
+            "{% set system_message = false %}"
+            "{% endif %}"
+            "{% for message in loop_messages %}"  # Loop over all messages
+            "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
+            "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
+            "{% endif %}"
+            "{% if loop.index0 == 0 and system_message != false %}"
+            "{% set content = system_message + ' <`q> ' + message['content'] %}"
+            "{% else %}"
+            "{% set content = message['content'] %}"
+            "{% endif %}"
+            "{% if message['role'] == 'user' %}"
+            "{{ bos_token + ' ' + content + ' <`a>' }}"
+            "{% elif message['role'] == 'system' %}"
+            "{{ content + ' <`q>' }}"
+            "{% elif message['role'] == 'assistant' %}"
+            "{{ ' ' + content + ' ' + eos_token }}"
+            "{% endif %}"
+            "{% endfor %}"
+        )
+        template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
+        default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
+        template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
+
+        return template
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = bos_token_id + token_ids_0 + eos_token_id
+
+        if token_ids_1 is not None:
+            output = output + bos_token_id + token_ids_1 + eos_token_id
+
+        return output
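Finally, a sketch of the chat format that `default_chat_template` produces. Under transformers 4.39, `apply_chat_template` still falls back to `default_chat_template` when no `chat_template` is set; `tokenizer` here is assumed to be loaded as in the README example:

```python
# Sketch of the chat format default_chat_template produces (assumes
# `tokenizer` was loaded as in the README example, transformers 4.39).
messages = [
    {"role": "user", "content": "你好"},
    {"role": "assistant", "content": "你好,有什么可以帮你?"},
]
text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)
# User turns render as "<s> {content} <`a>", assistant turns as
# " {content} </s>"; a system prompt is prepended to the first user
# turn via " <`q> ".
```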