from transformers import PretrainedConfig


class CogniLiteConfig(PretrainedConfig):
    model_type = "minimind"

    def __init__(
        self,
        dropout: float = 0.0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        hidden_act: str = 'silu',
        hidden_size: int = 768,
        intermediate_size: int = None,
        max_position_embeddings: int = 32768,
        num_attention_heads: int = 8,
        num_hidden_layers: int = 16,
        num_key_value_heads: int = 2,
        vocab_size: int = 6400,
        rms_norm_eps: float = 1e-05,
        rope_theta: float = 1000000.0,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.dropout = dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.num_key_value_heads = num_key_value_heads
        self.vocab_size = vocab_size
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta

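# Construction sketch (values here are illustrative, not from the original script):
#     cfg = CogniLiteConfig(hidden_size=512, num_attention_heads=8)
#     cfg.hidden_size // cfg.num_attention_heads   # head_dim = 64
# With the defaults above, 8 query heads share 2 key/value heads (grouped-query attention).
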

import math
import torch
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, Tuple, List, Union
import torch.nn.functional as F


class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int, eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        # RMS normalization: x / sqrt(mean(x^2) + eps), computed over the last dim.
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        # Normalize in float32 for numerical stability, then cast back to the input dtype.
        return self.weight * self._norm(x.float()).type_as(x)

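# Minimal sanity sketch for RMSNorm (the helper name `_rmsnorm_demo` is ours,
# not part of the original script); call it manually to check shapes.
def _rmsnorm_demo():
    x = torch.randn(2, 4, 768)
    y = RMSNorm(768)(x)
    assert y.shape == x.shape  # normalization preserves shape
    return y
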

def precompute_freqs_cis(dim: int, end: int = int(32 * 1024), theta: float = 1e6):
    # Per-pair inverse frequencies: theta^(-2i/dim) for i in [0, dim/2).
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device)
    # Outer product: one row of angles per position, one column per frequency.
    freqs = torch.outer(t, freqs).float()
    # Duplicate along the last dim so cos/sin line up with rotate_half's half-split layout.
    freqs_cos = torch.cat([torch.cos(freqs), torch.cos(freqs)], dim=-1)
    freqs_sin = torch.cat([torch.sin(freqs), torch.sin(freqs)], dim=-1)
    return freqs_cos, freqs_sin

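# Table-shape sketch (helper name is illustrative): with the default head_dim of 96
# (768 hidden / 8 heads), every position gets a 96-wide cos row and sin row, and
# columns j and j+48 share the same angle, matching rotate_half below.
def _rope_table_demo():
    cos, sin = precompute_freqs_cis(dim=96, end=8)
    assert cos.shape == (8, 96)
    assert torch.allclose(cos[:, :48], cos[:, 48:])  # duplicated halves
    return cos, sin
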

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    def rotate_half(x):
        # Swap the two halves of the last dim and negate the (new) first half.
        return torch.cat((-x[..., x.shape[-1] // 2:], x[..., : x.shape[-1] // 2]), dim=-1)

    # Standard rotary embedding: q' = q*cos + rotate_half(q)*sin, same for k.
    # cos/sin of shape (seq_len, head_dim) broadcast across batch and heads
    # after the unsqueeze.
    q_embed = (q * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(q) * sin.unsqueeze(unsqueeze_dim))
    k_embed = (k * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(k) * sin.unsqueeze(unsqueeze_dim))
    return q_embed, k_embed

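# Correctness probe (helper name is illustrative): rotating by position 0 is the
# identity (cos=1, sin=0), so the first position must come back unchanged.
def _rope_rotation_demo():
    cos, sin = precompute_freqs_cis(dim=96, end=4)
    q = torch.randn(1, 4, 8, 96)
    k = torch.randn(1, 4, 2, 96)
    q2, k2 = apply_rotary_pos_emb(q, k, cos, sin)
    assert torch.allclose(q2[:, 0], q[:, 0], atol=1e-6)  # position 0 unchanged
    return q2, k2
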

def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Equivalent to torch.repeat_interleave(x, dim=2, repeats=n_rep)."""
    bs, slen, num_key_value_heads, head_dim = x.shape
    if n_rep == 1:
        return x
    # Insert a new axis, expand it (a view, no copy), then flatten it into the head axis.
    return (
        x[:, :, :, None, :]
        .expand(bs, slen, num_key_value_heads, n_rep, head_dim)
        .reshape(bs, slen, num_key_value_heads * n_rep, head_dim)
    )

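# GQA expansion sketch (helper name is illustrative): 2 KV heads serve 8 query
# heads, so n_rep = 4 and each KV head is duplicated into 4 consecutive slots.
def _repeat_kv_demo():
    k = torch.randn(1, 10, 2, 96)
    out = repeat_kv(k, 4)
    assert out.shape == (1, 10, 8, 96)
    assert torch.equal(out[:, :, 0], out[:, :, 3])  # first 4 heads copy KV head 0
    return out
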

class Attention(nn.Module):
    def __init__(self, args: CogniLiteConfig):
        super().__init__()
        self.num_key_value_heads = args.num_attention_heads if args.num_key_value_heads is None else args.num_key_value_heads
        assert args.num_attention_heads % self.num_key_value_heads == 0
        self.n_local_heads = args.num_attention_heads
        self.n_local_kv_heads = self.num_key_value_heads
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.hidden_size // args.num_attention_heads

        self.q_proj = nn.Linear(args.hidden_size, args.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(args.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(args.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(args.num_attention_heads * self.head_dim, args.hidden_size, bias=False)
        self.attn_dropout = nn.Dropout(args.dropout)
        self.resid_dropout = nn.Dropout(args.dropout)
        self.dropout = args.dropout

        # Use fused scaled-dot-product attention when this PyTorch build provides it.
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')

    def forward(self,
                x: torch.Tensor,
                position_embeddings: Tuple[torch.Tensor, torch.Tensor],
                past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                use_cache=False,
                attention_mask: Optional[torch.Tensor] = None):
        bsz, seq_len, _ = x.shape

        xq, xk, xv = self.q_proj(x), self.k_proj(x), self.v_proj(x)
        xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)

        # cos/sin arrive pre-sliced to this call's positions by CogniLiteModel.
        cos, sin = position_embeddings
        xq, xk = apply_rotary_pos_emb(xq, xk, cos[:seq_len], sin[:seq_len])

        # KV cache: prepend the cached keys/values along the sequence axis.
        if past_key_value is not None:
            xk = torch.cat([past_key_value[0], xk], dim=1)
            xv = torch.cat([past_key_value[1], xv], dim=1)
        past_kv = (xk, xv) if use_cache else None

        xq, xk, xv = (
            xq.transpose(1, 2),
            repeat_kv(xk, self.n_rep).transpose(1, 2),
            repeat_kv(xv, self.n_rep).transpose(1, 2)
        )

        if self.flash and seq_len != 1:
            dropout_p = self.dropout if self.training else 0.0
            if attention_mask is not None:
                # SDPA raises an error when attn_mask is combined with is_causal=True,
                # so fold the causal constraint into the boolean mask instead.
                kv_len = xk.size(-2)
                attn_mask = attention_mask.view(bsz, 1, 1, -1).expand(bsz, self.n_local_heads, seq_len, -1).bool()
                causal = torch.tril(torch.ones(seq_len, kv_len, dtype=torch.bool, device=x.device),
                                    diagonal=kv_len - seq_len)
                attn_mask = attn_mask & causal
                output = F.scaled_dot_product_attention(xq, xk, xv, attn_mask=attn_mask, dropout_p=dropout_p)
            else:
                output = F.scaled_dot_product_attention(xq, xk, xv, dropout_p=dropout_p, is_causal=True)
        else:
            scores = (xq @ xk.transpose(-2, -1)) / math.sqrt(self.head_dim)
            # Additive causal mask; the diagonal offset keeps cached positions
            # visible when the key sequence is longer than the query sequence.
            kv_len = xk.size(-2)
            scores = scores + torch.triu(
                torch.full((seq_len, kv_len), float("-inf"), device=scores.device),
                diagonal=1 + kv_len - seq_len
            ).unsqueeze(0).unsqueeze(0)

            if attention_mask is not None:
                # Padding mask: 0 entries become -1e9 so padded keys get ~zero weight.
                extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
                extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
                scores = scores + extended_attention_mask

            scores = F.softmax(scores.float(), dim=-1).type_as(xq)
            scores = self.attn_dropout(scores)
            output = scores @ xv

        output = output.transpose(1, 2).reshape(bsz, seq_len, -1)
        output = self.resid_dropout(self.o_proj(output))
        return output, past_kv

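# Forward-pass sketch with the default config (helper name is illustrative):
# (1, 16, 768) in and out, with a cache holding the 2 un-expanded KV heads.
def _attention_demo():
    attn = Attention(CogniLiteConfig())
    cos, sin = precompute_freqs_cis(dim=96, end=16)
    y, kv = attn(torch.randn(1, 16, 768), (cos, sin), use_cache=True)
    assert y.shape == (1, 16, 768) and kv[0].shape == (1, 16, 2, 96)
    return y, kv
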

class FeedForward(nn.Module):
    def __init__(self, config: CogniLiteConfig):
        super().__init__()
        if config.intermediate_size is None:
            # SwiGLU convention: ~8/3 of hidden_size, rounded up to a multiple of 64.
            intermediate_size = int(config.hidden_size * 8 / 3)
            config.intermediate_size = 64 * ((intermediate_size + 64 - 1) // 64)
        self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.dropout = nn.Dropout(config.dropout)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        # SwiGLU: down(act(gate(x)) * up(x)), followed by dropout.
        return self.dropout(self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)))

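# Worked rounding example for the default width: int(768 * 8 / 3) = 2048 and
# 64 * ((2048 + 63) // 64) = 2048, so hidden_size=768 yields an FFN width of
# exactly 2048 (already a multiple of 64); other sizes round up, never down.
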

class TransformerBlock(nn.Module):
    def __init__(self, layer_id: int, config: CogniLiteConfig):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_dim = config.hidden_size // config.num_attention_heads
        self.self_attn = Attention(config)

        self.layer_id = layer_id
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = FeedForward(config)

    def forward(self, hidden_states, position_embeddings, past_key_value=None, use_cache=False, attention_mask=None):
        # Pre-norm residual layout: x + Attn(LN(x)), then h + MLP(LN(h)).
        residual = hidden_states
        hidden_states, present_key_value = self.self_attn(
            self.input_layernorm(hidden_states), position_embeddings,
            past_key_value, use_cache, attention_mask
        )
        hidden_states = residual + hidden_states  # out-of-place add; += can break autograd
        hidden_states = hidden_states + self.mlp(self.post_attention_layernorm(hidden_states))
        return hidden_states, present_key_value

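# Block-level sanity sketch (helper name is illustrative): one pre-norm block
# preserves the hidden shape, (B, T, 768) -> (B, T, 768).
def _block_demo():
    blk = TransformerBlock(0, CogniLiteConfig())
    cos, sin = precompute_freqs_cis(dim=96, end=8)
    h, kv = blk(torch.randn(2, 8, 768), (cos, sin), use_cache=True)
    assert h.shape == (2, 8, 768)
    return h, kv
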

class CogniLiteModel(nn.Module):
    def __init__(self, config: CogniLiteConfig):
        super().__init__()
        self.config = config
        self.vocab_size, self.num_hidden_layers = config.vocab_size, config.num_hidden_layers
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
        self.dropout = nn.Dropout(config.dropout)
        self.layers = nn.ModuleList([TransformerBlock(l, config) for l in range(self.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        # Precompute RoPE tables once for the full context window; non-persistent
        # buffers move with .to(device) but stay out of the state dict.
        freqs_cos, freqs_sin = precompute_freqs_cis(dim=config.hidden_size // config.num_attention_heads,
                                                    end=config.max_position_embeddings, theta=config.rope_theta)
        self.register_buffer("freqs_cos", freqs_cos, persistent=False)
        self.register_buffer("freqs_sin", freqs_sin, persistent=False)

    def forward(self,
                input_ids: Optional[torch.Tensor] = None,
                attention_mask: Optional[torch.Tensor] = None,
                past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
                use_cache: bool = False,
                **kwargs):
        _, seq_length = input_ids.shape
        past_key_values = past_key_values or [None] * len(self.layers)
        # When a cache is present, new tokens start right after the cached positions.
        start_pos = past_key_values[0][0].shape[1] if past_key_values[0] is not None else 0

        hidden_states = self.dropout(self.embed_tokens(input_ids))

        # Slice the RoPE tables to exactly the positions covered by this call.
        position_embeddings = (
            self.freqs_cos[start_pos:start_pos + seq_length],
            self.freqs_sin[start_pos:start_pos + seq_length]
        )

        presents = []
        for layer, past_key_value in zip(self.layers, past_key_values):
            hidden_states, present = layer(
                hidden_states,
                position_embeddings,
                past_key_value=past_key_value,
                use_cache=use_cache,
                attention_mask=attention_mask
            )
            presents.append(present)

        hidden_states = self.norm(hidden_states)

        # The trailing 0 is a placeholder auxiliary loss (always 0 in this dense model).
        return hidden_states, presents, 0

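# Incremental-decoding sketch (helper name is illustrative): prefill once, then
# feed a single token with the returned cache; start_pos is recovered from the
# cached key length, so the RoPE slice lands on the right position.
def _cache_demo():
    core = CogniLiteModel(CogniLiteConfig())
    h, kvs, _ = core(torch.randint(0, 6400, (1, 8)), use_cache=True)
    h, kvs, _ = core(torch.randint(0, 6400, (1, 1)), past_key_values=kvs, use_cache=True)
    assert h.shape == (1, 1, 768)
    return h, kvs
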

class CogniLiteForCausalLM(nn.Module):
    def __init__(self, config: CogniLiteConfig = None):
        super().__init__()
        self.config = config or CogniLiteConfig()
        self.model = CogniLiteModel(self.config)
        self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)
        # Weight tying: the output projection shares the input embedding matrix.
        self.lm_head.weight = self.model.embed_tokens.weight

    def forward(self,
                input_ids: Optional[torch.Tensor] = None,
                attention_mask: Optional[torch.Tensor] = None,
                past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
                use_cache: bool = False,
                logits_to_keep: Union[int, torch.Tensor] = 0,
                **args):
        h, past_kvs, aux_loss = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            **args
        )
        # logits_to_keep > 0 projects only the last N positions (saves memory at
        # generation time); 0 keeps logits for every position.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) and logits_to_keep > 0 else slice(None)
        logits = self.lm_head(h[:, slice_indices, :])
        return {
            "last_hidden_state": h,
            "logits": logits,
            "aux_loss": aux_loss,
            "past_key_values": past_kvs
        }

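# Sketch (helper name is illustrative): greedy decoding only needs the final
# position, so a caller can pass logits_to_keep=1 and still read logits[0, -1].
def _logits_to_keep_demo():
    lm = CogniLiteForCausalLM(CogniLiteConfig())
    out = lm(input_ids=torch.randint(0, 6400, (1, 8)), logits_to_keep=1)
    assert out["logits"].shape == (1, 1, 6400)
    return out["logits"][0, -1].argmax()
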

import safetensors.torch
from transformers import AutoTokenizer


def init_cognilite_model():
    print("start loading CogniLite model...")

    # Only "device" is used below; the other keys just mirror the config defaults.
    args = {
        "device": "cuda" if torch.cuda.is_available() else "cpu",
        "hidden_size": 768,
        "num_hidden_layers": 16,
    }
    tokenizer = AutoTokenizer.from_pretrained('./tokenizer/')

    state_dict = safetensors.torch.load_file("model.safetensors", device=args["device"])

    model = CogniLiteForCausalLM(CogniLiteConfig())
    model.load_state_dict(state_dict, strict=True)

    print(f'Model parameter count: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')
    return model.eval().to(args["device"]), tokenizer

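# Note (an assumption about the checkpoint, not verified here): because
# lm_head.weight is tied to embed_tokens.weight, a checkpoint saved without a
# separate lm_head entry would raise a missing-key error under strict=True;
# a tolerant variant is model.load_state_dict(state_dict, strict=False).
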

import random
import numpy as np


def setup_seed(seed):
    # Seed every RNG in play (Python, NumPy, CPU and CUDA torch) and force
    # deterministic cuDNN kernels so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def communicate_with_model(random_seed):
    model, tokenizer = init_cognilite_model()

    print("Random seed:", random_seed)
    setup_seed(random_seed)

    prompt = input("You: ")

    messages = [{"role": "user", "content": prompt}]
    new_prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    device = "cuda" if torch.cuda.is_available() else "cpu"

    inputs = tokenizer(
        new_prompt,
        return_tensors="pt",
        truncation=True
    ).to(device)

    input_ids = inputs["input_ids"][0]
    attention_mask = inputs.get("attention_mask", None)
    max_new_tokens = 128
    eos_token_id = tokenizer.eos_token_id

    exit_reason = None
    token_list = []

    print("Model token output: [", end=' ')

    # Greedy decoding without a KV cache: the full sequence is re-encoded on
    # every step (simple, but O(n^2) overall; see the cached-decoding sketch
    # after this function).
    for _ in range(max_new_tokens):
        with torch.no_grad():
            outputs = model(
                input_ids=input_ids.unsqueeze(0),
                attention_mask=attention_mask
            )
        logits = outputs["logits"]

        # Greedy choice: argmax over the final position's logits.
        next_token_id = torch.argmax(logits[0, -1], dim=-1).unsqueeze(0)
        if next_token_id.item() == eos_token_id:
            exit_reason = "EOS token detected"
            break

        token_list.append(next_token_id.item())
        print(next_token_id.item(), end=' ', flush=True)

        input_ids = torch.cat([input_ids, next_token_id], dim=0)

        # Extend the padding mask by one position for the freshly appended token.
        if attention_mask is not None:
            attention_mask = torch.cat([attention_mask[0], torch.ones(1, device=device, dtype=attention_mask.dtype)], dim=0).unsqueeze(0)

    print("]\nModel text output: " + tokenizer.decode(token_list, skip_special_tokens=False))

    if exit_reason is None:
        print("\n Reason for ending: reached the max new-token limit.")
    elif exit_reason == "EOS token detected":
        print("\n Reason for ending: EOS token detected.")

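# Cached-decoding sketch (a hypothetical rewrite of the loop above, untested
# against a real checkpoint): keep past_key_values between steps and feed only
# the newest token, so the prompt is encoded once instead of every iteration:
#
#     out = model(input_ids=input_ids.unsqueeze(0), use_cache=True)
#     for _ in range(max_new_tokens):
#         next_id = out["logits"][0, -1].argmax().view(1, 1)
#         out = model(input_ids=next_id,
#                     past_key_values=out["past_key_values"], use_cache=True)
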

if __name__ == "__main__":
    random_type = input("Enter a random seed (integer): ")
    try:
        random_seed = int(random_type)
        if random_seed <= 0:
            print("Seed must be a positive integer; using a random value")
            random_seed = random.randint(0, 10000)
    except ValueError:
        print("Invalid random seed; using a random value")
        random_seed = random.randint(0, 10000)
    communicate_with_model(random_seed)