farpluto committed
Commit 0667dc9 · verified · 1 Parent(s): 39e3041

Add inference_example.py

Files changed (1)
  1. inference_example.py +149 -0
inference_example.py ADDED
@@ -0,0 +1,149 @@
#!/usr/bin/env python3
"""
Minimal inference script for the Doc-to-LoRA Perceiver.
Requirements: pip install "transformers>=4.51.0" huggingface_hub torch
"""
import re

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import hf_hub_download, login

REPO_ID = "farpluto/doc-to-lora-niah"  # filled in automatically at packaging time
HF_TOKEN = None  # set your token here if the base model is gated

if HF_TOKEN:
    login(token=HF_TOKEN)

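# hypernet.pt bundles the hypernetwork weights with the settings it was trained under:
# the base model id, the name of the LoRA target module, lora_alpha, and the index of
# the early hidden-state layer used to encode documents.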
ckpt = torch.load(hf_hub_download(REPO_ID, "hypernet.pt", token=HF_TOKEN),
                  map_location="cuda", weights_only=False)
hcfg = ckpt["hypernet_cfg"]
BASE = ckpt["base_model"]
TGT = ckpt["target_module"]
ALPHA = ckpt["lora_alpha"]
EARLY = ckpt["early_exit"]

tokenizer = AutoTokenizer.from_pretrained(BASE, token=HF_TOKEN, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

llm = AutoModelForCausalLM.from_pretrained(
    BASE, token=HF_TOKEN, torch_dtype=torch.bfloat16,
    device_map="cuda", attn_implementation="sdpa", trust_remote_code=True)
llm.eval()
for p in llm.parameters():
    p.requires_grad_(False)

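# Perceiver-style encoder block: a small set of learned latent vectors cross-attends
# over the document's hidden states and is then updated by a feed-forward layer, with
# pre-norm residual connections around both sub-layers.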
class CrossAttentionBlock(nn.Module):
    def __init__(self, latent_dim, ctx_dim, n_heads=8):
        super().__init__()
        self.n_heads, self.head_dim = n_heads, latent_dim // n_heads
        self.norm_q = nn.LayerNorm(latent_dim)
        self.norm_ctx = nn.LayerNorm(ctx_dim)
        self.q_proj = nn.Linear(latent_dim, latent_dim, bias=False)
        self.k_proj = nn.Linear(ctx_dim, latent_dim, bias=False)
        self.v_proj = nn.Linear(ctx_dim, latent_dim, bias=False)
        self.o_proj = nn.Linear(latent_dim, latent_dim, bias=False)
        self.norm_ff = nn.LayerNorm(latent_dim)
        self.ff = nn.Sequential(nn.Linear(latent_dim, latent_dim * 4, bias=False),
                                nn.GELU(),
                                nn.Linear(latent_dim * 4, latent_dim, bias=False))

    def forward(self, latents, ctx, ctx_mask=None):
        B, L, D = latents.shape
        _, S, _ = ctx.shape
        H, Dh = self.n_heads, self.head_dim
        q = self.q_proj(self.norm_q(latents)).view(B, L, H, Dh).transpose(1, 2)
        k = self.k_proj(self.norm_ctx(ctx)).view(B, S, H, Dh).transpose(1, 2)
        v = self.v_proj(ctx).view(B, S, H, Dh).transpose(1, 2)
        bias = None
        if ctx_mask is not None:
            bias = (1.0 - ctx_mask.float()).unsqueeze(1).unsqueeze(2) * -1e4
            bias = bias.to(q.dtype)
        out = F.scaled_dot_product_attention(q, k, v, attn_mask=bias)
        latents = latents + self.o_proj(out.transpose(1, 2).contiguous().view(B, L, D))
        latents = latents + self.ff(self.norm_ff(latents))
        return latents

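# Hypernetwork head: the lora_r latent vectors are decoded into one LoRA pair per
# target layer. For layer i the returned factors have shapes (lora_r, target_in) and
# (target_out, lora_r), matching the usual low-rank update B @ A.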
class PerceiverHypernet(nn.Module):
    def __init__(self, ctx_dim, n_lora_layers, lora_r, target_in, target_out,
                 latent_dim=512, n_blocks=8):
        super().__init__()
        self.n_lora_layers = n_lora_layers
        self.din = target_in
        self.dout = target_out
        d = target_in + target_out
        self.ctx_proj = nn.Linear(ctx_dim, latent_dim, bias=False)
        self.ctx_norm = nn.LayerNorm(latent_dim)
        self.latent_q = nn.Parameter(torch.randn(lora_r, latent_dim) * latent_dim ** -0.5)
        self.blocks = nn.ModuleList(
            [CrossAttentionBlock(latent_dim, latent_dim) for _ in range(n_blocks)])
        self.head_w = nn.Parameter(torch.randn(n_lora_layers, latent_dim, d) * 0.01)
        self.head_b = nn.Parameter(torch.zeros(n_lora_layers, d))

    def forward(self, ctx_acts, ctx_mask=None):
        B = ctx_acts.shape[0]
        ctx = self.ctx_norm(self.ctx_proj(ctx_acts))
        lat = self.latent_q.unsqueeze(0).expand(B, -1, -1)
        for blk in self.blocks:
            lat = blk(lat, ctx, ctx_mask)
        flat = torch.einsum("brd,nde->bnre", lat, self.head_w)
        flat = flat + self.head_b.unsqueeze(0).unsqueeze(2)
        return flat[..., :self.din], flat[..., self.din:].transpose(-1, -2)

hypernet = PerceiverHypernet(**hcfg).to("cuda", dtype=torch.bfloat16)
hypernet.load_state_dict(ckpt["state_dict"])
hypernet.eval()
print("Perceiver loaded OK")

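# The base model may emit <think> ... </think> spans; record those token ids so the
# decoded answer can be stripped of any reasoning trace.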
_tok_open = tokenizer.convert_tokens_to_ids("<think>")
_tok_close = tokenizer.convert_tokens_to_ids("</think>")
THINK_TOKENS = {t for t in [_tok_open, _tok_close]
                if t not in (tokenizer.unk_token_id, None)}


def _strip_think(ids):
    toks = ids.tolist()
    if not THINK_TOKENS or not any(t in THINK_TOKENS for t in toks):
        return tokenizer.decode(toks, skip_special_tokens=True).strip()
    clean, inside = [], False
    op, cl = min(THINK_TOKENS), max(THINK_TOKENS)
    for t in toks:
        if t == op:
            inside = True
        elif t == cl:
            inside = False
        elif not inside:
            clean.append(t)
    return tokenizer.decode(clean, skip_special_tokens=True).strip()

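# Collect every nn.Linear whose name contains the target module string (e.g. one per
# transformer layer), sorted by the layer index embedded in the module name.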
def _sorted_mods(model, mod_name):
    mods = [(n, m) for n, m in model.named_modules()
            if mod_name in n and isinstance(m, nn.Linear)]

    def _idx(name):
        nums = re.findall(r"\d+", name)
        return int(nums[0]) if nums else -1

    return sorted(mods, key=lambda x: _idx(x[0]))


target_mods = _sorted_mods(llm, TGT)
scale = ALPHA / hcfg["lora_r"]

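# Inference: (1) run the document through the frozen LLM and take the hidden states at
# layer EARLY, (2) let the hypernetwork predict per-layer LoRA factors from them,
# (3) register forward hooks that add scale * (x @ A_i.T) @ B_i.T to each target
# module's output (scale = lora_alpha / lora_r), then (4) greedily decode the answer
# from the query alone; the document itself never appears in the prompt.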
@torch.no_grad()
def internalize_and_query(document, query, max_new_tokens=12):
    ctx_ids = torch.tensor(
        [tokenizer.encode(document, add_special_tokens=False)], device="cuda")
    ctx_mask = torch.ones_like(ctx_ids)
    qry_ids = torch.tensor(
        [tokenizer.encode(query + " /no_think", add_special_tokens=True)], device="cuda")
    acts = llm(input_ids=ctx_ids, attention_mask=ctx_mask,
               output_hidden_states=True, use_cache=False).hidden_states[EARLY]
    A, B = hypernet(acts, ctx_mask)
    A, B = A.squeeze(0), B.squeeze(0)
    hooks = []

    def _mkhook(Ai, Bi):
        def h(mod, inp, out):
            return out + scale * (inp[0] @ Ai.t()) @ Bi.t()
        return h

    for i, (_, mod) in enumerate(target_mods):
        hooks.append(mod.register_forward_hook(_mkhook(A[i], B[i])))

    ids = qry_ids.clone()
    for _ in range(max_new_tokens):
        out = llm(input_ids=ids, attention_mask=torch.ones_like(ids), use_cache=False)
        nxt = out.logits[:, -1, :].argmax(-1, keepdim=True)
        ids = torch.cat([ids, nxt], dim=1)
        if nxt.item() == tokenizer.eos_token_id:
            break
    for h in hooks:
        h.remove()
    return _strip_think(ids[0, qry_ids.shape[1]:])


if __name__ == "__main__":
    doc = "The special magic number is 7341. The sky is blue today."
    ans = internalize_and_query(doc, "What is the special magic number?")
    print(f"Answer: {ans}")