PhysiQuanty committed
Commit 333c419 · verified · Parent: 4ea680a

Delete infer.py

Files changed (1): infer.py (+0, −215)
infer.py DELETED
@@ -1,215 +0,0 @@
#!/usr/bin/env python3
# hf_infer_base2_strict_tabs.py
# ============================================================
# HF inference (CausalLM) in base-2
# - NO KV-cache (use_cache=False) => "as in training" (full forward)
# - manual token-by-token loop (not model.generate)
# - FINAL decoding via decode_base2_digits_strict (your function)
# - indentation WITH TABS
# ============================================================

import sys
import argparse
import random
import codecs
from typing import List, Dict
from collections import Counter

import torch
from transformers import AutoModelForCausalLM

def decode_base2_digits_strict(digits: List[int], *, encoding: str = "utf-8", errors: str = "replace") -> str:
	# Minimal filter: keep only 0/1 digits (just in case)
	bits: List[int] = []
	for d in digits:
		di = int(d)
		if di == 0 or di == 1:
			bits.append(di)

	n_full_bytes = len(bits) // 8
	if n_full_bytes <= 0:
		return ""

	out = bytearray(n_full_bytes)

	j = 0
	for i in range(n_full_bytes):
		# MSB -> LSB (bits[j] is the most significant bit)
		b = 0
		b = (b << 1) | bits[j + 0]
		b = (b << 1) | bits[j + 1]
		b = (b << 1) | bits[j + 2]
		b = (b << 1) | bits[j + 3]
		b = (b << 1) | bits[j + 4]
		b = (b << 1) | bits[j + 5]
		b = (b << 1) | bits[j + 6]
		b = (b << 1) | bits[j + 7]
		out[i] = b
		j += 8

	bb = bytes(out)

	# Robust UTF-8 decoding (handles multi-byte sequences cleanly)
	if encoding.lower() == "utf-8":
		inc = codecs.getincrementaldecoder("utf-8")(errors=errors)
		s = inc.decode(bb, final=False)
		s += inc.decode(b"", final=True)
		return s

	return bb.decode(encoding, errors=errors)

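# Illustrative check (added for clarity, not from the original file): the 16
# bits below are the UTF-8 bytes 0x48 0x69, so the strict decoder yields "Hi".
# decode_base2_digits_strict([0,1,0,0,1,0,0,0, 0,1,1,0,1,0,0,1]) == "Hi"
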
def apply_repetition_penalty_(logits: torch.Tensor, token_ids: List[int], penalty: float) -> None:
	if penalty is None or penalty == 1.0 or penalty <= 0:
		return
	for t in set(token_ids):
		val = logits[0, t]
		logits[0, t] = val * penalty if val < 0 else val / penalty

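# Illustrative (added for clarity, not from the original file): with
# penalty = 1.2, a seen token's logit of 2.0 shrinks to 2.0 / 1.2 ≈ 1.67,
# while -2.0 grows in magnitude to -2.4; repeats become less likely either way.
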
def apply_presence_frequency_penalties_(logits: torch.Tensor, token_ids: List[int], presence_penalty: float, frequency_penalty: float) -> None:
	counts = Counter(token_ids)
	if presence_penalty:
		for t in counts:
			logits[0, t] -= presence_penalty
	if frequency_penalty:
		for t, c in counts.items():
			logits[0, t] -= frequency_penalty * c

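# Illustrative (added for clarity, not from the original file): with
# token_ids = [1, 1, 0], presence_penalty = 0.5 and frequency_penalty = 0.1,
# logits[0, 1] drops by 0.5 + 0.1 * 2 = 0.7 and logits[0, 0] drops by
# 0.5 + 0.1 * 1 = 0.6 (presence is flat per distinct token, frequency scales
# with its count).
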
def get_banned_tokens_no_repeat_ngram(seq: List[int], n: int) -> set:
	if n <= 0 or len(seq) < n - 1:
		return set()

	prefix_len = n - 1
	ngrams: Dict[tuple, set] = {}
	for i in range(len(seq) - n + 1):
		prefix = tuple(seq[i:i + prefix_len])
		nxt = seq[i + prefix_len]
		ngrams.setdefault(prefix, set()).add(nxt)

	# Slice from an explicit start index: seq[-prefix_len:] would return the
	# whole sequence when prefix_len == 0 (the n == 1 case), not the empty prefix.
	return ngrams.get(tuple(seq[len(seq) - prefix_len:]), set())

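# Illustrative (added for clarity, not from the original file): seq = [0, 1, 0, 1]
# with n = 2 gives the bigram table {(0,): {1}, (1,): {0}}; the current prefix is
# (1,), so {0} is banned and the bigram (1, 0) cannot be generated again.
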
def mask_banned_tokens_(logits: torch.Tensor, banned: set) -> None:
	if banned:
		logits[0, list(banned)] = float("-inf")

def main() -> None:
	parser = argparse.ArgumentParser()

	parser.add_argument("--repo", type=str, required=True, help="local HF folder path (./hf_binaryllm_repo) or repo_id")
	parser.add_argument("--device", type=str, default="cuda", choices=["cpu", "cuda"])
	parser.add_argument("--seed", type=int, default=-1)

	parser.add_argument("--bos", type=int, default=2, help="BOS id (base2: BOS=2)")
	parser.add_argument("--eos", type=int, default=3, help="EOS id (base2: EOS=3)")
	parser.add_argument("--prompt_ids", type=str, default="2", help='prompt ids, e.g. "2" or "2,0,1,0"')

	parser.add_argument("--max_new_tokens", type=int, default=800)
	parser.add_argument("--temperature", type=float, default=0.7)
	parser.add_argument("--top_k", type=int, default=50)

	parser.add_argument("--repetition_penalty", type=float, default=1.0)
	parser.add_argument("--presence_penalty", type=float, default=0.0)
	parser.add_argument("--frequency_penalty", type=float, default=0.0)
	parser.add_argument("--no_repeat_ngram_size", type=int, default=0)

	parser.add_argument("--decode_encoding", type=str, default="utf-8")
	parser.add_argument("--decode_errors", type=str, default="replace")
	parser.add_argument("--print_ids", action="store_true")
	parser.add_argument("--stream", action="store_true", help="best-effort streaming (re-runs the strict decode at each step)")

	args = parser.parse_args()

	seed = args.seed if args.seed >= 0 else random.randint(0, 2**31 - 1)
	print(f"[Seed] {seed}")
	torch.manual_seed(seed)
	if torch.cuda.is_available():
		torch.cuda.manual_seed_all(seed)

	device = torch.device("cuda" if (args.device == "cuda" and torch.cuda.is_available()) else "cpu")
	print(f"[Device] {device}")

	# --------- Load HF model ---------
	m = AutoModelForCausalLM.from_pretrained(args.repo, trust_remote_code=True)
	m.to(device)
	m.eval()

	# IMPORTANT: no KV-cache (train-like)
	if hasattr(m, "config") and m.config is not None:
		m.config.use_cache = False
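	# Note (added for clarity): with the cache disabled, every sampling step
	# recomputes attention over the full sequence, so each step costs a full
	# forward pass over len(tokens) positions; slower than cached decoding,
	# but it matches the training-time forward path exactly.
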
	# --------- Build prompt ids ---------
	prompt_ids: List[int] = []
	for chunk in args.prompt_ids.split(","):
		s = chunk.strip()
		if s:
			prompt_ids.append(int(s))

	if not prompt_ids:
		prompt_ids = [int(args.bos)]

	tokens = torch.tensor([prompt_ids], dtype=torch.long, device=device)
	generated: List[int] = []
	last_text_len = 0

	print(f"[Prompt IDs] {prompt_ids}")
	print(f"[BOS] {args.bos} [EOS] {args.eos}")
	print()

	with torch.no_grad():
		for _ in range(int(args.max_new_tokens)):
			# full forward over the whole sequence, without cache
			out = m(input_ids=tokens, use_cache=False)
			logits = out.logits[:, -1, :]

			full_seq = tokens[0].tolist()

			apply_repetition_penalty_(logits, full_seq, float(args.repetition_penalty))
			apply_presence_frequency_penalties_(logits, full_seq, float(args.presence_penalty), float(args.frequency_penalty))

			if int(args.no_repeat_ngram_size) > 0:
				banned = get_banned_tokens_no_repeat_ngram(full_seq, int(args.no_repeat_ngram_size))
				mask_banned_tokens_(logits, banned)

			logits = logits / max(float(args.temperature), 1e-6)

			if 0 < int(args.top_k) < logits.size(-1):
				# keep only the top_k logits; everything below the k-th value is masked
				v, _ = torch.topk(logits, int(args.top_k))
				logits[logits < v[:, [-1]]] = float("-inf")

			probs = torch.softmax(logits, dim=-1)
			next_token = torch.multinomial(probs, 1)
			tok_id = int(next_token.item())

			if tok_id == int(args.eos):
				break

			tokens = torch.cat([tokens, next_token], dim=1)
			generated.append(tok_id)

			if args.stream:
				text = decode_base2_digits_strict(generated, encoding=args.decode_encoding, errors=args.decode_errors)
				if len(text) > last_text_len:
					sys.stdout.write(text[last_text_len:])
					sys.stdout.flush()
					last_text_len = len(text)

	if args.stream:
		print()

	print("\n[Final Output]\n")
	print(decode_base2_digits_strict(generated, encoding=args.decode_encoding, errors=args.decode_errors))

	if args.print_ids:
		print("\n[Generated IDs]\n")
		print(generated)


if __name__ == "__main__":
	main()
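
For reference, a typical invocation of the deleted script (flags as defined above; the repo path is the example from its own help text):

	python infer.py --repo ./hf_binaryllm_repo --device cuda --max_new_tokens 800 --temperature 0.7 --top_k 50 --stream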