TheoSG committed on
Commit
a133671
·
verified ·
1 Parent(s): dd721b5

Delete untitled13.py

Browse files
Files changed (1) hide show
  1. untitled13.py +0 -334
untitled13.py DELETED
@@ -1,334 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """Untitled13.ipynb
3
-
4
- Automatically generated by Colab.
5
-
6
- Original file is located at
7
- https://colab.research.google.com/drive/1bSlUUtEKolJbG5_99MGcEiU95OVOBB-f
8
- """
9
-
10
- !pip install rouge_score torch_geometric
11
-
12
- import torch
13
- import torch.nn as nn
14
- import torch.nn.functional as F
15
- from transformers import T5ForConditionalGeneration, T5Tokenizer
16
- from transformers.modeling_outputs import BaseModelOutput
17
- from torch_geometric.nn import GINEConv, global_mean_pool
18
-
19
- import os
20
- import pickle
21
- from huggingface_hub import hf_hub_download
22
-
23
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
24
- LATENT_TOKENS = 192
25
- D_MODEL = 512
26
- GEN_MAX_LEN = 640
27
- SAVE_DIR = "./graph2latent_ckpts"
28
- BATCH_SIZE=8
29
- os.makedirs(SAVE_DIR, exist_ok=True)
30
-
31
- import torch
32
- import torch.nn as nn
33
- from transformers import T5Tokenizer, T5ForConditionalGeneration
34
- from transformers.modeling_outputs import BaseModelOutput
35
-
36
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
37
- MODEL_NAME = "t5-small"
38
- LATENT_TOKENS = 192
39
- D_MODEL = 512
40
- TRAIN_MAX_LEN = 384
41
- GEN_MAX_LEN = 640
42
-
43
-
44
class LatentPrefixAE(nn.Module):
    """Text autoencoder built on T5.

    ``encode`` maps raw text to a latent prefix of at most LATENT_TOKENS
    positions of width D_MODEL; ``decode`` regenerates text from such a
    prefix by feeding it to T5's decoder as if it were encoder output.
    """

    def __init__(self):
        super().__init__()

        self.tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
        self.model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)

        # Projection applied to the encoder hidden states. The attribute
        # name `from_enc` MUST be kept: saved checkpoints reference it.
        self.from_enc = nn.Sequential(
            nn.LayerNorm(D_MODEL),
            nn.Linear(D_MODEL, D_MODEL),
            nn.GELU(),
            nn.Linear(D_MODEL, D_MODEL),
        )

        self.to(DEVICE)
        self.model.to(DEVICE)

    @torch.no_grad()
    def encode(self, texts):
        """Tokenize *texts*, run the T5 encoder, and project the leading
        LATENT_TOKENS hidden states into the latent space."""
        batch = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=TRAIN_MAX_LEN,
            return_tensors="pt",
        ).to(DEVICE)

        hidden = self.model.encoder(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
        ).last_hidden_state

        # Keep only the first LATENT_TOKENS positions, then project.
        return self.from_enc(hidden[:, :LATENT_TOKENS, :])

    @torch.no_grad()
    def decode(self, latent):
        """Greedily decode text from a latent prefix (treated as encoder output)."""
        generated = self.model.generate(
            encoder_outputs=BaseModelOutput(last_hidden_state=latent),
            max_length=GEN_MAX_LEN,
            num_beams=1,
            do_sample=False,
        )
        return self.tokenizer.batch_decode(generated, skip_special_tokens=True)
89
-
90
class GraphToLatent(nn.Module):
    """GNN mapping a PyG graph batch to a latent prefix.

    Output shape is ``(num_graphs, latent_tokens, d_model)``, matching the
    latent space of ``LatentPrefixAE``.

    All constructor arguments default to the values that were previously
    hard-coded, so ``GraphToLatent()`` is byte-compatible with existing
    checkpoints (submodule names and creation order are unchanged).

    Args:
        node_dim: input node-feature dimension (default 9).
        edge_dim: input edge-feature dimension (default 3).
        hidden: message-passing hidden width (default 256).
        num_layers: number of GINE layers (default 3).
        latent_tokens: rows of the produced latent prefix (default 192).
        d_model: width of the produced latent prefix (default 512).
    """

    def __init__(self, node_dim=9, edge_dim=3, hidden=256, num_layers=3,
                 latent_tokens=192, d_model=512):
        super().__init__()
        self.latent_tokens = latent_tokens
        self.d_model = d_model

        # Project raw node features into the hidden width.
        self.node_proj = nn.Linear(node_dim, hidden)

        # Residual GINE message-passing stack (consumes edge features).
        self.convs = nn.ModuleList([
            GINEConv(
                nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(),
                              nn.Linear(hidden, hidden)),
                edge_dim=edge_dim,
            )
            for _ in range(num_layers)
        ])
        self.norms = nn.ModuleList([nn.LayerNorm(hidden) for _ in range(num_layers)])

        # Graph-level readout expanded to the full flattened latent prefix.
        self.readout = nn.Sequential(
            nn.Linear(hidden, 1024),
            nn.ReLU(),
            nn.Linear(1024, latent_tokens * d_model),
        )

    def forward(self, batch):
        """Return a ``(num_graphs, latent_tokens, d_model)`` latent prefix."""
        x = self.node_proj(batch.x.float())
        for conv, norm in zip(self.convs, self.norms):
            # Residual update with LayerNorm; edge attributes feed GINEConv.
            x = norm(x + conv(x, batch.edge_index, edge_attr=batch.edge_attr.float()))
        pooled = global_mean_pool(x, batch.batch)
        return self.readout(pooled).view(-1, self.latent_tokens, self.d_model)
116
-
117
- import torch.nn.functional as F
118
-
119
def latent_loss(pred, target):
    """Combined regression loss between predicted and target latents.

    Sum of the element-wise MSE and a small (0.01-weighted) cosine-distance
    term computed over per-sample flattened latents.
    """
    mse_term = F.mse_loss(pred, target)

    flat_pred = pred.flatten(1)
    flat_target = target.flatten(1)
    # 1 - mean cosine similarity == mean cosine distance across the batch.
    cos_term = 1 - F.cosine_similarity(flat_pred, flat_target, dim=-1).mean()

    return mse_term + 0.01 * cos_term
127
-
128
- from transformers.modeling_outputs import BaseModelOutput
129
- from tqdm import tqdm
130
-
131
def train_epoch(model, loader, optimizer, latent_ae, lambda_dec=0.5):
    """Run one optimization pass over *loader*.

    The objective is the latent regression loss plus a teacher-forced
    decoder cross-entropy (weighted by *lambda_dec*). Returns the mean
    total loss per batch. Only the parameters held by *optimizer* are
    updated.
    """
    model.train()
    running = 0.0

    for graph, latent, text in tqdm(loader, leave=False):
        graph, latent = graph.to(DEVICE), latent.to(DEVICE)

        # Graph -> latent prediction, perturbed with small Gaussian noise
        # so the decoder sees slightly imperfect latents (robustness).
        pred_latent = model(graph)
        pred_latent = pred_latent + 0.02 * torch.randn_like(pred_latent)

        regression = latent_loss(pred_latent, latent)

        # Teacher-forced decoding loss through the autoencoder's T5.
        tok = latent_ae.tokenizer(
            text,
            padding=True,
            truncation=True,
            max_length=384,
            return_tensors="pt",
        ).to(DEVICE)

        decoder_out = latent_ae.model(
            encoder_outputs=BaseModelOutput(last_hidden_state=pred_latent),
            labels=tok.input_ids,
        )

        batch_loss = regression + lambda_dec * decoder_out.loss

        optimizer.zero_grad()
        batch_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()

        running += batch_loss.item()

    return running / len(loader)
171
@torch.no_grad()
def eval_epoch(model, loader, latent_ae, lambda_dec=0.5):
    """Compute the mean validation loss (latent + *lambda_dec* * decoder loss).

    Mirrors ``train_epoch`` without the noise injection or gradient steps.
    """
    model.eval()
    running = 0.0

    for graph, latent, text in loader:
        graph, latent = graph.to(DEVICE), latent.to(DEVICE)

        pred_latent = model(graph)
        regression = latent_loss(pred_latent, latent)

        tok = latent_ae.tokenizer(
            text,
            padding=True,
            truncation=True,
            max_length=384,
            return_tensors="pt",
        ).to(DEVICE)

        decoder_out = latent_ae.model(
            encoder_outputs=BaseModelOutput(last_hidden_state=pred_latent),
            labels=tok.input_ids,
        )

        running += (regression + lambda_dec * decoder_out.loss).item()

    return running / len(loader)
199
-
200
- from nltk.translate.bleu_score import corpus_bleu
201
- from rouge_score import rouge_scorer
202
- import numpy as np
203
-
204
@torch.no_grad()
def evaluate_bleu_rouge(model, loader, latent_ae, max_print=10):
    """Decode predicted and gold latents to text and score BLEU / ROUGE-L.

    References are the autoencoder's own decodings of the ground-truth
    latents, so the metric isolates the graph->latent stage. Prints the
    scores and up to *max_print* example pairs, then returns (bleu, rouge).
    """
    model.eval()

    references, hypotheses = [], []

    for graph, latent, _ in tqdm(loader):
        graph = graph.to(DEVICE)

        predicted = latent_ae.decode(model(graph))
        gold = latent_ae.decode(latent.to(DEVICE))

        for ref_text, hyp_text in zip(gold, predicted):
            references.append([ref_text.split()])
            hypotheses.append(hyp_text.split())

    bleu = corpus_bleu(references, hypotheses)

    # Mean ROUGE-L F-measure over the corpus.
    scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
    rouge = np.mean([
        scorer.score(" ".join(ref[0]), " ".join(hyp))["rougeL"].fmeasure
        for ref, hyp in zip(references, hypotheses)
    ])

    print(f"\nBLEU = {bleu:.4f}")
    print(f"ROUGE = {rouge:.4f}")

    print("\n--- Examples ---")
    for i in range(min(max_print, len(hypotheses))):
        print(f"\nREF : {' '.join(references[i][0])}")
        print(f"PRED: {' '.join(hypotheses[i])}")

    return bleu, rouge
238
-
239
# --- Load graph datasets and the pretrained latent autoencoder --------------
# NOTE(security): pickle.load executes arbitrary code from the file; only
# load these artifacts from a trusted repository.
pkl = hf_hub_download("TheoSG/Altegrad", "train_graphs.pkl", repo_type="dataset")
with open(pkl, "rb") as f:  # context manager: the handle was previously leaked
    train_graphs = pickle.load(f)

pkl = hf_hub_download("TheoSG/Altegrad", "validation_graphs.pkl", repo_type="dataset")
with open(pkl, "rb") as f:
    val_graphs = pickle.load(f)

# Map graph ids to their text descriptions for dataset construction below.
train_id2text = {g.id: g.description for g in train_graphs}
val_id2text = {g.id: g.description for g in val_graphs}

from huggingface_hub import hf_hub_download  # notebook re-import, kept as-is

latent_ae = LatentPrefixAE()

ckpt = hf_hub_download(
    repo_id="TheoSG/Altegrad",
    filename="LatentPrefixAE.pt",
    repo_type="dataset"
)

# torch.load also unpickles — same trusted-source caveat applies.
state = torch.load(ckpt, map_location=DEVICE)
# strict=False: tolerate extra/missing keys in the stored state dict.
latent_ae.load_state_dict(state["model_state_dict"], strict=False)
latent_ae.eval()

print("✅ LatentPrefixAE loaded correctly (0.99 BLEU model)")
262
class GraphLatentDataset(torch.utils.data.Dataset):
    """Dataset yielding ``(graph, target latent, description text)`` triples.

    The target latent is computed on the fly by the autoencoder and padded
    (with zeros) or truncated to exactly LATENT_TOKENS rows of width D_MODEL.
    """

    def __init__(self, graphs, id2text, ae):
        self.graphs = graphs
        # Resolve each graph's description up front.
        self.texts = [id2text[g.id] for g in graphs]
        self.ae = ae

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        graph = self.graphs[idx]
        text = self.texts[idx]

        with torch.no_grad():
            latent = self.ae.encode([text])[0]

        n_rows = latent.size(0)
        if n_rows < LATENT_TOKENS:
            # Short text: zero-pad up to the fixed prefix length.
            filler = torch.zeros(LATENT_TOKENS - n_rows, D_MODEL).to(latent.device)
            latent = torch.cat([latent, filler], 0)
        else:
            latent = latent[:LATENT_TOKENS]

        return graph, latent, text
285
from torch_geometric.loader import DataLoader as PyGDataLoader

# --- Data pipeline ----------------------------------------------------------
train_ds = GraphLatentDataset(train_graphs, train_id2text, latent_ae)
val_ds = GraphLatentDataset(val_graphs, val_id2text, latent_ae)

train_loader = PyGDataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True)
val_loader = PyGDataLoader(val_ds, batch_size=BATCH_SIZE)

# --- Model & optimizer ------------------------------------------------------
model = GraphToLatent().to(DEVICE)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4)

EPOCHS = 5
lambda_dec = 0.5  # decoder importance

# --- Train / validate / checkpoint / score, once per epoch ------------------
for epoch in range(1, EPOCHS + 1):
    train_loss = train_epoch(model, train_loader, optimizer, latent_ae, lambda_dec)

    val_loss = eval_epoch(model, val_loader, latent_ae, lambda_dec)

    # Persist this epoch's weights.
    ckpt_path = f"{SAVE_DIR}/graph2latent_epoch{epoch}.pt"
    torch.save(model.state_dict(), ckpt_path)

    print(
        f"\nEpoch {epoch:02d} | "
        f"train_loss={train_loss:.4f} | "
        f"val_loss={val_loss:.4f}"
    )

    # BLEU / ROUGE every epoch
    bleu, rouge = evaluate_bleu_rouge(model, val_loader, latent_ae, max_print=10)

    print(f"\nBLEU = {bleu:.4f}")
    print(f"ROUGE = {rouge:.4f}")
334
-