"""
App 安装序列 风控模型 — 完整代码模板
======================================
方法: CoLES (Contrastive Learning for Event Sequences) + GRU
论文: arxiv:2002.08232 (KDD 2022)
依据: EBES 2024 benchmark 验证 GRU+CoLES 在金融序列上排名第一

使用方式:
1. 替换 `load_your_data()` 为你自己的数据加载逻辑
2. 调整 `CONFIG` 中的超参数
3. 先跑 Stage 1 (无监督预训练),再跑 Stage 2 (有监督微调)

依赖: pip install pytorch-lifestream torch scikit-learn lightgbm pandas numpy
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from typing import List, Dict, Tuple, Optional
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
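
# ------------------------------------------------------------
# Quick-start sketch (illustration only, not executed here; the parquet
# path and the `labels` dict are placeholders you must supply):
#
#   raw_df = pd.read_parquet("installs.parquet")  # columns as documented
#                                                 # in preprocess_app_sequence
#   user_sequences = preprocess_app_sequence(raw_df)
#   pretrained = pretrain_coles(user_sequences)   # Stage 1: no labels needed
#   clf, auc = finetune_classifier(pretrained, user_sequences, labels)  # Stage 2
# ------------------------------------------------------------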

# ============================================================
# CONFIG — all hyperparameters in one place
# ============================================================
CONFIG = {
    # Data
    "max_seq_len": 200,           # keep the most recent 200 installs; drop the oldest beyond that
    "app_vocab_size": 50000,      # top 50K apps; the long tail is merged into <OTHER>
    "app_category_size": 256,     # number of top-level app categories
    "app_source_size": 8,         # install source (app store / browser / preinstalled / ...)

    # Embedding dimensions
    "app_id_embed_dim": 32,       # app_id embedding dim
    "app_category_embed_dim": 16, # category embedding dim
    "app_source_embed_dim": 4,    # source embedding dim
    "time_feat_dim": 8,           # time-feature dim (sine/cosine encoding)

    # Sequence encoder (GRU)
    "hidden_size": 256,           # GRU hidden size (the paper suggests 256-512)
    "num_layers": 2,              # number of GRU layers
    "dropout": 0.1,
    "bidirectional": False,       # unidirectional GRU (time only flows forward)

    # CoLES contrastive learning
    "num_sub_slices": 4,          # sample K=4 sub-sequences per user for contrast
    "contrastive_margin": 0.5,    # contrastive margin (unused by the InfoNCE-style loss below)
    "temperature": 0.07,          # InfoNCE temperature

    # Training
    "pretrain_lr": 1e-3,          # pretraining learning rate
    "finetune_lr": 5e-4,          # fine-tuning learning rate
    "batch_size": 256,
    "pretrain_epochs": 30,
    "finetune_epochs": 20,
    "weight_decay": 1e-5,

    # Downstream classifier
    "classifier_hidden": 128,
    "num_classes": 1,             # binary classification (default / non-default)
}


# ============================================================
# Data preprocessing
# ============================================================
class AppInstallEvent:
    """A single app-install event."""
    def __init__(self, app_id: int, category_id: int, source_id: int,
                 timestamp: float, time_delta: float = 0.0):
        self.app_id = app_id
        self.category_id = category_id
        self.source_id = source_id
        self.timestamp = timestamp
        self.time_delta = time_delta  # days since the previous install


def preprocess_app_sequence(raw_df: pd.DataFrame) -> Dict[int, List[AppInstallEvent]]:
    """
    Expected input DataFrame columns:
        user_id | app_id | app_category | install_source | install_timestamp

    Returns: {user_id: [AppInstallEvent, ...]}, sorted by install time.
    """
    user_sequences = {}
    
    for user_id, group in raw_df.groupby('user_id'):
        group = group.sort_values('install_timestamp')
        events = []
        prev_time = None
        
        for _, row in group.iterrows():
            time_delta = 0.0
            if prev_time is not None:
                time_delta = (row['install_timestamp'] - prev_time) / 86400.0  # seconds -> days
            
            event = AppInstallEvent(
                app_id=row['app_id'],
                category_id=row['app_category'],
                source_id=row['install_source'],
                timestamp=row['install_timestamp'],
                time_delta=time_delta
            )
            events.append(event)
            prev_time = row['install_timestamp']
        
        # Truncate: keep only the most recent max_seq_len events
        if len(events) > CONFIG['max_seq_len']:
            events = events[-CONFIG['max_seq_len']:]
        
        user_sequences[user_id] = events
    
    return user_sequences


def sine_cosine_time_encoding(time_delta: float, periods=(1, 7, 30, 365)) -> np.ndarray:
    """
    Periodic sine/cosine time encoding (from the LBSF paper, arXiv:2411.15056).
    Encodes the inter-install gap with sin/cos at several periods to capture
    daily / weekly / monthly / yearly periodicity.
    """
    embeddings = []
    for T in periods:
        embeddings.append(np.cos(2 * np.pi * time_delta / T))
        embeddings.append(np.sin(2 * np.pi * time_delta / T))
    return np.array(embeddings, dtype=np.float32)
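
# Example: sine_cosine_time_encoding(3.0) returns the 8 values
# [cos(2π·3/1), sin(2π·3/1), cos(2π·3/7), sin(2π·3/7), ...] for the
# 1/7/30/365-day periods; 2 * len(periods) must equal CONFIG['time_feat_dim'].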


# ============================================================
# Dataset
# ============================================================
class AppSequenceDataset(Dataset):
    """Dataset of per-user app-install sequences."""
    
    def __init__(self, user_sequences: Dict[int, List[AppInstallEvent]], 
                 labels: Optional[Dict[int, int]] = None):
        self.user_ids = list(user_sequences.keys())
        self.sequences = user_sequences
        self.labels = labels
    
    def __len__(self):
        return len(self.user_ids)
    
    def __getitem__(self, idx):
        user_id = self.user_ids[idx]
        events = self.sequences[user_id]
        
        seq_len = len(events)
        app_ids = torch.zeros(CONFIG['max_seq_len'], dtype=torch.long)
        categories = torch.zeros(CONFIG['max_seq_len'], dtype=torch.long)
        sources = torch.zeros(CONFIG['max_seq_len'], dtype=torch.long)
        time_features = torch.zeros(CONFIG['max_seq_len'], CONFIG['time_feat_dim'])
        mask = torch.zeros(CONFIG['max_seq_len'], dtype=torch.bool)
        
        for i, event in enumerate(events):
            app_ids[i] = event.app_id
            categories[i] = event.category_id
            sources[i] = event.source_id
            time_features[i] = torch.from_numpy(
                sine_cosine_time_encoding(event.time_delta)
            )
            mask[i] = True
        
        sample = {
            'app_ids': app_ids,
            'categories': categories,
            'sources': sources,
            'time_features': time_features,
            'mask': mask,
            'seq_len': seq_len,
        }
        
        if self.labels is not None:
            sample['label'] = torch.tensor(self.labels[user_id], dtype=torch.float32)
        
        return sample


# ============================================================
# Model: event encoder + GRU sequence encoder
# ============================================================
class EventEncoder(nn.Module):
    """Encodes a single app-install event into a dense vector."""
    
    def __init__(self):
        super().__init__()
        self.app_embed = nn.Embedding(
            CONFIG['app_vocab_size'] + 1, CONFIG['app_id_embed_dim'], padding_idx=0
        )
        self.cat_embed = nn.Embedding(
            CONFIG['app_category_size'] + 1, CONFIG['app_category_embed_dim'], padding_idx=0
        )
        self.source_embed = nn.Embedding(
            CONFIG['app_source_size'] + 1, CONFIG['app_source_embed_dim'], padding_idx=0
        )
        
        self.event_dim = (CONFIG['app_id_embed_dim'] + 
                         CONFIG['app_category_embed_dim'] + 
                         CONFIG['app_source_embed_dim'] + 
                         CONFIG['time_feat_dim'])
        
        self.proj = nn.Linear(self.event_dim, CONFIG['hidden_size'])
        self.layer_norm = nn.LayerNorm(CONFIG['hidden_size'])
        self.dropout = nn.Dropout(CONFIG['dropout'])
    
    def forward(self, app_ids, categories, sources, time_features):
        app_emb = self.app_embed(app_ids)
        cat_emb = self.cat_embed(categories)
        src_emb = self.source_embed(sources)
        
        event_repr = torch.cat([app_emb, cat_emb, src_emb, time_features], dim=-1)
        event_repr = self.proj(event_repr)
        event_repr = self.layer_norm(event_repr)
        event_repr = self.dropout(event_repr)
        
        return event_repr


class GRUSequenceEncoder(nn.Module):
    """GRU 序列编码器 (CoLES 验证 GRU > LSTM > Transformer 在金融序列上)"""
    
    def __init__(self):
        super().__init__()
        self.event_encoder = EventEncoder()
        
        self.gru = nn.GRU(
            input_size=CONFIG['hidden_size'],
            hidden_size=CONFIG['hidden_size'],
            num_layers=CONFIG['num_layers'],
            batch_first=True,
            dropout=CONFIG['dropout'] if CONFIG['num_layers'] > 1 else 0,
            bidirectional=CONFIG['bidirectional']
        )
        
        gru_output_dim = CONFIG['hidden_size'] * (2 if CONFIG['bidirectional'] else 1)
        self.output_proj = nn.Linear(gru_output_dim, CONFIG['hidden_size'])
    
    def forward(self, app_ids, categories, sources, time_features, mask):
        event_repr = self.event_encoder(app_ids, categories, sources, time_features)
        
        lengths = mask.sum(dim=1).cpu()
        packed = nn.utils.rnn.pack_padded_sequence(
            event_repr, lengths, batch_first=True, enforce_sorted=False
        )
        
        packed_output, hidden = self.gru(packed)
        
        if CONFIG['bidirectional']:
            user_embedding = torch.cat([hidden[-2], hidden[-1]], dim=-1)
        else:
            user_embedding = hidden[-1]
        
        user_embedding = self.output_proj(user_embedding)
        return user_embedding


# ============================================================
# Stage 1: CoLES self-supervised pretraining (no labels needed)
# ============================================================
class CoLESModel(nn.Module):
    """CoLES objective: different time slices of the same user should embed
    close together; slices from different users should embed far apart."""
    
    def __init__(self):
        super().__init__()
        self.encoder = GRUSequenceEncoder()
    
    def forward(self, batch):
        return self.encoder(
            batch['app_ids'], batch['categories'],
            batch['sources'], batch['time_features'], batch['mask']
        )


def sample_sub_sequence(events: List[AppInstallEvent], min_len: int = 5) -> List[AppInstallEvent]:
    """Core of CoLES: cut a random contiguous sub-sequence out of the full sequence."""
    seq_len = len(events)
    if seq_len <= min_len:
        return events
    
    start = np.random.randint(0, max(1, seq_len - min_len))
    end = np.random.randint(start + min_len, min(seq_len + 1, start + CONFIG['max_seq_len']))
    
    return events[start:end]


def coles_contrastive_loss(embeddings: torch.Tensor, num_sub_slices: int = 4):
    """CoLES loss: pull sub-sequence embeddings of the same user together, push different users apart."""
    batch_size = embeddings.shape[0] // num_sub_slices
    device = embeddings.device
    
    embeddings = F.normalize(embeddings, p=2, dim=1)
    sim_matrix = torch.mm(embeddings, embeddings.t()) / CONFIG['temperature']
    
    labels = torch.arange(batch_size).repeat_interleave(num_sub_slices).to(device)
    positive_mask = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
    positive_mask.fill_diagonal_(0)
    
    exp_sim = torch.exp(sim_matrix)
    exp_sim.fill_diagonal_(0)
    
    pos_sim = (exp_sim * positive_mask).sum(dim=1)
    all_sim = exp_sim.sum(dim=1)
    
    loss = -torch.log(pos_sim / (all_sim + 1e-8) + 1e-8).mean()
    return loss
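
# Layout contract (sketch): rows [i*K, (i+1)*K) of `embeddings` must all come
# from user i; pretrain_coles() below builds its batches in exactly that
# order. A quick smoke test:
#
#   emb = torch.randn(8 * 4, CONFIG['hidden_size'])  # 8 users x K=4 slices
#   assert coles_contrastive_loss(emb, num_sub_slices=4).item() > 0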


def pretrain_coles(user_sequences: Dict[int, List[AppInstallEvent]], epochs: Optional[int] = None):
    """Stage 1: unsupervised CoLES pretraining; no labels needed."""
    if epochs is None:
        epochs = CONFIG['pretrain_epochs']
    
    model = CoLESModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=CONFIG['pretrain_lr'], weight_decay=CONFIG['weight_decay'])
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    
    user_ids = list(user_sequences.keys())
    batch_size = CONFIG['batch_size']
    K = CONFIG['num_sub_slices']
    
    logger.info(f"Starting CoLES pretraining: {len(user_ids)} users, {epochs} epochs")
    
    for epoch in range(epochs):
        model.train()
        total_loss = 0
        num_batches = 0
        
        np.random.shuffle(user_ids)
        
        for batch_start in range(0, len(user_ids), batch_size):
            batch_users = user_ids[batch_start:batch_start + batch_size]
            
            all_sub_seqs = []
            for uid in batch_users:
                events = user_sequences[uid]
                for _ in range(K):
                    sub_seq = sample_sub_sequence(events)
                    all_sub_seqs.append(sub_seq)
            
            actual_batch_size = len(all_sub_seqs)
            app_ids = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.long)
            categories = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.long)
            sources = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.long)
            time_features = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], CONFIG['time_feat_dim'])
            mask = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.bool)
            
            for i, events in enumerate(all_sub_seqs):
                for j, event in enumerate(events[:CONFIG['max_seq_len']]):
                    app_ids[i, j] = event.app_id
                    categories[i, j] = event.category_id
                    sources[i, j] = event.source_id
                    time_features[i, j] = torch.from_numpy(sine_cosine_time_encoding(event.time_delta))
                    mask[i, j] = True
            
            batch = {
                'app_ids': app_ids.to(device), 'categories': categories.to(device),
                'sources': sources.to(device), 'time_features': time_features.to(device),
                'mask': mask.to(device),
            }
            
            embeddings = model(batch)
            loss = coles_contrastive_loss(embeddings, num_sub_slices=K)
            
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            
            total_loss += loss.item()
            num_batches += 1
        
        scheduler.step()
        avg_loss = total_loss / max(num_batches, 1)
        logger.info(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}, LR: {scheduler.get_last_lr()[0]:.6f}")
    
    logger.info("CoLES pretraining complete!")
    return model


# ============================================================
# Stage 2: supervised fine-tuning / downstream classification
# ============================================================
class RiskClassifier(nn.Module):
    """Risk classification head: a frozen or fine-tuned CoLES encoder plus an MLP head."""
    
    def __init__(self, pretrained_encoder: GRUSequenceEncoder, freeze_encoder: bool = False):
        super().__init__()
        self.encoder = pretrained_encoder
        self.freeze_encoder = freeze_encoder
        
        if freeze_encoder:
            for param in self.encoder.parameters():
                param.requires_grad = False
        
        self.classifier = nn.Sequential(
            nn.Linear(CONFIG['hidden_size'], CONFIG['classifier_hidden']),
            nn.ReLU(),
            nn.Dropout(CONFIG['dropout']),
            nn.Linear(CONFIG['classifier_hidden'], CONFIG['classifier_hidden'] // 2),
            nn.ReLU(),
            nn.Dropout(CONFIG['dropout']),
            nn.Linear(CONFIG['classifier_hidden'] // 2, 1),
        )
    
    def forward(self, app_ids, categories, sources, time_features, mask):
        if self.freeze_encoder:
            with torch.no_grad():
                user_emb = self.encoder(app_ids, categories, sources, time_features, mask)
        else:
            user_emb = self.encoder(app_ids, categories, sources, time_features, mask)
        
        logits = self.classifier(user_emb).squeeze(-1)
        return logits
    
    def get_user_embedding(self, app_ids, categories, sources, time_features, mask):
        """导出用户向量(用于接 LightGBM)"""
        with torch.no_grad():
            return self.encoder(app_ids, categories, sources, time_features, mask)


def finetune_classifier(pretrained_model: CoLESModel,
                        user_sequences: Dict[int, List[AppInstallEvent]],
                        labels: Dict[int, int],
                        freeze_encoder: bool = False):
    """Stage 2: 有监督微调"""
    user_ids = list(labels.keys())
    train_ids, val_ids = train_test_split(user_ids, test_size=0.2, 
                                           stratify=[labels[uid] for uid in user_ids], random_state=42)
    
    train_seqs = {uid: user_sequences[uid] for uid in train_ids}
    val_seqs = {uid: user_sequences[uid] for uid in val_ids}
    train_labels = {uid: labels[uid] for uid in train_ids}
    val_labels = {uid: labels[uid] for uid in val_ids}
    
    train_dataset = AppSequenceDataset(train_seqs, train_labels)
    val_dataset = AppSequenceDataset(val_seqs, val_labels)
    
    train_loader = DataLoader(train_dataset, batch_size=CONFIG['batch_size'], shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=CONFIG['batch_size'])
    
    classifier = RiskClassifier(pretrained_model.encoder, freeze_encoder=freeze_encoder)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    classifier = classifier.to(device)
    
    num_pos = sum(labels.values())
    num_neg = len(labels) - num_pos
    pos_weight = torch.tensor([num_neg / max(num_pos, 1)]).to(device)
    logger.info(f"Class balance: pos={num_pos}, neg={num_neg}, pos_weight={pos_weight.item():.2f}")
    
    criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    optimizer = torch.optim.AdamW(
        filter(lambda p: p.requires_grad, classifier.parameters()),
        lr=CONFIG['finetune_lr'], weight_decay=CONFIG['weight_decay']
    )
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3)
    
    best_auc = 0
    patience_counter = 0
    max_patience = 7
    
    for epoch in range(CONFIG['finetune_epochs']):
        classifier.train()
        train_loss = 0
        for batch in train_loader:
            logits = classifier(
                batch['app_ids'].to(device), batch['categories'].to(device),
                batch['sources'].to(device), batch['time_features'].to(device),
                batch['mask'].to(device)
            )
            loss = criterion(logits, batch['label'].to(device))
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(classifier.parameters(), max_norm=1.0)
            optimizer.step()
            train_loss += loss.item()
        
        classifier.eval()
        val_preds = []
        val_labels_list = []
        with torch.no_grad():
            for batch in val_loader:
                logits = classifier(
                    batch['app_ids'].to(device), batch['categories'].to(device),
                    batch['sources'].to(device), batch['time_features'].to(device),
                    batch['mask'].to(device)
                )
                probs = torch.sigmoid(logits).cpu().numpy()
                val_preds.extend(probs)
                val_labels_list.extend(batch['label'].numpy())
        
        val_auc = roc_auc_score(val_labels_list, val_preds)
        scheduler.step(val_auc)
        
        avg_train_loss = train_loss / len(train_loader)
        logger.info(f"Epoch {epoch+1}/{CONFIG['finetune_epochs']}, Train Loss: {avg_train_loss:.4f}, Val AUC: {val_auc:.4f}")
        
        if val_auc > best_auc:
            best_auc = val_auc
            patience_counter = 0
            torch.save(classifier.state_dict(), 'best_app_sequence_model.pt')
            logger.info(f"  → New best AUC: {best_auc:.4f}, model saved!")
        else:
            patience_counter += 1
            if patience_counter >= max_patience:
                logger.info(f"Early stopping at epoch {epoch+1}")
                break
    
    logger.info(f"Fine-tuning complete. Best Val AUC: {best_auc:.4f}")
    return classifier, best_auc


# ============================================================
# Option B: export CoLES vectors → LightGBM (the paper's recommended setup)
# ============================================================
def extract_embeddings_for_lgbm(pretrained_model: CoLESModel,
                                 user_sequences: Dict[int, List[AppInstallEvent]],
                                 labels: Dict[int, int]):
    """
    Export user embeddings and train LightGBM on top.
    This is the best-performing setup in the CoLES paper: pretrained 256-d
    vectors fed into a gradient-boosted tree classifier.
    """
    try:
        import lightgbm as lgb
    except ImportError:
        logger.error("Please install lightgbm: pip install lightgbm")
        return None
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    pretrained_model = pretrained_model.to(device)
    pretrained_model.eval()
    
    dataset = AppSequenceDataset(user_sequences, labels)
    loader = DataLoader(dataset, batch_size=CONFIG['batch_size'])
    
    all_embeddings = []
    all_labels = []
    
    with torch.no_grad():
        for batch in loader:
            # The loader yields CPU tensors; move them to the model's device
            batch = {k: v.to(device) for k, v in batch.items()}
            emb = pretrained_model(batch)
            all_embeddings.append(emb.cpu().numpy())
            all_labels.append(batch['label'].cpu().numpy())
    
    X = np.concatenate(all_embeddings, axis=0)
    y = np.concatenate(all_labels, axis=0)
    
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
    
    lgb_params = {
        'objective': 'binary', 'metric': 'auc',
        'learning_rate': 0.05, 'num_leaves': 63, 'max_depth': 7,
        'min_child_samples': 20,
        'scale_pos_weight': sum(y_train == 0) / max(sum(y_train == 1), 1),
        'subsample': 0.8, 'colsample_bytree': 0.8, 'verbose': -1,
    }
    
    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)
    
    model = lgb.train(
        lgb_params, train_data, num_boost_round=500, valid_sets=[val_data],
        callbacks=[lgb.early_stopping(stopping_rounds=30), lgb.log_evaluation(50)]
    )
    
    val_pred = model.predict(X_val)
    val_auc = roc_auc_score(y_val, val_pred)
    
    from scipy.stats import ks_2samp
    ks_stat = ks_2samp(val_pred[y_val == 1], val_pred[y_val == 0]).statistic
    
    logger.info(f"LightGBM Results: AUC={val_auc:.4f}, KS={ks_stat:.4f}")
    return model, val_auc, ks_stat
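
# Usage sketch (after Stage 1 pretraining; `labels` maps user_id -> 0/1):
#   lgb_model, auc, ks = extract_embeddings_for_lgbm(pretrained_model,
#                                                    user_sequences, labels)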


# ============================================================
# Graph-augmented: app co-install graph (arXiv:2604.09085)
# ============================================================
class AppCoInstallGraph:
    """
    Build an app co-install graph: two apps get an edge if they are frequently
    installed by the same set of users. Train app embeddings on this graph
    with Node2Vec/GraphSAGE and use them in place of the randomly initialized
    app embeddings (see load_graph_embeddings_into_encoder below).
    Reported result: AUC +2.3% over vanilla CoLES.
    """
    
    def __init__(self, user_sequences: Dict[int, List[AppInstallEvent]]):
        self.user_sequences = user_sequences
    
    def build_cooccurrence_matrix(self, min_cooccur: int = 5) -> Dict[Tuple[int, int], float]:
        """Build the app co-occurrence (co-install) matrix."""
        from collections import Counter, defaultdict

        app_user_count = Counter()
        co_occurrence = defaultdict(int)

        for user_id, events in self.user_sequences.items():
            user_apps = list(set(e.app_id for e in events))
            for app in user_apps:
                app_user_count[app] += 1
            # Cap pair enumeration at 50 apps per user to bound the O(n^2) cost
            capped = user_apps[:50]
            for i in range(len(capped)):
                for j in range(i + 1, len(capped)):
                    pair = tuple(sorted((capped[i], capped[j])))
                    co_occurrence[pair] += 1
        
        edges = {}
        for (app_i, app_j), count in co_occurrence.items():
            if count >= min_cooccur:
                weight = count / np.log(app_user_count[app_i] * app_user_count[app_j] + 1)
                edges[(app_i, app_j)] = weight
        
        logger.info(f"Built co-install graph: {len(edges)} edges")
        return edges
    
    def train_node2vec_embeddings(self, edges: dict, embed_dim: int = 32):
        """Train app graph embeddings with Node2Vec (pip install node2vec networkx)."""
        try:
            import networkx as nx
            from node2vec import Node2Vec
        except ImportError:
            logger.error("Please install: pip install node2vec networkx")
            return None
        
        G = nx.Graph()
        for (app_i, app_j), weight in edges.items():
            G.add_edge(app_i, app_j, weight=weight)
        
        node2vec = Node2Vec(G, dimensions=embed_dim, walk_length=30, num_walks=200, p=1, q=0.5, workers=4)
        model = node2vec.fit(window=10, min_count=1)
        
        app_embeddings = {}
        for node in G.nodes():
            app_embeddings[node] = model.wv[str(node)]
        
        logger.info(f"Node2Vec trained: {len(app_embeddings)} app embeddings")
        return app_embeddings
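

def load_graph_embeddings_into_encoder(encoder: EventEncoder,
                                       app_embeddings: Dict[int, np.ndarray]):
    """
    Illustrative helper (an assumption, not from the referenced papers): copy
    Node2Vec app vectors into the encoder's app_embed table, i.e. the "use
    them in place of the randomly initialized app embeddings" step described
    in AppCoInstallGraph. Assumes the Node2Vec embed_dim equals
    CONFIG['app_id_embed_dim']; apps absent from the graph keep their random
    initialization.
    """
    with torch.no_grad():
        weight = encoder.app_embed.weight  # [app_vocab_size + 1, app_id_embed_dim]
        for app_id, vec in app_embeddings.items():
            if 0 < app_id < weight.shape[0]:
                weight[app_id] = torch.from_numpy(np.asarray(vec, dtype=np.float32))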


# ============================================================
# End-to-end example pipeline
# ============================================================
def main():
    logger.info("=" * 60)
    logger.info("App install sequence risk model — full training pipeline")
    logger.info("=" * 60)
    
    # ---- 1. Load data (replace this synthetic demo with your own loader) ----
    logger.info("Step 1: Loading data (demo with synthetic data)...")
    np.random.seed(42)
    num_users = 10000
    
    records = []
    labels = {}
    for uid in range(num_users):
        num_installs = np.random.randint(10, 200)
        base_time = 1700000000
        for i in range(num_installs):
            records.append({
                'user_id': uid,
                'app_id': np.random.randint(1, CONFIG['app_vocab_size']),
                'app_category': np.random.randint(1, CONFIG['app_category_size']),
                'install_source': np.random.randint(1, CONFIG['app_source_size']),
                'install_timestamp': base_time + i * np.random.randint(3600, 86400 * 7),
            })
        labels[uid] = int(np.random.random() < 0.05)  # 5% bad-debt rate
    
    raw_df = pd.DataFrame(records)
    logger.info(f"  Users: {num_users}, Total installs: {len(records)}, "
               f"Default rate: {sum(labels.values())/len(labels)*100:.1f}%")
    
    # ---- 2. Preprocess ----
    logger.info("Step 2: Preprocessing sequences...")
    user_sequences = preprocess_app_sequence(raw_df)
    
    # ---- 3. (Optional) build the app co-install graph ----
    logger.info("Step 3: Building app co-install graph...")
    graph_builder = AppCoInstallGraph(user_sequences)
    edges = graph_builder.build_cooccurrence_matrix(min_cooccur=3)
    
    # ---- 4. CoLES unsupervised pretraining ----
    logger.info("Step 4: CoLES unsupervised pretraining...")
    pretrained_model = pretrain_coles(user_sequences, epochs=5)
    
    # ---- 5. Supervised fine-tuning ----
    logger.info("Step 5: Supervised fine-tuning...")
    classifier, best_auc = finetune_classifier(
        pretrained_model, user_sequences, labels, freeze_encoder=False
    )
    
    logger.info("=" * 60)
    logger.info(f"Training complete! Best AUC: {best_auc:.4f}")
    logger.info("=" * 60)
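

def score_users(classifier: RiskClassifier,
                user_sequences: Dict[int, List[AppInstallEvent]]) -> Dict[int, float]:
    """
    Inference sketch (an illustration added to this template): score unlabeled
    users with a fine-tuned RiskClassifier. Relies on AppSequenceDataset
    preserving `user_ids` order when labels is None and shuffle is off.
    """
    device = next(classifier.parameters()).device
    classifier.eval()
    dataset = AppSequenceDataset(user_sequences)  # no labels at inference time
    loader = DataLoader(dataset, batch_size=CONFIG['batch_size'], shuffle=False)
    scores: Dict[int, float] = {}
    idx = 0
    with torch.no_grad():
        for batch in loader:
            logits = classifier(
                batch['app_ids'].to(device), batch['categories'].to(device),
                batch['sources'].to(device), batch['time_features'].to(device),
                batch['mask'].to(device)
            )
            for p in torch.sigmoid(logits).cpu().numpy():
                scores[dataset.user_ids[idx]] = float(p)
                idx += 1
    return scores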


if __name__ == "__main__":
    main()