| """ |
| App 安装序列 风控模型 — 完整代码模板 |
| ====================================== |
| 方法: CoLES (Contrastive Learning for Event Sequences) + GRU |
| 论文: arxiv:2002.08232 (KDD 2022) |
| 依据: EBES 2024 benchmark 验证 GRU+CoLES 在金融序列上排名第一 |
| |
| 使用方式: |
| 1. 替换 `load_your_data()` 为你自己的数据加载逻辑 |
| 2. 调整 `CONFIG` 中的超参数 |
| 3. 先跑 Stage 1 (无监督预训练),再跑 Stage 2 (有监督微调) |
| |
| 依赖: pip install pytorch-lifestream torch scikit-learn lightgbm pandas numpy |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from torch.utils.data import Dataset, DataLoader |
| import numpy as np |
| import pandas as pd |
| from sklearn.model_selection import train_test_split |
| from sklearn.metrics import roc_auc_score |
| from typing import List, Dict, Tuple, Optional |
| import logging |
|
|
| logging.basicConfig(level=logging.INFO) |
| logger = logging.getLogger(__name__) |
|
|
# ============================================================
# Configuration
# ============================================================
CONFIG = {
    # ---- sequence / vocabulary sizes ----
    "max_seq_len": 200,
    "app_vocab_size": 50000,
    "app_category_size": 256,
    "app_source_size": 8,

    # ---- embedding dimensions (event_dim = 32 + 16 + 4 + 8 = 60) ----
    "app_id_embed_dim": 32,
    "app_category_embed_dim": 16,
    "app_source_embed_dim": 4,
    "time_feat_dim": 8,  # must equal 2 * len(periods) in sine_cosine_time_encoding

    # ---- GRU encoder ----
    "hidden_size": 256,
    "num_layers": 2,
    "dropout": 0.1,
    "bidirectional": False,

    # ---- CoLES pretraining ----
    "num_sub_slices": 4,        # sub-sequences sampled per user per batch
    "contrastive_margin": 0.5,  # reserved; the softmax-style loss below does not use it
    "temperature": 0.07,

    # ---- optimization ----
    "pretrain_lr": 1e-3,
    "finetune_lr": 5e-4,
    "batch_size": 256,
    "pretrain_epochs": 30,
    "finetune_epochs": 20,
    "weight_decay": 1e-5,

    # ---- classifier head ----
    "classifier_hidden": 128,
    "num_classes": 1,           # binary risk: the head emits a single logit
}
|
|
# ============================================================
# Data structures & preprocessing
# ============================================================
| class AppInstallEvent: |
| """单个 App 安装事件""" |
| def __init__(self, app_id: int, category_id: int, source_id: int, |
| timestamp: float, time_delta: float = 0.0): |
| self.app_id = app_id |
| self.category_id = category_id |
| self.source_id = source_id |
| self.timestamp = timestamp |
| self.time_delta = time_delta |
|
|
|
|
| def preprocess_app_sequence(raw_df: pd.DataFrame) -> Dict[int, List[AppInstallEvent]]: |
| """ |
| 输入 DataFrame 格式: |
| user_id | app_id | app_category | install_source | install_timestamp |
| |
| 输出: {user_id: [AppInstallEvent, ...]} 按时间排序 |
| """ |
| user_sequences = {} |
| |
| for user_id, group in raw_df.groupby('user_id'): |
| group = group.sort_values('install_timestamp') |
| events = [] |
| prev_time = None |
| |
| for _, row in group.iterrows(): |
| time_delta = 0.0 |
| if prev_time is not None: |
| time_delta = (row['install_timestamp'] - prev_time) / 86400.0 |
| |
| event = AppInstallEvent( |
| app_id=row['app_id'], |
| category_id=row['app_category'], |
| source_id=row['install_source'], |
| timestamp=row['install_timestamp'], |
| time_delta=time_delta |
| ) |
| events.append(event) |
| prev_time = row['install_timestamp'] |
| |
        # keep only the most recent max_seq_len installs
        if len(events) > CONFIG['max_seq_len']:
            events = events[-CONFIG['max_seq_len']:]
| |
| user_sequences[user_id] = events |
| |
| return user_sequences |
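

# Illustrative usage sketch (not part of the pipeline): two installs 24 hours
# apart should yield time_delta == 1.0 day on the second event.
def _demo_preprocess_app_sequence():
    df = pd.DataFrame({
        'user_id': [1, 1],
        'app_id': [10, 20],
        'app_category': [3, 4],
        'install_source': [1, 2],
        'install_timestamp': [0, 86400],
    })
    seqs = preprocess_app_sequence(df)
    assert len(seqs[1]) == 2
    assert seqs[1][0].time_delta == 0.0  # first event has no predecessor
    assert seqs[1][1].time_delta == 1.0  # 86400 s == 1 day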
|
|
|
|
def sine_cosine_time_encoding(time_delta: float, periods=(1, 7, 30, 365)) -> np.ndarray:
    """
    Sine/cosine periodic time encoding (from the LBSF paper, arXiv:2411.15056).
    Encodes the inter-install gap (in days) as sin/cos pairs at several
    periods, capturing daily/weekly/monthly/yearly periodicity.
    """
    embeddings = []
    for T in periods:
        embeddings.append(np.cos(2 * np.pi * time_delta / T))
        embeddings.append(np.sin(2 * np.pi * time_delta / T))
    return np.array(embeddings, dtype=np.float32)
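

# Quick sanity check (illustrative only): a 7-day delta completes one full
# weekly cycle, so the period-7 pair returns to (cos, sin) = (1, 0).
def _demo_time_encoding():
    enc = sine_cosine_time_encoding(7.0)
    assert enc.shape == (8,)          # 4 periods x (cos, sin) = time_feat_dim
    assert abs(enc[2] - 1.0) < 1e-5   # cos term for T=7
    assert abs(enc[3]) < 1e-5         # sin term for T=7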
|
|
|
# ============================================================
# Dataset
# ============================================================
| class AppSequenceDataset(Dataset): |
| """App 安装序列数据集""" |
| |
| def __init__(self, user_sequences: Dict[int, List[AppInstallEvent]], |
| labels: Optional[Dict[int, int]] = None): |
| self.user_ids = list(user_sequences.keys()) |
| self.sequences = user_sequences |
| self.labels = labels |
| |
| def __len__(self): |
| return len(self.user_ids) |
| |
| def __getitem__(self, idx): |
| user_id = self.user_ids[idx] |
| events = self.sequences[user_id] |
| |
| seq_len = len(events) |
| app_ids = torch.zeros(CONFIG['max_seq_len'], dtype=torch.long) |
| categories = torch.zeros(CONFIG['max_seq_len'], dtype=torch.long) |
| sources = torch.zeros(CONFIG['max_seq_len'], dtype=torch.long) |
| time_features = torch.zeros(CONFIG['max_seq_len'], CONFIG['time_feat_dim']) |
| mask = torch.zeros(CONFIG['max_seq_len'], dtype=torch.bool) |
| |
| for i, event in enumerate(events): |
| app_ids[i] = event.app_id |
| categories[i] = event.category_id |
| sources[i] = event.source_id |
| time_features[i] = torch.from_numpy( |
| sine_cosine_time_encoding(event.time_delta) |
| ) |
| mask[i] = True |
| |
| sample = { |
| 'app_ids': app_ids, |
| 'categories': categories, |
| 'sources': sources, |
| 'time_features': time_features, |
| 'mask': mask, |
| 'seq_len': seq_len, |
| } |
| |
| if self.labels is not None: |
| sample['label'] = torch.tensor(self.labels[user_id], dtype=torch.float32) |
| |
| return sample |
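

# Minimal sanity check (illustrative): a one-event sequence produces a single
# valid mask position and carries its label through.
def _demo_dataset():
    ds = AppSequenceDataset({7: [AppInstallEvent(5, 2, 1, 0.0)]}, labels={7: 1})
    sample = ds[0]
    assert sample['mask'].sum().item() == 1
    assert sample['app_ids'][0].item() == 5
    assert sample['label'].item() == 1.0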
|
|
|
# ============================================================
# Model: event and sequence encoders
# ============================================================
| class EventEncoder(nn.Module): |
| """将单个 App 安装事件编码为 dense vector""" |
| |
| def __init__(self): |
| super().__init__() |
| self.app_embed = nn.Embedding( |
| CONFIG['app_vocab_size'] + 1, CONFIG['app_id_embed_dim'], padding_idx=0 |
| ) |
| self.cat_embed = nn.Embedding( |
| CONFIG['app_category_size'] + 1, CONFIG['app_category_embed_dim'], padding_idx=0 |
| ) |
| self.source_embed = nn.Embedding( |
| CONFIG['app_source_size'] + 1, CONFIG['app_source_embed_dim'], padding_idx=0 |
| ) |
| |
| self.event_dim = (CONFIG['app_id_embed_dim'] + |
| CONFIG['app_category_embed_dim'] + |
| CONFIG['app_source_embed_dim'] + |
| CONFIG['time_feat_dim']) |
| |
| self.proj = nn.Linear(self.event_dim, CONFIG['hidden_size']) |
| self.layer_norm = nn.LayerNorm(CONFIG['hidden_size']) |
| self.dropout = nn.Dropout(CONFIG['dropout']) |
| |
| def forward(self, app_ids, categories, sources, time_features): |
| app_emb = self.app_embed(app_ids) |
| cat_emb = self.cat_embed(categories) |
| src_emb = self.source_embed(sources) |
| |
| event_repr = torch.cat([app_emb, cat_emb, src_emb, time_features], dim=-1) |
| event_repr = self.proj(event_repr) |
| event_repr = self.layer_norm(event_repr) |
| event_repr = self.dropout(event_repr) |
| |
| return event_repr |
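

# Shape check (illustrative): each event maps to a hidden_size-dim vector,
# so the encoder output is (batch, seq_len, hidden_size).
def _demo_event_encoder():
    enc = EventEncoder()
    B, L = 2, CONFIG['max_seq_len']
    out = enc(
        torch.ones(B, L, dtype=torch.long),
        torch.ones(B, L, dtype=torch.long),
        torch.ones(B, L, dtype=torch.long),
        torch.zeros(B, L, CONFIG['time_feat_dim']),
    )
    assert out.shape == (B, L, CONFIG['hidden_size'])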
|
|
|
|
| class GRUSequenceEncoder(nn.Module): |
| """GRU 序列编码器 (CoLES 验证 GRU > LSTM > Transformer 在金融序列上)""" |
| |
| def __init__(self): |
| super().__init__() |
| self.event_encoder = EventEncoder() |
| |
| self.gru = nn.GRU( |
| input_size=CONFIG['hidden_size'], |
| hidden_size=CONFIG['hidden_size'], |
| num_layers=CONFIG['num_layers'], |
| batch_first=True, |
| dropout=CONFIG['dropout'] if CONFIG['num_layers'] > 1 else 0, |
| bidirectional=CONFIG['bidirectional'] |
| ) |
| |
| gru_output_dim = CONFIG['hidden_size'] * (2 if CONFIG['bidirectional'] else 1) |
| self.output_proj = nn.Linear(gru_output_dim, CONFIG['hidden_size']) |
| |
| def forward(self, app_ids, categories, sources, time_features, mask): |
| event_repr = self.event_encoder(app_ids, categories, sources, time_features) |
| |
        # guard against all-padding rows: pack_padded_sequence rejects zero lengths
        lengths = mask.sum(dim=1).clamp(min=1).cpu()
| packed = nn.utils.rnn.pack_padded_sequence( |
| event_repr, lengths, batch_first=True, enforce_sorted=False |
| ) |
| |
| packed_output, hidden = self.gru(packed) |
| |
| if CONFIG['bidirectional']: |
| user_embedding = torch.cat([hidden[-2], hidden[-1]], dim=-1) |
| else: |
| user_embedding = hidden[-1] |
| |
| user_embedding = self.output_proj(user_embedding) |
| return user_embedding |
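

# Ragged-length check (illustrative): the packed GRU consumes only the valid
# prefix of each row and returns one hidden_size-dim embedding per user.
def _demo_sequence_encoder():
    enc = GRUSequenceEncoder()
    B, L = 2, CONFIG['max_seq_len']
    mask = torch.zeros(B, L, dtype=torch.bool)
    mask[0, :10] = True  # user 0: 10 events
    mask[1, :3] = True   # user 1: 3 events
    emb = enc(
        torch.ones(B, L, dtype=torch.long),
        torch.ones(B, L, dtype=torch.long),
        torch.ones(B, L, dtype=torch.long),
        torch.zeros(B, L, CONFIG['time_feat_dim']),
        mask,
    )
    assert emb.shape == (B, CONFIG['hidden_size'])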
|
|
|
# ============================================================
# Stage 1: CoLES self-supervised pretraining
# ============================================================
| class CoLESModel(nn.Module): |
| """CoLES: 同一用户的不同时间切片应该相似,不同用户应该不相似""" |
| |
| def __init__(self): |
| super().__init__() |
| self.encoder = GRUSequenceEncoder() |
| |
| def forward(self, batch): |
| return self.encoder( |
| batch['app_ids'], batch['categories'], |
| batch['sources'], batch['time_features'], batch['mask'] |
| ) |
|
|
|
|
| def sample_sub_sequence(events: List[AppInstallEvent], min_len: int = 5) -> List[AppInstallEvent]: |
| """CoLES 核心: 从完整序列中随机切一段子序列""" |
| seq_len = len(events) |
| if seq_len <= min_len: |
| return events |
| |
| start = np.random.randint(0, max(1, seq_len - min_len)) |
| end = np.random.randint(start + min_len, min(seq_len + 1, start + CONFIG['max_seq_len'])) |
| |
| return events[start:end] |
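

# Illustrative check: each sampled slice is a contiguous window with at least
# min_len events, so K slices from one user act as K positive views of the
# same behavior history.
def _demo_sub_slices():
    events = [AppInstallEvent(i, 1, 1, float(i)) for i in range(1, 40)]
    for _ in range(4):
        sub = sample_sub_sequence(events, min_len=5)
        assert 5 <= len(sub) <= len(events)
        first = sub[0].app_id
        assert [e.app_id for e in sub] == list(range(first, first + len(sub)))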
|
|
|
|
def coles_contrastive_loss(embeddings: torch.Tensor, num_sub_slices: int = 4):
    """
    CoLES loss: pull sub-sequence embeddings of the same user together and
    push those of different users apart (softmax-style contrastive objective).
    Expects rows ordered as [user0 x K slices, user1 x K slices, ...].
    """
    batch_size = embeddings.shape[0] // num_sub_slices
    device = embeddings.device

    embeddings = F.normalize(embeddings, p=2, dim=1)
    sim_matrix = torch.mm(embeddings, embeddings.t()) / CONFIG['temperature']

    labels = torch.arange(batch_size, device=device).repeat_interleave(num_sub_slices)
    positive_mask = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
    positive_mask.fill_diagonal_(0)

    # exp() saves its output for backward, so zero the diagonal with an
    # out-of-place masked_fill; an in-place fill_diagonal_ here breaks autograd
    diag = torch.eye(sim_matrix.size(0), device=device, dtype=torch.bool)
    exp_sim = torch.exp(sim_matrix).masked_fill(diag, 0)

    pos_sim = (exp_sim * positive_mask).sum(dim=1)
    all_sim = exp_sim.sum(dim=1)

    loss = -torch.log(pos_sim / (all_sim + 1e-8) + 1e-8).mean()
    return loss
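

# Gradient sanity check (illustrative): with 2 users x 4 slices the loss is a
# finite scalar and backpropagates through the out-of-place diagonal mask.
def _demo_coles_loss():
    emb = torch.randn(8, CONFIG['hidden_size'], requires_grad=True)
    loss = coles_contrastive_loss(emb, num_sub_slices=4)
    loss.backward()
    assert loss.ndim == 0 and torch.isfinite(loss).item()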
|
|
|
|
| def pretrain_coles(user_sequences: Dict[int, List[AppInstallEvent]], epochs: int = None): |
| """Stage 1: CoLES 无监督预训练,不需要任何标签""" |
| if epochs is None: |
| epochs = CONFIG['pretrain_epochs'] |
| |
| model = CoLESModel() |
| optimizer = torch.optim.Adam(model.parameters(), lr=CONFIG['pretrain_lr'], weight_decay=CONFIG['weight_decay']) |
| scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs) |
| |
| device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') |
| model = model.to(device) |
| |
| user_ids = list(user_sequences.keys()) |
| batch_size = CONFIG['batch_size'] |
| K = CONFIG['num_sub_slices'] |
| |
| logger.info(f"Starting CoLES pretraining: {len(user_ids)} users, {epochs} epochs") |
| |
| for epoch in range(epochs): |
| model.train() |
| total_loss = 0 |
| num_batches = 0 |
| |
| np.random.shuffle(user_ids) |
| |
| for batch_start in range(0, len(user_ids), batch_size): |
| batch_users = user_ids[batch_start:batch_start + batch_size] |
| |
| all_sub_seqs = [] |
| for uid in batch_users: |
| events = user_sequences[uid] |
| for _ in range(K): |
| sub_seq = sample_sub_sequence(events) |
| all_sub_seqs.append(sub_seq) |
| |
| actual_batch_size = len(all_sub_seqs) |
| app_ids = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.long) |
| categories = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.long) |
| sources = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.long) |
| time_features = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], CONFIG['time_feat_dim']) |
| mask = torch.zeros(actual_batch_size, CONFIG['max_seq_len'], dtype=torch.bool) |
| |
| for i, events in enumerate(all_sub_seqs): |
| for j, event in enumerate(events[:CONFIG['max_seq_len']]): |
| app_ids[i, j] = event.app_id |
| categories[i, j] = event.category_id |
| sources[i, j] = event.source_id |
| time_features[i, j] = torch.from_numpy(sine_cosine_time_encoding(event.time_delta)) |
| mask[i, j] = True |
| |
| batch = { |
| 'app_ids': app_ids.to(device), 'categories': categories.to(device), |
| 'sources': sources.to(device), 'time_features': time_features.to(device), |
| 'mask': mask.to(device), |
| } |
| |
| embeddings = model(batch) |
| loss = coles_contrastive_loss(embeddings, num_sub_slices=K) |
| |
| optimizer.zero_grad() |
| loss.backward() |
| torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) |
| optimizer.step() |
| |
| total_loss += loss.item() |
| num_batches += 1 |
| |
| scheduler.step() |
| avg_loss = total_loss / max(num_batches, 1) |
| logger.info(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}, LR: {scheduler.get_last_lr()[0]:.6f}") |
| |
| logger.info("CoLES pretraining complete!") |
| return model |
|
|
|
# ============================================================
# Stage 2: supervised fine-tuning
# ============================================================
| class RiskClassifier(nn.Module): |
| """风险分类头: 冻结/微调 CoLES encoder + MLP head""" |
| |
| def __init__(self, pretrained_encoder: GRUSequenceEncoder, freeze_encoder: bool = False): |
| super().__init__() |
| self.encoder = pretrained_encoder |
| self.freeze_encoder = freeze_encoder |
| |
| if freeze_encoder: |
| for param in self.encoder.parameters(): |
| param.requires_grad = False |
| |
| self.classifier = nn.Sequential( |
| nn.Linear(CONFIG['hidden_size'], CONFIG['classifier_hidden']), |
| nn.ReLU(), |
| nn.Dropout(CONFIG['dropout']), |
| nn.Linear(CONFIG['classifier_hidden'], CONFIG['classifier_hidden'] // 2), |
| nn.ReLU(), |
| nn.Dropout(CONFIG['dropout']), |
| nn.Linear(CONFIG['classifier_hidden'] // 2, 1), |
| ) |
| |
| def forward(self, app_ids, categories, sources, time_features, mask): |
| if self.freeze_encoder: |
| with torch.no_grad(): |
| user_emb = self.encoder(app_ids, categories, sources, time_features, mask) |
| else: |
| user_emb = self.encoder(app_ids, categories, sources, time_features, mask) |
| |
| logits = self.classifier(user_emb).squeeze(-1) |
| return logits |
| |
| def get_user_embedding(self, app_ids, categories, sources, time_features, mask): |
| """导出用户向量(用于接 LightGBM)""" |
| with torch.no_grad(): |
| return self.encoder(app_ids, categories, sources, time_features, mask) |
|
|
|
|
| def finetune_classifier(pretrained_model: CoLESModel, |
| user_sequences: Dict[int, List[AppInstallEvent]], |
| labels: Dict[int, int], |
| freeze_encoder: bool = False): |
| """Stage 2: 有监督微调""" |
| user_ids = list(labels.keys()) |
| train_ids, val_ids = train_test_split(user_ids, test_size=0.2, |
| stratify=[labels[uid] for uid in user_ids], random_state=42) |
| |
| train_seqs = {uid: user_sequences[uid] for uid in train_ids} |
| val_seqs = {uid: user_sequences[uid] for uid in val_ids} |
| train_labels = {uid: labels[uid] for uid in train_ids} |
| val_labels = {uid: labels[uid] for uid in val_ids} |
| |
| train_dataset = AppSequenceDataset(train_seqs, train_labels) |
| val_dataset = AppSequenceDataset(val_seqs, val_labels) |
| |
| train_loader = DataLoader(train_dataset, batch_size=CONFIG['batch_size'], shuffle=True) |
| val_loader = DataLoader(val_dataset, batch_size=CONFIG['batch_size']) |
| |
| classifier = RiskClassifier(pretrained_model.encoder, freeze_encoder=freeze_encoder) |
| device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') |
| classifier = classifier.to(device) |
| |
| num_pos = sum(labels.values()) |
| num_neg = len(labels) - num_pos |
| pos_weight = torch.tensor([num_neg / max(num_pos, 1)]).to(device) |
| logger.info(f"Class balance: pos={num_pos}, neg={num_neg}, pos_weight={pos_weight.item():.2f}") |
| |
| criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight) |
| optimizer = torch.optim.AdamW( |
| filter(lambda p: p.requires_grad, classifier.parameters()), |
| lr=CONFIG['finetune_lr'], weight_decay=CONFIG['weight_decay'] |
| ) |
| scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3) |
| |
| best_auc = 0 |
| patience_counter = 0 |
| max_patience = 7 |
| |
| for epoch in range(CONFIG['finetune_epochs']): |
| classifier.train() |
| train_loss = 0 |
| for batch in train_loader: |
| logits = classifier( |
| batch['app_ids'].to(device), batch['categories'].to(device), |
| batch['sources'].to(device), batch['time_features'].to(device), |
| batch['mask'].to(device) |
| ) |
| loss = criterion(logits, batch['label'].to(device)) |
| optimizer.zero_grad() |
| loss.backward() |
| torch.nn.utils.clip_grad_norm_(classifier.parameters(), max_norm=1.0) |
| optimizer.step() |
| train_loss += loss.item() |
| |
| classifier.eval() |
| val_preds = [] |
| val_labels_list = [] |
| with torch.no_grad(): |
| for batch in val_loader: |
| logits = classifier( |
| batch['app_ids'].to(device), batch['categories'].to(device), |
| batch['sources'].to(device), batch['time_features'].to(device), |
| batch['mask'].to(device) |
| ) |
| probs = torch.sigmoid(logits).cpu().numpy() |
| val_preds.extend(probs) |
| val_labels_list.extend(batch['label'].numpy()) |
| |
| val_auc = roc_auc_score(val_labels_list, val_preds) |
| scheduler.step(val_auc) |
| |
| avg_train_loss = train_loss / len(train_loader) |
| logger.info(f"Epoch {epoch+1}/{CONFIG['finetune_epochs']}, Train Loss: {avg_train_loss:.4f}, Val AUC: {val_auc:.4f}") |
| |
| if val_auc > best_auc: |
| best_auc = val_auc |
| patience_counter = 0 |
| torch.save(classifier.state_dict(), 'best_app_sequence_model.pt') |
| logger.info(f" → New best AUC: {best_auc:.4f}, model saved!") |
| else: |
| patience_counter += 1 |
| if patience_counter >= max_patience: |
| logger.info(f"Early stopping at epoch {epoch+1}") |
| break |
| |
| logger.info(f"Fine-tuning complete. Best Val AUC: {best_auc:.4f}") |
| return classifier, best_auc |
|
|
|
# ============================================================
# Alternative head: user embeddings + LightGBM
# ============================================================
| def extract_embeddings_for_lgbm(pretrained_model: CoLESModel, |
| user_sequences: Dict[int, List[AppInstallEvent]], |
| labels: Dict[int, int]): |
| """ |
| 导出用户embedding,接LightGBM |
| 这是CoLES论文里效果最好的方案: 预训练256d向量→LightGBM分类 |
| """ |
| try: |
| import lightgbm as lgb |
| except ImportError: |
        logger.error("lightgbm is required: pip install lightgbm")
| return None |
| |
| device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') |
| pretrained_model = pretrained_model.to(device) |
| pretrained_model.eval() |
| |
| dataset = AppSequenceDataset(user_sequences, labels) |
| loader = DataLoader(dataset, batch_size=CONFIG['batch_size']) |
| |
| all_embeddings = [] |
| all_labels = [] |
| |
    with torch.no_grad():
        for batch in loader:
            # the default collate returns CPU tensors; move them to the model's device
            batch_dev = {k: v.to(device) for k, v in batch.items() if torch.is_tensor(v)}
            emb = pretrained_model(batch_dev)
            all_embeddings.append(emb.cpu().numpy())
            all_labels.append(batch['label'].numpy())
| |
| X = np.concatenate(all_embeddings, axis=0) |
| y = np.concatenate(all_labels, axis=0) |
| |
| X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42) |
| |
| lgb_params = { |
| 'objective': 'binary', 'metric': 'auc', |
| 'learning_rate': 0.05, 'num_leaves': 63, 'max_depth': 7, |
| 'min_child_samples': 20, |
| 'scale_pos_weight': sum(y_train == 0) / max(sum(y_train == 1), 1), |
| 'subsample': 0.8, 'colsample_bytree': 0.8, 'verbose': -1, |
| } |
| |
| train_data = lgb.Dataset(X_train, label=y_train) |
| val_data = lgb.Dataset(X_val, label=y_val, reference=train_data) |
| |
| model = lgb.train( |
| lgb_params, train_data, num_boost_round=500, valid_sets=[val_data], |
| callbacks=[lgb.early_stopping(stopping_rounds=30), lgb.log_evaluation(50)] |
| ) |
| |
| val_pred = model.predict(X_val) |
| val_auc = roc_auc_score(y_val, val_pred) |
| |
| from scipy.stats import ks_2samp |
| ks_stat = ks_2samp(val_pred[y_val == 1], val_pred[y_val == 0]).statistic |
| |
| logger.info(f"LightGBM Results: AUC={val_auc:.4f}, KS={ks_stat:.4f}") |
| return model, val_auc, ks_stat |
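

# Typical call order (illustrative), mirroring main(): pretrain once, then
# either fine-tune end-to-end or hand frozen embeddings to LightGBM:
#   model = pretrain_coles(user_sequences)
#   lgb_model, auc, ks = extract_embeddings_for_lgbm(model, user_sequences, labels)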
|
|
|
|
# ============================================================
# Optional: app co-install graph embeddings
# ============================================================
| class AppCoInstallGraph: |
| """ |
| 构建App共安装图: 如果两个App经常被同一批用户安装,它们之间有边 |
| 用Node2Vec/GraphSAGE生成App embedding → 替换原始App embedding |
| 论文结论: AUC +2.3% over vanilla CoLES |
| """ |
| |
| def __init__(self, user_sequences: Dict[int, List[AppInstallEvent]]): |
| self.user_sequences = user_sequences |
| |
| def build_cooccurrence_matrix(self, min_cooccur: int = 5) -> Dict[Tuple[int, int], float]: |
| """构建App共现矩阵""" |
| from collections import Counter, defaultdict |
| |
| app_user_count = Counter() |
| co_occurrence = defaultdict(int) |
| |
        for user_id, events in self.user_sequences.items():
            # sorted for deterministic truncation below
            user_apps = sorted(set(e.app_id for e in events))
            for app in user_apps:
                app_user_count[app] += 1
            # pair only the first 50 distinct apps per user to bound the O(n^2) loop
            for i in range(len(user_apps)):
                for j in range(i + 1, min(len(user_apps), 50)):
                    pair = tuple(sorted([user_apps[i], user_apps[j]]))
                    co_occurrence[pair] += 1
| |
| edges = {} |
| for (app_i, app_j), count in co_occurrence.items(): |
| if count >= min_cooccur: |
| weight = count / np.log(app_user_count[app_i] * app_user_count[app_j] + 1) |
| edges[(app_i, app_j)] = weight |
| |
| logger.info(f"Built co-install graph: {len(edges)} edges") |
| return edges |
| |
| def train_node2vec_embeddings(self, edges: dict, embed_dim: int = 32): |
| """用Node2Vec训练App图嵌入 (pip install node2vec networkx)""" |
| try: |
| import networkx as nx |
| from node2vec import Node2Vec |
| except ImportError: |
            logger.error("Required: pip install node2vec networkx")
| return None |
| |
| G = nx.Graph() |
| for (app_i, app_j), weight in edges.items(): |
| G.add_edge(app_i, app_j, weight=weight) |
| |
| node2vec = Node2Vec(G, dimensions=embed_dim, walk_length=30, num_walks=200, p=1, q=0.5, workers=4) |
| model = node2vec.fit(window=10, min_count=1) |
| |
| app_embeddings = {} |
| for node in G.nodes(): |
| app_embeddings[node] = model.wv[str(node)] |
| |
| logger.info(f"Node2Vec trained: {len(app_embeddings)} app embeddings") |
| return app_embeddings |
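

# Hedged sketch (assumes Node2Vec `embed_dim` == CONFIG['app_id_embed_dim']):
# one way to use the graph embeddings is to overwrite rows of the randomly
# initialized app embedding table before CoLES pretraining.
# `load_graph_embeddings_into_encoder` is an illustrative helper, not part of
# the original pipeline.
def load_graph_embeddings_into_encoder(encoder: EventEncoder,
                                       app_embeddings: Dict[int, np.ndarray]):
    with torch.no_grad():
        for app_id, vec in app_embeddings.items():
            if 0 < app_id <= CONFIG['app_vocab_size']:  # row 0 stays as padding
                encoder.app_embed.weight[app_id] = torch.as_tensor(
                    np.asarray(vec, dtype=np.float32)
                )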
|
|
|
|
# ============================================================
# Demo entry point
# ============================================================
| def main(): |
| logger.info("=" * 60) |
| logger.info("App 安装序列风控模型 — 完整训练流程") |
| logger.info("=" * 60) |
| |
| |
| logger.info("Step 1: Loading data (demo with synthetic data)...") |
| np.random.seed(42) |
| num_users = 10000 |
| |
| records = [] |
| labels = {} |
| for uid in range(num_users): |
| num_installs = np.random.randint(10, 200) |
| base_time = 1700000000 |
| for i in range(num_installs): |
| records.append({ |
| 'user_id': uid, |
| 'app_id': np.random.randint(1, CONFIG['app_vocab_size']), |
| 'app_category': np.random.randint(1, CONFIG['app_category_size']), |
| 'install_source': np.random.randint(1, CONFIG['app_source_size']), |
| 'install_timestamp': base_time + i * np.random.randint(3600, 86400 * 7), |
| }) |
| labels[uid] = int(np.random.random() < 0.05) |
| |
| raw_df = pd.DataFrame(records) |
| logger.info(f" Users: {num_users}, Total installs: {len(records)}, " |
| f"Default rate: {sum(labels.values())/len(labels)*100:.1f}%") |
| |
| |
| logger.info("Step 2: Preprocessing sequences...") |
| user_sequences = preprocess_app_sequence(raw_df) |
| |
| |
| logger.info("Step 3: Building app co-install graph...") |
| graph_builder = AppCoInstallGraph(user_sequences) |
| edges = graph_builder.build_cooccurrence_matrix(min_cooccur=3) |
| |
| |
| logger.info("Step 4: CoLES unsupervised pretraining...") |
| pretrained_model = pretrain_coles(user_sequences, epochs=5) |
| |
| |
| logger.info("Step 5: Supervised fine-tuning...") |
| classifier, best_auc = finetune_classifier( |
| pretrained_model, user_sequences, labels, freeze_encoder=False |
| ) |
| |
| logger.info("=" * 60) |
| logger.info(f"Training complete! Best AUC: {best_auc:.4f}") |
| logger.info("=" * 60) |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|