diff --git a/.gitignore b/.gitignore index ad1f334cc6f3118de180bb34cc8f31f29e411db6..3f59b0ebec632d4b3bdf9c148570708e36e19467 100644 --- a/.gitignore +++ b/.gitignore @@ -47,3 +47,14 @@ GraphUNets/data/* temp* *.tex + +src/data/* + +src/cora_seeds/* + +.venv/* + + +.ipynb_checkpoints/ + +*.dot diff --git a/GCond/.gitignore b/GCond/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a3e7ef843b94d7730f45b2f7f3e6cface8abb9b0 --- /dev/null +++ b/GCond/.gitignore @@ -0,0 +1,140 @@ +# Auxiliary file on MacOS +.DS_Store + +# Byte-compiled / optimized / DLL files +__pycache__/ +models/__pycache__/ +*.py[cod] +dataset + +# C extensions +*.so + +# Distribution / packaging +.Python +data/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ +*$py.class + + + + + diff --git a/GCond/GCond.png b/GCond/GCond.png new file mode 100644 index 0000000000000000000000000000000000000000..8dbdc75aaa2a685f6a6b841bb859b8b6dbcc524f Binary files /dev/null and b/GCond/GCond.png differ diff --git a/GCond/KDD22_DosCond/README.md b/GCond/KDD22_DosCond/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ea55afdcd7d1880f9b4e2e648f74252ebca216a2 --- /dev/null +++ b/GCond/KDD22_DosCond/README.md @@ -0,0 +1,38 @@ +# DosCond + +[KDD 2022] A PyTorch Implementation for ["Condensing Graphs via One-Step Gradient Matching"](https://arxiv.org/abs/2206.07746) under node classification setting. For graph classification setting, please refer to [https://github.com/amazon-research/DosCond](https://github.com/amazon-research/DosCond). + + +Abstract +---- +As training deep learning models on large dataset takes a lot of time and resources, it is desired to construct a small synthetic dataset with which we can train deep learning models sufficiently. There are recent works that have explored solutions on condensing image datasets through complex bi-level optimization. For instance, dataset condensation (DC) matches network gradients w.r.t. 
large-real data and small-synthetic data, where the network weights are optimized for multiple steps at each outer iteration. However, existing approaches have their inherent limitations: (1) they are not directly applicable to graphs where the data is discrete; and (2) the condensation process is computationally expensive due to the involved nested optimization. To bridge the gap, we investigate efficient dataset condensation tailored for graph datasets where we model the discrete graph structure as a probabilistic model. We further propose a one-step gradient matching scheme, which performs gradient matching for only one single step without training the network weights. + + +Here we do not implement the discrete structure learning, but only borrow the idea from ["Condensing Graphs via One-Step Gradient Matching"](https://arxiv.org/abs/2206.07746) to perform one-step gradient matching, which significantly speeds up the condensation process. + + +Essentially, we can run the following commands: +``` +python train_gcond_transduct.py --dataset citeseer --nlayers=2 --lr_feat=1e-3 --lr_adj=1e-3 --r=0.5 --sgc=0 --dis=mse --one_step=1 --epochs=3000 +python train_gcond_transduct.py --dataset cora --nlayers=2 --lr_feat=1e-3 --lr_adj=1e-3 --r=0.5 --sgc=0 --dis=mse --gpu_id=2 --one_step=1 --epochs=5000 +python train_gcond_transduct.py --dataset pubmed --nlayers=2 --lr_feat=1e-3 --lr_adj=1e-3 --r=0.5 --sgc=0 --dis=mse --gpu_id=2 --one_step=1 --epochs=2000 +python train_gcond_transduct.py --dataset ogbn-arxiv --nlayers=2 --lr_feat=1e-2 --lr_adj=2e-2 --r=0.001 --sgc=1 --dis=ours --gpu_id=2 --one_step=1 --epochs=1000 +python train_gcond_induct.py --dataset flickr --nlayers=2 --lr_feat=5e-3 --lr_adj=5e-3 --r=0.001 --sgc=0 --dis=mse --gpu_id=3 --one_step=1 --epochs=1000 +``` +Note that using smaller learning rate and larger epochs can get even higher performance. + + +## Cite +For more information, you can take a look at the [paper](https://arxiv.org/abs/2206.07746). 
+ +If you find this repo to be useful, please cite our paper. Thank you. +``` +@inproceedings{jin2022condensing, + title={Condensing Graphs via One-Step Gradient Matching}, + author={Jin, Wei and Tang, Xianfeng and Jiang, Haoming and Li, Zheng and Zhang, Danqing and Tang, Jiliang and Yin, Bing}, + booktitle={Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining}, + pages={720--730}, + year={2022} +} +``` + diff --git a/GCond/README.md b/GCond/README.md new file mode 100644 index 0000000000000000000000000000000000000000..27f8e8b019c7c0e8332c1081eff62dffb9a0dc64 --- /dev/null +++ b/GCond/README.md @@ -0,0 +1,139 @@ +# GCond +[ICLR 2022] The PyTorch implementation for ["Graph Condensation for Graph Neural Networks"](https://cse.msu.edu/~jinwei2/files/GCond.pdf) is provided under the main directory. + +[KDD 2022] The implementation for ["Condensing Graphs via One-Step Gradient Matching"](https://arxiv.org/abs/2206.07746) is shown in the `KDD22_DosCond` directory. See [link](https://github.com/ChandlerBang/GCond/tree/main/KDD22_DosCond). + +[IJCAI 2024] Please read our recent survey ["A Comprehensive Survey on Graph Reduction: Sparsification, Coarsening, and Condensation"](https://arxiv.org/abs/2402.03358) for a detailed review of graph reduction techniques! + +[ArXiv 2024] We released a benchmarking framework for graph condensation ["GC4NC: A Benchmark Framework for Graph Condensation with New Insights"](https://arxiv.org/abs/2406.16715), including **robustness**, **privacy preservation**, NAS performance, property analysis, etc! + +Abstract +---- +We propose and study the problem of graph condensation for graph neural networks (GNNs). Specifically, we aim to condense the large, original graph into a small, synthetic, and highly-informative graph, such that GNNs trained on the small graph and large graph have comparable performance. 
Extensive experiments have demonstrated the effectiveness of the proposed framework in condensing different graph datasets into informative smaller graphs. In particular, we are able to approximate the original test accuracy by 95.3% on Reddit, 99.8% on Flickr and 99.0% on Citeseer, while reducing their graph size by more than 99.9%, and the condensed graphs can be used to train various GNN architectures. + + +![]() + +
+ + +## A Nice Survey Paper +Please check out our survey paper below, which summarizes the recent advances in graph condensation. + +![image](https://github.com/CurryTang/Towards-Graph-Foundation-Models-New-perspective-/assets/15672123/89a23a37-71d4-47f7-8949-7d859a41e369)[[A Comprehensive Survey on Graph Reduction: Sparsification, Coarsening, and Condensation]](https://arxiv.org/abs/2402.03358) + + + +## Requirements +Please see [requirements.txt](https://github.com/ChandlerBang/GCond/blob/main/requirements.txt). +``` +torch==1.7.0 +torch_geometric==1.6.3 +scipy==1.6.2 +numpy==1.19.2 +ogb==1.3.0 +tqdm==4.59.0 +torch_sparse==0.6.9 +deeprobust==0.2.4 +scikit_learn==1.0.2 +``` + +## Download Datasets +For cora, citeseer and pubmed, the code will directly download them; so no extra script is needed. +For reddit, flickr and arxiv, we use the datasets provided by [GraphSAINT](https://github.com/GraphSAINT/GraphSAINT). +They are available on [Google Drive link](https://drive.google.com/open?id=1zycmmDES39zVlbVCYs88JTJ1Wm5FbfLz) (alternatively, [BaiduYun link (code: f1ao)](https://pan.baidu.com/s/1SOb0SiSAXavwAcNqkttwcg)). Rename the folder to `data` at the root directory. Note that the links are provided by GraphSAINT team. + + + + +## Run the code +For transductive setting, please run the following command: +``` +python train_gcond_transduct.py --dataset cora --nlayers=2 --lr_feat=1e-4 --gpu_id=0 --lr_adj=1e-4 --r=0.5 +``` +where `r` indicates the ratio of condensed samples to the labeled samples. For instance, there are only 140 labeled nodes in Cora dataset, so `r=0.5` indicates the number of condensed samples are 70, **which corresponds to r=2.6%=70/2710 in our paper**. 
Thus, the parameter `r` is different from the real reduction rate in the paper for the transductive setting, please see the following table for the correspondence. + +| | `r` in the code | `r` in the paper (real reduction rate) | +|--------------|-------------------|---------------------| +| Transductive | Cora, r=0.5 | Cora, r=2.6% | +| Transductive | Citeseer, r=0.5 | Citeseer, r=1.8% | +| Transductive | Ogbn-arxiv, r= 0.005 | Ogbn-arxiv, r=0.25% | +| Transductive | Pubmed, r=0.5 | Pubmed, r=0.3% | +| Inductive | Flickr, r=0.01 | Flickr, r=1% | +| Inductive | Reddit, r=0.001 | Reddit, r=0.1% | + +For inductive setting, please run the following command: +``` +python train_gcond_induct.py --dataset flickr --nlayers=2 --lr_feat=0.01 --gpu_id=0 --lr_adj=0.01 --r=0.005 --epochs=1000 --outer=10 --inner=1 +``` + +## Reproduce the performance +The generated graphs are saved in the folder `saved_ours`; you can directly load them to test the performance. + +For Table 2, run `bash scripts/run_main.sh`. + +For Table 3, run `bash scripts/run_cross.sh`. + +## [Faster Condensation!] One-Step Gradient Matching +From the KDD'22 paper ["Condensing Graphs via One-Step Gradient Matching"](https://arxiv.org/abs/2206.07746), we know that performing gradient matching for only one step can also achieve a good performance while significantly accelerating the condensation process. Hence, we can run the following command to perform one-step gradient matching, which is essentially much faster than the original version: +``` +python train_gcond_transduct.py --dataset citeseer --nlayers=2 --lr_feat=1e-2 --lr_adj=1e-2 --r=0.5 \ + --sgc=0 --dis=mse --gpu_id=2 --one_step=1 --epochs=3000 +``` +For more commands, please go to [`KDD22_DosCond`](https://github.com/ChandlerBang/GCond/tree/main/KDD22_DosCond). 
+ +**[Note]: I found that sometimes using MSE loss for gradient matching can be more stable than using `ours` loss**, and it gives more flexibility on the model used in condensation (using GCN as the backbone can also generate good condensed graphs). + + +## Whole Dataset Performance +When we do coreset selection, we need to first train the model on the whole dataset. Thus we can obtain the performance of the whole dataset by running `train_coreset.py` and `train_coreset_induct.py`: +``` +python train_coreset.py --dataset cora --r=0.01 --method=random +python train_coreset_induct.py --dataset flickr --r=0.01 --method=random +``` + +## Coreset Performance +Run the following code to get the coreset performance for transductive setting. +``` +python train_coreset.py --dataset cora --r=0.01 --method=herding +python train_coreset.py --dataset cora --r=0.01 --method=random +python train_coreset.py --dataset cora --r=0.01 --method=kcenter +``` +Similarly, run the following code for the inductive setting. +``` +python train_coreset_induct.py --dataset flickr --r=0.01 --method=kcenter +``` + + +## Cite +If you find this repo to be useful, please cite our three papers. Thank you! 
+``` +@inproceedings{ + jin2022graph, + title={Graph Condensation for Graph Neural Networks}, + author={Wei Jin and Lingxiao Zhao and Shichang Zhang and Yozen Liu and Jiliang Tang and Neil Shah}, + booktitle={International Conference on Learning Representations}, + year={2022}, + url={https://openreview.net/forum?id=WLEx3Jo4QaB} +} +``` + +``` +@inproceedings{jin2022condensing, + title={Condensing Graphs via One-Step Gradient Matching}, + author={Jin, Wei and Tang, Xianfeng and Jiang, Haoming and Li, Zheng and Zhang, Danqing and Tang, Jiliang and Yin, Bing}, + booktitle={Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining}, + pages={720--730}, + year={2022} +} +``` + +``` +@article{hashemi2024comprehensive, + title={A Comprehensive Survey on Graph Reduction: Sparsification, Coarsening, and Condensation}, + author={Hashemi, Mohammad and Gong, Shengbo and Ni, Juntong and Fan, Wenqi and Prakash, B Aditya and Jin, Wei}, + journal={International Joint Conference on Artificial Intelligence (IJCAI)}, + year={2024} +} +``` + diff --git a/GCond/__pycache__/configs.cpython-312.pyc b/GCond/__pycache__/configs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eab6165cb0f6f2c76e2e9bd941cfbf50c2f3f533 Binary files /dev/null and b/GCond/__pycache__/configs.cpython-312.pyc differ diff --git a/GCond/__pycache__/utils.cpython-312.pyc b/GCond/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..954f37c52d14f9339324b0807dbc816a6e8f3a89 Binary files /dev/null and b/GCond/__pycache__/utils.cpython-312.pyc differ diff --git a/GCond/__pycache__/utils_graphsaint.cpython-312.pyc b/GCond/__pycache__/utils_graphsaint.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b2d966a7efae5c3a59b03ce308c913841208e61 Binary files /dev/null and b/GCond/__pycache__/utils_graphsaint.cpython-312.pyc differ diff --git a/GCond/configs.py b/GCond/configs.py 
new file mode 100644 index 0000000000000000000000000000000000000000..94d925dd2a69c83caf86e699c5a9877b8005b89f --- /dev/null +++ b/GCond/configs.py @@ -0,0 +1,24 @@ +'''Configuration''' + +def load_config(args): + dataset = args.dataset + if dataset in ['flickr']: + args.nlayers = 2 + args.hidden = 256 + args.weight_decay = 5e-3 + args.dropout = 0.0 + + if dataset in ['reddit']: + args.nlayers = 2 + args.hidden = 256 + args.weight_decay = 0e-4 + args.dropout = 0 + + if dataset in ['ogbn-arxiv']: + args.hidden = 256 + args.weight_decay = 0 + args.dropout = 0 + + return args + + diff --git a/GCond/coreset/__init__.py b/GCond/coreset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..32288c2f9befb6f84f3abedd8ca3e901a03210a1 --- /dev/null +++ b/GCond/coreset/__init__.py @@ -0,0 +1,3 @@ +from .all_methods import KCenter, Herding, Random, LRMC + +__all__ = ['KCenter', 'Herding', 'Random', 'LRMC'] diff --git a/GCond/coreset/__pycache__/__init__.cpython-312.pyc b/GCond/coreset/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..574c1bc90dc74a04821323efe539a08ae8df6ec2 Binary files /dev/null and b/GCond/coreset/__pycache__/__init__.cpython-312.pyc differ diff --git a/GCond/coreset/__pycache__/all_methods.cpython-312.pyc b/GCond/coreset/__pycache__/all_methods.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8b5b26e29221a393a3aa503c709c376fe9aa1e6 Binary files /dev/null and b/GCond/coreset/__pycache__/all_methods.cpython-312.pyc differ diff --git a/GCond/coreset/all_methods.py b/GCond/coreset/all_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..9589a2c7c036c0be8c4b2137b987ae86074e8a21 --- /dev/null +++ b/GCond/coreset/all_methods.py @@ -0,0 +1,212 @@ +import torch +import numpy as np +import json + + +class Base: + + def __init__(self, data, args, device='cuda', **kwargs): + self.data = data + self.args = args + self.device = 
device + n = int(data.feat_train.shape[0] * args.reduction_rate) + d = data.feat_train.shape[1] + self.nnodes_syn = n + self.labels_syn = torch.LongTensor(self.generate_labels_syn(data)).to(device) + + def generate_labels_syn(self, data): + from collections import Counter + counter = Counter(data.labels_train) + num_class_dict = {} + n = len(data.labels_train) + + sorted_counter = sorted(counter.items(), key=lambda x:x[1]) + sum_ = 0 + labels_syn = [] + self.syn_class_indices = {} + for ix, (c, num) in enumerate(sorted_counter): + if ix == len(sorted_counter) - 1: + num_class_dict[c] = int(n * self.args.reduction_rate) - sum_ + self.syn_class_indices[c] = [len(labels_syn), len(labels_syn) + num_class_dict[c]] + labels_syn += [c] * num_class_dict[c] + else: + num_class_dict[c] = max(int(num * self.args.reduction_rate), 1) + sum_ += num_class_dict[c] + self.syn_class_indices[c] = [len(labels_syn), len(labels_syn) + num_class_dict[c]] + labels_syn += [c] * num_class_dict[c] + + self.num_class_dict = num_class_dict + return labels_syn + + def select(self): + return + +class KCenter(Base): + + def __init__(self, data, args, device='cuda', **kwargs): + super(KCenter, self).__init__(data, args, device='cuda', **kwargs) + + def select(self, embeds, inductive=False): + # feature: embeds + # kcenter # class by class + num_class_dict = self.num_class_dict + if inductive: + idx_train = np.arange(len(self.data.idx_train)) + else: + idx_train = self.data.idx_train + labels_train = self.data.labels_train + idx_selected = [] + + for class_id, cnt in num_class_dict.items(): + idx = idx_train[labels_train==class_id] + feature = embeds[idx] + mean = torch.mean(feature, dim=0, keepdim=True) + # dis = distance(feature, mean)[:,0] + dis = torch.cdist(feature, mean)[:,0] + rank = torch.argsort(dis) + idx_centers = rank[:1].tolist() + for i in range(cnt-1): + feature_centers = feature[idx_centers] + dis_center = torch.cdist(feature, feature_centers) + dis_min, _ = torch.min(dis_center, 
dim=-1) + id_max = torch.argmax(dis_min).item() + idx_centers.append(id_max) + + idx_selected.append(idx[idx_centers]) + # return np.array(idx_selected).reshape(-1) + return np.hstack(idx_selected) + + +class Herding(Base): + + def __init__(self, data, args, device='cuda', **kwargs): + super(Herding, self).__init__(data, args, device='cuda', **kwargs) + + def select(self, embeds, inductive=False): + num_class_dict = self.num_class_dict + if inductive: + idx_train = np.arange(len(self.data.idx_train)) + else: + idx_train = self.data.idx_train + labels_train = self.data.labels_train + idx_selected = [] + + # herding # class by class + for class_id, cnt in num_class_dict.items(): + idx = idx_train[labels_train==class_id] + features = embeds[idx] + mean = torch.mean(features, dim=0, keepdim=True) + selected = [] + idx_left = np.arange(features.shape[0]).tolist() + + for i in range(cnt): + det = mean*(i+1) - torch.sum(features[selected], dim=0) + dis = torch.cdist(det, features[idx_left]) + id_min = torch.argmin(dis) + selected.append(idx_left[id_min]) + del idx_left[id_min] + idx_selected.append(idx[selected]) + # return np.array(idx_selected).reshape(-1) + return np.hstack(idx_selected) + + +class Random(Base): + + def __init__(self, data, args, device='cuda', **kwargs): + super(Random, self).__init__(data, args, device='cuda', **kwargs) + + def select(self, embeds, inductive=False): + num_class_dict = self.num_class_dict + if inductive: + idx_train = np.arange(len(self.data.idx_train)) + else: + idx_train = self.data.idx_train + + labels_train = self.data.labels_train + idx_selected = [] + + for class_id, cnt in num_class_dict.items(): + idx = idx_train[labels_train==class_id] + selected = np.random.permutation(idx) + idx_selected.append(selected[:cnt]) + + # return np.array(idx_selected).reshape(-1) + return np.hstack(idx_selected) + + +class LRMC(Base): + """ + Coreset selection using precomputed seed nodes from the Laplacian‑Integrated + Relaxed Maximal Clique 
(L‑RMC) algorithm. Seed nodes are read from a JSON + file specified by ``args.lrmc_seeds_path`` and used to preferentially select + training examples. Per‑class reduction counts are respected: if a class has + fewer seeds than required, random training nodes from that class are added + until the quota is met. + """ + + def __init__(self, data, args, device='cuda', **kwargs): + super(LRMC, self).__init__(data, args, device=device, **kwargs) + seeds_path = getattr(args, 'lrmc_seeds_path', None) + if seeds_path is None: + raise ValueError( + "LRMC method selected but no path to seed file provided. " + "Please specify --lrmc_seeds_path when running the training script." + ) + self.seed_nodes = self._load_seed_nodes(seeds_path) + + def _load_seed_nodes(self, path: str): + # Parse seed nodes from JSON file (supports 'seed_nodes' or 'members'). + with open(path, 'r') as f: + js = json.load(f) + clusters = js.get('clusters', []) + if not clusters: + raise ValueError(f"No clusters found in L‑RMC seeds file {path}") + def _cluster_length(c): + nodes = c.get('seed_nodes') or c.get('members') or [] + return len(nodes) + best_cluster = max(clusters, key=_cluster_length) + nodes = best_cluster.get('seed_nodes') or best_cluster.get('members') or [] + seed_nodes = [] + for u in nodes: + try: + uid = int(u) + except Exception: + continue + zero_idx = uid - 1 + if zero_idx >= 0: + seed_nodes.append(zero_idx) + else: + if uid >= 0: + seed_nodes.append(uid) + seed_nodes = sorted(set(seed_nodes)) + return seed_nodes + + def select(self, embeds, inductive=False): + # Determine training indices depending on the inductive setting. + if inductive: + idx_train = np.arange(len(self.data.idx_train)) + labels_train = self.data.labels_train + else: + idx_train = self.data.idx_train + labels_train = self.data.labels_train + num_class_dict = self.num_class_dict + idx_selected = [] + seed_set = set(self.seed_nodes) + # Pick seed nodes per class; fill remainder with random nodes if needed. 
+ for class_id, cnt in num_class_dict.items(): + class_mask = (labels_train == class_id) + class_indices = idx_train[class_mask] + seed_in_class = [u for u in class_indices if u in seed_set] + selected = seed_in_class[:min(len(seed_in_class), cnt)] + remaining_required = cnt - len(selected) + if remaining_required > 0: + remaining_candidates = [u for u in class_indices if u not in selected] + if len(remaining_candidates) <= remaining_required: + additional = remaining_candidates + else: + additional = np.random.choice(remaining_candidates, remaining_required, replace=False).tolist() + selected += additional + idx_selected.append(np.array(selected)) + return np.hstack(idx_selected) + + diff --git a/GCond/gcond_agent_induct.py b/GCond/gcond_agent_induct.py new file mode 100644 index 0000000000000000000000000000000000000000..67fdf07be0eb1c07d99245479004473ea85289e6 --- /dev/null +++ b/GCond/gcond_agent_induct.py @@ -0,0 +1,327 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from torch.nn import Parameter +import torch.nn.functional as F +from utils import match_loss, regularization, row_normalize_tensor +import deeprobust.graph.utils as utils +from copy import deepcopy +import numpy as np +from tqdm import tqdm +from models.gcn import GCN +from models.sgc import SGC +from models.sgc_multi import SGC as SGC1 +from models.parametrized_adj import PGE +import scipy.sparse as sp +from torch_sparse import SparseTensor + + +class GCond: + + def __init__(self, data, args, device='cuda', **kwargs): + self.data = data + self.args = args + self.device = device + + n = int(len(data.idx_train) * args.reduction_rate) + d = data.feat_train.shape[1] + self.nnodes_syn = n + self.feat_syn = nn.Parameter(torch.FloatTensor(n, d).to(device)) + self.pge = PGE(nfeat=d, nnodes=n, device=device, args=args).to(device) + + self.labels_syn = torch.LongTensor(self.generate_labels_syn(data)).to(device) + self.reset_parameters() + self.optimizer_feat = 
torch.optim.Adam([self.feat_syn], lr=args.lr_feat) + self.optimizer_pge = torch.optim.Adam(self.pge.parameters(), lr=args.lr_adj) + print('adj_syn:', (n,n), 'feat_syn:', self.feat_syn.shape) + + def reset_parameters(self): + self.feat_syn.data.copy_(torch.randn(self.feat_syn.size())) + + def generate_labels_syn(self, data): + from collections import Counter + counter = Counter(data.labels_train) + num_class_dict = {} + n = len(data.labels_train) + + sorted_counter = sorted(counter.items(), key=lambda x:x[1]) + sum_ = 0 + labels_syn = [] + self.syn_class_indices = {} + + for ix, (c, num) in enumerate(sorted_counter): + if ix == len(sorted_counter) - 1: + num_class_dict[c] = int(n * self.args.reduction_rate) - sum_ + self.syn_class_indices[c] = [len(labels_syn), len(labels_syn) + num_class_dict[c]] + labels_syn += [c] * num_class_dict[c] + else: + num_class_dict[c] = max(int(num * self.args.reduction_rate), 1) + sum_ += num_class_dict[c] + self.syn_class_indices[c] = [len(labels_syn), len(labels_syn) + num_class_dict[c]] + labels_syn += [c] * num_class_dict[c] + + self.num_class_dict = num_class_dict + return labels_syn + + def test_with_val(self, verbose=True): + res = [] + + data, device = self.data, self.device + feat_syn, pge, labels_syn = self.feat_syn.detach(), \ + self.pge, self.labels_syn + # with_bn = True if args.dataset in ['ogbn-arxiv'] else False + dropout = 0.5 if self.args.dataset in ['reddit'] else 0 + model = GCN(nfeat=feat_syn.shape[1], nhid=self.args.hidden, dropout=dropout, + weight_decay=5e-4, nlayers=2, + nclass=data.nclass, device=device).to(device) + + adj_syn = pge.inference(feat_syn) + args = self.args + + if args.save: + torch.save(adj_syn, f'saved_ours/adj_{args.dataset}_{args.reduction_rate}_{args.seed}.pt') + torch.save(feat_syn, f'saved_ours/feat_{args.dataset}_{args.reduction_rate}_{args.seed}.pt') + + noval = True + model.fit_with_val(feat_syn, adj_syn, labels_syn, data, + train_iters=600, normalize=True, verbose=False, noval=noval) + 
+ model.eval() + labels_test = torch.LongTensor(data.labels_test).cuda() + + output = model.predict(data.feat_test, data.adj_test) + + loss_test = F.nll_loss(output, labels_test) + acc_test = utils.accuracy(output, labels_test) + res.append(acc_test.item()) + if verbose: + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + print(adj_syn.sum(), adj_syn.sum()/(adj_syn.shape[0]**2)) + + if False: + if self.args.dataset == 'ogbn-arxiv': + thresh = 0.6 + elif self.args.dataset == 'reddit': + thresh = 0.91 + else: + thresh = 0.7 + + labels_train = torch.LongTensor(data.labels_train).cuda() + output = model.predict(data.feat_train, data.adj_train) + # loss_train = F.nll_loss(output, labels_train) + # acc_train = utils.accuracy(output, labels_train) + loss_train = torch.tensor(0) + acc_train = torch.tensor(0) + if verbose: + print("Train set results:", + "loss= {:.4f}".format(loss_train.item()), + "accuracy= {:.4f}".format(acc_train.item())) + res.append(acc_train.item()) + return res + + def train(self, verbose=True): + args = self.args + data = self.data + feat_syn, pge, labels_syn = self.feat_syn, self.pge, self.labels_syn + features, adj, labels = data.feat_train, data.adj_train, data.labels_train + syn_class_indices = self.syn_class_indices + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + feat_sub, adj_sub = self.get_sub_adj_feat(features) + self.feat_syn.data.copy_(feat_sub) + + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + + adj = adj_norm + adj = SparseTensor(row=adj._indices()[0], col=adj._indices()[1], + value=adj._values(), sparse_sizes=adj.size()).t() + + + outer_loop, inner_loop = get_loops(args) + + for it in range(args.epochs+1): + loss_avg = 0 + if args.sgc==1: + model = SGC(nfeat=data.feat_train.shape[1], nhid=args.hidden, + nclass=data.nclass, 
dropout=args.dropout, + nlayers=args.nlayers, with_bn=False, + device=self.device).to(self.device) + elif args.sgc==2: + model = SGC1(nfeat=data.feat_train.shape[1], nhid=args.hidden, + nclass=data.nclass, dropout=args.dropout, + nlayers=args.nlayers, with_bn=False, + device=self.device).to(self.device) + + else: + model = GCN(nfeat=data.feat_train.shape[1], nhid=args.hidden, + nclass=data.nclass, dropout=args.dropout, nlayers=args.nlayers, + device=self.device).to(self.device) + + model.initialize() + + model_parameters = list(model.parameters()) + + optimizer_model = torch.optim.Adam(model_parameters, lr=args.lr_model) + model.train() + + for ol in range(outer_loop): + adj_syn = pge(self.feat_syn) + adj_syn_norm = utils.normalize_adj_tensor(adj_syn, sparse=False) + feat_syn_norm = feat_syn + + BN_flag = False + for module in model.modules(): + if 'BatchNorm' in module._get_name(): #BatchNorm + BN_flag = True + if BN_flag: + model.train() # for updating the mu, sigma of BatchNorm + output_real = model.forward(features, adj_norm) + for module in model.modules(): + if 'BatchNorm' in module._get_name(): #BatchNorm + module.eval() # fix mu and sigma of every BatchNorm layer + + loss = torch.tensor(0.0).to(self.device) + for c in range(data.nclass): + if c not in self.num_class_dict: + continue + + batch_size, n_id, adjs = data.retrieve_class_sampler( + c, adj, transductive=False, args=args) + + if args.nlayers == 1: + adjs = [adjs] + adjs = [adj.to(self.device) for adj in adjs] + output = model.forward_sampler(features[n_id], adjs) + loss_real = F.nll_loss(output, labels[n_id[:batch_size]]) + gw_real = torch.autograd.grad(loss_real, model_parameters) + gw_real = list((_.detach().clone() for _ in gw_real)) + + ind = syn_class_indices[c] + if args.nlayers == 1: + adj_syn_norm_list = [adj_syn_norm[ind[0]: ind[1]]] + else: + adj_syn_norm_list = [adj_syn_norm]*(args.nlayers-1) + \ + [adj_syn_norm[ind[0]: ind[1]]] + + output_syn = model.forward_sampler_syn(feat_syn, 
def get_sub_adj_feat(self, features):
    """Select real-node features per synthetic class and build a kNN graph.

    For every class ``c`` this picks as many real nodes as the synthetic
    label set contains for ``c``, then links the picked nodes by keeping
    only the top-2 cosine similarities of each row (diagonal zeroed).

    Returns ``(selected_features, adj_knn)``.
    """
    data = self.data
    chosen = []

    from collections import Counter
    per_class = Counter(self.labels_syn.cpu().numpy())

    for c in range(data.nclass):
        # presumably retrieve_class yields indices into the feature rows
        # passed in here — TODO confirm against the caller
        chosen.extend(list(data.retrieve_class(c, num=per_class[c])))
    chosen = np.array(chosen).reshape(-1)
    features = features[chosen]

    from sklearn.metrics.pairwise import cosine_similarity
    top_k = 2
    sims = cosine_similarity(features.cpu().numpy())
    # zero the diagonal so a node is never its own neighbour
    sims[(np.arange(len(sims)), np.arange(len(sims)))] = 0
    for row in range(len(sims)):
        order = np.argsort(sims[row])
        sims[row, order[:-top_k]] = 0
    adj_knn = torch.FloatTensor(sims).to(self.device)
    return features, adj_knn


def get_loops(args):
    """Return the (outer_loop, inner_loop) iteration counts.

    The values are empirically chosen per dataset; ``one_step`` short-circuits
    to a single-step gradient-matching schedule.
    """
    if args.one_step:
        return 10, 0
    if args.dataset == 'ogbn-arxiv':
        return 20, 0
    if args.dataset in ('reddit', 'flickr'):
        return args.outer, args.inner
    if args.dataset == 'cora':
        return 20, 10
    # citeseer and every remaining dataset share the same schedule
    return 20, 5
def reset_parameters(self):
    """Re-initialize the synthetic node features with standard-normal noise."""
    noise = torch.randn(self.feat_syn.size())
    self.feat_syn.data.copy_(noise)

def generate_labels_syn(self, data):
    """Build the synthetic label list, one entry per condensed node.

    Classes are processed from rarest to most frequent; each gets
    ``max(int(count * reduction_rate), 1)`` synthetic nodes, and the most
    frequent class absorbs the leftover budget so the total is exactly
    ``int(len(labels_train) * reduction_rate)``.

    Side effects: stores ``self.syn_class_indices`` (class -> [start, end)
    slice into the synthetic labels) and ``self.num_class_dict``
    (class -> synthetic count).  Returns the flat label list.
    """
    from collections import Counter
    counter = Counter(data.labels_train)
    n = len(data.labels_train)

    by_frequency = sorted(counter.items(), key=lambda kv: kv[1])
    num_class_dict = {}
    self.syn_class_indices = {}
    labels_syn = []
    assigned = 0
    for ix, (c, num) in enumerate(by_frequency):
        if ix == len(by_frequency) - 1:
            # last (most frequent) class takes whatever budget remains
            budget = int(n * self.args.reduction_rate) - assigned
        else:
            budget = max(int(num * self.args.reduction_rate), 1)
            assigned += budget
        num_class_dict[c] = budget
        start = len(labels_syn)
        self.syn_class_indices[c] = [start, start + budget]
        labels_syn.extend([c] * budget)

    self.num_class_dict = num_class_dict
    return labels_syn
normalize=True, verbose=False) + + model.eval() + labels_test = torch.LongTensor(data.labels_test).cuda() + + labels_train = torch.LongTensor(data.labels_train).cuda() + output = model.predict(data.feat_train, data.adj_train) + loss_train = F.nll_loss(output, labels_train) + acc_train = utils.accuracy(output, labels_train) + if verbose: + print("Train set results:", + "loss= {:.4f}".format(loss_train.item()), + "accuracy= {:.4f}".format(acc_train.item())) + res.append(acc_train.item()) + + # Full graph + output = model.predict(data.feat_full, data.adj_full) + loss_test = F.nll_loss(output[data.idx_test], labels_test) + acc_test = utils.accuracy(output[data.idx_test], labels_test) + res.append(acc_test.item()) + if verbose: + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return res + + def train(self, verbose=True): + args = self.args + data = self.data + feat_syn, pge, labels_syn = self.feat_syn, self.pge, self.labels_syn + features, adj, labels = data.feat_full, data.adj_full, data.labels_full + idx_train = data.idx_train + + syn_class_indices = self.syn_class_indices + + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + + feat_sub, adj_sub = self.get_sub_adj_feat(features) + self.feat_syn.data.copy_(feat_sub) + + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + + adj = adj_norm + adj = SparseTensor(row=adj._indices()[0], col=adj._indices()[1], + value=adj._values(), sparse_sizes=adj.size()).t() + + + outer_loop, inner_loop = get_loops(args) + loss_avg = 0 + + for it in range(args.epochs+1): + if args.dataset in ['ogbn-arxiv']: + model = SGC1(nfeat=feat_syn.shape[1], nhid=self.args.hidden, + dropout=0.0, with_bn=False, + weight_decay=0e-4, nlayers=2, + nclass=data.nclass, + device=self.device).to(self.device) + else: + if args.sgc == 1: + model = 
SGC(nfeat=data.feat_train.shape[1], nhid=args.hidden, + nclass=data.nclass, dropout=args.dropout, + nlayers=args.nlayers, with_bn=False, + device=self.device).to(self.device) + else: + model = GCN(nfeat=data.feat_train.shape[1], nhid=args.hidden, + nclass=data.nclass, dropout=args.dropout, nlayers=args.nlayers, + device=self.device).to(self.device) + + + model.initialize() + + model_parameters = list(model.parameters()) + + optimizer_model = torch.optim.Adam(model_parameters, lr=args.lr_model) + model.train() + + for ol in range(outer_loop): + adj_syn = pge(self.feat_syn) + adj_syn_norm = utils.normalize_adj_tensor(adj_syn, sparse=False) + feat_syn_norm = feat_syn + + BN_flag = False + for module in model.modules(): + if 'BatchNorm' in module._get_name(): #BatchNorm + BN_flag = True + if BN_flag: + model.train() # for updating the mu, sigma of BatchNorm + output_real = model.forward(features, adj_norm) + for module in model.modules(): + if 'BatchNorm' in module._get_name(): #BatchNorm + module.eval() # fix mu and sigma of every BatchNorm layer + + loss = torch.tensor(0.0).to(self.device) + for c in range(data.nclass): + batch_size, n_id, adjs = data.retrieve_class_sampler( + c, adj, transductive=True, args=args) + if args.nlayers == 1: + adjs = [adjs] + + adjs = [adj.to(self.device) for adj in adjs] + output = model.forward_sampler(features[n_id], adjs) + loss_real = F.nll_loss(output, labels[n_id[:batch_size]]) + + gw_real = torch.autograd.grad(loss_real, model_parameters) + gw_real = list((_.detach().clone() for _ in gw_real)) + output_syn = model.forward(feat_syn, adj_syn_norm) + + ind = syn_class_indices[c] + loss_syn = F.nll_loss( + output_syn[ind[0]: ind[1]], + labels_syn[ind[0]: ind[1]]) + gw_syn = torch.autograd.grad(loss_syn, model_parameters, create_graph=True) + coeff = self.num_class_dict[c] / max(self.num_class_dict.values()) + loss += coeff * match_loss(gw_syn, gw_real, args, device=self.device) + + loss_avg += loss.item() + # TODO: regularize + if 
def get_sub_adj_feat(self, features):
    """Select training-node features per synthetic class and build a kNN graph.

    Transductive variant: ``features`` is the full feature matrix, so the
    rows are first restricted to ``self.data.idx_train`` before indexing by
    the per-class selection.  Each selected node keeps only its top-2
    cosine-similarity neighbours (diagonal zeroed).

    Returns ``(selected_features, adj_knn)``.
    """
    data = self.data
    chosen = []

    from collections import Counter
    per_class = Counter(self.labels_syn.cpu().numpy())

    for c in range(data.nclass):
        # presumably retrieve_class yields positions within the training
        # split — TODO confirm against the data class
        chosen.extend(list(data.retrieve_class(c, num=per_class[c])))
    chosen = np.array(chosen).reshape(-1)
    features = features[self.data.idx_train][chosen]

    from sklearn.metrics.pairwise import cosine_similarity
    top_k = 2
    sims = cosine_similarity(features.cpu().numpy())
    # a node must not count as its own neighbour
    sims[(np.arange(len(sims)), np.arange(len(sims)))] = 0
    for row in range(len(sims)):
        order = np.argsort(sims[row])
        sims[row, order[:-top_k]] = 0
    adj_knn = torch.FloatTensor(sims).to(self.device)
    return features, adj_knn


def get_loops(args):
    """Return the (outer_loop, inner_loop) iteration counts.

    Empirically chosen per dataset; with ``one_step`` the schedule collapses
    to a single (or, for ogbn-arxiv, five) outer step(s) and no inner steps.
    """
    if args.one_step:
        return (5, 0) if args.dataset == 'ogbn-arxiv' else (1, 0)
    if args.dataset == 'ogbn-arxiv':
        return args.outer, args.inner
    if args.dataset in ('cora', 'citeseer'):
        return 20, 15
    # physics and every remaining dataset share the same schedule
    return 20, 10
Module +from deeprobust.graph import utils +from copy import deepcopy +from torch_geometric.nn import SGConv +from torch_geometric.nn import APPNP as ModuleAPPNP +# from torch_geometric.nn import GATConv +from .mygatconv import GATConv +import numpy as np +import scipy.sparse as sp + +from torch.nn import Linear +from itertools import repeat + + +class GAT(torch.nn.Module): + + def __init__(self, nfeat, nhid, nclass, heads=8, output_heads=1, dropout=0.5, lr=0.01, + weight_decay=5e-4, with_bias=True, device=None, **kwargs): + + super(GAT, self).__init__() + + assert device is not None, "Please specify 'device'!" + self.device = device + self.dropout = dropout + self.lr = lr + self.weight_decay = weight_decay + + if 'dataset' in kwargs: + if kwargs['dataset'] in ['ogbn-arxiv']: + dropout = 0.7 # arxiv + elif kwargs['dataset'] in ['reddit']: + dropout = 0.05; self.dropout = 0.1; self.weight_decay = 5e-4 + # self.weight_decay = 5e-2; dropout=0.05; self.dropout=0.1 + elif kwargs['dataset'] in ['citeseer']: + dropout = 0.7 + self.weight_decay = 5e-4 + elif kwargs['dataset'] in ['flickr']: + dropout = 0.8 + # nhid=8; heads=8 + # self.dropout=0.1 + else: + dropout = 0.7 # cora, citeseer, reddit + else: + dropout = 0.7 + self.conv1 = GATConv( + nfeat, + nhid, + heads=heads, + dropout=dropout, + bias=with_bias) + + self.conv2 = GATConv( + nhid * heads, + nclass, + heads=output_heads, + concat=False, + dropout=dropout, + bias=with_bias) + + self.output = None + self.best_model = None + self.best_output = None + + # def forward(self, data): + # x, edge_index = data.x, data.edge_index + # x = F.dropout(x, p=self.dropout, training=self.training) + # x = F.elu(self.conv1(x, edge_index)) + # x = F.dropout(x, p=self.dropout, training=self.training) + # x = self.conv2(x, edge_index) + # return F.log_softmax(x, dim=1) + + def forward(self, data): + # x, edge_index = data.x, data.edge_index + x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight + x = F.dropout(x, 
p=self.dropout, training=self.training) + x = F.elu(self.conv1(x, edge_index, edge_weight=edge_weight)) + # print(self.conv1.att_l.sum()) + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.conv2(x, edge_index, edge_weight=edge_weight) + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GAT. + """ + self.conv1.reset_parameters() + self.conv2.reset_parameters() + + + def fit(self, feat, adj, labels, idx, data=None, train_iters=600, initialize=True, verbose=False, patience=None, noval=False, **kwargs): + + data_train = GraphData(feat, adj, labels) + data_train = Dpr2Pyg(data_train)[0] + + data_test = Dpr2Pyg(GraphData(data.feat_test, data.adj_test, None))[0] + + if noval: + data_val = GraphData(data.feat_val, data.adj_val, None) + data_val = Dpr2Pyg(data_val)[0] + else: + data_val = GraphData(data.feat_full, data.adj_full, None) + data_val = Dpr2Pyg(data_val)[0] + + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if initialize: + self.initialize() + + if len(data_train.y.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + + data_train.y = data_train.y.float() if self.multi_label else data_train.y + # data_val.y = data_val.y.float() if self.multi_label else data_val.y + + if verbose: + print('=== training gat model ===') + + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + best_acc_val = 0 + best_loss_val = 100 + for i in range(train_iters): + # if i == train_iters // 2: + if i in [1500]: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(data_train) + loss_train = self.loss(output, data_train.y) + loss_train.backward() + optimizer.step() + + with torch.no_grad(): + self.eval() + + output = self.forward(data_val) + if noval: + loss_val = F.nll_loss(output, 
class GraphData:
    """Minimal container bundling a graph's arrays for `Dpr2Pyg` conversion.

    Holds features, adjacency, labels and the optional train/val/test index
    arrays as plain attributes; performs no validation or conversion.
    """

    def __init__(self, features, adj, labels, idx_train=None, idx_val=None, idx_test=None):
        # Store every field verbatim; downstream code reads the attributes directly.
        for name, value in (('adj', adj),
                            ('features', features),
                            ('labels', labels),
                            ('idx_train', idx_train),
                            ('idx_val', idx_val),
                            ('idx_test', idx_test)):
            setattr(self, name, value)
# dummy root; does not mean anything + self.dpr_data = dpr_data + super(Dpr2Pyg, self).__init__(root, transform) + pyg_data = self.process() + self.data, self.slices = self.collate([pyg_data]) + self.transform = transform + + def process____(self): + dpr_data = self.dpr_data + try: + edge_index = torch.LongTensor(dpr_data.adj.nonzero().cpu()).cuda().T + except: + edge_index = torch.LongTensor(dpr_data.adj.nonzero()).cuda() + # by default, the features in pyg data is dense + try: + x = torch.FloatTensor(dpr_data.features.cpu()).float().cuda() + except: + x = torch.FloatTensor(dpr_data.features).float().cuda() + try: + y = torch.LongTensor(dpr_data.labels.cpu()).cuda() + except: + y = dpr_data.labels + + data = Data(x=x, edge_index=edge_index, y=y) + data.train_mask = None + data.val_mask = None + data.test_mask = None + return data + + def process(self): + dpr_data = self.dpr_data + if type(dpr_data.adj) == torch.Tensor: + adj_selfloop = dpr_data.adj + torch.eye(dpr_data.adj.shape[0]).cuda() + edge_index_selfloop = adj_selfloop.nonzero().T + edge_index = edge_index_selfloop + edge_weight = adj_selfloop[edge_index_selfloop[0], edge_index_selfloop[1]] + else: + adj_selfloop = dpr_data.adj + sp.eye(dpr_data.adj.shape[0]) + edge_index = torch.LongTensor(adj_selfloop.nonzero()).cuda() + edge_weight = torch.FloatTensor(adj_selfloop[adj_selfloop.nonzero()]).cuda() + + # by default, the features in pyg data is dense + try: + x = torch.FloatTensor(dpr_data.features.cpu()).float().cuda() + except: + x = torch.FloatTensor(dpr_data.features).float().cuda() + try: + y = torch.LongTensor(dpr_data.labels.cpu()).cuda() + except: + y = dpr_data.labels + + + data = Data(x=x, edge_index=edge_index, y=y, edge_weight=edge_weight) + data.train_mask = None + data.val_mask = None + data.test_mask = None + return data + + def get(self, idx): + data = self.data.__class__() + + if hasattr(self.data, '__num_nodes__'): + data.num_nodes = self.data.__num_nodes__[idx] + + for key in 
self.data.keys: + item, slices = self.data[key], self.slices[key] + s = list(repeat(slice(None), item.dim())) + s[self.data.__cat_dim__(key, item)] = slice(slices[idx], + slices[idx + 1]) + data[key] = item[s] + return data + + @property + def raw_file_names(self): + return ['some_file_1', 'some_file_2', ...] + + @property + def processed_file_names(self): + return ['data.pt'] + + def _download(self): + pass + diff --git a/GCond/models/gcn.py b/GCond/models/gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f0b2683b181d5dd36ce77d50690f27d265cda2 --- /dev/null +++ b/GCond/models/gcn.py @@ -0,0 +1,404 @@ +import torch.nn as nn +import torch.nn.functional as F +import math +import torch +import torch.optim as optim +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module +from deeprobust.graph import utils +from copy import deepcopy +from sklearn.metrics import f1_score +from torch.nn import init +import torch_sparse + + +class GraphConvolution(Module): + """Simple GCN layer, similar to https://github.com/tkipf/pygcn + """ + + def __init__(self, in_features, out_features, with_bias=True): + super(GraphConvolution, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features, out_features)) + self.bias = Parameter(torch.FloatTensor(out_features)) + self.reset_parameters() + + def reset_parameters(self): + stdv = 1. 
/ math.sqrt(self.weight.T.size(1)) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.uniform_(-stdv, stdv) + + def forward(self, input, adj): + """ Graph Convolutional Layer forward function + """ + if input.data.is_sparse: + support = torch.spmm(input, self.weight) + else: + support = torch.mm(input, self.weight) + if isinstance(adj, torch_sparse.SparseTensor): + output = torch_sparse.matmul(adj, support) + else: + output = torch.spmm(adj, support) + if self.bias is not None: + return output + self.bias + else: + return output + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_features) + ' -> ' \ + + str(self.out_features) + ')' + + +class GCN(nn.Module): + + def __init__(self, nfeat, nhid, nclass, nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4, + with_relu=True, with_bias=True, with_bn=False, device=None): + + super(GCN, self).__init__() + + assert device is not None, "Please specify 'device'!" + self.device = device + self.nfeat = nfeat + self.nclass = nclass + + self.layers = nn.ModuleList([]) + + if nlayers == 1: + self.layers.append(GraphConvolution(nfeat, nclass, with_bias=with_bias)) + else: + if with_bn: + self.bns = torch.nn.ModuleList() + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(GraphConvolution(nfeat, nhid, with_bias=with_bias)) + for i in range(nlayers-2): + self.layers.append(GraphConvolution(nhid, nhid, with_bias=with_bias)) + if with_bn: + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(GraphConvolution(nhid, nclass, with_bias=with_bias)) + + self.dropout = dropout + self.lr = lr + if not with_relu: + self.weight_decay = 0 + else: + self.weight_decay = weight_decay + self.with_relu = with_relu + self.with_bn = with_bn + self.with_bias = with_bias + self.output = None + self.best_model = None + self.best_output = None + self.adj_norm = None + self.features = None + self.multi_label = None + + def forward(self, x, adj): + for ix, layer in 
enumerate(self.layers): + x = layer(x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + # for ix, layer in enumerate(self.layers): + for ix, (adj, _, size) in enumerate(adjs): + x = self.layers[ix](x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + for ix, (adj) in enumerate(adjs): + x = self.layers[ix](x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GCN. 
+ """ + for layer in self.layers: + layer.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit(self, features, adj, labels, idx_train, idx_val=None, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, **kwargs): + + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + + if idx_val is not None: + self._train_with_val2(labels, idx_train, idx_val, train_iters, verbose) + else: + self._train_without_val2(labels, idx_train, train_iters, verbose) + + def _train_without_val2(self, labels, idx_train, train_iters, verbose): + self.train() + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output[idx_train], labels[idx_train]) + loss_train.backward() + optimizer.step() + if verbose and i % 10 == 0: + 
print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + self.eval() + output = self.forward(self.features, self.adj_norm) + self.output = output + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + if noval: + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + best_acc_val = 0 + + for i 
in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output, labels) + loss_train.backward() + optimizer.step() + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + + def test(self, idx_test): + """Evaluate GCN performance on test set. + Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. 
+ Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + if utils.is_sparse_tensor(adj): + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + self.adj_norm = utils.normalize_adj_tensor(adj) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + + + def _train_with_val2(self, labels, idx_train, idx_val, train_iters, verbose): + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + best_loss_val = 100 + best_acc_val = 0 + + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = F.nll_loss(output[idx_train], labels[idx_train]) + loss_train.backward() + optimizer.step() + + if verbose and i % 10 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + self.eval() + output = self.forward(self.features, self.adj_norm) + loss_val = F.nll_loss(output[idx_val], labels[idx_val]) + acc_val = utils.accuracy(output[idx_val], labels[idx_val]) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + 
print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) diff --git a/GCond/models/myappnp.py b/GCond/models/myappnp.py new file mode 100644 index 0000000000000000000000000000000000000000..19829ba68e9d180981a84f84e82d7f199ee5f332 --- /dev/null +++ b/GCond/models/myappnp.py @@ -0,0 +1,344 @@ +"""multiple transformaiton and multiple propagation""" +import torch.nn as nn +import torch.nn.functional as F +import math +import torch +import torch.optim as optim +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module +from deeprobust.graph import utils +from copy import deepcopy +from sklearn.metrics import f1_score +from torch.nn import init +import torch_sparse + + +class APPNP(nn.Module): + + def __init__(self, nfeat, nhid, nclass, nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4, + ntrans=1, with_bias=True, with_bn=False, device=None): + + super(APPNP, self).__init__() + + assert device is not None, "Please specify 'device'!" 
+ self.device = device + self.nfeat = nfeat + self.nclass = nclass + self.alpha = 0.1 + + with_bn = False + + self.layers = nn.ModuleList([]) + if ntrans == 1: + self.layers.append(MyLinear(nfeat, nclass)) + else: + self.layers.append(MyLinear(nfeat, nhid)) + if with_bn: + self.bns = torch.nn.ModuleList() + self.bns.append(nn.BatchNorm1d(nhid)) + for i in range(ntrans-2): + if with_bn: + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(MyLinear(nhid, nhid)) + self.layers.append(MyLinear(nhid, nclass)) + + self.nlayers = nlayers + self.weight_decay = weight_decay + self.dropout = dropout + self.lr = lr + self.with_bn = with_bn + self.with_bias = with_bias + self.output = None + self.best_model = None + self.best_output = None + self.adj_norm = None + self.features = None + self.multi_label = None + self.sparse_dropout = SparseDropout(dprob=0) + + def forward(self, x, adj): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + h = x + # here nlayers means K + for i in range(self.nlayers): + # adj_drop = self.sparse_dropout(adj, training=self.training) + adj_drop = adj + x = torch.spmm(adj_drop, x) + x = x * (1 - self.alpha) + x = x + self.alpha * h + + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + h = x + for ix, (adj, _, size) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + # adj_drop = F.dropout(adj, p=self.dropout) + adj_drop = adj + h = h[: size[1]] + x = torch_sparse.matmul(adj_drop, x) + x = x * (1 - self.alpha) + x = x + 
self.alpha * h + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + for ix, (adj) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + x = torch_sparse.matmul(adj, x) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GCN. + """ + for layer in self.layers: + layer.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + if 
noval: + # self._train_without_val(labels, data, train_iters, verbose) + # self._train_without_val(labels, data, train_iters, verbose) + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + best_acc_val = 0 + + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output, labels) + loss_train.backward() + optimizer.step() + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + + def test(self, idx_test): + """Evaluate GCN performance on test set. 
+ Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + if utils.is_sparse_tensor(adj): + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + self.adj_norm = utils.normalize_adj_tensor(adj) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + + + +class MyLinear(Module): + """Simple Linear layer, modified from https://github.com/tkipf/pygcn + """ + + def __init__(self, in_features, out_features, with_bias=True): 
class SparseDropout(torch.nn.Module):
    """Dropout over the stored (non-zero) entries of a sparse tensor.

    Each stored value is kept with probability ``1 - dprob`` and rescaled by
    ``1 / (1 - dprob)`` so the expectation is unchanged (inverted dropout),
    mirroring ``torch.nn.functional.dropout`` semantics for sparse inputs.
    """

    def __init__(self, dprob=0.5):
        super(SparseDropout, self).__init__()
        # Keep-probability, used for both the Bernoulli mask and the rescale factor.
        self.kprob = 1 - dprob

    def forward(self, x, training):
        if training:
            # Bernoulli(kprob) keep mask: U[0,1) + kprob floors to 1 with
            # probability kprob. Fix: draw on x's device — the original drew on
            # CPU, which fails when indexing a CUDA tensor with the mask.
            mask = ((torch.rand(x._values().size(), device=x.device)
                     + self.kprob).floor()).type(torch.bool)
            rc = x._indices()[:, mask]
            val = x._values()[mask] * (1.0 / self.kprob)
            # sparse_coo_tensor replaces the deprecated torch.sparse.FloatTensor
            # and inherits device/dtype from the surviving values.
            return torch.sparse_coo_tensor(rc, val, x.size())
        else:
            return x
APPNP1(nn.Module): + + def __init__(self, nfeat, nhid, nclass, nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4, + with_relu=True, with_bias=True, with_bn=False, device=None): + + super(APPNP1, self).__init__() + + assert device is not None, "Please specify 'device'!" + self.device = device + self.nfeat = nfeat + self.nclass = nclass + self.alpha = 0.1 + + if with_bn: + self.bns = torch.nn.ModuleList() + self.bns.append(nn.BatchNorm1d(nhid)) + + self.layers = nn.ModuleList([]) + # self.layers.append(MyLinear(nfeat, nclass)) + self.layers.append(MyLinear(nfeat, nhid)) + # self.layers.append(MyLinear(nhid, nhid)) + self.layers.append(MyLinear(nhid, nclass)) + + # if nlayers == 1: + # self.layers.append(nn.Linear(nfeat, nclass)) + # else: + # self.layers.append(nn.Linear(nfeat, nhid)) + # for i in range(nlayers-2): + # self.layers.append(nn.Linear(nhid, nhid)) + # self.layers.append(nn.Linear(nhid, nclass)) + + self.nlayers = nlayers + self.dropout = dropout + self.lr = lr + if not with_relu: + self.weight_decay = 0 + else: + self.weight_decay = weight_decay + self.with_relu = with_relu + self.with_bn = with_bn + self.with_bias = with_bias + self.output = None + self.best_model = None + self.best_output = None + self.adj_norm = None + self.features = None + self.multi_label = None + self.sparse_dropout = SparseDropout(dprob=0) + + def forward(self, x, adj): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + h = x + # here nlayers means K + for i in range(self.nlayers): + # adj_drop = self.sparse_dropout(adj, training=self.training) + adj_drop = adj + x = torch.spmm(adj_drop, x) + x = x * (1 - self.alpha) + x = x + self.alpha * h + + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + for ix, layer in enumerate(self.layers): + x 
= layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + h = x + for ix, (adj, _, size) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + # adj_drop = F.dropout(adj, p=self.dropout) + adj_drop = adj + h = h[: size[1]] + x = torch_sparse.matmul(adj_drop, x) + x = x * (1 - self.alpha) + x = x + self.alpha * h + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + for ix, (adj) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + x = torch_sparse.matmul(adj, x) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GCN. 
+ """ + for layer in self.layers: + layer.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + + if noval: + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, 
weight_decay=self.weight_decay) + + best_acc_val = 0 + + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output, labels) + loss_train.backward() + optimizer.step() + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + + def test(self, idx_test): + """Evaluate GCN performance on test set. + Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. 
If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + if utils.is_sparse_tensor(adj): + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + self.adj_norm = utils.normalize_adj_tensor(adj) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + + + +class MyLinear(Module): + """Simple Linear layer, modified from https://github.com/tkipf/pygcn + """ + + def __init__(self, in_features, out_features, with_bias=True): + super(MyLinear, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features, out_features)) + if with_bias: + self.bias = Parameter(torch.FloatTensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + # stdv = 1. / math.sqrt(self.weight.size(1)) + stdv = 1. 
class ChebConvolution(Module):
    """Chebyshev spectral graph convolution layer (ChebNet-style),
    modified from https://github.com/tkipf/pygcn.

    Aggregates K Chebyshev-polynomial terms of the (normalized) adjacency
    applied to the input features. With ``single_param=True`` one shared
    linear transform is reused for every polynomial order.
    """

    def __init__(self, in_features, out_features, with_bias=True, single_param=True, K=2):
        """Set single_param to True to alleviate the overfitting issue.

        K is the number of Chebyshev terms (polynomial order + 1).
        """
        super(ChebConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # One linear map per Chebyshev order; bias is handled once at the layer
        # level (below), so the per-order linears are bias-free.
        self.lins = torch.nn.ModuleList([
            MyLinear(in_features, out_features, with_bias=False) for _ in range(K)])
        # self.lins = torch.nn.ModuleList([
        #     MyLinear(in_features, out_features, with_bias=True) for _ in range(K)])
        if with_bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.single_param = single_param
        self.reset_parameters()

    def reset_parameters(self):
        # Re-initialize each per-order linear; zero the shared bias (no-op if None).
        for lin in self.lins:
            lin.reset_parameters()
        zeros(self.bias)

    def forward(self, input, adj, size=None):
        """Graph Convolutional Layer forward function.

        Computes sum_k W_k * T_k(adj) x via the Chebyshev recurrence
        T_k = 2 * adj @ T_{k-1} - T_{k-2}, with T_0 = x and T_1 = adj @ x.
        ``size`` truncates the order-0 term to the first size[1] rows —
        presumably the target nodes of a sampled bipartite block; TODO confirm
        against the sampler-based callers.
        """
        # support = torch.mm(input, self.weight_l)
        x = input
        Tx_0 = x[:size[1]] if size is not None else x
        Tx_1 = x # dummy
        output = self.lins[0](Tx_0)

        if len(self.lins) > 1:
            # T_1 = adj @ x; adj may be a torch_sparse.SparseTensor or a torch tensor.
            if isinstance(adj, torch_sparse.SparseTensor):
                Tx_1 = torch_sparse.matmul(adj, x)
            else:
                Tx_1 = torch.spmm(adj, x)

            # single_param shares lins[0] across all orders instead of lins[1].
            if self.single_param:
                output = output + self.lins[0](Tx_1)
            else:
                output = output + self.lins[1](Tx_1)

        for lin in self.lins[2:]:
            if self.single_param:
                lin = self.lins[0]
            if isinstance(adj, torch_sparse.SparseTensor):
                Tx_2 = torch_sparse.matmul(adj, Tx_1)
            else:
                Tx_2 = torch.spmm(adj, Tx_1)
            # Chebyshev recurrence: T_k = 2 * adj @ T_{k-1} - T_{k-2}.
            Tx_2 = 2. * Tx_2 - Tx_0
            output = output + lin.forward(Tx_2)
            Tx_0, Tx_1 = Tx_1, Tx_2

        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
+ self.device = device + self.nfeat = nfeat + self.nclass = nclass + + self.layers = nn.ModuleList([]) + + if nlayers == 1: + self.layers.append(ChebConvolution(nfeat, nclass, with_bias=with_bias)) + else: + if with_bn: + self.bns = torch.nn.ModuleList() + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(ChebConvolution(nfeat, nhid, with_bias=with_bias)) + for i in range(nlayers-2): + self.layers.append(ChebConvolution(nhid, nhid, with_bias=with_bias)) + if with_bn: + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(ChebConvolution(nhid, nclass, with_bias=with_bias)) + + # self.lin = MyLinear(nhid, nclass, with_bias=True) + + # dropout = 0.5 + self.dropout = dropout + self.lr = lr + self.weight_decay = weight_decay + self.with_relu = with_relu + self.with_bn = with_bn + self.with_bias = with_bias + self.output = None + self.best_model = None + self.best_output = None + self.adj_norm = None + self.features = None + self.multi_label = None + + def forward(self, x, adj): + for ix, layer in enumerate(self.layers): + # x = F.dropout(x, 0.2, training=self.training) + x = layer(x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + # x = F.dropout(x, 0.5, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + # TODO: do we need normalization? 
+ # for ix, layer in enumerate(self.layers): + for ix, (adj, _, size) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + x = self.layers[ix](x, adj, size=size) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + for ix, (adj) in enumerate(adjs): + x = self.layers[ix](x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GCN. + """ + for layer in self.layers: + layer.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + adj = adj - torch.eye(adj.shape[0]).to(self.device) # cheby + if normalize: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + 
self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + if noval: + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + # adj_full = adj_full - sp.eye(adj_full.shape[0]) + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + best_acc_val = 0 + best_loss_val = 100 + + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output, labels) + loss_train.backward() + optimizer.step() + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + # if loss_val < best_loss_val: + # best_loss_val = loss_val + # self.output = output + # weights = deepcopy(self.state_dict()) + # print(best_loss_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + 
self.output = output + weights = deepcopy(self.state_dict()) + # print(best_acc_val) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + def test(self, idx_test): + """Evaluate GCN performance on test set. + Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. 
+ Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + # adj = adj-sp.eye(adj.shape[0]) + # adj[0,0]=0 + + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + adj = utils.to_scipy(adj) + + adj = adj-sp.eye(adj.shape[0]) + mx = normalize_adj(adj) + adj = utils.sparse_mx_to_torch_sparse_tensor(mx).to(self.device) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + +class MyLinear(Module): + """Simple Linear layer, modified from https://github.com/tkipf/pygcn + """ + + def __init__(self, in_features, out_features, with_bias=True): + super(MyLinear, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features, out_features)) + if with_bias: + self.bias = Parameter(torch.FloatTensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + # stdv = 1. / math.sqrt(self.weight.size(1)) + stdv = 1. 
def normalize_adj(mx):
    """Symmetrically normalize a sparse adjacency matrix.

    Computes A' = (D + I)^{-1/2} (A + I) (D + I)^{-1/2}: self-loops are added
    and the result is scaled on both sides by the inverse square root of the
    resulting degree matrix (the standard GCN normalization). Rows with zero
    degree are left as zero rather than producing infinities.

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse matrix
        normalized matrix
    """

    # TODO: maybe using coo format would be better?
    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    mx = mx + sp.eye(mx.shape[0])
    degrees = np.array(mx.sum(1)).flatten()
    inv_sqrt_deg = np.power(degrees, -1 / 2)
    # Zero-degree rows yield inf above; clamp them so those rows stay zero.
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt_deg)
    return d_inv_sqrt.dot(mx).dot(d_inv_sqrt)
    def __init__(self, in_channels: Union[int, Tuple[int, int]],
                 out_channels: int, heads: int = 1, concat: bool = True,
                 negative_slope: float = 0.2, dropout: float = 0.0,
                 add_self_loops: bool = True, bias: bool = True, **kwargs):
        # Sum aggregation unless the caller overrides it via **kwargs.
        kwargs.setdefault('aggr', 'add')
        # node_dim=0: node features are indexed along dimension 0 in propagate().
        super(GATConv, self).__init__(node_dim=0, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.add_self_loops = add_self_loops

        if isinstance(in_channels, int):
            # Homogeneous graph: source and target share one projection.
            self.lin_l = Linear(in_channels, heads * out_channels, bias=False)
            self.lin_r = self.lin_l
        else:
            # Bipartite input: separate projections for source/target nodes.
            self.lin_l = Linear(in_channels[0], heads * out_channels, False)
            self.lin_r = Linear(in_channels[1], heads * out_channels, False)

        # Per-head attention vectors for source (l) and target (r) ends.
        self.att_l = Parameter(torch.Tensor(1, heads, out_channels))
        self.att_r = Parameter(torch.Tensor(1, heads, out_channels))

        # Bias size depends on whether head outputs are concatenated or averaged.
        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        # Attention coefficients stashed by message() for optional return.
        self._alpha = None

        self.reset_parameters()
        # Per-edge weights cached lazily in forward() when supplied.
        self.edge_weight = None
    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
                size: Size = None, return_attention_weights=None, edge_weight=None):
        # type: (Union[Tensor, OptPairTensor], Tensor, Size, NoneType) -> Tensor  # noqa
        # type: (Union[Tensor, OptPairTensor], SparseTensor, Size, NoneType) -> Tensor  # noqa
        # type: (Union[Tensor, OptPairTensor], Tensor, Size, bool) -> Tuple[Tensor, Tuple[Tensor, Tensor]]  # noqa
        # type: (Union[Tensor, OptPairTensor], SparseTensor, Size, bool) -> Tuple[Tensor, SparseTensor]  # noqa
        r"""
        Args:
            return_attention_weights (bool, optional): If set to :obj:`True`,
                will additionally return the tuple
                :obj:`(edge_index, attention_weights)`, holding the computed
                attention weights for each edge. (default: :obj:`None`)
        """
        H, C = self.heads, self.out_channels

        x_l: OptTensor = None
        x_r: OptTensor = None
        alpha_l: OptTensor = None
        alpha_r: OptTensor = None
        if isinstance(x, Tensor):
            assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'
            # Shared projection, then per-head attention logits for both ends.
            x_l = x_r = self.lin_l(x).view(-1, H, C)
            alpha_l = (x_l * self.att_l).sum(dim=-1)
            alpha_r = (x_r * self.att_r).sum(dim=-1)
        else:
            # Bipartite (source, target) input pair.
            x_l, x_r = x[0], x[1]
            assert x[0].dim() == 2, 'Static graphs not supported in `GATConv`.'
            x_l = self.lin_l(x_l).view(-1, H, C)
            alpha_l = (x_l * self.att_l).sum(dim=-1)
            if x_r is not None:
                x_r = self.lin_r(x_r).view(-1, H, C)
                alpha_r = (x_r * self.att_r).sum(dim=-1)

        assert x_l is not None
        assert alpha_l is not None

        if self.add_self_loops:
            if isinstance(edge_index, Tensor):
                num_nodes = x_l.size(0)
                if x_r is not None:
                    num_nodes = min(num_nodes, x_r.size(0))
                if size is not None:
                    num_nodes = min(size[0], size[1])
                # Re-add exactly one self-loop per node.
                edge_index, _ = remove_self_loops(edge_index)
                edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)

                if edge_weight is not None:
                    # Cache supplied edge weights; refresh the cache whenever
                    # the edge count changed (stale cache from another graph).
                    if self.edge_weight is None:
                        self.edge_weight = edge_weight

                    if edge_index.size(1) != self.edge_weight.shape[0]:
                        self.edge_weight = edge_weight

            elif isinstance(edge_index, SparseTensor):
                edge_index = set_diag(edge_index)

        # propagate_type: (x: OptPairTensor, alpha: OptPairTensor)
        out = self.propagate(edge_index, x=(x_l, x_r),
                             alpha=(alpha_l, alpha_r), size=size)

        # message() stored the softmax-ed attention coefficients here.
        alpha = self._alpha
        self._alpha = None

        if self.concat:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)

        if self.bias is not None:
            out += self.bias

        # NOTE(review): if return_attention_weights is a bool but edge_index
        # is neither Tensor nor SparseTensor, this falls through returning
        # None — confirm that case cannot occur.
        if isinstance(return_attention_weights, bool):
            assert alpha is not None
            if isinstance(edge_index, Tensor):
                return out, (edge_index, alpha)
            elif isinstance(edge_index, SparseTensor):
                return out, edge_index.set_value(alpha, layout='coo')
        else:
            return out
    def __init__(self, in_features, out_features, with_bias=True, root_weight=False):
        """Set up the SAGE layer's parameters.

        Parameters
        ----------
        in_features : int
            size of each input node feature vector
        out_features : int
            size of each output node feature vector
        with_bias : bool
            NOTE(review): accepted but currently unused — ``bias_l``/``bias_r``
            are always created and ``bias_l`` is always added in forward();
            confirm whether this parameter should take effect.
        root_weight : bool
            if True, forward() adds a separate transform of the root
            (target) node features: ``input @ weight_r + bias_r``.
        """
        super(SageConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Neighbor-aggregation transform.
        self.weight_l = Parameter(torch.FloatTensor(in_features, out_features))
        self.bias_l = Parameter(torch.FloatTensor(out_features))
        # Root (self) transform; only applied when root_weight is True.
        self.weight_r = Parameter(torch.FloatTensor(in_features, out_features))
        self.bias_r = Parameter(torch.FloatTensor(out_features))
        self.reset_parameters()
        # Assigned after reset_parameters(); reset does not depend on it.
        self.root_weight = root_weight
        # self.weight = Parameter(torch.FloatTensor(out_features, in_features))
        # self.linear = torch.nn.Linear(self.in_features, self.out_features)
/ math.sqrt(self.weight_r.T.size(1)) + self.weight_r.data.uniform_(-stdv, stdv) + self.bias_r.data.uniform_(-stdv, stdv) + + def forward(self, input, adj, size=None): + """ Graph Convolutional Layer forward function + """ + if input.data.is_sparse: + support = torch.spmm(input, self.weight_l) + else: + support = torch.mm(input, self.weight_l) + if isinstance(adj, torch_sparse.SparseTensor): + output = torch_sparse.matmul(adj, support) + else: + output = torch.spmm(adj, support) + output = output + self.bias_l + + if self.root_weight: + if size is not None: + output = output + input[:size[1]] @ self.weight_r + self.bias_r + else: + output = output + input @ self.weight_r + self.bias_r + else: + output = output + + return output + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_features) + ' -> ' \ + + str(self.out_features) + ')' + + +class GraphSage(nn.Module): + + def __init__(self, nfeat, nhid, nclass, nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4, + with_relu=True, with_bias=True, with_bn=False, device=None): + + super(GraphSage, self).__init__() + + assert device is not None, "Please specify 'device'!" 
+ self.device = device + self.nfeat = nfeat + self.nclass = nclass + + self.layers = nn.ModuleList([]) + + if nlayers == 1: + self.layers.append(SageConvolution(nfeat, nclass, with_bias=with_bias)) + else: + if with_bn: + self.bns = torch.nn.ModuleList() + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(SageConvolution(nfeat, nhid, with_bias=with_bias)) + for i in range(nlayers-2): + self.layers.append(SageConvolution(nhid, nhid, with_bias=with_bias)) + if with_bn: + self.bns.append(nn.BatchNorm1d(nhid)) + self.layers.append(SageConvolution(nhid, nclass, with_bias=with_bias)) + + self.dropout = dropout + self.lr = lr + if not with_relu: + self.weight_decay = 0 + else: + self.weight_decay = weight_decay + self.with_relu = with_relu + self.with_bn = with_bn + self.with_bias = with_bias + self.output = None + self.best_model = None + self.best_output = None + self.adj_norm = None + self.features = None + self.multi_label = None + + def forward(self, x, adj): + for ix, layer in enumerate(self.layers): + x = layer(x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + # TODO: do we need normalization? 
+ # for ix, layer in enumerate(self.layers): + for ix, (adj, _, size) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + x = self.layers[ix](x, adj, size=size) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + for ix, (adj) in enumerate(adjs): + x = self.layers[ix](x, adj) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + if self.with_relu: + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GCN. + """ + for layer in self.layers: + layer.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = 
features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + if noval: + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + adj_norm = self.adj_norm + node_idx = torch.arange(adj_norm.shape[0]).long() + + edge_index = adj_norm.nonzero().T + adj_norm = SparseTensor(row=edge_index[0], col=edge_index[1], + value=adj_norm[edge_index[0], edge_index[1]], sparse_sizes=adj_norm.size()).t() + # edge_index = adj_norm._indices() + # adj_norm = SparseTensor(row=edge_index[0], col=edge_index[1], + # value=adj_norm._values(), sparse_sizes=adj_norm.size()).t() + + if adj_norm.density() > 0.5: # if the weighted graph is too dense, we need a larger neighborhood size + sizes = [30, 20] + else: + sizes = [5, 5] + train_loader = NeighborSampler(adj_norm, + node_idx=node_idx, + sizes=sizes, batch_size=len(node_idx), + num_workers=0, return_e_id=False, + num_nodes=adj_norm.size(0), + shuffle=True) + + best_acc_val = 0 + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + # optimizer.zero_grad() + # output = self.forward(self.features, 
self.adj_norm) + # loss_train = self.loss(output, labels) + # loss_train.backward() + # optimizer.step() + + for batch_size, n_id, adjs in train_loader: + adjs = [adj.to(self.device) for adj in adjs] + optimizer.zero_grad() + out = self.forward_sampler(self.features[n_id], adjs) + loss_train = F.nll_loss(out, labels[n_id[:batch_size]]) + loss_train.backward() + optimizer.step() + + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + def test(self, idx_test): + """Evaluate GCN performance on test set. + Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. 
If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + if utils.is_sparse_tensor(adj): + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + self.adj_norm = utils.normalize_adj_tensor(adj) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + diff --git a/GCond/models/parametrized_adj.py b/GCond/models/parametrized_adj.py new file mode 100644 index 0000000000000000000000000000000000000000..c08c50278377da9985a8c546d5a21b07ba5bd42e --- /dev/null +++ b/GCond/models/parametrized_adj.py @@ -0,0 +1,88 @@ +import torch.nn as nn +import torch.nn.functional as F +import math +import torch +import torch.optim as optim +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module +from itertools import product +import numpy as np + +class PGE(nn.Module): + + def __init__(self, nfeat, nnodes, nhid=128, nlayers=3, device=None, args=None): + super(PGE, self).__init__() + if args.dataset in ['ogbn-arxiv', 'arxiv', 'flickr']: + nhid = 256 + if args.dataset in ['reddit']: + nhid = 256 + if args.reduction_rate==0.01: + nhid = 128 + nlayers = 3 + # nhid = 128 + + self.layers = nn.ModuleList([]) + self.layers.append(nn.Linear(nfeat*2, nhid)) + 
    def forward(self, x, inference=False):
        """Produce a dense synthetic adjacency from node features `x`.

        Every ordered node pair (nnodes*nnodes of them, from self.edge_index)
        is scored by an MLP on the concatenated endpoint features; scores are
        symmetrized, squashed through a sigmoid, and the diagonal
        (self-loops) is zeroed.

        NOTE(review): `inference` is accepted but unused in this body —
        confirm whether inference-time behavior should differ.
        """
        if self.args.dataset == 'reddit' and self.args.reduction_rate >= 0.01:
            # Large condensed graph: score the edges in 5 chunks to bound
            # peak memory of the concatenated pair embeddings.
            edge_index = self.edge_index
            n_part = 5
            splits = np.array_split(np.arange(edge_index.shape[1]), n_part)
            edge_embed = []
            for idx in splits:
                tmp_edge_embed = torch.cat([x[edge_index[0][idx]],
                                            x[edge_index[1][idx]]], axis=1)
                for ix, layer in enumerate(self.layers):
                    tmp_edge_embed = layer(tmp_edge_embed)
                    if ix != len(self.layers) - 1:
                        # BN + ReLU on all but the scalar output layer.
                        tmp_edge_embed = self.bns[ix](tmp_edge_embed)
                        tmp_edge_embed = F.relu(tmp_edge_embed)
                edge_embed.append(tmp_edge_embed)
            edge_embed = torch.cat(edge_embed)
        else:
            # Score all node pairs in a single batch.
            edge_index = self.edge_index
            edge_embed = torch.cat([x[edge_index[0]],
                                    x[edge_index[1]]], axis=1)
            for ix, layer in enumerate(self.layers):
                edge_embed = layer(edge_embed)
                if ix != len(self.layers) - 1:
                    edge_embed = self.bns[ix](edge_embed)
                    edge_embed = F.relu(edge_embed)

        adj = edge_embed.reshape(self.nnodes, self.nnodes)

        # Symmetrize, map scores into (0, 1), and remove self-loops.
        adj = (adj + adj.T)/2
        adj = torch.sigmoid(adj)
        adj = adj - torch.diag(torch.diag(adj, 0))
        return adj
0000000000000000000000000000000000000000..b7d6fbc55cb69f436301b3d88cdef79bd1e1be60 --- /dev/null +++ b/GCond/models/sgc.py @@ -0,0 +1,290 @@ +'''one transformation with multiple propagation''' +import torch.nn as nn +import torch.nn.functional as F +import math +import torch +import torch.optim as optim +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module +from deeprobust.graph import utils +from copy import deepcopy +from sklearn.metrics import f1_score +from torch.nn import init +import torch_sparse + +class GraphConvolution(Module): + """Simple GCN layer, similar to https://github.com/tkipf/pygcn + """ + + def __init__(self, in_features, out_features, with_bias=True): + super(GraphConvolution, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features, out_features)) + self.bias = Parameter(torch.FloatTensor(out_features)) + self.reset_parameters() + + def reset_parameters(self): + # stdv = 1. / math.sqrt(self.weight.size(1)) + stdv = 1. 
/ math.sqrt(self.weight.T.size(1)) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.uniform_(-stdv, stdv) + + def forward(self, input, adj): + """ Graph Convolutional Layer forward function + """ + if input.data.is_sparse: + support = torch.spmm(input, self.weight) + else: + support = torch.mm(input, self.weight) + if isinstance(adj, torch_sparse.SparseTensor): + output = torch_sparse.matmul(adj, support) + else: + output = torch.spmm(adj, support) + if self.bias is not None: + return output + self.bias + else: + return output + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_features) + ' -> ' \ + + str(self.out_features) + ')' + + +class SGC(nn.Module): + + def __init__(self, nfeat, nhid, nclass, nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4, + with_relu=True, with_bias=True, with_bn=False, device=None): + + super(SGC, self).__init__() + + assert device is not None, "Please specify 'device'!" + self.device = device + self.nfeat = nfeat + self.nclass = nclass + + self.conv = GraphConvolution(nfeat, nclass, with_bias=with_bias) + + self.nlayers = nlayers + self.dropout = dropout + self.lr = lr + if not with_relu: + self.weight_decay = 0 + else: + self.weight_decay = weight_decay + self.with_relu = with_relu + if with_bn: + print('Warning: SGC does not have bn!!!') + self.with_bn = False + self.with_bias = with_bias + self.output = None + self.best_model = None + self.best_output = None + self.adj_norm = None + self.features = None + self.multi_label = None + + def forward(self, x, adj): + weight = self.conv.weight + bias = self.conv.bias + x = torch.mm(x, weight) + for i in range(self.nlayers): + x = torch.spmm(adj, x) + x = x + bias + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + weight = self.conv.weight + bias = self.conv.bias + x = torch.mm(x, weight) + for ix, (adj, _, size) in enumerate(adjs): + x = 
torch_sparse.matmul(adj, x) + x = x + bias + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + weight = self.conv.weight + bias = self.conv.bias + x = torch.mm(x, weight) + for ix, (adj) in enumerate(adjs): + if type(adj) == torch.Tensor: + x = adj @ x + else: + x = torch_sparse.matmul(adj, x) + x = x + bias + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def initialize(self): + """Initialize parameters of GCN. + """ + self.conv.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + if noval: + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, 
labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + best_acc_val = 0 + + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output, labels) + loss_train.backward() + optimizer.step() + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + + def test(self, idx_test): + """Evaluate GCN performance on test set. 
+ Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + if utils.is_sparse_tensor(adj): + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + self.adj_norm = utils.normalize_adj_tensor(adj) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + + diff --git a/GCond/models/sgc_multi.py b/GCond/models/sgc_multi.py new file mode 100644 index 
    def __init__(self, nfeat, nhid, nclass, nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4,
                 ntrans=2, with_bias=True, with_bn=False, device=None):

        """nlayers indicates the number of propagations;
        ntrans is the number of linear (MyLinear) transformation layers."""
        super(SGC, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device
        self.nfeat = nfeat
        self.nclass = nclass


        # Stack of feature transformations. MyLinear is defined elsewhere in
        # this package. NOTE(review): with_bias is stored but never passed to
        # MyLinear — confirm whether it should take effect.
        self.layers = nn.ModuleList([])
        if ntrans == 1:
            self.layers.append(MyLinear(nfeat, nclass))
        else:
            self.layers.append(MyLinear(nfeat, nhid))
            if with_bn:
                self.bns = torch.nn.ModuleList()
                self.bns.append(nn.BatchNorm1d(nhid))
            # Hidden transformations (none when ntrans == 2).
            for i in range(ntrans-2):
                if with_bn:
                    self.bns.append(nn.BatchNorm1d(nhid))
                self.layers.append(MyLinear(nhid, nhid))
            self.layers.append(MyLinear(nhid, nclass))

        self.nlayers = nlayers
        self.dropout = dropout
        self.lr = lr
        self.with_bn = with_bn
        self.with_bias = with_bias
        self.weight_decay = weight_decay
        # Training-time state, populated by fit_with_val()/_train_with_val().
        self.output = None
        self.best_model = None
        self.best_output = None
        self.adj_norm = None
        self.features = None
        self.multi_label = None
torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler(self, x, adjs): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + for ix, (adj, _, size) in enumerate(adjs): + # x_target = x[: size[1]] + # x = self.layers[ix]((x, x_target), edge_index) + # adj = adj.to(self.device) + x = torch_sparse.matmul(adj, x) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + def forward_sampler_syn(self, x, adjs): + for ix, layer in enumerate(self.layers): + x = layer(x) + if ix != len(self.layers) - 1: + x = self.bns[ix](x) if self.with_bn else x + x = F.relu(x) + x = F.dropout(x, self.dropout, training=self.training) + + for ix, (adj) in enumerate(adjs): + if type(adj) == torch.Tensor: + x = adj @ x + else: + x = torch_sparse.matmul(adj, x) + + if self.multi_label: + return torch.sigmoid(x) + else: + return F.log_softmax(x, dim=1) + + + def initialize(self): + """Initialize parameters of GCN. 
+ """ + for layer in self.layers: + layer.reset_parameters() + if self.with_bn: + for bn in self.bns: + bn.reset_parameters() + + def fit_with_val(self, features, adj, labels, data, train_iters=200, initialize=True, verbose=False, normalize=True, patience=None, noval=False, **kwargs): + '''data: full data class''' + if initialize: + self.initialize() + + # features, adj, labels = data.feat_train, data.adj_train, data.labels_train + if type(adj) is not torch.Tensor: + features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device) + else: + features = features.to(self.device) + adj = adj.to(self.device) + labels = labels.to(self.device) + + if normalize: + if utils.is_sparse_tensor(adj): + adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + adj_norm = utils.normalize_adj_tensor(adj) + else: + adj_norm = adj + + if 'feat_norm' in kwargs and kwargs['feat_norm']: + from utils import row_normalize_tensor + features = row_normalize_tensor(features-features.min()) + + self.adj_norm = adj_norm + self.features = features + + if len(labels.shape) > 1: + self.multi_label = True + self.loss = torch.nn.BCELoss() + else: + self.multi_label = False + self.loss = F.nll_loss + + labels = labels.float() if self.multi_label else labels + self.labels = labels + + if noval: + self._train_with_val(labels, data, train_iters, verbose, adj_val=True) + else: + self._train_with_val(labels, data, train_iters, verbose) + + def _train_with_val(self, labels, data, train_iters, verbose, adj_val=False): + if adj_val: + feat_full, adj_full = data.feat_val, data.adj_val + else: + feat_full, adj_full = data.feat_full, data.adj_full + + feat_full, adj_full = utils.to_tensor(feat_full, adj_full, device=self.device) + adj_full_norm = utils.normalize_adj_tensor(adj_full, sparse=True) + labels_val = torch.LongTensor(data.labels_val).to(self.device) + + if verbose: + print('=== training gcn model ===') + optimizer = optim.Adam(self.parameters(), lr=self.lr, 
weight_decay=self.weight_decay) + + best_acc_val = 0 + + for i in range(train_iters): + if i == train_iters // 2: + lr = self.lr*0.1 + optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=self.weight_decay) + + self.train() + optimizer.zero_grad() + output = self.forward(self.features, self.adj_norm) + loss_train = self.loss(output, labels) + loss_train.backward() + optimizer.step() + + if verbose and i % 100 == 0: + print('Epoch {}, training loss: {}'.format(i, loss_train.item())) + + with torch.no_grad(): + self.eval() + output = self.forward(feat_full, adj_full_norm) + if adj_val: + loss_val = F.nll_loss(output, labels_val) + acc_val = utils.accuracy(output, labels_val) + else: + loss_val = F.nll_loss(output[data.idx_val], labels_val) + acc_val = utils.accuracy(output[data.idx_val], labels_val) + + if acc_val > best_acc_val: + best_acc_val = acc_val + self.output = output + weights = deepcopy(self.state_dict()) + + if verbose: + print('=== picking the best model according to the performance on validation ===') + self.load_state_dict(weights) + + + def test(self, idx_test): + """Evaluate GCN performance on test set. + Parameters + ---------- + idx_test : + node testing indices + """ + self.eval() + output = self.predict() + # output = self.output + loss_test = F.nll_loss(output[idx_test], self.labels[idx_test]) + acc_test = utils.accuracy(output[idx_test], self.labels[idx_test]) + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + return acc_test.item() + + + @torch.no_grad() + def predict(self, features=None, adj=None): + """By default, the inputs should be unnormalized adjacency + Parameters + ---------- + features : + node features. If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + adj : + adjcency matrix. 
If `features` and `adj` are not given, this function will use previous stored `features` and `adj` from training to make predictions. + Returns + ------- + torch.FloatTensor + output (log probabilities) of GCN + """ + + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + if utils.is_sparse_tensor(adj): + self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True) + else: + self.adj_norm = utils.normalize_adj_tensor(adj) + return self.forward(self.features, self.adj_norm) + + @torch.no_grad() + def predict_unnorm(self, features=None, adj=None): + self.eval() + if features is None and adj is None: + return self.forward(self.features, self.adj_norm) + else: + if type(adj) is not torch.Tensor: + features, adj = utils.to_tensor(features, adj, device=self.device) + + self.features = features + self.adj_norm = adj + return self.forward(self.features, self.adj_norm) + + + +class MyLinear(Module): + """Simple Linear layer, modified from https://github.com/tkipf/pygcn + """ + + def __init__(self, in_features, out_features, with_bias=True): + super(MyLinear, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features, out_features)) + if with_bias: + self.bias = Parameter(torch.FloatTensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + # stdv = 1. / math.sqrt(self.weight.size(1)) + stdv = 1. 
/ math.sqrt(self.weight.T.size(1)) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.uniform_(-stdv, stdv) + + def forward(self, input): + if input.data.is_sparse: + support = torch.spmm(input, self.weight) + else: + support = torch.mm(input, self.weight) + output = support + if self.bias is not None: + return output + self.bias + else: + return output + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_features) + ' -> ' \ + + str(self.out_features) + ')' + + diff --git a/GCond/requirements.txt b/GCond/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..65588c98cda63c674b1df9d2571d25fcfc5694d0 --- /dev/null +++ b/GCond/requirements.txt @@ -0,0 +1,11 @@ +torch +torch_geometric +scipy +numpy +ogb +tqdm +torch_sparse +torch_vision +configs +deeprobust +scikit_learn diff --git a/GCond/res/cross/empty b/GCond/res/cross/empty new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/GCond/res/cross/empty @@ -0,0 +1 @@ + diff --git a/GCond/script.sh b/GCond/script.sh new file mode 100644 index 0000000000000000000000000000000000000000..bf9f8dcc29e41e1268d352f5ed01075658d62004 --- /dev/null +++ b/GCond/script.sh @@ -0,0 +1,4 @@ +python train_cond_tranduct_sampler.py --dataset cora --mlp=0 --nlayers=2 --sgc=1 --lr_feat=1e-4 --gpu_id=0 --lr_adj=1e-4 --r=0.5 --seed=1000 + +python train_cond_tranduct_sampler.py --dataset ogbn-arxiv --mlp=0 --nlayers=2 --sgc=1 --lr_feat=0.01 --gpu_id=3 --lr_adj=0.01 --r=0.02 --seed=0 --inner=3 --epochs=1000 --save=0 + diff --git a/GCond/scripts/run_cross.sh b/GCond/scripts/run_cross.sh new file mode 100644 index 0000000000000000000000000000000000000000..00d57fd2458037f87b264ff7ce56cca2830d4103 --- /dev/null +++ b/GCond/scripts/run_cross.sh @@ -0,0 +1,16 @@ +for r in 0.001 0.005 0.01 +do + bash scripts/script_cross.sh flickr ${r} 0 + bash scripts/script_cross.sh ogbn-arxiv 
${r} 0 +done + +for r in 0.25 0.5 1 +do + bash scripts/script_cross.sh citeseer ${r} 0 + bash scripts/script_cross.sh cora ${r} 0 +done + +for r in 0.001 0.0005 0.002 +do + bash scripts/script_cross.sh reddit ${r} 0 +done diff --git a/GCond/scripts/run_main.sh b/GCond/scripts/run_main.sh new file mode 100644 index 0000000000000000000000000000000000000000..80a95dafb21116632ee806cbee37a5347c4afadf --- /dev/null +++ b/GCond/scripts/run_main.sh @@ -0,0 +1,26 @@ +for r in 0.25 0.5 1 +do +python train_gcond_transduct.py --dataset cora --nlayers=2 --sgc=1 --lr_feat=1e-4 --gpu_id=0 --lr_adj=1e-4 --r=${r} --seed=1 --epoch=600 --save=0 +done + +for r in 0.25 0.5 1 +do +python train_gcond_transduct.py --dataset citeseer --nlayers=2 --sgc=1 --lr_feat=1e-4 --gpu_id=0 --lr_adj=1e-4 --r=${r} --seed=1 --epoch=600 --save=0 +done + + +for r in 0.001 0.005 0.01 +do +python train_gcond_transduct.py --dataset ogbn-arxiv --nlayers=2 --sgc=1 --lr_feat=0.01 --gpu_id=3 --lr_adj=0.01 --r=${r} --seed=1 --inner=3 --epochs=1000 --save=0 +done + + +for r in 0.001 0.005 0.01 +do + python train_gcond_induct.py --dataset flickr --sgc=2 --nlayers=2 --lr_feat=0.005 --lr_adj=0.005 --r=${r} --seed=1 --gpu_id=0 --epochs=1000 --inner=1 --outer=10 --save=0 +done + +for r in 0.001 0.005 0.0005 0.002 +do + python train_gcond_induct.py --dataset reddit --sgc=1 --nlayers=2 --lr_feat=0.1 --lr_adj=0.1 --r=${r} --seed=1 --gpu_id=0 --epochs=1000 --inner=1 --outer=10 --save=0 +done diff --git a/GCond/scripts/script_cross.sh b/GCond/scripts/script_cross.sh new file mode 100644 index 0000000000000000000000000000000000000000..70bfc6fac08871cd3c3cb99d4163c9c4750b30af --- /dev/null +++ b/GCond/scripts/script_cross.sh @@ -0,0 +1,7 @@ +dataset=${1} +r=${2} +gpu_id=${3} +for s in 0 1 2 3 4 +do +python test_other_arcs.py --dataset ${dataset} --gpu_id=${gpu_id} --r=${r} --seed=${s} --nruns=10 >> res/flickr/${1}_${2}.out +done diff --git a/GCond/test_other_arcs.py b/GCond/test_other_arcs.py new file mode 100644 index 
0000000000000000000000000000000000000000..359ce0f92472942692efdb3999acc06660f9be01 --- /dev/null +++ b/GCond/test_other_arcs.py @@ -0,0 +1,55 @@ +import sys +from deeprobust.graph.data import Dataset +import numpy as np +import random +import time +import argparse +import torch +from utils import * +import torch.nn.functional as F +from tester_other_arcs import Evaluator +from utils_graphsaint import DataGraphSAINT + + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu_id', type=int, default=0, help='gpu id') +parser.add_argument('--dataset', type=str, default='cora') +parser.add_argument('--nlayers', type=int, default=2) +parser.add_argument('--hidden', type=int, default=256) +parser.add_argument('--keep_ratio', type=float, default=1) +parser.add_argument('--reduction_rate', type=float, default=1) +parser.add_argument('--weight_decay', type=float, default=0.0) +parser.add_argument('--dropout', type=float, default=0.0) +parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--seed', type=int, default=0, help='Random seed.') +parser.add_argument('--mlp', type=int, default=0) +parser.add_argument('--inner', type=int, default=0) +parser.add_argument('--epsilon', type=float, default=-1) +parser.add_argument('--nruns', type=int, default=20) +args = parser.parse_args() + +torch.cuda.set_device(args.gpu_id) + +# random seed setting +random.seed(args.seed) +np.random.seed(args.seed) +torch.manual_seed(args.seed) +torch.cuda.manual_seed(args.seed) + +if args.dataset in ['cora', 'citeseer']: + args.epsilon = 0.05 +else: + args.epsilon = 0.01 + +print(args) + +data_graphsaint = ['flickr', 'reddit', 'ogbn-arxiv'] +if args.dataset in data_graphsaint: + data = DataGraphSAINT(args.dataset) + data_full = data.data_full +else: + data_full = get_dataset(args.dataset, args.normalize_features) + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) + +agent = Evaluator(data, args, device='cuda') +agent.train() diff --git 
a/GCond/tester_other_arcs.py b/GCond/tester_other_arcs.py new file mode 100644 index 0000000000000000000000000000000000000000..df2a8b7ca1812773ad928e973904b5e84954f7e6 --- /dev/null +++ b/GCond/tester_other_arcs.py @@ -0,0 +1,258 @@ +import sys +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from torch.nn import Parameter +import torch.nn.functional as F +from utils import match_loss, regularization, row_normalize_tensor +import deeprobust.graph.utils as utils +from copy import deepcopy +import numpy as np +from tqdm import tqdm +from models.gcn import GCN +from models.sgc import SGC +from models.sgc_multi import SGC as SGC1 +from models.myappnp import APPNP +from models.myappnp1 import APPNP1 +from models.mycheby import Cheby +from models.mygraphsage import GraphSage +from models.gat import GAT +import scipy.sparse as sp + + +class Evaluator: + + def __init__(self, data, args, device='cuda', **kwargs): + self.data = data + self.args = args + self.device = device + n = int(data.feat_train.shape[0] * args.reduction_rate) + d = data.feat_train.shape[1] + self.nnodes_syn = n + self.adj_param= nn.Parameter(torch.FloatTensor(n, n).to(device)) + self.feat_syn = nn.Parameter(torch.FloatTensor(n, d).to(device)) + self.labels_syn = torch.LongTensor(self.generate_labels_syn(data)).to(device) + self.reset_parameters() + print('adj_param:', self.adj_param.shape, 'feat_syn:', self.feat_syn.shape) + + def reset_parameters(self): + self.adj_param.data.copy_(torch.randn(self.adj_param.size())) + self.feat_syn.data.copy_(torch.randn(self.feat_syn.size())) + + def generate_labels_syn(self, data): + from collections import Counter + counter = Counter(data.labels_train) + num_class_dict = {} + n = len(data.labels_train) + + sorted_counter = sorted(counter.items(), key=lambda x:x[1]) + sum_ = 0 + labels_syn = [] + self.syn_class_indices = {} + for ix, (c, num) in enumerate(sorted_counter): + if ix == len(sorted_counter) - 1: + num_class_dict[c] = 
int(n * self.args.reduction_rate) - sum_ + self.syn_class_indices[c] = [len(labels_syn), len(labels_syn) + num_class_dict[c]] + labels_syn += [c] * num_class_dict[c] + else: + num_class_dict[c] = max(int(num * self.args.reduction_rate), 1) + sum_ += num_class_dict[c] + self.syn_class_indices[c] = [len(labels_syn), len(labels_syn) + num_class_dict[c]] + labels_syn += [c] * num_class_dict[c] + + self.num_class_dict = num_class_dict + return labels_syn + + + def test_gat(self, nlayers, model_type, verbose=False): + res = [] + args = self.args + + if args.dataset in ['cora', 'citeseer']: + args.epsilon = 0.5 # Make the graph sparser as GAT does not work well on dense graph + else: + args.epsilon = 0.01 + + print('======= testing %s' % model_type) + data, device = self.data, self.device + + + feat_syn, adj_syn, labels_syn = self.get_syn_data(model_type) + # with_bn = True if self.args.dataset in ['ogbn-arxiv'] else False + with_bn = False + if model_type == 'GAT': + model = GAT(nfeat=feat_syn.shape[1], nhid=16, heads=16, dropout=0.0, + weight_decay=0e-4, nlayers=self.args.nlayers, lr=0.001, + nclass=data.nclass, device=device, dataset=self.args.dataset).to(device) + + + noval = True if args.dataset in ['reddit', 'flickr'] else False + model.fit(feat_syn, adj_syn, labels_syn, np.arange(len(feat_syn)), noval=noval, data=data, + train_iters=10000 if noval else 3000, normalize=True, verbose=verbose) + + model.eval() + labels_test = torch.LongTensor(data.labels_test).cuda() + + if args.dataset in ['reddit', 'flickr']: + output = model.predict(data.feat_test, data.adj_test) + loss_test = F.nll_loss(output, labels_test) + acc_test = utils.accuracy(output, labels_test) + res.append(acc_test.item()) + if verbose: + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + + else: + # Full graph + output = model.predict(data.feat_full, data.adj_full) + loss_test = F.nll_loss(output[data.idx_test], labels_test) + acc_test 
= utils.accuracy(output[data.idx_test], labels_test) + res.append(acc_test.item()) + if verbose: + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + + labels_train = torch.LongTensor(data.labels_train).cuda() + output = model.predict(data.feat_train, data.adj_train) + loss_train = F.nll_loss(output, labels_train) + acc_train = utils.accuracy(output, labels_train) + if verbose: + print("Train set results:", + "loss= {:.4f}".format(loss_train.item()), + "accuracy= {:.4f}".format(acc_train.item())) + res.append(acc_train.item()) + return res + + def get_syn_data(self, model_type=None): + data, device = self.data, self.device + feat_syn, adj_param, labels_syn = self.feat_syn.detach(), \ + self.adj_param.detach(), self.labels_syn + + args = self.args + adj_syn = torch.load(f'saved_ours/adj_{args.dataset}_{args.reduction_rate}_{args.seed}.pt', map_location='cuda') + feat_syn = torch.load(f'saved_ours/feat_{args.dataset}_{args.reduction_rate}_{args.seed}.pt', map_location='cuda') + + if model_type == 'MLP': + adj_syn = adj_syn.to(self.device) + adj_syn = adj_syn - adj_syn + else: + adj_syn = adj_syn.to(self.device) + + print('Sum:', adj_syn.sum(), adj_syn.sum()/(adj_syn.shape[0]**2)) + print('Sparsity:', adj_syn.nonzero().shape[0]/(adj_syn.shape[0]**2)) + + if self.args.epsilon > 0: + adj_syn[adj_syn < self.args.epsilon] = 0 + print('Sparsity after truncating:', adj_syn.nonzero().shape[0]/(adj_syn.shape[0]**2)) + feat_syn = feat_syn.to(self.device) + + # edge_index = adj_syn.nonzero().T + # adj_syn = torch.sparse.FloatTensor(edge_index, adj_syn[edge_index[0], edge_index[1]], adj_syn.size()) + + return feat_syn, adj_syn, labels_syn + + + def test(self, nlayers, model_type, verbose=True): + res = [] + + args = self.args + data, device = self.data, self.device + + feat_syn, adj_syn, labels_syn = self.get_syn_data(model_type) + + print('======= testing %s' % model_type) + if model_type == 'MLP': + model_class 
= GCN + else: + model_class = eval(model_type) + weight_decay = 5e-4 + dropout = 0.5 if args.dataset in ['reddit'] else 0 + + model = model_class(nfeat=feat_syn.shape[1], nhid=args.hidden, dropout=dropout, + weight_decay=weight_decay, nlayers=nlayers, + nclass=data.nclass, device=device).to(device) + + # with_bn = True if self.args.dataset in ['ogbn-arxiv'] else False + if args.dataset in ['ogbn-arxiv', 'arxiv']: + model = model_class(nfeat=feat_syn.shape[1], nhid=args.hidden, dropout=0., + weight_decay=weight_decay, nlayers=nlayers, with_bn=False, + nclass=data.nclass, device=device).to(device) + + noval = True if args.dataset in ['reddit', 'flickr'] else False + model.fit_with_val(feat_syn, adj_syn, labels_syn, data, + train_iters=600, normalize=True, verbose=True, noval=noval) + + model.eval() + labels_test = torch.LongTensor(data.labels_test).cuda() + + if model_type == 'MLP': + output = model.predict_unnorm(data.feat_test, sp.eye(len(data.feat_test))) + else: + output = model.predict(data.feat_test, data.adj_test) + + if args.dataset in ['reddit', 'flickr']: + loss_test = F.nll_loss(output, labels_test) + acc_test = utils.accuracy(output, labels_test) + res.append(acc_test.item()) + if verbose: + print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + + # if not args.dataset in ['reddit', 'flickr']: + else: + # Full graph + output = model.predict(data.feat_full, data.adj_full) + loss_test = F.nll_loss(output[data.idx_test], labels_test) + acc_test = utils.accuracy(output[data.idx_test], labels_test) + res.append(acc_test.item()) + if verbose: + print("Test full set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + + labels_train = torch.LongTensor(data.labels_train).cuda() + output = model.predict(data.feat_train, data.adj_train) + loss_train = F.nll_loss(output, labels_train) + acc_train = utils.accuracy(output, labels_train) + if verbose: + 
print("Train set results:", + "loss= {:.4f}".format(loss_train.item()), + "accuracy= {:.4f}".format(acc_train.item())) + res.append(acc_train.item()) + return res + + def train(self, verbose=True): + args = self.args + data = self.data + + final_res = {} + runs = self.args.nruns + + for model_type in ['GCN', 'GraphSage', 'SGC1', 'MLP', 'APPNP1', 'Cheby']: + res = [] + nlayer = 2 + for i in range(runs): + res.append(self.test(nlayer, verbose=False, model_type=model_type)) + res = np.array(res) + print('Test/Train Mean Accuracy:', + repr([res.mean(0), res.std(0)])) + final_res[model_type] = [res.mean(0), res.std(0)] + + + print('=== testing GAT') + res = [] + nlayer = 2 + for i in range(runs): + res.append(self.test_gat(verbose=True, nlayers=nlayer, model_type='GAT')) + res = np.array(res) + print('Layer:', nlayer) + print('Test/Full Test/Train Mean Accuracy:', + repr([res.mean(0), res.std(0)])) + final_res['GAT'] = [res.mean(0), res.std(0)] + + print('Final result:', final_res) + diff --git a/GCond/train_coreset.py b/GCond/train_coreset.py new file mode 100644 index 0000000000000000000000000000000000000000..8c8c95e8ab6a64c27df1f57425b8a88db2d7f43d --- /dev/null +++ b/GCond/train_coreset.py @@ -0,0 +1,117 @@ +from deeprobust.graph.data import Dataset +import numpy as np +import random +import time +import argparse +import torch +import sys +from deeprobust.graph.utils import * +import torch.nn.functional as F +from configs import load_config +from utils import * +from utils_graphsaint import DataGraphSAINT +from models.gcn import GCN +from coreset import KCenter, Herding, Random, LRMC +from tqdm import tqdm + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu_id', type=int, default=0, help='gpu id') +parser.add_argument('--dataset', type=str, default='cora') +parser.add_argument('--hidden', type=int, default=256) +parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--keep_ratio', type=float, default=1.0) 
+parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--weight_decay', type=float, default=5e-4) +parser.add_argument('--seed', type=int, default=15, help='Random seed.') +parser.add_argument('--nlayers', type=int, default=2, help='Random seed.') +parser.add_argument('--epochs', type=int, default=400) +parser.add_argument('--inductive', type=int, default=1) +parser.add_argument('--save', type=int, default=0) +parser.add_argument('--method', type=str, choices=['kcenter', 'herding', 'random', 'lrmc']) +parser.add_argument('--lrmc_seeds_path', type=str, default=None, + help='Path to a JSON file containing L‑RMC seed nodes. Required when method=lrmc.') +parser.add_argument('--reduction_rate', type=float, required=True) +args = parser.parse_args() + +torch.cuda.set_device(args.gpu_id) +args = load_config(args) +print(args) + +# random seed setting +random.seed(args.seed) +np.random.seed(args.seed) +torch.manual_seed(args.seed) +torch.cuda.manual_seed(args.seed) + + +data_graphsaint = ['flickr', 'reddit', 'ogbn-arxiv'] +if args.dataset in data_graphsaint: + data = DataGraphSAINT(args.dataset) + data_full = data.data_full + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) +else: + data_full = get_dataset(args.dataset, args.normalize_features) + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) + +features = data_full.features +adj = data_full.adj +labels = data_full.labels +idx_train = data_full.idx_train +idx_val = data_full.idx_val +idx_test = data_full.idx_test + +# Setup GCN Model +device = 'cuda' +model = GCN(nfeat=features.shape[1], nhid=256, nclass=labels.max()+1, device=device, weight_decay=args.weight_decay) + +model = model.to(device) +model.fit(features, adj, labels, idx_train, idx_val, train_iters=600, verbose=False) + +model.eval() +# You can use the inner function of model to test +model.test(idx_test) + +embeds = model.predict().detach() + +if args.method == 'kcenter': + agent = KCenter(data, args, device='cuda') +elif 
args.method == 'herding': + agent = Herding(data, args, device='cuda') +elif args.method == 'random': + agent = Random(data, args, device='cuda') +elif args.method == 'lrmc': + if args.lrmc_seeds_path is None: + raise ValueError("--lrmc_seeds_path must be specified when method='lrmc'") + agent = LRMC(data, args, device='cuda') + +idx_selected = agent.select(embeds) + + +feat_train = features[idx_selected] +adj_train = adj[np.ix_(idx_selected, idx_selected)] + +labels_train = labels[idx_selected] + +if args.save: + np.save(f'saved/idx_{args.dataset}_{args.reduction_rate}_{args.method}_{args.seed}.npy', idx_selected) + + +res = [] +runs = 10 +for _ in tqdm(range(runs)): + model.initialize() + model.fit_with_val(feat_train, adj_train, labels_train, data, + train_iters=600, normalize=True, verbose=False) + + model.eval() + labels_test = torch.LongTensor(data.labels_test).cuda() + + # Full graph + output = model.predict(data.feat_full, data.adj_full) + loss_test = F.nll_loss(output[data.idx_test], labels_test) + acc_test = accuracy(output[data.idx_test], labels_test) + res.append(acc_test.item()) + +res = np.array(res) +print('Mean accuracy:', repr([res.mean(), res.std()])) + diff --git a/GCond/train_coreset_induct.py b/GCond/train_coreset_induct.py new file mode 100644 index 0000000000000000000000000000000000000000..18cf38f5990126b8857adaaa7e08b5c46f798304 --- /dev/null +++ b/GCond/train_coreset_induct.py @@ -0,0 +1,119 @@ +from deeprobust.graph.data import Dataset +import numpy as np +import random +import time +import argparse +import torch +import sys +import deeprobust.graph.utils as utils +import torch.nn.functional as F +from configs import load_config +from utils import * +from utils_graphsaint import DataGraphSAINT +from models.gcn import GCN +from coreset import KCenter, Herding, Random, LRMC +from tqdm import tqdm + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu_id', type=int, default=0, help='gpu id') +parser.add_argument('--dataset', 
type=str, default='cora') +parser.add_argument('--hidden', type=int, default=256) +parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--keep_ratio', type=float, default=1.0) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--weight_decay', type=float, default=5e-4) +parser.add_argument('--dropout', type=float, default=0.5) +parser.add_argument('--seed', type=int, default=15, help='Random seed.') +parser.add_argument('--nlayers', type=int, default=2, help='Random seed.') +parser.add_argument('--epochs', type=int, default=400) +parser.add_argument('--inductive', type=int, default=1) +parser.add_argument('--mlp', type=int, default=0) +parser.add_argument('--method', type=str, choices=['kcenter', 'herding', 'random', 'lrmc']) +parser.add_argument('--lrmc_seeds_path', type=str, default=None, + help='Path to a JSON file containing L‑RMC seed nodes. Required when method=lrmc.') +parser.add_argument('--reduction_rate', type=float, required=True) +args = parser.parse_args() + +torch.cuda.set_device(args.gpu_id) +args = load_config(args) +print(args) + +# random seed setting +random.seed(args.seed) +np.random.seed(args.seed) +torch.manual_seed(args.seed) +torch.cuda.manual_seed(args.seed) + +data_graphsaint = ['flickr', 'reddit', 'ogbn-arxiv'] +if args.dataset in data_graphsaint: + data = DataGraphSAINT(args.dataset) + data_full = data.data_full + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) +else: + data_full = get_dataset(args.dataset, args.normalize_features) + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) + +feat_train = data.feat_train +adj_train = data.adj_train +labels_train = data.labels_train + + +# Setup GCN Model +device = 'cuda' +model = GCN(nfeat=feat_train.shape[1], nhid=256, nclass=labels_train.max()+1, device=device, weight_decay=args.weight_decay) + +model = model.to(device) + +model.fit_with_val(feat_train, adj_train, labels_train, data, + train_iters=600, 
normalize=True, verbose=False) + +model.eval() +labels_test = torch.LongTensor(data.labels_test).cuda() +feat_test, adj_test = data.feat_test, data.adj_test + +embeds = model.predict().detach() + +output = model.predict(feat_test, adj_test) +loss_test = F.nll_loss(output, labels_test) +acc_test = utils.accuracy(output, labels_test) +print("Test set results:", + "loss= {:.4f}".format(loss_test.item()), + "accuracy= {:.4f}".format(acc_test.item())) + + +if args.method == 'kcenter': + agent = KCenter(data, args, device='cuda') +elif args.method == 'herding': + agent = Herding(data, args, device='cuda') +elif args.method == 'random': + agent = Random(data, args, device='cuda') +elif args.method == 'lrmc': + if args.lrmc_seeds_path is None: + raise ValueError("--lrmc_seeds_path must be specified when method='lrmc'") + agent = LRMC(data, args, device='cuda') + +idx_selected = agent.select(embeds, inductive=True) + +feat_train = feat_train[idx_selected] +adj_train = adj_train[np.ix_(idx_selected, idx_selected)] + +labels_train = labels_train[idx_selected] + +res = [] +print('shape of feat_train:', feat_train.shape) +runs = 10 +for _ in tqdm(range(runs)): + model.initialize() + model.fit_with_val(feat_train, adj_train, labels_train, data, + train_iters=600, normalize=True, verbose=False, noval=True) + + model.eval() + labels_test = torch.LongTensor(data.labels_test).cuda() + + output = model.predict(feat_test, adj_test) + loss_test = F.nll_loss(output, labels_test) + acc_test = utils.accuracy(output, labels_test) + res.append(acc_test.item()) +res = np.array(res) +print('Mean accuracy:', repr([res.mean(), res.std()])) + diff --git a/GCond/train_gcond_induct.py b/GCond/train_gcond_induct.py new file mode 100644 index 0000000000000000000000000000000000000000..defe99a2653724112074eb6f3f31a20c48633522 --- /dev/null +++ b/GCond/train_gcond_induct.py @@ -0,0 +1,61 @@ +from deeprobust.graph.data import Dataset +import numpy as np +import random +import time +import argparse 
+import torch +from utils import * +import torch.nn.functional as F +from gcond_agent_induct import GCond +from utils_graphsaint import DataGraphSAINT + + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu_id', type=int, default=0, help='gpu id') +parser.add_argument('--dataset', type=str, default='cora') +parser.add_argument('--dis_metric', type=str, default='ours') +parser.add_argument('--epochs', type=int, default=600) +parser.add_argument('--nlayers', type=int, default=3) +parser.add_argument('--hidden', type=int, default=256) +parser.add_argument('--lr_adj', type=float, default=0.01) +parser.add_argument('--lr_feat', type=float, default=0.01) +parser.add_argument('--lr_model', type=float, default=0.01) +parser.add_argument('--weight_decay', type=float, default=0.0) +parser.add_argument('--dropout', type=float, default=0.0) +parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--keep_ratio', type=float, default=1.0) +parser.add_argument('--reduction_rate', type=float, default=0.01) +parser.add_argument('--seed', type=int, default=15, help='Random seed.') +parser.add_argument('--alpha', type=float, default=0, help='regularization term.') +parser.add_argument('--debug', type=int, default=0) +parser.add_argument('--sgc', type=int, default=1) +parser.add_argument('--inner', type=int, default=0) +parser.add_argument('--outer', type=int, default=20) +parser.add_argument('--option', type=int, default=0) +parser.add_argument('--save', type=int, default=0) +parser.add_argument('--label_rate', type=float, default=1) +parser.add_argument('--one_step', type=int, default=0) +args = parser.parse_args() + +torch.cuda.set_device(args.gpu_id) + +# random seed setting +random.seed(args.seed) +np.random.seed(args.seed) +torch.manual_seed(args.seed) +torch.cuda.manual_seed(args.seed) + +print(args) + +data_graphsaint = ['flickr', 'reddit', 'ogbn-arxiv'] +if args.dataset in data_graphsaint: + # data = DataGraphSAINT(args.dataset) + 
data = DataGraphSAINT(args.dataset, label_rate=args.label_rate) + data_full = data.data_full +else: + data_full = get_dataset(args.dataset, args.normalize_features) + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) + +agent = GCond(data, args, device='cuda') + +agent.train() diff --git a/GCond/train_gcond_transduct.py b/GCond/train_gcond_transduct.py new file mode 100644 index 0000000000000000000000000000000000000000..db59c69b26fcc498e94be13c4b023dc9f165fb66 --- /dev/null +++ b/GCond/train_gcond_transduct.py @@ -0,0 +1,57 @@ +from deeprobust.graph.data import Dataset +import numpy as np +import random +import time +import argparse +import torch +from utils import * +import torch.nn.functional as F +from gcond_agent_transduct import GCond +from utils_graphsaint import DataGraphSAINT + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu_id', type=int, default=0, help='gpu id') +parser.add_argument('--dataset', type=str, default='cora') +parser.add_argument('--dis_metric', type=str, default='ours') +parser.add_argument('--epochs', type=int, default=2000) +parser.add_argument('--nlayers', type=int, default=3) +parser.add_argument('--hidden', type=int, default=256) +parser.add_argument('--lr_adj', type=float, default=0.01) +parser.add_argument('--lr_feat', type=float, default=0.01) +parser.add_argument('--lr_model', type=float, default=0.01) +parser.add_argument('--weight_decay', type=float, default=0.0) +parser.add_argument('--dropout', type=float, default=0.0) +parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--keep_ratio', type=float, default=1.0) +parser.add_argument('--reduction_rate', type=float, default=1) +parser.add_argument('--seed', type=int, default=15, help='Random seed.') +parser.add_argument('--alpha', type=float, default=0, help='regularization term.') +parser.add_argument('--debug', type=int, default=0) +parser.add_argument('--sgc', type=int, default=1) +parser.add_argument('--inner', 
type=int, default=0) +parser.add_argument('--outer', type=int, default=20) +parser.add_argument('--save', type=int, default=0) +parser.add_argument('--one_step', type=int, default=0) +args = parser.parse_args() + +torch.cuda.set_device(args.gpu_id) + +# random seed setting +random.seed(args.seed) +np.random.seed(args.seed) +torch.manual_seed(args.seed) +torch.cuda.manual_seed(args.seed) + +print(args) + +data_graphsaint = ['flickr', 'reddit', 'ogbn-arxiv'] +if args.dataset in data_graphsaint: + data = DataGraphSAINT(args.dataset) + data_full = data.data_full +else: + data_full = get_dataset(args.dataset, args.normalize_features) + data = Transd2Ind(data_full, keep_ratio=args.keep_ratio) + +agent = GCond(data, args, device='cuda') + +agent.train() diff --git a/GCond/utils.py b/GCond/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d9ca01daf42c92712fc07092935330a984109fce --- /dev/null +++ b/GCond/utils.py @@ -0,0 +1,383 @@ +import os.path as osp +import numpy as np +import scipy.sparse as sp +import torch +import torch_geometric.transforms as T +from ogb.nodeproppred import PygNodePropPredDataset, Evaluator +from deeprobust.graph.data import Dataset +from deeprobust.graph.utils import get_train_val_test +from torch_geometric.utils import train_test_split_edges +from sklearn.model_selection import train_test_split +from sklearn import metrics +import numpy as np +import torch.nn.functional as F +from sklearn.preprocessing import StandardScaler +from deeprobust.graph.utils import * +from torch_geometric.data import NeighborSampler +from torch_geometric.utils import add_remaining_self_loops, to_undirected +from torch_geometric.datasets import Planetoid + + +def get_dataset(name, normalize_features=False, transform=None, if_dpr=True): + path = osp.join(osp.dirname(osp.realpath(__file__)), 'data', name) + if name in ['cora', 'citeseer', 'pubmed']: + dataset = Planetoid(path, name) + elif name in ['ogbn-arxiv']: + dataset = 
class Pyg2Dpr(Dataset):
    """Convert a PyTorch Geometric dataset into the DeepRobust (dpr) format.

    Exposes ``adj`` (scipy CSR), ``features``/``labels`` (numpy arrays) and
    ``idx_train``/``idx_val``/``idx_test`` split indices, mirroring the
    deeprobust ``Dataset`` interface.
    """

    def __init__(self, pyg_data, **kwargs):
        # OGB datasets expose splits via get_idx_split(); Planetoid-style
        # datasets do not have that method, so remember None and fall back
        # to boolean masks / a random split below. The original code used a
        # bare `except:` here, which hides unrelated failures.
        try:
            splits = pyg_data.get_idx_split()
        except AttributeError:
            splits = None

        dataset_name = pyg_data.name
        pyg_data = pyg_data[0]
        n = pyg_data.num_nodes

        if dataset_name == 'ogbn-arxiv':  # symmetrization
            pyg_data.edge_index = to_undirected(pyg_data.edge_index, pyg_data.num_nodes)

        self.adj = sp.csr_matrix((np.ones(pyg_data.edge_index.shape[1]),
            (pyg_data.edge_index[0], pyg_data.edge_index[1])), shape=(n, n))

        self.features = pyg_data.x.numpy()
        self.labels = pyg_data.y.numpy()

        if self.labels.ndim == 2 and self.labels.shape[1] == 1:
            self.labels = self.labels.reshape(-1)  # ogbn-arxiv stores labels as [N, 1]

        if hasattr(pyg_data, 'train_mask'):
            # fixed split provided as boolean masks (Planetoid datasets)
            self.idx_train = mask_to_index(pyg_data.train_mask, n)
            self.idx_val = mask_to_index(pyg_data.val_mask, n)
            self.idx_test = mask_to_index(pyg_data.test_mask, n)
            self.name = 'Pyg2Dpr'
        elif splits is not None:
            # OGB-style explicit index splits
            self.idx_train = splits['train']
            self.idx_val = splits['valid']
            self.idx_test = splits['test']
            self.name = 'Pyg2Dpr'
        else:
            # no split information at all: draw a random stratified split
            self.idx_train, self.idx_val, self.idx_test = get_train_val_test(
                nnodes=n, val_size=0.1, test_size=0.8, stratify=self.labels)
def index_to_mask(index, size):
    """Return a boolean mask of length *size* that is True exactly at *index*."""
    mask = torch.full((size,), False, dtype=torch.bool)
    mask[index] = True
    return mask
np.arange(len(self.labels_train))[self.labels_train==i] + self.class_dict2[i] = idx + + if args.nlayers == 1: + sizes = [15] + if args.nlayers == 2: + sizes = [10, 5] + # sizes = [-1, -1] + if args.nlayers == 3: + sizes = [15, 10, 5] + if args.nlayers == 4: + sizes = [15, 10, 5, 5] + if args.nlayers == 5: + sizes = [15, 10, 5, 5, 5] + + + if self.samplers is None: + self.samplers = [] + for i in range(self.nclass): + node_idx = torch.LongTensor(self.class_dict2[i]) + self.samplers.append(NeighborSampler(adj, + node_idx=node_idx, + sizes=sizes, batch_size=num, + num_workers=12, return_e_id=False, + num_nodes=adj.size(0), + shuffle=True)) + batch = np.random.permutation(self.class_dict2[c])[:num] + out = self.samplers[c].sample(batch) + return out + + def retrieve_class_multi_sampler(self, c, adj, transductive, num=256, args=None): + if self.class_dict2 is None: + self.class_dict2 = {} + for i in range(self.nclass): + if transductive: + idx = self.idx_train[self.labels_train == i] + else: + idx = np.arange(len(self.labels_train))[self.labels_train==i] + self.class_dict2[i] = idx + + + if self.samplers is None: + self.samplers = [] + for l in range(2): + layer_samplers = [] + sizes = [15] if l == 0 else [10, 5] + for i in range(self.nclass): + node_idx = torch.LongTensor(self.class_dict2[i]) + layer_samplers.append(NeighborSampler(adj, + node_idx=node_idx, + sizes=sizes, batch_size=num, + num_workers=12, return_e_id=False, + num_nodes=adj.size(0), + shuffle=True)) + self.samplers.append(layer_samplers) + batch = np.random.permutation(self.class_dict2[c])[:num] + out = self.samplers[args.nlayers-1][c].sample(batch) + return out + + + +def match_loss(gw_syn, gw_real, args, device): + dis = torch.tensor(0.0).to(device) + + if args.dis_metric == 'ours': + + for ig in range(len(gw_real)): + gwr = gw_real[ig] + gws = gw_syn[ig] + dis += distance_wb(gwr, gws) + + elif args.dis_metric == 'mse': + gw_real_vec = [] + gw_syn_vec = [] + for ig in range(len(gw_real)): + 
def calc_f1(y_true, y_pred, is_sigmoid):
    """Return (micro-F1, macro-F1) for the given predictions.

    For multi-class logits (``is_sigmoid=False``) the argmax over the class
    axis is taken; for multi-label sigmoid scores the outputs are thresholded
    at 0.5. Fix: the caller's ``y_pred`` array is no longer mutated in place
    by the thresholding branch.
    """
    if not is_sigmoid:
        y_pred = np.argmax(y_pred, axis=1)
    else:
        # threshold into a fresh array instead of writing into the caller's
        y_pred = (y_pred > 0.5).astype(y_pred.dtype)
    return (metrics.f1_score(y_true, y_pred, average="micro"),
            metrics.f1_score(y_true, y_pred, average="macro"))
def maxdegree(adj):
    """Hinge penalty on the maximum normalized degree of a dense adjacency.

    Penalizes graphs whose largest row-sum exceeds half the node count.
    Uses ``Tensor.max()`` instead of Python's builtin ``max`` so the
    reduction runs as a single tensor op (on-device, autograd-friendly)
    rather than iterating the tensor element by element.
    """
    n = adj.shape[0]
    return F.relu(adj.sum(1).max() / n - 0.5)
def row_normalize_tensor(mx):
    """Row-normalize a dense tensor so every non-empty row sums to 1.

    Robustness fix: rows whose sum is zero previously produced ``inf``
    weights (the guard line was commented out); their reciprocal is now
    clamped to 0 so empty rows stay all-zero instead of becoming inf/nan.
    """
    rowsum = mx.sum(1)
    r_inv = rowsum.pow(-1).flatten()
    r_inv[torch.isinf(r_inv)] = 0.  # guard rows with zero sum
    r_mat_inv = torch.diag(r_inv)
    return r_mat_inv @ mx
feat_train = feat[idx_train] + scaler = StandardScaler() + scaler.fit(feat_train) + feat = scaler.transform(feat) + + self.feat_train = feat[idx_train] + self.feat_val = feat[idx_val] + self.feat_test = feat[idx_test] + + class_map = json.load(open(dataset_str + 'class_map.json','r')) + labels = self.process_labels(class_map) + + self.labels_train = labels[idx_train] + self.labels_val = labels[idx_val] + self.labels_test = labels[idx_test] + + self.data_full = GraphData(adj_full, feat, labels, idx_train, idx_val, idx_test) + self.class_dict = None + self.class_dict2 = None + + self.adj_full = adj_full + self.feat_full = feat + self.labels_full = labels + self.idx_train = np.array(idx_train) + self.idx_val = np.array(idx_val) + self.idx_test = np.array(idx_test) + self.samplers = None + + def process_labels(self, class_map): + """ + setup vertex property map for output classests + """ + num_vertices = self.nnodes + if isinstance(list(class_map.values())[0], list): + num_classes = len(list(class_map.values())[0]) + self.nclass = num_classes + class_arr = np.zeros((num_vertices, num_classes)) + for k,v in class_map.items(): + class_arr[int(k)] = v + else: + class_arr = np.zeros(num_vertices, dtype=np.int) + for k, v in class_map.items(): + class_arr[int(k)] = v + class_arr = class_arr - class_arr.min() + self.nclass = max(class_arr) + 1 + return class_arr + + def retrieve_class(self, c, num=256): + if self.class_dict is None: + self.class_dict = {} + for i in range(self.nclass): + self.class_dict['class_%s'%i] = (self.labels_train == i) + idx = np.arange(len(self.labels_train)) + idx = idx[self.class_dict['class_%s'%c]] + return np.random.permutation(idx)[:num] + + def retrieve_class_sampler(self, c, adj, transductive, num=256, args=None): + if args.nlayers == 1: + sizes = [30] + if args.nlayers == 2: + if args.dataset in ['reddit', 'flickr']: + if args.option == 0: + sizes = [15, 8] + if args.option == 1: + sizes = [20, 10] + if args.option == 2: + sizes = [25, 10] + 
class GraphData:
    """Plain container bundling a graph with its features, labels and splits."""

    _FIELDS = ('adj', 'features', 'labels', 'idx_train', 'idx_val', 'idx_test')

    def __init__(self, adj, features, labels, idx_train, idx_val, idx_test):
        # Assign all six attributes in one pass; order matches _FIELDS.
        values = (adj, features, labels, idx_train, idx_val, idx_test)
        for field_name, value in zip(self._FIELDS, values):
            setattr(self, field_name, value)
= self.process() + self.data, self.slices = self.collate([pyg_data]) + self.transform = transform + + def process(self): + dpr_data = self.dpr_data + edge_index = torch.LongTensor(dpr_data.adj.nonzero()) + # by default, the features in pyg data is dense + if sp.issparse(dpr_data.features): + x = torch.FloatTensor(dpr_data.features.todense()).float() + else: + x = torch.FloatTensor(dpr_data.features).float() + y = torch.LongTensor(dpr_data.labels) + data = Data(x=x, edge_index=edge_index, y=y) + data.train_mask = None + data.val_mask = None + data.test_mask = None + return data + + + def get(self, idx): + data = self.data.__class__() + + if hasattr(self.data, '__num_nodes__'): + data.num_nodes = self.data.__num_nodes__[idx] + + for key in self.data.keys: + item, slices = self.data[key], self.slices[key] + s = list(repeat(slice(None), item.dim())) + s[self.data.__cat_dim__(key, item)] = slice(slices[idx], + slices[idx + 1]) + data[key] = item[s] + return data + + @property + def raw_file_names(self): + return ['some_file_1', 'some_file_2', ...] + + @property + def processed_file_names(self): + return ['data.pt'] + + def _download(self): + pass + + diff --git a/requirements.txt b/requirements.txt index 9d0b5c32945505b2b94139bb6c43a7e23dafc797..72212c8c57fbe80d885cefb80e7a9747284690e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -torch torch-scatter torch-sparse -torch-geometric \ No newline at end of file +torch-geometric +rich diff --git a/src/2.1_lrmc_bilevel.py b/src/2.1_lrmc_bilevel.py index 9c81e189e2a6733d139ee3c5329e1c8a58b8c678..fa188e0876a19fb626da4a02d8f114d71b2debc3 100644 --- a/src/2.1_lrmc_bilevel.py +++ b/src/2.1_lrmc_bilevel.py @@ -1,6 +1,4 @@ -# lrmc_bilevel.py # Top-1 LRMC ablation: one-cluster pooling vs. 
plain GCN on Planetoid (e.g., Cora) -# Requires: torch, torch_geometric, torch_scatter, torch_sparse # # Usage examples: # python lrmc_bilevel.py --dataset Cora --seeds /path/to/lrmc_seeds.json --variant baseline @@ -28,6 +26,8 @@ from torch_sparse import coalesce, spspmm from torch_geometric.datasets import Planetoid from torch_geometric.nn import GCNConv +from rich import print + # --------------------------- # Utilities: edges and seeds diff --git a/src/2.2_lrmc_bilevel.py b/src/2.2_lrmc_bilevel.py index 22e1ae51d4e58064ca4a87eb5e1313db33183688..4efb345758913f6a671669b9e5f4c4aa7270feaa 100644 --- a/src/2.2_lrmc_bilevel.py +++ b/src/2.2_lrmc_bilevel.py @@ -1,4 +1,3 @@ -# 2.1_lrmc_bilevel.py # Top-1 LRMC ablation with debug guards so seeds differences are visible. # Requires: torch, torch_geometric, torch_scatter, torch_sparse @@ -16,6 +15,8 @@ from torch_sparse import coalesce, spspmm from torch_geometric.datasets import Planetoid from torch_geometric.nn import GCNConv +from rich import print + # --------------------------- # Utilities: edges and seeds diff --git a/src/2.3_lrmc_bilevel.py b/src/2.3_lrmc_bilevel.py index fa41fc5e5652d3bdb94a2b84c98fc61b522e9382..97aee14035d6236b7f01fa3b33301d6f3f63059b 100644 --- a/src/2.3_lrmc_bilevel.py +++ b/src/2.3_lrmc_bilevel.py @@ -1,4 +1,3 @@ -# 2.3_lrmc_bilevel.py # Top-1 LRMC ablation with: cluster refinement (k-core), gated residual fusion, # sparsified cluster graph (drop self-loops + per-row top-k), and A + γA² mix. 
# Requires: torch, torch_geometric, torch_scatter, torch_sparse @@ -17,6 +16,8 @@ from torch_sparse import coalesce, spspmm from torch_geometric.datasets import Planetoid from torch_geometric.nn import GCNConv +from rich import print + # --------------------------- # Utilities: edges and seeds diff --git a/src/2.4_lrmc_bilevel.py b/src/2.4_lrmc_bilevel.py index fc97fd5e15c534d0afb71791b3a6c8b4b6e2705d..0f525da92547af0ab2e3f157f7f084fa776cf3ae 100644 --- a/src/2.4_lrmc_bilevel.py +++ b/src/2.4_lrmc_bilevel.py @@ -1,4 +1,3 @@ -# lrmc_bilevel.py # Top-1 LRMC ablation: one-cluster pooling vs. plain GCN on Planetoid (e.g., Cora) # Requires: torch, torch_geometric, torch_scatter, torch_sparse # @@ -30,6 +29,8 @@ from torch_geometric.datasets import Planetoid from torch_geometric.nn import GCNConv from torch_geometric.utils import subgraph, degree # Added for stability score +from rich import print + # --------------------------- # Utilities: edges and seeds # --------------------------- diff --git a/src/2.5_lrmc_bilevel.py b/src/2.5_lrmc_bilevel.py new file mode 100644 index 0000000000000000000000000000000000000000..63eceb53376c0b833090d70f95b232ff73df8ef1 --- /dev/null +++ b/src/2.5_lrmc_bilevel.py @@ -0,0 +1,540 @@ +""" +L-RMC Anchored GCN vs. Plain GCN (dynamic robustness evaluation) +============================================================== + +This script trains a baseline two‑layer GCN and a new **anchor‑gated** GCN on +Planetoid citation networks (Cora/Citeseer/Pubmed). The anchor‑gated GCN uses +the top‑1 L‑RMC cluster (loaded from a provided JSON file) as a *decentralized +core*. During message passing it blends standard neighborhood aggregation +(`h_base`) with aggregation restricted to the core (`h_core`) via a per‑node +gating network. Cross‑boundary edges are optionally down‑weighted by a +damping factor `γ`. + +After training on the static graph, the script evaluates *robustness over +time*. 
Starting from the original adjacency, it repeatedly performs random +edge rewires (removes a fraction of existing edges and adds the same number +of random new edges) and measures test accuracy at each step **without +retraining**. The area under the accuracy–time curve (AUC‑AT) is reported +for both the baseline and the anchored model. A higher AUC‑AT indicates +longer resilience to graph churn. + +Usage examples:: + + # Train only baseline and report dynamic AUC + python 2.5_lrmc_bilevel.py --dataset Cora --seeds path/to/lrmc_seeds.json --variant baseline + + # Train baseline and anchor models, evaluate AUC‑over‑time on 30 steps with 5% rewiring + python 2.5_lrmc_bilevel.py --dataset Cora --seeds path/to/lrmc_seeds.json --variant anchor \ + --dynamic_steps 30 --flip_fraction 0.05 --gamma 0.8 + +Notes +----- +* The seeds JSON must contain an entry ``"clusters"`` with a list of clusters; the + cluster with maximum (score, size) is chosen as the core. +* For fairness, both models are trained on the identical training mask and + evaluated on the same dynamic perturbations. +* Random rewiring is undirected: an edge (u,v) is treated as the same as (v,u). +* Cross‑boundary damping and the gating network use only structural + information; features are left unchanged during perturbations. 
+""" + +import argparse +import json +import random +from pathlib import Path +from typing import Tuple, List, Optional, Set + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch_geometric.datasets import Planetoid +from torch_geometric.nn import GCNConv + +from rich import print + +# ----------------------------------------------------------------------------- +# Utilities for loading LRMC core assignment +# ----------------------------------------------------------------------------- + +def _pick_top1_cluster(obj: dict) -> List[int]: + """ + From LRMC JSON with structure {"clusters":[{"seed_nodes":[...],"score":float,...},...]} + choose the cluster with the highest (score, size) and return its members as + 0‑indexed integers. If no clusters exist, returns an empty list. + """ + clusters = obj.get("clusters", []) + if not clusters: + return [] + # Choose by highest score, tie‑break by size + best = max(clusters, key=lambda c: (float(c.get("score", 0.0)), len(c.get("seed_nodes", [])))) + return [nid - 1 for nid in best.get("seed_nodes", [])] + + +def load_top1_assignment(seeds_json: str, n_nodes: int) -> Tuple[Tensor, Tensor]: + """ + Given a path to the LRMC seeds JSON and total number of nodes, returns: + + * core_mask: bool Tensor of shape [N] where True indicates membership in the + top‑1 LRMC cluster. + * core_nodes: Long Tensor containing the indices of the core nodes. + + Nodes not in the core form the periphery. If the JSON has no clusters, + the core is empty. 
+ """ + obj = json.loads(Path(seeds_json).read_text()) + core_list = _pick_top1_cluster(obj) + core_nodes = torch.tensor(sorted(set(core_list)), dtype=torch.long) + core_mask = torch.zeros(n_nodes, dtype=torch.bool) + if core_nodes.numel() > 0: + core_mask[core_nodes] = True + return core_mask, core_nodes + + +# ----------------------------------------------------------------------------- +# Baseline GCN: standard two‑layer GCN +# ----------------------------------------------------------------------------- + +class GCN2(nn.Module): + """Plain 2‑layer GCN (baseline).""" + + def __init__(self, in_dim: int, hid_dim: int, out_dim: int, dropout: float = 0.5): + super().__init__() + self.conv1 = GCNConv(in_dim, hid_dim) + self.conv2 = GCNConv(hid_dim, out_dim) + self.dropout = dropout + + def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Optional[Tensor] = None) -> Tensor: + # Use self loops implicitly (GCNConv defaults add_self_loops=True) + x = F.relu(self.conv1(x, edge_index, edge_weight)) + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.conv2(x, edge_index, edge_weight) + return x + + +# ----------------------------------------------------------------------------- +# Anchor‑gated GCN +# ----------------------------------------------------------------------------- + +class AnchorGCN(nn.Module): + """ + A two‑layer GCN that injects a core‑restricted aggregation channel and + down‑weights edges crossing the core boundary. After the first GCN layer + computes base features, a gating network mixes them with features + aggregated only among core neighbors. + + Parameters + ---------- + in_dim : int + Dimensionality of input node features. + hid_dim : int + Dimensionality of hidden layer. + out_dim : int + Number of output classes. + core_mask : Tensor[bool] + Boolean mask indicating which nodes belong to the L‑RMC core. + gamma : float, optional + Damping factor for edges that connect core and non‑core nodes. 
+ Values <1.0 reduce the influence of boundary edges. Default is 1.0 + (no damping). + dropout : float, optional + Dropout probability applied after the first layer. + """ + + def __init__(self, + in_dim: int, + hid_dim: int, + out_dim: int, + core_mask: Tensor, + gamma: float = 1.0, + dropout: float = 0.5): + super().__init__() + self.core_mask = core_mask.clone().detach() + self.gamma = float(gamma) + self.dropout = dropout + + # Base and core convolutions for the first layer + # Base conv uses self loops; core conv disables self loops to avoid + # spurious core contributions on non‑core nodes + self.base1 = GCNConv(in_dim, hid_dim, add_self_loops=True) + self.core1 = GCNConv(in_dim, hid_dim, add_self_loops=False) + + # Second layer: standard GCN on mixed features + self.conv2 = GCNConv(hid_dim, out_dim) + + # Gating network: maps structural features to α ∈ [0,1] + self.gate = nn.Sequential( + nn.Linear(3, 16), + nn.ReLU(), + nn.Linear(16, 1), + nn.Sigmoid(), + ) + + def _compute_edge_weights(self, edge_index: Tensor) -> Tensor: + """ + Given an edge index (two‑row tensor), return a weight tensor of ones + multiplied by ``gamma`` for edges with exactly one endpoint in the core. + Self loops (if present) are untouched. Edge weights are 1 for base + edges and <1 for cross‑boundary edges. 
+ """ + if self.gamma >= 1.0: + return torch.ones(edge_index.size(1), dtype=torch.float32, device=edge_index.device) + src, dst = edge_index[0], edge_index[1] + in_core_src = self.core_mask[src] + in_core_dst = self.core_mask[dst] + cross = in_core_src ^ in_core_dst + w = torch.ones(edge_index.size(1), dtype=torch.float32, device=edge_index.device) + w[cross] *= self.gamma + return w + + def _compute_structural_features(self, edge_index: Tensor) -> Tuple[Tensor, Tensor, Tensor]: + """ + Compute structural features used by the gating network: + + * `in_core` – 1 if node in core, else 0 + * `frac_core_nbrs` – fraction of neighbors that are in the core + * `is_boundary` – 1 if node has both core and non‑core neighbors + + The features are returned as a tuple of three tensors of shape [N,1]. + Nodes with zero degree get frac_core_nbrs=0 and is_boundary=0. + """ + N = self.core_mask.size(0) + device = edge_index.device + # Degree and core neighbor counts + src = edge_index[0] + dst = edge_index[1] + deg = torch.zeros(N, dtype=torch.float32, device=device) + core_deg = torch.zeros(N, dtype=torch.float32, device=device) + # Count contributions of directed edges; duplicates will double‑count but + # the ratio remains stable if the graph is symmetric. 
+ deg.index_add_(0, src, torch.ones_like(src, dtype=torch.float32)) + # Count core neighbors: only increment source if destination is core + core_flags = self.core_mask[dst].float() + core_deg.index_add_(0, src, core_flags) + # Avoid division by zero + frac_core = torch.zeros(N, dtype=torch.float32, device=device) + nonzero = deg > 0 + frac_core[nonzero] = core_deg[nonzero] / deg[nonzero] + # Determine boundary: at least one core neighbor AND at least one non‑core neighbor + has_core = core_deg > 0 + has_non_core = (deg - core_deg) > 0 + is_boundary = (has_core & has_non_core).float() + in_core = self.core_mask.float() + return in_core.view(-1, 1), frac_core.view(-1, 1), is_boundary.view(-1, 1) + + def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: + # Compute dynamic edge weights (for base channels) using damping + w = self._compute_edge_weights(edge_index) + # First layer: base aggregation (standard neighbors with self loops) + h_base = self.base1(x, edge_index, w) + h_base = F.relu(h_base) + + # First layer: core aggregation (only core neighbors, no self loops) + # Extract edges where both endpoints are core + src, dst = edge_index + mask_core_edges = self.core_mask[src] & self.core_mask[dst] + ei_core = edge_index[:, mask_core_edges] + # If no core edges exist, h_core will be zeros + if ei_core.numel() == 0: + h_core = torch.zeros_like(h_base) + else: + h_core = self.core1(x, ei_core) + h_core = F.relu(h_core) + + # Structural features for gating + in_core, frac_core, is_boundary = self._compute_structural_features(edge_index) + feats = torch.cat([in_core, frac_core, is_boundary], dim=1) + alpha = self.gate(feats).view(-1) # shape [N] + # Force α=0 for nodes with no core neighbors to avoid modifying true periphery. + # Nodes with frac_core == 0 have zero core neighbors by construction. 
# The deg_for_division helper is no longer used but left here for completeness.
def deg_for_division(edge_index: Tensor, num_nodes: int) -> Tensor:
    """Return the per-node out-degree of ``edge_index`` as a float tensor."""
    src = edge_index[0]
    deg = torch.zeros(num_nodes, dtype=torch.float32, device=edge_index.device)
    deg.index_add_(0, src, torch.ones_like(src, dtype=torch.float32))
    return deg


# -----------------------------------------------------------------------------
# Training and evaluation routines
# -----------------------------------------------------------------------------

@torch.no_grad()
def accuracy(logits: Tensor, y: Tensor, mask: Tensor) -> float:
    """Fraction of correct argmax predictions restricted to ``mask``."""
    pred = logits[mask].argmax(dim=1)
    return (pred == y[mask]).float().mean().item()


def train_model(model: nn.Module,
                data,
                epochs: int = 200,
                lr: float = 0.01,
                weight_decay: float = 5e-4) -> None:
    """Train ``model`` with Adam + cross-entropy, restoring the best-val state.

    Parameters
    ----------
    model : nn.Module
        Model mapping (x, edge_index) -> logits; trained in place.
    data : Data
        PyG data object with x, y, edge_index and train/val masks.
    epochs, lr, weight_decay
        Standard optimisation hyperparameters.

    Fix vs. the previous version: the per-epoch validation forward pass now
    runs under ``torch.no_grad()`` so no autograd graph is built during
    evaluation (identical outputs, lower memory use).
    """
    opt = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    best_val = 0.0
    best_state = None
    for _ in range(1, epochs + 1):
        model.train()
        opt.zero_grad(set_to_none=True)
        logits = model(data.x, data.edge_index)
        loss = F.cross_entropy(logits[data.train_mask], data.y[data.train_mask])
        loss.backward()
        opt.step()

        # Validate; gradients are never needed here.
        model.eval()
        with torch.no_grad():
            logits_val = model(data.x, data.edge_index)
        val_acc = accuracy(logits_val, data.y, data.val_mask)
        if val_acc > best_val:
            best_val = val_acc
            best_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
    # Restore the best-validation snapshot, if any epoch improved on 0.0.
    if best_state is not None:
        model.load_state_dict(best_state)
    model.eval()
def evaluate_model(model: nn.Module, data) -> dict:
    """Return train/val/test accuracies of a trained ``model`` on ``data``."""
    model.eval()
    logits = model(data.x, data.edge_index)
    return {split: accuracy(logits, data.y, getattr(data, f"{split}_mask"))
            for split in ("train", "val", "test")}


# -----------------------------------------------------------------------------
# Dynamic graph perturbation utilities
# -----------------------------------------------------------------------------

def undirected_edge_set(edge_index: Tensor) -> Set[Tuple[int, int]]:
    """Collapse a directed edge_index into undirected (u, v) pairs with u < v.

    Self loops are dropped.
    """
    pairs = zip(edge_index[0].tolist(), edge_index[1].tolist())
    return {(min(u, v), max(u, v)) for u, v in pairs if u != v}


def edge_set_to_index(edges: Set[Tuple[int, int]], num_nodes: int) -> Tensor:
    """Expand undirected edges into a directed [2, 2|E|] edge_index tensor.

    Both orientations (u, v) and (v, u) are emitted. Self loops are omitted;
    GCNConv adds them automatically.
    """
    if not edges:
        return torch.empty(2, 0, dtype=torch.long)
    srcs: List[int] = []
    dsts: List[int] = []
    for u, v in edges:
        srcs += [u, v]
        dsts += [v, u]
    return torch.tensor([srcs, dsts], dtype=torch.long)


def random_rewire(edges: Set[Tuple[int, int]], num_nodes: int, n_changes: int, rng: random.Random) -> Set[Tuple[int, int]]:
    """Remove ``n_changes`` random edges, then try to add the same number back.

    New edges are sampled uniformly; self loops and duplicates are rejected,
    with at most ``10 * n_changes`` sampling attempts overall, so fewer than
    ``n_changes`` additions may happen on dense graphs.
    """
    result = set(edges)  # work on a copy
    # Cannot remove more edges than exist.
    n_changes = min(n_changes, len(result))
    for gone in rng.sample(list(result), n_changes):
        result.remove(gone)
    added = 0
    attempts = 0
    while added < n_changes and attempts < n_changes * 10:
        u = rng.randrange(num_nodes)
        v = rng.randrange(num_nodes)
        if u != v:
            cand = (u, v) if u < v else (v, u)
            if cand not in result:
                result.add(cand)
                added += 1
        attempts += 1
    return result
def auc_over_time(acc_list: List[float]) -> float:
    """Area under an accuracy-time curve via the trapezoidal rule.

    ``acc_list`` holds accuracies at t = 0, 1, ..., T; the area is normalized
    by T so a constant curve at 1.0 yields AUC = 1.0.

    Fix: a single-point list previously divided by zero (``area / (len - 1)``
    with len == 1, e.g. when ``steps=0``); it now returns that single accuracy,
    the natural degenerate value. An empty list still returns 0.0.
    """
    if not acc_list:
        return 0.0
    if len(acc_list) == 1:
        return acc_list[0]
    area = sum((lo + hi) / 2.0 for lo, hi in zip(acc_list, acc_list[1:]))
    return area / (len(acc_list) - 1)
def evaluate_dynamic_auc(model: nn.Module,
                         data,
                         core_mask: Tensor,
                         steps: int = 30,
                         flip_fraction: float = 0.05,
                         rng_seed: int = 1234) -> List[float]:
    """Test accuracy along a trajectory of random edge rewirings.

    Parameters
    ----------
    model : nn.Module
        Trained model mapping (x, edge_index) -> logits.
    data : Data
        PyG data object providing x, y, test_mask; ``data.edge_index`` is the
        initial adjacency.
    core_mask : Tensor[bool]
        Core-node mask; kept for API symmetry (the baseline model ignores it).
    steps : int, optional
        Number of rewiring steps; accuracy at t=0 is measured first.
    flip_fraction : float, optional
        Fraction of undirected edges removed/added per step (e.g. 0.05 = 5%).
    rng_seed : int, optional
        Seed of the rewiring RNG, for reproducibility.

    Returns
    -------
    List[float]
        ``steps + 1`` test accuracies, including the t=0 measurement.

    Fix: every forward pass now runs under ``torch.no_grad()`` — evaluation
    previously built an autograd graph on each of the rewiring steps.
    """
    base_edges = undirected_edge_set(data.edge_index)
    # At least one change per step even for tiny graphs.
    n_changes = max(1, int(flip_fraction * len(base_edges)))
    model.eval()
    rng = random.Random(rng_seed)
    cur_edges = set(base_edges)  # mutable working copy
    accuracies: List[float] = []
    with torch.no_grad():
        # t = 0: the unperturbed graph, on the same device as the features.
        ei = edge_set_to_index(cur_edges, data.num_nodes).to(data.x.device)
        accuracies.append(accuracy(model(data.x, ei), data.y, data.test_mask))
        # t = 1..steps: rewire, then re-measure.
        for _ in range(steps):
            cur_edges = random_rewire(cur_edges, data.num_nodes, n_changes, rng)
            ei = edge_set_to_index(cur_edges, data.num_nodes).to(data.x.device)
            accuracies.append(accuracy(model(data.x, ei), data.y, data.test_mask))
    return accuracies
def main():
    """Command-line driver: train the chosen variant(s), report static accuracy
    and the dynamic AUC under random edge rewiring."""
    parser = argparse.ArgumentParser(description="L‑RMC anchored GCN vs. baseline with dynamic evaluation.")
    parser.add_argument("--dataset", required=True, choices=["Cora", "Citeseer", "Pubmed"],
                        help="Planetoid dataset to load.")
    parser.add_argument("--seeds", required=True, help="Path to LRMC seeds JSON (for core extraction).")
    parser.add_argument("--variant", choices=["baseline", "anchor"], default="anchor", help="Which variant to run.")
    parser.add_argument("--hidden", type=int, default=64, help="Hidden dimension.")
    parser.add_argument("--epochs", type=int, default=200, help="Number of training epochs.")
    parser.add_argument("--lr", type=float, default=0.01, help="Learning rate.")
    parser.add_argument("--wd", type=float, default=5e-4, help="Weight decay (L2).")
    parser.add_argument("--dropout", type=float, default=0.5, help="Dropout probability.")
    parser.add_argument("--gamma", type=float, default=1.0, help="Damping factor γ for cross‑boundary edges (anchor only).")
    parser.add_argument("--dynamic_steps", type=int, default=30, help="Number of dynamic rewiring steps for AUC evaluation.")
    parser.add_argument("--flip_fraction", type=float, default=0.05, help="Fraction of edges rewired at each step.")
    parser.add_argument("--seed", type=int, default=42, help="Random seed for PyTorch.")
    args = parser.parse_args()

    # Reproducibility.
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    # Data.
    dataset = Planetoid(root=f"./data/{args.dataset}", name=args.dataset)
    data = dataset[0]
    in_dim = dataset.num_node_features
    out_dim = dataset.num_classes
    num_nodes = data.num_nodes

    # L-RMC core assignment.
    core_mask, core_nodes = load_top1_assignment(args.seeds, num_nodes)
    print(f"Loaded core of size {core_nodes.numel()} from {args.seeds}.")

    def fit_and_report(model, label):
        # Shared train + static-evaluation step for both variants.
        train_model(model, data, epochs=args.epochs, lr=args.lr, weight_decay=args.wd)
        metrics = evaluate_model(model, data)
        print(f"{label}: train={metrics['train']:.4f} val={metrics['val']:.4f} test={metrics['test']:.4f}")
        return metrics

    def dynamic_auc(model):
        # AUC of test accuracy over the rewiring trajectory.
        accs = evaluate_dynamic_auc(model, data, core_mask, steps=args.dynamic_steps,
                                    flip_fraction=args.flip_fraction, rng_seed=args.seed)
        return auc_over_time(accs)

    # Baseline is always trained first (same RNG consumption in both modes).
    baseline = GCN2(in_dim, args.hidden, out_dim, dropout=args.dropout)
    fit_and_report(baseline, "Baseline GCN")

    if args.variant == "baseline":
        auc = dynamic_auc(baseline)
        print(f"Baseline dynamic AUC‑AT (steps={args.dynamic_steps}, flip={args.flip_fraction}): {auc:.4f}")
        return

    # Anchor variant: additionally train the anchored model and compare.
    anchor = AnchorGCN(in_dim, args.hidden, out_dim,
                       core_mask=core_mask,
                       gamma=args.gamma,
                       dropout=args.dropout)
    fit_and_report(anchor, "Anchor‑GCN")

    auc_base = dynamic_auc(baseline)
    auc_anchor = dynamic_auc(anchor)
    print(f"Dynamic AUC‑AT (steps={args.dynamic_steps}, flip={args.flip_fraction}):")
    print(f" Baseline : {auc_base:.4f}\n Anchor : {auc_anchor:.4f}")


if __name__ == "__main__":
    main()
"""
Train a GCN on an L‑RMC subgraph and compare to a full‑graph baseline.

Modes:
  - core_mode=forward : Train on the core subgraph, then forward on the full graph.
  - core_mode=appnp   : Train on the core subgraph, then seed logits on the core
    and APPNP‑propagate on the full graph.

Extras:
  - --expand_core_with_train : Ensure all training labels lie inside the core
    (C' = C ∪ train_idx) for fair train‑time comparison.
  - --warm_ft_epochs N : Optional short finetune on the full graph starting
    from the core model's weights (measure time‑to‑target).

Prints dataset stats, core size and train/val/test coverage inside the core,
accuracy for the baseline and core models, and wall‑clock times.
"""

import argparse
import json
import random
import time
from pathlib import Path
from statistics import mean, stdev
from typing import Dict

import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import APPNP, GCNConv
from torch_geometric.utils import subgraph

# ------------------------------------------------------------
# Rich imports (console tables and progress reporting)
# ------------------------------------------------------------
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TimeElapsedColumn
from rich.table import Table

# Single shared console for all output.
console = Console()
+ """ + obj = json.loads(Path(seeds_json).read_text()) + clusters = obj.get("clusters", []) + if not clusters: + return torch.zeros(n_nodes, dtype=torch.bool) + best = max(clusters, key=lambda c: (float(c.get("score", 0.0)), len(c.get("seed_nodes", [])))) + ids = best.get("seed_nodes", []) + ids = [int(x) - 1 for x in ids] # Convert 1-indexed to 0-indexed + ids = sorted(set([i for i in ids if 0 <= i < n_nodes])) + mask = torch.zeros(n_nodes, dtype=torch.bool) + if ids: + mask[torch.tensor(ids, dtype=torch.long)] = True + return mask + + +def coverage_counts(core_mask: torch.Tensor, train_mask: torch.Tensor, + val_mask: torch.Tensor, test_mask: torch.Tensor) -> Dict[str, int]: + return { + "core_size": int(core_mask.sum().item()), + "train_in_core": int((core_mask & train_mask).sum().item()), + "val_in_core": int((core_mask & val_mask).sum().item()), + "test_in_core": int((core_mask & test_mask).sum().item()), + } + + +def accuracy(logits: Tensor, y: Tensor, mask: Tensor) -> float: + pred = logits[mask].argmax(dim=1) + return (pred == y[mask]).float().mean().item() + + +def set_seed(seed: int): + """Set random seeds for reproducibility across runs.""" + random.seed(seed) + try: + import numpy as np # optional + np.random.seed(seed) + except Exception: + pass + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + # Make CUDA/CuDNN deterministic where applicable + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +# ------------------------------------------------------------ +# Models +# ------------------------------------------------------------ +class GCN2(nn.Module): + def __init__(self, in_dim: int, hid: int, out_dim: int, dropout: float = 0.5): + super().__init__() + self.c1 = GCNConv(in_dim, hid) + self.c2 = GCNConv(hid, out_dim) + self.dropout = dropout + + def forward(self, x, ei): + x = self.c1(x, ei) + x = torch.relu(x) + x = F.dropout(x, p=self.dropout, training=self.training) + 
# ------------------------------------------------------------
# Models
# ------------------------------------------------------------
class GCN2(nn.Module):
    """Plain two-layer GCN: conv -> ReLU -> dropout -> conv."""

    def __init__(self, in_dim: int, hid: int, out_dim: int, dropout: float = 0.5):
        super().__init__()
        self.c1 = GCNConv(in_dim, hid)
        self.c2 = GCNConv(hid, out_dim)
        self.dropout = dropout

    def forward(self, x, ei):
        hidden = torch.relu(self.c1(x, ei))
        hidden = F.dropout(hidden, p=self.dropout, training=self.training)
        return self.c2(hidden, ei)


# ------------------------------------------------------------
# Training / evaluation
# ------------------------------------------------------------
@torch.no_grad()
def eval_all(model: nn.Module, data) -> Dict[str, float]:
    """Accuracy on the train/val/test masks of ``data``."""
    model.eval()
    logits = model(data.x, data.edge_index)
    return {
        "train": accuracy(logits, data.y, data.train_mask),
        "val": accuracy(logits, data.y, data.val_mask),
        "test": accuracy(logits, data.y, data.test_mask),
    }


def train(model: nn.Module, data, epochs=200, lr=0.01, wd=5e-4, patience=100):
    """Adam + cross-entropy training with validation-based early stopping.

    Keeps the best-validation snapshot and restores it at the end; shows a
    transient rich spinner with the current epoch and validation accuracy.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
    best_val = -1.0
    best_state = None
    stale = 0

    with Progress(
        SpinnerColumn(),
        "[progress.description]{task.description}",
        TimeElapsedColumn(),
        transient=True,
    ) as progress:
        task = progress.add_task("Training", total=epochs)

        for epoch in range(1, epochs + 1):
            model.train()
            optimizer.zero_grad(set_to_none=True)
            logits = model(data.x, data.edge_index)
            loss = F.cross_entropy(logits[data.train_mask], data.y[data.train_mask])
            loss.backward()
            optimizer.step()

            # Early stopping on validation accuracy.
            with torch.no_grad():
                val = accuracy(model(data.x, data.edge_index), data.y, data.val_mask)

            if val > best_val:
                best_val, stale = val, 0
                best_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            else:
                stale += 1
                if stale >= patience:
                    break

            progress.update(task, advance=1, description=f"Epoch {epoch} | val={val:.4f}")

    if best_state is not None:
        model.load_state_dict(best_state)
    model.eval()


def subset_data(data, nodes_idx: torch.Tensor):
    """Induced subgraph on ``nodes_idx`` with x/y/masks re-sliced to that set.

    Nodes are relabelled to 0..k-1; the returned object is a fresh instance of
    ``type(data)`` carrying x, y, the three masks, edge_index and num_nodes.
    """
    nodes_idx = nodes_idx.to(torch.long)
    sub_ei, _ = subgraph(nodes_idx, data.edge_index, relabel_nodes=True, num_nodes=data.num_nodes)
    sub = type(data)()
    sub.x = data.x[nodes_idx]
    sub.y = data.y[nodes_idx]
    sub.train_mask = data.train_mask[nodes_idx]
    sub.val_mask = data.val_mask[nodes_idx]
    sub.test_mask = data.test_mask[nodes_idx]
    sub.edge_index = sub_ei
    sub.num_nodes = sub.x.size(0)
    return sub


# ------------------------------------------------------------
# APPNP seeding (Mode B)
# ------------------------------------------------------------
def appnp_seed_propagate(logits_seed: Tensor, edge_index: Tensor, K=10, alpha=0.1) -> Tensor:
    """Diffuse core-seeded logits (zero rows outside the core) over the graph."""
    propagator = APPNP(K=K, alpha=alpha)  # parameter-free propagation
    return propagator(logits_seed, edge_index)
train_idx).") + p.add_argument("--warm_ft_epochs", type=int, default=0, + help="If >0, run a short finetune on the FULL graph starting from the core model.") + p.add_argument("--warm_ft_lr", type=float, default=0.005) + p.add_argument("--runs", type=int, default=1, + help="Number of runs with different seeds to average results.") + p.add_argument("-o", "--output_json", type=str, default=None, + help="If set, save all computed metrics and settings to this JSON file.") + args = p.parse_args() + + # ------------------------------------------------------------ + # Load data + # ------------------------------------------------------------ + ds = Planetoid(root=f"./data/{args.dataset}", name=args.dataset) + data = ds[0] + n, e = data.num_nodes, data.edge_index.size(1) // 2 + + console.print(f"[bold cyan]Dataset: {args.dataset} | Nodes: {n} | Edges: {e}[/bold cyan]") + + # Results accumulator for optional JSON output + results = { + "args": { + k: (float(v) if isinstance(v, float) else v) + for k, v in vars(args).items() + if k != "output_json" + }, + "dataset": { + "name": args.dataset, + "num_nodes": int(n), + "num_edges": int(e), + }, + } + + def maybe_save_results(): + """Write results to JSON if the user requested it.""" + if not args.output_json: + return + out_path = Path(args.output_json) + try: + out_path.parent.mkdir(parents=True, exist_ok=True) + except Exception: + pass + with out_path.open("w") as f: + json.dump(results, f, indent=2) + + # ------------------------------------------------------------ + # Load LRMC core + # ------------------------------------------------------------ + core_mask = load_top1_assignment(args.seeds, n) + if args.expand_core_with_train: + core_mask = core_mask | data.train_mask + + C_idx = torch.nonzero(core_mask, as_tuple=False).view(-1) + frac = 100.0 * C_idx.numel() / n + cov = coverage_counts(core_mask, data.train_mask, data.val_mask, data.test_mask) + + console.print(f"[bold green]Loaded LRMC core of size {cov['core_size']} 
(≈{frac:.2f}% of the graph) from {args.seeds}[/bold green]") + + # Record core coverage info + results["core"] = { + "source": str(args.seeds), + "expanded_with_train": bool(args.expand_core_with_train), + "size": int(cov["core_size"]), + "fraction": float(frac / 100.0), + "coverage": { + "train_in_core": int(cov["train_in_core"]), + "val_in_core": int(cov["val_in_core"]), + "test_in_core": int(cov["test_in_core"]), + }, + } + + # Coverage table + cov_table = Table(title="LRMC Core Coverage") + cov_table.add_column("Metric", style="cyan") + cov_table.add_column("Count", style="magenta") + cov_table.add_row("Core Size", str(cov["core_size"])) + cov_table.add_row("Train in Core", str(cov["train_in_core"])) + cov_table.add_row("Val in Core", str(cov["val_in_core"])) + cov_table.add_row("Test in Core", str(cov["test_in_core"])) + console.print(cov_table) + + # ------------------------------------------------------------ + # Single-run or multi-run execution + # ------------------------------------------------------------ + if args.runs == 1: + # --------------------- + # Baseline (full graph) + # --------------------- + set_seed(0) + t0 = time.perf_counter() + base = GCN2(in_dim=ds.num_node_features, + hid=args.hidden, + out_dim=ds.num_classes, + dropout=args.dropout) + train(base, data, epochs=args.epochs, lr=args.lr, wd=args.wd, patience=args.patience) + base_metrics = eval_all(base, data) + t1 = time.perf_counter() + + console.print("\n[bold]Baseline (trained on full graph):[/bold]") + base_table = Table(show_header=True, header_style="bold magenta") + base_table.add_column("Metric", style="cyan") + base_table.add_column("Value", style="magenta") + base_table.add_row("Train Accuracy", f"{base_metrics['train']:.4f}") + base_table.add_row("Validation Accuracy", f"{base_metrics['val']:.4f}") + base_table.add_row("Test Accuracy", f"{base_metrics['test']:.4f}") + base_table.add_row("Time (s)", f"{t1 - t0:.2f}") + console.print(base_table) + + # Save baseline single-run 
metrics + results["single_run"] = { + "baseline": { + "train": float(base_metrics["train"]), + "val": float(base_metrics["val"]), + "test": float(base_metrics["test"]), + "time_s": float(t1 - t0), + } + } + + # --------------------- + # Core model (train on subgraph) + # --------------------- + if C_idx.numel() == 0: + console.print("[bold yellow]LRMC core is empty; skipping core model.[/bold yellow]") + results["core_empty"] = True + maybe_save_results() + return + + data_C = subset_data(data, C_idx) + mC = GCN2(in_dim=ds.num_node_features, + hid=args.hidden, + out_dim=ds.num_classes, + dropout=args.dropout) + + t2 = time.perf_counter() + train(mC, data_C, epochs=args.epochs, lr=args.lr, wd=args.wd, patience=args.patience) + t3 = time.perf_counter() + + # Evaluate core model on FULL graph + if args.core_mode == "forward": + # Mode A: run a standard forward pass on the full graph + mC.eval() + logits_full = mC(data.x, data.edge_index) + else: + # Mode B: seed logits on core and propagate with APPNP + mC.eval() + with torch.no_grad(): + logits_C = mC(data_C.x, data_C.edge_index) # [|C|, num_classes] + logits_seed = torch.zeros(n, ds.num_classes, device=logits_C.device) + logits_seed[C_idx] = logits_C + logits_full = appnp_seed_propagate(logits_seed, + data.edge_index, + K=args.K, + alpha=args.alpha) + + core_metrics = { + "train": accuracy(logits_full, data.y, data.train_mask), + "val": accuracy(logits_full, data.y, data.val_mask), + "test": accuracy(logits_full, data.y, data.test_mask), + } + + console.print("\n[bold]LRMC‑core model (trained on core, evaluated on full graph):[/bold]") + core_table = Table(show_header=True, header_style="bold magenta") + core_table.add_column("Metric", style="cyan") + core_table.add_column("Value", style="magenta") + core_table.add_row("Train Accuracy", f"{core_metrics['train']:.4f}") + core_table.add_row("Validation Accuracy", f"{core_metrics['val']:.4f}") + core_table.add_row("Test Accuracy", f"{core_metrics['test']:.4f}") + 
core_table.add_row("Core Training Time (s)", f"{t3 - t2:.2f}") + speedup = (t1 - t0) / (t3 - t2 + 1e-9) + core_table.add_row("Speedup vs. Baseline", f"{speedup:.2f}×") + console.print(core_table) + + # Save core single-run metrics + results["single_run"]["core_model"] = { + "mode": str(args.core_mode), + "train": float(core_metrics["train"]), + "val": float(core_metrics["val"]), + "test": float(core_metrics["test"]), + "core_train_time_s": float(t3 - t2), + "speedup_vs_baseline": float(speedup), + } + + # ------------------------------------------------------------------------------------ + + console.print("\n[bold]Model Comparison: Baseline vs. L-RMC-core[/bold]") + + # Create comparison table + comparison_table = Table(title="Performance Comparison", show_header=True, header_style="bold magenta") + comparison_table.add_column("Metric", style="cyan") + comparison_table.add_column("Baseline", style="magenta") + comparison_table.add_column("L-RMC-core", style="green") + comparison_table.add_column("Speedup", style="yellow") + + # Add performance metrics + for metric in ["train", "val", "test"]: + comparison_table.add_row( + f"{metric.capitalize()} Accuracy", + f"{base_metrics[metric]:.4f}", + f"{core_metrics[metric]:.4f}", + "" # Speedup is not applicable for accuracy + ) + + # Add timing and speedup + baseline_time = t1 - t0 + core_time = t3 - t2 + speedup = baseline_time / core_time if core_time > 0 else float('inf') + + comparison_table.add_row( + "Training Time (s)", + f"{baseline_time:.2f}", + f"{core_time:.2f}", + f"{speedup:.2f}x" + ) + + comparison_table.add_row( + "Speedup", + "1x", + f"{speedup:.2f}x", + "" + ) + + console.print(comparison_table) + + # Optional warm‑start finetune (single run) + if args.warm_ft_epochs > 0: + warm = GCN2(in_dim=ds.num_node_features, + hid=args.hidden, + out_dim=ds.num_classes, + dropout=args.dropout) + warm.load_state_dict(mC.state_dict()) + + t4 = time.perf_counter() + train(warm, data, + epochs=args.warm_ft_epochs, + 
lr=args.warm_ft_lr, + wd=args.wd, + patience=args.warm_ft_epochs + 1) + t5 = time.perf_counter() + warm_metrics = eval_all(warm, data) + + console.print("\n[bold]Warm‑start finetune (start from core model, train on FULL graph):[/bold]") + warm_table = Table(show_header=True, header_style="bold magenta") + warm_table.add_column("Metric", style="cyan") + warm_table.add_column("Value", style="magenta") + warm_table.add_row("Train Accuracy", f"{warm_metrics['train']:.4f}") + warm_table.add_row("Validation Accuracy", f"{warm_metrics['val']:.4f}") + warm_table.add_row("Test Accuracy", f"{warm_metrics['test']:.4f}") + warm_table.add_row("Finetune Time (s)", f"{t5 - t4:.2f}") + warm_table.add_row("Total (core train + warm)", f"{(t3 - t2 + t5 - t4):.2f}s") + console.print(warm_table) + + # Save warm single-run metrics + results["single_run"]["warm_finetune"] = { + "train": float(warm_metrics["train"]), + "val": float(warm_metrics["val"]), + "test": float(warm_metrics["test"]), + "finetune_time_s": float(t5 - t4), + "total_time_s": float((t3 - t2) + (t5 - t4)), + } + + # Emit results for single-run + maybe_save_results() + else: + # -------------------------------------------------------- + # Multi-run: average metrics across different seeds + # -------------------------------------------------------- + runs = args.runs + console.print(f"\n[bold]Running {runs} seeds and averaging results[/bold]") + + # Storage for metrics across runs + base_train, base_val, base_test, base_time = [], [], [], [] + core_train, core_val, core_test, core_time = [], [], [], [] + speedups = [] + + warm_train, warm_val, warm_test, warm_time, warm_total_time = [], [], [], [], [] + + data_C = subset_data(data, C_idx) if C_idx.numel() > 0 else None + results["core_empty"] = data_C is None + + for r in range(runs): + set_seed(r) + + # Baseline + t0 = time.perf_counter() + base = GCN2(in_dim=ds.num_node_features, + hid=args.hidden, + out_dim=ds.num_classes, + dropout=args.dropout) + train(base, data, 
epochs=args.epochs, lr=args.lr, wd=args.wd, patience=args.patience) + bm = eval_all(base, data) + t1 = time.perf_counter() + + base_train.append(bm["train"]) ; base_val.append(bm["val"]) ; base_test.append(bm["test"]) ; base_time.append(t1 - t0) + + # Core model + if data_C is None: + continue # no core available + + t2 = time.perf_counter() + mC = GCN2(in_dim=ds.num_node_features, + hid=args.hidden, + out_dim=ds.num_classes, + dropout=args.dropout) + train(mC, data_C, epochs=args.epochs, lr=args.lr, wd=args.wd, patience=args.patience) + t3 = time.perf_counter() + + if args.core_mode == "forward": + mC.eval() + logits_full = mC(data.x, data.edge_index) + else: + mC.eval() + with torch.no_grad(): + logits_C = mC(data_C.x, data_C.edge_index) + logits_seed = torch.zeros(n, ds.num_classes, device=logits_C.device) + logits_seed[C_idx] = logits_C + logits_full = appnp_seed_propagate(logits_seed, + data.edge_index, + K=args.K, + alpha=args.alpha) + + cm = { + "train": accuracy(logits_full, data.y, data.train_mask), + "val": accuracy(logits_full, data.y, data.val_mask), + "test": accuracy(logits_full, data.y, data.test_mask), + } + + core_train.append(cm["train"]) ; core_val.append(cm["val"]) ; core_test.append(cm["test"]) ; core_time.append(t3 - t2) + speedups.append((t1 - t0) / (t3 - t2 + 1e-9)) + + # Optional warm finetune per run + if args.warm_ft_epochs > 0: + warm = GCN2(in_dim=ds.num_node_features, + hid=args.hidden, + out_dim=ds.num_classes, + dropout=args.dropout) + warm.load_state_dict(mC.state_dict()) + + t4 = time.perf_counter() + train(warm, data, + epochs=args.warm_ft_epochs, + lr=args.warm_ft_lr, + wd=args.wd, + patience=args.warm_ft_epochs + 1) + t5 = time.perf_counter() + wm = eval_all(warm, data) + warm_train.append(wm["train"]) ; warm_val.append(wm["val"]) ; warm_test.append(wm["test"]) ; warm_time.append(t5 - t4) + warm_total_time.append((t3 - t2) + (t5 - t4)) + + # Helper to format mean ± std + def fmt(values, prec=4): + if not values: + return "n/a" + 
if len(values) == 1: + return f"{values[0]:.{prec}f}" + try: + return f"{mean(values):.{prec}f} ± {stdev(values):.{prec}f}" + except Exception: + m = sum(values) / len(values) + var = sum((v - m) ** 2 for v in values) / max(1, len(values) - 1) + return f"{m:.{prec}f} ± {var ** 0.5:.{prec}f}" + + def stats(values): + """Return dict with list, mean, std, count for JSON.""" + d = { + "values": [float(v) for v in values], + "count": int(len(values)), + } + if len(values) >= 1: + d["mean"] = float(mean(values)) + if len(values) >= 2: + d["std"] = float(stdev(values)) + else: + d["std"] = None + return d + + # Baseline summary + console.print("\n[bold]Baseline (averaged over runs):[/bold]") + base_table = Table(show_header=True, header_style="bold magenta") + base_table.add_column("Metric", style="cyan") + base_table.add_column("Mean ± Std", style="magenta") + base_table.add_row("Train Accuracy", fmt(base_train)) + base_table.add_row("Validation Accuracy", fmt(base_val)) + base_table.add_row("Test Accuracy", fmt(base_test)) + base_table.add_row("Time (s)", fmt(base_time, prec=2)) + console.print(base_table) + + # Save baseline multi-run summary + results["multi_run"] = { + "runs": int(runs), + "baseline": { + "train": stats(base_train), + "val": stats(base_val), + "test": stats(base_test), + "time_s": stats(base_time), + } + } + + if data_C is None: + console.print("[bold yellow]LRMC core is empty; no core runs to average.[/bold yellow]") + maybe_save_results() + return + + # Core summary + console.print("\n[bold]LRMC‑core (averaged over runs):[/bold]") + core_table = Table(show_header=True, header_style="bold magenta") + core_table.add_column("Metric", style="cyan") + core_table.add_column("Mean ± Std", style="magenta") + core_table.add_row("Train Accuracy", fmt(core_train)) + core_table.add_row("Validation Accuracy", fmt(core_val)) + core_table.add_row("Test Accuracy", fmt(core_test)) + core_table.add_row("Core Training Time (s)", fmt(core_time, prec=2)) + 
core_table.add_row("Speedup vs. Baseline", fmt(speedups, prec=2)) + console.print(core_table) + + # Save core multi-run summary + results["multi_run"]["core_model"] = { + "mode": str(args.core_mode), + "train": stats(core_train), + "val": stats(core_val), + "test": stats(core_test), + "core_train_time_s": stats(core_time), + "speedup_vs_baseline": stats(speedups), + } + + # Comparison summary + console.print("\n[bold]Model Comparison (averaged): Baseline vs. L-RMC-core[/bold]") + comparison_table = Table(title="Performance Comparison (Mean ± Std)", show_header=True, header_style="bold magenta") + comparison_table.add_column("Metric", style="cyan") + comparison_table.add_column("Baseline", style="magenta") + comparison_table.add_column("L-RMC-core", style="green") + comparison_table.add_column("Speedup", style="yellow") + + for metric, b_vals, c_vals in [ + ("Train Accuracy", base_train, core_train), + ("Validation Accuracy", base_val, core_val), + ("Test Accuracy", base_test, core_test), + ]: + comparison_table.add_row(metric, fmt(b_vals), fmt(c_vals), "") + + comparison_table.add_row("Training Time (s)", fmt(base_time, prec=2), fmt(core_time, prec=2), fmt(speedups, prec=2)) + comparison_table.add_row("Speedup", "1x", fmt(speedups, prec=2), "") + console.print(comparison_table) + + # Optional warm summary + if args.warm_ft_epochs > 0 and warm_time: + console.print("\n[bold]Warm‑start finetune (averaged over runs):[/bold]") + warm_table = Table(show_header=True, header_style="bold magenta") + warm_table.add_column("Metric", style="cyan") + warm_table.add_column("Mean ± Std", style="magenta") + warm_table.add_row("Train Accuracy", fmt(warm_train)) + warm_table.add_row("Validation Accuracy", fmt(warm_val)) + warm_table.add_row("Test Accuracy", fmt(warm_test)) + warm_table.add_row("Finetune Time (s)", fmt(warm_time, prec=2)) + warm_table.add_row("Total (core train + warm)", fmt(warm_total_time, prec=2)) + console.print(warm_table) + + # Save warm multi-run summary + 
results["multi_run"]["warm_finetune"] = { + "train": stats(warm_train), + "val": stats(warm_val), + "test": stats(warm_test), + "finetune_time_s": stats(warm_time), + "total_time_s": stats(warm_total_time), + } + + # Emit results for multi-run + maybe_save_results() + +if __name__ == "__main__": + main() diff --git a/src/2_epsilon_seed_sweep.py b/src/2_epsilon_seed_sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f3caad175a53977b4b567c57a076a6964b8b99 --- /dev/null +++ b/src/2_epsilon_seed_sweep.py @@ -0,0 +1,215 @@ +import json +import os +import shutil +from typing import List, Set, Dict, Tuple +from decimal import Decimal, getcontext +from rich import print + +from generate_lrmc_seeds import build_lrmc_single_graph + +def get_seed_nodes(seeds_path: str) -> Set[int]: + """Extract all seed nodes from a seeds JSON file. + + Handles either 'seed_nodes' or 'members' fields. + """ + try: + with open(seeds_path, 'r') as f: + data = json.load(f) + + seed_nodes: Set[int] = set() + clusters = data.get('clusters', []) + for cluster in clusters: + nodes = cluster.get('seed_nodes') + if nodes is None: + nodes = cluster.get('members', []) + seed_nodes.update(nodes) + + return seed_nodes + except (FileNotFoundError, json.JSONDecodeError, KeyError) as e: + print(f"[red]Error reading {seeds_path}: {e}[/red]") + return set() + +def _format_eps_label(val: Decimal) -> str: + """Return a stable, unique string label for epsilon values. + + - Use integer string for integral values (e.g., '50000'). + - Otherwise, use a compact decimal without trailing zeros. + This avoids duplicates like many '1e+04' when step is small. + """ + # Normalize to remove exponent if integral + if val == val.to_integral_value(): + return str(val.to_integral_value()) + # Use 'f' then strip trailing zeros/decimal point for uniqueness and readability + s = format(val, 'f') + if '.' 
def run_epsilon_sweep(input_edgelist: str, out_dir: str, levels: int,
                      epsilon_start: float = 1e4, epsilon_end: float = 5e5,
                      epsilon_step: float = 1e4, cleanup_duplicates: bool = True):
    """
    Run LRMC for multiple epsilon values and remove duplicate results.

    Args:
        input_edgelist: Path to input edgelist file
        out_dir: Output directory
        levels: Number of levels to build
        epsilon_start: Starting epsilon value (default: 1e4)
        epsilon_end: Ending epsilon value (default: 5e5)
        epsilon_step: Step size for epsilon (default: 1e4)
        cleanup_duplicates: Whether to remove duplicate seed sets (default: True)
    """
    print(f"[blue]Starting epsilon sweep from {epsilon_start} to {epsilon_end} with step {epsilon_step}[/blue]")

    # Preflight: check input edgelist path and fix a common typo ('.tx' -> '.txt')
    if not os.path.isfile(input_edgelist):
        fixed_path = None
        if input_edgelist.endswith('.tx') and os.path.isfile(input_edgelist + 't'):
            fixed_path = input_edgelist + 't'
        elif input_edgelist.endswith('.txt'):
            # Try relative to CWD if a bare filename was intended
            alt = os.path.join(os.getcwd(), input_edgelist)
            if os.path.isfile(alt):
                fixed_path = alt
        if fixed_path:
            print(f"[yellow]Input edgelist not found at '{input_edgelist}'. Using '{fixed_path}' instead.[/yellow]")
            input_edgelist = fixed_path
        else:
            raise FileNotFoundError(f"Input edgelist not found: '{input_edgelist}'. Did you mean '.txt'?")

    # Generate epsilon values
    epsilons = generate_epsilon_range(epsilon_start, epsilon_end, epsilon_step)
    print(f"[blue]Will test {len(epsilons)} epsilon values: {epsilons}[/blue]")

    # Track each distinct seed set and the first epsilon that produced it
    seen_seed_sets: Dict[Tuple[int, ...], str] = {}

    # Run for each epsilon
    for epsilon in epsilons:
        print(f"[yellow]Processing epsilon: {epsilon}[/yellow]")

        # Temporary output directory for this epsilon (cleaned up on error/duplicate)
        temp_out_dir = f"{out_dir}_temp_{epsilon}"
        try:
            # Run LRMC
            seeds_path = build_lrmc_single_graph(
                input_edgelist=input_edgelist,
                out_dir=temp_out_dir,
                levels=levels,
                epsilon=epsilon
            )

            # Get seed nodes; a sorted tuple is a hashable fingerprint of the set
            seed_nodes = get_seed_nodes(seeds_path)
            seed_nodes_tuple = tuple(sorted(seed_nodes))

            print(f"[green]Epsilon {epsilon}: Found {len(seed_nodes)} unique seed nodes[/green]")

            # BUG FIX: honour cleanup_duplicates — it was accepted but ignored.
            # With cleanup disabled, duplicate results are kept on disk.
            if cleanup_duplicates and seed_nodes_tuple in seen_seed_sets:
                existing_epsilon = seen_seed_sets[seed_nodes_tuple]
                print(f"[yellow]Duplicate seed set found! Epsilon {epsilon} has same seeds as {existing_epsilon}[/yellow]")
                print(f"[yellow]Removing duplicate results for epsilon {epsilon}[/yellow]")

                # Clean up temporary directory
                if os.path.exists(temp_out_dir):
                    shutil.rmtree(temp_out_dir)
                continue

            # Record only the first epsilon that produced this seed set
            if seed_nodes_tuple not in seen_seed_sets:
                seen_seed_sets[seed_nodes_tuple] = epsilon

            # Move results to final location
            final_out_dir = f"{out_dir}_epsilon_{epsilon}"
            if os.path.exists(final_out_dir):
                shutil.rmtree(final_out_dir)
            shutil.move(temp_out_dir, final_out_dir)

            # Move seeds_XXXXX.json to the stage0 directory
            seeds_file = os.path.join(final_out_dir, "stage0", f"seeds_{epsilon}.json")
            if os.path.exists(seeds_file):
                stage0_dir = os.path.join(out_dir, "stage0")
                os.makedirs(stage0_dir, exist_ok=True)
                dest = os.path.join(stage0_dir, f"seeds_{epsilon}.json")
                shutil.move(seeds_file, dest)
                # BUG FIX: this print used nested double quotes inside an
                # f-string (SyntaxError before Python 3.12) and referenced
                # stage0_dir, which was unbound whenever seeds_file did not
                # exist.  It now lives inside the guard and uses a plain name.
                print(f"[green]Unique results saved to {dest}[/green]")

        except Exception as e:
            print(f"[red]Error processing epsilon {epsilon}: {e}[/red]")
            # Clean up temporary directory if it exists
            if os.path.exists(temp_out_dir):
                shutil.rmtree(temp_out_dir)

    # Print summary
    print("\n[blue]--- Summary ---[/blue]")
    print(f"[blue]Total epsilon values tested: {len(epsilons)}[/blue]")
    print(f"[blue]Unique seed sets found: {len(seen_seed_sets)}[/blue]")
    print(f"[blue]Duplicates removed: {len(epsilons) - len(seen_seed_sets)}[/blue]")

    if seen_seed_sets:
        print("\n[green]Unique epsilon values kept:[/green]")
        for seed_tuple, epsilon in sorted(seen_seed_sets.items()):
            seed_count = len(seed_tuple)
            print(f"  {epsilon}: {seed_count} seed nodes")
+ help='Path to input edgelist file') + parser.add_argument('--out_dir', type=str, required=True, + help='Base output directory (results will be saved as out_dir_epsilon_X)') + parser.add_argument('--levels', type=int, required=True, + help='Number of levels to build') + parser.add_argument('--epsilon_start', type=float, default=1e4, + help='Starting epsilon value (default: 1e4)') + parser.add_argument('--epsilon_end', type=float, default=5e5, + help='Ending epsilon value (default: 5e5)') + parser.add_argument('--epsilon_step', type=float, default=1e4, + help='Epsilon step size (default: 1e4)') + parser.add_argument('--no_cleanup', action='store_true', + help='Do not remove duplicates (keep all results)') + + args = parser.parse_args() + + run_epsilon_sweep( + input_edgelist=args.input_edgelist, + out_dir=args.out_dir, + levels=args.levels, + epsilon_start=args.epsilon_start, + epsilon_end=args.epsilon_end, + epsilon_step=args.epsilon_step, + cleanup_duplicates=not args.no_cleanup + ) + +if __name__ == '__main__': + main() diff --git a/src/2_lrmc_bilevel.py b/src/2_lrmc_bilevel.py index 550ea0ef4a31d9b34d118d741ff633a9b58bdf14..571ca36e41902a8953a55ac81e4cb66977cd93d4 100644 --- a/src/2_lrmc_bilevel.py +++ b/src/2_lrmc_bilevel.py @@ -1,6 +1,4 @@ -# lrmc_bilevel.py # Bi-level Node↔Cluster message passing with fixed LRMC seeds -# Requires: torch, torch_geometric, torch_sparse import argparse, json, os from pathlib import Path @@ -19,6 +17,8 @@ from torch_geometric.loader import DataLoader from torch_geometric.datasets import Planetoid, TUDataset from torch_geometric.nn import GCNConv, global_mean_pool +from rich import print + # --------------------------- # Utilities: edges and seeds diff --git a/src/2_random_seed_sweep.sh b/src/2_random_seed_sweep.sh new file mode 100644 index 0000000000000000000000000000000000000000..e7bd49a2ad2a2aeed4dd349f4a2856452b055d0b --- /dev/null +++ b/src/2_random_seed_sweep.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# 
# ------------------------------------------------------------
# 2_random_seed_sweep.sh
#
# Run generate_lrmc_seeds.py for num_nodes = 1, 101, 201, …,
# 2601, 2701 and then once more for 2708.
#
# Usage: ./2_random_seed_sweep.sh
# ------------------------------------------------------------
def run_script(seed_path: Path, out_json: Path) -> None:
    """Invoke the summary script for one seed file, directing its JSON to *out_json*.

    Raises subprocess.CalledProcessError if the child process exits non-zero.
    """
    command = ["python3", SCRIPT_NAME, "--dataset", DATASET]
    command += ["--seeds", str(seed_path)]
    command += ["--hidden", HIDDEN, "--epochs", EPOCHS, "--lr", LR, "--runs", RUNS]
    command += ["-o", str(out_json)]
    command.extend(EXTRA_FLAGS)

    # Suppress the child's stdout/stderr entirely; check=True raises on failure.
    subprocess.run(command, check=True, cwd=Path.cwd(),
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
print(f"[red]No JSON seed files found in {SEEDS_DIR!s}[/red]") + return + + # Rich progress bar – one tick per seed file + try: + from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn + except ImportError: + print("[red]The 'rich' package is required – run: pip install rich[/red]") + return + + progress = Progress( + TextColumn("[bold cyan]{task.fields[seed]}"), + BarColumn(), + TimeRemainingColumn(), + transient=True, # hide the bar once done + ) + + with progress: + task_id = progress.add_task( + "Processing seeds", total=len(seed_files), seed="Preparing..." + ) + + for seed_path in seed_files: + seed_name = seed_path.stem # XXX (without .json) + out_json = SUMMARY_DIR / f"{seed_name}.json" + + # Run the script; JSON will be written by the script itself + try: + run_script(seed_path, out_json) + except Exception as exc: + print(f"[red]Failed: {seed_name}.json -> {exc}[/red]") + + # Update the progress bar + progress.update( + task_id, + advance=1, + seed=f"{seed_name}.json" + ) + + print(f"[green]All seeds processed – JSON summaries in {SUMMARY_DIR}/[/green]") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/__pycache__/generate_lrmc_seeds.cpython-312.pyc b/src/__pycache__/generate_lrmc_seeds.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..074b8e9be0afbb90fb74f01f5bf12f390d3e0d1c Binary files /dev/null and b/src/__pycache__/generate_lrmc_seeds.cpython-312.pyc differ diff --git a/src/cora_summary.png b/src/cora_summary.png new file mode 100644 index 0000000000000000000000000000000000000000..82abda88d5a2e8e5864bb7ac8afd3fa103828c17 Binary files /dev/null and b/src/cora_summary.png differ diff --git a/src/cora_summary_compare.png b/src/cora_summary_compare.png new file mode 100644 index 0000000000000000000000000000000000000000..5e4b8d3c7458851ac80326c650dbbfa75f13ef62 Binary files /dev/null and b/src/cora_summary_compare.png differ diff --git a/src/cora_summary_rand.png 
b/src/cora_summary_rand.png new file mode 100644 index 0000000000000000000000000000000000000000..a914011a04b985451cd44dbe3f502dd0e9eba6f0 Binary files /dev/null and b/src/cora_summary_rand.png differ diff --git a/src/export_edgelist.py b/src/export_edgelist.py index c59346001369d8716c3841ba2758f4bc97a4a78c..375ea3f7b315a69d14a67046bdd7e7a6447fcd6f 100644 --- a/src/export_edgelist.py +++ b/src/export_edgelist.py @@ -1,42 +1,42 @@ -#!/usr/bin/env python3 """ -This script extracts canonical – 0‑based, undirected, duplicate‑free – edgelists from -PyTorch Geometric datasets. +make_edgelists.py + +Create a canonical edgelist (or a directory of edgelists). Usage ----- - - python export_edgelist.py [--data_root ] [--dot ] + python make_edgelists.py [--data_root ] Arguments --------- -dataset_name : str - Name of a PyG dataset that can be loaded via `Planetoid(..)` or `TUDataset(..)`. - -edges_out : str - * For a single‑graph dataset (e.g. Planetoid Cora) – write to this file. - * For a multi‑graph dataset (e.g. TUDataset “Facebook”) – write to this - directory, one file per graph (`graph_000000.txt`, …). - ---dot path/to/file.dot : str, optional - If supplied, a GraphViz dot file is written that visualises all edgelists - (each graph gets its own subgraph in the file). - -The script prints *nothing* to stdout – the only side‑effect is the edge‑list -(and, optionally, the .dot) output. - +dataset_name + The name of the PyG dataset (e.g. "Cora", "TUDatasetName", etc.). +edges_out + * If the dataset contains a single graph (e.g. Planetoid Cora) – this is a + file path (`graph.txt`, `edges.txt`, …). + * If the dataset contains many graphs (e.g. 
TUDataset) – this is a + directory path where each graph is written as + `graph_000000.txt`, `graph_000001.txt`, … + +Examples +-------- +# One‑graph dataset (Planetoid Cora) +python make_edgelists.py Cora ./cora_edges.txt + +# Many‑graph dataset (TUDataset Facebook) +python make_edgelists.py Facebook ./facebook_edgelists """ from __future__ import annotations import argparse from pathlib import Path -from typing import Iterable, Set, Tuple, List +from typing import Iterable, Tuple, Set -# --------------------------------------------------------------------------- +# ------------------------------------------------------------- def canonical_edges(edge_index) -> Set[Tuple[int, int]]: - """Return a set of undirected edges `(u, v)` with `u < v` and `u != v`.""" + """Return a set of undirected (u,v) pairs with u Set[Tuple[int, int]]: def write_edges(out_file: Path, edges: Iterable[Tuple[int, int]]) -> None: - """Write edges to `out_file` in `u v\n` format.""" + """Write `u v` per line to `out_file`.""" out_file.parent.mkdir(parents=True, exist_ok=True) - with out_file.open("w", encoding="utf-8") as f: + with out_file.open("w") as f: for u, v in sorted(edges): f.write(f"{u} {v}\n") -def dump_dot(dot_path: Path, graph_edges: List[Set[Tuple[int, int]]]) -> None: - """Create a GraphViz dot file from a list of edge sets.""" - dot_lines: List[str] = ["graph G {"] - for i, edges in enumerate(graph_edges): - dot_lines.append(f" /* graph {i} */") - dot_lines.append(f" subgraph cluster_{i} {{") - dot_lines.append(" label = \"{}\";".format(i)) - # add nodes - nodes: Set[int] = {n for e in edges for n in e} - for n in sorted(nodes): - dot_lines.append(f" {n};") - # add edges - for u, v in sorted(edges): - dot_lines.append(f" {u} -- {v};") - dot_lines.append(" }") - dot_lines.append("}") - dot_path.parent.mkdir(parents=True, exist_ok=True) - dot_path.write_text("\n".join(dot_lines), encoding="utf-8") - - -# 
--------------------------------------------------------------------------- - -def process_planetoid( - root: Path, name: str, out_path: Path, dot_file: Path | None -) -> None: - """Single‑graph dataset (Planetoid).""" +def process_planetoid_dataset(root: Path, name: str, out_dir: Path | Path): + """Planetoid datasets contain a single graph.""" from torch_geometric.datasets import Planetoid ds = Planetoid(root=str(root), name=name) - edges = canonical_edges(ds[0].edge_index) + data = ds[0] # the only graph + edges = canonical_edges(data.edge_index) - if out_path.is_dir(): - out_file = out_path / "graph_000000.txt" + if isinstance(out_dir, Path) and out_dir.is_dir(): + out_file = out_dir / "graph_000000.txt" else: - out_file = out_path + out_file = out_dir write_edges(out_file, edges) - - if dot_file: - dump_dot(dot_file, [edges]) + # No output to stdout – the edgelist(s) are written to disk -def process_tudataset( - root: Path, name: str, out_dir: Path, dot_file: Path | None -) -> None: - """Multi‑graph dataset (TUDataset).""" +def process_tudataset(root: Path, name: str, out_dir: Path): + """TUDataset may contain many graphs – write each to /graph_XXXXXX.txt.""" from torch_geometric.datasets import TUDataset ds = TUDataset(root=str(root), name=name) out_dir.mkdir(parents=True, exist_ok=True) - if dot_file: - all_edges: List[Set[Tuple[int, int]]] = [] - for i, data in enumerate(ds): edges = canonical_edges(data.edge_index) out_file = out_dir / f"graph_{i:06d}.txt" write_edges(out_file, edges) - if dot_file: - all_edges.append(edges) - - if dot_file: - dump_dot(dot_file, all_edges) - -# --------------------------------------------------------------------------- def main() -> None: parser = argparse.ArgumentParser(description=__doc__.strip(), formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument("--data_root", default="./data", help="Root directory of PyG datasets") - parser.add_argument("dataset_name", help="PyG dataset name (e.g. 
Cora, WikipediaNetwork)") - parser.add_argument("edges_out", help="File (single‑graph) or directory (many‑graph) for edgelists") - parser.add_argument("--dot", dest="dot_file", type=str, default="", help="Optional .dot file for visualization") + parser.add_argument( + "--data_root", default="./data", help="Root directory for PyG datasets" + ) + parser.add_argument("dataset_name", help="PyG dataset name (e.g. Cora)") + parser.add_argument( + "edges_out", + help=( + "File path (for single‑graph datasets) or directory " + "(for multi‑graph datasets) to write the canonical edgelist(s)" + ), + ) args = parser.parse_args() root = Path(args.data_root) out_path = Path(args.edges_out) - dot_path = Path(args.dot_file) if args.dot_file else None - # Decide whether the dataset is Planetoid (single graph) or TUDataset (many) + # We try to guess whether the requested dataset is a Planetoid or TUDataset. + # If it can be loaded as a Planetoid we use that; otherwise we fall back to TUDataset. try: from torch_geometric.datasets import Planetoid - Planetoid(root=str(root), name=args.dataset_name) - dataset_type = "planetoid" - except Exception: # pragma: no cover + _ = Planetoid(root=str(root), name=args.dataset_name) + dataset_type = "Planetoid" + except Exception: # pragma: no cover – normal branch failure from torch_geometric.datasets import TUDataset - TUDataset(root=str(root), name=args.dataset_name) - dataset_type = "tudataset" + _ = TUDataset(root=str(root), name=args.dataset_name) + dataset_type = "TUDataset" - if dataset_type == "planetoid": - process_planetoid(root, args.dataset_name, out_path, dot_path) - else: # tudataset + # Dispatch + if dataset_type == "Planetoid": + process_planetoid_dataset(root, args.dataset_name, out_path) + else: # TUDataset if out_path.is_file(): - raise ValueError("For multi‑graph datasets (--tudataset) the output must be a directory") - process_tudataset(root, args.dataset_name, out_path, dot_path) + raise ValueError( + "For multi‑graph 
datasets (e.g. TUDataset) the output must be a directory" + ) + process_tudataset(root, args.dataset_name, out_path) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/generate_dataset_dot.py b/src/generate_dataset_dot.py new file mode 100644 index 0000000000000000000000000000000000000000..329c065adf7c3139b0542f2bc648a79aafea64f3 --- /dev/null +++ b/src/generate_dataset_dot.py @@ -0,0 +1,128 @@ +""" +Export a PyG dataset (Planetoid or Heterophilous) to a minimal Graphviz DOT. +- Nodes are colored by class. +- If --filter {train|val|test} is set, nodes in that split are colored red, + other nodes are left uncolored. +- Undirected edges are deduplicated; directed edges are written as‑is. +- For HeterophilousGraphDataset with multiple splits (e.g., Amazon‑ratings has 10), + use --split-index (default 0). +""" + +import argparse +from torch_geometric.datasets import Planetoid, HeterophilousGraphDataset + +PALETTE = [ + "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", + "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", + "#bcbd22", "#17becf" +] +HIGHLIGHT_RED = "#ff0000" + +def _extend_palette(base, need): + if need <= len(base): + return base[:need] + def brighten(hex_color, factor): + c = int(hex_color[1:], 16) + r = (c >> 16) & 255 + g = (c >> 8) & 255 + b = c & 255 + r = int(min(255, r + (255 - r) * factor)) + g = int(min(255, g + (255 - g) * factor)) + b = int(min(255, b + (255 - b) * factor)) + return f"#{r:02x}{g:02x}{b:02x}" + out = [] + for i in range(need): + base_hex = base[i % len(base)] + factor = 0.18 * (i // len(base)) + out.append(brighten(base_hex, factor)) + return out + +def _infer_num_classes(dataset, data): + num_classes = getattr(dataset, "num_classes", None) + if not isinstance(num_classes, int) or num_classes <= 0: + num_classes = int(data.y.max().item()) + 1 + return num_classes + +def _get_split_mask(data, which): + name = {"train": "train_mask", "val": "val_mask", "test": "test_mask"}[which] + m = getattr(data, 
def write_dot(path: str, data, num_classes: int, directed: bool, filter_split: str | None):
    """Write the graph in `data` to a Graphviz DOT file at `path`.

    Nodes are filled with a per-class color from the extended palette.
    If `filter_split` is given ('train'/'val'/'test'), nodes in that split
    are colored red and all other nodes are emitted without attributes.
    Undirected output deduplicates edges; self-loops are always dropped.
    NOTE(review): assumes `data` is a PyG Data-like object with `y`,
    `edge_index` and `num_nodes` — confirm against callers.
    """
    y = data.y
    edge_index = data.edge_index
    colors = _extend_palette(PALETTE, num_classes)
    highlight_mask = None
    if filter_split is not None:
        # Resolve the boolean split mask; may be None if the dataset lacks it.
        m = _get_split_mask(data, filter_split)
        if m is not None:
            highlight_mask = m.bool()
    # digraph/-> for directed output, graph/-- for undirected.
    gtype = "digraph" if directed else "graph"
    eop = "->" if directed else "--"
    with open(path, "w", encoding="utf-8") as f:
        f.write(f"{gtype} {{\n")
        for i in range(data.num_nodes):
            cls = int(y[i])
            # Out-of-range labels are wrapped back into the palette range.
            if cls < 0 or cls >= num_classes:
                cls = cls % num_classes
            base_col = colors[cls]
            if filter_split is not None:
                # Highlight mode: red for split members, bare node otherwise.
                if highlight_mask is not None and bool(highlight_mask[i]):
                    col = HIGHLIGHT_RED
                    f.write(f' {i} [color="{col}", style="filled", fillcolor="{col}", fontcolor="white"];\n')
                else:
                    f.write(f' {i} ;\n')
            else:
                # Default mode: every node filled with its class color.
                col = base_col
                f.write(f' {i} [color="{col}", style="filled", fillcolor="{col}", fontcolor="white"];\n')
        if directed:
            # Directed edges are written as-is (minus self-loops).
            for s, t in edge_index.t().tolist():
                if s != t:
                    f.write(f" {s} {eop} {t};\n")
        else:
            # Undirected: canonicalize as (min, max) and emit each pair once.
            seen = set()
            for s, t in edge_index.t().tolist():
                if s == t:
                    continue
                a, b = (s, t) if s <= t else (t, s)
                if (a, b) in seen:
                    continue
                seen.add((a, b))
                f.write(f" {a} {eop} {b};\n")
        f.write("}\n")
optional split highlighting." + ) + parser.add_argument("-o", "--output", default="graph.dot", help="Output .dot file (default: graph.dot)") + parser.add_argument("--directed", action="store_true", help="Write directed edges (default: undirected)") + parser.add_argument("--heterophilous", action="store_true", help="Use HeterophilousGraphDataset") + parser.add_argument("--name", default=None, help="Dataset name (Planetoid: Cora/CiteSeer/PubMed; Heterophilous: Amazon-ratings, Roman-empire, etc.)") + parser.add_argument("--root", default="data", help="Root folder for datasets (default: data)") + parser.add_argument("--split-index", type=int, default=0, help="Split index for heterophilous datasets (default: 0)") + parser.add_argument("--filter", choices=["train", "val", "test"], default=None, help="Highlight nodes in the selected split as red (others keep class colors)") + args = parser.parse_args() + dataset_name = args.name if args.name is not None else ("Amazon-ratings" if args.heterophilous else "Cora") + data, num_classes = load_graph(args.root, args.heterophilous, dataset_name, args.split_index) + write_dot(args.output, data, num_classes, args.directed, args.filter) + suffix = f", highlight={args.filter}" if args.filter else "" + print(f"Wrote {args.output} using {'HeterophilousGraphDataset' if args.heterophilous else 'Planetoid'}('{dataset_name}') | nodes={data.num_nodes}, edges={data.edge_index.size(1)}, classes={num_classes}{suffix}") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/generate_lrmc_seeds.py b/src/generate_lrmc_seeds.py new file mode 100644 index 0000000000000000000000000000000000000000..10bfb0d3bdd3a2d06ce9f826b0689b0050570fa3 --- /dev/null +++ b/src/generate_lrmc_seeds.py @@ -0,0 +1,377 @@ +import argparse +from rich import print +import glob +import os +import re +import shutil +import subprocess +from typing import Dict, List, Tuple, Iterable, Set +import random +from iv2_utils.iv2 import json_read, 
def read_edgelist(path: str) -> Iterable[Tuple[int, int]]:
    """Yield undirected edges from a whitespace-separated edge-list file.

    Blank lines and ``#`` comments are skipped, as are self-loops, lines with
    fewer than two fields, and lines whose first two fields are not integers.
    Each yielded pair is normalized so the smaller endpoint comes first;
    duplicates are NOT removed here.
    """
    with open(path, 'r') as handle:
        for raw in handle:
            text = raw.strip()
            if not text or text.startswith('#'):
                continue
            fields = text.split()
            if len(fields) < 2:
                continue
            try:
                u, v = int(fields[0]), int(fields[1])
            except ValueError:
                continue  # non-numeric endpoints: ignore the line
            if u == v:
                continue  # drop self-loops
            yield (u, v) if u < v else (v, u)
def run_java(
    java_exec: str,
    class_name: str,
    edgelist_path: str,
    out_json_path: str,
    epsilon: str,
    java_opts: List[str],
    quiet: bool = False,
) -> None:
    """Compile ``<class_name>.java`` with javac, then run it on an edgelist.

    The program is invoked as
    ``java [java_opts] <class_name> <edgelist> <out_json> <epsilon>``.
    With ``quiet=True`` progress messages and the subprocess's output are
    suppressed. Raises FileNotFoundError when javac is missing and
    CalledProcessError on a non-zero exit status of either step.
    """
    source_file = class_name + '.java'
    compile_cmd = ['javac', '-cp', '.', source_file]
    if not quiet:
        print(f"[blue]Compiling:[/blue] {' '.join(compile_cmd)}")
    try:
        subprocess.run(
            compile_cmd, check=True, capture_output=True, text=True, encoding='utf-8'
        )
    except FileNotFoundError:
        # JDK not installed / not on PATH.
        if not quiet:
            print("[red]Error: `javac` command not found. Is JDK installed and in your PATH?[/red]")
        raise
    except subprocess.CalledProcessError as e:
        # Surface the compiler's output before propagating the failure.
        if not quiet:
            print(f"[red]Java compilation failed. Return code: {e.returncode}[/red]")
            print("[red]stdout:[/red]\n" + e.stdout)
            print("[red]stderr:[/red]\n" + e.stderr)
        raise

    run_cmd = [java_exec] + java_opts + [class_name, edgelist_path, out_json_path, epsilon]
    if not quiet:
        print("[blue]Running:[/blue]", " ".join(run_cmd))

    if quiet:
        subprocess.run(run_cmd, check=True,
                       stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    else:
        subprocess.run(run_cmd, check=True)
def generate_random_seeds(edgelist_path: str, num_nodes: int, out_json_path: str):
    """Write a seeds JSON containing one cluster of randomly chosen nodes.

    Node IDs read from the edgelist are 0-indexed; the emitted ``seed_nodes``
    are 1-indexed, as expected by the downstream consumer. If ``num_nodes``
    exceeds the number of distinct nodes, every node is selected and a warning
    is printed.
    """
    nodes = set()
    for endpoints in read_edgelist(edgelist_path):
        nodes.update(endpoints)

    total = len(nodes)
    if num_nodes > total:
        print(f"[yellow]Warning: requested {num_nodes} random nodes, but only {total} unique nodes exist. Selecting all nodes.[/yellow]")
        selected_nodes = list(nodes)
    else:
        selected_nodes = random.sample(list(nodes), num_nodes)

    seed_data = {
        "clusters": [
            {
                "cluster_id": 1,
                # The downstream script expects 1-indexed nodes under "seed_nodes".
                "seed_nodes": [node_id + 1 for node_id in selected_nodes],
                "score": 1.0
            }
        ]
    }

    json_write(seed_data, out_json_path)
    print(f"[green]Generated random seeds with {len(selected_nodes)} nodes at {out_json_path}[/green]")
def build_multigraph_levels(args):
    """Build ``args.levels`` LRMC stages for every graph in ``args.graphs_dir``.

    Stage 0 runs the Java LRMC tool directly on each input edgelist; each later
    stage first coarsens the previous stage's edgelist using the previous
    stage's seeds (``coarsen_edgelist``), then runs the Java tool on the
    coarsened graph. Stage ``lvl`` outputs go to
    ``<out_dir>/stage<lvl>/graph_<id>.json`` (seeds) and ``.txt`` (edgelists).

    Args:
        args: object with ``graphs_dir``, ``glob``, ``out_dir``, ``levels``,
            ``java``, ``class_name``, ``epsilon`` and ``java_opts`` attributes
            (argparse Namespace or the local ``Args`` helper).

    Raises:
        SystemExit: when no graph files match the glob pattern.
    """
    ensure_dir(args.out_dir)
    # Enumerate graph files
    graph_files = sorted(glob.glob(os.path.join(args.graphs_dir, args.glob)))
    if not graph_files:
        raise SystemExit(f"No graph files found in {args.graphs_dir} with pattern {args.glob}")

    pattern = re.compile(r'(.*?)(\d+)(\.\w+)$') # capture numeric id
    def graph_id_from_path(p: str) -> str:
        # Derive a stable id from the filename, e.g. graph_000123.txt -> "000123".
        # The lazy prefix plus backtracking captures the LAST digit run before
        # the extension (same as the fallback regex below).
        base = os.path.basename(p)
        m = pattern.match(base)
        if m:
            return m.group(2).zfill(6) # zero-pad to 6 for consistency
        # fallback: strip extension
        stem = os.path.splitext(base)[0]
        m2 = re.search(r'(\d+)$', stem)
        return (m2.group(1).zfill(6) if m2 else stem)

    # Stage 0: run Java for each graph
    # Maps graph id -> edgelist path that fed the most recent stage; used as
    # the coarsening input for the next level.
    prev_stage_edgelists: Dict[str, str] = {}
    for lvl in range(args.levels):
        stage_dir = os.path.join(args.out_dir, f"stage{lvl}")
        ensure_dir(stage_dir)

        if lvl == 0:
            # First level: seeds are computed on the original graphs.
            for gpath in graph_files:
                gid = graph_id_from_path(gpath)
                seeds_out = os.path.join(stage_dir, f"graph_{gid}.json")
                run_java(args.java, args.class_name, gpath, seeds_out, args.epsilon, args.java_opts)
                prev_stage_edgelists[gid] = gpath
        else:
            # For each graph, coarsen previous edgelist using previous seeds, then run Java
            for gpath in graph_files:
                gid = graph_id_from_path(gpath)
                prev_edgelist = prev_stage_edgelists[gid]
                prev_seeds = os.path.join(args.out_dir, f"stage{lvl-1}", f"graph_{gid}.json")
                next_edgelist = os.path.join(stage_dir, f"graph_{gid}.txt")
                missing = coarsen_edgelist(prev_edgelist, prev_seeds, next_edgelist)
                if missing > 0:
                    print(f"[yellow]stage{lvl-1} graph_{gid}: {missing} edges had nodes missing from seeds; skipped.[/yellow]")

                seeds_out = os.path.join(stage_dir, f"graph_{gid}.json")
                run_java(args.java, args.class_name, next_edgelist, seeds_out, args.epsilon, args.java_opts)
                prev_stage_edgelists[gid] = next_edgelist
def build_lrmc_single_graph(input_edgelist: str, out_dir: str, levels: int, epsilon: str = '1e6',
                            java_exec: str = 'java', class_name: str = 'LRMCGenerateSingleCluster',
                            java_opts: List[str] = None, copy_inputs: bool = False) -> str:
    """Programmatic wrapper around ``build_single_graph_levels`` for one graph.

    Builds ``levels`` LRMC stages for ``input_edgelist`` under ``out_dir`` and
    returns ``<out_dir>/stage0/seeds_<epsilon>.json``.
    NOTE(review): the returned path is the *first* level's seeds file, not the
    final stage's — confirm that this is what callers expect.

    Args:
        input_edgelist: Path to input edgelist file
        out_dir: Output directory
        levels: Number of levels to build
        epsilon: Epsilon value for Java tool
        java_exec: Java executable path
        class_name: Fully qualified Java class name
        java_opts: Extra options for Java (defaults to an empty list)
        copy_inputs: Whether to copy input edgelist to stage0

    Returns:
        Path to the generated stage-0 seeds JSON file
    """
    cfg = Args()
    cfg.input_edgelist = input_edgelist
    cfg.out_dir = out_dir
    cfg.levels = levels
    cfg.epsilon = epsilon
    cfg.java = java_exec
    cfg.class_name = class_name
    cfg.java_opts = java_opts or []
    cfg.copy_inputs = copy_inputs

    build_single_graph_levels(cfg)

    # Seeds emitted by the first stage.
    return os.path.join(out_dir, "stage0", f"seeds_{epsilon}.json")
format similar to analyze_stats.py. + +Usage: + python analyze_lrmc_sweep.py /path/to/hyperparam_sweep.csv --topk 10 --variant pool +""" +import argparse +from pathlib import Path +from collections import defaultdict + +import numpy as np +import pandas as pd + +from rich.console import Console +from rich.table import Table +from rich.panel import Panel +from rich.text import Text +from rich import box + +# ------------------------ +# Helpers +# ------------------------ +def _fmt(x, digits=3): + if x is None or (isinstance(x, float) and (np.isnan(x) or np.isinf(x))): + return "-" + if isinstance(x, float): + return f"{x:.{digits}f}" + return str(x) + +def _to_float(series): + try: + return series.astype(float) + except Exception: + return pd.to_numeric(series, errors="coerce") + +def _unique_counts(df, cols): + return {c: df[c].nunique() for c in cols if c in df.columns} + +def _has_cols(df, cols): + return all(c in df.columns for c in cols) + +def _get_common_hparams(): + # Hyperparameters that exist for both 'baseline' and 'pool' + return ["hidden", "lr", "dropout", "self_loop_scale", "use_a2"] + +# ------------------------ +# Core analysis +# ------------------------ +def analyze(df: pd.DataFrame): + # Normalize column names if needed + df = df.copy() + # Expected columns + expected = [ + "dataset","variant","hidden","epochs","lr","wd","dropout", + "self_loop_scale","use_a2", + "lrmc_inv_weight","lrmc_gamma", + "seed","val_accuracy","test_accuracy" + ] + missing = [c for c in expected if c not in df.columns] + if missing: + raise ValueError(f"CSV missing expected columns: {missing}") + + # Cast numerics + num_cols = ["hidden","epochs","lr","wd","dropout","self_loop_scale", + "lrmc_inv_weight","lrmc_gamma","val_accuracy","test_accuracy"] + for c in num_cols: + df[c] = _to_float(df[c]) + + # Basic summary + summary = { + "file_rows": len(df), + "dataset": ", ".join(sorted(df["dataset"].astype(str).unique())), + "variants": ", 
".join(sorted(df["variant"].astype(str).unique())), + "columns": list(df.columns), + "n_columns": len(df.columns), + "unique_counts": _unique_counts(df, ["hidden","lr","dropout","self_loop_scale","use_a2","lrmc_inv_weight","lrmc_gamma"]), + "val_test_corr_all": df["val_accuracy"].corr(df["test_accuracy"]), + "val_test_corr_by_variant": df.groupby("variant").apply(lambda g: g["val_accuracy"].corr(g["test_accuracy"])).to_dict(), + } + + # Best configs (global and per variant) + def best_rows(by_cols=("val_accuracy","test_accuracy"), ascending=(False, False), topk=10, variant=None): + sub = df if variant is None else df[df["variant"]==variant] + if len(sub)==0: + return sub + return sub.sort_values(list(by_cols), ascending=list(ascending)).head(topk) + + best_overall = best_rows(topk=10) + best_by_variant = {v: best_rows(topk=5, variant=v) for v in df["variant"].unique()} + + # Per-hparam aggregates (marginal means) + hparams = ["hidden","lr","dropout","self_loop_scale","use_a2","lrmc_inv_weight","lrmc_gamma"] + per_param = {} + for hp in hparams: + g = df.groupby(hp).agg( + mean_val=("val_accuracy","mean"), + std_val=("val_accuracy","std"), + mean_test=("test_accuracy","mean"), + std_test=("test_accuracy","std"), + n=("val_accuracy","count"), + ).sort_values("mean_val", ascending=False) + per_param[hp] = g + + per_param_by_variant = {} + for v in df["variant"].unique(): + sub = df[df["variant"]==v] + per_param_by_variant[v] = {} + for hp in hparams: + g = sub.groupby(hp).agg( + mean_val=("val_accuracy","mean"), + std_val=("val_accuracy","std"), + mean_test=("test_accuracy","mean"), + std_test=("test_accuracy","std"), + n=("val_accuracy","count"), + ).sort_values("mean_val", ascending=False) + per_param_by_variant[v][hp] = g + + # Matched comparisons (baseline vs pool), using common hyperparameters + commons = _get_common_hparams() + matched = df.groupby(["variant"]+commons).agg( + mean_val=("val_accuracy","mean"), + mean_test=("test_accuracy","mean"), + 
def render_summary(console: Console, A):
    """Print the sweep-level summary panel.

    Args:
        console: rich Console to render to.
        A: analysis dict produced by ``analyze``; only ``A["summary"]`` is read.
    """
    s = A["summary"]
    title = Text("L‑RMC/GCN Hyperparameter Sweep — Summary", style="bold white")
    body = Text()
    # Each line: bold-green label followed by the plain value.
    body.append(f"File rows: ", style="bold green"); body.append(str(s["file_rows"])+"\n")
    body.append(f"Dataset(s): ", style="bold green"); body.append(f"{s['dataset']}\n")
    body.append(f"Variants: ", style="bold green"); body.append(f"{s['variants']}\n")
    body.append(f"Val/Test Corr (all): ", style="bold green"); body.append(f"{_fmt(s['val_test_corr_all'])}\n")
    # One correlation line per variant (e.g. baseline, pool).
    for v,c in s["val_test_corr_by_variant"].items():
        body.append(f"Val/Test Corr ({v}): ", style="bold green"); body.append(f"{_fmt(c)}\n")
    # Unique counts
    uc = s["unique_counts"]
    uc_text = ", ".join([f"{k}={v}" for k,v in uc.items()])
    body.append(f"Unique values per hparam: ", style="bold green"); body.append(uc_text+"\n")
    console.print(Panel(body, title=title, border_style="cyan", box=box.ROUNDED))
def render_per_param(console: Console, A, variant=None):
    """Render one table per hyperparameter with its marginal mean/std stats.

    Args:
        console: rich Console to render to.
        A: analysis dict produced by ``analyze``.
        variant: when None, uses the pooled ``A["per_param"]`` tables;
            otherwise uses ``A["per_param_by_variant"][variant]``.
    """
    title = f"Per‑Hyperparameter Effects (marginal means){'' if variant is None else f' — variant={variant}'}"
    console.print(Panel(Text(title, style="bold white"), border_style="magenta", box=box.ROUNDED))
    per_param = A["per_param"] if variant is None else A["per_param_by_variant"][variant]

    for hp, g in per_param.items():
        # `g` is a DataFrame indexed by the hyperparameter's values with
        # mean/std/count columns (built in analyze()).
        table = Table(title=f"{hp}", box=box.SIMPLE_HEAD)
        table.add_column(hp, style="cyan")
        table.add_column("n", style="white", justify="right")
        table.add_column("val_mean", style="yellow", justify="right")
        table.add_column("val_std", style="yellow", justify="right")
        table.add_column("test_mean", style="green", justify="right")
        table.add_column("test_std", style="green", justify="right")

        # Find best by val_mean and by test_mean
        # (idxmax returns the hyperparameter VALUE, since g is indexed by hp).
        if len(g)>0:
            best_val_idx = g["mean_val"].idxmax()
            best_test_idx = g["mean_test"].idxmax()
        else:
            best_val_idx = best_test_idx = None

        for idx, row in g.reset_index().iterrows():
            # `idx` is unused; `key` is the hyperparameter value for this row.
            key = row[hp]
            is_best_val = (key == best_val_idx)
            is_best_test = (key == best_test_idx)

            def mark(s, best):
                # Bold-green markup for the winning cell.
                return f"[bold green]{s}[/]" if best else s

            table.add_row(
                f"{key}",
                _fmt(row["n"]),
                mark(_fmt(row["mean_val"]), is_best_val),
                _fmt(row["std_val"]),
                mark(_fmt(row["mean_test"]), is_best_test),
                _fmt(row["std_test"]),
            )
        console.print(table)
0).mean()) if len(comp_best)>0 else float("nan") + + stats_text = Text() + stats_text.append("Pool > Baseline (by mean test across same settings): ", style="bold green") + stats_text.append(f"{_fmt(100*win_rate_mean, 1)}% of settings\n") + stats_text.append("Pool best > Baseline best (per setting): ", style="bold green") + stats_text.append(f"{_fmt(100*win_rate_best, 1)}% of settings\n") + console.print(Panel(stats_text, border_style="green", box=box.SQUARE)) + + # Show the top positive / negative settings + def table_from_df(df, deltas_col, title): + df = df.sort_values(deltas_col, ascending=False) + head = df.head(8) + tail = df.tail(8) + for part, name in [(head, "Top Gains (Pool minus Baseline)"), (tail, "Largest Drops (Pool minus Baseline)")]: + + table = Table(title=f"{title} — {name}", box=box.MINIMAL_HEAVY_HEAD) + for c in commons + ["mean_test_pool","mean_test_base","delta_test"]: + style = "green" if c in ("mean_test_pool","mean_test_base","delta_test") else "cyan" + table.add_column(c, style=style, justify="right") + for _,r in part.iterrows(): + row = [str(r[c]) for c in commons] + [_fmt(r["mean_test_pool"]), _fmt(r["mean_test_base"]), _fmt(r["delta_test"])] + table.add_row(*row) + console.print(table) + + if len(comp_mean)>0: + table_from_df(comp_mean, "delta_test", "Mean Test Accuracy (matched)") + if len(comp_best)>0: + # For best-vs-best we only need the deltas + df = comp_best.sort_values("delta_best_test", ascending=False) + head = df.head(8) + tail = df.tail(8) + for part, name in [(head, "Top Gains"), (tail, "Largest Drops")]: + table = Table(title=f"Best-vs-Best Test Accuracy — {name}", box=box.SIMPLE_HEAVY) + for c in commons + ["best_test_pool","best_test_base","delta_best_test"]: + style = "green" if c in ("best_test_pool","best_test_base","delta_best_test") else "cyan" + table.add_column(c, style=style, justify="right") + for _,r in part.iterrows(): + row = [str(r[c]) for c in commons] + [_fmt(r["best_test_pool"]), 
def recommend_settings(console: Console, A):
    """Recommend a configuration per variant based on marginal means and sanity checks."""
    per_variant = A["per_param_by_variant"]
    per_pool = per_variant.get("pool", {})
    per_base = per_variant.get("baseline", {})

    def best_by_mean_test(tables, hp):
        # Winner = the hyperparameter value with the highest marginal mean
        # test accuracy; None when the sweep never recorded this hp.
        if hp not in tables:
            return None
        return tables.get(hp, pd.DataFrame()).get("mean_test", pd.Series()).idxmax()

    shared = ["hidden", "lr", "dropout", "self_loop_scale", "use_a2"]
    pool_only = ["lrmc_inv_weight", "lrmc_gamma"]

    # Pool recommendations (favor generalization: choose by mean test accuracy).
    rec_pool = {hp: best_by_mean_test(per_pool, hp) for hp in shared + pool_only}
    # Baseline recommendations — no LRMC-specific knobs.
    rec_base = {hp: best_by_mean_test(per_base, hp) for hp in shared}

    # Render
    def render_panel(title, rec):
        txt = Text()
        for k, v in rec.items():
            txt.append(f"{k}: ", style="bold cyan")
            txt.append(f"{v}\n")
        console.print(Panel(txt, title=title, border_style="green", box=box.ROUNDED))

    render_panel("Recommended settings — variant=pool (by mean test)", rec_pool)
    render_panel("Recommended settings — variant=baseline (by mean test)", rec_base)
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.01,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.01,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.01,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.05,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.05,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.05,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.05,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.1,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.1,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.1,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,False,0.1,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.01,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.01,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.01,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.01,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.05,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.05,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.05,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.05,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.1,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.1,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.1,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.0,True,0.1,1.0,42,0.774,0.79 
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.01,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.01,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.01,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.01,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.05,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.05,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.05,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.05,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.1,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.1,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.1,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,False,0.1,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.01,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.01,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.01,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.01,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.05,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.05,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.05,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.05,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.1,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.1,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.1,0.9,42,0.774,0.79 
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.3,True,0.1,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.01,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.01,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.01,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.01,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.05,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.05,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.05,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.05,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.1,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.1,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.1,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,False,0.1,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.01,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.01,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.01,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.01,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.05,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.05,0.8,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.05,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.05,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.1,0.7,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.1,0.8,42,0.774,0.79 
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.1,0.9,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.4,0.5,True,0.1,1.0,42,0.774,0.79 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.01,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.01,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.01,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.01,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.05,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.05,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.05,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.05,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.1,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.1,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.1,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,False,0.1,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.01,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.01,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.01,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.01,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.05,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.05,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.05,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.05,1.0,42,0.768,0.794 
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.1,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.1,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.1,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.0,True,0.1,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.01,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.01,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.01,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.01,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.05,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.05,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.05,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.05,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.1,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.1,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.1,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,False,0.1,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.01,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.01,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.01,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.01,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.05,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.05,0.8,42,0.768,0.794 
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.05,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.05,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.1,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.1,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.1,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.3,True,0.1,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.01,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.01,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.01,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.01,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.05,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.05,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.05,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.05,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.1,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.1,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.1,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,False,0.1,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.01,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.01,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.01,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.01,1.0,42,0.768,0.794 
+Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.05,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.05,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.05,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.05,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.1,0.7,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.1,0.8,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.1,0.9,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.001,0.0005,0.5,0.5,True,0.1,1.0,42,0.768,0.794 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,False,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.01,0.9,42,0.784,0.806 
+Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.0,True,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,False,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.01,0.8,42,0.784,0.806 
+Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.3,True,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,False,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.01,0.7,42,0.784,0.806 
+Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.4,0.5,True,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,False,0.1,1.0,42,0.784,0.806 
+Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.0,True,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.1,0.9,42,0.784,0.806 
+Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,False,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.3,True,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.1,0.8,42,0.784,0.806 
+Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,False,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.01,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.01,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.01,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.01,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.05,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.05,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.05,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.05,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.1,0.7,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.1,0.8,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.1,0.9,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.01,0.0005,0.5,0.5,True,0.1,1.0,42,0.784,0.806 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.01,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.01,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.01,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.01,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.05,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.05,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.05,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.05,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.1,0.7,42,0.79,0.805 
+Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.1,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.1,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,False,0.1,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.01,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.01,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.01,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.01,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.05,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.05,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.05,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.05,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.1,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.1,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.1,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.0,True,0.1,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.01,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.01,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.01,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.01,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.05,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.05,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.05,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.05,1.0,42,0.79,0.805 
+Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.1,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.1,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.1,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,False,0.1,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.01,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.01,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.01,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.01,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.05,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.05,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.05,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.05,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.1,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.1,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.1,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.3,True,0.1,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.01,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.01,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.01,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.01,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.05,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.05,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.05,0.9,42,0.79,0.805 
+Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.05,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.1,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.1,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.1,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,False,0.1,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.01,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.01,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.01,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.01,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.05,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.05,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.05,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.05,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.1,0.7,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.1,0.8,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.1,0.9,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.4,0.5,True,0.1,1.0,42,0.79,0.805 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.01,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.01,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.01,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.01,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.05,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.05,0.8,42,0.796,0.809 
+Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.05,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.05,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.1,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.1,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.1,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,False,0.1,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.05,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.05,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.05,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.05,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.1,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.1,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.1,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.1,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.01,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.01,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.01,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.01,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.05,0.7,42,0.796,0.809 
+Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.05,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.05,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.05,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.1,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.1,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.1,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,False,0.1,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.01,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.01,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.01,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.01,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.05,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.05,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.05,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.05,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.1,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.1,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.1,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.3,True,0.1,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.01,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.01,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.01,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.01,1.0,42,0.796,0.809 
+Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.05,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.05,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.05,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.05,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.1,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.1,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.1,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,False,0.1,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.01,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.01,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.01,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.01,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.05,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.05,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.05,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.05,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.1,0.7,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.1,0.8,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.1,0.9,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.5,True,0.1,1.0,42,0.796,0.809 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.01,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.01,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.01,0.9,42,0.798,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.01,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.05,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.05,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.05,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.1,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.1,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.1,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,False,0.1,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.01,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.01,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.01,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.01,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.05,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.05,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.05,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.1,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.1,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.1,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.0,True,0.1,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.01,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.01,0.8,42,0.798,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.01,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.01,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.05,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.05,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.05,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.1,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.1,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.1,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,False,0.1,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.01,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.01,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.01,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.01,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.05,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.05,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.05,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.1,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.1,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.1,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.3,True,0.1,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.01,0.7,42,0.798,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.01,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.01,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.01,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.05,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.05,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.05,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.1,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.1,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.1,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,False,0.1,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.01,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.01,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.01,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.01,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.05,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.05,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.05,1.0,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.1,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.1,0.8,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.1,0.9,42,0.798,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.4,0.5,True,0.1,1.0,42,0.798,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.01,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.01,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.05,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.05,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.05,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.1,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.1,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.1,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,False,0.1,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.01,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.01,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.05,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.05,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.05,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.1,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.1,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.1,0.9,42,0.796,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.0,True,0.1,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.01,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.01,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.05,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.05,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.05,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.1,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.1,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.1,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,False,0.1,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.01,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.01,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.05,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.05,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.05,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.1,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.1,0.8,42,0.796,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.1,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.3,True,0.1,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.01,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.01,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.05,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.05,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.05,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.1,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.1,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.1,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,False,0.1,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.01,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.01,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.05,0.7,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.05,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.05,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.1,0.7,42,0.796,0.8 
+Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.1,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.1,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.001,0.0005,0.5,0.5,True,0.1,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.01,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.01,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.01,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.01,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.05,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.05,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.05,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.05,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.1,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.1,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.1,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,False,0.1,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.01,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.01,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.01,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.01,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.05,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.05,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.05,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.05,1.0,42,0.802,0.806 
+Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.1,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.1,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.1,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.0,True,0.1,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.01,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.01,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.01,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.01,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.05,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.05,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.05,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.05,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.1,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.1,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.1,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,False,0.1,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.01,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.01,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.01,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.01,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.05,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.05,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.05,0.9,42,0.802,0.806 
+Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.05,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.1,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.1,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.1,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.3,True,0.1,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.01,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.01,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.01,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.01,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.05,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.05,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.05,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.05,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.1,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.1,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.1,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,False,0.1,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.01,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.01,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.01,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.01,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.05,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.05,0.8,42,0.802,0.806 
+Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.05,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.05,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.1,0.7,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.1,0.8,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.1,0.9,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.4,0.5,True,0.1,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.01,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.01,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.01,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.05,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.05,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.05,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.05,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.1,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.1,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.1,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,False,0.1,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.01,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.01,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.01,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.05,0.7,42,0.802,0.81 
+Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.05,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.05,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.05,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.1,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.1,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.1,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.0,True,0.1,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.01,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.01,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.01,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.05,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.05,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.05,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.05,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.1,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.1,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.1,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,False,0.1,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.01,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.01,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.01,1.0,42,0.802,0.81 
+Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.05,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.05,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.05,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.05,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.1,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.1,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.1,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.3,True,0.1,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.01,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.01,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.01,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.05,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.05,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.05,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.05,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.1,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.1,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.1,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,False,0.1,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.01,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.01,0.9,42,0.802,0.81 
+Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.01,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.05,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.05,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.05,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.05,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.1,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.1,0.8,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.1,0.9,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.01,0.0005,0.5,0.5,True,0.1,1.0,42,0.802,0.81 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,False,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.01,0.8,42,0.81,0.818 
+Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.0,True,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,False,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.01,0.7,42,0.81,0.818 
+Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.3,True,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,False,0.1,1.0,42,0.81,0.818 
+Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.4,0.5,True,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.1,0.9,42,0.81,0.818 
+Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,False,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.1,0.8,42,0.81,0.818 
+Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,False,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.3,True,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.1,0.7,42,0.81,0.818 
+Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,False,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.01,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.01,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.01,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.01,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.05,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.05,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.05,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.05,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.1,0.7,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.1,0.8,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.1,0.9,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.5,True,0.1,1.0,42,0.81,0.818 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.01,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.01,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.01,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.01,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.05,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.05,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.05,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.05,1.0,42,0.786,0.8 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.1,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.1,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.1,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,False,0.1,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.01,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.01,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.01,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.01,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.05,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.05,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.05,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.05,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.1,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.1,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.1,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.0,True,0.1,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.01,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.01,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.01,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.01,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.05,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.05,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.05,0.9,42,0.786,0.8 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.05,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.1,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.1,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.1,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,False,0.1,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.01,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.01,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.01,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.01,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.05,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.05,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.05,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.05,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.1,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.1,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.1,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.3,True,0.1,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.01,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.01,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.01,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.01,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.05,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.05,0.8,42,0.786,0.8 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.05,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.05,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.1,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.1,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.1,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,False,0.1,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.01,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.01,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.01,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.01,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.05,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.05,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.05,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.05,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.1,0.7,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.1,0.8,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.1,0.9,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.4,0.5,True,0.1,1.0,42,0.786,0.8 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.01,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.01,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.01,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.01,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.05,0.7,42,0.786,0.801 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.05,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.05,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.05,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.1,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.1,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.1,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,False,0.1,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.01,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.01,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.01,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.01,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.05,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.05,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.05,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.05,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.1,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.1,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.1,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.0,True,0.1,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.01,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.01,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.01,0.9,42,0.786,0.801 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.01,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.05,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.05,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.05,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.05,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.1,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.1,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.1,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,False,0.1,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.01,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.01,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.01,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.01,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.05,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.05,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.05,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.05,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.1,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.1,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.1,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.3,True,0.1,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.01,0.7,42,0.786,0.801 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.01,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.01,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.01,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.05,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.05,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.05,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.05,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.1,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.1,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.1,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,False,0.1,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.01,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.01,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.01,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.01,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.05,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.05,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.05,0.9,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.05,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.1,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.1,0.8,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.1,0.9,42,0.786,0.801 
+Cora,seeds_5e4.json,baseline,64,200,0.001,0.0005,0.5,0.5,True,0.1,1.0,42,0.786,0.801 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.01,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.01,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.05,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.05,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.05,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.1,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.1,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.1,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,False,0.1,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.01,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.01,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.05,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.05,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.05,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.1,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.1,0.8,42,0.79,0.793 
+Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.1,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.0,True,0.1,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.01,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.01,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.05,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.05,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.05,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.1,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.1,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.1,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,False,0.1,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.01,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.01,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.05,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.05,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.05,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.1,0.7,42,0.79,0.793 
+Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.1,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.1,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.3,True,0.1,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.01,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.01,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.05,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.05,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.05,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.1,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.1,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.1,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,False,0.1,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.01,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.01,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.05,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.05,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.05,1.0,42,0.79,0.793 
+Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.1,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.1,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.1,0.9,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.4,0.5,True,0.1,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.01,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.01,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.01,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.01,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.05,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.05,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.05,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.05,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.1,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.1,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.1,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,False,0.1,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.01,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.01,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.01,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.01,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.05,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.05,0.9,42,0.792,0.809 
+Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.05,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.1,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.1,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.1,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.0,True,0.1,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.01,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.01,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.01,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.01,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.05,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.05,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.05,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.05,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.1,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.1,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.1,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,False,0.1,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.01,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.01,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.01,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.01,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.05,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.05,0.8,42,0.792,0.809 
+Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.05,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.05,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.1,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.1,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.1,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.3,True,0.1,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.01,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.01,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.01,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.01,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.05,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.05,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.05,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.05,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.1,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.1,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.1,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,False,0.1,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.01,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.01,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.01,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.01,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.05,0.7,42,0.792,0.809 
+Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.05,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.05,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.05,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.1,0.7,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.1,0.8,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.1,0.9,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.01,0.0005,0.5,0.5,True,0.1,1.0,42,0.792,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.01,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.01,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.01,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.01,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.05,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.05,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.05,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.05,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.1,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.1,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.1,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,False,0.1,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.01,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.01,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.01,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.01,1.0,42,0.788,0.809 
+Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.05,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.05,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.05,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.05,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.1,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.1,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.1,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.0,True,0.1,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.01,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.01,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.01,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.01,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.05,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.05,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.05,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.05,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.1,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.1,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.1,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,False,0.1,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.01,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.01,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.01,0.9,42,0.788,0.809 
+Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.01,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.05,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.05,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.05,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.05,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.1,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.1,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.1,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.3,True,0.1,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.01,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.01,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.01,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.01,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.05,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.05,0.8,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.05,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.05,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.1,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.1,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.1,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,False,0.1,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.01,0.7,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.01,0.8,42,0.788,0.809 
+Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.01,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.01,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.05,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.05,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.05,0.9,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.05,1.0,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.1,0.7,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.1,0.8,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.1,0.9,42,0.788,0.809 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.4,0.5,True,0.1,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.01,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.01,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.01,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.01,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.05,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.05,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.05,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.05,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.1,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.1,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.1,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,False,0.1,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.7,42,0.804,0.814 
+Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.05,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.05,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.05,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.05,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.1,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.1,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.1,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.1,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.01,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.01,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.01,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.01,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.05,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.05,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.05,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.05,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.1,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.1,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.1,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,False,0.1,1.0,42,0.804,0.814 
+Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.01,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.01,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.01,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.01,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.05,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.05,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.05,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.05,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.1,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.1,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.1,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.3,True,0.1,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.01,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.01,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.01,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.01,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.05,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.05,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.05,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.05,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.1,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.1,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.1,0.9,42,0.804,0.814 
+Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,False,0.1,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.01,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.01,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.01,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.01,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.05,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.05,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.05,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.05,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.1,0.7,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.1,0.8,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.1,0.9,42,0.804,0.814 +Cora,seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.5,True,0.1,1.0,42,0.804,0.814 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.01,0.7,42,0.79,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.01,0.8,42,0.792,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.01,0.9,42,0.794,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.01,1.0,42,0.79,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.05,0.7,42,0.788,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.05,0.8,42,0.792,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.05,0.9,42,0.792,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.05,1.0,42,0.794,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.1,0.7,42,0.788,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.1,0.8,42,0.788,0.772 
+Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.1,0.9,42,0.786,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,False,0.1,1.0,42,0.784,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.01,0.7,42,0.794,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.01,0.8,42,0.794,0.783 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.01,0.9,42,0.794,0.778 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.01,1.0,42,0.792,0.76 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.05,0.7,42,0.796,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.05,0.8,42,0.796,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.05,0.9,42,0.792,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.05,1.0,42,0.792,0.753 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.1,0.7,42,0.788,0.763 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.1,0.8,42,0.792,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.1,0.9,42,0.792,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.0,True,0.1,1.0,42,0.794,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.01,0.7,42,0.792,0.783 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.01,0.8,42,0.794,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.01,0.9,42,0.794,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.01,1.0,42,0.796,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.05,0.7,42,0.792,0.767 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.05,0.8,42,0.794,0.778 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.05,0.9,42,0.794,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.05,1.0,42,0.792,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.1,0.7,42,0.788,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.1,0.8,42,0.794,0.765 
+Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.1,0.9,42,0.794,0.765 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,False,0.1,1.0,42,0.786,0.764 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.01,0.7,42,0.804,0.788 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.01,0.8,42,0.804,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.01,0.9,42,0.804,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.01,1.0,42,0.804,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.05,0.7,42,0.804,0.784 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.05,0.8,42,0.806,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.05,0.9,42,0.806,0.778 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.05,1.0,42,0.804,0.778 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.1,0.7,42,0.802,0.77 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.1,0.8,42,0.802,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.1,0.9,42,0.8,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.3,True,0.1,1.0,42,0.8,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.01,0.7,42,0.798,0.784 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.01,0.8,42,0.8,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.01,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.01,1.0,42,0.798,0.777 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.05,0.7,42,0.798,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.05,0.8,42,0.798,0.767 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.05,0.9,42,0.796,0.766 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.05,1.0,42,0.794,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.1,0.7,42,0.796,0.763 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.1,0.8,42,0.794,0.763 
+Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.1,0.9,42,0.792,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,False,0.1,1.0,42,0.796,0.77 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.01,0.7,42,0.804,0.777 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.01,0.8,42,0.802,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.01,0.9,42,0.802,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.01,1.0,42,0.8,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.05,0.7,42,0.804,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.05,0.8,42,0.802,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.05,0.9,42,0.802,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.05,1.0,42,0.8,0.767 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.1,0.7,42,0.798,0.767 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.1,0.8,42,0.798,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.1,0.9,42,0.802,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.4,0.5,True,0.1,1.0,42,0.802,0.77 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.01,0.7,42,0.79,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.01,0.8,42,0.798,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.01,0.9,42,0.796,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.01,1.0,42,0.796,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.05,0.7,42,0.794,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.05,0.8,42,0.796,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.05,0.9,42,0.784,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.05,1.0,42,0.784,0.762 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.1,0.7,42,0.782,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.1,0.8,42,0.78,0.768 
+Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.1,0.9,42,0.784,0.759 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,False,0.1,1.0,42,0.784,0.754 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.01,0.7,42,0.794,0.76 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.01,0.8,42,0.792,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.01,0.9,42,0.794,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.01,1.0,42,0.79,0.753 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.05,0.7,42,0.79,0.766 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.756 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.05,0.9,42,0.79,0.759 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.05,1.0,42,0.79,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.1,0.7,42,0.79,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.1,0.8,42,0.792,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.1,0.9,42,0.798,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.0,True,0.1,1.0,42,0.792,0.746 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.01,0.7,42,0.792,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.01,0.8,42,0.792,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.01,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.01,1.0,42,0.798,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.05,0.7,42,0.794,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.05,0.8,42,0.794,0.778 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.05,0.9,42,0.794,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.05,1.0,42,0.792,0.767 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.1,0.7,42,0.788,0.776 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.1,0.8,42,0.792,0.769 
+Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.1,0.9,42,0.786,0.767 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,False,0.1,1.0,42,0.786,0.765 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.01,0.7,42,0.804,0.789 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.01,0.8,42,0.804,0.784 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.01,0.9,42,0.802,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.01,1.0,42,0.804,0.782 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.05,0.7,42,0.804,0.782 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.05,0.8,42,0.804,0.783 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.05,0.9,42,0.802,0.777 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.05,1.0,42,0.802,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.1,0.7,42,0.8,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.1,0.8,42,0.802,0.779 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.1,0.9,42,0.802,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.3,True,0.1,1.0,42,0.8,0.771 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.01,0.7,42,0.8,0.788 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.01,0.8,42,0.796,0.773 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.01,0.9,42,0.798,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.01,1.0,42,0.798,0.772 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.05,0.7,42,0.796,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.05,0.8,42,0.798,0.778 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.05,0.9,42,0.798,0.772 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.05,1.0,42,0.794,0.772 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.1,0.7,42,0.796,0.777 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.1,0.8,42,0.796,0.773 
+Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.1,0.9,42,0.794,0.77 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,False,0.1,1.0,42,0.792,0.766 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.01,0.7,42,0.802,0.777 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.01,0.8,42,0.8,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.01,0.9,42,0.802,0.774 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.01,1.0,42,0.8,0.768 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.05,0.7,42,0.802,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.05,0.8,42,0.802,0.769 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.05,0.9,42,0.8,0.766 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.05,1.0,42,0.798,0.766 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.1,0.7,42,0.798,0.78 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.1,0.8,42,0.796,0.766 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.1,0.9,42,0.798,0.775 +Cora,seeds_5e4.json,pool,16,200,0.001,0.0005,0.5,0.5,True,0.1,1.0,42,0.8,0.77 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.01,0.7,42,0.796,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.01,0.8,42,0.796,0.781 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.01,0.9,42,0.8,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.01,1.0,42,0.792,0.782 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.05,0.7,42,0.796,0.786 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.05,0.8,42,0.796,0.781 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.05,0.9,42,0.79,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.05,1.0,42,0.792,0.773 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.1,0.7,42,0.796,0.787 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.1,0.8,42,0.794,0.786 
+Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.1,0.9,42,0.788,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,False,0.1,1.0,42,0.786,0.773 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.01,0.7,42,0.8,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.01,0.8,42,0.796,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.01,0.9,42,0.794,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.01,1.0,42,0.796,0.786 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.05,0.7,42,0.796,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.05,0.8,42,0.796,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.05,0.9,42,0.792,0.779 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.05,1.0,42,0.792,0.76 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.1,0.7,42,0.788,0.782 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.1,0.8,42,0.784,0.767 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.1,0.9,42,0.786,0.758 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.0,True,0.1,1.0,42,0.79,0.759 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.01,0.7,42,0.806,0.796 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.01,0.8,42,0.81,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.01,0.9,42,0.806,0.794 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.01,1.0,42,0.802,0.797 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.05,0.7,42,0.8,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.05,0.8,42,0.798,0.787 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.05,0.9,42,0.804,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.05,1.0,42,0.798,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.1,0.7,42,0.794,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.1,0.8,42,0.792,0.783 
+Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.1,0.9,42,0.792,0.78 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,False,0.1,1.0,42,0.788,0.78 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.01,0.7,42,0.802,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.01,0.8,42,0.804,0.798 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.01,0.9,42,0.804,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.01,1.0,42,0.806,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.05,0.7,42,0.802,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.05,0.8,42,0.802,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.05,0.9,42,0.802,0.79 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.05,1.0,42,0.804,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.1,0.7,42,0.804,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.1,0.8,42,0.802,0.784 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.1,0.9,42,0.802,0.78 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.3,True,0.1,1.0,42,0.802,0.782 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.01,0.7,42,0.8,0.796 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.01,0.8,42,0.796,0.796 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.01,0.9,42,0.804,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.01,1.0,42,0.802,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.05,0.7,42,0.794,0.794 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.05,0.8,42,0.796,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.05,0.9,42,0.798,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.05,1.0,42,0.798,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.1,0.7,42,0.8,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.1,0.8,42,0.788,0.772 
+Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.1,0.9,42,0.792,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,False,0.1,1.0,42,0.79,0.779 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.01,0.7,42,0.804,0.806 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.01,0.8,42,0.802,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.01,0.9,42,0.806,0.797 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.01,1.0,42,0.804,0.795 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.05,0.7,42,0.796,0.794 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.05,0.8,42,0.798,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.05,0.9,42,0.8,0.797 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.05,1.0,42,0.804,0.786 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.1,0.7,42,0.804,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.1,0.8,42,0.802,0.776 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.1,0.9,42,0.796,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.4,0.5,True,0.1,1.0,42,0.798,0.783 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.01,0.7,42,0.8,0.801 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.01,0.8,42,0.798,0.795 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.01,0.9,42,0.8,0.783 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.01,1.0,42,0.796,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.05,0.7,42,0.792,0.79 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.05,0.8,42,0.796,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.05,0.9,42,0.788,0.781 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.05,1.0,42,0.794,0.782 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.1,0.7,42,0.79,0.784 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.1,0.8,42,0.788,0.784 
+Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.1,0.9,42,0.79,0.78 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,False,0.1,1.0,42,0.786,0.775 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.01,0.7,42,0.8,0.794 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.01,0.8,42,0.812,0.797 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.01,0.9,42,0.798,0.79 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.01,1.0,42,0.802,0.79 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.05,0.7,42,0.79,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.05,0.8,42,0.794,0.783 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.05,0.9,42,0.794,0.777 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.05,1.0,42,0.798,0.766 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.1,0.7,42,0.792,0.777 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.1,0.8,42,0.788,0.777 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.1,0.9,42,0.79,0.766 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.0,True,0.1,1.0,42,0.794,0.75 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.01,0.7,42,0.812,0.8 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.01,0.8,42,0.808,0.795 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.01,0.9,42,0.806,0.795 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.01,1.0,42,0.802,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.05,0.7,42,0.806,0.796 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.05,0.8,42,0.802,0.796 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.05,0.9,42,0.804,0.787 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.05,1.0,42,0.798,0.785 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.1,0.7,42,0.796,0.787 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.1,0.8,42,0.798,0.786 
+Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.1,0.9,42,0.796,0.782 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,False,0.1,1.0,42,0.796,0.779 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.01,0.7,42,0.81,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.01,0.8,42,0.81,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.01,0.9,42,0.808,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.01,1.0,42,0.808,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.05,0.7,42,0.806,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.05,0.8,42,0.806,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.05,0.9,42,0.808,0.789 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.05,1.0,42,0.812,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.1,0.7,42,0.79,0.773 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.1,0.8,42,0.798,0.794 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.1,0.9,42,0.808,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.3,True,0.1,1.0,42,0.802,0.782 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.01,0.7,42,0.81,0.803 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.01,0.8,42,0.808,0.799 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.01,0.9,42,0.81,0.792 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.01,1.0,42,0.806,0.795 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.05,0.7,42,0.806,0.802 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.05,0.8,42,0.806,0.79 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.05,0.9,42,0.802,0.79 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.05,1.0,42,0.802,0.786 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.1,0.7,42,0.796,0.797 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.1,0.8,42,0.796,0.792 
+Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.1,0.9,42,0.8,0.788 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,False,0.1,1.0,42,0.794,0.781 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.01,0.7,42,0.808,0.806 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.01,0.8,42,0.798,0.796 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.01,0.9,42,0.802,0.793 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.01,1.0,42,0.808,0.799 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.05,0.7,42,0.8,0.8 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.05,0.8,42,0.802,0.802 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.05,0.9,42,0.802,0.797 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.05,1.0,42,0.804,0.794 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.1,0.7,42,0.788,0.776 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.1,0.8,42,0.794,0.791 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.1,0.9,42,0.792,0.773 +Cora,seeds_5e4.json,pool,16,200,0.01,0.0005,0.5,0.5,True,0.1,1.0,42,0.794,0.783 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.01,0.7,42,0.794,0.79 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.01,0.8,42,0.796,0.799 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.01,0.9,42,0.798,0.812 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.01,1.0,42,0.792,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.05,0.7,42,0.79,0.76 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.05,0.8,42,0.796,0.763 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.05,0.9,42,0.792,0.766 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.05,1.0,42,0.788,0.788 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.1,0.7,42,0.796,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.1,0.8,42,0.778,0.752 
+Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.1,0.9,42,0.786,0.772 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,False,0.1,1.0,42,0.792,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.01,0.7,42,0.8,0.798 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.01,0.8,42,0.808,0.797 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.01,0.9,42,0.808,0.808 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.01,1.0,42,0.804,0.799 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.05,0.7,42,0.8,0.785 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.05,0.8,42,0.81,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.05,0.9,42,0.8,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.05,1.0,42,0.798,0.793 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.1,0.7,42,0.788,0.773 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.1,0.8,42,0.804,0.796 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.1,0.9,42,0.804,0.796 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.0,True,0.1,1.0,42,0.798,0.776 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.01,0.7,42,0.794,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.01,0.8,42,0.794,0.794 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.01,0.9,42,0.792,0.788 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.01,1.0,42,0.804,0.785 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.05,0.7,42,0.802,0.774 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.05,0.8,42,0.804,0.791 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.05,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.05,1.0,42,0.788,0.779 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.1,0.7,42,0.792,0.748 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.1,0.8,42,0.782,0.772 
+Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.1,0.9,42,0.788,0.785 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,False,0.1,1.0,42,0.788,0.771 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.01,0.7,42,0.792,0.788 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.01,0.8,42,0.798,0.81 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.01,0.9,42,0.792,0.796 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.01,1.0,42,0.798,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.05,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.05,0.8,42,0.798,0.782 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.05,0.9,42,0.786,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.05,1.0,42,0.784,0.774 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.1,0.7,42,0.792,0.776 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.1,0.8,42,0.776,0.771 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.1,0.9,42,0.786,0.771 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.3,True,0.1,1.0,42,0.8,0.768 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.01,0.7,42,0.804,0.806 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.01,0.8,42,0.8,0.804 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.01,0.9,42,0.804,0.797 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.01,1.0,42,0.788,0.803 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.05,0.7,42,0.81,0.797 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.05,0.8,42,0.796,0.773 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.05,0.9,42,0.794,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.05,1.0,42,0.804,0.782 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.1,0.7,42,0.8,0.784 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.1,0.8,42,0.786,0.785 
+Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.1,0.9,42,0.776,0.797 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,False,0.1,1.0,42,0.788,0.772 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.01,0.7,42,0.808,0.792 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.01,0.8,42,0.808,0.813 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.01,0.9,42,0.798,0.81 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.01,1.0,42,0.796,0.799 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.05,0.7,42,0.794,0.8 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.05,0.8,42,0.792,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.05,0.9,42,0.8,0.8 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.05,1.0,42,0.808,0.809 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.1,0.7,42,0.79,0.773 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.1,0.8,42,0.788,0.78 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.1,0.9,42,0.784,0.779 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.4,0.5,True,0.1,1.0,42,0.794,0.792 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.01,0.7,42,0.79,0.785 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.01,0.8,42,0.802,0.795 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.01,0.9,42,0.782,0.797 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.01,1.0,42,0.792,0.798 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.05,0.7,42,0.774,0.783 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.05,0.8,42,0.794,0.774 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.05,0.9,42,0.782,0.778 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.05,1.0,42,0.784,0.774 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.1,0.7,42,0.776,0.759 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.1,0.8,42,0.788,0.776 
+Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.1,0.9,42,0.78,0.76 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,False,0.1,1.0,42,0.782,0.76 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.7,42,0.796,0.785 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.8,42,0.796,0.792 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.9,42,0.798,0.792 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,1.0,42,0.806,0.803 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.05,0.7,42,0.802,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.05,0.8,42,0.786,0.781 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.05,0.9,42,0.794,0.764 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.05,1.0,42,0.79,0.767 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.1,0.7,42,0.794,0.768 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.1,0.8,42,0.794,0.759 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.1,0.9,42,0.792,0.769 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.1,1.0,42,0.778,0.763 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.01,0.7,42,0.802,0.783 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.01,0.8,42,0.792,0.775 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.01,0.9,42,0.796,0.79 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.01,1.0,42,0.792,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.05,0.7,42,0.798,0.768 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.05,0.8,42,0.794,0.779 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.05,0.9,42,0.792,0.763 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.05,1.0,42,0.796,0.758 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.1,0.7,42,0.796,0.749 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.1,0.8,42,0.792,0.755 
+Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.1,0.9,42,0.768,0.747 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,False,0.1,1.0,42,0.788,0.762 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.01,0.7,42,0.796,0.79 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.01,0.8,42,0.802,0.795 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.01,0.9,42,0.792,0.794 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.01,1.0,42,0.794,0.778 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.05,0.7,42,0.79,0.786 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.05,0.8,42,0.8,0.791 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.05,0.9,42,0.796,0.795 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.05,1.0,42,0.794,0.795 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.1,0.7,42,0.788,0.775 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.1,0.8,42,0.794,0.762 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.1,0.9,42,0.792,0.768 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.3,True,0.1,1.0,42,0.78,0.785 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.01,0.7,42,0.792,0.787 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.01,0.8,42,0.8,0.781 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.01,0.9,42,0.79,0.788 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.01,1.0,42,0.798,0.788 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.05,0.7,42,0.796,0.784 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.05,0.8,42,0.802,0.772 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.05,0.9,42,0.788,0.76 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.05,1.0,42,0.788,0.779 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.1,0.7,42,0.808,0.771 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.1,0.8,42,0.778,0.763 
+Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.1,0.9,42,0.776,0.752 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,False,0.1,1.0,42,0.766,0.759 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.01,0.7,42,0.802,0.799 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.01,0.8,42,0.788,0.795 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.01,0.9,42,0.802,0.789 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.01,1.0,42,0.806,0.79 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.05,0.7,42,0.804,0.77 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.05,0.8,42,0.806,0.775 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.05,0.9,42,0.82,0.778 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.05,1.0,42,0.808,0.781 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.1,0.7,42,0.798,0.797 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.1,0.8,42,0.804,0.775 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.1,0.9,42,0.798,0.767 +Cora,seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.5,True,0.1,1.0,42,0.794,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.01,0.7,42,0.788,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.01,0.8,42,0.786,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.01,0.9,42,0.78,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.01,1.0,42,0.778,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.05,0.7,42,0.79,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.05,0.8,42,0.786,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.05,0.9,42,0.788,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.05,1.0,42,0.782,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.1,0.7,42,0.788,0.794 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.1,0.8,42,0.79,0.786 
+Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.1,0.9,42,0.79,0.789 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,False,0.1,1.0,42,0.784,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.01,0.7,42,0.79,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.01,0.8,42,0.788,0.779 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.01,0.9,42,0.788,0.776 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.01,1.0,42,0.788,0.777 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.05,0.7,42,0.79,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.05,0.8,42,0.79,0.795 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.05,0.9,42,0.788,0.778 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.05,1.0,42,0.786,0.777 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.1,0.7,42,0.788,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.1,0.8,42,0.786,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.1,0.9,42,0.788,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.0,True,0.1,1.0,42,0.786,0.778 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.01,0.7,42,0.786,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.01,0.8,42,0.782,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.01,0.9,42,0.782,0.783 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.01,1.0,42,0.782,0.783 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.05,0.7,42,0.788,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.05,0.8,42,0.788,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.05,0.9,42,0.784,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.05,1.0,42,0.782,0.779 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.1,0.7,42,0.786,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.1,0.8,42,0.786,0.781 
+Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.1,0.9,42,0.786,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,False,0.1,1.0,42,0.782,0.779 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.01,0.7,42,0.786,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.01,0.8,42,0.788,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.01,0.9,42,0.784,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.01,1.0,42,0.79,0.783 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.05,0.7,42,0.786,0.78 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.05,0.8,42,0.786,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.05,0.9,42,0.786,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.05,1.0,42,0.786,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.1,0.7,42,0.792,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.1,0.8,42,0.792,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.1,0.9,42,0.786,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.3,True,0.1,1.0,42,0.788,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.01,0.7,42,0.78,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.01,0.8,42,0.78,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.01,0.9,42,0.786,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.01,1.0,42,0.782,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.05,0.7,42,0.78,0.775 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.05,0.8,42,0.784,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.05,0.9,42,0.78,0.774 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.05,1.0,42,0.78,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.1,0.7,42,0.782,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.1,0.8,42,0.784,0.784 
+Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.1,0.9,42,0.78,0.778 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,False,0.1,1.0,42,0.78,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.01,0.7,42,0.784,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.01,0.8,42,0.784,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.01,0.9,42,0.782,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.01,1.0,42,0.782,0.777 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.05,0.7,42,0.788,0.789 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.05,0.8,42,0.784,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.05,0.9,42,0.784,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.05,1.0,42,0.782,0.778 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.1,0.7,42,0.786,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.1,0.8,42,0.784,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.1,0.9,42,0.784,0.778 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.4,0.5,True,0.1,1.0,42,0.782,0.778 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.01,0.7,42,0.792,0.798 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.01,0.8,42,0.786,0.796 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.01,0.9,42,0.788,0.796 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.01,1.0,42,0.784,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.05,0.7,42,0.796,0.798 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.05,0.8,42,0.788,0.793 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.05,0.9,42,0.792,0.797 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.05,1.0,42,0.786,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.1,0.7,42,0.794,0.797 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.1,0.8,42,0.79,0.793 
+Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.1,0.9,42,0.79,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,False,0.1,1.0,42,0.786,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.01,0.7,42,0.798,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.01,0.8,42,0.794,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.01,0.9,42,0.792,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.01,1.0,42,0.788,0.789 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.05,0.7,42,0.798,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.05,0.9,42,0.79,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.05,1.0,42,0.79,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.1,0.7,42,0.792,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.1,0.8,42,0.796,0.793 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.1,0.9,42,0.794,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.0,True,0.1,1.0,42,0.79,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.01,0.7,42,0.784,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.01,0.8,42,0.782,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.01,0.9,42,0.782,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.01,1.0,42,0.784,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.05,0.7,42,0.79,0.793 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.05,0.8,42,0.788,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.05,0.9,42,0.784,0.789 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.05,1.0,42,0.782,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.1,0.7,42,0.788,0.785 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.1,0.8,42,0.788,0.785 
+Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.1,0.9,42,0.786,0.783 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,False,0.1,1.0,42,0.782,0.779 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.01,0.7,42,0.79,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.01,0.8,42,0.792,0.793 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.01,0.9,42,0.792,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.01,1.0,42,0.79,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.05,0.7,42,0.794,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.05,0.8,42,0.796,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.05,0.9,42,0.792,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.05,1.0,42,0.79,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.1,0.7,42,0.796,0.789 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.1,0.8,42,0.794,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.1,0.9,42,0.794,0.784 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.3,True,0.1,1.0,42,0.788,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.01,0.7,42,0.786,0.786 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.01,0.8,42,0.788,0.789 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.01,0.9,42,0.788,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.01,1.0,42,0.784,0.78 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.05,0.7,42,0.788,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.05,0.8,42,0.792,0.794 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.05,0.9,42,0.79,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.05,1.0,42,0.784,0.782 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.1,0.7,42,0.79,0.796 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.1,0.8,42,0.792,0.793 
+Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.1,0.9,42,0.786,0.781 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,False,0.1,1.0,42,0.784,0.779 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.01,0.7,42,0.786,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.01,0.8,42,0.782,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.01,0.9,42,0.784,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.01,1.0,42,0.79,0.793 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.05,0.7,42,0.79,0.792 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.05,0.8,42,0.788,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.05,0.9,42,0.786,0.788 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.05,1.0,42,0.784,0.793 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.1,0.7,42,0.79,0.79 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.1,0.8,42,0.792,0.791 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.1,0.9,42,0.788,0.787 +Cora,seeds_5e4.json,pool,32,200,0.001,0.0005,0.5,0.5,True,0.1,1.0,42,0.786,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.01,0.7,42,0.782,0.801 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.01,0.8,42,0.788,0.798 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.01,0.9,42,0.784,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.01,1.0,42,0.782,0.795 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.05,0.7,42,0.784,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.05,0.8,42,0.78,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.05,0.9,42,0.78,0.792 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.05,1.0,42,0.78,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.1,0.7,42,0.776,0.79 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.1,0.8,42,0.784,0.775 
+Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.1,0.9,42,0.782,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,False,0.1,1.0,42,0.78,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.01,0.7,42,0.794,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.01,0.8,42,0.802,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.01,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.01,1.0,42,0.798,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.05,0.7,42,0.792,0.803 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.05,0.8,42,0.808,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.05,0.9,42,0.8,0.79 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.05,1.0,42,0.796,0.79 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.1,0.7,42,0.794,0.802 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.1,0.8,42,0.796,0.784 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.1,0.9,42,0.8,0.788 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.0,True,0.1,1.0,42,0.794,0.792 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.01,0.7,42,0.788,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.01,0.8,42,0.784,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.01,0.9,42,0.782,0.779 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.01,1.0,42,0.782,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.05,0.7,42,0.782,0.773 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.05,0.8,42,0.782,0.772 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.05,0.9,42,0.786,0.776 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.05,1.0,42,0.784,0.777 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.1,0.7,42,0.786,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.1,0.8,42,0.776,0.78 
+Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.1,0.9,42,0.788,0.784 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,False,0.1,1.0,42,0.782,0.777 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.01,0.7,42,0.794,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.01,0.8,42,0.798,0.794 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.01,0.9,42,0.798,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.01,1.0,42,0.796,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.05,0.7,42,0.796,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.05,0.8,42,0.796,0.798 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.05,0.9,42,0.788,0.776 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.05,1.0,42,0.79,0.777 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.1,0.7,42,0.794,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.1,0.8,42,0.79,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.1,0.9,42,0.79,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.3,True,0.1,1.0,42,0.788,0.773 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.01,0.7,42,0.788,0.795 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.01,0.8,42,0.788,0.79 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.01,0.9,42,0.79,0.792 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.01,1.0,42,0.784,0.788 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.05,0.7,42,0.782,0.784 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.05,0.8,42,0.786,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.05,0.9,42,0.784,0.786 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.05,1.0,42,0.78,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.1,0.7,42,0.778,0.778 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.1,0.8,42,0.786,0.788 
+Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.1,0.9,42,0.776,0.773 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,False,0.1,1.0,42,0.78,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.01,0.7,42,0.798,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.01,0.8,42,0.798,0.797 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.01,0.9,42,0.796,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.01,1.0,42,0.794,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.05,0.7,42,0.796,0.795 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.05,0.8,42,0.796,0.799 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.05,0.9,42,0.802,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.05,1.0,42,0.794,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.1,0.7,42,0.796,0.797 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.1,0.8,42,0.798,0.801 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.1,0.9,42,0.806,0.792 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.4,0.5,True,0.1,1.0,42,0.79,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.01,0.7,42,0.786,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.01,0.8,42,0.784,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.01,0.9,42,0.784,0.768 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.01,1.0,42,0.786,0.798 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.05,0.7,42,0.784,0.784 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.05,0.8,42,0.782,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.05,0.9,42,0.782,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.05,1.0,42,0.78,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.1,0.7,42,0.78,0.765 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.1,0.8,42,0.782,0.772 
+Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.1,0.9,42,0.788,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,False,0.1,1.0,42,0.782,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.01,0.7,42,0.796,0.795 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.01,0.8,42,0.796,0.801 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.01,0.9,42,0.8,0.805 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.01,1.0,42,0.796,0.802 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.05,0.7,42,0.794,0.794 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.05,0.9,42,0.794,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.05,1.0,42,0.798,0.807 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.1,0.7,42,0.796,0.796 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.1,0.8,42,0.792,0.773 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.1,0.9,42,0.808,0.798 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.0,True,0.1,1.0,42,0.794,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.01,0.7,42,0.788,0.786 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.01,0.8,42,0.796,0.778 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.01,0.9,42,0.786,0.797 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.01,1.0,42,0.79,0.774 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.05,0.7,42,0.79,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.05,0.8,42,0.788,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.05,0.9,42,0.786,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.05,1.0,42,0.788,0.779 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.1,0.7,42,0.784,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.1,0.8,42,0.782,0.773 
+Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.1,0.9,42,0.786,0.787 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,False,0.1,1.0,42,0.784,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.01,0.7,42,0.81,0.788 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.01,0.8,42,0.796,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.01,0.9,42,0.794,0.79 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.01,1.0,42,0.796,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.05,0.7,42,0.788,0.778 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.05,0.8,42,0.788,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.05,0.9,42,0.796,0.778 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.05,1.0,42,0.788,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.1,0.7,42,0.81,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.1,0.8,42,0.792,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.1,0.9,42,0.796,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.3,True,0.1,1.0,42,0.796,0.794 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.01,0.7,42,0.792,0.788 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.01,0.8,42,0.792,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.01,0.9,42,0.79,0.778 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.01,1.0,42,0.792,0.785 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.05,0.7,42,0.786,0.793 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.05,0.8,42,0.79,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.05,0.9,42,0.784,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.05,1.0,42,0.79,0.786 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.1,0.7,42,0.784,0.772 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.1,0.8,42,0.79,0.79 
+Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.1,0.9,42,0.784,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,False,0.1,1.0,42,0.788,0.78 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.01,0.7,42,0.8,0.786 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.01,0.8,42,0.792,0.781 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.01,0.9,42,0.804,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.01,1.0,42,0.796,0.789 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.05,0.7,42,0.794,0.794 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.05,0.8,42,0.79,0.79 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.05,0.9,42,0.796,0.782 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.05,1.0,42,0.798,0.775 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.1,0.7,42,0.796,0.783 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.1,0.8,42,0.786,0.775 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.1,0.9,42,0.798,0.773 +Cora,seeds_5e4.json,pool,32,200,0.01,0.0005,0.5,0.5,True,0.1,1.0,42,0.794,0.761 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.01,0.7,42,0.788,0.794 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.01,0.8,42,0.794,0.79 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.01,0.9,42,0.794,0.795 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.01,1.0,42,0.786,0.789 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.05,0.7,42,0.774,0.778 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.05,0.8,42,0.794,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.05,0.9,42,0.802,0.791 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.05,1.0,42,0.78,0.791 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.1,0.7,42,0.794,0.788 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.1,0.8,42,0.776,0.772 
+Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.1,0.9,42,0.786,0.776 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,False,0.1,1.0,42,0.77,0.786 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.01,0.7,42,0.794,0.789 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.01,0.8,42,0.792,0.798 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.01,0.9,42,0.798,0.806 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.01,1.0,42,0.802,0.797 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.05,0.7,42,0.78,0.789 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.05,0.8,42,0.792,0.8 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.05,0.9,42,0.798,0.809 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.05,1.0,42,0.784,0.784 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.1,0.7,42,0.774,0.773 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.1,0.8,42,0.768,0.792 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.1,0.9,42,0.788,0.781 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.0,True,0.1,1.0,42,0.788,0.774 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.01,0.7,42,0.788,0.785 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.01,0.8,42,0.78,0.781 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.01,0.9,42,0.794,0.799 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.01,1.0,42,0.788,0.802 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.05,0.7,42,0.788,0.788 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.05,0.8,42,0.776,0.786 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.05,0.9,42,0.766,0.78 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.05,1.0,42,0.774,0.775 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.1,0.7,42,0.776,0.774 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.1,0.8,42,0.782,0.777 
+Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.1,0.9,42,0.778,0.777 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,False,0.1,1.0,42,0.772,0.756 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.01,0.7,42,0.806,0.811 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.01,0.8,42,0.802,0.803 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.01,0.9,42,0.792,0.78 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.01,1.0,42,0.798,0.778 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.05,0.7,42,0.794,0.779 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.05,0.8,42,0.782,0.776 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.05,0.9,42,0.794,0.795 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.05,1.0,42,0.79,0.79 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.1,0.7,42,0.778,0.762 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.1,0.8,42,0.782,0.774 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.1,0.9,42,0.772,0.766 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.3,True,0.1,1.0,42,0.78,0.762 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.01,0.7,42,0.802,0.81 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.01,0.8,42,0.792,0.805 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.01,0.9,42,0.796,0.794 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.01,1.0,42,0.796,0.8 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.05,0.7,42,0.778,0.77 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.05,0.8,42,0.784,0.773 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.05,0.9,42,0.766,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.05,1.0,42,0.786,0.797 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.1,0.7,42,0.768,0.787 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.1,0.8,42,0.788,0.783 
+Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.1,0.9,42,0.782,0.759 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,False,0.1,1.0,42,0.78,0.761 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.01,0.7,42,0.786,0.792 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.01,0.8,42,0.786,0.782 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.01,0.9,42,0.796,0.8 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.01,1.0,42,0.792,0.784 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.05,0.7,42,0.784,0.796 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.05,0.8,42,0.782,0.796 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.05,0.9,42,0.79,0.802 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.05,1.0,42,0.798,0.791 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.1,0.7,42,0.764,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.1,0.8,42,0.768,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.1,0.9,42,0.786,0.803 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.4,0.5,True,0.1,1.0,42,0.77,0.774 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.01,0.7,42,0.79,0.795 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.01,0.8,42,0.798,0.807 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.01,0.9,42,0.78,0.798 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.01,1.0,42,0.784,0.799 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.05,0.7,42,0.794,0.799 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.05,0.8,42,0.784,0.768 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.05,0.9,42,0.78,0.773 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.05,1.0,42,0.784,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.1,0.7,42,0.778,0.787 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.1,0.8,42,0.772,0.762 
+Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.1,0.9,42,0.774,0.755 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,False,0.1,1.0,42,0.804,0.777 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.7,42,0.792,0.794 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.8,42,0.792,0.804 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.9,42,0.792,0.789 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,1.0,42,0.784,0.776 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.05,0.7,42,0.78,0.791 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.79 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.05,0.9,42,0.79,0.796 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.05,1.0,42,0.784,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.1,0.7,42,0.784,0.779 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.1,0.8,42,0.77,0.762 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.1,0.9,42,0.774,0.787 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.1,1.0,42,0.758,0.753 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.01,0.7,42,0.78,0.799 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.01,0.8,42,0.796,0.782 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.01,0.9,42,0.79,0.801 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.01,1.0,42,0.774,0.803 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.05,0.7,42,0.78,0.783 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.05,0.8,42,0.776,0.77 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.05,0.9,42,0.77,0.761 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.05,1.0,42,0.79,0.774 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.1,0.7,42,0.762,0.743 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.1,0.8,42,0.772,0.779 
+Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.1,0.9,42,0.78,0.782 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,False,0.1,1.0,42,0.792,0.761 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.01,0.7,42,0.806,0.772 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.01,0.8,42,0.798,0.806 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.01,0.9,42,0.796,0.787 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.01,1.0,42,0.792,0.78 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.05,0.7,42,0.768,0.779 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.05,0.8,42,0.774,0.791 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.05,0.9,42,0.746,0.756 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.05,1.0,42,0.784,0.782 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.1,0.7,42,0.776,0.778 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.1,0.8,42,0.758,0.759 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.1,0.9,42,0.762,0.76 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.3,True,0.1,1.0,42,0.794,0.781 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.01,0.7,42,0.802,0.815 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.01,0.8,42,0.784,0.795 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.01,0.9,42,0.78,0.776 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.01,1.0,42,0.782,0.773 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.05,0.7,42,0.772,0.778 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.05,0.8,42,0.768,0.774 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.05,0.9,42,0.774,0.765 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.05,1.0,42,0.784,0.793 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.1,0.7,42,0.768,0.771 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.1,0.8,42,0.766,0.768 
+Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.1,0.9,42,0.776,0.789 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,False,0.1,1.0,42,0.776,0.773 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.01,0.7,42,0.78,0.78 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.01,0.8,42,0.774,0.771 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.01,0.9,42,0.794,0.789 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.01,1.0,42,0.79,0.786 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.05,0.7,42,0.764,0.759 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.05,0.8,42,0.78,0.758 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.05,0.9,42,0.776,0.764 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.05,1.0,42,0.788,0.803 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.1,0.7,42,0.77,0.767 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.1,0.8,42,0.758,0.741 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.1,0.9,42,0.768,0.76 +Cora,seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.5,True,0.1,1.0,42,0.76,0.759 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.01,0.7,42,0.784,0.788 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.01,0.8,42,0.784,0.793 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.01,0.9,42,0.786,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.01,1.0,42,0.786,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.05,0.7,42,0.786,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.05,0.8,42,0.788,0.802 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.05,0.9,42,0.786,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.05,1.0,42,0.784,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.1,0.7,42,0.784,0.791 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.1,0.8,42,0.786,0.799 
+Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.1,0.9,42,0.784,0.795 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,False,0.1,1.0,42,0.784,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.01,0.7,42,0.792,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.01,0.8,42,0.794,0.794 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.01,0.9,42,0.792,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.01,1.0,42,0.792,0.776 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.05,0.7,42,0.794,0.8 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.05,0.8,42,0.79,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.05,0.9,42,0.792,0.8 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.05,1.0,42,0.792,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.1,0.7,42,0.794,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.1,0.8,42,0.79,0.795 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.1,0.9,42,0.788,0.786 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.0,True,0.1,1.0,42,0.792,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.01,0.7,42,0.794,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.01,0.8,42,0.796,0.799 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.01,0.9,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.01,1.0,42,0.796,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.05,0.7,42,0.792,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.05,0.8,42,0.796,0.8 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.05,0.9,42,0.792,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.05,1.0,42,0.794,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.1,0.7,42,0.792,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.1,0.8,42,0.79,0.791 
+Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.1,0.9,42,0.792,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,False,0.1,1.0,42,0.79,0.795 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.01,0.7,42,0.8,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.01,0.8,42,0.798,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.01,0.9,42,0.8,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.01,1.0,42,0.798,0.788 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.05,0.7,42,0.8,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.05,0.8,42,0.8,0.787 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.05,0.9,42,0.8,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.05,1.0,42,0.798,0.787 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.1,0.7,42,0.8,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.1,0.8,42,0.8,0.787 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.1,0.9,42,0.8,0.786 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.3,True,0.1,1.0,42,0.798,0.784 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.01,0.7,42,0.784,0.787 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.01,0.8,42,0.784,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.01,0.9,42,0.79,0.795 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.01,1.0,42,0.79,0.795 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.05,0.7,42,0.784,0.799 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.05,0.8,42,0.784,0.785 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.05,0.9,42,0.788,0.795 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.05,1.0,42,0.792,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.1,0.7,42,0.784,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.1,0.8,42,0.786,0.786 
+Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.1,0.9,42,0.788,0.788 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,False,0.1,1.0,42,0.79,0.786 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.01,0.7,42,0.798,0.807 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.01,0.8,42,0.798,0.79 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.01,0.9,42,0.796,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.01,1.0,42,0.796,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.05,0.7,42,0.798,0.807 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.05,0.8,42,0.798,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.05,0.9,42,0.796,0.786 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.05,1.0,42,0.796,0.799 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.1,0.7,42,0.798,0.807 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.1,0.8,42,0.796,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.1,0.9,42,0.794,0.787 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.4,0.5,True,0.1,1.0,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.01,0.7,42,0.792,0.796 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.01,0.8,42,0.79,0.793 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.01,0.9,42,0.792,0.79 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.01,1.0,42,0.792,0.792 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.05,0.7,42,0.792,0.791 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.05,0.8,42,0.788,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.05,0.9,42,0.79,0.79 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.05,1.0,42,0.788,0.788 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.1,0.7,42,0.79,0.79 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.1,0.8,42,0.79,0.796 
+Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.1,0.9,42,0.788,0.791 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,False,0.1,1.0,42,0.788,0.781 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.01,0.7,42,0.794,0.802 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.01,0.8,42,0.794,0.808 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.01,0.9,42,0.79,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.01,1.0,42,0.792,0.803 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.05,0.7,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.807 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.05,0.9,42,0.79,0.777 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.05,1.0,42,0.79,0.8 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.1,0.7,42,0.794,0.794 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.1,0.8,42,0.792,0.785 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.1,0.9,42,0.788,0.778 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.0,True,0.1,1.0,42,0.79,0.8 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.01,0.7,42,0.794,0.805 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.01,0.8,42,0.794,0.805 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.01,0.9,42,0.8,0.794 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.01,1.0,42,0.794,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.05,0.7,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.05,0.8,42,0.792,0.797 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.05,0.9,42,0.794,0.803 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.05,1.0,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.1,0.7,42,0.792,0.805 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.1,0.8,42,0.79,0.803 
+Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.1,0.9,42,0.792,0.804 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,False,0.1,1.0,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.01,0.7,42,0.8,0.802 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.01,0.8,42,0.802,0.782 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.01,0.9,42,0.802,0.784 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.01,1.0,42,0.8,0.806 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.05,0.7,42,0.802,0.791 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.05,0.8,42,0.802,0.782 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.05,0.9,42,0.802,0.78 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.05,1.0,42,0.8,0.79 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.1,0.7,42,0.8,0.789 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.1,0.8,42,0.8,0.788 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.1,0.9,42,0.8,0.782 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.3,True,0.1,1.0,42,0.802,0.787 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.01,0.7,42,0.792,0.802 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.01,0.8,42,0.792,0.804 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.01,0.9,42,0.792,0.805 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.01,1.0,42,0.794,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.05,0.7,42,0.79,0.802 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.05,0.8,42,0.792,0.805 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.05,0.9,42,0.792,0.801 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.05,1.0,42,0.792,0.8 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.1,0.7,42,0.79,0.802 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.1,0.8,42,0.788,0.8 
+Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.1,0.9,42,0.792,0.799 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,False,0.1,1.0,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.01,0.7,42,0.798,0.804 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.01,0.8,42,0.8,0.806 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.01,0.9,42,0.8,0.788 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.01,1.0,42,0.794,0.79 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.05,0.7,42,0.798,0.806 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.05,0.8,42,0.798,0.808 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.05,0.9,42,0.796,0.803 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.05,1.0,42,0.796,0.786 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.1,0.7,42,0.798,0.804 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.1,0.8,42,0.798,0.794 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.1,0.9,42,0.796,0.798 +Cora,seeds_5e4.json,pool,64,200,0.001,0.0005,0.5,0.5,True,0.1,1.0,42,0.792,0.793 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.01,0.7,42,0.802,0.786 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.01,0.8,42,0.796,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.01,0.9,42,0.802,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.01,1.0,42,0.794,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.05,0.7,42,0.792,0.777 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.05,0.8,42,0.792,0.787 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.05,0.9,42,0.8,0.802 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.05,1.0,42,0.8,0.801 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.1,0.7,42,0.8,0.795 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.1,0.8,42,0.79,0.775 
+Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.1,0.9,42,0.804,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,False,0.1,1.0,42,0.792,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.01,0.7,42,0.8,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.01,0.8,42,0.798,0.802 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.01,0.9,42,0.8,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.01,1.0,42,0.808,0.797 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.05,0.7,42,0.796,0.778 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.05,0.8,42,0.794,0.788 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.05,0.9,42,0.798,0.79 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.05,1.0,42,0.794,0.783 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.1,0.7,42,0.798,0.792 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.1,0.8,42,0.792,0.778 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.1,0.9,42,0.804,0.772 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.0,True,0.1,1.0,42,0.796,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.01,0.7,42,0.798,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.01,0.8,42,0.806,0.807 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.01,0.9,42,0.794,0.788 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.01,1.0,42,0.806,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.05,0.7,42,0.804,0.8 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.05,0.8,42,0.8,0.806 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.05,0.9,42,0.798,0.795 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.05,1.0,42,0.796,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.1,0.7,42,0.804,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.1,0.8,42,0.796,0.787 
+Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.1,0.9,42,0.792,0.788 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,False,0.1,1.0,42,0.794,0.781 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.01,0.7,42,0.794,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.01,0.8,42,0.792,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.01,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.01,1.0,42,0.794,0.78 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.05,0.7,42,0.79,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.05,0.8,42,0.798,0.794 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.05,0.9,42,0.806,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.05,1.0,42,0.792,0.781 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.1,0.7,42,0.802,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.1,0.8,42,0.796,0.778 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.1,0.9,42,0.792,0.773 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.3,True,0.1,1.0,42,0.79,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.01,0.7,42,0.794,0.8 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.01,0.8,42,0.8,0.81 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.01,0.9,42,0.802,0.804 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.01,1.0,42,0.8,0.79 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.05,0.7,42,0.8,0.793 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.05,0.8,42,0.792,0.794 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.05,0.9,42,0.8,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.05,1.0,42,0.792,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.1,0.7,42,0.792,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.1,0.8,42,0.786,0.803 
+Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.1,0.9,42,0.792,0.802 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,False,0.1,1.0,42,0.794,0.794 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.01,0.7,42,0.79,0.786 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.01,0.8,42,0.796,0.807 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.01,0.9,42,0.792,0.797 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.01,1.0,42,0.8,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.05,0.7,42,0.796,0.792 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.05,0.8,42,0.8,0.802 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.05,0.9,42,0.794,0.772 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.05,1.0,42,0.794,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.1,0.7,42,0.792,0.792 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.1,0.8,42,0.806,0.8 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.1,0.9,42,0.792,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.4,0.5,True,0.1,1.0,42,0.79,0.794 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.01,0.7,42,0.794,0.793 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.01,0.8,42,0.806,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.01,0.9,42,0.806,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.01,1.0,42,0.806,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.05,0.7,42,0.798,0.795 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.05,0.8,42,0.804,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.05,0.9,42,0.8,0.79 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.05,1.0,42,0.8,0.797 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.1,0.7,42,0.794,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.1,0.8,42,0.8,0.786 
+Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.1,0.9,42,0.8,0.794 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,False,0.1,1.0,42,0.794,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.01,0.7,42,0.798,0.8 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.01,0.8,42,0.8,0.794 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.01,0.9,42,0.798,0.788 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.01,1.0,42,0.8,0.771 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.05,0.7,42,0.802,0.784 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.05,0.8,42,0.792,0.806 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.05,0.9,42,0.796,0.78 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.05,1.0,42,0.802,0.806 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.1,0.7,42,0.792,0.801 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.1,0.8,42,0.792,0.804 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.1,0.9,42,0.798,0.778 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.0,True,0.1,1.0,42,0.798,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.01,0.7,42,0.792,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.01,0.8,42,0.794,0.79 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.01,0.9,42,0.802,0.793 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.01,1.0,42,0.802,0.793 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.05,0.7,42,0.798,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.05,0.8,42,0.792,0.8 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.05,0.9,42,0.812,0.799 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.05,1.0,42,0.806,0.811 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.1,0.7,42,0.79,0.797 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.1,0.8,42,0.802,0.801 
+Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.1,0.9,42,0.802,0.787 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,False,0.1,1.0,42,0.802,0.8 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.01,0.7,42,0.796,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.01,0.8,42,0.794,0.782 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.01,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.01,1.0,42,0.8,0.793 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.05,0.7,42,0.79,0.795 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.05,0.8,42,0.796,0.787 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.05,0.9,42,0.798,0.812 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.05,1.0,42,0.8,0.791 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.1,0.7,42,0.792,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.1,0.8,42,0.798,0.785 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.1,0.9,42,0.8,0.772 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.3,True,0.1,1.0,42,0.8,0.786 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.01,0.7,42,0.796,0.804 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.01,0.8,42,0.802,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.01,0.9,42,0.794,0.795 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.01,1.0,42,0.796,0.792 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.05,0.7,42,0.804,0.802 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.05,0.8,42,0.79,0.802 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.05,0.9,42,0.796,0.806 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.05,1.0,42,0.796,0.797 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.1,0.7,42,0.8,0.801 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.1,0.8,42,0.792,0.791 
+Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.1,0.9,42,0.794,0.784 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,False,0.1,1.0,42,0.792,0.769 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.01,0.7,42,0.792,0.798 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.01,0.8,42,0.8,0.79 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.01,0.9,42,0.798,0.787 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.01,1.0,42,0.8,0.795 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.05,0.7,42,0.8,0.807 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.05,0.8,42,0.798,0.79 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.05,0.9,42,0.8,0.807 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.05,1.0,42,0.8,0.803 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.1,0.7,42,0.79,0.811 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.1,0.8,42,0.804,0.801 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.1,0.9,42,0.788,0.796 +Cora,seeds_5e4.json,pool,64,200,0.01,0.0005,0.5,0.5,True,0.1,1.0,42,0.798,0.787 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.01,0.7,42,0.792,0.778 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.01,0.8,42,0.796,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.01,0.9,42,0.8,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.01,1.0,42,0.792,0.785 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.05,0.7,42,0.794,0.794 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.05,0.8,42,0.788,0.791 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.05,0.9,42,0.79,0.765 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.05,1.0,42,0.792,0.787 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.1,0.7,42,0.78,0.791 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.1,0.8,42,0.782,0.781 
+Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.1,0.9,42,0.792,0.778 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,False,0.1,1.0,42,0.798,0.782 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.01,0.7,42,0.794,0.806 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.01,0.8,42,0.788,0.79 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.01,0.9,42,0.808,0.776 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.01,1.0,42,0.814,0.796 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.05,0.7,42,0.784,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.05,0.8,42,0.784,0.789 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.05,0.9,42,0.812,0.779 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.05,1.0,42,0.798,0.801 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.1,0.7,42,0.782,0.769 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.1,0.8,42,0.796,0.788 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.1,0.9,42,0.788,0.754 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.0,True,0.1,1.0,42,0.804,0.786 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.01,0.7,42,0.802,0.812 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.01,0.8,42,0.79,0.802 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.01,0.9,42,0.792,0.78 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.01,1.0,42,0.812,0.795 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.05,0.7,42,0.794,0.794 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.05,0.8,42,0.788,0.767 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.05,0.9,42,0.786,0.772 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.05,1.0,42,0.796,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.1,0.7,42,0.786,0.785 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.1,0.8,42,0.79,0.786 
+Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.1,0.9,42,0.78,0.783 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,False,0.1,1.0,42,0.784,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.01,0.7,42,0.8,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.01,0.8,42,0.794,0.791 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.01,0.9,42,0.798,0.78 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.01,1.0,42,0.81,0.804 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.05,0.7,42,0.8,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.05,0.8,42,0.778,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.05,0.9,42,0.784,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.05,1.0,42,0.788,0.783 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.1,0.7,42,0.786,0.777 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.1,0.8,42,0.778,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.1,0.9,42,0.784,0.78 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.3,True,0.1,1.0,42,0.78,0.768 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.01,0.7,42,0.798,0.807 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.01,0.8,42,0.796,0.79 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.01,0.9,42,0.788,0.766 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.01,1.0,42,0.788,0.793 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.05,0.7,42,0.804,0.786 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.05,0.8,42,0.784,0.765 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.05,0.9,42,0.796,0.787 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.05,1.0,42,0.788,0.786 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.1,0.7,42,0.762,0.765 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.1,0.8,42,0.786,0.758 
+Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.1,0.9,42,0.786,0.791 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,False,0.1,1.0,42,0.778,0.786 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.01,0.7,42,0.776,0.764 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.01,0.8,42,0.784,0.779 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.01,0.9,42,0.798,0.795 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.01,1.0,42,0.804,0.812 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.05,0.7,42,0.784,0.772 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.05,0.8,42,0.776,0.771 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.05,0.9,42,0.802,0.764 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.05,1.0,42,0.792,0.781 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.1,0.7,42,0.79,0.788 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.1,0.8,42,0.782,0.779 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.1,0.9,42,0.786,0.776 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.4,0.5,True,0.1,1.0,42,0.768,0.771 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.01,0.7,42,0.8,0.773 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.01,0.8,42,0.804,0.796 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.01,0.9,42,0.792,0.78 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.01,1.0,42,0.792,0.801 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.05,0.7,42,0.786,0.798 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.05,0.8,42,0.784,0.79 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.05,0.9,42,0.794,0.791 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.05,1.0,42,0.784,0.785 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.1,0.7,42,0.78,0.777 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.1,0.8,42,0.786,0.786 
+Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.1,0.9,42,0.794,0.779 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,False,0.1,1.0,42,0.776,0.77 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.7,42,0.8,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.8,42,0.804,0.795 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.9,42,0.81,0.792 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,1.0,42,0.802,0.788 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.05,0.7,42,0.778,0.797 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.05,0.8,42,0.816,0.778 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.05,0.9,42,0.792,0.771 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.05,1.0,42,0.804,0.773 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.1,0.7,42,0.794,0.793 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.1,0.8,42,0.784,0.746 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.1,0.9,42,0.78,0.764 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.1,1.0,42,0.798,0.746 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.01,0.7,42,0.796,0.783 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.01,0.8,42,0.778,0.786 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.01,0.9,42,0.798,0.773 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.01,1.0,42,0.806,0.798 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.05,0.7,42,0.788,0.762 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.05,0.8,42,0.794,0.766 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.05,0.9,42,0.78,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.05,1.0,42,0.796,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.1,0.7,42,0.778,0.766 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.1,0.8,42,0.776,0.769 
+Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.1,0.9,42,0.778,0.77 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,False,0.1,1.0,42,0.78,0.778 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.01,0.7,42,0.808,0.795 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.01,0.8,42,0.802,0.798 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.01,0.9,42,0.8,0.802 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.01,1.0,42,0.794,0.799 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.05,0.7,42,0.802,0.774 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.05,0.8,42,0.786,0.784 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.05,0.9,42,0.786,0.778 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.05,1.0,42,0.8,0.789 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.1,0.7,42,0.806,0.819 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.1,0.8,42,0.792,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.1,0.9,42,0.786,0.782 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.3,True,0.1,1.0,42,0.802,0.789 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.01,0.7,42,0.8,0.784 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.01,0.8,42,0.79,0.781 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.01,0.9,42,0.782,0.78 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.01,1.0,42,0.78,0.773 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.05,0.7,42,0.778,0.764 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.05,0.8,42,0.776,0.76 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.05,0.9,42,0.782,0.775 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.05,1.0,42,0.784,0.783 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.1,0.7,42,0.782,0.773 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.1,0.8,42,0.774,0.774 
+Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.1,0.9,42,0.784,0.783 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,False,0.1,1.0,42,0.776,0.772 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.01,0.7,42,0.794,0.797 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.01,0.8,42,0.798,0.782 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.01,0.9,42,0.802,0.798 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.01,1.0,42,0.786,0.776 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.05,0.7,42,0.794,0.779 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.05,0.8,42,0.792,0.788 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.05,0.9,42,0.8,0.795 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.05,1.0,42,0.782,0.785 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.1,0.7,42,0.786,0.808 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.1,0.8,42,0.788,0.77 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.1,0.9,42,0.776,0.77 +Cora,seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.5,True,0.1,1.0,42,0.782,0.778 diff --git a/src/grl/hyperparam_sweep2.csv b/src/grl/hyperparam_sweep2.csv new file mode 100644 index 0000000000000000000000000000000000000000..1e5216d16c8c3b8759f43aaa28353071b59b6d0c --- /dev/null +++ b/src/grl/hyperparam_sweep2.csv @@ -0,0 +1,69 @@ +file_path,dataset,seeds,variant,hidden,epochs,lr,wd,dropout,self_loop_scale,use_a2,lrmc_inv_weight,lrmc_gamma,seed,val_accuracy,test_accuracy,return_code,stderr +RUN_003afb67.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.808,0.81,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_038d042a.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.794,0.793,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." 
+RUN_0fd9cb82.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.784,0.799,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_19ca15ff.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.824,0.814,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_1b21d6e2.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.796,0.809,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_1c6c0939.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.804,0.814,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_2a286c32.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.796,0.809,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_2a44e6ed.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.796,0.806,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_3899b503.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.81,0.818,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_4752f02a.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.804,0.814,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." 
+RUN_4a799189.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.788,0.808,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_4bb05ca7.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.796,0.807,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_58a6d307.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.808,0.808,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_59370f54.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.808,0.81,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_5aedcd5a.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.798,0.788,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_5e520ace.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.8,0.821,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_6040c17d.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.79,0.798,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_61d675ba.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.788,0.784,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... 
+RUN_68fd2f76.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.81,0.818,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_6fa2ccb3.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.804,0.814,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_8065bc6e.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.802,0.809,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_82167306.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.778,0.792,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_85c87c68.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.796,0.809,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_868d1c3a.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.808,0.81,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_905508ef.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.796,0.809,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_97dc5537.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.81,0.818,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." 
+RUN_98615ab7.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.3,42,0.782,0.797,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_9d928719.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.804,0.814,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_a623aa1f.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.81,0.818,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_a8931d80.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.794,0.782,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_aa9f531b.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.804,0.814,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_b322469e.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.798,0.789,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_ba4585d1.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.788,0.8,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_c0947734.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.808,0.81,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." 
+RUN_c83d8019.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.1,42,0.788,0.798,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_cc086e43.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,24,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.808,0.81,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_d9c868e4.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,32,200,0.1,0.0005,0.5,0.0,True,0.01,0.5,42,0.81,0.818,0, +RUN_f1b8acc8.json,Cora,cora_seeds/stage0/seeds_5e4.json,baseline,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.4,42,0.796,0.809,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." +RUN_f2499874.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,64,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.798,0.793,0,/usr/local/lib/python3.12/site-packages/torch_sparse/matmul.py:97: UserWarning: Sparse CSR tensor su... +RUN_f5af93f3.json,Cora,cora_seeds/stage0/seeds_5e4.json,pool,16,200,0.1,0.0005,0.5,0.0,True,0.01,0.2,42,0.79,0.805,0,"Downloading https://github.com/kimiyoung/planetoid/raw/master/data/ind.cora.x +Downloading https://gi..." 
diff --git a/src/highlight_seeds_dot.py b/src/highlight_seeds_dot.py index 371ae2aaa403d37ec637d0bb5b8b8d56990dab02..b8f45de37972f27d3566529bd09a1ed74546e236 100644 --- a/src/highlight_seeds_dot.py +++ b/src/highlight_seeds_dot.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ highlight_seeds_dot.py @@ -26,7 +25,7 @@ def load_seeds(seeds_path: Path) -> set[int]: """Return the seed-node indices from the first cluster.""" with seeds_path.open("r", encoding="utf-8") as f: data = json.load(f) - return {n - 1 for n in data["clusters"][0]["seed_nodes"]} + return set(data["clusters"][0]["seed_nodes"]) def highlight_dot(dot_path: Path, seeds: set[int], out_path: Path) -> None: diff --git a/src/lrmc_pool_proteins.py b/src/lrmc_pool_proteins.py new file mode 100644 index 0000000000000000000000000000000000000000..3405374f11d11f8aab5e7ea73cbfddd40e7f65bd --- /dev/null +++ b/src/lrmc_pool_proteins.py @@ -0,0 +1,391 @@ +import torch +from typing import List, Tuple, Dict, Optional +from torch_geometric.data import Data, InMemoryDataset +from torch_geometric.utils import to_undirected +from generate_lrmc_seeds import generate_lrmc_cluster +from tqdm import tqdm +import contextlib +import os + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +# ============ Helpers ============== + +def _edge_list_from_pyg(data: Data) -> List[Tuple[int, int]]: + """Return an undirected, deduplicated 0-indexed edge list with no self-loops.""" + ei = to_undirected(data.edge_index, num_nodes=data.num_nodes) + u, v = ei[0].tolist(), ei[1].tolist() + E = set() + for a, b in zip(u, v): + if a == b: + continue + x, y = (a, b) if a < b else (b, a) + E.add((x, y)) + return list(E) + +def _induced_subgraph_edges(all_edges: List[Tuple[int,int]], nodes: List[int]) -> Tuple[List[Tuple[int,int]], Dict[int,int]]: + """ + Restrict `all_edges` to the induced subgraph on `nodes`. 
+ Returns: + - edge list in the subgraph with 0..(k-1) node ids + - map old_id -> sub_id + """ + nodes_sorted = sorted(nodes) + idmap = {old:i for i, old in enumerate(nodes_sorted)} + S = set(nodes_sorted) + sub_edges = [] + for u, v in all_edges: + if u in S and v in S: + sub_edges.append((idmap[u], idmap[v])) + return sub_edges, idmap + +def _assign_singletons(unassigned: List[int], existing_clusters: List[List[int]]) -> List[List[int]]: + for u in unassigned: + existing_clusters.append([u]) + return existing_clusters + +def compute_lrmc_cover_for_graph( + data: Data, + epsilon: float, + java_exec: str = 'java', + java_opts: Optional[List[str]] = None, + target_ratio: float = 0.5, # keep about 50% of nodes in multi-node clusters + min_size: int = 4, # ignore tiny clusters (2-cliques cause blow-ups) + max_clusters: Optional[int] = None +) -> Tuple[torch.Tensor, int]: + """ + Returns: + cluster: LongTensor [num_nodes] with local cluster ids 0..m-1 + num_clusters: m + """ + N = data.num_nodes + all_edges = _edge_list_from_pyg(data) + remaining = set(range(N)) + clusters: List[List[int]] = [] + + # Stop when we covered target_ratio of nodes OR hit max_clusters + covered_target = int(target_ratio * N) + while len(clusters) < (max_clusters or 10**9) and len(remaining) > 0: + if N - len(remaining) >= covered_target: + break + + # Build induced subgraph on remaining nodes + sub_edges, idmap = _induced_subgraph_edges(all_edges, sorted(remaining)) + if len(sub_edges) == 0: + break # nothing dense left + + # Run your Java-backed L-RMC on the current residual graph + res = generate_lrmc_cluster(sub_edges, epsilon=epsilon, java_exec=java_exec, java_opts=java_opts, quiet = True) + sub_seed_nodes = res.get('seed_nodes', []) + # Map subgraph node ids back to original ids + cluster_nodes = [node for node, sid in idmap.items() if sid in set(sub_seed_nodes)] + + if len(cluster_nodes) < min_size: + break # no more useful clusters + + clusters.append(cluster_nodes) + # HARD 
(near-disjoint) variant: remove all members from 'remaining' + remaining -= set(cluster_nodes) + + # Assign any leftover nodes as singletons to stabilize pool ratio + if len(remaining) > 0: + clusters = _assign_singletons(sorted(list(remaining)), clusters) + + print(f"L-RMC cluster sizes: {sorted([len(c) for c in clusters], reverse=True)}") + + # Build per-node cluster ID vector + cluster_id = torch.empty(N, dtype=torch.long) + for cid, nodes in enumerate(clusters): + cluster_id[torch.tensor(nodes, dtype=torch.long)] = cid + num_clusters = len(clusters) + return cluster_id, num_clusters + +def precompute_lrmc_assignments( + dataset, + epsilon: float, + java_exec: str = 'java', + java_opts: Optional[List[str]] = None, + target_ratio: float = 0.5, + min_size: int = 4, + max_clusters: Optional[int] = None +): + """ + Adds: + data.cluster : LongTensor [num_nodes] (local cluster id) + data.num_clusters : LongTensor [1] (number of clusters) + to each Data in the dataset. + """ + data_list = [] + for i in tqdm(range(len(dataset)), desc="Precomputing LRMC assignments"): + d = dataset[i].clone() + with open(os.devnull, 'w') as f, contextlib.redirect_stdout(f): + cluster, m = compute_lrmc_cover_for_graph( + d, epsilon, java_exec, java_opts, target_ratio, min_size, max_clusters + ) + d.cluster = cluster + d.num_clusters = torch.tensor([m], dtype=torch.long) + data_list.append(d) + + class LRMCWrapped(InMemoryDataset): + def __init__(self, data_list): + super().__init__(root=None) + self.data, self.slices = self.collate(data_list) + + return LRMCWrapped(data_list) + +# ============ Modeling ============== + +import torch.nn as nn +import torch.nn.functional as F +from torch_scatter import scatter_mean +from torch_geometric.utils import to_undirected +from torch_geometric.nn import GCNConv, global_mean_pool, global_max_pool + +class LRMCPool(nn.Module): + def __init__(self, aggregate: str = "mean"): + super().__init__() + assert aggregate in ("mean",) + self.aggregate = 
aggregate + + @torch.no_grad() + def _build_pooled_edges( + self, edge_index: torch.Tensor, batch: torch.Tensor, + cluster: torch.Tensor, num_clusters_vec: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Map original edges (u,v) to (Cu, Cv) using global cluster ids. + Returns pooled edge_index [2, E'] (undirected, deduped) and optional edge_weight (None here). + """ + B = int(num_clusters_vec.numel()) + # cluster offsets per graph + # num_clusters_vec: [B]; e.g., tensor([m1, m2, ...]) + offsets = torch.zeros(B, dtype=torch.long, device=cluster.device) + offsets[1:] = torch.cumsum(num_clusters_vec[:-1], dim=0) + + # global cluster id per node: cluster + offset_of_its_graph + global_cluster = cluster + offsets[batch] + + u, v = edge_index + cu = global_cluster[u] + cv = global_cluster[v] + + # remove self-loops at pooled level + mask = (cu != cv) + cu = cu[mask]; cv = cv[mask] + + pooled = torch.stack([cu, cv], dim=0) + # make undirected & deduplicate + pooled = to_undirected(pooled) + pooled = torch.unique(pooled, dim=1) # unique columns + + return pooled, None # no weights for now + + def forward( + self, x: torch.Tensor, edge_index: torch.Tensor, batch: torch.Tensor, + cluster: torch.Tensor, num_clusters_vec: torch.Tensor + ): + """ + x: [N, F], edge_index: [2, E], batch: [N] (graph id per node) + cluster: [N] local cluster id per node + num_clusters_vec: [B] number of clusters for each graph in the batch + """ + device = x.device + B = int(num_clusters_vec.numel()) + # compute offsets as above + offsets = torch.zeros(B, dtype=torch.long, device=device) + offsets[1:] = torch.cumsum(num_clusters_vec[:-1], dim=0) + global_cluster = cluster + offsets[batch] # [N] + + # pooled node embeddings + M = int(num_clusters_vec.sum().item()) # total pooled nodes across batch + if self.aggregate == "mean": + x_pool = scatter_mean(x, global_cluster, dim=0, dim_size=M) # [M, F] + + # pooled edges and batch for pooled nodes + edge_index_pooled, _ = 
self._build_pooled_edges(edge_index, batch, cluster, num_clusters_vec) + # batch id of each pooled node: [0,0,...,1,1,...] shaped [M] + batch_pooled = torch.arange(B, device=device).repeat_interleave(num_clusters_vec) + + return x_pool, edge_index_pooled, batch_pooled + + +class LRMCPoolNet(nn.Module): + def __init__(self, in_dim: int, hidden: int, num_classes: int, dropout: float = 0.2): + super().__init__() + self.conv1 = GCNConv(in_dim, hidden) + self.conv2 = GCNConv(hidden, hidden) + self.pool = LRMCPool(aggregate="mean") + self.dropout = dropout + + # Readout combines mean+max at two levels -> 4*hidden features + self.lin = nn.Sequential( + nn.Linear(4*hidden, 2*hidden), + nn.ReLU(), + nn.Dropout(dropout), + nn.Linear(2*hidden, num_classes), + ) + + def forward(self, data): + x, ei, batch = data.x, data.edge_index, data.batch + # Level 0 conv + x0 = F.relu(self.conv1(x, ei)) + x0 = F.dropout(x0, p=self.dropout, training=self.training) + + # Level 0 readout (before pooling) + g0_mean = global_mean_pool(x0, batch) + g0_max = global_max_pool(x0, batch) + + # Pool using precomputed L-RMC assignments + x1, ei1, batch1 = self.pool( + x0, ei, batch, cluster=data.cluster, num_clusters_vec=data.num_clusters + ) + + # Level 1 conv on pooled graph + x1 = F.relu(self.conv2(x1, ei1)) + x1 = F.dropout(x1, p=self.dropout, training=self.training) + + # Level 1 readout (after pooling) + g1_mean = global_mean_pool(x1, batch1) + g1_max = global_max_pool(x1, batch1) + + g = torch.cat([g0_mean, g0_max, g1_mean, g1_max], dim=-1) + out = self.lin(g) + return out + +# ============ Data ============== + +from torch_geometric.loader import DataLoader +from sklearn.model_selection import StratifiedKFold +from torch.optim import Adam +from torch.nn.functional import cross_entropy +import numpy as np +import random + +def make_loaders(dataset, train_idx, test_idx, batch_size=64): + train_ds = dataset.index_select(train_idx) + test_ds = dataset.index_select(test_idx) + return ( + 
DataLoader(train_ds, batch_size=batch_size, shuffle=True), + DataLoader(test_ds, batch_size=batch_size, shuffle=False), + ) + +def seed_all(seed: int = 42): + random.seed(seed); np.random.seed(seed) + torch.manual_seed(seed); torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + +def run_fold(train_loader, val_loader, test_loader, in_dim, num_classes, hidden=64, lr=1e-3, wd=5e-4, epochs=200, dropout=0.2): + model = LRMCPoolNet(in_dim=in_dim, hidden=hidden, num_classes=num_classes, dropout=dropout).to(device) + opt = Adam(model.parameters(), lr=lr, weight_decay=wd) + + best_val_acc = 0.0 + test_acc_at_best_val = 0.0 + + for epoch in range(1, epochs+1): + # === train === + model.train() + total_loss = 0.0 + for data in train_loader: + data = data.to(device) + opt.zero_grad() + out = model(data) + loss = cross_entropy(out, data.y) + loss.backward() + opt.step() + total_loss += loss.item() * data.num_graphs + + # === eval === + model.eval() + def _eval(loader): + correct = 0; total = 0 + with torch.no_grad(): + for d in loader: + d = d.to(device) + logits = model(d) + pred = logits.argmax(dim=-1) + correct += int((pred == d.y).sum()) + total += d.num_graphs + return correct / total + + train_acc = _eval(train_loader) + val_acc = _eval(val_loader) + test_acc = _eval(test_loader) + + if val_acc > best_val_acc: + best_val_acc = val_acc + test_acc_at_best_val = test_acc + + if epoch % 20 == 0 or epoch == 1: + print(f"[{epoch:03d}] loss={total_loss/len(train_loader.dataset):.4f} " + f"train_acc={train_acc*100:.2f}% val_acc={val_acc*100:.2f}% test_acc={test_acc*100:.2f}% (test_at_best_val={test_acc_at_best_val*100:.2f}%)") + return test_acc_at_best_val + +def main(): + from torch_geometric.datasets import TUDataset + from easydict import EasyDict as edict + + config = edict( + dataset_root = "data", + dataset_name = "PROTEINS", + kfold_splits = 10, + seed = 42, + # === LRMC Hyperparams === + epsilon = 
int(1e3), + target_ratio = 0.5, + min_size = 4, + max_clusters = None, + # === Training Hyperparams === + batch_size = 64, + hidden = 64, + lr = 0.0001, + wd = 0.001, + epochs = 500, + dropout = 0.2, + ) + + dataset = TUDataset(root=config.dataset_root, name=config.dataset_name, use_node_attr=False) + print(dataset) + + labels = torch.tensor([data.y.item() for data in dataset]) + skf = StratifiedKFold(n_splits=config.kfold_splits, shuffle=True, random_state=config.seed) + fold_indices = [(train_idx, test_idx) for train_idx, test_idx in skf.split(torch.arange(len(dataset)), labels)] + + # Pick epsilon & other knobs based on your synthetic tuning + dataset = precompute_lrmc_assignments( + dataset, + epsilon=config.epsilon, + target_ratio=config.target_ratio, + min_size=config.min_size, + max_clusters=config.max_clusters + ) + + seed_all(config.seed) + num_classes = int(dataset.num_classes) + in_dim = dataset.num_features if dataset.num_features > 0 else 10 # if OneHotDegree(10) + + fold_scores = [] + for k, (train_idx, test_idx) in enumerate(fold_indices, 1): + print(f"\n=== Fold {k}/10 ===") + + # Create validation set from training set (90/10 split) + train_labels = labels[train_idx] + skf_val = StratifiedKFold(n_splits=10, shuffle=True, random_state=config.seed) + train_sub_idx, val_sub_idx = next(skf_val.split(np.arange(len(train_idx)), train_labels)) + + fold_train_idx = train_idx[train_sub_idx] + fold_val_idx = train_idx[val_sub_idx] + + train_loader = DataLoader(dataset.index_select(fold_train_idx), batch_size=config.batch_size, shuffle=True) + val_loader = DataLoader(dataset.index_select(fold_val_idx), batch_size=config.batch_size, shuffle=False) + test_loader = DataLoader(dataset.index_select(test_idx), batch_size=config.batch_size, shuffle=False) + + best = run_fold(train_loader, val_loader, test_loader, in_dim, num_classes, + hidden=config.hidden, lr=config.lr, wd=config.wd, + epochs=config.epochs, dropout=config.dropout) + fold_scores.append(best) + + 
print("\nCV mean acc: {:.2f}% ± {:.2f}%".format(100*np.mean(fold_scores), 100*np.std(fold_scores))) + +if __name__ == "__main__": + main() diff --git a/src/old/LRMCseedsProteins_streamsafe2.java b/src/old/LRMCseedsProteins_streamsafe2.java new file mode 100644 index 0000000000000000000000000000000000000000..8af16da88898b6571be16c385239aefc2a2476f7 --- /dev/null +++ b/src/old/LRMCseedsProteins_streamsafe2.java @@ -0,0 +1,437 @@ +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.lang.reflect.Method; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.function.Consumer; + +/** + * LRMCseedsProteins_streamsafe.java + * + * Memory-lean-ish seeds exporter for Reddit. It will try to call a STREAMING + * reconstruction entry point: + * + * public static void runLaplacianRMCStreaming(List[] adj1Based, + * java.util.function.Consumer sink) + * + * If not found, it falls back to the old runLaplacianRMC(adj1Based) and will + * materialize all snapshots (may OOM on Reddit). The idea is you can add the + * streaming method later without changing this file again. + * + * Usage: + * java -Xmx8g -Xms4g LRMCseedsProteins_streamsafe \ + * reddit_edges.txt seeds_reddit.json [DIAM|INV_SQRT_LAMBDA2] [epsilon] + */ +public class LRMCseedsProteins_streamsafe2 { + + public static boolean USE_CORA = false; + + public static void main(String[] args) throws Exception { + if (args.length < 2) { + System.err.println("Usage: java LRMCseedsProteins_streamsafe2 [alpha_kind] [epsilon]"); + return; + } + + final Path outSeeds; + final AlphaKind alphaKind; + final double eps; + GraphData G; + + if (!USE_CORA) { + final Path edgesPath = Paths.get(args[0]); + final Path outPath = Paths.get(args[1]); + alphaKind = (args.length >= 3 ? parseAlpha(args[2]) : AlphaKind.DIAM); + eps = (args.length >= 4 ? 
Double.parseDouble(args[3]) : 1e-6); + + if (java.nio.file.Files.isDirectory(edgesPath)) { + java.nio.file.Files.createDirectories(outPath); + try (java.util.stream.Stream stream = java.nio.file.Files.list(edgesPath)) { + stream.filter(java.nio.file.Files::isRegularFile) + .filter(p -> { + String s = p.getFileName().toString().toLowerCase(java.util.Locale.ROOT); + return s.endsWith(".txt") || s.endsWith(".csv"); + }) + .forEach(p -> { + try { + GraphData Gi = loadRedditEdgeList(p); + String base = p.getFileName().toString().replaceFirst("\\.(txt|csv)$", ""); + Path outFile = outPath.resolve(base + ".json"); + runOnce(Gi, outFile, alphaKind, eps); + } catch (Exception e) { + throw new RuntimeException("Failed on " + p, e); + } + }); + } + return; + } else { + G = loadRedditEdgeList(edgesPath); + System.out.printf(Locale.US, "# Loaded edge list: n=%d, m=%d%n", G.n, G.m); + runOnce(G, outPath, alphaKind, eps); + return; + } + } else { + final Path content = Paths.get(args[0]); + final Path cites = Paths.get(args[1]); + outSeeds = Paths.get(args[2]); + alphaKind = (args.length >= 4 ? parseAlpha(args[3]) : AlphaKind.DIAM); + eps = (args.length >= 5 ? Double.parseDouble(args[4]) : 1e-6); + + G = loadCora(content, cites); + + System.out.printf(Locale.US, "# Loaded Cora edge list: n=%d, m=%d%n", G.n, G.m); + } + + PeakTracker tracker = new PeakTracker(G, eps, alphaKind); + + System.out.println("# Found streaming entry point. Running streaming reconstruction..."); + clique2_ablations_parallel2.runLaplacianRMCStreaming(G.adj1Based, tracker); + + tracker.writeJson(outSeeds); + System.out.println("# Done. 
wrote " + outSeeds.toAbsolutePath()); + } + + static void runOnce(GraphData G, Path outSeeds, AlphaKind alphaKind, double eps) throws Exception { + PeakTracker tracker = new PeakTracker(G, eps, alphaKind); + try { + System.out.println("# Running streaming reconstruction..."); + clique2_ablations_parallel2.runLaplacianRMCStreaming(G.adj1Based, tracker); + } catch (Throwable t) { + System.err.println("# Streaming entry point failed, attempting fallback: " + t); + // Fallback to non-streaming if available + try { + Method m = clique2_ablations_parallel2.class.getDeclaredMethod("runLaplacianRMCCore", List[].class, Consumer.class); + m.invoke(null, G.adj1Based, tracker); + } catch (Throwable t2) { + throw new RuntimeException("No available reconstruction entry point", t2); + } + } + tracker.writeJson(outSeeds); + System.out.println("# Done. wrote " + outSeeds.toAbsolutePath()); + } + + // Streaming peak tracker + static final class PeakTracker implements Consumer { + final GraphData G; + final double epsilon; + final AlphaKind alphaKind; + + final boolean[] inC; + final Map bestIdxByComp = new LinkedHashMap<>(); + final Map bestScoreByComp = new HashMap<>(); + final List arrivals = new ArrayList<>(); + int idx = 0; + + static final class Rec { + final int compId; + final int sid; + final double score; + final int[] nodes; + Rec(int compId, int sid, double score, int[] nodes) { + this.compId = compId; this.sid = sid; this.score = score; this.nodes = nodes; + } + } + + PeakTracker(GraphData G, double epsilon, AlphaKind alphaKind) { + this.G = G; this.epsilon = epsilon; this.alphaKind = alphaKind; + this.inC = new boolean[G.n]; + } + + @Override + public void accept(clique2_ablations_parallel2.SnapshotDTO s) { + final int[] nodes = s.nodes; + final int k = nodes.length; + if (k == 0) return; + for (int u : nodes) inC[u] = true; + + final double dbar = s.sumDegIn / Math.max(1.0, k); + final double Q = s.Q; +// final double alpha = (alphaKind == AlphaKind.DIAM) +// ? 
approxDiameter(nodes, G.adj1Based, inC) +// : 1.0; // simple fallback for lambda2 + + final double sc = k / (Q + epsilon); +// final int compId = getSnapshotComponentId(s, nodes); + final int compId = s.componentId; + final int sid = idx++; + + if (!bestIdxByComp.containsKey(compId) || sc > bestScoreByComp.get(compId)) { + bestIdxByComp.put(compId, sid); + bestScoreByComp.put(compId, sc); + } + arrivals.add(new Rec(compId, sid, sc, Arrays.copyOf(nodes, nodes.length))); + + for (int u : nodes) inC[u] = false; + } + + void writeJson(Path outJson) throws IOException { + final int n = G.n; + boolean[] covered = new boolean[n]; + int coveredCount = 0; + + try (BufferedWriter w = Files.newBufferedWriter(outJson, StandardCharsets.UTF_8)) { + w.write("{\n"); + w.write("\"meta\":{"); + w.write("\"epsilon\":" + epsilon); + w.write(",\"alpha_kind\":\"" + (alphaKind == AlphaKind.DIAM ? "DIAM" : "INV_SQRT_LAMBDA2") + "\""); + w.write(",\"n\":" + G.n); + w.write(",\"m\":" + G.m); + w.write(",\"mode\":\"peaks_per_component+singletons(stream-or-fallback)\""); + w.write("},\n"); + w.write("\"clusters\":[\n"); + + boolean first = true; + int nextClusterId = 0; + + for (Rec r : arrivals) { + Integer best = bestIdxByComp.get(r.compId); + if (best != null && best == r.sid) { + if (!first) w.write(",\n"); + first = false; + w.write(" {\"cluster_id\":" + (nextClusterId++)); + w.write(",\"component_id\":" + r.compId); + w.write(",\"snapshot_id\":" + r.sid); + w.write(",\"score\":" + r.score); + w.write(",\"k_seed\":" + r.nodes.length); + w.write(",\"members\":" + intArrayToJson(r.nodes)); + w.write(",\"seed_nodes\":" + intArrayToJson(r.nodes)); + w.write("}"); + for (int u : r.nodes) if (!covered[u]) { covered[u] = true; coveredCount++; } + } + } + + for (int u = 0; u < n; u++) { + if (!covered[u]) { + if (!first) w.write(",\n"); + first = false; + int[] singleton = new int[]{u}; + w.write(" {\"cluster_id\":" + (nextClusterId++)); + w.write(",\"component_id\":-1"); + 
w.write(",\"snapshot_id\":-1"); + w.write(",\"score\":0.0"); + w.write(",\"k_seed\":1"); + w.write(",\"members\":" + intArrayToJson(singleton)); + w.write(",\"seed_nodes\":" + intArrayToJson(singleton)); + w.write(",\"is_singleton\":true"); + w.write("}"); + } + } + w.write("\n]}"); + } + } + } + + // Load Reddit from edge list (preallocated) + static GraphData loadRedditEdgeList(Path edgesFile) throws IOException { + int[] deg = new int[1 << 16]; + int maxNode = -1; + long mUndir = 0; + try (BufferedReader br = Files.newBufferedReader(edgesFile, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + int u = Integer.parseInt(tok[0]); + int v = Integer.parseInt(tok[1]); + if (u == v) continue; + int needed = Math.max(u, v) + 1; + if (needed > deg.length) { + int newLen = deg.length; + while (newLen < needed) newLen <<= 1; + deg = Arrays.copyOf(deg, newLen); + } + deg[u]++; deg[v]++; + if (u < v) mUndir++; + if (u > maxNode) maxNode = u; + if (v > maxNode) maxNode = v; + } + } + final int n = maxNode + 1; + @SuppressWarnings("unchecked") + List[] adj1 = (List[]) new List[n + 1]; + for (int i = 1; i <= n; i++) adj1[i] = new ArrayList<>(deg[i - 1]); + try (BufferedReader br = Files.newBufferedReader(edgesFile, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + int u = Integer.parseInt(tok[0]); + int v = Integer.parseInt(tok[1]); + if (u == v) continue; + adj1[u + 1].add(v + 1); + adj1[v + 1].add(u + 1); + } + } + GraphData G = new GraphData(); + G.n = n; G.m = mUndir; G.adj1Based = adj1; + G.labels = new int[n]; Arrays.fill(G.labels, -1); + G.labelNames = new String[0]; + return G; + } + + static GraphData loadCora(Path content, Path cites) throws 
IOException { + Map id2idx = new LinkedHashMap<>(); + Map lbl2idx = new LinkedHashMap<>(); + List lblNames = new ArrayList<>(); + List labelsList = new ArrayList<>(); + + // Pass 1: content defines node universe and labels + try (BufferedReader br = Files.newBufferedReader(content, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty()) continue; + String[] tok = s.split("\\s+"); + String id = tok[0]; + String lab = tok[tok.length - 1]; + int u = id2idx.computeIfAbsent(id, _k -> id2idx.size()); + int c = lbl2idx.computeIfAbsent(lab, _k -> { + lblNames.add(lab); + return lblNames.size() - 1; + }); + // Extend labels list to position u if needed + while (labelsList.size() <= u) labelsList.add(0); + labelsList.set(u, c); + } + } + int n = id2idx.size(); + int[] labels = new int[n]; + for (int i = 0; i < n; i++) labels[i] = labelsList.get(i); + + // Temp adjacency as sets to dedup + @SuppressWarnings("unchecked") + HashSet[] adjSet1 = new HashSet[n + 1]; + for (int i = 1; i <= n; i++) adjSet1[i] = new HashSet<>(); + + // Pass 2: cites edges + long mUndir = 0; + try (BufferedReader br = Files.newBufferedReader(cites, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + Integer ui = id2idx.get(tok[0]); + Integer vi = id2idx.get(tok[1]); + if (ui == null || vi == null) continue; // skip unknown ids + int a = ui + 1, b = vi + 1; // to 1-based + if (a == b) continue; + if (adjSet1[a].add(b)) { + adjSet1[b].add(a); + mUndir++; + } + } + } + + @SuppressWarnings("unchecked") + List[] adj1 = new ArrayList[n + 1]; + for (int i = 1; i <= n; i++) { + adj1[i] = new ArrayList<>(adjSet1[i]); + } + + GraphData G = new GraphData(); + G.n = n; + G.m = mUndir; + G.adj1Based = adj1; + G.labels = labels; + G.labelNames = lblNames.toArray(new String[0]); + return G; + } + + 
// Helpers + static double approxDiameter(int[] nodes, List[] adj1, boolean[] inC) { + if (nodes.length <= 1) return 0.0; + int start = nodes[0]; + BFSResult a = bfsFarthest(start, adj1, inC); + BFSResult b = bfsFarthest(a.node, adj1, inC); + return (double) b.dist; + } + + static BFSResult bfsFarthest(int src, List[] adj1, boolean[] inC) { + int nTot = inC.length; + int[] dist = new int[nTot]; + Arrays.fill(dist, -1); + ArrayDeque q = new ArrayDeque<>(); + q.add(src); + dist[src] = 0; + int bestNode = src, bestDist = 0; + while (!q.isEmpty()) { + int u = q.removeFirst(); + int du = dist[u]; + if (du > bestDist) { bestDist = du; bestNode = u; } + for (int v1 : adj1[u + 1]) { + int v = v1 - 1; + if (!inC[v]) continue; + if (dist[v] >= 0) continue; + dist[v] = du + 1; + q.add(v); + } + } + return new BFSResult(bestNode, bestDist); + } + + static int getSnapshotComponentId(Object snap, int[] nodes) { + try { + java.lang.reflect.Field f; + Class cls = snap.getClass(); + try { f = cls.getDeclaredField("root"); } + catch (NoSuchFieldException e1) { + try { f = cls.getDeclaredField("componentId"); } + catch (NoSuchFieldException e2) { + try { f = cls.getDeclaredField("compId"); } + catch (NoSuchFieldException e3) { + try { f = cls.getDeclaredField("id"); } + catch (NoSuchFieldException e4) { f = null; } + } + } + } + if (f != null) { + f.setAccessible(true); + Object v = f.get(snap); + if (v instanceof Integer) return ((Integer) v).intValue(); + if (v instanceof Long) return ((Long) v).intValue(); + if (v != null) return Integer.parseInt(String.valueOf(v)); + } + } catch (Throwable t) { /* ignore */ } + int mn = Integer.MAX_VALUE; + for (int u : nodes) if (u < mn) mn = u; + return mn; + } + + static AlphaKind parseAlpha(String s) { + String t = s.trim().toUpperCase(Locale.ROOT); + if (t.startsWith("DIAM")) return AlphaKind.DIAM; + if (t.contains("LAMBDA")) return AlphaKind.INV_SQRT_LAMBDA2; + return AlphaKind.DIAM; + } + + static String intArrayToJson(int[] arr) { + 
StringBuilder sb = new StringBuilder(); + sb.append('['); + for (int i = 0; i < arr.length; i++) { + if (i > 0) sb.append(','); + sb.append(arr[i]); + } + sb.append(']'); + return sb.toString(); + } + + enum AlphaKind {DIAM, INV_SQRT_LAMBDA2} + + static final class GraphData { + int n; + long m; + List[] adj1Based; + int[] labels; + String[] labelNames; + } + static final class BFSResult { final int node, dist; BFSResult(int node, int dist) { this.node = node; this.dist = dist; } } +} diff --git a/src/old/LRMCseedsReddit_streamsafe.java b/src/old/LRMCseedsReddit_streamsafe.java new file mode 100644 index 0000000000000000000000000000000000000000..5b6e7430f845ac46779b61cdeab5f9c6a2beeda7 --- /dev/null +++ b/src/old/LRMCseedsReddit_streamsafe.java @@ -0,0 +1,391 @@ +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.lang.reflect.Method; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.function.Consumer; + +/** + * LRMCseedsReddit_streamsafe.java + * + * Memory-lean-ish seeds exporter for Reddit. It will try to call a STREAMING + * reconstruction entry point: + * + * public static void runLaplacianRMCStreaming(List[] adj1Based, + * java.util.function.Consumer sink) + * + * If not found, it falls back to the old runLaplacianRMC(adj1Based) and will + * materialize all snapshots (may OOM on Reddit). The idea is you can add the + * streaming method later without changing this file again. 
+ * + * Usage: + * java -Xmx8g -Xms4g LRMCseedsReddit_streamsafe \ + * reddit_edges.txt seeds_reddit.json [DIAM|INV_SQRT_LAMBDA2] [epsilon] + */ +public class LRMCseedsReddit_streamsafe { + + public static boolean USE_CORA = false; + + public static void main(String[] args) throws Exception { + if (args.length < 2) { + System.err.println("Usage: java LRMCseedsReddit_streamsafe [alpha_kind] [epsilon]"); + return; + } + + final Path outSeeds; + final AlphaKind alphaKind; + final double eps; + GraphData G; + + if (!USE_CORA) { + final Path edgesPath = Paths.get(args[0]); + outSeeds = Paths.get(args[1]); + alphaKind = (args.length >= 3 ? parseAlpha(args[2]) : AlphaKind.DIAM); + eps = (args.length >= 4 ? Double.parseDouble(args[3]) : 1e-6); + + G = loadRedditEdgeList(edgesPath); + + System.out.printf(Locale.US, "# Loaded Reddit edge list: n=%d, m=%d%n", G.n, G.m); + } else { + final Path content = Paths.get(args[0]); + final Path cites = Paths.get(args[1]); + outSeeds = Paths.get(args[2]); + alphaKind = (args.length >= 4 ? parseAlpha(args[3]) : AlphaKind.DIAM); + eps = (args.length >= 5 ? Double.parseDouble(args[4]) : 1e-6); + + G = loadCora(content, cites); + + System.out.printf(Locale.US, "# Loaded Cora edge list: n=%d, m=%d%n", G.n, G.m); + } + + PeakTracker tracker = new PeakTracker(G, eps, alphaKind); + + System.out.println("# Found streaming entry point. Running streaming reconstruction..."); + clique2_ablations_parallel2.runLaplacianRMCStreaming(G.adj1Based, tracker); + + tracker.writeJson(outSeeds); + System.out.println("# Done. 
wrote " + outSeeds.toAbsolutePath()); + } + + // Streaming peak tracker + static final class PeakTracker implements Consumer { + final GraphData G; + final double epsilon; + final AlphaKind alphaKind; + + final boolean[] inC; + final Map bestIdxByComp = new LinkedHashMap<>(); + final Map bestScoreByComp = new HashMap<>(); + final List arrivals = new ArrayList<>(); + int idx = 0; + + static final class Rec { + final int compId; + final int sid; + final double score; + final int[] nodes; + Rec(int compId, int sid, double score, int[] nodes) { + this.compId = compId; this.sid = sid; this.score = score; this.nodes = nodes; + } + } + + PeakTracker(GraphData G, double epsilon, AlphaKind alphaKind) { + this.G = G; this.epsilon = epsilon; this.alphaKind = alphaKind; + this.inC = new boolean[G.n]; + } + + @Override + public void accept(clique2_ablations_parallel2.SnapshotDTO s) { + final int[] nodes = s.nodes; + final int k = nodes.length; + if (k == 0) return; + for (int u : nodes) inC[u] = true; + + final double dbar = s.sumDegIn / Math.max(1.0, k); + final double Q = s.Q; + + final double sc = k / (Q + epsilon); + final int compId = s.componentId; + final int sid = idx++; + + if (!bestIdxByComp.containsKey(compId) || sc > bestScoreByComp.get(compId)) { + bestIdxByComp.put(compId, sid); + bestScoreByComp.put(compId, sc); + } + arrivals.add(new Rec(compId, sid, sc, Arrays.copyOf(nodes, nodes.length))); + + for (int u : nodes) inC[u] = false; + } + + void writeJson(Path outJson) throws IOException { + final int n = G.n; + boolean[] covered = new boolean[n]; + int coveredCount = 0; + + try (BufferedWriter w = Files.newBufferedWriter(outJson, StandardCharsets.UTF_8)) { + w.write("{\n"); + w.write("\"meta\":{"); + w.write("\"epsilon\":" + epsilon); + w.write(",\"alpha_kind\":\"" + (alphaKind == AlphaKind.DIAM ? 
"DIAM" : "INV_SQRT_LAMBDA2") + "\""); + w.write(",\"n\":" + G.n); + w.write(",\"m\":" + G.m); + w.write(",\"mode\":\"peaks_per_component+singletons(stream-or-fallback)\""); + w.write("},\n"); + w.write("\"clusters\":[\n"); + + boolean first = true; + int nextClusterId = 0; + + for (Rec r : arrivals) { + Integer best = bestIdxByComp.get(r.compId); + if (best != null && best == r.sid) { + if (!first) w.write(",\n"); + first = false; + w.write(" {\"cluster_id\":" + (nextClusterId++)); + w.write(",\"component_id\":" + r.compId); + w.write(",\"snapshot_id\":" + r.sid); + w.write(",\"score\":" + r.score); + w.write(",\"k_seed\":" + r.nodes.length); + w.write(",\"members\":" + intArrayToJson(r.nodes)); + w.write(",\"seed_nodes\":" + intArrayToJson(r.nodes)); + w.write("}"); + for (int u : r.nodes) if (!covered[u]) { covered[u] = true; coveredCount++; } + } + } + + for (int u = 0; u < n; u++) { + if (!covered[u]) { + if (!first) w.write(",\n"); + first = false; + int[] singleton = new int[]{u}; + w.write(" {\"cluster_id\":" + (nextClusterId++)); + w.write(",\"component_id\":-1"); + w.write(",\"snapshot_id\":-1"); + w.write(",\"score\":0.0"); + w.write(",\"k_seed\":1"); + w.write(",\"members\":" + intArrayToJson(singleton)); + w.write(",\"seed_nodes\":" + intArrayToJson(singleton)); + w.write(",\"is_singleton\":true"); + w.write("}"); + } + } + w.write("\n]}"); + } + } + } + + // Load Reddit from edge list (preallocated) + static GraphData loadRedditEdgeList(Path edgesFile) throws IOException { + int[] deg = new int[1 << 16]; + int maxNode = -1; + long mUndir = 0; + try (BufferedReader br = Files.newBufferedReader(edgesFile, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + int u = Integer.parseInt(tok[0]); + int v = Integer.parseInt(tok[1]); + if (u == v) continue; + int needed = Math.max(u, v) + 1; + if 
(needed > deg.length) { + int newLen = deg.length; + while (newLen < needed) newLen <<= 1; + deg = Arrays.copyOf(deg, newLen); + } + deg[u]++; deg[v]++; + if (u < v) mUndir++; + if (u > maxNode) maxNode = u; + if (v > maxNode) maxNode = v; + } + } + final int n = maxNode + 1; + @SuppressWarnings("unchecked") + List[] adj1 = (List[]) new List[n + 1]; + for (int i = 1; i <= n; i++) adj1[i] = new ArrayList<>(deg[i - 1]); + try (BufferedReader br = Files.newBufferedReader(edgesFile, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + int u = Integer.parseInt(tok[0]); + int v = Integer.parseInt(tok[1]); + if (u == v) continue; + adj1[u + 1].add(v + 1); + adj1[v + 1].add(u + 1); + } + } + GraphData G = new GraphData(); + G.n = n; G.m = mUndir; G.adj1Based = adj1; + G.labels = new int[n]; Arrays.fill(G.labels, -1); + G.labelNames = new String[0]; + return G; + } + + static GraphData loadCora(Path content, Path cites) throws IOException { + Map id2idx = new LinkedHashMap<>(); + Map lbl2idx = new LinkedHashMap<>(); + List lblNames = new ArrayList<>(); + List labelsList = new ArrayList<>(); + + // Pass 1: content defines node universe and labels + try (BufferedReader br = Files.newBufferedReader(content, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty()) continue; + String[] tok = s.split("\\s+"); + String id = tok[0]; + String lab = tok[tok.length - 1]; + int u = id2idx.computeIfAbsent(id, _k -> id2idx.size()); + int c = lbl2idx.computeIfAbsent(lab, _k -> { + lblNames.add(lab); + return lblNames.size() - 1; + }); + // Extend labels list to position u if needed + while (labelsList.size() <= u) labelsList.add(0); + labelsList.set(u, c); + } + } + int n = id2idx.size(); + int[] labels = new int[n]; + for (int i = 0; i < n; i++) labels[i] = 
labelsList.get(i); + + // Temp adjacency as sets to dedup + @SuppressWarnings("unchecked") + HashSet[] adjSet1 = new HashSet[n + 1]; + for (int i = 1; i <= n; i++) adjSet1[i] = new HashSet<>(); + + // Pass 2: cites edges + long mUndir = 0; + try (BufferedReader br = Files.newBufferedReader(cites, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + Integer ui = id2idx.get(tok[0]); + Integer vi = id2idx.get(tok[1]); + if (ui == null || vi == null) continue; // skip unknown ids + int a = ui + 1, b = vi + 1; // to 1-based + if (a == b) continue; + if (adjSet1[a].add(b)) { + adjSet1[b].add(a); + mUndir++; + } + } + } + + @SuppressWarnings("unchecked") + List[] adj1 = new ArrayList[n + 1]; + for (int i = 1; i <= n; i++) { + adj1[i] = new ArrayList<>(adjSet1[i]); + } + + GraphData G = new GraphData(); + G.n = n; + G.m = mUndir; + G.adj1Based = adj1; + G.labels = labels; + G.labelNames = lblNames.toArray(new String[0]); + return G; + } + + // Helpers + static double approxDiameter(int[] nodes, List[] adj1, boolean[] inC) { + if (nodes.length <= 1) return 0.0; + int start = nodes[0]; + BFSResult a = bfsFarthest(start, adj1, inC); + BFSResult b = bfsFarthest(a.node, adj1, inC); + return (double) b.dist; + } + + static BFSResult bfsFarthest(int src, List[] adj1, boolean[] inC) { + int nTot = inC.length; + int[] dist = new int[nTot]; + Arrays.fill(dist, -1); + ArrayDeque q = new ArrayDeque<>(); + q.add(src); + dist[src] = 0; + int bestNode = src, bestDist = 0; + while (!q.isEmpty()) { + int u = q.removeFirst(); + int du = dist[u]; + if (du > bestDist) { bestDist = du; bestNode = u; } + for (int v1 : adj1[u + 1]) { + int v = v1 - 1; + if (!inC[v]) continue; + if (dist[v] >= 0) continue; + dist[v] = du + 1; + q.add(v); + } + } + return new BFSResult(bestNode, bestDist); + } + + static int getSnapshotComponentId(Object 
snap, int[] nodes) { + try { + java.lang.reflect.Field f; + Class cls = snap.getClass(); + try { f = cls.getDeclaredField("root"); } + catch (NoSuchFieldException e1) { + try { f = cls.getDeclaredField("componentId"); } + catch (NoSuchFieldException e2) { + try { f = cls.getDeclaredField("compId"); } + catch (NoSuchFieldException e3) { + try { f = cls.getDeclaredField("id"); } + catch (NoSuchFieldException e4) { f = null; } + } + } + } + if (f != null) { + f.setAccessible(true); + Object v = f.get(snap); + if (v instanceof Integer) return ((Integer) v).intValue(); + if (v instanceof Long) return ((Long) v).intValue(); + if (v != null) return Integer.parseInt(String.valueOf(v)); + } + } catch (Throwable t) { /* ignore */ } + int mn = Integer.MAX_VALUE; + for (int u : nodes) if (u < mn) mn = u; + return mn; + } + + static AlphaKind parseAlpha(String s) { + String t = s.trim().toUpperCase(Locale.ROOT); + if (t.startsWith("DIAM")) return AlphaKind.DIAM; + if (t.contains("LAMBDA")) return AlphaKind.INV_SQRT_LAMBDA2; + return AlphaKind.DIAM; + } + + static String intArrayToJson(int[] arr) { + StringBuilder sb = new StringBuilder(); + sb.append('['); + for (int i = 0; i < arr.length; i++) { + if (i > 0) sb.append(','); + sb.append(arr[i]); + } + sb.append(']'); + return sb.toString(); + } + + enum AlphaKind {DIAM, INV_SQRT_LAMBDA2} + + static final class GraphData { + int n; + long m; + List[] adj1Based; + int[] labels; + String[] labelNames; + } + static final class BFSResult { final int node, dist; BFSResult(int node, int dist) { this.node = node; this.dist = dist; } } +} diff --git a/src/old/LRMCseedsTUD_streamsafe.java b/src/old/LRMCseedsTUD_streamsafe.java new file mode 100644 index 0000000000000000000000000000000000000000..9749135b83349152441bf4e80c485cf3c189c7e5 --- /dev/null +++ b/src/old/LRMCseedsTUD_streamsafe.java @@ -0,0 +1,261 @@ +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import 
java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.function.Consumer; + +/** + * LRMCseedsTUD_streamsafe.java + * + * Streaming LRMC seeder for graph-classification datasets (e.g., TUDataset: + * ENZYMES, PROTEINS, COLLAB, D&D). Accepts either a single canonical + * 0-indexed undirected edgelist file (u v with u < v), or a directory of such + * files, and produces seeds JSON using the streaming reconstruction entry point + * in clique2_ablations_parallel2. + * + * Usage: + * Single file: + * java -Xmx4g LRMCseedsTUD_streamsafe input_graph.txt output_seeds.json [DIAM|INV_SQRT_LAMBDA2] [epsilon] + * + * Directory of files: + * java -Xmx4g LRMCseedsTUD_streamsafe input_dir output_dir [DIAM|INV_SQRT_LAMBDA2] [epsilon] + * + * Input edgelists are expected 0-indexed, undirected, no self-loops, one line + * per edge with u < v. Output JSON mirrors LRMCseedsReddit_streamsafe format. + */ +public class LRMCseedsTUD_streamsafe { + + public static void main(String[] args) throws Exception { + if (args.length < 2) { + System.err.println("Usage: java LRMCseedsTUD_streamsafe [alpha_kind] [epsilon]"); + return; + } + + final Path inPath = Paths.get(args[0]); + final Path outPath = Paths.get(args[1]); + final AlphaKind alphaKind = (args.length >= 3 ? parseAlpha(args[2]) : AlphaKind.DIAM); + final double eps = (args.length >= 4 ? 
Double.parseDouble(args[3]) : 1e-6); + + if (Files.isDirectory(inPath)) { + Files.createDirectories(outPath); + try (java.util.stream.Stream stream = Files.list(inPath)) { + stream.filter(Files::isRegularFile) + .filter(p -> { + String s = p.getFileName().toString().toLowerCase(Locale.ROOT); + return s.endsWith(".txt") || s.endsWith(".csv"); + }) + .forEach(p -> { + try { + GraphData Gi = loadEdgeList0Based(p); + String base = p.getFileName().toString().replaceFirst("\\.(txt|csv)$", ""); + Path outFile = outPath.resolve(base + ".json"); + runOnce(Gi, outFile, alphaKind, eps); + } catch (Exception e) { + throw new RuntimeException("Failed on " + p, e); + } + }); + } + } else { + GraphData G = loadEdgeList0Based(inPath); + System.out.printf(Locale.US, "# Loaded edge list: n=%d, m=%d%n", G.n, G.m); + runOnce(G, outPath, alphaKind, eps); + } + } + + static void runOnce(GraphData G, Path outSeeds, AlphaKind alphaKind, double eps) throws Exception { + PeakTracker tracker = new PeakTracker(G, eps, alphaKind); + // Streaming entry point (required by current pipeline) + clique2_ablations_parallel2.runLaplacianRMCStreaming(G.adj1Based, tracker); + tracker.writeJson(outSeeds); + System.out.println("# Done. 
wrote " + outSeeds.toAbsolutePath()); + } + + // Streaming peak tracker (same output schema as Reddit seeder) + static final class PeakTracker implements Consumer { + final GraphData G; + final double epsilon; + final AlphaKind alphaKind; + + final boolean[] inC; + final Map bestIdxByComp = new LinkedHashMap<>(); + final Map bestScoreByComp = new HashMap<>(); + final List arrivals = new ArrayList<>(); + int idx = 0; + + static final class Rec { + final int compId; + final int sid; + final double score; + final int[] nodes; + Rec(int compId, int sid, double score, int[] nodes) { + this.compId = compId; this.sid = sid; this.score = score; this.nodes = nodes; + } + } + + PeakTracker(GraphData G, double epsilon, AlphaKind alphaKind) { + this.G = G; this.epsilon = epsilon; this.alphaKind = alphaKind; + this.inC = new boolean[G.n]; + } + + @Override + public void accept(clique2_ablations_parallel2.SnapshotDTO s) { + final int[] nodes = s.nodes; + final int k = nodes.length; + if (k == 0) return; + for (int u : nodes) inC[u] = true; + + final double Q = s.Q; + final double sc = k / (Q + epsilon); + final int compId = s.componentId; + final int sid = idx++; + + if (!bestIdxByComp.containsKey(compId) || sc > bestScoreByComp.get(compId)) { + bestIdxByComp.put(compId, sid); + bestScoreByComp.put(compId, sc); + } + arrivals.add(new Rec(compId, sid, sc, Arrays.copyOf(nodes, nodes.length))); + + for (int u : nodes) inC[u] = false; + } + + void writeJson(Path outJson) throws IOException { + final int n = G.n; + boolean[] covered = new boolean[n]; + + try (BufferedWriter w = Files.newBufferedWriter(outJson, StandardCharsets.UTF_8)) { + w.write("{\n"); + w.write("\"meta\":{"); + w.write("\"epsilon\":" + epsilon); + w.write(",\"alpha_kind\":\"" + (alphaKind == AlphaKind.DIAM ? 
"DIAM" : "INV_SQRT_LAMBDA2") + "\""); + w.write(",\"n\":" + G.n); + w.write(",\"m\":" + G.m); + w.write(",\"mode\":\"peaks_per_component+singletons(stream)\""); + w.write("},\n"); + w.write("\"clusters\":[\n"); + + boolean first = true; + int nextClusterId = 0; + + for (Rec r : arrivals) { + Integer best = bestIdxByComp.get(r.compId); + if (best != null && best == r.sid) { + if (!first) w.write(",\n"); + first = false; + w.write(" {\"cluster_id\":" + (nextClusterId++)); + w.write(",\"component_id\":" + r.compId); + w.write(",\"snapshot_id\":" + r.sid); + w.write(",\"score\":" + r.score); + w.write(",\"k_seed\":" + r.nodes.length); + w.write(",\"members\":" + intArrayToJson(r.nodes)); + w.write(",\"seed_nodes\":" + intArrayToJson(r.nodes)); + w.write("}"); + for (int u : r.nodes) covered[u] = true; + } + } + + for (int u = 0; u < n; u++) { + if (!covered[u]) { + if (!first) w.write(",\n"); + first = false; + int[] singleton = new int[]{u}; + w.write(" {\"cluster_id\":" + (nextClusterId++)); + w.write(",\"component_id\":-1"); + w.write(",\"snapshot_id\":-1"); + w.write(",\"score\":0.0"); + w.write(",\"k_seed\":1"); + w.write(",\"members\":" + intArrayToJson(singleton)); + w.write(",\"seed_nodes\":" + intArrayToJson(singleton)); + w.write(",\"is_singleton\":true"); + w.write("}"); + } + } + w.write("\n]}"); + } + } + } + + // Loader for canonical 0-based undirected edgelists (u < v). Builds 1-based adjacency. 
+ static GraphData loadEdgeList0Based(Path edgesFile) throws IOException { + int[] deg = new int[1 << 12]; + int maxNode = -1; + long mUndir = 0; + try (BufferedReader br = Files.newBufferedReader(edgesFile, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + int u = Integer.parseInt(tok[0]); + int v = Integer.parseInt(tok[1]); + if (u == v) continue; + int needed = Math.max(u, v) + 1; + if (needed > deg.length) { + int newLen = deg.length; + while (newLen < needed) newLen <<= 1; + deg = Arrays.copyOf(deg, newLen); + } + deg[u]++; deg[v]++; + if (u < v) mUndir++; + if (u > maxNode) maxNode = u; + if (v > maxNode) maxNode = v; + } + } + final int n = maxNode + 1; + @SuppressWarnings("unchecked") + List[] adj1 = (List[]) new List[n + 1]; + for (int i = 1; i <= n; i++) adj1[i] = new ArrayList<>(deg[i - 1]); + try (BufferedReader br = Files.newBufferedReader(edgesFile, StandardCharsets.UTF_8)) { + String s; + while ((s = br.readLine()) != null) { + s = s.trim(); + if (s.isEmpty() || s.startsWith("#")) continue; + String[] tok = s.split("\\s+|,"); + if (tok.length < 2) continue; + int u = Integer.parseInt(tok[0]); + int v = Integer.parseInt(tok[1]); + if (u == v) continue; + adj1[u + 1].add(v + 1); + adj1[v + 1].add(u + 1); + } + } + GraphData G = new GraphData(); + G.n = n; G.m = mUndir; G.adj1Based = adj1; + G.labels = new int[n]; Arrays.fill(G.labels, -1); + G.labelNames = new String[0]; + return G; + } + + static AlphaKind parseAlpha(String s) { + String t = s.trim().toUpperCase(Locale.ROOT); + if (t.startsWith("DIAM")) return AlphaKind.DIAM; + if (t.contains("LAMBDA")) return AlphaKind.INV_SQRT_LAMBDA2; + return AlphaKind.DIAM; + } + + static String intArrayToJson(int[] arr) { + StringBuilder sb = new StringBuilder(); + sb.append('['); + for (int i = 0; i < arr.length; i++) { + if (i > 0) sb.append(','); 
import java.io.*;
import java.util.*;

/**
 * Laplacian-energy reverse-reconstruction clustering (non-squared variant).
 *
 * Reads "n m" followed by m undirected edges (1-based vertex ids) from the
 * file named by args[1] (falling back to stdin on open failure), peels
 * vertices by nondecreasing degree, then re-inserts them in reverse order
 * while maintaining each component's Laplacian energy incrementally, and
 * reports the best size / (energy + EPS) score together with its DSU root.
 */
public class clique2_nosquared {
    static int n, m;

    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            // Fix 1: correct program name (message said "clique2_fixb") and
            //        restore the <EPS> <input-file> placeholders.
            // Fix 2: return after printing usage; previously execution fell
            //        through and args[0] threw ArrayIndexOutOfBoundsException.
            System.err.println("Usage: java clique2_nosquared <EPS> <input-file>");
            return;
        }
        final double EPS = Double.parseDouble(args[0]);

        Scanner r;
        try {
            r = new Scanner(new FileReader(args[1]));
        } catch (IOException e) {
            System.err.println("Could not open " + args[1] + ". Falling back to stdin.");
            r = new Scanner(System.in);
        }

        n = r.nextInt();
        m = r.nextInt();

        @SuppressWarnings("unchecked")
        List<Integer>[] adj = new ArrayList[n + 1];
        for (int i = 1; i <= n; i++) adj[i] = new ArrayList<>();
        for (int i = 0; i < m; i++) {
            int a = r.nextInt(), b = r.nextInt();
            adj[a].add(b);
            adj[b].add(a);
        }
        r.close();

        long t0 = System.nanoTime();
        Result res = runLaplacianRMC(adj, EPS);
        long t1 = System.nanoTime();

        System.out.printf(Locale.US, "%.6f, %d%n", res.bestSL, res.bestRoot);
        System.out.printf(Locale.US, "Runtime: %.3f ms%n", (t1 - t0) / 1_000_000.0);
    }

    /**
     * Core algorithm. Phase 1 peels vertices in nondecreasing-degree order
     * (lazy-deletion heap); phase 2 replays the peel in reverse, maintaining
     * per-component Laplacian energy with O(deg) incremental updates.
     */
    static Result runLaplacianRMC(List<Integer>[] adj, double EPS) {
        // Phase 1: peel by nondecreasing degree with a stale-check heap
        int[] deg = new int[n + 1];
        PriorityQueue<Pair> pq = new PriorityQueue<>();
        for (int i = 1; i <= n; i++) {
            deg[i] = adj[i].size();
            pq.add(new Pair(i, deg[i]));
        }
        Deque<Pair> stack = new ArrayDeque<>(n);
        while (!pq.isEmpty()) {
            Pair p = pq.poll();
            if (p.degree != deg[p.node]) continue; // stale heap entry
            stack.push(p);
            for (int v : adj[p.node]) {
                if (deg[v] > 0) {
                    deg[v]--;
                    pq.add(new Pair(v, deg[v]));
                }
            }
            deg[p.node] = 0;
        }

        // Phase 2: reverse reconstruction with batched Laplacian update
        DSU dsu = new DSU(n);
        boolean[] inGraph = new boolean[n + 1];
        int[] d = new int[n + 1];            // internal degree inside the evolving graph
        long[] compEnergy = new long[n + 1]; // component energy, indexed by DSU root

        // Stamps: distinct-root accumulation, and O(1) membership tests for A
        int[] rootSeenStamp = new int[n + 1];
        int stamp = 1;
        int[] inAStamp = new int[n + 1];
        int aStamp = 1;

        double bestSL = 0.0;
        int bestRoot = 0;

        while (!stack.isEmpty()) {
            Pair item = stack.pop();
            int u = item.node;

            // Collect already-inserted neighbors A
            List<Integer> A = new ArrayList<>();
            for (int v : adj[u]) if (inGraph[v]) A.add(v);

            // Sum energies of distinct neighbor components BEFORE the union
            long mergedEnergy = 0L;
            stamp++;
            for (int v : A) {
                int rv = dsu.find(v);
                if (rootSeenStamp[rv] != stamp) {
                    rootSeenStamp[rv] = stamp;
                    mergedEnergy += compEnergy[rv];
                }
            }

            // Mark A for O(1) membership tests
            aStamp++;
            for (int v : A) inAStamp[v] = aStamp;

            // Delta on old edges touching A, restricted to the current in-graph
            long deltaOld = 0L;
            for (int w : A) {
                for (int x : adj[w]) {
                    if (!inGraph[x]) continue;           // not inserted yet
                    if (inAStamp[x] == aStamp) continue; // x also in A: change = 0
                    deltaOld += 2L * ((long) d[w] - (long) d[x]) + 1L;
                }
            }

            // Contribution from the |A| new edges (u, w) with w in A
            int degU = A.size();
            long deltaNew = 0L;
            for (int w : A) {
                long t = (long) degU - ((long) d[w] + 1L);
                deltaNew += t * t;
            }

            // Create u and union with all neighbors in A
            dsu.makeIfNeeded(u);
            int root = u;
            for (int v : A) root = dsu.union(root, v);

            // Activate u and update internal degrees
            inGraph[u] = true;
            d[u] = degU;
            for (int w : A) d[w]++;

            // The merged component's energy
            compEnergy[root] = mergedEnergy + deltaOld + deltaNew;

            // Score the component containing u
            int compRoot = dsu.find(u);
            int compSize = dsu.size[compRoot];
            double sL = compSize / (compEnergy[compRoot] + EPS);
            if (sL > bestSL) {
                bestSL = sL;
                bestRoot = compRoot;
            }
        }

        Result out = new Result();
        out.bestSL = bestSL;
        out.bestRoot = bestRoot;
        return out;
    }

    // Helpers

    /** Best score and the DSU root of the component that achieved it. */
    static class Result {
        double bestSL;
        int bestRoot;
    }

    /** (node, degree) heap entry; ordered by degree, ties broken by node id. */
    static class Pair implements Comparable<Pair> {
        final int node, degree;
        Pair(int node, int degree) { this.node = node; this.degree = degree; }
        public int compareTo(Pair o) {
            if (degree != o.degree) return Integer.compare(degree, o.degree);
            return Integer.compare(node, o.node);
        }
    }

    /** Union-find with lazy node creation, union by size, path compression. */
    static class DSU {
        final int[] parent;
        final int[] size;
        final boolean[] made;

        DSU(int n) {
            parent = new int[n + 1];
            size = new int[n + 1];
            made = new boolean[n + 1];
        }
        void makeIfNeeded(int v) {
            if (!made[v]) {
                made[v] = true;
                parent[v] = v;
                size[v] = 1;
            }
        }
        int find(int v) {
            if (!made[v]) return v; // treat as isolated until made
            if (parent[v] != v) parent[v] = find(parent[v]);
            return parent[v];
        }
        int union(int a, int b) {
            makeIfNeeded(a);
            makeIfNeeded(b);
            int ra = find(a), rb = find(b);
            if (ra == rb) return ra;
            if (size[ra] < size[rb]) { int t = ra; ra = rb; rb = t; }
            parent[rb] = ra;
            size[ra] += size[rb];
            return ra;
        }
    }
}
+ } + + static Result runLaplacianRMC(List[] adj, double EPS) { + return new Result(); + } + + // Helpers + static class Result { + double bestSL; + int bestRoot; + } + + static class Pair implements Comparable { + final int node, degree; + Pair(int node, int degree) { this.node = node; this.degree = degree; } + public int compareTo(Pair o) { + if (degree != o.degree) return Integer.compare(degree, o.degree); + return Integer.compare(node, o.node); + } + } + + static class DSU { + final int[] parent; + final int[] size; + final boolean[] made; + + DSU(int n) { + parent = new int[n + 1]; + size = new int[n + 1]; + made = new boolean[n + 1]; + } + void makeIfNeeded(int v) { + if (!made[v]) { + made[v] = true; + parent[v] = v; + size[v] = 1; + } + } + int find(int v) { + if (!made[v]) return v; // treat as isolated until made + if (parent[v] != v) parent[v] = find(parent[v]); + return parent[v]; + } + int union(int a, int b) { + makeIfNeeded(a); + makeIfNeeded(b); + int ra = find(a), rb = find(b); + if (ra == rb) return ra; + if (size[ra] < size[rb]) { int t = ra; ra = rb; rb = t; } + parent[rb] = ra; + size[ra] += size[rb]; + return ra; + } + } +} diff --git a/src/old/export_reddit_edgelist.py b/src/old/export_reddit_edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..de12139a72899a8b8d4eb5e1b629f63c32d05fdf --- /dev/null +++ b/src/old/export_reddit_edgelist.py @@ -0,0 +1,41 @@ +# export_reddit_edgelist_canonical.py +# Writes EACH undirected edge exactly once: "u v" with u < v (0-based), from PyG Reddit. +# This halves the edge count relative to to_undirected and avoids duplication downstream. 
+# +# Usage: +# python export_reddit_edgelist_canonical.py --out reddit_edges.txt --root ./data/Reddit + +import argparse +from pathlib import Path +import torch +from torch_geometric.datasets import Reddit + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--root", type=str, default="./data/Reddit") + ap.add_argument("--out", type=str, default="reddit_edges.txt") + args = ap.parse_args() + + ds = Reddit(root=args.root); data = ds[0] + ei = data.edge_index # directed; in this dataset it's effectively undirected + outp = Path(args.out); outp.parent.mkdir(parents=True, exist_ok=True) + + # canonical pairs u v: + u, v = v, u + key = (u << 32) | v + if key in seen: + continue + seen.add(key) + f.write(f"{u} {v}\n") + print(f"Wrote {len(seen)} undirected edges to {outp} (nodes: {data.num_nodes})") + +if __name__ == "__main__": + main() diff --git a/src/old/gcn_lrmc_node_classification.py b/src/old/gcn_lrmc_node_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..5046d573a59f6f5e4b47771fd926c587e5e8ac3a --- /dev/null +++ b/src/old/gcn_lrmc_node_classification.py @@ -0,0 +1,333 @@ +# gcn_lrmc_node_classify.py +# Node classification with GCN + L-RMC (static pooling + unpool + skip) +# Usage: +# python gcn_lrmc_node_classify.py --dataset Cora --lrmc_json /path/to/lrmc_seeds.json +# Options: +# --use_a2 true|false (default true; use A^2 before pooling as in Graph U-Nets) +# --epochs 200 --lr 0.005 --hidden 64 --cluster_hidden 64 --dropout 0.5 + +import argparse, json, os +import numpy as np, torch +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.optim import Adam +from torch_geometric.datasets import Planetoid +from torch_geometric.nn import GCNConv, BatchNorm +from torch_geometric.utils import coalesce, to_undirected, remove_self_loops +from torch_geometric.utils import add_self_loops +from torch_scatter import scatter_mean +from torch_sparse import spspmm + +from rich import print + +# 
# -----------------------------
# L-RMC assignment utilities
# -----------------------------

def load_lrmc_assignment(json_path, num_nodes):
    """
    Build a single hard assignment: node -> cluster_id in [0, K-1].

    If nodes appear in multiple clusters, keep the one with highest 'score'.
    If any nodes are unassigned, put them into their own singleton clusters.

    Returns:
        assignment: LongTensor [num_nodes] with cluster ids
        clusters: list of lists (members per cluster) aligned to cluster ids
    """
    with open(json_path, 'r') as fh:
        payload = json.load(fh)

    # Prefer higher-scoring clusters when a node appears in several
    # (stable sort preserves file order among equal scores).
    ranked = sorted(payload.get("clusters", []),
                    key=lambda c: float(c.get("score", 0.0)), reverse=True)

    owner = [-1] * num_nodes  # node -> cluster id, -1 while unassigned
    kept = []                 # members per kept cluster; index == cluster id

    for cl in ranked:
        # Only members in range and not yet claimed by a better cluster.
        fresh = [u for u in cl.get("members", [])
                 if 0 <= u < num_nodes and owner[u] == -1]
        if not fresh:
            continue  # empty cluster, or every member already taken
        for u in fresh:
            owner[u] = len(kept)
        kept.append(fresh)

    # Leftover nodes become singleton clusters so the assignment is total.
    for u in range(num_nodes):
        if owner[u] == -1:
            owner[u] = len(kept)
            kept.append([u])

    # Cluster ids are contiguous [0..K-1] by construction.
    return torch.tensor(owner, dtype=torch.long), kept


def lrmc_stats(assignment, clusters, edge_index):
    """Print cluster-size and edge-locality diagnostics for an assignment."""
    N = assignment.numel()
    K = int(assignment.max()) + 1
    sizes = [len(c) for c in clusters]
    sing = sum(1 for s in sizes if s == 1)
    print(f"[L-RMC] N={N} K={K} mean|C|={np.mean(sizes):.2f} "
          f"median|C|={np.median(sizes):.0f} singleton%={100*sing/K:.1f}%")
    # How many edges stay inside one cluster?
    same = (assignment[edge_index[0]] == assignment[edge_index[1]]).sum().item()
    print(f"[L-RMC] intra-cluster edge ratio = {same/edge_index.size(1):.3f}")


# -----------------------------
# Graph helpers
# -----------------------------

def compute_A2_union(edge_index, num_nodes, device):
    """
    Compute A^2 (binary) and return union edges A OR A^2, undirected & coalesced.
    """
    # Make undirected and coalesced (no weights)
    ei = to_undirected(coalesce(edge_index, num_nodes=num_nodes), num_nodes=num_nodes)

    E = ei.size(1)
    if E == 0:
        return ei  # empty graph: nothing to square

    # Ones weights so the sparse-sparse product counts walks of length 2
    val = torch.ones(E, device=device)
    # spspmm: (m x k) @ (k x n) with m = n = k = num_nodes
    ei2, val2 = spspmm(ei, val, ei, val, num_nodes, num_nodes, num_nodes)
    # Drop self-loops from A^2 (GCNConv adds its own self-loops later)
    ei2, _ = remove_self_loops(ei2)
    # Binarize & union with A; coalesce drops the duplicates
    ei_aug = torch.cat([ei, ei2], dim=1)
    ei_aug = to_undirected(coalesce(ei_aug, num_nodes=num_nodes), num_nodes=num_nodes)
    return ei_aug
+ """ + c_src = assignment[edge_index_aug[0]] + c_dst = assignment[edge_index_aug[1]] + c_ei = torch.stack([c_src, c_dst], dim=0) + c_ei = to_undirected(coalesce(c_ei, num_nodes=num_clusters), num_nodes=num_clusters) + return c_ei + + +# ----------------------------- +# Model +# ----------------------------- + +class Gate(nn.Module): + def __init__(self, d_enc, d_c): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(d_enc + d_c, d_enc, bias=True), + nn.ReLU(), + nn.Linear(d_enc, d_enc, bias=True), + nn.Sigmoid(), + ) + def forward(self, h_enc, h_cluster_broadcast): + g = self.mlp(torch.cat([h_enc, h_cluster_broadcast], dim=-1)) + return h_enc + g * h_cluster_broadcast # residual gated add + +class GCN_LRMC_NodeClassifier(nn.Module): + """ + Encoder: GCN -> GCN on original graph + Pool: aggregate encoder features per L-RMC cluster + Coarse: GCN -> (optional GCN) on cluster graph + Unpool: broadcast cluster features back to nodes + Decoder: GCN (on original graph) -> logits + """ + def __init__(self, in_dim, hidden_dim, cluster_hidden_dim, out_dim, + edge_index, assignment, cluster_edge_index, dropout=0.5): + super().__init__() + self.edge_index = edge_index # original graph edges + self.assignment = assignment # [N] + self.cluster_edge_index = cluster_edge_index # edges on cluster graph + self.num_clusters = int(assignment.max().item() + 1) + self.dropout = dropout + + # Encoder on node graph + self.enc1 = GCNConv(in_dim, hidden_dim, improved=True) + self.enc2 = GCNConv(hidden_dim, hidden_dim, improved=True) + + # GCN(s) on cluster graph + self.cgc1 = GCNConv(hidden_dim, cluster_hidden_dim, improved=True) + self.cgc2 = GCNConv(cluster_hidden_dim, cluster_hidden_dim, improved=True) + + # Decoder on node graph (combine skip from encoder + broadcast from cluster) + dec_in = hidden_dim + cluster_hidden_dim + self.dec1 = GCNConv(dec_in, hidden_dim, improved=True) + self.cls = GCNConv(hidden_dim, out_dim, improved=True) # final logits + + self.bn_e1 = 
BatchNorm(hidden_dim) + self.bn_e2 = BatchNorm(hidden_dim) + self.bn_c1 = BatchNorm(cluster_hidden_dim) + self.bn_c2 = BatchNorm(cluster_hidden_dim) + self.bn_d1 = BatchNorm(hidden_dim) + self.gate = Gate(hidden_dim, cluster_hidden_dim) + + def forward(self, x): + # Encoder on original graph + h = F.dropout(x, p=self.dropout, training=self.training) + h = F.relu(self.bn_e1(self.enc1(h, self.edge_index))) + h = F.dropout(h, p=self.dropout, training=self.training) + h2 = F.relu(self.bn_e2(self.enc2(h, self.edge_index))) + h = h + h2 + h_enc = h # skip for decoder + + # Pool: aggregate encoder features to clusters (mean) + # cluster_x: [K, hidden_dim] + cluster_x = scatter_mean(h_enc, self.assignment, dim=0, dim_size=self.num_clusters) + + # Coarse GCN(s) on cluster graph + hc = F.dropout(cluster_x, p=self.dropout, training=self.training) + hc = F.relu(self.bn_c1(self.cgc1(cluster_x, self.cluster_edge_index))) + hc = F.dropout(hc, p=self.dropout, training=self.training) + hc2 = F.relu(self.bn_c2(self.cgc2(hc, self.cluster_edge_index))) + hc = hc + hc2 + + # Unpool: broadcast coarse features back to nodes via assignment + hc_broadcast = hc[self.assignment] # [N, cluster_hidden_dim] + + # # after hc_broadcast is computed + # g_in = torch.cat([h_enc, hc_broadcast], dim=1) + # gate = torch.sigmoid(nn.Linear(g_in.size(1), h_enc.size(1)).to(g_in.device)(g_in)) + # h_dec_in = h_enc + gate * hc_broadcast # gated residual instead of concat + + # Decoder on original graph + h_dec_in = torch.cat([h_enc, hc_broadcast], dim=1) # [N, hidden_dim + cluster_hidden_dim] + h = F.dropout(h_dec_in, p=self.dropout, training=self.training) + h = F.relu(self.dec1(h, self.edge_index)) + h = F.dropout(h, p=self.dropout, training=self.training) + out = self.cls(h, self.edge_index) # logits [N, C] + return out + + +# ----------------------------- +# Train / Eval +# ----------------------------- + +@torch.no_grad() +def evaluate(model, data): + model.eval() + out = model(data.x) + y = data.y + 
pred = out.argmax(dim=-1) + + def acc(mask): + m = mask if mask.dtype == torch.bool else mask.bool() + if m.sum() == 0: + return 0.0 + return (pred[m] == y[m]).float().mean().item() + + return acc(data.train_mask), acc(data.val_mask), acc(data.test_mask) + + +def train_loop(model, data, epochs=200, lr=5e-3, weight_decay=5e-4, patience=100): + optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) + best_val, best_test = 0.0, 0.0 + best_state = None + no_improve = 0 + + for epoch in range(1, epochs + 1): + model.train() + optimizer.zero_grad() + logits = model(data.x) + loss = F.cross_entropy(logits[data.train_mask], data.y[data.train_mask]) + loss.backward() + optimizer.step() + + tr, va, te = evaluate(model, data) + if va > best_val: + best_val, best_test = va, te + best_state = {k: v.detach().cpu().clone() for k, v in model.state_dict().items()} + no_improve = 0 + else: + no_improve += 1 + + + print(f"Epoch {epoch:03d} | loss={loss.item():.4f} | " + f"train={tr*100:.2f}% val={va*100:.2f}% test={te*100:.2f}% test@best={best_test*100:.2f}%") + + if no_improve >= patience: + print(f"Early stopping at epoch {epoch} (no val improvement for {patience})") + break + + if best_state is not None: + model.load_state_dict(best_state) + tr, va, te = evaluate(model, data) + print(f"\nFinal (reloaded best): train={tr*100:.2f}% val={va*100:.2f}% test={te*100:.2f}%") + return te + + +# ----------------------------- +# Main +# ----------------------------- + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset", type=str, default="Cora", choices=["Cora", "Citeseer", "Pubmed"]) + parser.add_argument("--lrmc_json", type=str, required=True) + parser.add_argument("--use_a2", type=str, default="true", help="Use A^2 before pooling (true/false)") + parser.add_argument("--hidden", type=int, default=64) + parser.add_argument("--cluster_hidden", type=int, default=64) + parser.add_argument("--dropout", type=float, default=0.5) + 
parser.add_argument("--epochs", type=int, default=200) + parser.add_argument("--lr", type=float, default=5e-3) + parser.add_argument("--weight_decay", type=float, default=5e-4) + parser.add_argument("--seed", type=int, default=42) + args = parser.parse_args() + + torch.manual_seed(args.seed) + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + dataset = Planetoid(root=os.path.join("data", args.dataset), name=args.dataset) + data = dataset[0].to(device) + num_nodes = data.num_nodes + in_dim = dataset.num_node_features + out_dim = dataset.num_classes + + # Load L-RMC assignment + assignment, clusters = load_lrmc_assignment(args.lrmc_json, num_nodes) + assignment = assignment.to(device) + num_clusters = int(assignment.max().item() + 1) + print(f"[L-RMC] Loaded clusters: K={num_clusters} (N={num_nodes})") + + lrmc_stats(assignment, clusters, data.edge_index) + + # Build augmented node edge_index (A or A^2 ∪ A), then cluster edges + use_a2 = args.use_a2.lower() in ("1", "true", "yes", "y") + if use_a2: + edge_index_aug = compute_A2_union(data.edge_index, num_nodes, device) + print("[L-RMC] Using A^2 ∪ A before pooling (connectivity augmentation).") + else: + edge_index_aug = to_undirected(coalesce(data.edge_index, num_nodes=num_nodes), num_nodes=num_nodes) + print("[L-RMC] Using original A for pooling.") + + cluster_edge_index = build_cluster_edges(edge_index_aug, assignment, num_clusters) + + # Build model + model = GCN_LRMC_NodeClassifier( + in_dim=in_dim, + hidden_dim=args.hidden, + cluster_hidden_dim=args.cluster_hidden, + out_dim=out_dim, + edge_index=data.edge_index, # original graph for enc/dec + assignment=assignment, # node -> cluster + cluster_edge_index=cluster_edge_index, # cluster graph for coarse GCN + dropout=args.dropout, + ).to(device) + + # Train / evaluate + test_acc = train_loop(model, data, epochs=args.epochs, lr=args.lr, + weight_decay=args.weight_decay, patience=100) + +if __name__ == "__main__": + main() diff --git 
# make_proteins_edgelists_and_seeds.py
# Originally exported PROTEINS/Cora edgelists and called a Java LRMC seeder.
# Extended to canonicalize edgelists (0-indexed, undirected unique edges) and
# to call the Java Reddit_streamsafe seeder for a single file as well.

import argparse
import subprocess
from pathlib import Path
from typing import Tuple

try:  # pretty output when rich is installed; plain print otherwise
    from rich import print
except ImportError:
    pass


def export_edgelists(root: Path, out_dir: Path):
    """Write one canonical edge list (u < v, 0-indexed) per graph in Cora."""
    # Lazy import: only this exporter needs torch_geometric installed.
    from torch_geometric.datasets import Planetoid

    ds = Planetoid(root=str(root), name='Cora')
    out_dir.mkdir(parents=True, exist_ok=True)
    for i, data in enumerate(ds):
        with (out_dir / f"graph_{i:06d}.txt").open('w') as f:
            for u, v in data.edge_index.t().tolist():
                if v > u:  # keep each undirected edge once
                    f.write(f"{u} {v}\n")
    print(f"[export] wrote {len(ds)} edge lists to {out_dir}")


def run_java_seeder(edges_dir: Path, seeds_dir: Path, alpha="DIAM", eps="1e-6"):
    """Compile and run the directory-mode Java LRMC seeder over edges_dir."""
    seeds_dir.mkdir(parents=True, exist_ok=True)
    # Use LRMCseedsProteins_streamsafe2 in directory mode (after our patch)
    subprocess.run(
        ["javac", "LRMCseedsProteins_streamsafe2.java", "clique2_ablations_parallel2.java"],
        check=True)
    subprocess.run(
        ["java", "LRMCseedsProteins_streamsafe2", str(edges_dir), str(seeds_dir), alpha, eps],
        check=True)


def canonicalize_edgelist_file(in_path: Path, out_path: Path,
                               input_is_one_indexed: bool = False) -> Tuple[int, int]:
    """
    Read an edgelist file with lines "u v" possibly 1-indexed, possibly containing
    duplicates and/or both directions, and write a canonical 0-indexed undirected
    edgelist with exactly one line per edge (u < v) and no self-loops.
    Returns (n_nodes_approx, n_edges_out).
    """
    seen = set()
    max_id = -1
    with in_path.open('r') as f:
        for line in f:
            s = line.strip()
            if not s or s.startswith('#'):
                continue  # blank line or comment
            parts = s.replace(',', ' ').split()
            if len(parts) < 2:
                continue
            try:
                u, v = int(parts[0]), int(parts[1])
            except ValueError:
                continue  # tolerate non-numeric junk lines
            if input_is_one_indexed:
                u -= 1
                v -= 1
            if u == v:
                continue  # drop self-loops
            if u > v:
                u, v = v, u
            seen.add((u, v))
            max_id = max(max_id, u, v)
    edges = sorted(seen)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    with out_path.open('w') as w:
        for u, v in edges:
            w.write(f"{u} {v}\n")
    return (max_id + 1, len(edges))


def run_java_seeder_single(edges_file: Path, seeds_out: Path,
                           alpha: str = "DIAM", eps: str = "1e-6"):
    """Compile and run the Reddit_streamsafe Java seeder on a single canonical edgelist."""
    subprocess.run(
        ["javac", "LRMCseedsReddit_streamsafe.java", "clique2_ablations_parallel2.java"],
        check=True)
    subprocess.run(
        ["java", "LRMCseedsReddit_streamsafe", str(edges_file), str(seeds_out), alpha, eps],
        check=True)


def export_cora_edgelist_from_content_cites(content: Path, cites: Path,
                                            out_path: Path) -> Tuple[int, int]:
    """
    Export a canonical 0-indexed undirected edgelist (u < v) from Cora's
    content/cites files using the exact node-id mapping that the Java loader
    uses (insertion order from cora.content).
    Returns (n_nodes, m_edges).
    """
    id2idx = {}
    # Pass 1: node ids in insertion order from cora.content (matches Java loader)
    with content.open('r', encoding='utf-8') as f:
        for line in f:
            s = line.strip()
            if not s:
                continue
            paper_id = s.split()[0]
            if paper_id not in id2idx:
                id2idx[paper_id] = len(id2idx)

    seen = set()
    # Pass 2: map cites to indices; dedup undirected edges, drop self-loops
    with cites.open('r', encoding='utf-8') as f:
        for line in f:
            s = line.strip()
            if not s or s.startswith('#'):
                continue
            parts = s.replace(',', ' ').split()
            if len(parts) < 2:
                continue
            a, b = parts[0], parts[1]
            if a not in id2idx or b not in id2idx:
                continue  # citation to a paper without a content row
            u, v = id2idx[a], id2idx[b]
            if u == v:
                continue
            if u > v:
                u, v = v, u
            seen.add((u, v))

    edges = sorted(seen)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    with out_path.open('w', encoding='utf-8') as w:
        for u, v in edges:
            w.write(f"{u} {v}\n")
    return (len(id2idx), len(edges))


if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--data_root", type=str, default="./data")
    ap.add_argument("--edges_out", type=str, default="./proteins_edgelists")
    ap.add_argument("--seeds_out", type=str, default="./proteins_seeds")
    ap.add_argument("--alpha", type=str, default="DIAM")
    ap.add_argument("--eps", type=str, default="1e-6")
    ap.add_argument("--no_java", action="store_true")
    # Canonicalization / single-file seeding for Cora edgelists
    ap.add_argument("--canonicalize_in", type=str, default="", help="Input edgelist to canonicalize (optional)")
    ap.add_argument("--canonicalize_out", type=str, default="", help="Output path for canonicalized edgelist")
    ap.add_argument("--one_indexed", action="store_true", help="Treat input as 1-indexed during canonicalization")
    ap.add_argument("--java_single_in", type=str, default="", help="Canonical edgelist to feed into Java seeder (overrides directory mode)")
    ap.add_argument("--java_single_out", type=str, default="", help="Output seeds JSON for single-file Java run")
    ap.add_argument("--cora_content", type=str, default="", help="Path to cora.content (to export canonical edgelist)")
    ap.add_argument("--cora_cites", type=str, default="", help="Path to cora.cites (to export canonical edgelist)")
    ap.add_argument("--export_cora_out", type=str, default="", help="Where to write the canonical Cora edgelist")
    args = ap.parse_args()

    root = Path(args.data_root)
    edges_dir = Path(args.edges_out)
    seeds_dir = Path(args.seeds_out)

    # Fix: the optional-mode flags were declared but the code honoring them was
    # commented out, so they were silently ignored. Restored per the commented
    # intent; the directory-mode export only runs when no single-purpose mode
    # was requested.

    # Optional canonicalization path (e.g., for src/cora/graph_000000.txt)
    if args.canonicalize_in and args.canonicalize_out:
        n, m = canonicalize_edgelist_file(Path(args.canonicalize_in),
                                          Path(args.canonicalize_out),
                                          input_is_one_indexed=args.one_indexed)
        print(f"[canonicalize] wrote {args.canonicalize_out} (n≈{n}, m={m})")

    # Optional: export edgelist from Cora content/cites using Java's mapping
    if args.cora_content and args.cora_cites and args.export_cora_out:
        n, m = export_cora_edgelist_from_content_cites(Path(args.cora_content),
                                                       Path(args.cora_cites),
                                                       Path(args.export_cora_out))
        print(f"[cora-export] wrote {args.export_cora_out} (n={n}, m={m})")

    # Optional single-file Java run on a canonical edgelist
    if args.java_single_in and args.java_single_out:
        run_java_seeder_single(Path(args.java_single_in), Path(args.java_single_out),
                               args.alpha, args.eps)
        print(f"[java] wrote seeds to {args.java_single_out}")

    # Original directory-based export + Java for the dataset
    if not args.canonicalize_in and not args.java_single_in and not args.cora_content:
        export_edgelists(root, edges_dir)
        if not args.no_java:
            run_java_seeder(edges_dir, seeds_dir, args.alpha, args.eps)
# ============ Paths ============
SEEDS_JSON = "../seeds_diam_1e-6.json"  # your L-RMC export (clusters JSON)
CORA_CONTENT = "../cora/cora.content"
CORA_CITES = "../cora/cora.cites"

# ============ Sweep settings ============
LABEL_BUDGETS = [20, 10, 5, 3]        # train_per_class
K_RATIOS = [0.10, 0.20, 0.40, 0.80]   # K / N target
SEEDS = [0, 1, 2, 3, 4]               # random seeds per cell

# ============ Train hyperparams ============
HIDDEN = 64
DROPOUT = 0.5
LR = 0.01
WEIGHT_DECAY = 5e-4
EPOCHS = 300
PATIENCE = 50

# DiffPool extras
DIFFPOOL_AUX_WEIGHT = 1e-2  # weight on link + entropy regularizers

# ============ Utils ============
def set_seed(seed: int):
    """Seed Python's and torch's RNGs (CUDA seeding is a no-op without a GPU)."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

def to_undirected(edge_index, num_nodes):
    """Canonicalize an edge list to unique undirected edges without self loops.

    Each surviving edge is stored once as (min(u, v), max(u, v)), in first-seen
    order.

    Args:
        edge_index: [2, E] long tensor of (possibly directed) edges.
        num_nodes: unused; kept for call-site compatibility.

    Returns:
        [2, E'] long tensor; an empty (2, 0) tensor when nothing survives.
    """
    seen = set()
    out = []
    for u, v in edge_index.t().tolist():
        if u == v:
            continue  # drop self loops
        key = (u, v) if u < v else (v, u)
        if key not in seen:
            seen.add(key)
            out.append([key[0], key[1]])
    if not out:
        return torch.empty((2, 0), dtype=torch.long)
    return torch.tensor(out, dtype=torch.long).t().contiguous()

def macro_f1_from_logits(logits, y, mask):
    """Macro-averaged F1 of argmax predictions over the masked nodes.

    Only classes that actually appear in y[mask] contribute to the average;
    returns 0.0 when the mask selects nothing.
    """
    with torch.no_grad():
        pred = logits.argmax(dim=1)
        y_ = y[mask]
        p_ = pred[mask]
        C = int(y.max().item() + 1)
        # Vectorized confusion matrix (row = true class, col = predicted):
        # replaces the original per-sample Python loop with one bincount.
        cm = torch.bincount(y_ * C + p_, minlength=C * C).reshape(C, C)
        eps = 1e-12
        tp = cm.diag().to(torch.float)
        fp = cm.sum(dim=0).to(torch.float) - tp
        fn = cm.sum(dim=1).to(torch.float) - tp
        precision = tp / (tp + fp + eps)
        recall = tp / (tp + fn + eps)
        f1 = 2 * precision * recall / (precision + recall + eps)
        present = cm.sum(dim=1) > 0  # classes with at least one true sample
        return f1[present].mean().item() if present.any() else 0.0

def accuracy_from_logits(logits, y, mask):
    """Plain accuracy of argmax predictions over the masked nodes."""
    with torch.no_grad():
        pred = logits.argmax(dim=1)
        correct = (pred[mask] == y[mask]).sum().item()
        total = int(mask.sum().item())
        return correct / max(total, 1)

# ============ Data ============
def load_cora_from_content_and_cites(content_path: str, cites_path: str):
    """Build a torch_geometric Data object from raw Cora content/cites files.

    content: `<paper_id> <feat_0> ... <feat_k> <class_label>` per line.
    cites:   `<cited_id> <citing_id>` per line; citations whose endpoints are
    not in the content file are dropped, and the edge list is undirected.

    Raises:
        RuntimeError: when no usable edge is found.
    """
    lines = Path(content_path).read_text().strip().splitlines()
    n = len(lines)
    paper_ids, features, labels_raw = [], [], []
    for line in lines:
        toks = line.strip().split()
        paper_ids.append(toks[0])
        labels_raw.append(toks[-1])
        features.append([int(x) for x in toks[1:-1]])
    classes = sorted(set(labels_raw))
    cls2idx = {c: i for i, c in enumerate(classes)}
    y = torch.tensor([cls2idx[c] for c in labels_raw], dtype=torch.long)
    x = torch.tensor(features, dtype=torch.float)

    id2idx = {pid: i for i, pid in enumerate(paper_ids)}
    edges = []
    for line in Path(cites_path).read_text().strip().splitlines():
        parts = line.strip().split()
        if len(parts) < 2:
            continue  # robustness: skip blank/malformed lines instead of crashing
        a, b = parts[0], parts[1]
        if a in id2idx and b in id2idx:
            edges.append((id2idx[a], id2idx[b]))
    if not edges:
        raise RuntimeError("No edges from cites file.")
    edge_index = torch.tensor(edges, dtype=torch.long).t().contiguous()
    edge_index = to_undirected(edge_index, n)

    data = Data(x=x, edge_index=edge_index, y=y)
    data.num_nodes = n
    data.num_classes = len(classes)
    return data

def make_planetoid_style_split(y, num_classes, train_per_class=20, val_size=500, test_size=1000):
    """Random Planetoid-style split: `train_per_class` labeled nodes per class,
    then `val_size` / `test_size` nodes drawn from the remainder.

    Uses torch's global RNG — call set_seed() first for reproducibility.
    Returns (train_mask, val_mask, test_mask), pairwise disjoint bool tensors.
    """
    N = y.size(0)
    all_idx = torch.arange(N)
    train_mask = torch.zeros(N, dtype=torch.bool)
    val_mask = torch.zeros(N, dtype=torch.bool)
    test_mask = torch.zeros(N, dtype=torch.bool)
    for c in range(num_classes):
        idx_c = all_idx[(y == c)]
        if idx_c.numel() == 0:
            continue  # class absent from this graph
        sel = idx_c[torch.randperm(idx_c.numel())[: min(train_per_class, idx_c.numel())]]
        train_mask[sel] = True
    remaining = all_idx[~train_mask]
    remaining = remaining[torch.randperm(remaining.numel())]
    val_k = min(val_size, remaining.numel())
    val_mask[remaining[:val_k]] = True
    rem2 = remaining[val_k:]
    test_k = min(test_size, rem2.numel())
    test_mask[rem2[:test_k]] = True
    return train_mask, val_mask, test_mask

# ============ L-RMC seeds and pooling ============
def load_lrmc_partition(path: str, num_nodes: int):
    """Read an L-RMC seeds JSON (`{"clusters": [{"cluster_id", "members"}]}`)
    and return (cluster_id, K) where cluster_id[u] is node u's cluster.

    Raises:
        RuntimeError: when some node is not covered by any cluster.
    """
    obj = json.loads(Path(path).read_text())
    clusters = obj["clusters"]
    cid_of_node = {}
    for c in clusters:
        cid = int(c["cluster_id"])
        for u in c["members"]:
            cid_of_node[int(u)] = cid
    cluster_id = torch.full((num_nodes,), -1, dtype=torch.long)
    for u, cid in cid_of_node.items():
        if 0 <= u < num_nodes:
            cluster_id[u] = cid
    if (cluster_id < 0).any():
        miss = int((cluster_id < 0).sum().item())
        raise RuntimeError(f"{miss} nodes not covered by seeds.")
    K = int(cluster_id.max().item() + 1)
    return cluster_id, K

def pool_by_partition_weighted(x, edge_index, cluster_id, K):
    """Mean-pool node features into K clusters and build the coarse graph.

    Coarse edge (cu, cv) gets weight = number of fine edges crossing the two
    clusters; intra-cluster edges are dropped.

    Returns (x_pooled [K, F], edge_index_pooled [2, E_c], edge_weight [E_c]).
    """
    if x.dim() != 2:
        raise ValueError(f"Expected x to have shape [N, F], got {x.shape}")
    if cluster_id.shape != (x.shape[0],):
        raise ValueError(f"Expected cluster_id to have shape [{x.shape[0]}], got {cluster_id.shape}")
    sums = torch.zeros((K, x.size(1)), device=x.device, dtype=x.dtype)
    sums.index_add_(0, cluster_id, x)
    # clamp_min(1) guards empty clusters against division by zero
    counts = torch.bincount(cluster_id, minlength=K).clamp_min(1).to(x.device).unsqueeze(1).to(x.dtype)
    x_pooled = sums / counts
    cu = cluster_id[edge_index[0]]
    cv = cluster_id[edge_index[1]]
    pairs = torch.stack([cu, cv], dim=1)
    uniq, w = torch.unique(pairs, dim=0, return_counts=True)
    mask = uniq[:, 0] != uniq[:, 1]  # drop intra-cluster edges
    edge_index_pooled = uniq[mask].t().contiguous()
    edge_weight = w[mask].to(torch.float)
    return x_pooled, edge_index_pooled, edge_weight
def compress_partition_to_K(cluster_id, K_target, edge_index):
    """Merge clusters until at most K_target remain; return (new_ids, K).

    The K_target largest clusters are kept. Every dropped cluster is merged
    into the kept cluster it shares the most edges with (falling back to the
    largest kept cluster when it touches none), then labels are compacted to
    0..K-1. The input tensor is never modified.
    """
    cid = cluster_id.clone()
    K_now = int(cid.max().item() + 1)
    if K_now <= K_target:
        return cid, K_now
    sizes = torch.bincount(cid, minlength=K_now)
    kept = set(int(k) for k in torch.topk(sizes, K_target).indices.tolist())
    # Symmetric inter-cluster edge weights.
    cu = cid[edge_index[0]].tolist()
    cv = cid[edge_index[1]].tolist()
    w = {}
    for a, b in zip(cu, cv):
        if a == b:
            continue
        w[(a, b)] = w.get((a, b), 0) + 1
        w[(b, a)] = w.get((b, a), 0) + 1
    largest_kept = max(kept, key=lambda k: sizes[k].item())
    # Relabel via a lookup table instead of the original per-node Python loop.
    lut = torch.empty(K_now, dtype=cid.dtype)
    for c in range(K_now):
        if c in kept:
            lut[c] = c
        else:
            candidates = [(w.get((c, k), 0), k) for k in kept]
            lut[c] = max(candidates)[1] if candidates else largest_kept
    cid = lut[cid]
    # Compact surviving labels to 0..K-1 (again via LUT).
    kept_sorted = sorted(set(int(x) for x in cid.tolist()))
    remap = torch.empty(K_now, dtype=cid.dtype)
    for new, old in enumerate(kept_sorted):
        remap[old] = new
    cid = remap[cid]
    return cid, len(kept_sorted)

# ============ Models ============
class LrmcSeededPoolGCN(nn.Module):
    """GCN with one pooling step driven by a fixed, precomputed L-RMC partition.

    Node features are gated, mean-pooled per cluster onto a weighted coarse
    graph, convolved once there, broadcast back to nodes, and combined with a
    linear skip connection from the pre-pooling features.
    """

    def __init__(self, in_dim, hidden_dim, out_dim, cluster_id, K, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(in_dim, hidden_dim, add_self_loops=True, normalize=True)
        self.conv2 = GCNConv(hidden_dim, out_dim, add_self_loops=True, normalize=True)
        self.lin_skip = nn.Linear(hidden_dim, out_dim, bias=True)
        self.score = nn.Linear(hidden_dim, 1, bias=False)
        self.dropout = dropout
        self.hidden_dim = hidden_dim  # recorded for the sanity check in forward()
        # Buffer (not a parameter) so it follows the module across .to(device).
        self.register_buffer("cluster_id", cluster_id)
        self.K = K

    def forward(self, x, edge_index):
        if x.dim() != 2:
            raise ValueError(f"Expected x to have shape [N, F], got {x.shape}")
        x1 = F.relu(self.conv1(x, edge_index))
        # BUG FIX: the original compared against the global HIDDEN constant,
        # which wrongly rejected any instance built with hidden_dim != HIDDEN.
        if x1.shape[1] != self.hidden_dim:
            raise ValueError(f"Expected x1 to have shape [N, {self.hidden_dim}], got {x1.shape}")
        x1 = F.dropout(x1, p=self.dropout, training=self.training)
        gate = torch.tanh(self.score(x1))  # [N, 1] per-node soft gate
        if gate.shape != (x1.shape[0], 1):
            raise ValueError(f"Expected gate to have shape [{x1.shape[0]}, 1], got {gate.shape}")
        x1_g = x1 * gate
        if x1_g.shape != x1.shape:
            raise ValueError(f"Expected x1_g to have shape {x1.shape}, got {x1_g.shape}")
        x_p, ei_p, ew_p = pool_by_partition_weighted(x1_g, edge_index, self.cluster_id, self.K)
        x_p = self.conv2(x_p, ei_p, edge_weight=ew_p)
        up = x_p[self.cluster_id]      # broadcast coarse logits back to nodes
        skip = self.lin_skip(x1)
        logits = up + skip
        return logits, 0.0             # no auxiliary loss for this model

class TopKPoolBroadcastGCN(nn.Module):
    """gPool-style baseline: learn scores, keep the top-K nodes, assign each
    dropped node to its highest-degree kept neighbor (or the globally
    highest-degree kept node if it has none), then weighted pooled GCN + skip.
    """

    def __init__(self, in_dim, hidden_dim, out_dim, K_target, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(in_dim, hidden_dim, add_self_loops=True, normalize=True)
        self.conv2 = GCNConv(hidden_dim, out_dim, add_self_loops=True, normalize=True)
        self.lin_skip = nn.Linear(hidden_dim, out_dim, bias=True)
        self.score = nn.Linear(hidden_dim, 1, bias=False)
        self.dropout = dropout
        self.K_target = K_target

    @staticmethod
    def _degrees(edge_index, N):
        # BUG FIX: count BOTH endpoints. The edge list stores each undirected
        # edge once (see to_undirected), so bincount over row 0 alone
        # undercounted and left some nodes with degree 0.
        return torch.bincount(torch.cat([edge_index[0], edge_index[1]]), minlength=N).to(torch.long)

    def forward(self, x, edge_index):
        N = x.size(0)
        x1 = F.relu(self.conv1(x, edge_index))
        x1 = F.dropout(x1, p=self.dropout, training=self.training)
        raw = self.score(x1).squeeze(-1)          # [N] node scores
        gate = torch.tanh(raw).unsqueeze(-1)
        x1_g = x1 * gate
        K = min(self.K_target, N)
        kept = torch.topk(raw, K, sorted=True).indices
        keep_mask = torch.zeros(N, dtype=torch.bool, device=x.device)
        keep_mask[kept] = True
        deg = self._degrees(edge_index, N).to(x.device)
        # Adjacency lists for the dropped-node assignment below.
        u_list, v_list = edge_index[0].tolist(), edge_index[1].tolist()
        neigh = [[] for _ in range(N)]
        for a, b in zip(u_list, v_list):
            neigh[a].append(b)
            neigh[b].append(a)
        cluster_id = torch.full((N,), -1, dtype=torch.long, device=x.device)
        cluster_id[kept] = torch.arange(kept.numel(), device=x.device, dtype=torch.long)
        best_global_kept = kept[torch.argmax(deg[kept])].item() if kept.numel() > 0 else 0
        for u in range(N):
            if keep_mask[u]:
                continue
            cand = [w for w in neigh[u] if keep_mask[w]]
            cluster_id[u] = cluster_id[max(cand, key=lambda z: int(deg[z].item()))] if cand else cluster_id[best_global_kept]
        Kc = int(cluster_id.max().item() + 1)
        x_p, ei_p, ew_p = pool_by_partition_weighted(x1_g, edge_index, cluster_id, Kc)
        x_p = self.conv2(x_p, ei_p, edge_weight=ew_p)
        up = x_p[cluster_id]
        skip = self.lin_skip(x1)
        logits = up + skip
        return logits, 0.0

class DiffPoolGCNNode(nn.Module):
    """One DiffPool layer with K clusters plus a skip head back to nodes.

    Works on a dense adjacency (with self loops), so it is intended for small
    graphs like Cora. forward() also returns DiffPool's link + entropy
    auxiliary loss for the trainer to weight.
    """

    def __init__(self, in_dim, hidden_dim, out_dim, K_clusters, dropout=0.5):
        super().__init__()
        self.dropout = dropout
        self.K = K_clusters
        self.gnn_embed1 = DenseGCNConv(in_dim, hidden_dim)
        self.gnn_embed2 = DenseGCNConv(hidden_dim, hidden_dim)
        self.gnn_assign1 = DenseGCNConv(in_dim, hidden_dim)
        self.gnn_assign2 = DenseGCNConv(hidden_dim, K_clusters)
        self.gnn_post1 = DenseGCNConv(hidden_dim, hidden_dim)
        self.gnn_post2 = DenseGCNConv(hidden_dim, out_dim)
        self.lin_skip = nn.Linear(hidden_dim, out_dim, bias=True)

    def forward(self, x, edge_index):
        N, device = x.size(0), x.device
        # Densify the adjacency and add self loops for the dense convolutions.
        adj_dense = torch.zeros((N, N), device=device)
        adj_dense[edge_index[0], edge_index[1]] = 1.0
        idx = torch.arange(N, device=device)
        adj_dense[idx, idx] = 1.0
        x = x.unsqueeze(0)            # [1, N, F] (fake batch dim)
        adj = adj_dense.unsqueeze(0)  # [1, N, N]
        mask = torch.ones((1, N), device=device)
        z = F.relu(self.gnn_embed1(x, adj, mask))
        z = F.dropout(z, p=self.dropout, training=self.training)
        z = F.relu(self.gnn_embed2(z, adj, mask))
        s = F.relu(self.gnn_assign1(x, adj, mask))
        s = F.dropout(s, p=self.dropout, training=self.training)
        s = self.gnn_assign2(s, adj, mask).softmax(dim=-1)  # [1, N, K] soft assignment
        x_pool, adj_pool, link_loss, ent_loss = dense_diff_pool(z, adj, s, mask)
        h = F.relu(self.gnn_post1(x_pool, adj_pool))
        h = F.dropout(h, p=self.dropout, training=self.training)
        h = self.gnn_post2(h, adj_pool)  # [1, K, C]
        skip = self.lin_skip(z.squeeze(0))  # [N, C]
        # Soft-broadcast cluster logits back to nodes via the assignment matrix.
        logits_nodes = torch.matmul(s.squeeze(0), h.squeeze(0)) + skip
        aux_loss = link_loss + ent_loss
        return logits_nodes, aux_loss

# ============ Train ============
def train_one(model, data, train_mask, val_mask, test_mask, device, aux_weight=0.0):
    """Train with early stopping on validation accuracy; return (test_acc, test_f1).

    Uses the module-level LR / WEIGHT_DECAY / EPOCHS / PATIENCE hyperparameters.
    aux_weight scales the model's auxiliary loss (used for DiffPool).
    Restores the best-validation weights before the final test evaluation.
    """
    model = model.to(device)
    data = data.to(device)
    train_mask = train_mask.to(device)
    val_mask = val_mask.to(device)
    test_mask = test_mask.to(device)

    opt = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
    best_state = None
    best_val = -math.inf
    bad = 0

    for epoch in range(1, EPOCHS + 1):
        model.train()
        opt.zero_grad()
        logits, aux_loss = model(data.x, data.edge_index)
        loss = F.cross_entropy(logits[train_mask], data.y[train_mask])
        if aux_weight > 0.0:
            loss = loss + aux_weight * aux_loss
        loss.backward()
        opt.step()

        model.eval()
        with torch.no_grad():
            logits, _ = model(data.x, data.edge_index)
            val_metric = accuracy_from_logits(logits, data.y, val_mask)

        if val_metric > best_val:
            best_val = val_metric
            # CPU snapshot so the best weights survive model updates.
            best_state = {k: v.detach().cpu().clone() for k, v in model.state_dict().items()}
            bad = 0
        else:
            bad += 1

        if bad >= PATIENCE:
            break  # early stopping

    if best_state is not None:
        model.load_state_dict({k: v.to(device) for k, v in best_state.items()})

    model.eval()
    with torch.no_grad():
        logits, _ = model(data.x, data.edge_index)
        test_acc = accuracy_from_logits(logits, data.y, test_mask)
        test_f1 = macro_f1_from_logits(logits, data.y, test_mask)
    return test_acc, test_f1
# ============ Sweep runner ============
def run_sweeps():
    """Run the low-label × compression sweep on Cora and print a CSV-ish table.

    For every (train-per-class, K/N) cell and every seed: draw a fresh split,
    compress the L-RMC partition to the target K, then train and evaluate the
    three pooling models with the same K budget.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data = load_cora_from_content_and_cites(CORA_CONTENT, CORA_CITES)
    N = data.num_nodes
    cluster_id_full, K_full = load_lrmc_partition(SEEDS_JSON, data.num_nodes)

    print(f"Loaded Cora: N={data.num_nodes}, E={data.edge_index.size(1)}, F={data.num_features}, C={data.num_classes}")
    print(f"L-RMC base K = {K_full} (K/N = {K_full/N:.3f})")

    print("\nResults averaged over seeds:", SEEDS)
    print("tpc, K/N, K, Method, acc_mean, acc_std, f1_mean, f1_std")

    method_names = ["LRMC", "gPool", "DiffPool"]

    for tpc in LABEL_BUDGETS:
        for ratio in K_RATIOS:
            K_target = max(1, int(ratio * N))
            acc_by = {name: [] for name in method_names}
            f1_by = {name: [] for name in method_names}

            for s in SEEDS:
                set_seed(s)
                train_mask, val_mask, test_mask = make_planetoid_style_split(
                    data.y, data.num_classes, train_per_class=tpc, val_size=500, test_size=1000
                )

                # Equal K across methods
                cid_eq, K_eq = compress_partition_to_K(cluster_id_full, K_target, data.edge_index)

                # Lazy builders keep construction interleaved with training so
                # the RNG is consumed in the same order as before.
                builders = [
                    ("LRMC", lambda: LrmcSeededPoolGCN(
                        in_dim=data.num_features, hidden_dim=HIDDEN, out_dim=data.num_classes,
                        cluster_id=cid_eq.to(data.x.device), K=K_eq, dropout=DROPOUT,
                    ), 0.0),
                    ("gPool", lambda: TopKPoolBroadcastGCN(
                        in_dim=data.num_features, hidden_dim=HIDDEN, out_dim=data.num_classes,
                        K_target=K_eq, dropout=DROPOUT,
                    ), 0.0),
                    ("DiffPool", lambda: DiffPoolGCNNode(
                        in_dim=data.num_features, hidden_dim=HIDDEN, out_dim=data.num_classes,
                        K_clusters=K_eq, dropout=0.3,  # a little lower dropout helps DiffPool
                    ), DIFFPOOL_AUX_WEIGHT),
                ]
                for name, build, aux_w in builders:
                    acc, f1 = train_one(build(), data, train_mask, val_mask, test_mask, device,
                                        aux_weight=aux_w)
                    acc_by[name].append(acc)
                    f1_by[name].append(f1)

            def _stats(vals):  # (mean, population std; 0.0 for a single sample)
                return mean(vals), (0.0 if len(vals) < 2 else pstdev(vals))

            for name in method_names:
                am, asd = _stats(acc_by[name])
                fm, fsd = _stats(f1_by[name])
                print(f"{tpc:3d}, {ratio:0.2f}, {K_eq:4d}, {name:7s}, "
                      f"{am:.3f}, {asd:.3f}, {fm:.3f}, {fsd:.3f}")

if __name__ == "__main__":
    run_sweeps()
baseline_train_accs else 0 + avg_baseline_val = np.mean(baseline_val_accs) if baseline_val_accs else 0 + avg_baseline_test = np.mean(baseline_test_accs) if baseline_test_accs else 0 + + sorted_indices = np.argsort(core_sizes) + sorted_core_sizes = np.array(core_sizes)[sorted_indices] + sorted_train_accs = np.array(train_accs)[sorted_indices] + sorted_val_accs = np.array(val_accs)[sorted_indices] + sorted_test_accs = np.array(test_accs)[sorted_indices] + sorted_core_percentages = sorted_core_sizes / dataset_num_nodes + + plt.figure(figsize=(10, 6)) + + plt.plot(sorted_core_percentages, sorted_train_accs, 'o-', label='Train Accuracy') + plt.plot(sorted_core_percentages, sorted_val_accs, 's--', label='Validation Accuracy') + plt.plot(sorted_core_percentages, sorted_test_accs, '^-.', label='Test Accuracy') + + plt.plot(baseline_x_pos, avg_baseline_train, 'o', markersize=10, label='Baseline Train') + plt.plot(baseline_x_pos, avg_baseline_val, 's', markersize=10, label='Baseline Val') + plt.plot(baseline_x_pos, avg_baseline_test, '^', markersize=10, label='Baseline Test') + + plt.xlabel('Percentage of nodes used') + plt.ylabel('Accuracy') + plt.gca().set_ylim(bottom=0) + plt.title('Cora Model Accuracy vs. 
Core Size') + plt.legend() + plt.grid(True) + + if sorted_core_percentages.size == 0: + min_core, max_core = 0, 0 + else: + min_core, max_core = sorted_core_percentages.min(), sorted_core_percentages.max() + + left = -0.05 * max_core if max_core > 0 else -0.1 + right = baseline_x_pos * 1.05 if baseline_x_pos > max_core else max_core * 1.05 + + plt.xlim(left=left, right=right) + + plt.tight_layout() + plt.savefig(output_filename) + print(f"Plot saved as {output_filename}") + +def plot_compare(): + summary_dirs = ["cora_seeds/summary", "cora_seeds/summary_rand"] + labels = ["Test Accuracy (Default)", "Test Accuracy (Random)"] + markers = ['^-.', 'x:'] + + plt.figure(figsize=(10, 6)) + + for summary_dir, label, marker in zip(summary_dirs, labels, markers): + json_files = glob.glob(os.path.join(summary_dir, 'seeds_*.json')) + + core_sizes = [] + test_accs = [] + dataset_num_nodes_set = set() + + for file_path in json_files: + with open(file_path, 'r') as f: + data = json.load(f) + + core_size = data.get('core', {}).get('size') + if core_size is None or core_size == 0: + continue + + core_sizes.append(core_size) + + multi_run = data.get('multi_run', {}) + core_model = multi_run.get('core_model', {}) + + test_accs.append(core_model.get('test', {}).get('mean', 0)) + + num_nodes = data.get('dataset', {}).get('num_nodes') + if num_nodes is not None: + dataset_num_nodes_set.add(num_nodes) + + if not dataset_num_nodes_set: + raise RuntimeError(f"Could not find any 'dataset.num_nodes' in the JSON files in {summary_dir}.") + + dataset_num_nodes = sorted(dataset_num_nodes_set)[0] + sorted_indices = np.argsort(core_sizes) + sorted_core_sizes = np.array(core_sizes)[sorted_indices] + sorted_test_accs = np.array(test_accs)[sorted_indices] + sorted_core_percentages = sorted_core_sizes / dataset_num_nodes + + plt.plot(sorted_core_percentages, sorted_test_accs, marker, label=label) + + plt.xlabel('Percentage of nodes used') + plt.ylabel('Accuracy') + plt.gca().set_ylim(bottom=0) + 
plt.title('Cora Model Test Accuracy Comparison') + plt.legend() + plt.grid(True) + + plt.tight_layout() + output_filename = "cora_summary_compare.png" + plt.savefig(output_filename) + print(f"Comparison plot saved as {output_filename}") + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Plot Cora model accuracy vs. core size.') + parser.add_argument('--summary_dir', type=str, help='Directory containing the summary JSON files.') + parser.add_argument('--baseline', type=str, choices=['random', 'compare'], help='Use a baseline method to determine default paths if --summary_dir is not provided.') + args = parser.parse_args() + + if args.baseline == 'compare': + plot_compare() + else: + if args.summary_dir: + summary_dir = args.summary_dir + if 'rand' in summary_dir: + output_filename = "cora_summary_rand.png" + else: + output_filename = "cora_summary.png" + elif args.baseline == 'random': + summary_dir = "cora_seeds/summary_rand" + output_filename = "cora_summary_rand.png" + else: + summary_dir = "cora_seeds/summary" + output_filename = "cora_summary.png" + + plot_cora_summary(summary_dir, output_filename) \ No newline at end of file