repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/cityscapes_1024x1024_boundary.py | configs/_base_/datasets/cityscapes_1024x1024_boundary.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes_1024x1024.py
'''
_base_ = './cityscapes_boundary.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations',unlabeled_aux=False),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255,sebound_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg','gt_semantic_sebound']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/cityscapes_1024x1024.py | configs/_base_/datasets/cityscapes_1024x1024.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes_1024x1024.py
'''
_base_ = './cityscapes.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations',unlabeled_aux=False),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/pascal_context_boundary.py | configs/_base_/datasets/pascal_context_boundary.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/pascal_context.py
'''
# dataset settings
dataset_type = 'PascalContextDataset_boundary'
data_root = '../data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=False),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255,sebound_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg','gt_semantic_sebound']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=16,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/cityscapes_boundary.py | configs/_base_/datasets/cityscapes_boundary.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes.py
'''
# dataset settings
dataset_type = 'CityscapesDataset_boundary'
data_root = '../data/cityscapes/data_proc_nis'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(1024, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255,sebound_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg','gt_semantic_sebound']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4, # means batch_size = 2 for each GPU
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/train',
ann_dir='gtFine/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/cityscapes.py | configs/_base_/datasets/cityscapes.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes.py
'''
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = '../data/cityscapes/data_proc_nis'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(1024, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4, # means batch_size = 2 for each GPU
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/train',
ann_dir='gtFine/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/cityscapes_512x1024_boundary.py | configs/_base_/datasets/cityscapes_512x1024_boundary.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes_1024x1024.py
'''
_base_ = './cityscapes_boundary.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations',unlabeled_aux=False),
dict(type='Resize', img_scale=(1024, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255,sebound_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg','gt_semantic_sebound']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/pascal_context.py | configs/_base_/datasets/pascal_context.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/pascal_context.py
'''
# dataset settings
dataset_type = 'PascalContextDataset'
data_root = '../data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=False),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=16,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/camvid_boundary.py | configs/_base_/datasets/camvid_boundary.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes.py
'''
# dataset settings
dataset_type = 'CamVidDataset_boundary'
data_root = '../data/camvid'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (520, 520)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=crop_size, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255,sebound_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg','gt_semantic_sebound']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(960, 720),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8, # means batch_size = 2 for each GPU
workers_per_gpu=8,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images_tvt/train',
ann_dir='annotations_tvt/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images_tvt/test',
ann_dir='annotations_tvt/test',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images_tvt/test',
ann_dir='annotations_tvt/test',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/pascal_context_59.py | configs/_base_/datasets/pascal_context_59.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/pascal_context_59.py
'''
# dataset settings
dataset_type = 'PascalContextDataset59'
data_root = '../data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=16,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassContext',
split='ImageSets/SegmentationContext/val.txt',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/datasets/camvid.py | configs/_base_/datasets/camvid.py | '''
This file is modified from:
https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/datasets/cityscapes.py
'''
# dataset settings
dataset_type = 'CamVidDataset'
data_root = '../data/camvid'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (520, 520)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(960,720), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(960, 720),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images_tvt/train',
ann_dir='annotations_tvt/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images_tvt/test',
ann_dir='annotations_tvt/test',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images_tvt/test',
ann_dir='annotations_tvt/test',
pipeline=test_pipeline))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/models/Mobile_Seed.py | configs/_base_/models/Mobile_Seed.py | # model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='EncoderDecoderRefine',
down_ratio = 1,
pretrained=None,
backbone=dict(
type='AFFormer_for_MS_base',
strides=[4, 2, 2, 2],
drop_path_rate = 0.1),
decode_head=dict(),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/_base_/models/afformer.py | configs/_base_/models/afformer.py | # model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='afformer_base',
strides=[4, 2, 2, 2]),
decode_head=dict(
type='CLS',
in_channels=256,
in_index=[0, 1, 2 ,3],
channels=512,
aff_channels=512,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=ham_norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/AFFormer/AFFormer_base_cityscapes.py | configs/AFFormer/AFFormer_base_cityscapes.py | _base_ = [
'../_base_/models/afformer.py', '../_base_/datasets/cityscapes_1024x1024.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
] # filepath in _base_ list are imported as current file in Config.fromfile module,similar to parent class in Python
model = dict(
pretrained='./ckpt/AFFormer_base_ImageNet1k_new.pth',
backbone=dict(
type='afformer_base',
strides=[4, 2, 2, 2]),
decode_head=dict(
in_channels=[216],
in_index=[3],
channels=256,
aff_channels=256,
aff_kwargs=dict(MD_R=16),
num_classes=19
))
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=4, workers_per_gpu=4)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/AFFormer/AFFormer_small_cityscapes.py | configs/AFFormer/AFFormer_small_cityscapes.py | _base_ = [
'../_base_/models/afformer.py', '../_base_/datasets/cityscapes_1024x1024.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_small_ImageNet1k.pth',
backbone=dict(
type='afformer_small',
strides=[4, 2, 2, 2]),
decode_head=dict(
in_channels=[216],
in_index=[3],
channels=256,
aff_channels=256,
aff_kwargs=dict(MD_R=16),
num_classes=19
))
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=8, workers_per_gpu=8)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/AFFormer/AFFormer_tiny_pascal_context.py | configs/AFFormer/AFFormer_tiny_pascal_context.py | _base_ = [
'../_base_/models/afformer.py', '../_base_/datasets/pascal_context.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='afformer_tiny',
strides=[4, 2, 2, 2]),
decode_head=dict(
in_channels=[216],
in_index=[3],
channels=256,
aff_channels=256,
aff_kwargs=dict(MD_R=16),
num_classes=60
)
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 1 GPUs with 16 images per GPU
data=dict(samples_per_gpu=16, workers_per_gpu=16)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/AFFormer/AFFormer_tiny_camvid.py | configs/AFFormer/AFFormer_tiny_camvid.py | _base_ = [
'../_base_/models/afformer.py', '../_base_/datasets/camvid.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='afformer_tiny',
strides=[4, 2, 2, 2]),
decode_head=dict(
in_channels=[216],
in_index=[3],
channels=256,
aff_channels=256,
aff_kwargs=dict(MD_R=16),
num_classes=11
)
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=16, workers_per_gpu=16)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/AFFormer/AFFormer_tiny_cityscapes.py | configs/AFFormer/AFFormer_tiny_cityscapes.py | _base_ = [
'../_base_/models/afformer.py', '../_base_/datasets/cityscapes_1024x1024.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='afformer_tiny',
strides=[4, 2, 2, 2]),
decode_head=dict(
in_channels=[216],
in_index=[3],
channels=256,
aff_channels=256,
aff_kwargs=dict(MD_R=16),
num_classes=19
)
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=8, workers_per_gpu=8)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/AFFormer/AFFormer_tiny_pascal_context_59.py | configs/AFFormer/AFFormer_tiny_pascal_context_59.py | _base_ = [
'../_base_/models/afformer.py', '../_base_/datasets/pascal_context_59.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='afformer_tiny',
strides=[4, 2, 2, 2]),
decode_head=dict(
in_channels=[216],
in_index=[3],
channels=256,
aff_channels=256,
aff_kwargs=dict(MD_R=16),
num_classes=59
)
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 1 GPUs with 16 images per GPU
data=dict(samples_per_gpu=16, workers_per_gpu=16)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/Mobile_Seed/MS_tiny_cityscapes.py | configs/Mobile_Seed/MS_tiny_cityscapes.py | _base_ = [
'../_base_/models/Mobile_Seed.py', '../_base_/datasets/cityscapes_1024x1024_boundary.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='AFFormer_for_MS_tiny'),
decode_head=[
dict(
type="BoundaryHead",
bound_channels = [16,16,32,32],
bound_ratio = 2,
in_channels = [16,64,216,216], # /2 /4 /8 /8
in_index = [1,3,5,6],
channels= 16 + 16 + 32 + 32,
num_classes=1,
loss_decode= dict(type='ML_BCELoss', use_sigmoid=True, loss_weight=1.0,loss_name = "loss_be")),
dict(
type="RefineHead",
fuse_channel = 96,
in_channels=[216],
in_index=[-1],
channels=256,
num_classes=19,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0,loss_name = "loss_ce")),
],
# test_cfg = dict(mode='slide',crop_size=(512, 512), stride=(384, 384))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0 , by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=8, workers_per_gpu=8)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/Mobile_Seed/MS_tiny_camvid.py | configs/Mobile_Seed/MS_tiny_camvid.py | _base_ = [
'../_base_/models/Mobile_Seed.py', '../_base_/datasets/camvid_boundary.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='AFFormer_for_MS_tiny'),
decode_head=[
dict(
type="BoundaryHead",
in_channels = [16,64,216,216], # /2 /4 /8 /8
bound_channels = [16,16,32,32],
bound_ratio = 2,
in_index = [1,3,5,6],
channels= 16 + 16 + 32 + 32,
num_classes=1,
loss_decode= dict(type='ML_BCELoss', use_sigmoid=True, loss_weight=1.0,loss_name = "loss_be")),
dict(
type="RefineHead",
fuse_channel = 96,
in_channels=[216],
in_index=[-1],
channels=256,
num_classes=11,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0,loss_name = "loss_ce")),
],
# test_cfg = dict(mode='slide',crop_size=(512, 512), stride=(384, 384))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0 , by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=16, workers_per_gpu=16)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/Mobile_Seed/MS_tiny_pascal_context_59.py | configs/Mobile_Seed/MS_tiny_pascal_context_59.py | _base_ = [
'../_base_/models/Mobile_Seed.py', '../_base_/datasets/pascal_context_59_boundary.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='AFFormer_for_MS_tiny'),
decode_head=[
dict(
type="BoundaryHead",
bound_channels = [16,16,32,32],
bound_ratio = 2,
in_channels = [16,64,216,216], # /2 /4 /8 /8
in_index = [1,3,5,6],
channels= 16 + 16 + 32 + 32,
num_classes=1,
loss_decode= dict(type='ML_BCELoss', use_sigmoid=True, loss_weight=1.0,loss_name = "loss_be")),
dict(
type="RefineHead",
fuse_channel = 96,
in_channels=[216],
in_index=[-1],
channels=256,
num_classes=59,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0,loss_name = "loss_ce")),
],
# test_cfg = dict(mode='slide',crop_size=(512, 512), stride=(384, 384))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0 , by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=16, workers_per_gpu=16)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/configs/Mobile_Seed/MS_tiny_pascal_context.py | configs/Mobile_Seed/MS_tiny_pascal_context.py | _base_ = [
'../_base_/models/Mobile_Seed.py', '../_base_/datasets/pascal_context_boundary.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
pretrained='./ckpt/AFFormer_tiny_ImageNet1k.pth',
backbone=dict(
type='AFFormer_for_MS_tiny'),
decode_head=[
dict(
type="BoundaryHead",
bound_channels = [16,16,32,32],
bound_ratio = 2,
in_channels = [16,64,216,216], # /2 /4 /8 /8
in_index = [1,3,5,6],
channels= 16 + 16 + 32 + 32,
num_classes=1,
loss_decode= dict(type='ML_BCELoss', use_sigmoid=True, loss_weight=1.0,loss_name = "loss_be")),
dict(
type="RefineHead",
fuse_channel = 96,
in_channels=[216],
in_index=[-1],
channels=256,
num_classes=60,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0,loss_name = "loss_ce")),
],
# test_cfg = dict(mode='slide',crop_size=(512, 512), stride=(384, 384))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.0004, betas=(0.9, 0.999), weight_decay=0.01)
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0 , by_epoch=False)
# By default, models are trained on 2 GPUs with 4 images per GPU
data=dict(samples_per_gpu=16, workers_per_gpu=16)
find_unused_parameters=True
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
WHU-USI3DV/Mobile-Seed | https://github.com/WHU-USI3DV/Mobile-Seed/blob/e9af36ea9ac9dac5506880096cd38423dc3dbf51/demo/image_demo.py | demo/image_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette
import numpy as np
import cv2
from scipy.ndimage import distance_transform_edt
from glob import glob
import os
from tqdm import tqdm
def apply_mask(image, mask, color):
    """Accumulate ``color`` onto ``image`` wherever ``mask`` equals 1.

    Mutates ``image`` in place, one channel at a time, and returns it so the
    call can be chained.
    """
    hit = mask == 1
    for channel in range(3):
        plane = image[:, :, channel]
        image[:, :, channel] = np.where(hit, plane + color[channel], plane)
    return image
def visualize_prediction(path, pred):
    """Save an RGB visualization of a (K, H, W) multi-class boundary map.

    Each class channel is painted with its palette color; pixels claimed by
    several classes get the average of the claiming classes' colors, and
    pixels with no boundary at all are rendered white. The image is written
    with OpenCV, so channels are flipped RGB -> BGR on write.

    NOTE(review): `apply_mask` mutates `image` in place, so colors
    accumulate on the same array across the per-class loop.
    """
    n, h, w = pred.shape
    image = np.zeros((h, w, 3))
    # image = image.astype(np.uint32)
    # 19-entry RGB palette (presumably Cityscapes class order -- confirm).
    colors = [[128, 64, 128],
              [244, 35, 232],
              [70, 70, 70],
              [102, 102, 156],
              [190, 153, 153],
              [153, 153, 153],
              [250, 170, 30],
              [220, 220, 0],
              [107, 142, 35],
              [152, 251, 152],
              [70, 130, 180],
              [220, 20, 60],
              [255, 0, 0],
              [0, 0, 142],
              [0, 0, 70],
              [0, 60, 100],
              [0, 80, 100],
              [0, 0, 230],
              [119, 11, 32]]
    # pred = np.where(pred >= 0.5, 1, 0)
    # Count how many classes mark each pixel as boundary (for averaging).
    boundary_sum = np.zeros((h, w))
    for i in range(n):
        color = colors[i]
        boundary = pred[i,:,:]
        boundary_sum = boundary_sum + boundary
        masked_image = apply_mask(image, boundary, color)
    # Broadcast the per-pixel class count to all 3 channels.
    boundary_sum = np.array([boundary_sum, boundary_sum, boundary_sum])
    boundary_sum = np.transpose(boundary_sum, (1, 2, 0))
    idx = boundary_sum > 0
    # Average the accumulated colors; paint untouched pixels white.
    masked_image[idx] = masked_image[idx]/boundary_sum[idx]
    masked_image[~idx] = 255
    # [..., ::-1] converts RGB -> BGR for cv2.imwrite.
    cv2.imwrite(path,masked_image[...,::-1])
def mask_to_onehot(mask, num_classes):
    """Expand an (H, W) label mask into a (K, H, W) one-hot uint8 array."""
    planes = []
    for label in range(num_classes):
        planes.append(mask == label)
    return np.array(planes).astype(np.uint8)
def onehot_to_mask(mask):
    """Collapse a (K, H, W) one-hot mask to an (H, W) label map.

    Non-zero argmax indices are shifted up by one (channel i maps to label
    i + 1), preserving the original label encoding.
    """
    labels = np.argmax(mask, axis=0)
    nonzero = labels != 0
    labels[nonzero] = labels[nonzero] + 1
    return labels
def onehot_to_multiclass_boundarys(mask, radius, num_classes):
    """Turn a (K, H, W) one-hot mask into per-class binary boundary maps.

    A pixel is 1 in channel k iff its distance to class k's region boundary
    is within ``radius``. Returns a (K, H, W) uint8 array.
    """
    # Reflect-pad spatially so the distance transform behaves at the borders.
    padded = np.pad(mask, ((0, 0), (1, 1), (1, 1)), mode='reflect')
    boundary_maps = []
    for cls in range(num_classes):
        plane = padded[cls, :]
        # Distance to the region boundary, measured from both sides.
        edt = distance_transform_edt(plane) + distance_transform_edt(1.0 - plane)
        edt = edt[1:-1, 1:-1]
        near = (edt > 0) & (edt <= radius)
        boundary_maps.append(near.astype(np.uint8))
    return np.array(boundary_maps)
def main():
    """CLI entry point: run segmentation + boundary inference on one image.

    Writes the colored segmentation to ``out_seg`` and, optionally, a
    semantic-boundary visualization (re-derived from the predicted mask)
    and a colormapped binary-boundary image.
    """
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('out_seg',help='Path to output segment file')
    parser.add_argument('--out_sebound',help='Path to output semantic boundary file')
    parser.add_argument('--out_bibound',help='Path to output binary boundary file')
    parser.add_argument(
        '--device', default='cuda', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes',
        help='Color palette used for segmentation map')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()
    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)
    # Inference yields a pair: segmentation prediction and boundary prediction.
    result = inference_segmentor(model, args.img)
    seg_pred,bound_pred = result
    # show the results
    # reset outfile name
    # args.out_folder = args.img.replace("color","seq_pred")
    # args.outss_file = args.out_file.replace(".png",outss_suffix)
    # args.outse_file = args.outss_file.replace(outss_suffix,outse_suffix)
    # args.outed_file = args.outss_file.replace(outss_suffix,outed_suffix)
    # cv2.imwrite(outcolor_path,cv2.imread(i))
    show_result_pyplot(
        model,
        args.img,
        seg_pred,
        get_palette(args.palette),
        opacity=args.opacity,
        out_file=args.out_seg)
    if args.out_sebound:
        # Semantic boundaries are derived from the predicted 19-class mask
        # (19 matches the default Cityscapes palette above).
        onehot_mask = mask_to_onehot(seg_pred[0],19) # one input img in default
        sebound_mask = onehot_to_multiclass_boundarys(onehot_mask,2,19)
        visualize_prediction(args.out_sebound,sebound_mask)
    if args.out_bibound:
        # Scale boundary probabilities to uint8 and apply OpenCV colormap
        # id 13 (presumably COLORMAP_MAGMA -- confirm against cv2's enum).
        bound_pred = (bound_pred[0] * 255.0).astype(np.uint8)
        bound_pred = cv2.applyColorMap(bound_pred,13)
        cv2.imwrite(args.out_bibound,bound_pred)
if __name__ == '__main__':
main()
| python | BSD-2-Clause | e9af36ea9ac9dac5506880096cd38423dc3dbf51 | 2026-01-05T07:13:30.482047Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/setup.py | setup.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
from torch.utils.cpp_extension import BuildExtension
from setuptools import setup
import torch
import subprocess
import re
# This version string should be updated when releasing a new version.
_VERSION = '0.2.4'
RELEASE = os.getenv("RELEASE", "FALSE")
ROOT_PATH = os.path.abspath(os.path.join(os.getcwd()))
WITH_VINEYARD = os.getenv('WITH_VINEYARD', 'OFF')
WITH_CUDA = os.getenv('WITH_CUDA', 'ON')
sys.path.append(os.path.join(ROOT_PATH, 'graphlearn_torch', 'python', 'utils'))
from build_glt import glt_ext_module, glt_v6d_ext_module
GLT_V6D_EXT_NAME = "py_graphlearn_torch_vineyard"
GLT_EXT_NAME = "py_graphlearn_torch"
def get_gcc_use_cxx_abi():
    """Probe the CMake configure step for the C++11 ABI flag.

    Runs ``cmake .`` in the current directory and scans its output for a
    ``GCC_USE_CXX11_ABI: <digit>`` marker printed by the project's CMake
    scripts.

    Returns:
        The digit as a string ('0' or '1'), or None when the marker is
        absent (e.g. cmake failed or is not installed).
    """
    proc = subprocess.run("cmake .", capture_output=True, text=True, shell=True)
    # cmake message() output may land on either stream; scan both directly
    # instead of regex-matching the CompletedProcess repr, whose exact
    # format is not a stable contract.
    combined = (proc.stdout or '') + (proc.stderr or '')
    match = re.search(r"GCC_USE_CXX11_ABI: (\d)", combined)
    return match.group(1) if match else None
GCC_USE_CXX11_ABI = get_gcc_use_cxx_abi()
class CustomizedBuildExtension(BuildExtension):
    """BuildExtension that pins the GNU C++ ABI flag per extension.

    The vineyard extension uses the ABI value probed from CMake
    (``GCC_USE_CXX11_ABI``); every other extension follows the ABI that the
    installed PyTorch was compiled with.

    NOTE(review): if the CMake probe failed, ``GCC_USE_CXX11_ABI`` is None
    and the string concatenation below raises -- presumably acceptable,
    since the vineyard build cannot proceed without it.
    """

    def _add_gnu_cpp_abi_flag(self, extension):
        if extension.name == GLT_V6D_EXT_NAME:
            gcc_use_cxx_abi = GCC_USE_CXX11_ABI
        else:
            gcc_use_cxx_abi = str(int(torch._C._GLIBCXX_USE_CXX11_ABI))
        # Fixed: the original passed the format string and its arguments as
        # separate print() arguments, so the '{}' placeholders were printed
        # literally and never substituted.
        print('GCC_USE_CXX11_ABI for {}: {}'.format(extension.name, gcc_use_cxx_abi))
        self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + gcc_use_cxx_abi)
ext_modules = [
glt_ext_module(
name=GLT_EXT_NAME,
root_path=ROOT_PATH,
with_cuda=WITH_CUDA == "ON",
release=RELEASE == "TRUE"
)
]
if WITH_VINEYARD == "ON":
ext_modules.append(
glt_v6d_ext_module(
name=GLT_V6D_EXT_NAME,
root_path=ROOT_PATH,
),
)
setup(
name='graphlearn-torch',
version=_VERSION,
author='GLT Team',
description='Graph Learning for PyTorch (GraphLearn-for-PyTorch)',
url="https://github.com/alibaba/graphlearn-for-pytorch",
python_requires='>=3.6',
requires=['torch'],
cmdclass={'build_ext': CustomizedBuildExtension},
ext_package='graphlearn_torch',
ext_modules=ext_modules,
package_dir={'graphlearn_torch': 'graphlearn_torch/python'},
packages=[
'graphlearn_torch', 'graphlearn_torch.channel', 'graphlearn_torch.data',
'graphlearn_torch.distributed', 'graphlearn_torch.loader',
'graphlearn_torch.partition', 'graphlearn_torch.sampler',
'graphlearn_torch.utils'
]
)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/benchmarks/api/run_dist_bench.py | benchmarks/api/run_dist_bench.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import yaml
import argparse
import paramiko
import click
if __name__ == "__main__":
parser = argparse.ArgumentParser('Run DistRandomSampler benchmarks.')
parser.add_argument('--config', type=str, default='bench_dist_config.yml',
help='paths to configuration file for benchmarks')
parser.add_argument('--epochs', type=int, default=1,
help='repeat epochs for sampling')
parser.add_argument('--batch_size', type=int, default=2048,
help='batch size for sampling')
parser.add_argument('--shuffle', action="store_true",
help='whether to shuffle input seeds at each epoch')
parser.add_argument('--with_edge', action="store_true",
help='whether to sample with edge ids')
parser.add_argument('--collect_features', action='store_true',
help='whether to collect features for sampled results')
parser.add_argument('--worker_concurrency', type=int, default=4,
help='concurrency for each sampling worker')
parser.add_argument('--channel_size', type=str, default='4GB',
help='memory used for shared-memory channel')
parser.add_argument('--master_addr', type=str, default='0.0.0.0',
help='master ip address for synchronization across all training nodes')
parser.add_argument('--master_port', type=str, default='12345',
help='port for synchronization across all training nodes')
args = parser.parse_args()
config = open(args.config, 'r')
config = yaml.safe_load(config)
dataset = config['dataset']
ip_list, port_list, username_list = config['nodes'], config['ports'], config['usernames']
dst_path_list = config['dst_paths']
node_ranks = config['node_ranks']
num_nodes = len(node_ranks)
visible_devices = config['visible_devices']
python_bins = config['python_bins']
num_cores = len(visible_devices[0].split(','))
dataset_path = "../../data/"
passwd_dict = {}
for username, ip in zip(username_list, ip_list):
passwd_dict[ip+username] = click.prompt('passwd for '+username+'@'+ip,
hide_input=True)
for username, ip, port, dst, noderk, device, pythonbin in zip(
username_list,
ip_list,
port_list,
dst_path_list,
node_ranks,
visible_devices,
python_bins,
):
trans = paramiko.Transport((ip, port))
trans.connect(username=username, password=passwd_dict[ip+username])
ssh = paramiko.SSHClient()
ssh._transport = trans
to_bench_dir = 'cd '+dst+'/benchmarks/api/ '
exec_bench = "tmux new -d 'CUDA_VISIBLE_DEVICES="+device+" "+pythonbin+" bench_dist_neighbor_loader.py --dataset="+dataset+" --node_rank="+str(noderk)+" --num_nodes="+str(num_nodes)+" --sample_nprocs="+str(num_cores)+" --master_addr="+args.master_addr+" --master_port="+args.master_port+ " --batch_size="+str(args.batch_size)+" --channel_size="+args.channel_size+" --epochs="+str(args.epochs)
if args.collect_features:
exec_bench += " --collect_features"
if args.with_edge:
exec_bench += " --with_edge"
if args.shuffle:
exec_bench += " --shuffle"
print(to_bench_dir + ' && '+ exec_bench + " '")
stdin, stdout, stderr = ssh.exec_command(to_bench_dir+' && '+exec_bench+" '", bufsize=1)
print(stdout.read().decode())
print(stderr.read().decode())
ssh.close()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/benchmarks/api/bench_sampler.py | benchmarks/api/bench_sampler.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import time
import torch
import graphlearn_torch as glt
import os.path as osp
from ogb.nodeproppred import PygNodePropPredDataset
from torch_geometric.loader import NeighborSampler
def test_glt_ogbnproducts(mode='GPU'):
    """Benchmark GLT neighbor-sampling throughput on ogbn-products.

    Args:
        mode: 'GPU' keeps the graph topology in device memory ('CUDA');
            anything else uses 'ZERO_COPY' (pinned host memory accessed
            from the GPU -- presumably; confirm against glt.data.Graph).

    Prints sampled edges per second, in millions.
    """
    if mode == 'GPU':
        graph_mode = 'CUDA'
    else:
        graph_mode = 'ZERO_COPY'
    root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                    '..', 'data', 'products')
    dataset = PygNodePropPredDataset('ogbn-products', root)
    train_idx = dataset.get_idx_split()["train"]
    train_loader = torch.utils.data.DataLoader(train_idx,
                                               batch_size=1024,
                                               pin_memory=True,
                                               shuffle=True)
    csr_topo = glt.data.Topology(dataset[0].edge_index)
    g = glt.data.Graph(csr_topo, graph_mode, device=0)
    device = torch.device('cuda:0')
    sampler = glt.sampler.NeighborSampler(g, [15, 10, 5], device=device)
    total_time = 0
    sampled_edges = 0
    for seeds in train_loader:
        seeds = seeds.to(0)
        # Fence before and after so the timer covers only the sampling call.
        torch.cuda.synchronize()
        start = time.time()
        row = sampler.sample_from_nodes(seeds).row
        torch.cuda.synchronize()
        total_time += time.time() - start
        sampled_edges += row.shape[0]
    print('Sampled Edges per secs: {} M'.format(sampled_edges / total_time / 1000000))
def test_quiver_ogbnproducts(mode='GPU'):
    """Benchmark Quiver neighbor-sampling throughput on ogbn-products.

    Args:
        mode: 'GPU' keeps the topology in device memory; anything else uses
            'UVA' (presumably unified-addressing host memory -- confirm
            against quiver's GraphSageSampler docs).

    Prints sampled edges per second, in millions.
    """
    import quiver
    if mode == 'GPU':
        quiver_mode = 'GPU'
    else:
        quiver_mode = 'UVA'
    root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                    '..', 'data', 'products')
    dataset = PygNodePropPredDataset('ogbn-products', root)
    train_idx = dataset.get_idx_split()["train"]
    train_loader = torch.utils.data.DataLoader(train_idx,
                                               batch_size=1024,
                                               pin_memory=True,
                                               shuffle=True)
    csr_topo = quiver.CSRTopo(dataset[0].edge_index)
    quiver_sampler = quiver.pyg.GraphSageSampler(csr_topo, [15, 10, 5],
                                                 device=0,
                                                 mode=quiver_mode)
    total_time = 0
    sampled_edges = 0
    for seeds in train_loader:
        seeds = seeds.to(0)
        # Fence before and after so the timer covers only the sampling call.
        torch.cuda.synchronize()
        start = time.time()
        _, _, adjs = quiver_sampler.sample(seeds)
        torch.cuda.synchronize()
        total_time += time.time() - start
        for adj in adjs:
            sampled_edges += adj.edge_index.shape[1]
    print('Sampled Edges per secs: {} M'.format(sampled_edges / total_time / 1000000))
def test_pyg_ogbnproducts():
    """Benchmark PyG's NeighborSampler throughput on ogbn-products.

    PyG samples inside the loader, so the timer wraps the loader iteration
    itself rather than an explicit sample call. Prints sampled edges per
    second, in millions.
    """
    root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                    '..', 'data', 'products')
    dataset = PygNodePropPredDataset('ogbn-products', root)
    train_idx = dataset.get_idx_split()["train"]
    train_loader = NeighborSampler(dataset[0].edge_index,
                                   node_idx=train_idx,
                                   sizes=[15, 10, 5],
                                   batch_size=1024,
                                   shuffle=True)
    total_time = 0
    sampled_edges = 0
    start = time.time()
    for _, _, adjs in train_loader:
        # Stop the clock while counting edges; restart before the next batch.
        total_time += time.time() - start
        for adj in adjs:
            sampled_edges += adj.edge_index.shape[1]
        start = time.time()
    print('Sampled Edges per secs: {} M'.format(sampled_edges / total_time / 1000000))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Test Sampler benchmarks.")
parser.add_argument('--backend', type=str, default='glt',
help='glt, quiver, or pyg')
parser.add_argument('--sample_mode', type=str, default='GPU',
help='GPU or ZERO_COPY')
args = parser.parse_args()
if args.backend == 'glt':
test_glt_ogbnproducts(args.sample_mode)
elif args.backend == 'quiver':
test_quiver_ogbnproducts(args.sample_mode)
else:
test_pyg_ogbnproducts() | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/benchmarks/api/bench_feature.py | benchmarks/api/bench_feature.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import time
import torch
import graphlearn_torch as glt
import os.path as osp
from ogb.nodeproppred import PygNodePropPredDataset
from torch_geometric.loader import NeighborSampler
def test_glt_ogbnproducts(split_ratio):
    """Benchmark GLT feature-lookup throughput on ogbn-products.

    Args:
        split_ratio: fraction of the feature table placed in the GPU cache
            (features are first reordered by ``sort_by_in_degree`` --
            presumably so frequently-touched rows land in cache; confirm).

    Prints total ids looked up and throughput in GB/s (the code assumes
    4 bytes per element, i.e. float32 features).
    """
    root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                    '..', 'data', 'products')
    dataset = PygNodePropPredDataset('ogbn-products', root)
    train_idx = dataset.get_idx_split()["train"]
    train_loader = torch.utils.data.DataLoader(train_idx,
                                               batch_size=1024,
                                               pin_memory=True,
                                               shuffle=True)
    csr_topo = glt.data.Topology(dataset[0].edge_index)
    g = glt.data.Graph(csr_topo, 'CUDA', device=0)
    device = torch.device('cuda:0')
    sampler = glt.sampler.NeighborSampler(g, [15, 10, 5], device=device)
    cpu_tensor, id2index = glt.data.sort_by_in_degree(
        dataset[0].x, split_ratio, csr_topo)
    feature = glt.data.Feature(cpu_tensor,
                               id2index,
                               split_ratio,
                               device_group_list=[glt.data.DeviceGroup(0, [0])],
                               device=0)
    total_num = 0
    total_time = 0
    for seeds in train_loader:
        nid = sampler.sample_from_nodes(seeds).node
        # Time only the feature gather, not the sampling.
        torch.cuda.synchronize()
        start = time.time()
        res = feature[nid]
        torch.cuda.synchronize()
        total_time += time.time() - start
        total_num += res.numel()
    torch.cuda.synchronize()
    print('Lookup {} ids, takes {} secs, Throughput {} GB/s.'\
          .format(total_num, total_time, total_num * 4 / total_time/ (1024**3)))
def test_quiver_ogbnproducts(split_ratio):
    """Benchmark Quiver feature-lookup throughput on ogbn-products.

    Args:
        split_ratio: fraction of the feature table cached on GPU. Quiver
            takes an absolute cache size; the 950 multiplier presumably
            approximates the full feature table in MB -- confirm.

    Prints total ids looked up and throughput in GB/s (the code assumes
    4 bytes per element, i.e. float32 features).
    """
    import quiver
    cache_size = str(950 * split_ratio) + 'M'
    root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                    '..', 'data', 'products')
    dataset = PygNodePropPredDataset('ogbn-products', root)
    train_idx = dataset.get_idx_split()["train"]
    train_loader = torch.utils.data.DataLoader(train_idx,
                                               batch_size=1024,
                                               pin_memory=True,
                                               shuffle=True)
    csr_topo = quiver.CSRTopo(dataset[0].edge_index)
    quiver_sampler = quiver.pyg.GraphSageSampler(csr_topo, [15, 10, 5],
                                                 device=0,
                                                 mode="GPU")
    quiver_feature = quiver.Feature(rank=0,
                                    device_list=[0],
                                    device_cache_size=cache_size,
                                    cache_policy="device_replicate",
                                    csr_topo=csr_topo)
    quiver_feature.from_cpu_tensor(dataset[0].x)
    total_num = 0
    total_time = 0
    for seeds in train_loader:
        nid, _, _ = quiver_sampler.sample(seeds)
        # Time only the feature gather, not the sampling.
        torch.cuda.synchronize()
        start = time.time()
        res = quiver_feature[nid]
        torch.cuda.synchronize()
        total_time += time.time() - start
        total_num += res.numel()
    torch.cuda.synchronize()
    print('Lookup {} ids, takes {} secs, Throughput {} GB/s.'\
          .format(total_num, total_time, total_num * 4 / total_time/ (1024**3)))
def test_pyg_ogbnproducts():
    """Benchmark plain CPU tensor indexing as the feature-lookup baseline.

    Uses PyG's NeighborSampler only to produce the node ids; the timed
    operation is indexing the in-memory feature tensor. Prints total ids
    looked up and throughput in GB/s (the code assumes 4 bytes per element,
    i.e. float32 features).
    """
    root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                    '..', 'data', 'products')
    dataset = PygNodePropPredDataset('ogbn-products', root)
    feature = dataset[0].x
    train_idx = dataset.get_idx_split()["train"]
    train_loader = NeighborSampler(dataset[0].edge_index,
                                   node_idx=train_idx,
                                   sizes=[15, 10, 5],
                                   batch_size=1024,
                                   shuffle=True)
    total_num = 0
    total_time = 0
    for _, n_id, _ in train_loader:
        start = time.time()
        res = feature[n_id]
        # torch.cuda.synchronize()
        total_time += time.time() - start
        total_num += res.numel()
    # torch.cuda.synchronize()
    print('Lookup {} ids, takes {} secs, Throughput {} GB/s.'\
          .format(total_num, total_time, total_num * 4 / total_time/ (1024**3)))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Test Feature Lookup benchmarks.")
parser.add_argument('--backend', type=str, default='glt',
help='glt, quiver, or pyg')
parser.add_argument('--split_ratio', type=float, default=0.2)
args = parser.parse_args()
if args.backend == 'glt':
test_glt_ogbnproducts(args.split_ratio)
elif args.backend == 'quiver':
test_quiver_ogbnproducts(args.split_ratio)
else:
test_pyg_ogbnproducts() | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/benchmarks/api/bench_dist_neighbor_loader.py | benchmarks/api/bench_dist_neighbor_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import time
import torch
import torch.distributed as dist
import graphlearn_torch as glt
if __name__ == "__main__":
print('*** DistNeighborLoader Benchmarks ***')
parser = argparse.ArgumentParser('DistRandomSampler benchmarks.')
parser.add_argument('--dataset', type=str, default='products',
help='name of the dataset for benchmark')
parser.add_argument('--num_nodes', type=int, default=2,
help='number of worker nodes')
parser.add_argument('--node_rank', type=int, default=0,
help='worker node rank')
parser.add_argument('--sample_nprocs', type=int, default=2,
help='number of processes for sampling')
parser.add_argument('--epochs', type=int, default=1,
help='repeat epochs for sampling')
parser.add_argument('--batch_size', type=int, default=2048,
help='batch size for sampling')
parser.add_argument('--shuffle', action="store_true",
help='whether to shuffle input seeds at each epoch')
parser.add_argument('--with_edge', action="store_true",
help='whether to sample with edge ids')
parser.add_argument('--collect_features', action='store_true',
help='whether to collect features for sampled results')
parser.add_argument('--worker_concurrency', type=int, default=4,
help='concurrency for each sampling worker')
parser.add_argument('--channel_size', type=str, default='4GB',
help='memory used for shared-memory channel')
parser.add_argument('--master_addr', type=str, default='localhost',
help='master ip address for synchronization across all training nodes')
parser.add_argument('--master_port', type=str, default='11234',
help='port for synchronization across all training nodes')
args = parser.parse_args()
dataset_name = args.dataset
num_nodes = args.num_nodes
node_rank = args.node_rank
sampling_nprocs = args.sample_nprocs
device_count = torch.cuda.device_count()
epochs = args.epochs
batch_size = args.batch_size
shuffle = args.shuffle
with_edge = args.with_edge
collect_features = args.collect_features
worker_concurrency = args.worker_concurrency
channel_size = args.channel_size
master_addr = str(args.master_addr)
sampling_master_port = int(args.master_port)
torch_pg_master_port = sampling_master_port + 1
print('- dataset: {}'.format(dataset_name))
print('- total nodes: {}'.format(num_nodes))
print('- node rank: {}'.format(node_rank))
print('- device count: {}'.format(device_count))
print('- sampling nprocs per training proc: {}'.format(sampling_nprocs))
print('- epochs: {}'.format(epochs))
print('- batch size: {}'.format(batch_size))
print('- shuffle: {}'.format(shuffle))
print('- sample with edge id: {}'.format(with_edge))
print('- collect remote features: {}'.format(collect_features))
print('- sampling concurrency per worker: {}'.format(worker_concurrency))
print('- channel size: {}'.format(channel_size))
print('- master addr: {}'.format(master_addr))
print('- sampling master port: {}'.format(sampling_master_port))
print('** Loading dist dataset ...')
root = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', 'data', dataset_name)
dataset = glt.distributed.DistDataset()
dataset.load(
root_dir=osp.join(root, 'ogbn-'+dataset_name+'-partitions'),
partition_idx=node_rank,
graph_mode='ZERO_COPY',
device_group_list=[glt.data.DeviceGroup(0, [0]), glt.data.DeviceGroup(1, [1])], # 2 GPUs
device=0
)
print('** Loading input seeds ...')
seeds_dir = osp.join(root, 'ogbn-'+dataset_name+'-test-partitions')
seeds_data = torch.load(osp.join(seeds_dir, f'partition{node_rank}.pt'))
print('** Initializing worker group context ...')
glt.distributed.init_worker_group(
world_size=num_nodes,
rank=node_rank,
group_name='dist-neighbor-loader-benchmarks'
)
dist_context = glt.distributed.get_context()
print('** Initializing process group')
dist.init_process_group('gloo', rank=dist_context.rank,
world_size=dist_context.world_size,
init_method='tcp://{}:{}'.format(master_addr, torch_pg_master_port))
print('** Launching dist neighbor loader ...')
dist_loader = glt.distributed.DistNeighborLoader(
data=dataset,
num_neighbors=[15, 10, 5],
input_nodes=seeds_data,
batch_size=batch_size,
shuffle=shuffle,
drop_last=True,
with_edge=with_edge,
collect_features=collect_features,
to_device=torch.device('cuda:0'),
worker_options=glt.distributed.MpDistSamplingWorkerOptions(
num_workers=sampling_nprocs,
worker_devices=[torch.device('cuda', i % device_count) for i in range(sampling_nprocs)],
worker_concurrency=worker_concurrency,
master_addr=master_addr,
master_port=sampling_master_port,
channel_size=channel_size,
pin_memory=True
)
)
print('** Benchmarking ...')
f = open('benchmark.txt', 'a+')
for epoch in range(epochs):
num_sampled_nodes = 0
num_sampled_edges = 0
num_collected_features = 0
start = time.time()
for i, batch in enumerate(dist_loader):
if i % 100 == 0:
f.write('Epoch {}, Batch {}\n'.format(epoch, i))
num_sampled_nodes += batch.node.numel()
num_sampled_edges += batch.edge_index.size(1)
if batch.x is not None:
num_collected_features += batch.x.size(0)
torch.cuda.synchronize()
total_time = time.time() - start
f.write('** Epoch {} **\n'.format(epoch))
f.write('- total time: {}s\n'.format(total_time))
f.write('- total sampled nodes: {}\n'.format(num_sampled_nodes))
f.write('- sampling nodes per sec: {} M\n'.format((num_sampled_nodes / total_time) / 1000000))
f.write('- total sampled edges: {}\n'.format(num_sampled_edges))
f.write('- sampling edges per sec: {} M\n'.format((num_sampled_edges / total_time) / 1000000))
f.write('- total collected features: {}\n'.format(num_collected_features))
f.write('- collecting features per sec: {} M\n'.format((num_collected_features / total_time) / 1000000))
dist.barrier()
time.sleep(1)
print('** Exit ...')
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/typing.py | graphlearn_torch/python/typing.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
import torch
import numpy as np
from enum import Enum
# Types for basic graph entity #################################################
# Node types are plain strings; edge types are (src, relation, dst) triples.
NodeType = str
EdgeType = Tuple[str, str, str]

EDGE_TYPE_STR_SPLIT = '__'


def as_str(type: Union[NodeType, EdgeType]) -> str:
    """Canonical string form of a node or edge type.

    A node type (plain string) is returned unchanged; an edge-type triple is
    joined with '__'; anything else yields the empty string.
    """
    if isinstance(type, NodeType):
        return type
    if isinstance(type, (list, tuple)) and len(type) == 3:
        return EDGE_TYPE_STR_SPLIT.join(type)
    return ''
def reverse_edge_type(etype: 'EdgeType'):
    """Return the reverse of an (src, edge, dst) edge type.

    For non-self-loop types the relation name is toggled between ``x`` and
    ``rev_x``; self-loop types (src == dst) keep their relation unchanged.
    """
    src, edge, dst = etype
    if src != dst:
        parts = edge.split('_', 1)
        if parts[0] == 'rev':  # already reversed: strip the `rev_` prefix
            edge = parts[1]
        else:
            edge = 'rev_' + edge
    return (dst, edge, src)
# A representation of tensor data
TensorDataType = Union[torch.Tensor, np.ndarray]
NodeLabel = Union[TensorDataType, Dict[NodeType, TensorDataType]]
NodeIndex = Union[TensorDataType, Dict[NodeType, TensorDataType]]
class Split(Enum):
    """Canonical dataset split names (train / valid / test)."""
    train = 'train'
    valid = 'valid'
    test = 'test'
# Types for partition data #####################################################
class GraphPartitionData(NamedTuple):
    r""" Data and indexing info of a graph partition.

    Fields are aligned positionally: ``eids[i]`` (and ``weights[i]``, when
    present) correspond to the edge ``(edge_index[0][i], edge_index[1][i])``.
    """
    # edge index (rows, cols)
    edge_index: Tuple[torch.Tensor, torch.Tensor]
    # edge ids tensor corresponding to `edge_index`
    eids: torch.Tensor
    # weights tensor corresponding to `edge_index`
    weights: Optional[torch.Tensor] = None
class FeaturePartitionData(NamedTuple):
    r""" Data and indexing info of a node/edge feature partition.

    ``ids`` maps rows of ``feats`` to global node/edge ids; ``cache_ids``
    plays the same role for the ``cache_feats`` cache tensor. All fields
    may be None.
    """
    # node/edge feature tensor
    feats: Optional[torch.Tensor]
    # node/edge ids tensor corresponding to `feats`
    ids: Optional[torch.Tensor]
    # feature cache tensor
    cache_feats: Optional[torch.Tensor]
    # cached node/edge ids tensor corresponding to `cache_feats`
    cache_ids: Optional[torch.Tensor]
# Heterogeneous variants: one partition record per edge (or node/edge) type.
HeteroGraphPartitionData = Dict[EdgeType, GraphPartitionData]
HeteroFeaturePartitionData = Dict[Union[NodeType, EdgeType], FeaturePartitionData]


# Types for neighbor sampling ##################################################

# Seeds may be an id tensor or a string (presumably a named seed source --
# confirm against the loader implementations).
Seeds = Union[torch.Tensor, str]
InputNodes = Union[Seeds, NodeType, Tuple[NodeType, Seeds], Tuple[NodeType, List[Seeds]]]
EdgeIndexTensor = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
InputEdges = Union[EdgeIndexTensor, EdgeType, Tuple[EdgeType, EdgeIndexTensor]]
# Fan-out per hop; heterogeneous graphs may specify it per edge type.
NumNeighbors = Union[List[int], Dict[EdgeType, List[int]]]
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/__init__.py | graphlearn_torch/python/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
# Disable warning messages of LibTorch by default.
os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR"
from . import channel
from . import data
from . import distributed
from . import loader
from . import partition
from . import sampler
from . import utils
from .typing import *
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/link_neighbor_loader.py | graphlearn_torch/python/loader/link_neighbor_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional
import torch
from ..loader import LinkLoader
from ..data import Dataset
from ..sampler import NegativeSampling, NeighborSampler
from ..typing import NumNeighbors, InputEdges
class LinkNeighborLoader(LinkLoader):
  r"""A link-based data loader derived as an extension of the node-based
  :class:`torch_geometric.loader.NeighborLoader`.

  This loader allows for mini-batch training of GNNs on large-scale graphs
  where full-batch training is not feasible.

  More specifically, this loader first selects a sample of edges from the
  set of input edges :obj:`edge_label_index` (which may or not be edges in
  the original graph) and then constructs a subgraph from all the nodes
  present in this list by sampling :obj:`num_neighbors` neighbors in each
  iteration.

  Args:
    data (Dataset): The `graphlearn_torch.data.Dataset` object.
    num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]): The
      number of neighbors to sample for each node in each iteration.
      In heterogeneous graphs, may also take in a dictionary denoting
      the amount of neighbors to sample for each individual edge type.
      If an entry is set to :obj:`-1`, all neighbors will be included.
    neighbor_sampler (graphlearn_torch.sampler.BaseSampler, optional):
      The sampler implementation to be used with this loader.
      Needs to implement
      :meth:`~graphlearn_torch.sampler.BaseSampler.sample_from_edges`.
      The sampler implementation must be compatible with the input
      :obj:`data` object.
    edge_label_index (Tensor or EdgeType or Tuple[EdgeType, Tensor]):
      The edge indices, holding source and destination nodes to start
      sampling from.
      If set to :obj:`None`, all edges will be considered.
      In heterogeneous graphs, needs to be passed as a tuple that holds
      the edge type and corresponding edge indices.
      (default: :obj:`None`)
    edge_label (Tensor, optional): The labels of edge indices from which to
      start sampling from. Must be the same length as
      the :obj:`edge_label_index`. (default: :obj:`None`)
    neg_sampling (NegativeSampling, optional): The negative sampling
      configuration.
      For negative sampling mode :obj:`"binary"`, samples can be accessed
      via the attributes :obj:`edge_label_index` and :obj:`edge_label` in
      the respective edge type of the returned mini-batch.
      In case :obj:`edge_label` does not exist, it will be automatically
      created and represents a binary classification task (:obj:`0` =
      negative edge, :obj:`1` = positive edge).
      In case :obj:`edge_label` does exist, it has to be a categorical
      label from :obj:`0` to :obj:`num_classes - 1`.
      After negative sampling, label :obj:`0` represents negative edges,
      and labels :obj:`1` to :obj:`num_classes` represent the labels of
      positive edges.
      Note that returned labels are of type :obj:`torch.float` for binary
      classification (to facilitate the ease-of-use of
      :meth:`F.binary_cross_entropy`) and of type
      :obj:`torch.long` for multi-class classification (to facilitate the
      ease-of-use of :meth:`F.cross_entropy`).
      For negative sampling mode :obj:`"triplet"`, samples can be
      accessed via the attributes :obj:`src_index`, :obj:`dst_pos_index`
      and :obj:`dst_neg_index` in the respective node types of the
      returned mini-batch.
      :obj:`edge_label` needs to be :obj:`None` for :obj:`"triplet"`
      negative sampling mode.
      If set to :obj:`None`, no negative sampling strategy is applied.
      (default: :obj:`None`)
    batch_size (int): How many samples per batch to load (default: ``1``).
    shuffle (bool): Set to ``True`` to have the data reshuffled at every
      epoch (default: ``False``).
    drop_last (bool): Set to ``True`` to drop the last incomplete batch, if
      the dataset size is not divisible by the batch size. If ``False`` and
      the size of dataset is not divisible by the batch size, then the last
      batch will be smaller. (default: ``False``).
    with_edge (bool): Set to ``True`` to sample with edge ids and also include
      them in the sampled results. (default: ``False``).
    with_weight (bool): Forwarded to the default :class:`NeighborSampler`
      when ``neighbor_sampler`` is not given. (default: ``False``).
    strategy: (str): Set sampling strategy for the default neighbor sampler
      provided by graphlearn-torch. (default: ``"random"``).
    **kwargs (optional): Additional arguments of
      :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.
  """
  def __init__(
    self,
    data: Dataset,
    num_neighbors: NumNeighbors,
    neighbor_sampler: Optional[NeighborSampler] = None,
    edge_label_index: InputEdges = None,
    edge_label: Optional[torch.Tensor] = None,
    neg_sampling: Optional[NegativeSampling] = None,
    with_edge: bool = False,
    with_weight: bool = False,
    batch_size: int = 1,
    shuffle: bool = False,
    drop_last: bool = False,
    strategy: str = "random",
    device: torch.device = torch.device('cuda:0'),
    seed: Optional[int] = None,
    **kwargs,
  ):
    # Enable negative sampling in the default sampler only when a negative
    # sampling configuration is actually provided.
    with_neg = neg_sampling is not None
    if neighbor_sampler is None:
      # Build the default in-memory neighbor sampler over the dataset graph.
      neighbor_sampler = NeighborSampler(
        data.graph,
        num_neighbors=num_neighbors,
        strategy=strategy,
        with_edge=with_edge,
        with_neg=with_neg,
        with_weight=with_weight,
        device=device,
        edge_dir=data.edge_dir,
        seed=seed
      )
    super().__init__(
      data=data,
      link_sampler=neighbor_sampler,
      edge_label_index=edge_label_index,
      edge_label=edge_label,
      neg_sampling=neg_sampling,
      device=device,
      batch_size=batch_size,
      shuffle=shuffle,
      drop_last=drop_last,
      edge_dir=data.edge_dir,
      **kwargs,
    )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/transform.py | graphlearn_torch/python/loader/transform.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Dict, Optional, Literal
import torch
import torch.nn.functional as F
from torch_geometric.data import Data, HeteroData
from ..sampler import SamplerOutput, HeteroSamplerOutput
from ..typing import NodeType, EdgeType, reverse_edge_type
def to_data(
  sampler_out: SamplerOutput,
  batch_labels: Optional[torch.Tensor] = None,
  node_feats: Optional[torch.Tensor] = None,
  edge_feats: Optional[torch.Tensor] = None,
  **kwargs
) -> Data:
  r"""Convert a homogeneous :class:`SamplerOutput` into a PyG ``Data``
  object, attaching the given features/labels and the sampler metadata.
  """
  coo_index = torch.stack([sampler_out.row, sampler_out.col])
  data = Data(x=node_feats, edge_index=coo_index,
              edge_attr=edge_feats, y=batch_labels, **kwargs)
  data.edge = sampler_out.edge
  data.node = sampler_out.node
  data.batch = sampler_out.batch
  # Seed (batch) size is the number of input nodes, 0 when no batch exists.
  data.batch_size = 0 if data.batch is None else sampler_out.batch.numel()
  data.num_sampled_nodes = sampler_out.num_sampled_nodes
  data.num_sampled_edges = sampler_out.num_sampled_edges
  # Attach sampler metadata.
  meta = sampler_out.metadata
  if isinstance(meta, dict):
    for key, value in meta.items():
      if key == 'edge_label_index':
        # In binary negative sampling from edges, we reverse the
        # edge_label_index and put it into the reversed edgetype subgraph.
        data['edge_label_index'] = torch.stack((value[1], value[0]), dim=0)
      else:
        data[key] = value
  elif meta is not None:
    data['metadata'] = meta
  return data
def to_hetero_data(
  hetero_sampler_out: HeteroSamplerOutput,
  batch_label_dict: Optional[Dict[NodeType, torch.Tensor]] = None,
  node_feat_dict: Optional[Dict[NodeType, torch.Tensor]] = None,
  edge_feat_dict: Optional[Dict[EdgeType, torch.Tensor]] = None,
  edge_dir: Literal['in', 'out'] = 'out',
  **kwargs
) -> HeteroData:
  r"""Convert a :class:`HeteroSamplerOutput` into a PyG ``HeteroData``.

  Args:
    hetero_sampler_out: The heterogeneous sampler output to convert. Its
      ``num_sampled_nodes`` / ``num_sampled_edges`` dicts are padded
      in place so that every type reports the same number of hops.
    batch_label_dict: Optional seed-node labels, keyed by node type.
    node_feat_dict: Optional node features, keyed by node type.
    edge_feat_dict: Optional edge features, keyed by edge type.
    edge_dir: Sampling direction; with ``'out'`` the ``edge_label_index``
      metadata is reversed to match the reversed edge-type subgraph.
    **kwargs: Extra attributes forwarded to the ``HeteroData`` constructor.
  """
  data = HeteroData(**kwargs)
  edge_index_dict = hetero_sampler_out.get_edge_index()
  # The hop count is the longest per-edge-type record of sampled edge
  # counts; shorter records are zero-padded below to this length.
  if hetero_sampler_out.num_sampled_edges:
    num_hops = max(map(
      lambda x: len(x), list(hetero_sampler_out.num_sampled_edges.values()))
    )
  else:
    num_hops = 0
  # edges
  for k, v in edge_index_dict.items():
    data[k].edge_index = v
    if hetero_sampler_out.edge is not None:
      data[k].edge = hetero_sampler_out.edge.get(k, None)
    if edge_feat_dict is not None:
      data[k].edge_attr = edge_feat_dict.get(k, None)
    if k not in hetero_sampler_out.num_sampled_edges:
      # No record for this edge type: report zero edges for every hop.
      hetero_sampler_out.num_sampled_edges[k] = \
        torch.tensor([0] * num_hops, device=data[k].edge_index.device)
    else:
      # Right-pad with zeros so all edge types report `num_hops` entries.
      hetero_sampler_out.num_sampled_edges[k] = F.pad(
        hetero_sampler_out.num_sampled_edges[k],
        (0, num_hops - hetero_sampler_out.num_sampled_edges[k].size(0))
      )
  # nodes
  for k, v in hetero_sampler_out.node.items():
    data[k].node = v
    if node_feat_dict is not None:
      data[k].x = node_feat_dict.get(k, None)
    if k not in hetero_sampler_out.num_sampled_nodes:
      # Node records have one extra entry (the seed hop), hence num_hops + 1.
      hetero_sampler_out.num_sampled_nodes[k] = \
        torch.tensor([0] * (num_hops + 1), device=data[k].node.device)
    else:
      hetero_sampler_out.num_sampled_nodes[k] = F.pad(
        hetero_sampler_out.num_sampled_nodes[k],
        (0, num_hops + 1 - hetero_sampler_out.num_sampled_nodes[k].size(0))
      )
  # seed nodes
  for k, v in hetero_sampler_out.batch.items():
    data[k].batch = v
    data[k].batch_size = v.numel()
    if batch_label_dict is not None:
      data[k].y = batch_label_dict.get(k, None)
  # update num_sampled_nodes & num_sampled_edges
  data.num_sampled_nodes = hetero_sampler_out.num_sampled_nodes
  data.num_sampled_edges = hetero_sampler_out.num_sampled_edges
  # update meta data
  input_type = hetero_sampler_out.input_type
  if isinstance(hetero_sampler_out.metadata, dict):
    # if edge_dir == 'out', we need to reverse the edge type
    res_edge_type = reverse_edge_type(input_type) if edge_dir == 'out' else input_type
    for k, v in hetero_sampler_out.metadata.items():
      if k == 'edge_label_index':
        if edge_dir == 'out':
          # Reverse (row, col) to match the reversed edge-type subgraph.
          data[res_edge_type]['edge_label_index'] = \
            torch.stack((v[1], v[0]), dim=0)
        else:
          data[res_edge_type]['edge_label_index'] = v
      elif k == 'edge_label':
        data[res_edge_type]['edge_label'] = v
      elif k == 'src_index':
        # Triplet negative sampling: indices attach to src/dst node types.
        data[input_type[0]]['src_index'] = v
      elif k in ['dst_pos_index', 'dst_neg_index']:
        data[input_type[-1]][k] = v
      else:
        data[k] = v
  elif hetero_sampler_out.metadata is not None:
    data['metadata'] = hetero_sampler_out.metadata
  return data
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/node_loader.py | graphlearn_torch/python/loader/node_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Union
import torch
from ..data import Dataset
from ..sampler import BaseSampler, SamplerOutput, HeteroSamplerOutput
from ..typing import InputNodes
from .transform import to_data, to_hetero_data
class NodeLoader(object):
  r"""A base data loader that performs node sampling for mini-batch training
  of GNNs on large-scale graphs.

  Args:
    data (Dataset): The `graphlearn_torch.data.Dataset` object.
    node_sampler (graphlearn_torch.sampler.BaseSampler): The sampler
      implementation to be used with this loader.
      Needs to implement
      :meth:`~graphlearn_torch.sampler.BaseSampler.sample_from_nodes`.
      The sampler implementation must be compatible with the input
      :obj:`data` object.
    input_nodes (torch.Tensor or str or Tuple[str, torch.Tensor]): The
      indices of nodes for which neighbors are sampled to create
      mini-batches.
      Needs to be either given as a :obj:`torch.LongTensor` or
      :obj:`torch.BoolTensor`.
      In heterogeneous graphs, needs to be passed as a tuple that holds
      the node type and node indices.
    device (torch.device): The device on which seeds and labels are placed.
      (default: ``torch.device('cuda:0')``)
    **kwargs (optional): Additional arguments of
      :class:`torch.utils.data.DataLoader` used for iterating over the
      input seeds, such as :obj:`batch_size`, :obj:`shuffle` or
      :obj:`drop_last`.
  """
  def __init__(
    self,
    data: Dataset,
    node_sampler: BaseSampler,
    input_nodes: InputNodes,
    device: torch.device = torch.device('cuda:0'),
    **kwargs
  ):
    self.data = data
    self.sampler = node_sampler
    self.input_nodes = input_nodes
    self.device = device
    if isinstance(input_nodes, tuple):
      # Heterogeneous input: (node_type, seed indices).
      input_type, input_seeds = self.input_nodes
    else:
      input_type, input_seeds = None, self.input_nodes
    self._input_type = input_type
    # Labels of the input node type, cached on the target device. Default to
    # None so `_collate_fn` also works for unlabeled datasets. (Previously
    # the attribute was left unset when `get_node_label` returned None,
    # which made `_collate_fn` raise AttributeError instead of producing
    # unlabeled batches.)
    self.input_t_label = None
    label = self.data.get_node_label(self._input_type)
    if label is not None:
      assert isinstance(label, torch.Tensor)
      self.input_t_label = label.to(self.device)
    self._seed_loader = torch.utils.data.DataLoader(input_seeds, **kwargs)

  def __iter__(self):
    # Restart seed iteration at the beginning of each epoch.
    self._seeds_iter = iter(self._seed_loader)
    return self

  def __next__(self):
    # Subclasses define how a batch of seeds is sampled and collated.
    raise NotImplementedError

  def _collate_fn(self, sampler_out: Union[SamplerOutput, HeteroSamplerOutput]):
    r"""Format a sampler output into a ``Data``/``HeteroData`` object,
    attaching node/edge features and (when available) seed-node labels.
    """
    if isinstance(sampler_out, SamplerOutput):
      x = self.data.node_features[sampler_out.node]
      y = self.input_t_label[sampler_out.node] \
        if self.input_t_label is not None else None
      if self.data.edge_features is not None and sampler_out.edge is not None:
        edge_attr = self.data.edge_features[sampler_out.edge]
      else:
        edge_attr = None
      res_data = to_data(sampler_out, batch_labels=y,
                         node_feats=x, edge_feats=edge_attr)
    else:  # hetero
      x_dict = {
        ntype: self.data.get_node_feature(ntype)[ids]
        for ntype, ids in sampler_out.node.items()
      }
      input_t_ids = sampler_out.node[self._input_type]
      y_dict = {self._input_type: self.input_t_label[input_t_ids]} \
        if self.input_t_label is not None else None
      edge_attr_dict = {}
      if sampler_out.edge is not None:
        for etype, eids in sampler_out.edge.items():
          efeat = self.data.get_edge_feature(etype)
          if efeat is not None:
            edge_attr_dict[etype] = efeat[eids]
      res_data = to_hetero_data(sampler_out, batch_label_dict=y_dict,
                                node_feat_dict=x_dict,
                                edge_feat_dict=edge_attr_dict,
                                edge_dir=self.data.edge_dir)
    return res_data
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/link_loader.py | graphlearn_torch/python/loader/link_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple, Union, Optional, Literal
import torch
from .transform import to_data, to_hetero_data
from ..utils import convert_to_tensor
from ..data import Dataset
from ..sampler import (
BaseSampler,
EdgeSamplerInput,
NegativeSampling,
SamplerOutput,
HeteroSamplerOutput
)
from ..typing import InputEdges, reverse_edge_type
class LinkLoader(object):
  r"""A data loader that performs mini-batch sampling from link information,
  using a generic :class:`~graphlearn_torch.sampler.BaseSampler`
  implementation that defines a
  :meth:`~graphlearn_torch.sampler.BaseSampler.sample_from_edges` function and
  is supported on the provided input :obj:`data` object.

  .. note::
    Negative sampling for triplet case is currently implemented in an
    approximate way, *i.e.* negative edges may contain false negatives.

  Args:
    data (Dataset): The `graphlearn_torch.data.Dataset` object.
    link_sampler (graphlearn_torch.sampler.BaseSampler): The sampler
      implementation to be used with this loader.
      Needs to implement
      :meth:`~graphlearn_torch.sampler.BaseSampler.sample_from_edges`.
      The sampler implementation must be compatible with the input
      :obj:`data` object.
    edge_label_index (Tensor or EdgeType or Tuple[EdgeType, Tensor]):
      The edge indices, holding source and destination nodes to start
      sampling from.
      If set to :obj:`None`, all edges will be considered.
      In heterogeneous graphs, needs to be passed as a tuple that holds
      the edge type and corresponding edge indices.
      (default: :obj:`None`)
    edge_label (Tensor, optional): The labels of edge indices from which to
      start sampling from. Must be the same length as
      the :obj:`edge_label_index`. (default: :obj:`None`)
    neg_sampling (NegativeSampling, optional): The negative sampling
      configuration.
      For negative sampling mode :obj:`"binary"`, samples can be accessed
      via the attributes :obj:`edge_label_index` and :obj:`edge_label` in
      the respective edge type of the returned mini-batch (label :obj:`0`
      denotes negative edges; existing categorical labels are shifted up
      by one accordingly).
      For negative sampling mode :obj:`"triplet"`, samples can be
      accessed via the attributes :obj:`src_index`, :obj:`dst_pos_index`
      and :obj:`dst_neg_index` in the respective node types of the
      returned mini-batch, and :obj:`edge_label` must be :obj:`None`.
      If set to :obj:`None`, no negative sampling strategy is applied.
      (default: :obj:`None`)
    device (torch.device, optional): The device to put the data on.
      If set to :obj:`None`, the CPU is used.
    edge_dir (str:["in", "out"]): The edge direction for sampling.
      Can be either :str:`"out"` or :str:`"in"`.
      (default: :str:`"out"`)
    **kwargs (optional): Additional arguments of
      :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,
      :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.
  """
  def __init__(
    self,
    data: Dataset,
    link_sampler: BaseSampler,
    edge_label_index: InputEdges = None,
    edge_label: Optional[torch.Tensor] = None,
    neg_sampling: Optional[NegativeSampling] = None,
    device: torch.device = torch.device('cuda:0'),
    edge_dir: Literal['out', 'in'] = 'out',
    **kwargs,
  ):
    # Get edge type (or `None` for homogeneous graphs):
    input_type, edge_label_index = get_edge_label_index(
      data, edge_label_index)
    self.data = data
    self.link_sampler = link_sampler
    self.neg_sampling = NegativeSampling.cast(neg_sampling)
    self.device = device
    self.edge_dir = edge_dir

    if (self.neg_sampling is not None and self.neg_sampling.is_binary()
        and edge_label is not None and edge_label.min() == 0):
      # Increment labels such that `zero` now denotes "negative".
      edge_label = edge_label + 1

    if (self.neg_sampling is not None and self.neg_sampling.is_triplet()
        and edge_label is not None):
      raise ValueError("'edge_label' needs to be undefined for "
                       "'triplet'-based negative sampling. Please use "
                       "`src_index`, `dst_pos_index` and "
                       "`neg_pos_index` of the returned mini-batch "
                       "instead to differentiate between positive and "
                       "negative samples.")

    self.input_data = EdgeSamplerInput(
      row=edge_label_index[0].clone(),
      col=edge_label_index[1].clone(),
      label=edge_label,
      input_type=input_type,
      neg_sampling=self.neg_sampling,
    )
    # Iterate over positions into the seed edge list; the actual seed edges
    # are looked up via `self.input_data[...]` in `__next__`.
    input_index = range(len(edge_label_index[0]))
    self._seed_loader = torch.utils.data.DataLoader(input_index, **kwargs)

  def __iter__(self):
    # Restart seed iteration at the beginning of each epoch.
    self._seeds_iter = iter(self._seed_loader)
    return self

  def __next__(self):
    seeds = self._seeds_iter._next_data().to(self.device)
    # Currently, we support the out-edge sampling manner, so we reverse the
    # direction of src and dst for the output so that features of the sampled
    # nodes during training can be aggregated from k-hop to (k-1)-hop nodes.
    sampler_out = self.link_sampler.sample_from_edges(self.input_data[seeds])
    result = self._collate_fn(sampler_out)
    return result

  def _collate_fn(self, sampler_out: Union[SamplerOutput, HeteroSamplerOutput]):
    r"""format sampler output to Data/HeteroData

    For the out-edge sampling scheme (i.e. the direction of edges in
    the output is inverse to the original graph), we put the reversed
    edge_label_index into the (dst, rev_to, src) subgraph for
    HeteroSamplerOutput and (dst, to, src) for SamplerOutput.
    However, for the in-edge sampling scheme (i.e. the direction of edges
    in the output is the same as the original graph), we do not need to
    reverse the edge type of the sampler_out.
    """
    if isinstance(sampler_out, SamplerOutput):
      x = self.data.node_features[sampler_out.node]
      if self.data.edge_features is not None and sampler_out.edge is not None:
        edge_attr = self.data.edge_features[sampler_out.edge]
      else:
        edge_attr = None
      res_data = to_data(sampler_out,
                         node_feats=x,
                         edge_feats=edge_attr,
                         )
    else:  # hetero
      x_dict = {
        ntype: self.data.get_node_feature(ntype)[ids.to(torch.int64)]
        for ntype, ids in sampler_out.node.items()
      }
      edge_attr_dict = {}
      if sampler_out.edge is not None:
        for etype, eids in sampler_out.edge.items():
          # With out-edge sampling the output edge types are reversed, so
          # features must be fetched under the original (reversed-back) type.
          if self.edge_dir == 'out':
            efeat = self.data.get_edge_feature(reverse_edge_type(etype))
          elif self.edge_dir == 'in':
            efeat = self.data.get_edge_feature(etype)
          if efeat is not None:
            edge_attr_dict[etype] = efeat[eids.to(torch.int64)]
      res_data = to_hetero_data(sampler_out,
                                node_feat_dict=x_dict,
                                edge_feat_dict=edge_attr_dict,
                                edge_dir=self.edge_dir,
                                )
    return res_data

  def __repr__(self) -> str:
    return f'{self.__class__.__name__}()'
def get_edge_label_index(
  data: Dataset,
  edge_label_index: InputEdges
) -> Tuple[Optional[str], torch.Tensor]:
  r"""Normalize the ``edge_label_index`` argument of :class:`LinkLoader`
  into a ``(edge_type, (row, col))`` pair, where ``edge_type`` is
  :obj:`None` for homogeneous graphs. When no explicit indices are given,
  all edges of the resolved type are used as seeds.
  """
  edge_type = None
  # # Need the edge index in COO for LinkNeighborLoader:
  def _get_edge_index(edge_type):
    # Fetch all edges of the given type from the dataset in COO format.
    row, col, _, _ = data.get_graph(edge_type).topo.to_coo()
    return (row, col)
  if not isinstance(edge_label_index, Tuple):
    if edge_label_index is None:
      # Homogeneous graph, no explicit seeds: use every edge.
      return None, _get_edge_index(edge_type)
    # Homogeneous graph with explicit seed edge indices.
    return None, convert_to_tensor(edge_label_index)
  if isinstance(edge_label_index[0], str):
    # A bare edge type (src, rel, dst) was passed: use all of its edges.
    edge_type = edge_label_index
    return edge_type, _get_edge_index(edge_type)
  assert len(edge_label_index) == 2
  # Heterogeneous (edge_type, indices) pair; indices may still be None.
  edge_type, edge_label_index = convert_to_tensor(edge_label_index)
  if edge_label_index is None:
    row, col, _, _ = data.get_graph(edge_type).topo.to_coo()
    return edge_type, (row, col)
  return edge_type, edge_label_index | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/subgraph_loader.py | graphlearn_torch/python/loader/subgraph_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional
import torch
from . import NodeLoader
from ..data import Dataset
from ..sampler import NeighborSampler, NodeSamplerInput
from ..typing import InputNodes, NumNeighbors
class SubGraphLoader(NodeLoader):
  r""" A data loader for subgraph sampling: each batch induces the subgraph
  spanned by the sampled neighborhood of its seed nodes (via the sampler's
  ``subgraph`` method).

  Args:
    data (Dataset): The `graphlearn_torch.data.Dataset` object.
    num_neighbors (List[int]): The
      number of neighbors to sample for each node in each iteration.
      If an entry is set to :obj:`-1`, all neighbors will be included.
    input_nodes (torch.Tensor or str or Tuple[str, torch.Tensor]): The
      indices of nodes for which neighbors are sampled to create
      mini-batches.
      Needs to be either given as a :obj:`torch.LongTensor` or
      :obj:`torch.BoolTensor`.
      In heterogeneous graphs, needs to be passed as a tuple that holds
      the node type and node indices.
    batch_size (int): How many samples per batch to load (default: ``1``).
    shuffle (bool): Set to ``True`` to have the data reshuffled at every
      epoch (default: ``False``).
    drop_last (bool): Set to ``True`` to drop the last incomplete batch, if
      the dataset size is not divisible by the batch size. If ``False`` and
      the size of dataset is not divisible by the batch size, then the last
      batch will be smaller. (default: ``False``).
    with_edge (bool): Set to ``True`` to sample with edge ids and also include
      them in the sampled results. (default: ``False``).
    strategy: (str): Set sampling strategy for the default neighbor sampler
      provided by graphlearn-torch. (default: ``"random"``).
  """
  def __init__(
    self,
    data: Dataset,
    input_nodes: InputNodes,
    num_neighbors: Optional[NumNeighbors] = None,
    neighbor_sampler: Optional[NeighborSampler] = None,
    batch_size: int = 1,
    shuffle: bool = False,
    drop_last: bool = False,
    with_edge: bool = False,
    strategy: str = 'random',
    device: torch.device = torch.device('cuda:0'),
    seed: Optional[int] = None,
    **kwargs
  ):
    if neighbor_sampler is None:
      # Build the default neighbor sampler over the dataset graph.
      neighbor_sampler = NeighborSampler(
        data.graph,
        num_neighbors=num_neighbors,
        strategy=strategy,
        with_edge=with_edge,
        device=device,
        seed=seed
      )
    super().__init__(
      data=data,
      node_sampler=neighbor_sampler,
      input_nodes=input_nodes,
      device=device,
      batch_size=batch_size,
      shuffle=shuffle,
      drop_last=drop_last,
      **kwargs,
    )

  def __next__(self):
    # NOTE(review): relies on the private `_next_data()` of the DataLoader
    # iterator; raises StopIteration when the seeds are exhausted.
    seeds = self._seeds_iter._next_data().to(self.device)
    inputs = NodeSamplerInput(
      node=seeds,
      input_type=self._input_type
    )
    # `subgraph` induces the node-induced subgraph of the sampled nodes.
    out = self.sampler.subgraph(inputs)
    result = self._collate_fn(out)
    # Expose the sampler metadata under the conventional `mapping` name.
    result.mapping = result.metadata
    return result
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/__init__.py | graphlearn_torch/python/loader/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .link_loader import *
from .link_neighbor_loader import *
from .node_loader import *
from .neighbor_loader import *
from .transform import *
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/loader/neighbor_loader.py | graphlearn_torch/python/loader/neighbor_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional
import torch
from ..loader import NodeLoader
from ..data import Dataset
from ..sampler import NeighborSampler, NodeSamplerInput
from ..typing import InputNodes, NumNeighbors
class NeighborLoader(NodeLoader):
  r"""A data loader that performs node neighbor sampling for mini-batch training
  of GNNs on large-scale graphs.

  Args:
    data (Dataset): The `graphlearn_torch.data.Dataset` object.
    num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]): The
      number of neighbors to sample for each node in each iteration.
      In heterogeneous graphs, may also take in a dictionary denoting
      the amount of neighbors to sample for each individual edge type.
      If an entry is set to :obj:`-1`, all neighbors will be included.
    input_nodes (torch.Tensor or str or Tuple[str, torch.Tensor]): The
      indices of nodes for which neighbors are sampled to create
      mini-batches.
      Needs to be either given as a :obj:`torch.LongTensor` or
      :obj:`torch.BoolTensor`.
      In heterogeneous graphs, needs to be passed as a tuple that holds
      the node type and node indices.
    batch_size (int): How many samples per batch to load (default: ``1``).
    shuffle (bool): Set to ``True`` to have the data reshuffled at every
      epoch (default: ``False``).
    drop_last (bool): Set to ``True`` to drop the last incomplete batch, if
      the dataset size is not divisible by the batch size. If ``False`` and
      the size of dataset is not divisible by the batch size, then the last
      batch will be smaller. (default: ``False``).
    with_edge (bool): Set to ``True`` to sample with edge ids and also include
      them in the sampled results. (default: ``False``).
    strategy: (str): Set sampling strategy for the default neighbor sampler
      provided by graphlearn-torch. (default: ``"random"``).
    as_pyg_v1 (bool): Set to ``True`` to return result as the NeighborSampler
      in PyG v1. (default: ``False``).
  """
  def __init__(
    self,
    data: Dataset,
    num_neighbors: NumNeighbors,
    input_nodes: InputNodes,
    neighbor_sampler: Optional[NeighborSampler] = None,
    batch_size: int = 1,
    shuffle: bool = False,
    drop_last: bool = False,
    with_edge: bool = False,
    with_weight: bool = False,
    strategy: str = 'random',
    device: torch.device = torch.device('cuda:0'),
    as_pyg_v1: bool = False,
    seed: Optional[int] = None,
    **kwargs
  ):
    if neighbor_sampler is None:
      # Build the default in-memory neighbor sampler over the dataset graph.
      neighbor_sampler = NeighborSampler(
        data.graph,
        num_neighbors=num_neighbors,
        strategy=strategy,
        with_edge=with_edge,
        with_weight=with_weight,
        device=device,
        edge_dir=data.edge_dir,
        seed=seed
      )
    self.as_pyg_v1 = as_pyg_v1
    self.edge_dir = data.edge_dir
    super().__init__(
      data=data,
      node_sampler=neighbor_sampler,
      input_nodes=input_nodes,
      device=device,
      batch_size=batch_size,
      shuffle=shuffle,
      drop_last=drop_last,
      **kwargs,
    )

  def __next__(self):
    # NOTE(review): relies on the private `_next_data()` of the DataLoader
    # iterator; raises StopIteration when the seeds are exhausted.
    seeds = self._seeds_iter._next_data().to(self.device)
    if not self.as_pyg_v1:
      inputs = NodeSamplerInput(
        node=seeds,
        input_type=self._input_type
      )
      out = self.sampler.sample_from_nodes(inputs)
      result = self._collate_fn(out)
    else:
      # PyG v1 compatibility path: return the raw sampler output directly.
      return self.sampler.sample_pyg_v1(seeds)
    return result | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_context.py | graphlearn_torch/python/distributed/dist_context.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from enum import Enum
from typing import Optional, List
class DistRole(Enum):
  r""" Role played by a process within a distributed context group. """
  # Worker inside a homogeneous worker group (non-server mode).
  WORKER = 1
  # Server side of a server-client deployment.
  SERVER = 2
  # Client side of a server-client deployment.
  CLIENT = 3
# Default group names used when the caller does not provide one.
_DEFAULT_WORKER_GROUP = '_default_worker'
_DEFAULT_SERVER_GROUP = '_default_server'
_DEFAULT_CLIENT_GROUP = '_default_client'
class DistContext(object):
  r""" Distributed context info of the current process.

  Args:
    role (DistRole): The role type of the current context group.
    group_name (str): A unique name of the current role group.
    world_size (int): The number of processes in the current role group.
    rank (int): The current process rank within the current role group.
    global_world_size (int): The total number of processes in all role groups.
    global_rank (int): The current process rank within all role groups.
  """
  def __init__(self,
               role: DistRole,
               group_name: str,
               world_size: int,
               rank: int,
               global_world_size: int,
               global_rank: int):
    # Ranks must fall inside their group/global world sizes, and a role
    # group can never be larger than the global group.
    assert world_size > 0 and rank in range(world_size)
    assert global_world_size > 0 and global_rank in range(global_world_size)
    assert world_size <= global_world_size
    self.role = role
    self.group_name = group_name
    self.world_size = world_size
    self.rank = rank
    self.global_world_size = global_world_size
    self.global_rank = global_rank

  def __repr__(self) -> str:
    fields = ", ".join(f"{k}: {v}" for k, v in self.__dict__.items())
    return f"{self.__class__.__name__}({fields})"

  def __eq__(self, obj):
    if not isinstance(obj, DistContext):
      return False
    # Equal iff every attribute matches.
    return not any(
      value != obj.__dict__[key] for key, value in self.__dict__.items()
    )

  def is_worker(self) -> bool:
    # True iff this process belongs to a plain worker group.
    return self.role == DistRole.WORKER

  def is_server(self) -> bool:
    # True iff this process belongs to a server group.
    return self.role == DistRole.SERVER

  def is_client(self) -> bool:
    # True iff this process belongs to a client group.
    return self.role == DistRole.CLIENT

  def num_servers(self) -> int:
    # Servers know their own world size; clients infer it as the remainder
    # of the global group; plain workers have no servers.
    if self.role == DistRole.SERVER:
      return self.world_size
    if self.role == DistRole.CLIENT:
      return self.global_world_size - self.world_size
    return 0

  def num_clients(self) -> int:
    # Mirror of ``num_servers`` from the client side.
    if self.role == DistRole.CLIENT:
      return self.world_size
    if self.role == DistRole.SERVER:
      return self.global_world_size - self.world_size
    return 0

  @property
  def worker_name(self) -> str:
    r""" Get worker name of the current process of this context. """
    return f"{self.group_name}_{self.rank}"
_dist_context: DistContext = None
r""" Distributed context on the current process.
"""

_clients_to_servers: dict = None
r""" A dict mapping from client rank to server ranks. int -> List[int]"""


def get_context() -> DistContext:
  r""" Return the distributed context of the current process, or ``None``
  if no context has been initialized yet.
  """
  return _dist_context


def get_clients_to_servers() -> dict:
  r""" Return the client-rank to server-ranks mapping, or ``None`` if it
  has not been computed yet.
  """
  return _clients_to_servers
def _set_worker_context(world_size: int, rank: int,
                        group_name: Optional[str] = None):
  r""" Install a worker-role context on the current process (non-server
  distribution mode).
  """
  global _dist_context
  name = group_name if group_name is not None else _DEFAULT_WORKER_GROUP
  # The worker group is the only group, so global sizes equal local ones.
  _dist_context = DistContext(
    role=DistRole.WORKER,
    group_name=name,
    world_size=world_size,
    rank=rank,
    global_world_size=world_size,
    global_rank=rank
  )
def _set_server_context(num_servers: int, server_rank: int,
                        server_group_name: Optional[str] = None, num_clients: int = 0):
  r""" Install a server-role context on the current process.
  """
  assert num_servers > 0
  global _dist_context
  name = (server_group_name if server_group_name is not None
          else _DEFAULT_SERVER_GROUP)
  # Servers occupy global ranks [0, num_servers).
  _dist_context = DistContext(
    role=DistRole.SERVER,
    group_name=name,
    world_size=num_servers,
    rank=server_rank,
    global_world_size=num_servers + num_clients,
    global_rank=server_rank
  )
def _set_client_context(num_servers: int, num_clients: int, client_rank: int,
                        client_group_name: Optional[str] = None):
  r""" Install a client-role context on the current process and compute the
  client-to-server assignment.
  """
  assert num_servers > 0 and num_clients > 0
  global _dist_context
  name = (client_group_name if client_group_name is not None
          else _DEFAULT_CLIENT_GROUP)
  # Clients occupy global ranks [num_servers, num_servers + num_clients).
  _dist_context = DistContext(
    role=DistRole.CLIENT,
    group_name=name,
    world_size=num_clients,
    rank=client_rank,
    global_world_size=num_servers + num_clients,
    global_rank=num_servers + client_rank
  )
  assign_server_by_order()
def assign_server_by_order():
  r"""Assign servers to each client in round-robin order.

  e.g. 2 clients and 4 servers gives {0: [0, 1], 1: [2, 3]};
  5 clients and 2 servers gives {0: [0], 1: [1], 2: [0], 3: [1], 4: [0]}.

  Returns the list of server ranks assigned to the calling client.
  """
  ctx = get_context()
  assert ctx is not None and ctx.is_client()
  num_clients = ctx.world_size
  num_servers = ctx.global_world_size - ctx.world_size
  global _clients_to_servers
  _clients_to_servers = {}
  next_server = 0
  for client in range(num_clients):
    servers = _clients_to_servers.setdefault(client, [])
    # Every client first gets an equal share of the servers ...
    for _ in range(num_servers // num_clients):
      servers.append(next_server)
      next_server = (next_server + 1) % num_servers
    # ... the remainder goes to the leading clients ...
    if client < num_servers % num_clients:
      servers.append(next_server)
      next_server = (next_server + 1) % num_servers
    # ... and when clients outnumber servers, an empty client still gets one.
    if len(servers) == 0:
      servers.append(next_server)
      next_server = (next_server + 1) % num_servers
  return _clients_to_servers[ctx.rank]
def init_worker_group(world_size: int, rank: int,
                      group_name: Optional[str] = None):
  r""" Initialize a simple worker group on the current process. This method
  should be called only in a non-server distribution mode with a group of
  parallel workers.

  Args:
    world_size (int): Number of all processes participating in the
      distributed worker group.
    rank (int): Rank of the current process within the distributed group
      (a number between 0 and ``world_size``-1).
    group_name (str): A unique name of the distributed group that the
      current process belongs to. If set to ``None``, a default name will
      be used.
  """
  _set_worker_context(world_size, rank, group_name)
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_link_neighbor_loader.py | graphlearn_torch/python/distributed/dist_link_neighbor_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional, Literal
import torch
from ..sampler import (
EdgeSamplerInput, SamplingType, SamplingConfig, NegativeSampling
)
from ..loader import get_edge_label_index
from ..typing import InputEdges, NumNeighbors
from .dist_dataset import DistDataset
from .dist_options import AllDistSamplingWorkerOptions
from .dist_loader import DistLoader
class DistLinkNeighborLoader(DistLoader):
  r""" A distributed loader that performs sampling from edges.

  Args:
    data (DistDataset, optional): The ``DistDataset`` object of a partition of
      graph data and feature data, along with distributed partition books. The
      input dataset must be provided in non-server distribution mode.
    num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]):
      The number of neighbors to sample for each node in each iteration.
      In heterogeneous graphs, may also take in a dictionary denoting
      the amount of neighbors to sample for each individual edge type.
    batch_size (int): How many samples per batch to load (default: ``1``).
    edge_label_index (Tensor or EdgeType or Tuple[EdgeType, Tensor]):
      The edge indices, holding source and destination nodes to start
      sampling from.
      If set to :obj:`None`, all edges will be considered.
      In heterogeneous graphs, needs to be passed as a tuple that holds
      the edge type and corresponding edge indices.
      (default: :obj:`None`)
    edge_label (Tensor, optional): The labels of edge indices from which to
      start sampling from. Must be the same length as
      the :obj:`edge_label_index`. (default: :obj:`None`)
    neg_sampling (NegativeSampling, optional): The negative sampling
      configuration.
      For negative sampling mode :obj:`"binary"`, samples can be accessed
      via the attributes :obj:`edge_label_index` and :obj:`edge_label` in
      the respective edge type of the returned mini-batch.
      In case :obj:`edge_label` does not exist, it will be automatically
      created and represents a binary classification task (:obj:`0` =
      negative edge, :obj:`1` = positive edge).
      In case :obj:`edge_label` does exist, it has to be a categorical
      label from :obj:`0` to :obj:`num_classes - 1`.
      After negative sampling, label :obj:`0` represents negative edges,
      and labels :obj:`1` to :obj:`num_classes` represent the labels of
      positive edges.
      Note that returned labels are of type :obj:`torch.float` for binary
      classification (to facilitate the ease-of-use of
      :meth:`F.binary_cross_entropy`) and of type
      :obj:`torch.long` for multi-class classification (to facilitate the
      ease-of-use of :meth:`F.cross_entropy`).
      For negative sampling mode :obj:`"triplet"`, samples can be
      accessed via the attributes :obj:`src_index`, :obj:`dst_pos_index`
      and :obj:`dst_neg_index` in the respective node types of the
      returned mini-batch.
      :obj:`edge_label` needs to be :obj:`None` for :obj:`"triplet"`
      negative sampling mode.
      If set to :obj:`None`, no negative sampling strategy is applied.
      (default: :obj:`None`)
    shuffle (bool): Set to ``True`` to have the data reshuffled at every
      epoch (default: ``False``).
    drop_last (bool): Set to ``True`` to drop the last incomplete batch, if
      the dataset size is not divisible by the batch size. If ``False`` and
      the size of dataset is not divisible by the batch size, then the last
      batch will be smaller. (default: ``False``).
    with_edge (bool): Set to ``True`` to sample with edge ids and also include
      them in the sampled results. (default: ``False``).
    with_weight (bool): Forwarded to the sampling configuration.
      (default: ``False``).
    edge_dir (str:["in", "out"]): The edge direction for sampling.
      Can be either :str:`"out"` or :str:`"in"`.
      (default: :str:`"out"`)
    collect_features (bool): Set to ``True`` to collect features for nodes
      of each sampled subgraph. (default: ``False``).
    to_device (torch.device, optional): The target device that the sampled
      results should be copied to. If set to ``None``, the current cuda device
      (got by ``torch.cuda.current_device``) will be used if available,
      otherwise, the cpu device will be used. (default: ``None``).
    random_seed (int, optional): Seed forwarded to the sampling
      configuration. (default: ``None``).
    worker_options (optional): The options for launching sampling workers.
      (1) If set to ``None`` or provided with a ``CollocatedDistWorkerOptions``
      object, a single collocated sampler will be launched on the current
      process, while the separate sampling mode will be disabled . (2) If
      provided with a ``MpDistWorkerOptions`` object, the sampling workers will
      be launched on spawned subprocesses, and a share-memory based channel
      will be created for sample message passing from multiprocessing workers
      to the current loader. (3) If provided with a ``RemoteDistWorkerOptions``
      object, the sampling workers will be launched on remote sampling server
      nodes, and a remote channel will be created for cross-machine message
      passing. (default: ``None``).
  """
  def __init__(self,
               data: Optional[DistDataset],
               num_neighbors: NumNeighbors,
               batch_size: int = 1,
               edge_label_index: InputEdges = None,
               edge_label: Optional[torch.Tensor] = None,
               neg_sampling: Optional[NegativeSampling] = None,
               shuffle: bool = False,
               drop_last: bool = False,
               with_edge: bool = False,
               with_weight: bool = False,
               edge_dir: Literal['in', 'out'] = 'out',
               collect_features: bool = False,
               to_device: Optional[torch.device] = None,
               random_seed: Optional[int] = None,
               worker_options: Optional[AllDistSamplingWorkerOptions] = None):
    # Get edge type (or `None` for homogeneous graphs):
    input_type, edge_label_index = get_edge_label_index(
      data, edge_label_index)

    with_neg = neg_sampling is not None
    self.neg_sampling = NegativeSampling.cast(neg_sampling)

    if (self.neg_sampling is not None and self.neg_sampling.is_binary()
        and edge_label is not None and edge_label.numel() > 0
        and edge_label.min() == 0):
      # Increment labels such that `zero` now denotes "negative".
      # The `numel()` guard prevents a crash: `Tensor.min` raises on an
      # empty tensor, and an empty label tensor simply needs no shift.
      edge_label = edge_label + 1

    if (self.neg_sampling is not None and self.neg_sampling.is_triplet()
        and edge_label is not None):
      raise ValueError("'edge_label' needs to be undefined for "
                       "'triplet'-based negative sampling. Please use "
                       "`src_index`, `dst_pos_index` and "
                       "`neg_pos_index` of the returned mini-batch "
                       "instead to differentiate between positive and "
                       "negative samples.")

    # Clone the seed edge index so later in-place use cannot alias the
    # caller's tensor.
    input_data = EdgeSamplerInput(
      row=edge_label_index[0].clone(),
      col=edge_label_index[1].clone(),
      label=edge_label,
      input_type=input_type,
      neg_sampling=self.neg_sampling,
    )

    sampling_config = SamplingConfig(
      SamplingType.LINK, num_neighbors, batch_size, shuffle,
      drop_last, with_edge, collect_features, with_neg,
      with_weight=with_weight, edge_dir=edge_dir, seed=random_seed
    )

    super().__init__(
      data, input_data, sampling_config, to_device, worker_options
    )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_feature.py | graphlearn_torch/python/distributed/dist_feature.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from ..data import Feature
from ..typing import (
EdgeType, NodeType,
)
from ..sampler import (
SamplerOutput, HeteroSamplerOutput,
)
from ..partition import (
PartitionBook, GLTPartitionBook, HeteroNodePartitionDict, HeteroEdgePartitionDict
)
from ..utils import get_available_device, ensure_device
from .rpc import (
RpcDataPartitionRouter, RpcCalleeBase, rpc_register, rpc_request_async
)
# `PartialFeature` holds the lookup result for a subset of a set of input
# node/edge ids: the first tensor is the features of the subset ids, and the
# second tensor records the positions (index) of those ids within the
# original input id tensor, so partial results can later be stitched back
# into one feature tensor.
PartialFeature = Tuple[torch.Tensor, torch.Tensor]
def communicate_node_num(send_tensor):
  """Exchange one scalar count with every other rank via all-to-all.

  ``send_tensor[i]`` is delivered to rank ``i``; the returned
  ``recv_tensor[i]`` holds the value received from rank ``i``. Returns
  ``(send_tensor, recv_tensor)``, with ``send_tensor`` converted to an
  int64 tensor when it was given as a plain sequence.
  """
  if torch.is_tensor(send_tensor):
    recv_tensor = torch.zeros(send_tensor.shape[0], dtype=send_tensor.dtype)
  else:
    send_tensor = torch.tensor(send_tensor, dtype=torch.int64)
    recv_tensor = torch.zeros(send_tensor.shape[0], dtype=torch.int64)
  # One scalar per rank in each direction.
  scount = [1] * send_tensor.shape[0]
  rcount = [1] * send_tensor.shape[0]
  handle = dist.all_to_all_single(
    recv_tensor, send_tensor, rcount, scount, async_op=True)
  handle.wait()
  dist.barrier()
  return send_tensor, recv_tensor
class RpcFeatureLookupCallee(RpcCalleeBase):
  r""" RPC callee that serves feature lookups for remote processes by
  delegating to the local store of a ``DistFeature`` instance.
  """
  def __init__(self, dist_feature):
    super().__init__()
    # The DistFeature whose local store answers incoming lookups.
    self.dist_feature = dist_feature

  def call(self, *args, **kwargs):
    # Invoked on the callee side of an RPC request.
    return self.dist_feature.local_get(*args, **kwargs)
class DistFeature(object):
  r""" Distributed feature data manager for global feature lookups.

  Args:
    num_partitions: Number of data partitions.
    partition_idx: Data partition idx of current process.
    local_feature: Local ``Feature`` instance.
    feature_pb: Partition book which records node/edge ids to worker node
      ids mapping on feature store.
    local_only: Use this instance only for local feature lookup or stitching.
      If set to ``True``, the related rpc callee will not be registered and
      users should ensure that lookups for remote features are not invoked
      through this instance. Default to ``False``.
    rpc_router: Router mapping a partition index to the rpc worker that
      holds it. Required unless ``local_only`` is ``True``.
    device: Device used for computing. Default to ``None``.

  Note that ``local_feature`` and ``feature_pb`` should be a dictionary
  for hetero data.
  """
  def __init__(self,
               num_partitions: int,
               partition_idx: int,
               local_feature: Union[Feature,
                                    Dict[Union[NodeType, EdgeType], Feature]],
               feature_pb: Union[PartitionBook,
                                 HeteroNodePartitionDict,
                                 HeteroEdgePartitionDict],
               local_only: bool = False,
               rpc_router: Optional[RpcDataPartitionRouter] = None,
               device: Optional[torch.device] = None):
    self.num_partitions = num_partitions
    self.partition_idx = partition_idx
    self.device = get_available_device(device)
    ensure_device(self.device)

    # Detect homo vs. hetero layout from the feature container and make
    # sure IPC-shared features are materialized in this process.
    self.local_feature = local_feature
    if isinstance(self.local_feature, dict):
      self.data_cls = 'hetero'
      for _, feat in self.local_feature.items():
        if isinstance(feat, Feature):
          feat.lazy_init_with_ipc_handle()
    elif isinstance(self.local_feature, Feature):
      self.data_cls = 'homo'
      self.local_feature.lazy_init_with_ipc_handle()
    else:
      raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                       f"feature type '{type(self.local_feature)}'")

    # Normalize raw tensors into ``PartitionBook`` instances.
    self.feature_pb = feature_pb
    if isinstance(self.feature_pb, dict):
      assert self.data_cls == 'hetero'
      for key, feat in self.feature_pb.items():
        if not isinstance(feat, PartitionBook):
          self.feature_pb[key] = GLTPartitionBook(feat)
    elif isinstance(self.feature_pb, PartitionBook):
      assert self.data_cls == 'homo'
    elif isinstance(self.feature_pb, torch.Tensor):
      assert self.data_cls == 'homo'
      self.feature_pb = GLTPartitionBook(self.feature_pb)
    else:
      raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                       f"patition book type '{type(self.feature_pb)}'")

    self.rpc_router = rpc_router
    if not local_only:
      if self.rpc_router is None:
        raise ValueError(f"'{self.__class__.__name__}': a rpc router must be "
                         f"provided when `local_only` set to `False`")
      # Register a callee so other ranks can pull features from us.
      rpc_callee = RpcFeatureLookupCallee(self)
      self.rpc_callee_id = rpc_register(rpc_callee)
    else:
      self.rpc_callee_id = None

  def _get_local_store(self, input_type: Optional[Union[NodeType, EdgeType]]):
    r""" Return the ``(Feature, PartitionBook)`` pair for ``input_type``
    (hetero data), or the single local pair (homo data).
    """
    if self.data_cls == 'hetero':
      assert input_type is not None
      return self.local_feature[input_type], self.feature_pb[input_type]
    return self.local_feature, self.feature_pb

  def local_get(
    self,
    ids: torch.Tensor,
    input_type: Optional[Union[NodeType, EdgeType]] = None
  ) -> torch.Tensor:
    r""" Lookup features in the local feature store, the input node/edge ids
    should be guaranteed to be all local to the current feature store.
    """
    feat, _ = self._get_local_store(input_type)
    # TODO: check performance with `return feat[ids].cpu()`
    return feat.cpu_get(ids)

  def get_all2all(
    self,
    sampler_result: Union[SamplerOutput, HeteroSamplerOutput],
    ntype_list: List[NodeType]
  ) -> Dict[NodeType, torch.Tensor]:
    r""" Lookup features synchronously using torch.distributed.all_to_all.

    Remote rows are fetched collectively, then stitched with the local
    rows into one feature tensor per node type.
    """
    remote_feats_dict = self.remote_selecting_get_all2all(sampler_result, ntype_list)
    feat_dict = {}
    for ntype, nodes in sampler_result.node.items():
      nodes = nodes.to(torch.long)
      local_feat = self._local_selecting_get(nodes, ntype)
      # NOTE(review): assumes every sampled node type appears in
      # ``ntype_list``; otherwise ``remote_feats`` is None and ``_stitch``
      # would fail -- confirm with callers.
      remote_feats = remote_feats_dict.get(ntype, None)
      feat_dict[ntype] = self._stitch(nodes, local_feat, remote_feats)
    return feat_dict

  def async_get(
    self,
    ids: torch.Tensor,
    input_type: Optional[Union[NodeType, EdgeType]] = None
  ) -> torch.futures.Future:
    r""" Lookup features asynchronously and return a future whose result
    is the stitched feature tensor for ``ids``.
    """
    remote_fut = self._remote_selecting_get(ids, input_type)
    local_feature = self._local_selecting_get(ids, input_type)
    res_fut = torch.futures.Future()
    def on_done(*_):
      # Runs when all remote lookups complete; propagate failures to the
      # caller's future instead of raising in the callback thread.
      try:
        remote_feature_list = remote_fut.wait()
        result = self._stitch(ids, local_feature, remote_feature_list)
      except Exception as e:
        res_fut.set_exception(e)
      else:
        res_fut.set_result(result)
    remote_fut.add_done_callback(on_done)
    return res_fut

  def __getitem__(
    self,
    input: Union[torch.Tensor, Tuple[Union[NodeType, EdgeType], torch.Tensor]]
  ) -> torch.Tensor:
    r""" Lookup features synchronously in a '__getitem__' way.

    Accepts either a plain id tensor (homo data) or a ``(type, ids)``
    tuple (hetero data).
    """
    if isinstance(input, torch.Tensor):
      input_type, ids = None, input
    elif isinstance(input, tuple):
      # Bug fix: the original unpacked from the unbound name `ids`
      # (`ids[0], ids[1]`), raising UnboundLocalError on every tuple
      # lookup; unpack from `input` instead.
      input_type, ids = input[0], input[1]
    else:
      raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                       f"type for feature lookup: '{type(input)}'")
    fut = self.async_get(ids, input_type)
    return fut.wait()

  def _local_selecting_get(
    self,
    ids: torch.Tensor,
    input_type: Optional[Union[NodeType, EdgeType]] = None
  ) -> PartialFeature:
    r""" Select node/edge ids only in the local feature store and lookup
    features of them.

    Args:
      ids: input node/edge ids.
      input_type: input node/edge type for heterogeneous feature lookup.

    Return:
      PartialFeature: features and index for local node/edge ids.
    """
    feat, pb = self._get_local_store(input_type)
    input_order = torch.arange(ids.size(0),
                               dtype=torch.long,
                               device=self.device)
    partition_ids = pb[ids.to(pb.device)].to(self.device)
    ids = ids.to(self.device)
    local_mask = (partition_ids == self.partition_idx)
    local_ids = torch.masked_select(ids, local_mask)
    local_index = torch.masked_select(input_order, local_mask)
    return feat[local_ids], local_index

  def remote_selecting_prepare(
    self,
    sampler_result: Union[SamplerOutput, HeteroSamplerOutput],
    ntype_list: List[NodeType]
  ):
    r""" For each node type, count how many ids this rank must request from
    every other partition and exchange those counts.

    Return:
      (send_dict, recv_dict): ``send_dict[ntype][p]`` is the number of ids
      this rank requests from partition ``p``; ``recv_dict[ntype][p]`` is
      the number it must serve to partition ``p``.
    """
    rfeat_recv_dict = {}
    rfeat_send_dict = {}
    for ntype in ntype_list:
      ids = sampler_result.node.get(ntype, None)
      if ids is None:
        # This type was not sampled here; still participate with zeros so
        # the collective call stays symmetric across ranks.
        send_remote_count = torch.zeros(self.num_partitions, dtype=torch.int64)
      else:
        ids = ids.to(torch.long)
        _, pb = self._get_local_store(ntype)
        ids = ids.to(self.device)
        partition_ids = pb[ids.to(pb.device)].to(self.device)
        send_remote_count = []
        for pidx in range(0, self.num_partitions):
          if pidx == self.partition_idx:
            send_remote_count.append(0)
          else:
            remote_mask = (partition_ids == pidx)
            remote_ids = torch.masked_select(ids, remote_mask)
            ssize = remote_ids.numel()
            send_remote_count.append(ssize)
      send_sr, recv_sr = communicate_node_num(send_remote_count)
      rfeat_recv_dict[ntype] = recv_sr
      rfeat_send_dict[ntype] = send_sr
    return rfeat_send_dict, rfeat_recv_dict

  def communicate_node_id(
    self,
    sampler_result: Union[SamplerOutput, HeteroSamplerOutput],
    ntype_list: List[NodeType]
  ):
    r""" Exchange the global ids whose features must be fetched remotely.

    Sends each remote partition the ids it owns (grouped per partition,
    then per node type) and stores the ids other ranks request from this
    partition in ``self.recv_rn_gnid`` / ``self.recv_rn_count``.

    Return:
      (remote_cnt_list, indexes): per-partition totals of requested ids,
      and ``indexes[ntype][pidx]``, the positions of those ids within the
      original input order (used later for stitching).
    """
    offset = 0
    indexes = {}
    send_ids = []
    remote_cnt_list = torch.zeros(self.num_partitions, dtype=torch.long)
    for ntype in ntype_list:
      indexes[ntype] = [None] * self.num_partitions
    for pidx in range(0, self.num_partitions):
      remote_cnt_sum = 0
      for ntype in ntype_list:
        nodes = sampler_result.node.get(ntype, None)
        if nodes is None:
          continue
        nodes = nodes.to(torch.long)
        _, pb = self._get_local_store(ntype)
        input_order = torch.arange(nodes.size(0),
                                   dtype=torch.long,
                                   device=self.device)
        partition_ids = pb[nodes.to(pb.device)].to(self.device)
        nodes = nodes.to(self.device)
        if pidx == self.partition_idx:
          continue
        else:
          remote_mask = (partition_ids == pidx)
          remote_ids = torch.masked_select(nodes, remote_mask)
          indexes[ntype][pidx] = torch.masked_select(input_order, remote_mask)
          ssize = remote_ids.numel()
          send_ids[offset: offset + ssize] = remote_ids.tolist()
          remote_cnt_sum = remote_cnt_sum + remote_ids.numel()
          offset = offset + ssize
      remote_cnt_list[pidx] = remote_cnt_sum
    assert len(send_ids) == sum(remote_cnt_list)
    send_sr, recv_sr = communicate_node_num(remote_cnt_list)
    _, trecv = sum(send_sr), sum(recv_sr)
    self.recv_rn_count = []
    for pidx in range(self.num_partitions):
      self.recv_rn_count.append(int(recv_sr[pidx]))
    self.recv_rn_gnid = torch.zeros(trecv, dtype=torch.long)
    dist.all_to_all_single(self.recv_rn_gnid, torch.tensor(send_ids),
                           self.recv_rn_count, remote_cnt_list.tolist(),
                           async_op=False)
    return remote_cnt_list, indexes

  def communicate_node_feats(
    self,
    ntype_list: List[NodeType],
    remote_cnt: torch.Tensor,
    send_num_dict: Dict[NodeType, List[int]],
    recv_num_dict: Dict[NodeType, List[int]],
    indexes: Dict[NodeType, List]
  ):
    r""" Serve the feature rows requested by other ranks and collect the
    rows this rank requested.

    Return:
      Dict[NodeType, List[PartialFeature]]: remote partial features per
      node type, ready to be stitched with the local partial feature.
    """
    # Gather the rows other ranks asked for, in arrival order.
    rfeats_list = []
    offset = 0
    for pidx in range(self.num_partitions):
      if pidx == self.partition_idx:
        continue
      else:
        for ntype in ntype_list:
          feat_num = recv_num_dict.get(ntype)[pidx]
          if feat_num > 0:
            feat, _ = self._get_local_store(ntype)
            ntype_ids = self.recv_rn_gnid[offset:offset+feat_num]
            offset = offset + feat_num
            rfeats_list.append(feat[ntype_ids])
    # NOTE(review): assumes at least one row is served here; `torch.cat`
    # raises on an empty list -- confirm callers guarantee this.
    rfeats_send = torch.cat(rfeats_list, dim=0)
    feat_size = rfeats_send.shape[1]
    send_count = self.recv_rn_count
    recv_count = remote_cnt.tolist()
    recv_feats = torch.zeros((sum(recv_count), feat_size), dtype=rfeats_send.dtype)
    req = dist.all_to_all_single(recv_feats, rfeats_send,
                                 recv_count, send_count,
                                 async_op=True)
    req.wait()
    dist.barrier()
    # Split received rows per source partition, then per node type, pairing
    # each slice with its original input-order index for stitching.
    recv_feat_list = torch.split(recv_feats, recv_count, dim=0)
    remote_feats_dict = {}
    for ntype in ntype_list:
      remote_feats_dict[ntype] = []
    for pidx in range(self.num_partitions):
      if pidx == self.partition_idx:
        continue
      else:
        offset = 0
        for ntype in ntype_list:
          send_num = send_num_dict.get(ntype)[pidx]
          if send_num > 0:
            ntype_feat = recv_feat_list[pidx][offset:offset+send_num, :]
            remote_feats_dict[ntype].append((ntype_feat, indexes[ntype][pidx]))
            offset = offset + send_num
    return remote_feats_dict

  def remote_selecting_get_all2all(
    self,
    sampler_result: Union[SamplerOutput, HeteroSamplerOutput],
    ntype_list: List[NodeType]
  ) -> Dict[NodeType, List]:
    r""" Fetch all remote features for ``sampler_result`` with collective
    all-to-all communication: first counts, then ids, then feature rows.
    """
    rfeat_send_dict, rfeat_recv_dict = self.remote_selecting_prepare(sampler_result, ntype_list)
    remote_cnt, indexes = self.communicate_node_id(sampler_result, ntype_list)
    dist.barrier()
    remote_feats_dict = self.communicate_node_feats(ntype_list, remote_cnt, rfeat_send_dict, rfeat_recv_dict, indexes)
    return remote_feats_dict

  def _remote_selecting_get(
    self,
    ids: torch.Tensor,
    input_type: Optional[Union[NodeType, EdgeType]] = None
  ) -> torch.futures.Future:
    r""" Select node/edge ids only in the remote feature stores and fetch
    their features.

    Args:
      ids: input node/edge ids.
      input_type: input node/edge type for heterogeneous feature lookup.

    Return:
      torch.futures.Future: a torch future with a list of `PartialFeature`,
      which corresponds to partial features on different remote workers.
    """
    assert (
      self.rpc_callee_id is not None
    ), "Remote feature lookup is disabled in 'local_only' mode."
    _, pb = self._get_local_store(input_type)
    ids = ids.to(pb.device)
    input_order = torch.arange(ids.size(0),
                               dtype=torch.long)
    partition_ids = pb[ids].cpu()
    futs, indexes = [], []
    for pidx in range(0, self.num_partitions):
      if pidx == self.partition_idx:
        continue
      remote_mask = (partition_ids == pidx)
      remote_ids = torch.masked_select(ids, remote_mask)
      if remote_ids.shape[0] > 0:
        to_worker = self.rpc_router.get_to_worker(pidx)
        futs.append(rpc_request_async(to_worker,
                                      self.rpc_callee_id,
                                      args=(remote_ids.cpu(), input_type)))
        indexes.append(torch.masked_select(input_order, remote_mask))
    collect_fut = torch.futures.collect_all(futs)
    res_fut = torch.futures.Future()
    def on_done(*_):
      # Pair each completed remote lookup with its input-order index.
      try:
        fut_list = collect_fut.wait()
        result = []
        for i, fut in enumerate(fut_list):
          result.append((fut.wait(), indexes[i]))
      except Exception as e:
        res_fut.set_exception(e)
      else:
        res_fut.set_result(result)
    collect_fut.add_done_callback(on_done)
    return res_fut

  def _stitch(
    self,
    ids: torch.Tensor,
    local: PartialFeature,
    remotes: List[PartialFeature]
  ) -> torch.Tensor:
    r""" Stitch local and remote partial features into a complete one.

    Args:
      ids: the complete input node/edge ids.
      local: partial feature of local node/edge ids.
      remotes: partial feature list of remote node/edge ids.
    """
    feat = torch.zeros(ids.shape[0],
                       local[0].shape[1],
                       dtype=local[0].dtype,
                       device=self.device)
    # Scatter every partial result back to its original input position.
    feat[local[1].to(self.device)] = local[0].to(self.device)
    for remote in remotes:
      feat[remote[1].to(self.device)] = remote[0].to(self.device)
    return feat
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_client.py | graphlearn_torch/python/distributed/dist_client.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from typing import Optional
from .dist_context import DistRole, get_context, _set_client_context
from .dist_server import DistServer, _call_func_on_server
from .rpc import init_rpc, shutdown_rpc, rpc_global_request_async, barrier
def init_client(num_servers: int, num_clients: int, client_rank: int,
                master_addr: str, master_port: int, num_rpc_threads: int = 4,
                client_group_name: Optional[str] = None, is_dynamic: bool = False):
  r""" Initialize the current process as a client and establish connections
  with all other servers and clients. Note that this method should be called
  only in the server-client distribution mode.

  Args:
    num_servers (int): Number of processes participating in the server group.
    num_clients (int): Number of processes participating in the client group.
    client_rank (int): Rank of the current process within the client group
      (a number between 0 and ``num_clients``-1).
    master_addr (str): The master TCP address for RPC connection between all
      servers and clients; must be identical on every server and client.
    master_port (int): The master TCP port for RPC connection between all
      servers and clients; must be identical on every server and client.
    num_rpc_threads (int): The number of RPC worker threads used for the
      current client. (Default: ``4``).
    client_group_name (str): A unique name of the client group that the
      current process belongs to. If set to ``None``, a default name will be
      used. (Default: ``None``).
    is_dynamic (bool): Whether the world size is dynamic. (Default: ``False``).
  """
  if client_group_name:
    # Normalize dashes: worker names are derived from the group name.
    client_group_name = client_group_name.replace('-', '_')
  _set_client_context(num_servers, num_clients, client_rank, client_group_name)
  init_rpc(master_addr, master_port, num_rpc_threads=num_rpc_threads,
           is_dynamic=is_dynamic)
def shutdown_client():
  r""" Shutdown the client on the current process, notify other servers to
  exit, and destroy all connections.

  No-op (with a warning) when the current process was never initialized as
  a client; raises ``RuntimeError`` if the process has a non-client role.
  """
  current_context = get_context()
  if current_context is None:
    logging.warning("'shutdown_client': try to shutdown client when the "
                    "current process has not been initialized as a client.")
    return
  if not current_context.is_client():
    raise RuntimeError(f"'shutdown_client': role type of the current process "
                       f"context is not a client, got {current_context.role}.")
  # step 1: synchronize with all other clients.
  barrier()
  # step 2: use client-0 to notify all servers to exit after all clients
  # have reached here. (The redundant re-fetch of the context that was here
  # has been removed — ``current_context`` is already in scope.)
  if current_context.rank == 0:
    for server_rank in range(current_context.num_servers()):
      exit_status = request_server(server_rank, DistServer.exit)
      assert exit_status is True, f"Failed to exit server {server_rank}"
  # step 3: shutdown rpc across all servers and clients.
  shutdown_rpc()
def async_request_server(server_rank: int, func, *args, **kwargs):
  r""" Client-side entry for an asynchronous request on a remote server.
  Returns a future for the remote call's result.
  """
  # The server-side dispatcher expects the target callable as the first
  # positional argument, followed by its own arguments.
  request_args = [func, *args]
  return rpc_global_request_async(
    target_role=DistRole.SERVER,
    role_rank=server_rank,
    func=_call_func_on_server,
    args=request_args,
    kwargs=kwargs,
  )
def request_server(server_rank: int, func, *args, **kwargs):
  r""" Client-side entry for a synchronous request on a remote server:
  dispatches through :func:`async_request_server` and blocks on the result.
  """
  return async_request_server(server_rank, func, *args, **kwargs).wait()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_subgraph_loader.py | graphlearn_torch/python/distributed/dist_subgraph_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional, Literal
import torch
from ..sampler import NodeSamplerInput, SamplingType, SamplingConfig
from ..typing import InputNodes, NumNeighbors
from .dist_dataset import DistDataset
from .dist_options import AllDistSamplingWorkerOptions
from .dist_loader import DistLoader
class DistSubGraphLoader(DistLoader):
  r""" A distributed loader for subgraph sampling.

  Args:
    data (DistDataset, optional): The ``DistDataset`` object holding a
      partition of graph and feature data together with the distributed
      partition books. Must be provided in non-server distribution mode.
    input_nodes (torch.Tensor): The seed nodes, optionally given as a
      ``(node_type, seeds)`` tuple for hetero data.
    num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]):
      The number of neighbors to sample for each node in each iteration.
    batch_size (int): How many samples per batch to load (default: ``1``).
    shuffle (bool): Set to ``True`` to have the data reshuffled at every
      epoch (default: ``False``).
    drop_last (bool): Set to ``True`` to drop the last incomplete batch when
      the dataset size is not divisible by the batch size; otherwise the
      last batch will simply be smaller. (default: ``False``).
    with_edge (bool): Set to ``True`` to sample with edge ids and include
      them in the sampled results. (default: ``False``).
    with_weight (bool): Set to ``True`` to sample with edge weights.
      (default: ``False``).
    edge_dir (str): Sampling direction, ``'in'`` or ``'out'``.
      (default: ``'out'``).
    collect_features (bool): Set to ``True`` to collect features for nodes
      of each sampled subgraph. (default: ``False``).
    to_device (torch.device, optional): Target device for sampled results;
      defaults to the current cuda device when available, else cpu.
      (default: ``None``).
    random_seed (int, optional): Seed for deterministic sampling.
      (default: ``None``).
    worker_options (optional): Options controlling how sampling workers are
      launched: collocated (``CollocatedDistWorkerOptions`` or ``None``),
      multiprocessing (``MpDistWorkerOptions``, shared-memory channel), or
      remote servers (``RemoteDistWorkerOptions``, remote channel).
      (default: ``None``).
  """
  def __init__(self,
               data: Optional[DistDataset],
               input_nodes: InputNodes,
               num_neighbors: Optional[NumNeighbors] = None,
               batch_size: int = 1,
               shuffle: bool = False,
               drop_last: bool = False,
               with_edge: bool = False,
               with_weight: bool = False,
               edge_dir: Literal['in', 'out'] = 'out',
               collect_features: bool = False,
               to_device: Optional[torch.device] = None,
               random_seed: Optional[int] = None,
               worker_options: Optional[AllDistSamplingWorkerOptions] = None):
    # Hetero seeds arrive as a (node_type, seeds) pair; homo seeds as a
    # plain tensor with no node type.
    if isinstance(input_nodes, tuple):
      seeds_type, seeds = input_nodes
    else:
      seeds_type, seeds = None, input_nodes
    input_data = NodeSamplerInput(node=seeds, input_type=seeds_type)
    # TODO: currently only support out-sample
    sampling_config = SamplingConfig(
      SamplingType.SUBGRAPH, num_neighbors, batch_size, shuffle,
      drop_last, with_edge, collect_features, with_neg=False,
      with_weight=with_weight, edge_dir=edge_dir, seed=random_seed
    )
    super().__init__(
      data, input_data, sampling_config, to_device, worker_options
    )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_graph.py | graphlearn_torch/python/distributed/dist_graph.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Dict, Optional, Union
import torch
from ..data import Graph
from ..typing import (NodeType, EdgeType)
from ..partition import (
PartitionBook, GLTPartitionBook,
HeteroNodePartitionDict, HeteroEdgePartitionDict
)
class DistGraph(object):
  r""" Simple wrapper for graph data with distributed context.

  TODO: support graph operations.

  Args:
    num_partitions: Number of data partitions.
    partition_idx: Data partition idx of the current process.
    local_graph: local `Graph` instance.
    node_pb: Partition book which records vertex ids to worker node ids.
    edge_pb: Partition book which records edge ids to worker node ids.

  Note that `local_graph`, `node_pb` and `edge_pb` should be a dictionary
  for hetero data.
  """
  def __init__(self,
               num_partitions: int,
               partition_idx: int,
               local_graph: Union[Graph, Dict[EdgeType, Graph]],
               node_pb: Union[PartitionBook, HeteroNodePartitionDict],
               edge_pb: Union[PartitionBook, HeteroEdgePartitionDict]=None):
    self.num_partitions = num_partitions
    self.partition_idx = partition_idx
    self.local_graph = local_graph
    # Infer homo/hetero layout from the container type of the local graph.
    if isinstance(self.local_graph, dict):
      self.data_cls = 'hetero'
      for _, graph in self.local_graph.items():
        graph.lazy_init()
    elif isinstance(self.local_graph, Graph):
      self.data_cls = 'homo'
      self.local_graph.lazy_init()
    else:
      raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                       f"graph type '{type(self.local_graph)}'")
    # Normalize node partition books: raw tensors are wrapped into
    # ``GLTPartitionBook`` so that lookups share one interface.
    self.node_pb = node_pb
    if self.node_pb is not None:
      if isinstance(self.node_pb, dict):
        assert self.data_cls == 'hetero'
        for key, feat in self.node_pb.items():
          if not isinstance(feat, PartitionBook):
            self.node_pb[key] = GLTPartitionBook(feat)
      elif isinstance(self.node_pb, PartitionBook):
        assert self.data_cls == 'homo'
      elif isinstance(self.node_pb, torch.Tensor):
        assert self.data_cls == 'homo'
        self.node_pb = GLTPartitionBook(self.node_pb)
      else:
        raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                         f"node patition book type '{type(self.node_pb)}'")
    # Same normalization for edge partition books.
    self.edge_pb = edge_pb
    if self.edge_pb is not None:
      if isinstance(self.edge_pb, dict):
        assert self.data_cls == 'hetero'
        for key, feat in self.edge_pb.items():
          if not isinstance(feat, PartitionBook):
            self.edge_pb[key] = GLTPartitionBook(feat)
      elif isinstance(self.edge_pb, PartitionBook):
        assert self.data_cls == 'homo'
      elif isinstance(self.edge_pb, torch.Tensor):
        assert self.data_cls == 'homo'
        self.edge_pb = GLTPartitionBook(self.edge_pb)
      else:
        raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                         f"edge patition book type '{type(self.edge_pb)}'")

  def get_local_graph(self, etype: Optional[EdgeType]=None):
    r""" Get a `Graph` obj of a specific edge type (``etype`` is required
    for hetero data).
    """
    if self.data_cls == 'hetero':
      assert etype is not None
      return self.local_graph[etype]
    return self.local_graph

  def get_node_partitions(self, ids: torch.Tensor,
                          ntype: Optional[NodeType]=None):
    r""" Get the partition ids of node ids with a specific node type.
    """
    if self.data_cls == 'hetero':
      assert ntype is not None
      pb = self.node_pb[ntype]
    else:
      pb = self.node_pb
    return pb[ids.to(pb.device)]

  def get_edge_partitions(self, eids: torch.Tensor,
                          etype: Optional[EdgeType]=None):
    r""" Get the partition ids of edge ids with a specific edge type.

    PS: there is no edge pb implementation when loading graph from v6d.
    """
    if self.data_cls == 'hetero':
      assert etype is not None
      assert isinstance(self.edge_pb[etype], GLTPartitionBook)
      pb = self.edge_pb[etype]
    else:
      # BUGFIX: in the homo case ``self.edge_pb`` is a partition book, not a
      # dict — the original asserted on ``self.edge_pb[etype]`` with
      # ``etype=None``, which is an invalid lookup.
      assert isinstance(self.edge_pb, GLTPartitionBook)
      pb = self.edge_pb
    return pb[eids.to(pb.device)]
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/rpc.py | graphlearn_torch/python/distributed/rpc.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import atexit
import time
import collections
import functools
import logging
import threading
from abc import ABC, abstractmethod
from typing import Dict, List, Set
from torch.distributed import rpc
from .dist_context import DistRole, get_context
SERVER_INIT_CHECK_INTERVAL = 3.0
MAX_RETYR_TIMES = 60
_rpc_init_lock = threading.RLock()
_rpc_inited: bool = False
r""" State of rpc initialization on the current process.
"""
_rpc_worker_names: Dict[DistRole, List[str]] = None
r""" Dict from role type to all rpc worker names in this role group.
"""
_rpc_current_group_worker_names: Set[str] = None
r""" Set of rpc worker names in the current role group. Used in all_gather
in a role group.
"""
_rpc_master_addr: str = None
r""" Master address used by rpc agent on the current process.
"""
_rpc_master_port: int = None
r""" Master port used by rpc agent on the current process.
"""
def rpc_is_initialized():
  r""" Report the rpc state of the current process: ``True`` once
  ``init_rpc`` has completed, ``False`` before that, and ``None`` after
  ``shutdown_rpc`` has been called.
  """
  return _rpc_inited
def _require_initialized(func):
  r""" Decorator that rejects calls made before RPC initialization (or after
  shutdown) with a ``RuntimeError``. Mirrors
  ``torch.distributed.rpc.api._require_initialized``.
  """
  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    # ``rpc_is_initialized`` is tri-state (True/False/None), so compare
    # against ``True`` explicitly.
    if rpc_is_initialized() is True:
      return func(*args, **kwargs)
    raise RuntimeError("RPC has not been initialized or has been shutdowned")
  return wrapper
@_require_initialized
def get_rpc_master_addr():
  r""" Get the master address for rpc communication on the current process.

  Raises:
    RuntimeError: If rpc has not been initialized (enforced by decorator).
  """
  return _rpc_master_addr
@_require_initialized
def get_rpc_master_port():
  r""" Get the master port for rpc communication on the current process.

  Raises:
    RuntimeError: If rpc has not been initialized (enforced by decorator).
  """
  return _rpc_master_port
@_require_initialized
def get_rpc_current_group_worker_names() -> List[str]:
  r""" Return the rpc worker names (sorted by rank) of the role group that
  the current process belongs to.
  """
  return _rpc_worker_names[get_context().role]
@_require_initialized
def get_rpc_worker_names() -> Dict[DistRole, List[str]]:
  r""" Get the rpc worker names (each list sorted by rank) of every role
  group known to this process.
  """
  return _rpc_worker_names
## All gather objects only from the current role group.
# Guards the sequence-id counter and the per-sequence gather states below.
_role_based_all_gather_dict_lock = threading.RLock()
# Monotonically increasing id so concurrent all_gather rounds don't collide.
_role_based_all_gather_sequence_id = 0
# Maps sequence id -> AllGatherStates (gathered_objects + proceed_signal).
_role_based_all_gather_sequence_id_to_states: collections.defaultdict = \
  collections.defaultdict(rpc.AllGatherStates)
def _role_based_gather_to_leader(sequence_id, worker_name, obj):
  r""" Leader-side collection step of the role-scoped all_gather: record
  ``obj`` from ``worker_name`` and release the proceed signal once every
  worker of the current role group has reported.
  """
  with _role_based_all_gather_dict_lock:
    assert (
      worker_name in _rpc_current_group_worker_names
    ), f"{worker_name} is not expected by leader."
    states = _role_based_all_gather_sequence_id_to_states[sequence_id]
    assert (
      worker_name not in states.gathered_objects
    ), f"{worker_name} reported intent sequence id {sequence_id} twice."
    states.gathered_objects[worker_name] = obj
    # All members reported -> wake the leader's waiting phase.
    if set(states.gathered_objects.keys()) == _rpc_current_group_worker_names:
      states.proceed_signal.set()
def _role_based_broadcast_to_followers(sequence_id, objects_map):
  r""" Follower-side completion step of the role-scoped all_gather: install
  the leader's gathered map and unblock the local waiter.
  """
  with _role_based_all_gather_dict_lock:
    states = _role_based_all_gather_sequence_id_to_states[sequence_id]
    assert (
      not states.proceed_signal.is_set()
    ), f"Termination signal sequence id {sequence_id} got set twice."
    states.gathered_objects = objects_map
    states.proceed_signal.set()
@_require_initialized
def all_gather(obj, timeout=None):
  r""" Gathers objects only from the current role group in a list. This
  function blocks until all workers in the current role group have received
  the gathered results. The implementation of this method is refer to
  ``torch.distributed.rpc.api._all_gather``.

  Returns:
    Dict mapping worker name -> the object that worker contributed.
  """
  assert (
    _rpc_current_group_worker_names is not None
  ), "`_rpc_current_group_worker_names` is not initialized for `all_gather`."
  # The lexicographically-smallest worker name acts as the leader; this is
  # deterministic and identical on every member of the group.
  leader_name = sorted(_rpc_current_group_worker_names)[0]
  self_name = get_context().worker_name
  global _role_based_all_gather_sequence_id
  with _role_based_all_gather_dict_lock:
    sequence_id = _role_based_all_gather_sequence_id
    _role_based_all_gather_sequence_id += 1
  is_leader = leader_name == self_name
  if timeout is None:
    timeout = rpc.get_rpc_timeout()
  # Phase 1: Followers send it's object to the leader
  if is_leader:
    _role_based_gather_to_leader(sequence_id, self_name, obj)
  else:
    rpc.rpc_sync(
      leader_name,
      _role_based_gather_to_leader,
      args=(sequence_id, self_name, obj),
      timeout=timeout,
    )
  # Wait (outside the lock) until this process has the full gathered map:
  # the leader is released by the last follower's report, each follower by
  # the leader's broadcast below.
  with _role_based_all_gather_dict_lock:
    states = _role_based_all_gather_sequence_id_to_states[sequence_id]
  states.proceed_signal.wait()
  # Phase 2: Leader broadcast gathered results to all followers
  # Leader's signal is the first to be unblocked, after receiving all
  # followers' data objects.
  if is_leader:
    worker_name_to_response_future_dict = {}
    for follower_name in _rpc_current_group_worker_names - {leader_name}:
      fut = rpc.rpc_async(
        follower_name,
        _role_based_broadcast_to_followers,
        args=(sequence_id, states.gathered_objects),
        timeout=timeout
      )
      worker_name_to_response_future_dict[follower_name] = fut
    errors = []
    for follower_name, fut in worker_name_to_response_future_dict.items():
      try:
        fut.wait()
      except RuntimeError as ex:
        errors.append((follower_name, ex))
    if errors:
      raise RuntimeError(
        f"Followers {[e[0] for e in errors]} timed out in all_gather "
        f"after {timeout:.2f} seconds. The first exception is {errors[0][1]}"
      )
  return states.gathered_objects
@_require_initialized
def barrier(timeout=None):
  r""" Block until every local and remote rpc process of the current role
  group has reached this call. Timeouts are logged rather than raised.
  """
  try:
    all_gather(None, timeout=timeout)
  except RuntimeError as ex:
    logging.error("Failed to respond to 'barrier' in time, got error: %s", ex)
## All gather objects from all role groups.
@_require_initialized
def global_all_gather(obj, timeout=None):
  r""" Gather ``obj`` from every process across all role groups, delegating
  to ``torch.distributed.rpc.api._all_gather``.
  """
  if timeout is not None:
    return rpc.api._all_gather(obj, timeout=timeout)
  return rpc.api._all_gather(obj)
@_require_initialized
def global_barrier(timeout=None):
  r""" Block until every local and remote rpc process across all role
  groups has reached this call. Timeouts are logged rather than raised.
  """
  try:
    global_all_gather(None, timeout=timeout)
  except RuntimeError as ex:
    logging.error("Failed to respond to 'global_barrier' "
                  "in time, got error %s", ex)
## RPC initialization and shutdown
def init_rpc(master_addr: str,
             master_port: int,
             num_rpc_threads: int = 16,
             rpc_timeout: float = 180,
             is_dynamic: bool = False):
  r""" Initialize rpc on the current process.

  Args:
    master_addr: TCP address of the rpc master, shared by all participants.
    master_port: TCP port of the rpc master, shared by all participants.
    num_rpc_threads: Number of worker threads for the TensorPipe agent.
    rpc_timeout: Default timeout (in seconds) for rpc requests.
    is_dynamic: If ``True``, the global world size is not fixed up front;
      participants discover servers by polling availability.

  Raises:
    RuntimeError: On re-init after shutdown, or when the distributed
      context has not been set.
  """
  with _rpc_init_lock:
    if rpc_is_initialized() is True:
      return
    if rpc_is_initialized() is None:
      raise RuntimeError("'init_rpc': Try to re-init rpc after shutdown.")
    ctx = get_context()
    if ctx is None:
      raise RuntimeError("'init_rpc': Distributed context has not been set.")
    options = rpc.TensorPipeRpcBackendOptions(
      # _transports=['ibv', 'uv'],
      _transports=['uv'],
      _channels=['mpt_uv', 'basic'],
      num_worker_threads=num_rpc_threads,
      rpc_timeout=rpc_timeout,
      init_method=f'tcp://{master_addr}:{master_port}'
    )
    rpc.init_rpc(
      name=ctx.worker_name,
      rank=ctx.global_rank,
      world_size=None if is_dynamic else ctx.global_world_size,
      rpc_backend_options=options
    )
    global _rpc_inited
    _rpc_inited = True
    global _rpc_current_group_worker_names
    global _rpc_worker_names
    _rpc_worker_names = {}
    if is_dynamic:
      _rpc_worker_names[DistRole.SERVER] = []
      _rpc_worker_names[DistRole.CLIENT] = []
      if ctx.is_server():
        # Ensure all peer servers are up by polling their availability.
        for server_rank in range(ctx.world_size):
          if server_rank == ctx.rank:
            _rpc_worker_names[DistRole.SERVER].append(
              ctx.group_name + '_' + str(server_rank))
            continue
          times = 0
          is_avail = False
          while not is_avail:
            try:
              is_avail = rpc_global_request_by_rank(server_rank, rpc.is_available)
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit and made the loop unabortable.
            except Exception:
              time.sleep(SERVER_INIT_CHECK_INTERVAL)
              logging.info(f"RETRY {times}: server {ctx.rank} waits server {server_rank}...")
              times += 1
              if times >= MAX_RETYR_TIMES:
                raise RuntimeError(f"TIMEOUT: server {ctx.rank} waits server {server_rank} timeout."
                                   f"Check if server {server_rank} is ready.")
          _rpc_worker_names[DistRole.SERVER].append(
            ctx.group_name + '_' + str(server_rank))
        _rpc_current_group_worker_names = set(_rpc_worker_names[DistRole.SERVER])
        return
      if ctx.is_client():
        # NOTE(review): assumes servers occupy the first global ranks, so
        # ``global_rank - rank`` equals the number of servers — confirm
        # against the context implementation.
        for server_rank in range(ctx.global_rank - ctx.rank):
          times = 0
          is_avail = False
          while not is_avail:
            try:
              is_avail = rpc_global_request_by_rank(server_rank, rpc.is_available)
            # BUGFIX: was a bare ``except:`` (see server branch above).
            except Exception:
              time.sleep(SERVER_INIT_CHECK_INTERVAL)
              logging.info(f"RETRY {times}: client {ctx.rank} waits server {server_rank}...")
              times += 1
              if times >= MAX_RETYR_TIMES:
                raise RuntimeError(f"TIMEOUT: client {ctx.rank} waits server {server_rank} timeout."
                                   f"Check if server {server_rank} is ready.")
          server_name = rpc_global_request_by_rank(server_rank, rpc.get_worker_info).name
          _rpc_worker_names[DistRole.SERVER].append(server_name)
        _rpc_current_group_worker_names = set(
          [ctx.group_name + '_' + str(client_rank)
           for client_rank in range(ctx.world_size)])
        return
    # Static world size: gather (role, world_size, rank) from every process
    # and build the ordered role -> worker-name mapping.
    gathered_results = global_all_gather(
      obj=(ctx.role, ctx.world_size, ctx.rank), timeout=rpc_timeout
    )
    for worker_name, (role, role_size, role_rank) in gathered_results.items():
      worker_list = _rpc_worker_names.get(role, None)
      if worker_list is None:
        worker_list = [None for _ in range(role_size)]
      else:
        if len(worker_list) != role_size:
          raise RuntimeError(f"'init_rpc': world size of role {role} gathered "
                             f"from {worker_name} is inconsistent with others.")
      if worker_list[role_rank] is not None:
        raise RuntimeError(f"'init_rpc': try to set worker name twice with "
                           f"the same rank {role_rank} of role {role}")
      worker_list[role_rank] = worker_name
      _rpc_worker_names[role] = worker_list
    _rpc_current_group_worker_names = set(_rpc_worker_names[ctx.role])
    global_barrier(timeout=rpc_timeout)
    # TODO(hongyi): in server-client mode, if
    # "torch.distributed.init_process_group" follows "global_barrier",
    # some participants may randomly hang
    time.sleep(1)
def shutdown_rpc(graceful=True):
  r""" Shutdown the rpc agent on the current process.

  When ``graceful`` is ``False``, other mechanisms must ensure all rpc
  requests have completed before the rpc servers are torn down.
  """
  if rpc_is_initialized() is True:
    rpc.shutdown(graceful=graceful)
    global _rpc_inited
    # ``None`` marks "shut down": re-initialization is rejected afterwards.
    _rpc_inited = None

# Best-effort non-graceful shutdown at interpreter exit.
atexit.register(shutdown_rpc, False)
## RPC synchronization and routing with data partition mapping.
class RpcDataPartitionRouter(object):
  r""" Round-robin selector of a remote rpc worker responsible for a given
  data partition.
  """
  def __init__(self, partition2workers: List[List[str]]):
    # Every partition must have at least one responsible worker.
    for pidx, workers in enumerate(partition2workers):
      if not workers:
        raise ValueError(f"'RpcDataPartitionRouter': no rpc worker is "
                         f"responsible for data partition '{pidx}'.")
    self.partition2workers = partition2workers
    # Per-partition round-robin cursor.
    self.rpc_worker_indexs = [0] * len(partition2workers)

  def get_to_worker(self, data_partition_idx: int) -> str:
    r""" Return the next worker name for ``data_partition_idx``, advancing
    the round-robin cursor.
    """
    workers = self.partition2workers[data_partition_idx]
    cursor = self.rpc_worker_indexs[data_partition_idx]
    self.rpc_worker_indexs[data_partition_idx] = (cursor + 1) % len(workers)
    return workers[cursor]
@_require_initialized
def rpc_sync_data_partitions(num_data_partitions: int,
                             current_partition_idx: int):
  r""" Synchronize the data partition info across all workers only in the
  current role group.

  Note that all data should be partitioned and used with a single role group.

  Args:
    num_data_partitions (int): The number of all data partitions.
    current_partition_idx (int): The data partition idx that the current
      process is responsible for; some compution tasks on this data
      partition may be sent to the current process from remote workers.

  Returns:
    List mapping partition idx -> list of worker names serving it.
  """
  ctx = get_context()
  partition2workers = [[] for _ in range(num_data_partitions)]
  gathered_results = all_gather(
    (ctx.role, num_data_partitions, current_partition_idx))
  for worker_name, (role, nparts, idx) in gathered_results.items():
    if role != ctx.role:
      raise RuntimeError(f"'rpc_sync_data_partition_mapping': inconsistent "
                         f"role type '{role}' gathered from {worker_name}, "
                         f"current role type is '{ctx.role}'.")
    if nparts != num_data_partitions:
      # BUGFIX: the message previously reported ``ctx.role`` here instead of
      # the local partition count it is being compared against.
      raise RuntimeError(f"'rpc_sync_data_partition_mapping': inconsistent "
                         f"data partition number '{nparts}' gathered from "
                         f"{worker_name}, the value on current process is "
                         f"'{num_data_partitions}'.")
    partition2workers[idx].append(worker_name)
  return partition2workers
## RPC registration in the current role group.
class RpcCalleeBase(ABC):
  r""" Base wrapper for an rpc callee that serves requests from remote
  processes.

  Note that the callee will be invoked only by rpc workers in the current
  role group.
  """
  def __init__(self):
    pass

  @abstractmethod
  def call(self, *args, **kwargs):
    r""" Processing entry for incoming rpc requests; subclasses must
    override this.
    """
# Registry of rpc callees on this process: the lock guards id allocation,
# ``_rpc_callee_id`` is the next id to hand out, and the pool maps assigned
# ids to registered callee objects.
_rpc_callee_lock = threading.RLock()
_rpc_callee_id: int = 0
_rpc_callee_pool: Dict[int, RpcCalleeBase] = {}
@_require_initialized
def rpc_register(callee: RpcCalleeBase):
  r""" Register a callee for rpc requests only in the current role group;
  blocks until all local and remote RPC processes of the current role group
  reach this method.

  Returns:
    The callee id assigned to ``callee`` (identical on every worker of the
    group — enforced by the cross-checks below).
  """
  global _rpc_callee_id, _rpc_callee_pool
  # Allocate the next local callee id under the registry lock.
  with _rpc_callee_lock:
    callee_id = _rpc_callee_id
    _rpc_callee_id += 1
    if callee_id in _rpc_callee_pool:
      raise RuntimeError(f"'rpc_register': try to register with the "
                         f"callee id {callee_id} twice.")
    _rpc_callee_pool[callee_id] = callee
  # Cross-check that every worker of the group allocated the same id for
  # this registration — registration order must match across workers.
  current_role = get_context().role
  callee_ids = all_gather((current_role, callee_id))
  for name, (role, cid) in callee_ids.items():
    if role != current_role:
      raise RuntimeError(f"'rpc_register': get inconsistent role '{role}' "
                         f"from {name}, current role is '{current_role}'.")
    if cid != callee_id:
      raise RuntimeError(f"'rpc_register': get inconsistent callee id '{cid}' "
                         f"from {name}, current callee id is '{callee_id}'.")
  return callee_id
## RPC request entries only for the current role group.
def _rpc_call(callee_id, *args, **kwargs):
  r""" Receiving-side entry for rpc requests within the current role group:
  dispatch to the callee registered under ``callee_id``.
  """
  callee = _rpc_callee_pool.get(callee_id)
  return callee.call(*args, **kwargs)
@_require_initialized
def rpc_request_async(worker_name, callee_id, args=None, kwargs=None):
  r""" Perform a rpc request asynchronously within the current role
  group and return a future.

  Args:
    worker_name: Name of the target rpc worker in the current role group.
    callee_id: Id of the callee registered via ``rpc_register``.
    args: Positional arguments forwarded to the callee (may be ``None``).
    kwargs: Keyword arguments forwarded to the callee (may be ``None``).
  """
  # BUGFIX: the default ``args=None`` made ``(callee_id, *args)`` raise a
  # TypeError; normalize to an empty sequence first.
  if args is None:
    args = ()
  return rpc.rpc_async(
    to=worker_name,
    func=_rpc_call,
    args=(callee_id, *args),
    kwargs=kwargs
  )
@_require_initialized
def rpc_request(worker_name, callee_id, args=None, kwargs=None):
  r""" Synchronous counterpart of :func:`rpc_request_async`: blocks until
  the remote call completes and returns its result.
  """
  return rpc_request_async(worker_name, callee_id, args, kwargs).wait()
## RPC request entries to other rpc worker on arbitrary role group.
@_require_initialized
def rpc_global_request_async(target_role: DistRole, role_rank: int,
                             func, args=None, kwargs=None):
  r""" Perform an asynchronous rpc request to a worker of an arbitrary role
  group, returning a future.
  """
  # Workers may only address workers; servers/clients may address either
  # of the two server-client roles.
  if get_context().is_worker():
    assert target_role == DistRole.WORKER
  else:
    assert target_role in (DistRole.SERVER, DistRole.CLIENT)
  to_name = _rpc_worker_names[target_role][role_rank]
  return rpc.rpc_async(to=to_name, func=func, args=args, kwargs=kwargs)
@_require_initialized
def rpc_global_request(target_role: DistRole, role_rank: int,
                       func, args=None, kwargs=None):
  r""" Synchronous counterpart of :func:`rpc_global_request_async`: blocks
  and returns the remote call's result.
  """
  return rpc_global_request_async(
    target_role, role_rank, func, args, kwargs).wait()
@_require_initialized
def rpc_global_request_by_rank(global_rank: int, func, args=None, kwargs=None):
  r""" Perform a synchronous rpc request addressed by global rank and
  return the result.
  """
  future = rpc.rpc_async(global_rank, func, args, kwargs)
  return future.wait()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_sampling_producer.py | graphlearn_torch/python/distributed/dist_sampling_producer.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import queue
import time, datetime
from enum import Enum
from typing import Optional, Union
import torch
import torch.multiprocessing as mp
from torch._C import _set_worker_signal_handlers
from torch.utils.data.dataloader import DataLoader
from ..channel import ChannelBase
from ..sampler import (
NodeSamplerInput, EdgeSamplerInput, SamplingType, SamplingConfig
)
from ..utils import ensure_device
from ..utils import seed_everything
from ..distributed.dist_context import get_context
from .dist_context import init_worker_group
from .dist_dataset import DistDataset
from .dist_neighbor_sampler import DistNeighborSampler
from .dist_options import _BasicDistSamplingWorkerOptions
from .rpc import init_rpc, shutdown_rpc
# Timeout for each blocking ``task_queue.get`` in the worker loop, so the
# subprocess can periodically re-check liveness instead of hanging forever.
MP_STATUS_CHECK_INTERVAL = 5.0
r""" Interval (in seconds) to check status of processes to avoid hanging in
multiprocessing sampling.
"""
class MpCommand(Enum):
  r""" Commands understood by the multiprocessing sampling worker loop. """
  SAMPLE_ALL = 0  # sample over a full set of seeds (one epoch)
  STOP = 1        # terminate the worker loop
def _sampling_worker_loop(rank,
                          data: DistDataset,
                          sampler_input: Union[NodeSamplerInput, EdgeSamplerInput],
                          unshuffled_index: Optional[torch.Tensor],
                          sampling_config: SamplingConfig,
                          worker_options: _BasicDistSamplingWorkerOptions,
                          channel: ChannelBase,
                          task_queue: mp.Queue,
                          sampling_completed_worker_count: mp.Value,
                          mp_barrier):
  r""" Subprocess work loop for sampling worker.

  Initializes the worker's distributed context, rpc, and a
  ``DistNeighborSampler``, then serves ``MpCommand`` items from
  ``task_queue`` until a ``STOP`` command arrives. Sampled results are
  pushed to ``channel``; ``sampling_completed_worker_count`` is incremented
  once per completed ``SAMPLE_ALL`` run.
  """
  dist_sampler = None
  try:
    init_worker_group(
      world_size=worker_options.worker_world_size,
      rank=worker_options.worker_ranks[rank],
      group_name='_sampling_worker_subprocess'
    )
    # Optional gloo process group used by the sampler's all2all exchange.
    if worker_options.use_all2all:
      torch.distributed.init_process_group(
        backend='gloo',
        timeout=datetime.timedelta(seconds=worker_options.rpc_timeout),
        rank=worker_options.worker_ranks[rank],
        world_size=worker_options.worker_world_size,
        init_method='tcp://{}:{}'.format(worker_options.master_addr, worker_options.master_port)
      )
    # Default thread count scales with partition count, capped at 16.
    if worker_options.num_rpc_threads is None:
      num_rpc_threads = min(data.num_partitions, 16)
    else:
      num_rpc_threads = worker_options.num_rpc_threads
    current_device = worker_options.worker_devices[rank]
    ensure_device(current_device)
    # Restore default signal handling in the spawned subprocess.
    _set_worker_signal_handlers()
    torch.set_num_threads(num_rpc_threads + 1)
    init_rpc(
      master_addr=worker_options.master_addr,
      master_port=worker_options.master_port,
      num_rpc_threads=num_rpc_threads,
      rpc_timeout=worker_options.rpc_timeout
    )
    if sampling_config.seed is not None:
      seed_everything(sampling_config.seed)
    dist_sampler = DistNeighborSampler(
      data, sampling_config.num_neighbors, sampling_config.with_edge,
      sampling_config.with_neg, sampling_config.with_weight,
      sampling_config.edge_dir, sampling_config.collect_features, channel,
      worker_options.use_all2all, worker_options.worker_concurrency,
      current_device, seed=sampling_config.seed
    )
    dist_sampler.start_loop()
    # Pre-build a loader over this worker's fixed seed slice when the seed
    # order is not reshuffled between epochs.
    if unshuffled_index is not None:
      unshuffled_index_loader = DataLoader(
        unshuffled_index, batch_size=sampling_config.batch_size,
        shuffle=False, drop_last=sampling_config.drop_last
      )
    else:
      unshuffled_index_loader = None
    # Signal the parent that this worker is fully initialized.
    mp_barrier.wait()
    keep_running = True
    while keep_running:
      try:
        command, args = task_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
      except queue.Empty:
        # Timed out waiting for a command; loop and try again.
        continue
      if command == MpCommand.SAMPLE_ALL:
        # ``args`` carries a (shuffled) seed index tensor, or ``None`` to
        # reuse the pre-built unshuffled loader.
        seeds_index = args
        if seeds_index is None:
          loader = unshuffled_index_loader
        else:
          loader = DataLoader(
            seeds_index, batch_size=sampling_config.batch_size,
            shuffle=False, drop_last=sampling_config.drop_last
          )
        if sampling_config.sampling_type == SamplingType.NODE:
          for index in loader:
            dist_sampler.sample_from_nodes(sampler_input[index])
        elif sampling_config.sampling_type == SamplingType.LINK:
          for index in loader:
            dist_sampler.sample_from_edges(sampler_input[index])
        elif sampling_config.sampling_type == SamplingType.SUBGRAPH:
          for index in loader:
            dist_sampler.subgraph(sampler_input[index])
        dist_sampler.wait_all()
        with sampling_completed_worker_count.get_lock():
          sampling_completed_worker_count.value += 1  # non-atomic, lock is necessary
      elif command == MpCommand.STOP:
        keep_running = False
      else:
        raise RuntimeError("Unknown command type")
  except KeyboardInterrupt:
    # Main process will raise KeyboardInterrupt anyways.
    pass
  # Cleanup runs for both normal STOP and KeyboardInterrupt exits.
  if dist_sampler is not None:
    dist_sampler.shutdown_loop()
  shutdown_rpc(graceful=False)
class DistMpSamplingProducer(object):
  r""" A subprocess group of distributed sampling workers.

  Note that this producer is only used for workload with separate sampling
  and training, all sampled results will be sent to the output channel.

  Args:
    data (DistDataset): The partitioned dataset to sample from.
    sampler_input: The seed nodes/edges; moved into shared memory so every
      spawned worker can index them without a copy.
    sampling_config (SamplingConfig): Sampling type, fanout, batch size,
      shuffle/drop_last options, and optional random seed.
    worker_options: Process, device and rpc options for the workers.
    output_channel (ChannelBase): Channel receiving all sampled messages.
  """
  def __init__(self,
               data: DistDataset,
               sampler_input: Union[NodeSamplerInput, EdgeSamplerInput],
               sampling_config: SamplingConfig,
               worker_options: _BasicDistSamplingWorkerOptions,
               output_channel: ChannelBase):
    self.data = data
    # Share the seed tensor so spawned workers can index it without copies.
    self.sampler_input = sampler_input.share_memory()
    self.input_len = len(self.sampler_input)
    self.sampling_config = sampling_config
    self.worker_options = worker_options
    self.worker_options._assign_worker_devices()
    self.num_workers = self.worker_options.num_workers
    self.output_channel = output_channel
    # Unsigned shared counter, incremented by each worker when it finishes
    # a full pass over its assigned seed range.
    self.sampling_completed_worker_count = mp.Value('I', lock=True)
    current_ctx = get_context()
    self.worker_options._set_worker_ranks(current_ctx)
    self._task_queues = []
    self._workers = []
    self._barrier = None
    self._shutdown = False
    self._worker_seeds_ranges = self._get_worker_seeds_ranges()

  def init(self):
    r""" Create the subprocess pool. Init samplers and rpc server.
    """
    if self.sampling_config.seed is not None:
      seed_everything(self.sampling_config.seed)
    if not self.sampling_config.shuffle:
      # Seed order is fixed for all epochs, so per-worker index splits can
      # be computed once and handed to the workers at startup.
      unshuffled_indexes = self._get_seeds_indexes()
    else:
      unshuffled_indexes = [None] * self.num_workers
    mp_context = mp.get_context('spawn')
    # +1 slot so this (parent) process can wait for all workers to start.
    barrier = mp_context.Barrier(self.num_workers + 1)
    for rank in range(self.num_workers):
      task_queue = mp_context.Queue(
        self.num_workers * self.worker_options.worker_concurrency)
      self._task_queues.append(task_queue)
      w = mp_context.Process(
        target=_sampling_worker_loop,
        args=(rank, self.data, self.sampler_input, unshuffled_indexes[rank],
              self.sampling_config, self.worker_options, self.output_channel,
              task_queue, self.sampling_completed_worker_count, barrier)
      )
      w.daemon = True
      w.start()
      self._workers.append(w)
    barrier.wait()

  def shutdown(self):
    r""" Shutdown sampler event loop and rpc server. Join the subprocesses.
    """
    if not self._shutdown:
      self._shutdown = True
      try:
        for q in self._task_queues:
          q.put((MpCommand.STOP, None))
        for w in self._workers:
          w.join(timeout=MP_STATUS_CHECK_INTERVAL)
        for q in self._task_queues:
          q.cancel_join_thread()
          q.close()
      finally:
        # Force-kill any worker that did not exit within the timeout.
        for w in self._workers:
          if w.is_alive():
            w.terminate()

  def produce_all(self):
    r""" Perform sampling for all input seeds.
    """
    if self.sampling_config.shuffle:
      # Fresh permutation per epoch, moved into shared memory for workers.
      seeds_indexes = self._get_seeds_indexes()
      for rank in range(self.num_workers):
        seeds_indexes[rank].share_memory_()
    else:
      seeds_indexes = [None] * self.num_workers
    self.sampling_completed_worker_count.value = 0
    for rank in range(self.num_workers):
      self._task_queues[rank].put((MpCommand.SAMPLE_ALL, seeds_indexes[rank]))
      time.sleep(0.1)

  def is_all_sampling_completed_and_consumed(self):
    r""" Whether every worker finished sampling AND the output channel has
    been fully drained by consumers.
    """
    if self.output_channel.empty():
      return self.is_all_sampling_completed()
    # Fix: previously fell through and implicitly returned ``None``; return
    # an explicit boolean instead (still falsy, backward compatible).
    return False

  def is_all_sampling_completed(self):
    r""" Whether every worker finished sampling its assigned seed range.
    """
    return self.sampling_completed_worker_count.value == self.num_workers

  def _get_worker_seeds_ranges(self):
    r""" Split ``[0, input_len)`` into per-worker contiguous ranges aligned
    to whole batches; the last worker absorbs any remainder seeds.
    """
    num_worker_batches = [0] * self.num_workers
    num_total_complete_batches = (self.input_len // self.sampling_config.batch_size)
    for rank in range(self.num_workers):
      num_worker_batches[rank] += \
        (num_total_complete_batches // self.num_workers)
    # Distribute leftover complete batches to the lowest-ranked workers.
    for rank in range(num_total_complete_batches % self.num_workers):
      num_worker_batches[rank] += 1
    index_ranges = []
    start = 0
    for rank in range(self.num_workers):
      end = start + num_worker_batches[rank] * self.sampling_config.batch_size
      if rank == self.num_workers - 1:
        end = self.input_len
      index_ranges.append((start, end))
      start = end
    return index_ranges

  def _get_seeds_indexes(self):
    r""" Produce per-worker seed index tensors (shuffled or sequential),
    partitioned by the precomputed per-worker ranges.
    """
    if self.sampling_config.shuffle:
      index = torch.randperm(self.input_len)
    else:
      index = torch.arange(self.input_len)
    seeds_indexes = []
    for rank in range(self.num_workers):
      start, end = self._worker_seeds_ranges[rank]
      seeds_indexes.append(index[start:end])
    return seeds_indexes
class DistCollocatedSamplingProducer(object):
  r""" A sampling producer with a collocated distributed sampler.

  Note that the sampled results will be returned directly and this producer
  will be blocking when processing each batch.

  Args:
    data (DistDataset): The partitioned dataset to sample from.
    sampler_input: The seed nodes/edges to sample from.
    sampling_config (SamplingConfig): Sampling type, fanout and batching
      options.
    worker_options: Rpc options (address, port, threads, timeout).
    device (torch.device): Device used by the collocated sampler.
  """
  def __init__(self,
               data: DistDataset,
               sampler_input: Union[NodeSamplerInput, EdgeSamplerInput],
               sampling_config: SamplingConfig,
               worker_options: _BasicDistSamplingWorkerOptions,
               device: torch.device):
    self.data = data
    self.sampler_input = sampler_input
    self.sampling_config = sampling_config
    self.worker_options = worker_options
    self.device = device
    # Fix: the sampler is created lazily in ``init()``; predefine it here so
    # that ``shutdown()`` is safe to call even if ``init()`` never ran
    # (previously that raised AttributeError).
    self._collocated_sampler = None

  def init(self):
    r""" Build the batch index loader, init the rpc agent and start the
    collocated sampler's event loop.
    """
    index = torch.arange(len(self.sampler_input))
    self._index_loader = DataLoader(
      index,
      batch_size=self.sampling_config.batch_size,
      shuffle=self.sampling_config.shuffle,
      drop_last=self.sampling_config.drop_last
    )
    self._index_iter = self._index_loader._get_iterator()
    if self.worker_options.num_rpc_threads is None:
      # Default: one rpc thread per partition, capped at 16.
      num_rpc_threads = min(self.data.num_partitions, 16)
    else:
      num_rpc_threads = self.worker_options.num_rpc_threads
    init_rpc(
      master_addr=self.worker_options.master_addr,
      master_port=self.worker_options.master_port,
      num_rpc_threads=num_rpc_threads,
      rpc_timeout=self.worker_options.rpc_timeout
    )
    # channel=None => sampled results are returned synchronously by
    # ``sample()`` rather than pushed onto a message channel.
    self._collocated_sampler = DistNeighborSampler(
      self.data, self.sampling_config.num_neighbors,
      self.sampling_config.with_edge, self.sampling_config.with_neg,
      self.sampling_config.with_weight,
      self.sampling_config.edge_dir, self.sampling_config.collect_features,
      channel=None, use_all2all=self.worker_options.use_all2all,
      concurrency=1, device=self.device,
      seed=self.sampling_config.seed
    )
    self._collocated_sampler.start_loop()

  def shutdown(self):
    r""" Stop the collocated sampler's event loop (no-op before ``init``).
    """
    if self._collocated_sampler is not None:
      self._collocated_sampler.shutdown_loop()

  def reset(self):
    r""" Rewind the batch index iterator for a new epoch.
    """
    self._index_iter._reset(self._index_loader)

  def sample(self):
    r""" Sample the next batch synchronously and return its result.
    """
    index = self._index_iter._next_data()
    batch = self.sampler_input[index]
    if self.sampling_config.sampling_type == SamplingType.NODE:
      return self._collocated_sampler.sample_from_nodes(batch)
    if self.sampling_config.sampling_type == SamplingType.LINK:
      return self._collocated_sampler.sample_from_edges(batch)
    if self.sampling_config.sampling_type == SamplingType.SUBGRAPH:
      return self._collocated_sampler.subgraph(batch)
    raise NotImplementedError
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_neighbor_sampler.py | graphlearn_torch/python/distributed/dist_neighbor_sampler.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import queue
from dataclasses import dataclass
from typing import List, Literal, Optional, Union, Tuple, Dict
import torch
from .. import py_graphlearn_torch as pywrap
from ..channel import ChannelBase, SampleMessage
from ..data import Feature
from ..sampler import (
NodeSamplerInput, EdgeSamplerInput,
NeighborOutput, SamplerOutput, HeteroSamplerOutput,
NeighborSampler
)
from ..typing import EdgeType, as_str, NumNeighbors, reverse_edge_type, TensorDataType
from ..utils import (
get_available_device, ensure_device, merge_dict, id2idx,
merge_hetero_sampler_output, format_hetero_sampler_output, count_dict
)
from .dist_dataset import DistDataset
from .dist_feature import DistFeature
from .dist_graph import DistGraph
from .event_loop import ConcurrentEventLoop, wrap_torch_future
from .rpc import (
RpcCalleeBase, rpc_register, rpc_request_async,
RpcDataPartitionRouter, rpc_sync_data_partitions
)
@dataclass
class PartialNeighborOutput:
  r""" The sampled neighbor output for a subset of the original input ids.

  Attributes:
    index: positions (within the original input id tensor) of the subset
      of ids that ``output`` corresponds to.
    output: the sampled neighbor output for that subset of ids.
  """
  # Positions into the original seed tensor.
  index: torch.Tensor
  # Neighbor sampling result aligned with ``index``.
  output: NeighborOutput
class RpcSamplingCallee(RpcCalleeBase):
  r""" A wrapper for rpc callee that will perform rpc sampling from
  remote processes.

  Args:
    sampler (NeighborSampler): The local sampler used to serve requests.
    device (torch.device): Device to bind before sampling.
  """
  def __init__(self, sampler: NeighborSampler, device: torch.device):
    super().__init__()
    self.sampler = sampler
    self.device = device

  def call(self, *args, **kwargs):
    r""" Serve a one-hop sampling request; results are moved to CPU so the
    rpc response is device-agnostic.
    """
    ensure_device(self.device)
    output = self.sampler.sample_one_hop(*args, **kwargs)
    if output is None:
      # No neighbors found: build an empty output on CPU. ``args[0]`` is
      # the seed tensor, so every seed gets a zero neighbor count.
      nbrs = torch.tensor([], dtype=torch.int64, device=torch.device('cpu'))
      nbrs_num = torch.zeros_like(args[0], dtype=torch.int64,
                                  device=torch.device('cpu'))
      # BUGFIX: the ``with_edge`` flag lives on the sampler; ``self.with_edge``
      # was never assigned in ``__init__`` and raised AttributeError here.
      edge_ids = torch.tensor([], device=torch.device('cpu'), dtype=torch.int64) \
        if self.sampler.with_edge else None
      return NeighborOutput(nbrs, nbrs_num, edge_ids)
    return output.to(torch.device('cpu'))
class RpcSubGraphCallee(RpcCalleeBase):
  r""" Rpc callee wrapper that induces a node-level subgraph on the local
  partition in response to remote requests.
  """
  def __init__(self, sampler: NeighborSampler, device: torch.device):
    super().__init__()
    self.sampler = sampler
    self.device = device

  def call(self, *args, **kwargs):
    # Bind the worker device, induce the subgraph for the requested seed
    # ids, then ship all tensors back to CPU for the rpc response.
    ensure_device(self.device)
    need_edge = kwargs['with_edge']
    seeds = args[0].to(self.device)
    sub = self.sampler.subgraph_op.node_subgraph(seeds, need_edge)
    if need_edge:
      eids = sub.eids.to('cpu')
    else:
      eids = None
    return sub.nodes.to('cpu'), sub.rows.to('cpu'), sub.cols.to('cpu'), eids
class DistNeighborSampler(ConcurrentEventLoop):
r""" Asynchronized and distributed neighbor sampler.
Args:
data (DistDataset): The graph and feature data with partition info.
num_neighbors (NumNeighbors): The number of sampling neighbors on each hop.
with_edge (bool): Whether to sample with edge ids. (default: False).
with_neg (bool): Whether to do negative sampling. (default: False)
edge_dir (str:["in", "out"]): The edge direction for sampling.
Can be either :str:`"out"` or :str:`"in"`.
(default: :str:`"out"`)
collect_features (bool): Whether collect features for sampled results.
(default: False).
channel (ChannelBase, optional): The message channel to send sampled
results. If set to `None`, the sampled results will be returned
directly with `sample_from_nodes`. (default: ``None``).
use_all2all (bool): Whether use all2all API to collect cross nodes'
feature. (deafult: False)
concurrency (int): The max number of concurrent seed batches processed by
the current sampler. (default: ``1``).
device: The device to use for sampling. If set to ``None``, the current
cuda device (got by ``torch.cuda.current_device``) will be used if
available, otherwise, the cpu device will be used. (default: ``None``).
"""
  def __init__(self,
               data: DistDataset,
               num_neighbors: Optional[NumNeighbors] = None,
               with_edge: bool = False,
               with_neg: bool = False,
               with_weight: bool = False,
               edge_dir: Literal['in', 'out'] = 'out',
               collect_features: bool = False,
               channel: Optional[ChannelBase] = None,
               use_all2all: bool = False,
               concurrency: int = 1,
               device: Optional[torch.device] = None,
               seed: Optional[int] = None):
    self.data = data
    self.use_all2all = use_all2all
    self.num_neighbors = num_neighbors
    # Largest seed batch seen so far; used to size newly created inducers.
    self.max_input_size = 0
    self.with_edge = with_edge
    self.with_neg = with_neg
    self.with_weight = with_weight
    self.edge_dir = edge_dir
    self.collect_features = collect_features
    self.channel = channel
    self.concurrency = concurrency
    self.device = get_available_device(device)
    self.seed = seed
    if isinstance(data, DistDataset):
      # Discover which rpc workers serve each data partition and build a
      # router for partition-directed requests.
      partition2workers = rpc_sync_data_partitions(
        num_data_partitions=self.data.num_partitions,
        current_partition_idx=self.data.partition_idx
      )
      self.rpc_router = RpcDataPartitionRouter(partition2workers)
      self.dist_graph = DistGraph(
        data.num_partitions, data.partition_idx,
        data.graph, data.node_pb, data.edge_pb
      )
      self.dist_node_feature = None
      self.dist_edge_feature = None
      if self.collect_features:
        # Wrap local feature stores with distributed lookup (rpc fallback
        # for ids owned by remote partitions).
        if data.node_features is not None:
          self.dist_node_feature = DistFeature(
            data.num_partitions, data.partition_idx,
            data.node_features, data.node_feat_pb,
            local_only=False, rpc_router=self.rpc_router, device=self.device
          )
        if self.with_edge and data.edge_features is not None:
          self.dist_edge_feature = DistFeature(
            data.num_partitions, data.partition_idx,
            data.edge_features, data.edge_feat_pb,
            local_only=False, rpc_router=self.rpc_router, device=self.device
          )
      # dist_node_labels is initialized as a DistFeature object in the v6d case
      self.dist_node_labels = self.data.node_labels
      if self.dist_graph.data_cls == 'homo':
        # Homogeneous graph: labels are either a plain tensor (local) or a
        # Feature-like store that must be wrapped for distributed access.
        if self.dist_node_labels is not None and \
            not isinstance(self.dist_node_labels, torch.Tensor):
          self.dist_node_labels = DistFeature(
            self.data.num_partitions, self.data.partition_idx,
            self.dist_node_labels, self.data.node_feat_pb,
            local_only=False, rpc_router=self.rpc_router, device=self.device
          )
      else:
        # Heterogeneous graph: labels, when present, are a dict keyed by
        # node type; wrap only when every value is a Feature store.
        assert self.dist_node_labels is None or isinstance(self.dist_node_labels, Dict)
        if self.dist_node_labels is not None and \
            all(isinstance(value, Feature) for value in self.dist_node_labels.values()):
          self.dist_node_labels = DistFeature(
            self.data.num_partitions, self.data.partition_idx,
            self.data.node_labels, self.data.node_feat_pb,
            local_only=False, rpc_router=self.rpc_router, device=self.device
          )
    else:
      raise ValueError(f"'{self.__class__.__name__}': found invalid input "
                       f"data type '{type(data)}'")
    # Local sampler operating on this partition's graph.
    self.sampler = NeighborSampler(
      self.dist_graph.local_graph, self.num_neighbors,
      self.device, self.with_edge, self.with_neg, self.with_weight,
      self.edge_dir, seed=self.seed
    )
    # Pool of reusable subgraph inducers, bounded by concurrency.
    self.inducer_pool = queue.Queue(maxsize=self.concurrency)
    # rpc register: expose one-hop sampling and subgraph induction to peers.
    rpc_sample_callee = RpcSamplingCallee(self.sampler, self.device)
    self.rpc_sample_callee_id = rpc_register(rpc_sample_callee)
    rpc_subgraph_callee = RpcSubGraphCallee(self.sampler, self.device)
    self.rpc_subgraph_callee_id = rpc_register(rpc_subgraph_callee)
    if self.dist_graph.data_cls == 'hetero':
      # Adopt the sampler's normalized per-edge-type fanout and hop count.
      self.num_neighbors = self.sampler.num_neighbors
      self.num_hops = self.sampler.num_hops
      self.edge_types = self.sampler.edge_types
    super().__init__(self.concurrency)
    self._loop.call_soon_threadsafe(ensure_device, self.device)
def sample_from_nodes(
self,
inputs: NodeSamplerInput,
**kwargs
) -> Optional[SampleMessage]:
r""" Sample multi-hop neighbors from nodes, collect the remote features
(optional), and send results to the output channel.
Note that if the output sample channel is specified, this func is
asynchronized and the sampled result will not be returned directly.
Otherwise, this func will be blocked to wait for the sampled result and
return it.
Args:
inputs (NodeSamplerInput): The input data with node indices to start
sampling from.
"""
inputs = NodeSamplerInput.cast(inputs)
if self.channel is None:
return self.run_task(coro=self._send_adapter(self._sample_from_nodes,
inputs))
cb = kwargs.get('callback', None)
self.add_task(coro=self._send_adapter(self._sample_from_nodes, inputs),
callback=cb)
return None
def sample_from_edges(
self,
inputs: EdgeSamplerInput,
**kwargs,
) -> Optional[SampleMessage]:
r""" Sample multi-hop neighbors from edges, collect the remote features
(optional), and send results to the output channel.
Note that if the output sample channel is specified, this func is
asynchronized and the sampled result will not be returned directly.
Otherwise, this func will be blocked to wait for the sampled result and
return it.
Args:
inputs (EdgeSamplerInput): The input data for sampling from edges
including the (1) source node indices, the (2) destination node
indices, the (3) optional edge labels and the (4) input edge type.
"""
if self.channel is None:
return self.run_task(coro=self._send_adapter(self._sample_from_edges,
inputs))
cb = kwargs.get('callback', None)
self.add_task(coro=self._send_adapter(self._sample_from_edges, inputs),
callback=cb)
return None
def subgraph(
self,
inputs: NodeSamplerInput,
**kwargs
) -> Optional[SampleMessage]:
r""" Induce an enclosing subgraph based on inputs and their neighbors(if
self.num_neighbors is not None).
"""
inputs = NodeSamplerInput.cast(inputs)
if self.channel is None:
return self.run_task(coro=self._send_adapter(self._subgraph, inputs))
cb = kwargs.get('callback', None)
self.add_task(coro=self._send_adapter(self._subgraph, inputs), callback=cb)
return None
async def _send_adapter(
self,
async_func,
*args, **kwargs
) -> Optional[SampleMessage]:
sampler_output = await async_func(*args, **kwargs)
res = await self._colloate_fn(sampler_output)
if self.channel is None:
return res
self.channel.send(res)
return None
  async def _sample_from_nodes(
    self,
    inputs: NodeSamplerInput,
  ) -> Optional[SampleMessage]:
    r""" Core multi-hop neighbor sampling from seed nodes.

    Runs one `_sample_one_hop` per hop (per edge type in the hetero case,
    with the per-type hops awaited concurrently), induces the growing
    subgraph with an inducer from the pool, and returns a
    (Hetero)SamplerOutput accumulated over all hops.
    """
    input_seeds = inputs.node.to(self.device)
    input_type = inputs.input_type
    # Track the largest batch so far so new inducers are sized adequately.
    self.max_input_size = max(self.max_input_size, input_seeds.numel())
    inducer = self._acquire_inducer()
    is_hetero = (self.dist_graph.data_cls == 'hetero')
    if is_hetero:
      assert input_type is not None
      src_dict = inducer.init_node({input_type: input_seeds})
      batch = src_dict
      out_nodes, out_rows, out_cols, out_edges = {}, {}, {}, {}
      num_sampled_nodes, num_sampled_edges = {}, {}
      merge_dict(src_dict, out_nodes)
      count_dict(src_dict, num_sampled_nodes, 1)
      for i in range(self.num_hops):
        task_dict, nbr_dict, edge_dict = {}, {}, {}
        # Launch one concurrent one-hop sampling task per edge type whose
        # source frontier for this hop is non-empty.
        for etype in self.edge_types:
          req_num = self.num_neighbors[etype][i]
          if self.edge_dir == 'in':
            # 'in' direction: the frontier is the destination node type;
            # results are keyed by the reversed edge type.
            srcs = src_dict.get(etype[-1], None)
            if srcs is not None and srcs.numel() > 0:
              task_dict[reverse_edge_type(etype)] = self._loop.create_task(
                self._sample_one_hop(srcs, req_num, etype))
          elif self.edge_dir == 'out':
            srcs = src_dict.get(etype[0], None)
            if srcs is not None and srcs.numel() > 0:
              task_dict[etype] = self._loop.create_task(
                self._sample_one_hop(srcs, req_num, etype))
        for etype, task in task_dict.items():
          output: NeighborOutput = await task
          if output.nbr.numel() == 0:
            continue
          nbr_dict[etype] = [src_dict[etype[0]], output.nbr, output.nbr_num]
          if output.edge is not None:
            edge_dict[etype] = output.edge
        if len(nbr_dict) == 0:
          # No edge type produced neighbors this hop; try the next hop.
          continue
        nodes_dict, rows_dict, cols_dict = inducer.induce_next(nbr_dict)
        # Accumulate per-type outputs and per-hop counters.
        merge_dict(nodes_dict, out_nodes)
        merge_dict(rows_dict, out_rows)
        merge_dict(cols_dict, out_cols)
        merge_dict(edge_dict, out_edges)
        count_dict(nodes_dict, num_sampled_nodes, i + 2)
        count_dict(cols_dict, num_sampled_edges, i + 1)
        # Newly induced nodes become the next hop's frontier.
        src_dict = nodes_dict
      sample_output = HeteroSamplerOutput(
        node={ntype: torch.cat(nodes) for ntype, nodes in out_nodes.items()},
        row={etype: torch.cat(rows) for etype, rows in out_rows.items()},
        col={etype: torch.cat(cols) for etype, cols in out_cols.items()},
        edge=(
          {etype: torch.cat(eids) for etype, eids in out_edges.items()}
          if self.with_edge else None
        ),
        batch=batch,
        num_sampled_nodes=num_sampled_nodes,
        num_sampled_edges=num_sampled_edges,
        input_type=input_type,
        metadata={}
      )
    else:
      srcs = inducer.init_node(input_seeds)
      batch = srcs
      out_nodes, out_edges = [], []
      num_sampled_nodes, num_sampled_edges = [], []
      out_nodes.append(srcs)
      num_sampled_nodes.append(srcs.size(0))
      # Sample subgraph.
      for req_num in self.num_neighbors:
        output: NeighborOutput = await self._sample_one_hop(srcs, req_num, None)
        if output.nbr.numel() == 0:
          # Frontier exhausted: stop expanding further hops.
          break
        nodes, rows, cols = \
          inducer.induce_next(srcs, output.nbr, output.nbr_num)
        out_nodes.append(nodes)
        out_edges.append((rows, cols, output.edge))
        num_sampled_nodes.append(nodes.size(0))
        num_sampled_edges.append(cols.size(0))
        srcs = nodes
      if not out_edges:
        # No edges were sampled at all: emit an edge-less output.
        sample_output = SamplerOutput(
          node=torch.cat(out_nodes),
          row=torch.tensor([]).to(self.device),
          col=torch.tensor([]).to(self.device),
          edge=(torch.tensor([]).to(self.device) if self.with_edge else None),
          batch=batch,
          num_sampled_nodes=num_sampled_nodes,
          num_sampled_edges=num_sampled_edges,
          metadata={}
        )
      else:
        sample_output = SamplerOutput(
          node=torch.cat(out_nodes),
          row=torch.cat([e[0] for e in out_edges]) if out_edges else torch.tensor([]),
          col=torch.cat([e[1] for e in out_edges]) if out_edges else torch.tensor([]),
          edge=(torch.cat([e[2] for e in out_edges]) if self.with_edge else None),
          batch=batch,
          num_sampled_nodes=num_sampled_nodes,
          num_sampled_edges=num_sampled_edges,
          metadata={}
        )
    # Reclaim inducer into pool.
    self.inducer_pool.put(inducer)
    return sample_output
  async def _sample_from_edges(
    self,
    inputs: EdgeSamplerInput,
  ) -> Optional[SampleMessage]:
    r"""Performs sampling from an edge sampler input, leveraging a sampling
    function of the same signature as `node_sample`.

    Currently, we support the out-edge sampling manner, so we reverse the
    direction of src and dst for the output so that features of the sampled
    nodes during training can be aggregated from k-hop to (k-1)-hop nodes.

    Note: Negative sampling is performed locally and unable to fetch positive
    edges from remote, so the negative sampling in the distributed case is
    currently non-strict for both binary and triplet manner.
    """
    src = inputs.row.to(self.device)
    dst = inputs.col.to(self.device)
    edge_label = None if inputs.label is None else inputs.label.to(self.device)
    input_type = inputs.input_type
    neg_sampling = inputs.neg_sampling
    num_pos = src.numel()
    num_neg = 0
    # Negative Sampling
    self.sampler.lazy_init_neg_sampler()
    if neg_sampling is not None:
      # When we are doing negative sampling, we append negative information
      # of nodes/edges to `src`, `dst`.
      # Later on, we can easily reconstruct what belongs to positive and
      # negative examples by slicing via `num_pos`.
      num_neg = math.ceil(num_pos * neg_sampling.amount)
      if neg_sampling.is_binary():
        # In the "binary" case, we randomly sample negative pairs of nodes.
        if input_type is not None:
          neg_pair = self.sampler._neg_sampler[input_type].sample(num_neg)
        else:
          neg_pair = self.sampler._neg_sampler.sample(num_neg)
        src_neg, dst_neg = neg_pair[0], neg_pair[1]
        src = torch.cat([src, src_neg], dim=0)
        dst = torch.cat([dst, dst_neg], dim=0)
        # Labels: ones for positives, zeros for appended negatives.
        if edge_label is None:
          edge_label = torch.ones(num_pos, device=self.device)
        size = (src_neg.size()[0], ) + edge_label.size()[1:]
        edge_neg_label = edge_label.new_zeros(size)
        edge_label = torch.cat([edge_label, edge_neg_label])
      elif neg_sampling.is_triplet():
        # In the "triplet" case, only negative destinations are sampled
        # and appended after the positive destinations.
        assert num_neg % num_pos == 0
        if input_type is not None:
          neg_pair = self.sampler._neg_sampler[input_type].sample(num_neg, padding=True)
        else:
          neg_pair = self.sampler._neg_sampler.sample(num_neg, padding=True)
        dst_neg = neg_pair[1]
        dst = torch.cat([dst, dst_neg], dim=0)
        assert edge_label is None
    # Neighbor Sampling
    if input_type is not None: # hetero
      if input_type[0] != input_type[-1]: # Two distinct node types:
        # Deduplicate each endpoint set separately; keep the original
        # (pre-unique) seeds to rebuild label indices afterwards.
        src_seed, dst_seed = src, dst
        src, inverse_src = src.unique(return_inverse=True)
        dst, inverse_dst = dst.unique(return_inverse=True)
        seed_dict = {input_type[0]: src, input_type[-1]: dst}
      else: # Only a single node type: Merge both source and destination.
        seed = torch.cat([src, dst], dim=0)
        seed, inverse_seed = seed.unique(return_inverse=True)
        seed_dict = {input_type[0]: seed}
      temp_out = []
      for it, node in seed_dict.items():
        seeds = NodeSamplerInput(node=node, input_type=it)
        temp_out.append(await self._sample_from_nodes(seeds))
      if len(temp_out) == 2:
        out = merge_hetero_sampler_output(temp_out[0],
                                          temp_out[1],
                                          device=self.device,
                                          edge_dir=self.edge_dir)
      else:
        out = format_hetero_sampler_output(temp_out[0], edge_dir=self.edge_dir)
      # edge_label
      if neg_sampling is None or neg_sampling.is_binary():
        # Map original seed ids to their positions in the sampled node
        # set to build the (2, num_pos + num_neg) edge_label_index.
        if input_type[0] != input_type[-1]:
          inverse_src = id2idx(out.node[input_type[0]])[src_seed]
          inverse_dst = id2idx(out.node[input_type[-1]])[dst_seed]
          edge_label_index = torch.stack([inverse_src, inverse_dst], dim=0)
        else:
          edge_label_index = inverse_seed.view(2, -1)
        out.metadata.update({'edge_label_index': edge_label_index,
                             'edge_label': edge_label})
        out.input_type = input_type
      elif neg_sampling.is_triplet():
        # Split positions into anchor / positive / negative index tensors.
        if input_type[0] != input_type[-1]:
          inverse_src = id2idx(out.node[input_type[0]])[src_seed]
          inverse_dst = id2idx(out.node[input_type[-1]])[dst_seed]
          src_index = inverse_src
          dst_pos_index = inverse_dst[:num_pos]
          dst_neg_index = inverse_dst[num_pos:]
        else:
          src_index = inverse_seed[:num_pos]
          dst_pos_index = inverse_seed[num_pos:2 * num_pos]
          dst_neg_index = inverse_seed[2 * num_pos:]
        dst_neg_index = dst_neg_index.view(num_pos, -1).squeeze(-1)
        out.metadata.update({'src_index': src_index,
                             'dst_pos_index': dst_pos_index,
                             'dst_neg_index': dst_neg_index})
        out.input_type = input_type
    else: #homo
      seed = torch.cat([src, dst], dim=0)
      seed, inverse_seed = seed.unique(return_inverse=True)
      out = await self._sample_from_nodes(NodeSamplerInput.cast(seed))
      # edge_label
      if neg_sampling is None or neg_sampling.is_binary():
        edge_label_index = inverse_seed.view(2, -1)
        out.metadata.update({'edge_label_index': edge_label_index,
                             'edge_label': edge_label})
      elif neg_sampling.is_triplet():
        src_index = inverse_seed[:num_pos]
        dst_pos_index = inverse_seed[num_pos:2 * num_pos]
        dst_neg_index = inverse_seed[2 * num_pos:]
        dst_neg_index = dst_neg_index.view(num_pos, -1).squeeze(-1)
        out.metadata.update({'src_index': src_index,
                             'dst_pos_index': dst_pos_index,
                             'dst_neg_index': dst_neg_index})
    return out
  async def _subgraph(
    self,
    inputs: NodeSamplerInput,
  ) -> Optional[SampleMessage]:
    r""" Induce the enclosing subgraph of the seed nodes, optionally
    expanded by multi-hop neighbor sampling, gathering edges from all
    partitions (local directly, remote via rpc).

    Only supported for homogeneous graphs.
    """
    inputs = NodeSamplerInput.cast(inputs)
    input_seeds = inputs.node.to(self.device)
    is_hetero = (self.dist_graph.data_cls == 'hetero')
    if is_hetero:
      raise NotImplementedError
    else:
      # neighbor sampling.
      if self.num_neighbors is not None:
        # Expand the node set hop by hop before inducing the subgraph.
        nodes = [input_seeds]
        for num in self.num_neighbors:
          nbr = await self._sample_one_hop(nodes[-1], num, None)
          nodes.append(torch.unique(nbr.nbr))
        nodes = torch.cat(nodes)
      else:
        nodes = input_seeds
      # ``mapping[:len(input_seeds)]`` recovers seed positions in ``nodes``.
      nodes, mapping = torch.unique(nodes, return_inverse=True)
      nid2idx = id2idx(nodes)
      # subgraph inducing.
      partition_ids = self.dist_graph.get_node_partitions(nodes)
      partition_ids = partition_ids.to(self.device)
      rows, cols, eids, futs = [], [], [], []
      for i in range(self.data.num_partitions):
        # Visit the local partition first, then remote ones round-robin.
        pidx = (self.data.partition_idx + i) % self.data.num_partitions
        p_ids = torch.masked_select(nodes, (partition_ids == pidx))
        if p_ids.shape[0] > 0:
          if pidx == self.data.partition_idx:
            subgraph = self.sampler.subgraph_op.node_subgraph(nodes, self.with_edge)
            # relabel row and col indices.
            rows.append(nid2idx[subgraph.nodes[subgraph.rows]])
            cols.append(nid2idx[subgraph.nodes[subgraph.cols]])
            if self.with_edge:
              eids.append(subgraph.eids.to(self.device))
          else:
            to_worker = self.rpc_router.get_to_worker(pidx)
            futs.append(rpc_request_async(to_worker,
                                          self.rpc_subgraph_callee_id,
                                          args=(nodes.cpu(),),
                                          kwargs={'with_edge': self.with_edge}))
      if not len(futs) == 0:
        # Await all remote subgraph pieces and relabel them into the
        # unified local index space.
        res_fut_list = await wrap_torch_future(torch.futures.collect_all(futs))
        for res_fut in res_fut_list:
          res_nodes, res_rows, res_cols, res_eids = res_fut.wait()
          res_nodes = res_nodes.to(self.device)
          rows.append(nid2idx[res_nodes[res_rows]])
          cols.append(nid2idx[res_nodes[res_cols]])
          if self.with_edge:
            eids.append(res_eids.to(self.device))
      sample_output = SamplerOutput(
        node=nodes,
        row=torch.cat(rows),
        col=torch.cat(cols),
        edge=torch.cat(eids) if self.with_edge else None,
        device=self.device,
        metadata={'mapping': mapping[:input_seeds.numel()]})
      return sample_output
def _acquire_inducer(self):
if self.inducer_pool.empty():
return self.sampler.create_inducer(self.max_input_size)
return self.inducer_pool.get()
def _stitch_sample_results(
self,
input_seeds: torch.Tensor,
results: List[PartialNeighborOutput]
) -> NeighborOutput:
r""" Stitch partitioned neighbor outputs into a complete one.
"""
idx_list = [r.index for r in results]
nbrs_list = [r.output.nbr for r in results]
nbrs_num_list = [r.output.nbr_num for r in results]
eids_list = [r.output.edge for r in results] if self.with_edge else []
if self.device.type == 'cuda':
nbrs, nbrs_num, eids = pywrap.cuda_stitch_sample_results(
input_seeds, idx_list, nbrs_list, nbrs_num_list, eids_list)
else:
nbrs, nbrs_num, eids = pywrap.cpu_stitch_sample_results(
input_seeds, idx_list, nbrs_list, nbrs_num_list, eids_list)
return NeighborOutput(nbrs, nbrs_num, eids)
async def _sample_one_hop(
self,
srcs: torch.Tensor,
num_nbr: int,
etype: Optional[EdgeType]
) -> NeighborOutput:
r""" Sample one-hop neighbors and induce the coo format subgraph.
Args:
srcs: input ids, 1D tensor.
num_nbr: request(max) number of neighbors for one hop.
etype: edge type to sample from input ids.
Returns:
Tuple[torch.Tensor, torch.Tensor]: unique node ids and edge_index.
"""
device = self.device
srcs = srcs.to(device)
if self.data.graph_caching:
nbr_out = None
if srcs is not None and srcs.numel() > 0:
nbr_out = self.sampler.sample_one_hop(srcs, num_nbr, etype)
return nbr_out
orders = torch.arange(srcs.size(0), dtype=torch.long, device=device)
if self.edge_dir == 'out':
src_ntype = etype[0] if etype is not None else None
elif self.edge_dir == 'in':
src_ntype = etype[-1] if etype is not None else None
partition_ids = self.dist_graph.get_node_partitions(srcs, src_ntype)
partition_ids = partition_ids.to(device)
partition_results: List[PartialNeighborOutput] = []
remote_orders_list: List[torch.Tensor] = []
futs: List[torch.futures.Future] = []
for i in range(self.data.num_partitions):
pidx = (
(self.data.partition_idx + i) % self.data.num_partitions
)
p_mask = (partition_ids == pidx)
if isinstance(self.dist_graph.node_pb, Dict):
p_ids = self.data.id_select(srcs, p_mask, self.dist_graph.node_pb[src_ntype])
else:
p_ids = self.data.id_select(srcs, p_mask, self.dist_graph.node_pb)
if p_ids.shape[0] > 0:
p_orders = torch.masked_select(orders, p_mask)
if pidx == self.data.partition_idx:
p_nbr_out = self.sampler.sample_one_hop(p_ids, num_nbr, etype)
partition_results.append(PartialNeighborOutput(p_orders, p_nbr_out))
else:
remote_orders_list.append(p_orders)
to_worker = self.rpc_router.get_to_worker(pidx)
futs.append(rpc_request_async(to_worker,
self.rpc_sample_callee_id,
args=(p_ids.cpu(), num_nbr, etype)))
# Without remote sampling results.
if len(remote_orders_list) == 0:
if len(partition_results) > 0:
return partition_results[0].output
else:
return torch.tensor([], device=self.device, dtype=torch.int64)
# With remote sampling results.
if not len(futs) == 0:
res_fut_list = await wrap_torch_future(torch.futures.collect_all(futs))
for i, res_fut in enumerate(res_fut_list):
partition_results.append(
PartialNeighborOutput(
index=remote_orders_list[i],
output=res_fut.wait().to(device)
)
)
return self._stitch_sample_results(srcs, partition_results)
  async def _colloate_fn(
    self,
    output: Union[SamplerOutput, HeteroSamplerOutput]
  ) -> SampleMessage:
    r""" Collect labels and features for the sampled subgraph if necessary,
    and put them into a sample message.

    Args:
      output: Raw sampler output, homogeneous (``SamplerOutput``) or
        heterogeneous (``HeteroSamplerOutput``).

    The message is a flat ``str -> tensor`` dict. For heterogeneous data,
    keys are prefixed with the node/edge type string (via ``as_str``);
    the special keys ``'#IS_HETERO'`` and ``'#META.<k>'`` carry the layout
    flag and any sampler metadata.
    """
    result_map = {}
    is_hetero = (self.dist_graph.data_cls == 'hetero')
    # Layout flag lets the consumer decide how to decode the message.
    result_map['#IS_HETERO'] = torch.LongTensor([int(is_hetero)])
    if isinstance(output.metadata, dict):
      # scan kv and add metadata
      for k, v in output.metadata.items():
        result_map[f'#META.{k}'] = v
    if is_hetero:
      # Node ids and per-type sampled-node counts.
      for ntype, nodes in output.node.items():
        result_map[f'{as_str(ntype)}.ids'] = nodes
        if output.num_sampled_nodes is not None:
          if ntype in output.num_sampled_nodes:
            result_map[f'{as_str(ntype)}.num_sampled_nodes'] = \
              torch.tensor(output.num_sampled_nodes[ntype], device=self.device)
      # Edge topology (rows/cols) and optional edge ids per edge type.
      for etype, rows in output.row.items():
        etype_str = as_str(etype)
        result_map[f'{etype_str}.rows'] = rows
        result_map[f'{etype_str}.cols'] = output.col[etype]
        if self.with_edge:
          result_map[f'{etype_str}.eids'] = output.edge[etype]
        if output.num_sampled_edges is not None:
          if etype in output.num_sampled_edges:
            result_map[f'{etype_str}.num_sampled_edges'] = \
              torch.tensor(output.num_sampled_edges[etype], device=self.device)
      # Collect node labels of input node type.
      input_type = output.input_type
      assert input_type is not None
      if not isinstance(input_type, Tuple):
        if self.dist_node_labels is not None:
          if isinstance(self.dist_node_labels, DistFeature):
            # Remote/partitioned labels: fetch asynchronously and await.
            fut = self.dist_node_labels.async_get(output.node[input_type], input_type)
            nlabels = await wrap_torch_future(fut)
            result_map[f'{as_str(input_type)}.nlabels'] = nlabels.T[0]
          else:
            # Local label dict: index directly on the labels' own device.
            node_labels = self.dist_node_labels.get(input_type, None)
            if node_labels is not None:
              result_map[f'{as_str(input_type)}.nlabels'] = \
                node_labels[output.node[input_type].to(node_labels.device)]
      # Collect node features.
      if self.dist_node_feature is not None:
        if self.use_all2all:
          # Deterministic type order so all ranks exchange in the same order.
          sorted_ntype = sorted(self.dist_node_feature.feature_pb.keys())
          nfeat_dict = self.dist_node_feature.get_all2all(output, sorted_ntype)
          for ntype, nfeats in nfeat_dict.items():
            result_map[f'{as_str(ntype)}.nfeats'] = nfeats
        else:
          # Issue all async fetches first, then await them.
          nfeat_fut_dict = {}
          for ntype, nodes in output.node.items():
            nodes = nodes.to(torch.long)
            nfeat_fut_dict[ntype] = self.dist_node_feature.async_get(nodes, ntype)
          for ntype, fut in nfeat_fut_dict.items():
            nfeats = await wrap_torch_future(fut)
            result_map[f'{as_str(ntype)}.nfeats'] = nfeats
      # Collect edge features
      if self.dist_edge_feature is not None and self.with_edge:
        efeat_fut_dict = {}
        for etype in self.edge_types:
          # For 'in' sampling, eids were stored under the reversed edge type.
          if self.edge_dir == 'out':
            eids = result_map.get(f'{as_str(etype)}.eids', None)
          elif self.edge_dir == 'in':
            eids = result_map.get(
              f'{as_str(reverse_edge_type(etype))}.eids', None)
          if eids is not None:
            eids = eids.to(torch.long)
            efeat_fut_dict[etype] = self.dist_edge_feature.async_get(eids, etype)
        for etype, fut in efeat_fut_dict.items():
          efeats = await wrap_torch_future(fut)
          if self.edge_dir == 'out':
            result_map[f'{as_str(etype)}.efeats'] = efeats
          elif self.edge_dir == 'in':
            result_map[f'{as_str(reverse_edge_type(etype))}.efeats'] = efeats
      # Collect batch info
      if output.batch is not None:
        for ntype, batch in output.batch.items():
          result_map[f'{as_str(ntype)}.batch'] = batch
    else:
      # Homogeneous layout: plain keys, no type prefixes.
      result_map['ids'] = output.node
      result_map['rows'] = output.row
      result_map['cols'] = output.col
      if output.num_sampled_nodes is not None:
        result_map['num_sampled_nodes'] = \
          torch.tensor(output.num_sampled_nodes, device=self.device)
        result_map['num_sampled_edges'] = \
          torch.tensor(output.num_sampled_edges, device=self.device)
      if self.with_edge:
        result_map['eids'] = output.edge
      # NOTE(review): the source appears truncated here — the assembled
      # result_map is presumably returned (and the homogeneous branch likely
      # also collects labels/features); verify against the upstream file.
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | true |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_neighbor_loader.py | graphlearn_torch/python/distributed/dist_neighbor_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional, Literal, List
import torch
from ..sampler import NodeSamplerInput, SamplingType, SamplingConfig, \
RemoteNodeSplitSamplerInput, RemoteNodePathSamplerInput
from ..typing import InputNodes, NumNeighbors, Split
from .dist_dataset import DistDataset
from .dist_options import AllDistSamplingWorkerOptions, RemoteDistSamplingWorkerOptions
from .dist_loader import DistLoader
class DistNeighborLoader(DistLoader):
  r""" A distributed loader that performs sampling from nodes.

  Args:
    data (DistDataset, optional): The ``DistDataset`` object of a partition of
      graph data and feature data, along with distributed partition books. The
      input dataset must be provided in non-server distribution mode.
    num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]):
      The number of neighbors to sample for each node in each iteration.
      In heterogeneous graphs, may also take in a dictionary denoting
      the amount of neighbors to sample for each individual edge type.
    input_nodes (torch.Tensor or Tuple[str, torch.Tensor]): The node seeds for
      which neighbors are sampled to create mini-batches. In heterogeneous
      graphs, needs to be passed as a tuple that holds the node type and
      node seeds.
    batch_size (int): How many samples per batch to load (default: ``1``).
    shuffle (bool): Set to ``True`` to have the data reshuffled at every
      epoch (default: ``False``).
    drop_last (bool): Set to ``True`` to drop the last incomplete batch, if
      the dataset size is not divisible by the batch size. If ``False`` and
      the size of dataset is not divisible by the batch size, then the last
      batch will be smaller. (default: ``False``).
    with_edge (bool): Set to ``True`` to sample with edge ids and also include
      them in the sampled results. (default: ``False``).
    edge_dir (str:["in", "out"]): The edge direction for sampling.
      Can be either :str:`"out"` or :str:`"in"`. (default: :str:`"out"`)
    collect_features (bool): Set to ``True`` to collect features for nodes
      of each sampled subgraph. (default: ``False``).
    to_device (torch.device, optional): The target device that the sampled
      results should be copied to. If set to ``None``, the current cuda device
      (got by ``torch.cuda.current_device``) will be used if available,
      otherwise, the cpu device will be used. (default: ``None``).
    worker_options (optional): The options for launching sampling workers.
      (1) If set to ``None`` or provided with a ``CollocatedDistWorkerOptions``
      object, a single collocated sampler will be launched on the current
      process, while the separate sampling mode will be disabled. (2) If
      provided with a ``MpDistWorkerOptions`` object, the sampling workers will
      be launched on spawned subprocesses, and a share-memory based channel
      will be created for sample message passing from multiprocessing workers
      to the current loader. (3) If provided with a ``RemoteDistWorkerOptions``
      object, the sampling workers will be launched on remote sampling server
      nodes, and a remote channel will be created for cross-machine message
      passing. (default: ``None``).
  """
  def __init__(self,
               data: Optional[DistDataset],
               num_neighbors: NumNeighbors,
               input_nodes: InputNodes,
               batch_size: int = 1,
               shuffle: bool = False,
               drop_last: bool = False,
               with_edge: bool = False,
               with_weight: bool = False,
               edge_dir: Literal['in', 'out'] = 'out',
               collect_features: bool = False,
               to_device: Optional[torch.device] = None,
               random_seed: Optional[int] = None,
               worker_options: Optional[AllDistSamplingWorkerOptions] = None):
    # Split the seed spec into an optional node type and the seed values.
    if isinstance(input_nodes, tuple):
      input_type, seeds = input_nodes
    else:
      input_type, seeds = None, input_nodes

    # Build the sampler input. Remote (server-based) sampling accepts richer
    # seed specs (a dataset split, file path(s)); all other modes take a
    # plain node-id tensor.
    if not isinstance(worker_options, RemoteDistSamplingWorkerOptions):
      input_data = NodeSamplerInput(node=seeds, input_type=input_type)
    elif isinstance(seeds, Split):
      input_data = RemoteNodeSplitSamplerInput(
        split=seeds, input_type=input_type)
      # One (shared) input object per target server rank.
      if isinstance(worker_options.server_rank, List):
        input_data = [input_data for _ in worker_options.server_rank]
    elif isinstance(seeds, List):
      input_data = [
        RemoteNodePathSamplerInput(node_path=path, input_type=input_type)
        for path in seeds
      ]
    elif isinstance(seeds, str):
      input_data = RemoteNodePathSamplerInput(
        node_path=seeds, input_type=input_type)
    else:
      raise ValueError("Invalid input seeds")

    sampling_config = SamplingConfig(
      SamplingType.NODE, num_neighbors, batch_size, shuffle,
      drop_last, with_edge, collect_features, with_neg=False,
      with_weight=with_weight, edge_dir=edge_dir, seed=random_seed
    )
    super().__init__(
      data, input_data, sampling_config, to_device, worker_options
    )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/__init__.py | graphlearn_torch/python/distributed/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .dist_client import (
init_client, shutdown_client, async_request_server, request_server
)
from .dist_context import DistRole, DistContext, get_context, init_worker_group
from .dist_dataset import DistDataset
from .dist_feature import PartialFeature, DistFeature
from .dist_graph import DistGraph
from .dist_link_neighbor_loader import DistLinkNeighborLoader
from .dist_loader import DistLoader
from .dist_neighbor_loader import DistNeighborLoader
from .dist_neighbor_sampler import DistNeighborSampler
from .dist_options import (
CollocatedDistSamplingWorkerOptions,
MpDistSamplingWorkerOptions,
RemoteDistSamplingWorkerOptions
)
from .dist_random_partitioner import DistRandomPartitioner
from .dist_sampling_producer import (
DistMpSamplingProducer, DistCollocatedSamplingProducer
)
from .dist_server import (
DistServer, get_server, init_server, wait_and_shutdown_server
)
from .dist_subgraph_loader import DistSubGraphLoader
from .dist_table_dataset import DistTableDataset, DistTableRandomPartitioner
from .event_loop import ConcurrentEventLoop
from .rpc import (
init_rpc, shutdown_rpc, rpc_is_initialized,
get_rpc_master_addr, get_rpc_master_port,
all_gather, barrier, global_all_gather, global_barrier,
RpcDataPartitionRouter, rpc_sync_data_partitions,
RpcCalleeBase, rpc_register, rpc_request_async, rpc_request,
rpc_global_request_async, rpc_global_request
)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_dataset.py | graphlearn_torch/python/distributed/dist_dataset.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from multiprocessing.reduction import ForkingPickler
from typing import Dict, List, Optional, Union, Literal, Tuple, Callable
import torch
from ..data import Dataset, Graph, Feature, DeviceGroup, vineyard_utils
from ..partition import (
load_partition, cat_feature_cache,
PartitionBook, HeteroNodePartitionDict, HeteroEdgePartitionDict
)
from ..typing import (NodeType, EdgeType, NodeLabel, NodeIndex)
from ..utils import share_memory, default_id_filter, default_id_select
class DistDataset(Dataset):
  r""" Graph and feature dataset with distributed partition info.

  Holds one partition of the graph topology and (optionally) of the node/edge
  features, together with the partition books mapping global node/edge ids to
  the partition index that owns them.
  """
  def __init__(
    self,
    num_partitions: int = 1,
    partition_idx: int = 0,
    graph_partition: Union[Graph, Dict[EdgeType, Graph]] = None,
    node_feature_partition: Union[Feature, Dict[NodeType, Feature]] = None,
    edge_feature_partition: Union[Feature, Dict[EdgeType, Feature]] = None,
    whole_node_labels: NodeLabel = None,
    node_pb: Union[PartitionBook, HeteroNodePartitionDict] = None,
    edge_pb: Union[PartitionBook, HeteroEdgePartitionDict] = None,
    node_feat_pb: Union[PartitionBook, HeteroNodePartitionDict] = None,
    edge_feat_pb: Union[PartitionBook, HeteroEdgePartitionDict] = None,
    edge_dir: Literal['in', 'out'] = 'out',
    graph_caching: bool = False,
    node_split: Tuple[NodeIndex, NodeIndex, NodeIndex] = None,
    id_filter: Callable = default_id_filter,
    id_select: Callable = default_id_select
  ):
    # Graph/feature/label/split state is owned by the base ``Dataset``.
    super().__init__(
      graph_partition,
      node_feature_partition,
      edge_feature_partition,
      whole_node_labels,
      edge_dir,
      node_split,
    )
    # Pluggable id helpers; replaced by vineyard-specific versions in
    # ``load_vineyard``.
    self.id_filter = id_filter
    self.id_select = id_select
    self.num_partitions = num_partitions
    self.partition_idx = partition_idx
    self.graph_caching = graph_caching
    self.node_pb = node_pb
    self.edge_pb = edge_pb
    # As the loaded feature partition may be concatenated with its cached
    # features and the partition book for features will be modified, thus we
    # need to distinguish them with the original graph partition books.
    #
    # If the `node_feat_pb` or `edge_feat_pb` is not provided, the `node_pb`
    # or `edge_pb` will be used instead for feature lookups.
    self._node_feat_pb = node_feat_pb
    self._edge_feat_pb = edge_feat_pb
    # Sanity checks: any provided data must come with a partition book.
    if self.graph is not None:
      assert self.node_pb is not None
    if self.node_features is not None:
      assert self.node_pb is not None or self._node_feat_pb is not None
    if self.edge_features is not None:
      assert self.edge_pb is not None or self._edge_feat_pb is not None
  def load(
    self,
    root_dir: str,
    partition_idx: int,
    graph_mode: str = 'ZERO_COPY',
    input_layout: Literal['COO', 'CSR', 'CSC'] = 'COO',
    feature_with_gpu: bool = True,
    graph_caching: bool = False,
    device_group_list: Optional[List[DeviceGroup]] = None,
    whole_node_label_file: Union[str, Dict[NodeType, str]] = None,
    device: Optional[int] = None
  ):
    r""" Load a certain dataset partition from partitioned files and create
    in-memory objects (``Graph``, ``Feature`` or ``torch.Tensor``).
    Args:
      root_dir (str): The directory path to load the graph and feature
        partition data.
      partition_idx (int): Partition idx to load.
      graph_mode (str): Mode for creating graphlearn_torch's ``Graph``, including
        ``CPU``, ``ZERO_COPY`` or ``CUDA``. (default: ``ZERO_COPY``)
      input_layout (str): layout of the input graph, including ``CSR``, ``CSC``
        or ``COO``. (default: ``COO``)
      feature_with_gpu (bool): A Boolean value indicating whether the created
        ``Feature`` objects of node/edge features use ``UnifiedTensor``.
        If True, it means ``Feature`` consists of ``UnifiedTensor``, otherwise
        ``Feature`` is a PyTorch CPU Tensor, the ``device_group_list`` and
        ``device`` will be invalid. (default: ``True``)
      graph_caching (bool): A Boolean value indicating whether to load the full
        graph topology instead of the partitioned one.
      device_group_list (List[DeviceGroup], optional): A list of device groups
        used for feature lookups, the GPU part of feature data will be
        replicated on each device group in this list during the initialization.
        GPUs with peer-to-peer access to each other should be set in the same
        device group properly. (default: ``None``)
      whole_node_label_file (str): The path to the whole node labels which are
        not partitioned. (default: ``None``)
      device: The target cuda device rank used for graph operations when graph
        mode is not "CPU" and feature lookups when the GPU part is not None.
        (default: ``None``)
    """
    (
      self.num_partitions,
      self.partition_idx,
      graph_data,
      node_feat_data,
      edge_feat_data,
      self.node_pb,
      self.edge_pb
    ) = load_partition(root_dir, partition_idx, graph_caching)
    # init graph partition
    if isinstance(graph_data, dict):
      # heterogeneous.
      edge_index, edge_ids, edge_weights = {}, {}, {}
      for k, v in graph_data.items():
        edge_index[k] = v.edge_index
        edge_ids[k] = v.eids
        edge_weights[k] = v.weights
    else:
      # homogeneous.
      edge_index = graph_data.edge_index
      edge_ids = graph_data.eids
      edge_weights = graph_data.weights
    self.init_graph(edge_index, edge_ids, edge_weights, layout=input_layout,
                    graph_mode=graph_mode, device=device)
    self.graph_caching = graph_caching
    # load node feature partition
    if node_feat_data is not None:
      # Concatenate the partition with its feature cache; the returned
      # partition book reflects the merged layout.
      node_cache_ratio, node_feat, node_feat_id2idx, node_feat_pb = \
        _cat_feature_cache(partition_idx, node_feat_data, self.node_pb)
      self.init_node_features(
        node_feat, node_feat_id2idx, None, node_cache_ratio,
        device_group_list, device, feature_with_gpu, dtype=None
      )
      self._node_feat_pb = node_feat_pb
    # load edge feature partition
    if edge_feat_data is not None:
      edge_cache_ratio, edge_feat, edge_feat_id2idx, edge_feat_pb = \
        _cat_feature_cache(partition_idx, edge_feat_data, self.edge_pb)
      self.init_edge_features(
        edge_feat, edge_feat_id2idx, edge_cache_ratio,
        device_group_list, device, feature_with_gpu, dtype=None
      )
      self._edge_feat_pb = edge_feat_pb
    # load whole node labels
    if whole_node_label_file is not None:
      if isinstance(whole_node_label_file, dict):
        whole_node_labels = {}
        for ntype, file in whole_node_label_file.items():
          whole_node_labels[ntype] = torch.load(file)
      else:
        whole_node_labels = torch.load(whole_node_label_file)
      self.init_node_labels(whole_node_labels)
  def random_node_split(
    self,
    num_val: Union[float, int],
    num_test: Union[float, int],
  ):
    r"""Performs a node-level random split by adding :obj:`train_idx`,
    :obj:`val_idx` and :obj:`test_idx` attributes to the
    :class:`~graphlearn_torch.distributed.DistDataset` object. All nodes except
    those in the validation and test sets will be used for training.
    Args:
      num_val (int or float): The number of validation samples.
        If float, it represents the ratio of samples to include in the
        validation set.
      num_test (int or float): The number of test samples in case
        of :obj:`"train_rest"` and :obj:`"random"` split. If float, it
        represents the ratio of samples to include in the test set.
    """
    if isinstance(self.node_labels, dict):
      # Heterogeneous: split nodes of each labeled type independently,
      # restricted to ids owned by this partition.
      train_idx = {}
      val_idx = {}
      test_idx = {}
      for node_type, _ in self.node_labels.items():
        indices = self.id_filter(self.node_pb[node_type], self.partition_idx)
        train_idx[node_type], val_idx[node_type], test_idx[node_type] = random_split(indices, num_val, num_test)
    else:
      indices = self.id_filter(self.node_pb, self.partition_idx)
      train_idx, val_idx, test_idx = random_split(indices, num_val, num_test)
    self.init_node_split((train_idx, val_idx, test_idx))
  def load_vineyard(
    self,
    vineyard_id: str,
    vineyard_socket: str,
    edges: List[EdgeType],
    edge_weights: Dict[EdgeType, str] = None,
    node_features: Dict[NodeType, List[str]] = None,
    edge_features: Dict[EdgeType, List[str]] = None,
    node_labels: Dict[NodeType, str] = None,
  ):
    r""" Load graph and feature data from a vineyard store, then align the
    feature partition books and switch the id helpers to the vineyard
    implementations.
    """
    super().load_vineyard(vineyard_id=vineyard_id, vineyard_socket=vineyard_socket,
                          edges=edges, edge_weights=edge_weights, node_features=node_features,
                          edge_features=edge_features, node_labels=node_labels)
    if isinstance(self.graph, dict):
      # hetero
      self._node_feat_pb = {}
      if node_features:
        for ntype, _ in self.node_features.items():
          # Features are not cached here, so the feature partition book is
          # just the graph node partition book (or None if absent).
          if self.node_pb is not None:
            self._node_feat_pb[ntype] = self.node_pb[ntype]
          else:
            self._node_feat_pb[ntype] = None
    else:
      # homo
      if node_features:
        self._node_feat_pb = self.node_pb
    self.id_select = vineyard_utils.v6d_id_select
    self.id_filter = vineyard_utils.v6d_id_filter
  def share_ipc(self):
    r""" Move all shareable state into shared memory and return an IPC
    handle tuple whose element order matches the ``__init__`` parameters,
    so it can be passed directly to ``from_ipc_handle``.
    """
    super().share_ipc()
    self.node_pb = share_memory(self.node_pb)
    self.edge_pb = share_memory(self.edge_pb)
    self._node_feat_pb = share_memory(self._node_feat_pb)
    self._edge_feat_pb = share_memory(self._edge_feat_pb)
    ipc_hanlde = (
      self.num_partitions, self.partition_idx,
      self.graph, self.node_features, self.edge_features, self.node_labels,
      self.node_pb, self.edge_pb, self._node_feat_pb, self._edge_feat_pb,
      self.edge_dir, self.graph_caching,
      (self.train_idx, self.val_idx, self.test_idx)
    )
    return ipc_hanlde
  @classmethod
  def from_ipc_handle(cls, ipc_handle):
    # Rebuild a dataset in a child process from the tuple made by
    # ``share_ipc`` (positional order matches ``__init__``).
    return cls(*ipc_handle)
  @property
  def node_feat_pb(self):
    # Fall back to the graph node partition book when no dedicated
    # feature partition book was created.
    if self._node_feat_pb is None:
      return self.node_pb
    return self._node_feat_pb
  @property
  def edge_feat_pb(self):
    # Fall back to the graph edge partition book when no dedicated
    # feature partition book was created.
    if self._edge_feat_pb is None:
      return self.edge_pb
    return self._edge_feat_pb
def _cat_feature_cache(partition_idx, raw_feat_data, raw_feat_pb):
  r""" Concatenate a feature partition with its cached features.

  Returns a ``(cache_ratio, feat_data, feat_id2idx, feat_pb)`` tuple; each
  element is a per-type dict for heterogeneous input, or a single value for
  homogeneous input.
  """
  if not isinstance(raw_feat_data, dict):
    # Homogeneous: a single feature partition, delegate directly.
    return cat_feature_cache(partition_idx, raw_feat_data, raw_feat_pb)
  # Heterogeneous: process each graph type independently.
  cache_ratio, feat_data, feat_id2idx, feat_pb = {}, {}, {}, {}
  for graph_type, raw_feat in raw_feat_data.items():
    ratio, feat, id2idx, pb = cat_feature_cache(
      partition_idx, raw_feat, raw_feat_pb[graph_type])
    cache_ratio[graph_type] = ratio
    feat_data[graph_type] = feat
    feat_id2idx[graph_type] = id2idx
    feat_pb[graph_type] = pb
  return cache_ratio, feat_data, feat_id2idx, feat_pb
## Pickling Registration
def rebuild_dist_dataset(ipc_handle):
  # Unpickling hook: reconstruct a DistDataset from its IPC handle.
  return DistDataset.from_ipc_handle(ipc_handle)
def reduce_dist_dataset(dataset: DistDataset):
  # Pickling hook: share the dataset's state via shared memory and emit
  # the (rebuild_fn, args) pair expected by ForkingPickler.
  return (rebuild_dist_dataset, (dataset.share_ipc(),))
# Register the reducer so DistDataset instances crossing process boundaries
# (e.g. via torch.multiprocessing) are shared through shared memory instead
# of being deep-copied.
ForkingPickler.register(DistDataset, reduce_dist_dataset)
def random_split(
  indices: torch.Tensor,
  num_val: Union[float, int],
  num_test: Union[float, int],
):
  r""" Randomly partition ``indices`` into train/val/test index tensors.

  ``num_val`` and ``num_test`` are absolute counts when ints, or fractions
  of ``len(indices)`` when floats. Everything left over becomes the
  training set.
  """
  total = indices.shape[0]
  if isinstance(num_val, float):
    num_val = round(total * num_val)
  if isinstance(num_test, float):
    num_test = round(total * num_test)
  # Shuffle once, then carve out the three contiguous ranges.
  shuffled = indices[torch.randperm(total)]
  val_idx = shuffled[:num_val].clone()
  test_idx = shuffled[num_val:num_val + num_test].clone()
  train_idx = shuffled[num_val + num_test:].clone()
  return train_idx, val_idx, test_idx
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/event_loop.py | graphlearn_torch/python/distributed/event_loop.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import asyncio
import logging
from threading import Thread, BoundedSemaphore
import torch
def wrap_torch_future(f: torch.futures.Future) -> asyncio.futures.Future:
  r""" Bridge a ``torch.futures.Future`` into a standard asyncio future.

  The returned asyncio future is resolved (or failed) on the current event
  loop once the torch future completes, so it can be ``await``-ed from
  coroutines.
  """
  loop = asyncio.get_event_loop()
  wrapped = loop.create_future()

  def _transfer(*_unused):
    # Runs on torch's callback thread: hop back onto the loop thread
    # before touching the asyncio future.
    try:
      value = f.wait()
    except Exception as exc:
      loop.call_soon_threadsafe(wrapped.set_exception, exc)
    else:
      loop.call_soon_threadsafe(wrapped.set_result, value)

  f.add_done_callback(_transfer)
  return wrapped
class ConcurrentEventLoop(object):
  r""" Concurrent event loop context.

  Runs a private asyncio event loop on a daemon thread and bounds the number
  of in-flight coroutine tasks with a semaphore.

  Args:
    concurrency: max processing concurrency.
  """
  def __init__(self, concurrency):
    self._concurrency = concurrency
    # Each pending `add_task` holds one slot until its callback finishes;
    # `wait_all` drains the loop by acquiring every slot.
    self._sem = BoundedSemaphore(concurrency)
    self._loop = asyncio.new_event_loop()
    # Daemon thread so a forgotten shutdown does not block interpreter exit.
    self._runner_t = Thread(target=self._run_loop)
    self._runner_t.daemon = True
  def start_loop(self):
    # Start the loop thread; safe to call more than once.
    if not self._runner_t.is_alive():
      self._runner_t.start()
  def shutdown_loop(self):
    # Finish pending work, then stop the loop and join its thread.
    self.wait_all()
    if self._runner_t.is_alive():
      self._loop.stop()
      self._runner_t.join(timeout=1)
  def wait_all(self):
    r""" Wait all pending tasks to be finished.
    """
    # Acquiring all slots blocks until every in-flight task has released
    # its slot; releasing restores full capacity afterwards.
    for _ in range(self._concurrency):
      self._sem.acquire()
    for _ in range(self._concurrency):
      self._sem.release()
  def add_task(self, coro, callback=None):
    r""" Add an asynchronized coroutine task to run.
    Args:
      coro: the async coroutine func.
      callback: the callback func applied on the returned results
        after the coroutine task is finished.
      Note that any results returned by `callback` func will be ignored,
      so it is preferable to handle all in your `callback` func and do
      not return any results.
    """
    def on_done(f: asyncio.futures.Future):
      # Runs when the coroutine finishes; errors (from the coroutine or
      # the callback) are logged, and the slot is always released.
      try:
        res = f.result()
        if callback is not None:
          callback(res)
      except Exception as e:
        logging.error("coroutine task failed: %s", e)
      self._sem.release()
    # Blocks when `concurrency` tasks are already in flight.
    self._sem.acquire()
    fut = asyncio.run_coroutine_threadsafe(coro, self._loop)
    fut.add_done_callback(on_done)
  def run_task(self, coro):
    r""" Run a coroutine task synchronously.
    """
    # Takes a slot for the duration, then blocks until the result is ready.
    with self._sem:
      fut = asyncio.run_coroutine_threadsafe(coro, self._loop)
      return fut.result()
  def _run_loop(self):
    # Thread target: run the private loop until `shutdown_loop` stops it.
    self._loop.run_forever()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_random_partitioner.py | graphlearn_torch/python/distributed/dist_random_partitioner.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import threading
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from ..partition import (
save_meta, save_node_pb, save_edge_pb,
save_graph_partition, save_feature_partition,
PartitionBook
)
from ..typing import (
NodeType, EdgeType, TensorDataType,
GraphPartitionData, FeaturePartitionData
)
from ..utils import convert_to_tensor, ensure_dir, index_select
from .dist_context import get_context, init_worker_group
from .rpc import (
init_rpc, rpc_is_initialized, all_gather, barrier,
get_rpc_current_group_worker_names,
rpc_request_async, rpc_register, RpcCalleeBase
)
class RpcUpdatePartitionValueCallee(RpcCalleeBase):
  r""" RPC callee that forwards partitioned values to the local
  ``DistPartitionManager``.
  """
  def __init__(self, dist_partition_mgr):
    super().__init__()
    self._mgr = dist_partition_mgr

  def call(self, *args, **kwargs):
    # Delegate to the manager; the RPC contract expects no return value.
    self._mgr._update_part_val(*args, **kwargs)
    return None
class RpcUpdatePartitionBookCallee(RpcCalleeBase):
  r""" RPC callee that forwards partition-book updates to the local
  ``DistPartitionManager``.
  """
  def __init__(self, dist_partition_mgr):
    super().__init__()
    self._mgr = dist_partition_mgr

  def call(self, *args, **kwargs):
    # Delegate to the manager; the RPC contract expects no return value.
    self._mgr._update_pb(*args, **kwargs)
    return None
class DistPartitionManager(object):
  r""" A state manager for distributed partitioning.

  Collects the values assigned to the local partition, maintains the global
  partition book, and exchanges both with peer partitioners over RPC.
  """
  def __init__(self, total_val_size: int = 1, generate_pb: bool = True):
    # RPC must be up: peers are addressed via the registered worker names.
    assert rpc_is_initialized() is True
    self.num_parts = get_context().world_size
    self.cur_pidx = get_context().rank
    # Reentrant lock guards state mutated from RPC callback threads.
    self._lock = threading.RLock()
    self._worker_names = get_rpc_current_group_worker_names()
    self.reset(total_val_size, generate_pb)
    # Register callees so remote partitioners can push values / partition
    # book updates into this manager.
    val_update_callee = RpcUpdatePartitionValueCallee(self)
    self._val_update_callee_id = rpc_register(val_update_callee)
    pb_update_callee = RpcUpdatePartitionBookCallee(self)
    self._pb_update_callee_id = rpc_register(pb_update_callee)
  def reset(self, total_val_size: int, generate_pb: bool = True):
    # Clear collected values and (optionally) allocate a fresh partition
    # book covering `total_val_size` global ids.
    with self._lock:
      self.generate_pb = generate_pb
      self.cur_part_val_list = []
      if self.generate_pb:
        self.partition_book = torch.zeros(total_val_size, dtype=torch.int64)
      else:
        self.partition_book = None
  def process(self, res_list: List[Tuple[Any, torch.Tensor]]):
    r""" Process partitioned results of the current corresponded distributed
    partitioner and synchronize with others.
    Args:
      res_list: The result list of value and ids for each partition.
    """
    assert len(res_list) == self.num_parts
    futs = []
    for pidx, (val, val_idx) in enumerate(res_list):
      # Deliver each partition's values to its owner: locally for our own
      # partition, via async RPC for remote ones.
      if pidx == self.cur_pidx:
        self._update_part_val(val, pidx)
      else:
        futs.append(rpc_request_async(self._worker_names[pidx],
                                      self._val_update_callee_id,
                                      args=(val, pidx)))
      if self.generate_pb:
        futs.extend(self._broadcast_pb(val_idx, pidx))
    # Block until every value transfer and partition-book update lands.
    _ = torch.futures.wait_all(futs)
  def _broadcast_pb(self, val_idx: torch.Tensor, target_pidx: int):
    # Tell every partitioner (including ourselves) that the ids in
    # `val_idx` belong to partition `target_pidx`.
    pb_update_futs = []
    for pidx in range(self.num_parts):
      if pidx == self.cur_pidx:
        self._update_pb(val_idx, target_pidx)
      else:
        pb_update_futs.append(rpc_request_async(self._worker_names[pidx],
                                                self._pb_update_callee_id,
                                                args=(val_idx, target_pidx)))
    return pb_update_futs
  def _update_part_val(self, val, target_pidx: int):
    # Accept a value chunk destined for this partition (RPC entry point).
    assert target_pidx == self.cur_pidx
    with self._lock:
      if val is not None:
        self.cur_part_val_list.append(val)
  def _update_pb(self, val_idx: torch.Tensor, target_pidx: int):
    # Record ownership of `val_idx` in the local partition book copy
    # (RPC entry point).
    with self._lock:
      self.partition_book[val_idx] = target_pidx
class DistRandomPartitioner(object):
r""" A distributed random partitioner for parallel partitioning with large
scale graph and features.
Each distributed partitioner will process a part of the full graph and
feature data, and partition them in parallel. A distributed partitioner's
rank is corresponding to a partition index, and the number of all distributed
partitioners must be same with the number of output partitions. During
partitioning, the partitioned results will be sent to other distributed
partitioners according to their ranks. After partitioning, each distributed
partitioner will own a partitioned graph with its corresponding rank and
further save the partitioned results into the local output directory.
Args:
output_dir: The output root directory on local machine for partitioned
results.
num_nodes: Number of all graph nodes, should be a dict for hetero data.
edge_index: A part of the edge index data of graph edges, should be a dict
for hetero data.
edge_ids: The edge ids of the input ``edge_index``.
node_feat: A part of the node feature data, should be a dict for hetero data.
node_feat_ids: The node ids corresponding to the input ``node_feat``.
edge_feat: The edge feature data, should be a dict for hetero data.
edge_feat_ids: The edge ids corresponding to the input ``edge_feat``.
num_parts: The number of all partitions. If not provided, the value of
``graphlearn_torch.distributed.get_context().world_size`` will be used.
current_partition_idx: The partition index corresponding to the current
distributed partitioner. If not provided, the value of
``graphlearn_torch.distributed.get_context().rank`` will be used.
node_feat_dtype: The data type of node features.
edge_feat_dtype: The data type of edge features.
edge_assign_strategy: The assignment strategy when partitioning edges,
should be 'by_src' or 'by_dst'.
chunk_size: The chunk size for partitioning.
master_addr: The master TCP address for RPC connection between all
distributed partitioners.
master_port: The master TCP port for RPC connection between all
distributed partitioners.
num_rpc_threads: The number of RPC worker threads to use.
"""
def __init__(
    self,
    output_dir: str,
    num_nodes: Union[int, Dict[NodeType, int]],
    edge_index: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
    edge_ids: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
    node_feat: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
    node_feat_ids: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
    edge_feat: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_feat_ids: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    num_parts: Optional[int] = None,
    current_partition_idx: Optional[int] = None,
    node_feat_dtype: torch.dtype = torch.float32,
    edge_feat_dtype: torch.dtype = torch.float32,
    edge_assign_strategy: str = 'by_src',
    chunk_size: int = 10000,
    master_addr: Optional[str] = None,
    master_port: Optional[str] = None,
    num_rpc_threads: int = 16,
):
  # Root directory on the local machine where partitioned results are saved.
  self.output_dir = output_dir
  # Resolve the partitioning world size and rank: reuse an existing worker
  # group context if one is already initialized (checking consistency with
  # any explicitly provided values), otherwise initialize a new worker group
  # from the provided arguments.
  if get_context() is not None:
    if num_parts is not None:
      assert get_context().world_size == num_parts
    if current_partition_idx is not None:
      assert get_context().rank == current_partition_idx
  else:
    assert num_parts is not None
    assert current_partition_idx is not None
    init_worker_group(
      world_size=num_parts,
      rank=current_partition_idx,
      # NOTE(review): 'partitoner' typo is preserved here on purpose — the
      # group name is an opaque label and changing it could desynchronize
      # mixed-version deployments.
      group_name='distributed_random_partitoner'
    )
  self.num_parts = get_context().world_size
  self.current_partition_idx = get_context().rank
  # Lazily bring up RPC between all partitioners, falling back to the
  # standard 'MASTER_ADDR'/'MASTER_PORT' environment variables when the
  # address/port are not passed explicitly.
  if rpc_is_initialized() is not True:
    if master_addr is None:
      master_addr = os.environ['MASTER_ADDR']
    if master_port is None:
      master_port = int(os.environ['MASTER_PORT'])
    init_rpc(master_addr, master_port, num_rpc_threads)
  # Normalize all inputs to tensors; ids are always int64.
  self.num_nodes = num_nodes
  self.edge_index = convert_to_tensor(edge_index, dtype=torch.int64)
  self.edge_ids = convert_to_tensor(edge_ids, dtype=torch.int64)
  self.node_feat = convert_to_tensor(node_feat, dtype=node_feat_dtype)
  self.node_feat_ids = convert_to_tensor(node_feat_ids, dtype=torch.int64)
  if self.node_feat is not None:
    assert self.node_feat_ids is not None
  self.edge_feat = convert_to_tensor(edge_feat, dtype=edge_feat_dtype)
  self.edge_feat_ids = convert_to_tensor(edge_feat_ids, dtype=torch.int64)
  if self.edge_feat is not None:
    assert self.edge_feat_ids is not None
  # A dict-typed ``num_nodes`` marks the dataset as heterogeneous; in that
  # case every other input must be keyed by node/edge type as well.
  if isinstance(self.num_nodes, dict):
    assert isinstance(self.edge_index, dict)
    assert isinstance(self.edge_ids, dict)
    assert isinstance(self.node_feat, dict) or self.node_feat is None
    assert isinstance(self.node_feat_ids, dict) or self.node_feat_ids is None
    assert isinstance(self.edge_feat, dict) or self.edge_feat is None
    assert isinstance(self.edge_feat_ids, dict) or self.edge_feat_ids is None
    self.data_cls = 'hetero'
    # Sorted so that every partitioner iterates types in the same order.
    self.node_types = sorted(list(self.num_nodes.keys()))
    self.edge_types = sorted(list(self.edge_index.keys()))
    self.num_local_edges = {}
    self.num_edges = {}
    for etype, index in self.edge_index.items():
      self.num_local_edges[etype] = len(index[0])
      # Global edge count = sum of local counts gathered from all workers.
      self.num_edges[etype] = sum(all_gather(len(index[0])).values())
  else:
    self.data_cls = 'homo'
    self.node_types = None
    self.edge_types = None
    self.num_local_edges = len(self.edge_index[0])
    self.num_edges = sum(all_gather(len(self.edge_index[0])).values())
  self.edge_assign_strategy = edge_assign_strategy.lower()
  assert self.edge_assign_strategy in ['by_src', 'by_dst']
  self.chunk_size = chunk_size
  # Manages buffering and cross-worker exchange of partitioned chunks.
  self._partition_mgr = DistPartitionManager()
def _partition_by_chunk(
    self,
    val: Any,
    val_idx: torch.Tensor,
    partition_fn,
    total_val_size: int,
    generate_pb = True
):
  r""" Partition generic values chunk by chunk and sync with all other
  partitioners.

  Args:
    val: The local values to partition (indexable via ``index_select``);
      may be ``None`` when only a partition book is needed.
    val_idx: The global ids corresponding to ``val``.
    partition_fn: Callable mapping ``(ids, (start, end))`` to a tensor of
      per-element target partition indices.
    total_val_size: Total number of values across all partitioners, used to
      size the partition book.
    generate_pb: Whether a partition book should be generated.

  Returns:
    The list of value chunks assigned to the current partition, and the
    partition book from the partition manager (only meaningful when
    ``generate_pb`` is ``True``).
  """
  val_num = len(val_idx)
  # Ceiling division: number of chunks needed to cover all local values.
  chunk_num = (val_num + self.chunk_size - 1) // self.chunk_size
  chunk_start_pos = 0
  self._partition_mgr.reset(total_val_size, generate_pb)
  # All partitioners must reset their managers before any chunk exchange.
  barrier()
  for _ in range(chunk_num):
    chunk_end_pos = min(val_num, chunk_start_pos + self.chunk_size)
    current_chunk_size = chunk_end_pos - chunk_start_pos
    chunk_idx = torch.arange(current_chunk_size, dtype=torch.long)
    chunk_val = index_select(val, index=(chunk_start_pos, chunk_end_pos))
    chunk_val_idx = val_idx[chunk_start_pos:chunk_end_pos]
    # Target partition index for each value in this chunk.
    chunk_partition_idx = partition_fn(
      chunk_val_idx, (chunk_start_pos, chunk_end_pos))
    # Split the chunk into one (values, ids) pair per target partition.
    chunk_res = []
    for pidx in range(self.num_parts):
      mask = (chunk_partition_idx == pidx)
      idx = torch.masked_select(chunk_idx, mask)
      chunk_res.append((index_select(chunk_val, idx), chunk_val_idx[idx]))
    # Hand the per-partition slices to the manager — presumably it keeps the
    # local slice and ships remote slices to their owners via RPC (managed
    # inside DistPartitionManager; confirm there).
    self._partition_mgr.process(chunk_res)
    chunk_start_pos += current_chunk_size
  # Wait until every partitioner finishes processing all of its chunks.
  barrier()
  return (
    self._partition_mgr.cur_part_val_list,
    self._partition_mgr.partition_book
  )
def _partition_node(
    self,
    ntype: Optional[NodeType] = None
) -> PartitionBook:
  r""" Partition graph nodes of a specified node type in parallel.

  Each partitioner randomly assigns its own contiguous slice of the node id
  space, and the per-node results from all partitioners are merged into a
  global partition book.

  Args:
    ntype (str): The type of the input nodes, must be provided for a
      heterogeneous graph. (default: ``None``)

  Returns:
    PartitionBook: The partition book of graph nodes.
  """
  if 'hetero' == self.data_cls:
    assert ntype is not None
    node_num = self.num_nodes[ntype]
  else:
    node_num = self.num_nodes
  # Each partitioner handles a contiguous range of the node id space.
  per_node_num = node_num // self.num_parts
  local_node_start = per_node_num * self.current_partition_idx
  if self.current_partition_idx == self.num_parts - 1:
    # Fix: the last partitioner must also cover the remainder nodes
    # (node_num % num_parts). The previous upper bound of
    # ``per_node_num * (idx + 1)`` left those nodes unassigned by every
    # partitioner, so they never appeared in the partition book.
    local_node_end = node_num
  else:
    local_node_end = per_node_num * (self.current_partition_idx + 1)
  local_node_ids = torch.arange(
    local_node_start, local_node_end, dtype=torch.int64
  )

  def _node_pfn(n_ids, _):
    # Random assignment: shuffle a balanced modulo layout so each partition
    # still receives an (almost) equal share of nodes.
    partition_idx = n_ids % self.num_parts
    rand_order = torch.randperm(len(n_ids))
    return partition_idx[rand_order]

  _, node_pb = self._partition_by_chunk(
    val=None,
    val_idx=local_node_ids,
    partition_fn=_node_pfn,
    total_val_size=node_num,
    generate_pb=True
  )
  return node_pb
def _partition_graph(
    self,
    node_pbs: Union[PartitionBook, Dict[NodeType, PartitionBook]],
    etype: Optional[EdgeType] = None
) -> Tuple[GraphPartitionData, PartitionBook]:
  r""" Partition the graph topology of one edge type in parallel.

  Every edge follows the partition of one of its endpoint nodes: the source
  node when ``edge_assign_strategy`` is ``'by_src'``, otherwise the
  destination node.

  Args:
    node_pbs: The node partition book(s) produced by node partitioning,
      a dict keyed by node type for heterogeneous data.
    etype: The edge type to partition, required for heterogeneous data.

  Returns:
    A tuple of the graph data owned by the current partition and the edge
    partition book.
  """
  assign_by_src = ('by_src' == self.edge_assign_strategy)
  if 'hetero' == self.data_cls:
    assert isinstance(node_pbs, dict)
    assert etype is not None
    src_ntype, _, dst_ntype = etype
    src, dst = self.edge_index[etype][0], self.edge_index[etype][1]
    eids = self.edge_ids[etype]
    edge_num = self.num_edges[etype]
    anchor_pb = node_pbs[src_ntype] if assign_by_src else node_pbs[dst_ntype]
  else:
    src, dst = self.edge_index[0], self.edge_index[1]
    eids = self.edge_ids
    edge_num = self.num_edges
    anchor_pb = node_pbs
  anchor_indices = src if assign_by_src else dst

  def _edge_pfn(_, chunk_range):
    # An edge is assigned to the partition that owns its anchor endpoint.
    return anchor_pb[index_select(anchor_indices, chunk_range)]

  res_list, edge_pb = self._partition_by_chunk(
    val=(src, dst, eids),
    val_idx=eids,
    partition_fn=_edge_pfn,
    total_val_size=edge_num,
    generate_pb=True
  )
  current_graph_part = GraphPartitionData(
    edge_index=(
      torch.cat([chunk[0] for chunk in res_list]),
      torch.cat([chunk[1] for chunk in res_list]),
    ),
    eids=torch.cat([chunk[2] for chunk in res_list])
  )
  return current_graph_part, edge_pb
def _partition_node_feat(
    self,
    node_pb: PartitionBook,
    ntype: Optional[NodeType] = None,
) -> Optional[FeaturePartitionData]:
  r""" Split node features according to the node partition book and return
  the feature shard owned by the current partition, or ``None`` when the
  dataset carries no node features.
  """
  if self.node_feat is None:
    return None
  is_hetero = ('hetero' == self.data_cls)
  if is_hetero:
    assert ntype is not None
  node_num = self.num_nodes[ntype] if is_hetero else self.num_nodes
  feats = self.node_feat[ntype] if is_hetero else self.node_feat
  feat_ids = self.node_feat_ids[ntype] if is_hetero else self.node_feat_ids

  def _node_feat_pfn(nfeat_ids, _):
    # A node feature row goes wherever its node went.
    return node_pb[nfeat_ids]

  shards, _ = self._partition_by_chunk(
    val=(feats, feat_ids),
    val_idx=feat_ids,
    partition_fn=_node_feat_pfn,
    total_val_size=node_num,
    generate_pb=False
  )
  return FeaturePartitionData(
    feats=torch.cat([s[0] for s in shards]),
    ids=torch.cat([s[1] for s in shards]),
    cache_feats=None,
    cache_ids=None
  )
def _partition_edge_feat(
    self,
    edge_pb: PartitionBook,
    etype: Optional[EdgeType] = None,
) -> Optional[FeaturePartitionData]:
  r""" Split edge features according to the edge partition book and return
  the feature shard owned by the current partition, or ``None`` when the
  dataset carries no edge features.
  """
  if self.edge_feat is None:
    return None
  is_hetero = ('hetero' == self.data_cls)
  if is_hetero:
    assert etype is not None
  edge_num = self.num_edges[etype] if is_hetero else self.num_edges
  feats = self.edge_feat[etype] if is_hetero else self.edge_feat
  feat_ids = self.edge_feat_ids[etype] if is_hetero else self.edge_feat_ids

  def _edge_feat_pfn(efeat_ids, _):
    # An edge feature row goes wherever its edge went.
    return edge_pb[efeat_ids]

  shards, _ = self._partition_by_chunk(
    val=(feats, feat_ids),
    val_idx=feat_ids,
    partition_fn=_edge_feat_pfn,
    total_val_size=edge_num,
    generate_pb=False
  )
  return FeaturePartitionData(
    feats=torch.cat([s[0] for s in shards]),
    ids=torch.cat([s[1] for s in shards]),
    cache_feats=None,
    cache_ids=None
  )
def partition(self):
  r""" Partition graph and feature data into different parts along with all
  other distributed partitioners, and save the results belonging to the
  current partition index into the output directory.
  """
  ensure_dir(self.output_dir)
  if 'hetero' == self.data_cls:
    # Nodes (and their features) must be partitioned first: the edge
    # assignment below depends on the node partition books of all types.
    node_pb_dict = {}
    for ntype in self.node_types:
      node_pb = self._partition_node(ntype)
      node_pb_dict[ntype] = node_pb
      save_node_pb(self.output_dir, node_pb, ntype)
      current_node_feat_part = self._partition_node_feat(node_pb, ntype)
      if current_node_feat_part is not None:
        save_feature_partition(
          self.output_dir, self.current_partition_idx, current_node_feat_part,
          group='node_feat', graph_type=ntype
        )
      # Release each shard as soon as it is persisted to bound peak memory.
      del current_node_feat_part
    for etype in self.edge_types:
      current_graph_part, edge_pb = self._partition_graph(node_pb_dict, etype)
      save_edge_pb(self.output_dir, edge_pb, etype)
      save_graph_partition(
        self.output_dir, self.current_partition_idx, current_graph_part, etype
      )
      del current_graph_part
      current_edge_feat_part = self._partition_edge_feat(edge_pb, etype)
      if current_edge_feat_part is not None:
        save_feature_partition(
          self.output_dir, self.current_partition_idx, current_edge_feat_part,
          group='edge_feat', graph_type=etype
        )
      del current_edge_feat_part
  else:
    # Homogeneous data: single node/edge partition pass.
    node_pb = self._partition_node()
    save_node_pb(self.output_dir, node_pb)
    current_node_feat_part = self._partition_node_feat(node_pb)
    if current_node_feat_part is not None:
      save_feature_partition(
        self.output_dir, self.current_partition_idx,
        current_node_feat_part, group='node_feat'
      )
    del current_node_feat_part
    current_graph_part, edge_pb = self._partition_graph(node_pb)
    save_edge_pb(self.output_dir, edge_pb)
    save_graph_partition(
      self.output_dir, self.current_partition_idx, current_graph_part
    )
    del current_graph_part
    current_edge_feat_part = self._partition_edge_feat(edge_pb)
    if current_edge_feat_part is not None:
      save_feature_partition(
        self.output_dir, self.current_partition_idx,
        current_edge_feat_part, group='edge_feat'
      )
    del current_edge_feat_part
  # save meta.
  save_meta(self.output_dir, self.num_parts, self.data_cls,
            self.node_types, self.edge_types)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_options.py | graphlearn_torch/python/distributed/dist_options.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from typing import List, Optional, Union, Literal
import torch
from ..utils import assign_device
from .dist_context import DistContext, assign_server_by_order
class _BasicDistSamplingWorkerOptions(object):
  r""" Basic options to launch distributed sampling workers.

  Args:
    num_workers (int): How many workers to use for distributed neighbor
      sampling of the current process, must be same for each process of
      the current context group. (default: ``1``).
    worker_devices (torch.device or List[torch.device], optional): List of
      devices assigned to workers of this group. If set to ``None``, the
      devices to use will be automatically assigned (the cuda device will be
      preferred if available). (default: ``None``).
    worker_concurrency (int): The max sampling concurrency with different
      seeds batches for each sampling worker, clamped into ``[1, 32]``.
      (default: ``1``).
    master_addr (str, optional): Master address for rpc initialization across
      all sampling workers. The environment variable ``MASTER_ADDR`` will be
      used if set to ``None``. (default: ``None``).
    master_port (str or int, optional): Master port for rpc initialization
      across all sampling workers. If set to ``None``, in order to avoid
      conflicts with the master port already used by other modules (e.g., the
      method ``init_process_group`` of ``torch.distributed``), the value of
      the environment variable ``MASTER_PORT`` will be increased by one as
      the real rpc port for sampling workers. Otherwise, the provided port
      should be guaranteed to avoid such conflicts. (default: ``None``).
    num_rpc_threads (int, optional): Number of threads used for rpc agent on
      each sampling worker. If set to ``None``, the number of rpc threads to
      use will be specified according to the actual workload, but will not
      exceed 16. (default: ``None``).
    rpc_timeout (float): The timeout in seconds for all rpc requests during
      distributed sampling and feature collection. (default: ``180``).

  Raises:
    ValueError: If neither an argument nor the corresponding environment
      variable provides the master address or port.
  """
  def __init__(self,
               num_workers: int = 1,
               worker_devices: Optional[List[torch.device]] = None,
               worker_concurrency: int = 1,
               master_addr: Optional[str] = None,
               master_port: Optional[Union[str, int]] = None,
               num_rpc_threads: Optional[int] = None,
               rpc_timeout: float = 180):
    self.num_workers = num_workers
    # Filled in later by ``_set_worker_ranks`` once the distributed context
    # of the current process group is known.
    self.worker_world_size = None
    self.worker_ranks = None
    if worker_devices is None:
      # Defer device assignment to ``_assign_worker_devices``.
      self.worker_devices = None
    elif isinstance(worker_devices, (list, tuple)):
      assert len(worker_devices) == self.num_workers
      self.worker_devices = list(worker_devices)
    else:
      # A single device was provided: replicate it for every worker.
      self.worker_devices = [worker_devices] * self.num_workers
    # Worker concurrency should stay within [1, 32].
    self.worker_concurrency = min(max(worker_concurrency, 1), 32)
    if master_addr is not None:
      self.master_addr = str(master_addr)
    elif os.environ.get('MASTER_ADDR') is not None:
      self.master_addr = os.environ['MASTER_ADDR']
    else:
      raise ValueError(f"'{self.__class__.__name__}': missing master address "
                       "for rpc communication, try to provide it or set it "
                       "with environment variable 'MASTER_ADDR'")
    if master_port is not None:
      self.master_port = int(master_port)
    elif os.environ.get('MASTER_PORT') is not None:
      # Bump by one to avoid conflicting with the port already used by
      # e.g. ``torch.distributed.init_process_group``.
      self.master_port = int(os.environ['MASTER_PORT']) + 1
    else:
      # Fixed: this message previously told users to set 'MASTER_ADDR',
      # which is misleading when the missing value is the master port.
      raise ValueError(f"'{self.__class__.__name__}': missing master port "
                       "for rpc communication, try to provide it or set it "
                       "with environment variable 'MASTER_PORT'")
    self.num_rpc_threads = num_rpc_threads
    if self.num_rpc_threads is not None:
      assert self.num_rpc_threads > 0
    self.rpc_timeout = rpc_timeout

  def _set_worker_ranks(self, current_ctx: 'DistContext'):
    # Assign a globally unique, contiguous rank range to the workers owned
    # by the current process.
    self.worker_world_size = current_ctx.world_size * self.num_workers
    self.worker_ranks = [
      current_ctx.rank * self.num_workers + i
      for i in range(self.num_workers)
    ]

  def _assign_worker_devices(self):
    # Pick one device per worker unless they were provided explicitly.
    if self.worker_devices is not None:
      return
    self.worker_devices = [assign_device() for _ in range(self.num_workers)]
class CollocatedDistSamplingWorkerOptions(_BasicDistSamplingWorkerOptions):
  r""" Options for a single distributed sampling worker that runs collocated
  with (i.e. inside) the current process.

  Args:
    master_addr (str, optional): Master address for rpc initialization across
      all sampling workers. (default: ``None``).
    master_port (str or int, optional): Master port for rpc initialization
      across all sampling workers. (default: ``None``).
    num_rpc_threads (int, optional): Number of threads used for the rpc agent
      on each sampling worker. (default: ``None``).
    rpc_timeout (float): The timeout in seconds for rpc requests.
      (default: ``180``).
    use_all2all (bool): Whether to collect distributed node/edge features
      with all2all instead of p2p rpc. (default: ``False``).

  See ``_BasicDistSamplingWorkerOptions`` for detailed comments on the
  shared arguments.
  """
  def __init__(self,
               master_addr: Optional[str] = None,
               master_port: Optional[Union[str, int]] = None,
               num_rpc_threads: Optional[int] = None,
               rpc_timeout: float = 180,
               use_all2all: bool = False):
    # Collocated mode always runs exactly one worker with unit concurrency
    # and no explicit device list.
    super().__init__(num_workers=1,
                     worker_devices=None,
                     worker_concurrency=1,
                     master_addr=master_addr,
                     master_port=master_port,
                     num_rpc_threads=num_rpc_threads,
                     rpc_timeout=rpc_timeout)
    self.use_all2all = use_all2all
class MpDistSamplingWorkerOptions(_BasicDistSamplingWorkerOptions):
  r""" Options for launching distributed sampling workers with
  multiprocessing.

  With these options, all sampling workers are launched on subprocesses
  spawned via ``torch.multiprocessing``, and a shared-memory channel is
  created so that the sampled results produced by the worker subprocesses
  can be consumed by the current process.

  Args:
    num_workers (int): How many workers (subprocesses to spawn) to use for
      distributed neighbor sampling of the current process. (default: ``1``).
    worker_devices (torch.device or List[torch.device], optional): List of
      devices assigned to workers of this group. (default: ``None``).
    worker_concurrency (int): The max sampling concurrency for each sampling
      worker. (default: ``4``).
    master_addr (str, optional): Master address for rpc initialization across
      all sampling workers. (default: ``None``).
    master_port (str or int, optional): Master port for rpc initialization
      across all sampling workers. (default: ``None``).
    num_rpc_threads (int, optional): Number of threads used for the rpc agent
      on each sampling worker. (default: ``None``).
    rpc_timeout (float): The timeout in seconds for rpc requests.
      (default: ``180``).
    channel_size (int or str): The shared-memory buffer size (bytes)
      allocated for the channel. ``num_workers * 64MB`` is used when set to
      ``None``. (default: ``None``).
    pin_memory (bool): Set to ``True`` to register the underlying shared
      memory for cuda, which achieves better performance when copying loaded
      data from the channel to a cuda device. (default: ``False``).
    use_all2all (bool): Whether to collect distributed node/edge features
      with all2all instead of p2p rpc. (default: ``False``).

  See ``_BasicDistSamplingWorkerOptions`` for detailed comments on the
  shared arguments.
  """
  def __init__(self,
               num_workers: int = 1,
               worker_devices: Optional[List[torch.device]] = None,
               worker_concurrency: int = 4,
               master_addr: Optional[str] = None,
               master_port: Optional[Union[str, int]] = None,
               num_rpc_threads: Optional[int] = None,
               rpc_timeout: float = 180,
               channel_size: Optional[Union[int, str]] = None,
               pin_memory: bool = False,
               use_all2all: bool = False):
    super().__init__(num_workers=num_workers,
                     worker_devices=worker_devices,
                     worker_concurrency=worker_concurrency,
                     master_addr=master_addr,
                     master_port=master_port,
                     num_rpc_threads=num_rpc_threads,
                     rpc_timeout=rpc_timeout)
    # One channel slot per in-flight sampling task across all workers.
    self.channel_capacity = self.num_workers * self.worker_concurrency
    # Default shared-memory budget: 64MB per worker.
    self.channel_size = (
      channel_size if channel_size is not None else f'{self.num_workers * 64}MB'
    )
    self.pin_memory = pin_memory
    self.use_all2all = use_all2all
class RemoteDistSamplingWorkerOptions(_BasicDistSamplingWorkerOptions):
  r""" Options for launching distributed sampling workers on remote servers.

  Note that if ``RemoteDistSamplingWorkerOptions`` is used, all sampling
  workers will be launched on remote servers. Thus, a cross-machine based
  channel will be created for message passing of sampled results, which are
  produced by those remote sampling workers and consumed by the current
  process.

  Args:
    server_rank (int or List[int], optional): The rank of server to launch
      sampling workers, can be multiple. If set to ``None``, it will be
      automatically assigned. (default: ``None``).
    num_workers (int): How many workers to launch on the remote server for
      distributed neighbor sampling of the current process. (default: ``1``).
    worker_devices (torch.device or List[torch.device], optional): List of
      devices assigned to workers of this group. (default: ``None``).
    worker_concurrency (int): The max sampling concurrency for each sampling
      worker. (default: ``4``).
    master_addr (str, optional): Master address for rpc initialization across
      all sampling workers. (default: ``None``).
    master_port (str or int, optional): Master port for rpc initialization
      across all sampling workers. (default: ``None``).
    num_rpc_threads (int, optional): Number of threads used for rpc agent on
      each sampling worker. (default: ``None``).
    rpc_timeout (float): The timeout in seconds for rpc requests.
      (default: ``180``).
    buffer_size (int or str): The size (bytes) allocated for the server-side
      buffer. The number of ``num_workers * 64MB`` will be used if set to
      ``None``. (default: ``None``).
    prefetch_size (int): The max prefetched sampled messages for consuming on
      the client side; must not exceed the buffer capacity.
      (default: ``4``).
    worker_key (str, optional): A key identifying the sampling worker group
      on the server side; derived from the master port when ``glt_graph`` is
      provided. (default: ``None``).
    glt_graph: Used in GraphScope side to get parameters. (default: ``None``).
    workload_type: Used in GraphScope side, indicates the type of option.
      This field must be set when ``glt_graph`` is not None.
      (default: ``None``).
    use_all2all (bool): Whether to collect distributed node/edge features
      with all2all instead of p2p rpc. (default: ``False``).

  Raises:
    ValueError: If ``glt_graph`` is given without ``workload_type``, or if
      ``prefetch_size`` exceeds the buffer capacity.
  """
  def __init__(self,
               server_rank: Optional[Union[int, List[int]]] = None,
               num_workers: int = 1,
               worker_devices: Optional[List[torch.device]] = None,
               worker_concurrency: int = 4,
               master_addr: Optional[str] = None,
               master_port: Optional[Union[str, int]] = None,
               num_rpc_threads: Optional[int] = None,
               rpc_timeout: float = 180,
               buffer_size: Optional[Union[int, str]] = None,
               prefetch_size: int = 4,
               worker_key: Optional[str] = None,
               glt_graph = None,
               workload_type: Optional[Literal['train', 'validate', 'test']] = None,
               use_all2all: bool = False):
    # glt_graph is used in GraphScope side to get parameters
    if glt_graph:
      if not workload_type:
        raise ValueError(f"'{self.__class__.__name__}': missing workload_type ")
      master_addr = glt_graph.master_addr
      # Select the per-workload loader port exposed by the GraphScope graph.
      if workload_type == 'train':
        master_port = glt_graph.train_loader_master_port
      elif workload_type == 'validate':
        master_port = glt_graph.val_loader_master_port
      elif workload_type == 'test':
        master_port = glt_graph.test_loader_master_port
      # NOTE(review): the worker key is derived from the selected port,
      # presumably so that loaders for different workloads map to distinct
      # worker groups on the server — confirm against the server-side usage.
      worker_key = str(master_port)
    super().__init__(num_workers, worker_devices, worker_concurrency,
                     master_addr, master_port, num_rpc_threads, rpc_timeout)
    if server_rank is not None:
      self.server_rank = server_rank
    else:
      # Auto-assign a server when none is specified.
      self.server_rank = assign_server_by_order()
    # One buffer slot per in-flight sampling task across all remote workers.
    self.buffer_capacity = self.num_workers * self.worker_concurrency
    if buffer_size is None:
      # Default server-side buffer budget: 64MB per worker.
      self.buffer_size = f'{self.num_workers * 64}MB'
    else:
      self.buffer_size = buffer_size
    self.prefetch_size = prefetch_size
    if self.prefetch_size > self.buffer_capacity:
      raise ValueError(f"'{self.__class__.__name__}': the prefetch count "
                       f"{self.prefetch_size} exceeds the buffer capacity "
                       f"{self.buffer_capacity}")
    self.worker_key = worker_key
    self.use_all2all = use_all2all
# Union of every supported sampling-worker option type: APIs accepting this
# alias can launch collocated, multiprocessing-based, or remote workers.
AllDistSamplingWorkerOptions = Union[
  CollocatedDistSamplingWorkerOptions,
  MpDistSamplingWorkerOptions,
  RemoteDistSamplingWorkerOptions
]
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_table_dataset.py | graphlearn_torch/python/distributed/dist_table_dataset.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODPS table related distributed partitioner and dataset."""
import datetime
from multiprocessing.reduction import ForkingPickler
import numpy as np
import torch
import time
from typing import Dict, Optional, Union
try:
import common_io
except ImportError:
pass
from ..typing import (
NodeType, EdgeType, TensorDataType,
)
from .dist_dataset import DistDataset, _cat_feature_cache
from .dist_random_partitioner import DistRandomPartitioner
class DistTableRandomPartitioner(DistRandomPartitioner):
  r""" A distributed random partitioner for parallel partitioning of large
  scale graphs stored as edge tables and node tables.

  Each distributed partitioner processes one slice of the full table, and
  all slices are partitioned in parallel. After partitioning, each
  partitioner holds the partition corresponding to its own rank in memory
  (nothing is written to disk, unlike the base class).

  Args:
    Identical to :class:`DistRandomPartitioner`, except that no output
    directory is required.

  The :meth:`partition` method returns a tuple of:
    int: Number of all partitions.
    int: The current partition idx.
    GraphPartitionData/HeteroGraphPartitionData: graph partition data.
    FeaturePartitionData/HeteroFeaturePartitionData: node feature partition
      data, optional.
    FeaturePartitionData/HeteroFeaturePartitionData: edge feature partition
      data, optional.
    PartitionBook/HeteroNodePartitionDict: node partition book.
    PartitionBook/HeteroEdgePartitionDict: edge partition book.
  """
  def __init__(
      self,
      num_nodes: Union[int, Dict[NodeType, int]],
      edge_index: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
      edge_ids: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
      node_feat: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
      node_feat_ids: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
      edge_feat: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
      edge_feat_ids: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
      num_parts: Optional[int] = None,
      current_partition_idx: Optional[int] = None,
      node_feat_dtype: torch.dtype = torch.float32,
      edge_feat_dtype: torch.dtype = torch.float32,
      edge_assign_strategy: str = 'by_src',
      chunk_size: int = 10000,
      master_addr: Optional[str] = None,
      master_port: Optional[str] = None,
      num_rpc_threads: int = 16,
  ):
    # The output directory is unused since results stay in memory; pass an
    # empty string to satisfy the base-class signature.
    super().__init__(
      '', num_nodes, edge_index, edge_ids,
      node_feat=node_feat, node_feat_ids=node_feat_ids,
      edge_feat=edge_feat, edge_feat_ids=edge_feat_ids,
      num_parts=num_parts, current_partition_idx=current_partition_idx,
      node_feat_dtype=node_feat_dtype, edge_feat_dtype=edge_feat_dtype,
      edge_assign_strategy=edge_assign_strategy, chunk_size=chunk_size,
      master_addr=master_addr, master_port=master_port,
      num_rpc_threads=num_rpc_threads)

  def partition(self):
    r""" Run the partitioning along with all other distributed partitioners
    and return the in-memory results for the current partition index.
    """
    if self.data_cls != 'hetero':
      # Homogeneous data: single node/edge partition pass.
      node_pb = self._partition_node()
      node_feat = self._partition_node_feat(node_pb)
      graph, edge_pb = self._partition_graph(node_pb)
      edge_feat = self._partition_edge_feat(edge_pb)
      return (
        self.num_parts, self.current_partition_idx,
        graph, node_feat, edge_feat, node_pb, edge_pb
      )
    # Heterogeneous data: partition nodes first, since edges are assigned by
    # the partition of one of their endpoint nodes.
    node_pbs, node_feats = {}, {}
    for ntype in self.node_types:
      pb = self._partition_node(ntype)
      node_pbs[ntype] = pb
      feat_part = self._partition_node_feat(pb, ntype)
      if feat_part is not None:
        node_feats[ntype] = feat_part
    graphs, edge_pbs, edge_feats = {}, {}, {}
    for etype in self.edge_types:
      graph_part, pb = self._partition_graph(node_pbs, etype)
      graphs[etype] = graph_part
      edge_pbs[etype] = pb
      feat_part = self._partition_edge_feat(pb, etype)
      if feat_part is not None:
        edge_feats[etype] = feat_part
    return (
      self.num_parts, self.current_partition_idx,
      graphs, node_feats, edge_feats, node_pbs, edge_pbs
    )
class DistTableDataset(DistDataset):
""" Creates `DistDataset` from ODPS tables.
Args:
edge_tables: A dict({edge_type : odps_table}) denoting each
bipartite graph input table of heterogeneous graph, where edge_type is
a tuple of (src_type, edge_type, dst_type).
node_tables: A dict({node_type(str) : odps_table}) denoting each
input node table.
num_nodes: Number of all graph nodes, should be a dict for hetero data.
graph_mode: mode in graphlearn_torch's `Graph`, 'CPU', 'ZERO_COPY'
or 'CUDA'.
sort_func: function for feature reordering, return feature data(2D tenosr)
and a map(1D tensor) from id to index.
split_ratio: The proportion of data allocated to the GPU, between 0 and 1.
device_group_list: A list of `DeviceGroup`. Each DeviceGroup must have the
same size. A group of GPUs with peer-to-peer access to each other should
be set in the same device group for high feature collection performance.
directed: A Boolean value indicating whether the graph topology is
directed.
reader_threads: The number of threads of table reader.
reader_capacity: The capacity of table reader.
reader_batch_size: The number of records read at once.
label: A CPU torch.Tensor(homo) or a Dict[NodeType, torch.Tensor](hetero)
with the label data for graph nodes.
device: The target cuda device rank to perform graph operations and
feature lookups.
feature_with_gpu (bool): A Boolean value indicating whether the created
``Feature`` objects of node/edge features use ``UnifiedTensor``.
If True, it means ``Feature`` consists of ``UnifiedTensor``, otherwise
``Feature`` is a PyTorch CPU Tensor, the ``device_group_list`` and
``device`` will be invalid. (default: ``True``)
edge_assign_strategy: The assignment strategy when partitioning edges,
should be 'by_src' or 'by_dst'.
chunk_size: The chunk size for partitioning.
master_addr: The master TCP address for RPC connection between all
distributed partitioners.
master_port: The master TCP port for RPC connection between all
distributed partitioners.
num_rpc_threads: The number of RPC worker threads to use.
"""
def load(
    self,
    num_partitions=1,
    partition_idx=0,
    edge_tables=None,
    node_tables=None,
    num_nodes=0,
    graph_mode='ZERO_COPY',
    device_group_list=None,
    reader_threads=10,
    reader_capacity=10240,
    reader_batch_size=1024,
    label=None,
    device=None,
    feature_with_gpu=True,
    edge_assign_strategy='by_src',
    chunk_size=10000,
    master_addr=None,
    master_port=None,
    num_rpc_threads=16,
):
  r""" Read edge and node ODPS tables, partition the graph topology and
  node features across all distributed workers, and initialize this
  dataset partition (graph, node features and labels).

  Args:
    num_partitions: Number of data partitions (the distributed world size).
    partition_idx: Index of the partition owned by the current worker.
    edge_tables: A dict({edge_type: odps_table}), where edge_type is a
      tuple of (src_type, edge_type, dst_type). More than one entry means
      a heterogeneous graph.
    node_tables: A dict({node_type(str): odps_table}).
    num_nodes: Number of all graph nodes, should be a dict for hetero data.
    graph_mode: Mode in graphlearn_torch's ``Graph``, 'CPU', 'ZERO_COPY'
      or 'CUDA'.
    device_group_list: A list of ``DeviceGroup`` used for feature
      collection; GPUs with peer-to-peer access should share a group.
    reader_threads: The number of threads of the table reader.
    reader_capacity: The capacity of the table reader.
    reader_batch_size: The number of records read at once.
    label: A CPU torch.Tensor (homo) or Dict[NodeType, torch.Tensor]
      (hetero) with the label data for graph nodes.
    device: The target cuda device rank for graph operations and feature
      lookups.
    feature_with_gpu (bool): Whether created ``Feature`` objects use
      ``UnifiedTensor``. (default: ``True``)
    edge_assign_strategy: 'by_src' or 'by_dst' edge partition strategy.
    chunk_size: The chunk size for partitioning.
    master_addr: The master TCP address for RPC connection between all
      distributed partitioners.
    master_port: The master TCP port for RPC connection between all
      distributed partitioners.
    num_rpc_threads: The number of RPC worker threads to use.
  """
  assert isinstance(edge_tables, dict)
  assert isinstance(node_tables, dict)
  edge_index, eids, feature = {}, {}, {}
  edge_hetero = (len(edge_tables) > 1)
  node_hetero = (len(node_tables) > 1)
  print("Start Loading edge and node tables...")
  step = 0
  start_time = time.time()
  for e_type, table in edge_tables.items():
    edge_list = []
    reader = common_io.table.TableReader(table,
                                         slice_id=partition_idx,
                                         slice_count=num_partitions,
                                         num_threads=reader_threads,
                                         capacity=reader_capacity)
    while True:
      try:
        data = reader.read(reader_batch_size, allow_smaller_final_batch=True)
        edge_list.extend(data)
        step += 1
      except common_io.exception.OutOfRangeException:
        reader.close()
        break
      if step % 1000 == 0:
        print(f"{datetime.datetime.now()}: load "
              f"{step * reader_batch_size} edges.")
    # Each record is (src_id, dst_id, edge_id).
    rows = [e[0] for e in edge_list]
    cols = [e[1] for e in edge_list]
    eids_array = np.array([e[2] for e in edge_list], dtype=np.int64)
    edge_array = np.stack([np.array(rows, dtype=np.int64),
                           np.array(cols, dtype=np.int64)])
    if edge_hetero:
      # BUGFIX: previously `edge_index[e_type]` was assigned `eids_array`
      # (edge ids instead of the 2 x E index) and `eids[e_type]` was
      # assigned the `eids` dict itself, corrupting heterogeneous inputs.
      edge_index[e_type] = edge_array
      eids[e_type] = eids_array
    else:
      edge_index = edge_array
      eids = eids_array
    del rows
    del cols
    del edge_list
  step = 0
  for n_type, table in node_tables.items():
    feature_list = []
    reader = common_io.table.TableReader(table,
                                         slice_id=partition_idx,
                                         slice_count=num_partitions,
                                         num_threads=reader_threads,
                                         capacity=reader_capacity)
    while True:
      try:
        data = reader.read(reader_batch_size, allow_smaller_final_batch=True)
        feature_list.extend(data)
        step += 1
      except common_io.exception.OutOfRangeException:
        reader.close()
        break
      if step % 1000 == 0:
        print(f"{datetime.datetime.now()}: load "
              f"{step * reader_batch_size} nodes.")
    # NOTE(review): for heterogeneous node tables `ids` is overwritten on
    # each iteration, so only the last node type's ids reach the
    # partitioner below — confirm whether `node_feat_ids` should be a
    # dict keyed by node type here.
    ids = torch.tensor([feat[0] for feat in feature_list], dtype=torch.long)
    # Feature values are ':'-separated floats, possibly bytes-encoded.
    if isinstance(feature_list[0][1], bytes):
      float_feat = [
        list(map(float, feat[1].decode().split(':')))
        for feat in feature_list
      ]
    else:
      float_feat = [
        list(map(float, feat[1].split(':')))
        for feat in feature_list
      ]
    if node_hetero:
      feature[n_type] = torch.tensor(float_feat)
    else:
      feature = torch.tensor(float_feat)
    del float_feat
    del feature_list
  load_time = (time.time() - start_time) / 60
  print(f'Loading table completed in {load_time:.2f} minutes.')
  print("Start partitioning graph and feature...")
  p_start = time.time()
  dist_partitioner = DistTableRandomPartitioner(
    num_nodes, edge_index=edge_index, edge_ids=eids,
    node_feat=feature, node_feat_ids=ids,
    num_parts=num_partitions, current_partition_idx=partition_idx,
    edge_assign_strategy=edge_assign_strategy,
    chunk_size=chunk_size, master_addr=master_addr, master_port=master_port,
    num_rpc_threads=num_rpc_threads)
  (
    self.num_partitions,
    self.partition_idx,
    graph_data,
    node_feat_data,
    edge_feat_data,
    self.node_pb,
    self.edge_pb
  ) = dist_partitioner.partition()
  part_time = (time.time() - p_start) / 60
  print(f'Partitioning completed in {part_time:.2f} minutes.')
  # Initialize graph topology from the local partition.
  if isinstance(graph_data, dict):
    # heterogeneous.
    edge_index, edge_ids = {}, {}
    for k, v in graph_data.items():
      edge_index[k] = v.edge_index
      edge_ids[k] = v.eids
  else:
    # homogeneous.
    edge_index = graph_data.edge_index
    edge_ids = graph_data.eids
  self.init_graph(edge_index, edge_ids, layout='COO',
                  graph_mode=graph_mode, device=device)
  # Initialize node features (with hot-feature GPU cache, if enabled).
  if node_feat_data is not None:
    node_cache_ratio, node_feat, node_feat_id2idx, node_feat_pb = \
      _cat_feature_cache(partition_idx, node_feat_data, self.node_pb)
    self.init_node_features(
      node_feat, node_feat_id2idx, None, node_cache_ratio,
      device_group_list, device, feature_with_gpu, dtype=None
    )
    self._node_feat_pb = node_feat_pb
  # Initialize edge features.
  if edge_feat_data is not None:
    edge_cache_ratio, edge_feat, edge_feat_id2idx, edge_feat_pb = \
      _cat_feature_cache(partition_idx, edge_feat_data, self.edge_pb)
    self.init_edge_features(
      edge_feat, edge_feat_id2idx, edge_cache_ratio,
      device_group_list, device, feature_with_gpu, dtype=None
    )
    self._edge_feat_pb = edge_feat_pb
  # Load whole node labels.
  self.init_node_labels(label)
## Pickling Registration
def rebuild_dist_table_dataset(ipc_handle):
  # Reconstruct a ``DistTableDataset`` on the receiving side of a fork
  # from its shared IPC handle.
  return DistTableDataset.from_ipc_handle(ipc_handle)
def reduce_dist_table_dataset(dataset: DistTableDataset):
  # Pickle a ``DistTableDataset`` as (rebuild function, ipc handle) so it
  # can be transferred to forked subprocesses without copying data.
  return rebuild_dist_table_dataset, (dataset.share_ipc(),)
ForkingPickler.register(DistTableDataset, reduce_dist_table_dataset) | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_server.py | graphlearn_torch/python/distributed/dist_server.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import time
import threading
from typing import Dict, Optional, Union
import warnings
import torch
from ..partition import PartitionBook
from ..channel import ShmChannel, QueueTimeoutError
from ..sampler import NodeSamplerInput, EdgeSamplerInput, SamplingConfig, RemoteSamplerInput
from .dist_context import get_context, _set_server_context
from .dist_dataset import DistDataset
from .dist_options import RemoteDistSamplingWorkerOptions
from .dist_sampling_producer import DistMpSamplingProducer
from .rpc import barrier, init_rpc, shutdown_rpc
# How long (in seconds) `DistServer.wait_for_exit` sleeps between polls
# of the server's exit flag.
SERVER_EXIT_STATUS_CHECK_INTERVAL = 5.0
r""" Interval (in seconds) to check exit status of server.
"""
class DistServer(object):
  r""" A server that supports launching remote sampling workers for
  training clients.

  Note that this server is enabled only when the distribution mode is a
  server-client framework, and the graph and feature store will be
  partitioned and managed by all server nodes.

  Args:
    dataset (DistDataset): The ``DistDataset`` object of a partition of
      graph data and feature data, along with distributed partition books.
  """
  def __init__(self, dataset: DistDataset):
    self.dataset = dataset
    self._lock = threading.RLock()
    self._exit = False
    # Auto-incremental index, equal to the number of producers created.
    self._cur_producer_idx = 0
    # Maps the key in worker options (such as 'train', 'test') to the id
    # of the producer serving that key.
    self._worker_key2producer_id: Dict[str, int] = {}
    self._producer_pool: Dict[int, DistMpSamplingProducer] = {}
    self._msg_buffer_pool: Dict[int, ShmChannel] = {}
    self._epoch: Dict[int, int] = {}  # last epoch started per producer

  def shutdown(self):
    r""" Destroy every sampling producer managed by this server."""
    for producer_id in list(self._producer_pool.keys()):
      self.destroy_sampling_producer(producer_id)
    assert len(self._producer_pool) == 0
    assert len(self._msg_buffer_pool) == 0

  def wait_for_exit(self):
    r""" Block until the exit flag has been set to ``True``."""
    while not self._exit:
      time.sleep(SERVER_EXIT_STATUS_CHECK_INTERVAL)

  def exit(self):
    r""" Set the exit flag to ``True``."""
    self._exit = True
    return self._exit

  def get_dataset_meta(self):
    r""" Get the meta info of the distributed dataset managed by the
    current server, including partition info and graph types.
    """
    return self.dataset.num_partitions, self.dataset.partition_idx, \
        self.dataset.get_node_types(), self.dataset.get_edge_types()

  def get_node_partition_id(self, node_type, index):
    r""" Return the partition id of a node, or ``None`` if the partition
    book has an unexpected type. A plain ``PartitionBook`` means a
    homogeneous graph; a dict keyed by node type means heterogeneous.
    """
    if isinstance(self.dataset.node_pb, PartitionBook):
      return self.dataset.node_pb[index]
    if isinstance(self.dataset.node_pb, dict):
      return self.dataset.node_pb[node_type][index]
    return None

  def get_node_feature(self, node_type, index):
    r""" Fetch (a row of) the node feature tensor, moved to CPU for RPC
    transfer.
    """
    feature = self.dataset.get_node_feature(node_type)
    return feature[index].cpu()

  def get_tensor_size(self, node_type):
    r""" Return the shape of the node feature tensor of ``node_type``."""
    return self.dataset.get_node_feature(node_type).shape

  def get_node_label(self, node_type, index):
    r""" Fetch the label(s) of the given node indices."""
    return self.dataset.get_node_label(node_type)[index]

  def get_edge_index(self, edge_type, layout):
    r""" Return the (row, col) edge index of the managed graph.

    Raises:
      ValueError: if ``layout`` is not ``'coo'`` (the only supported one).
    """
    graph = self.dataset.get_graph(edge_type)
    if layout == 'coo':
      row, col, _, _ = graph.topo.to_coo()
      return (row, col)
    raise ValueError(f"Invalid layout {layout}")

  def get_edge_size(self, edge_type, layout):
    r""" Return (row_count, col_count) of the managed graph.

    Raises:
      ValueError: if ``layout`` is not ``'coo'``.
    """
    graph = self.dataset.get_graph(edge_type)
    if layout == 'coo':
      return (graph.row_count, graph.col_count)
    raise ValueError(f"Invalid layout {layout}")

  def create_sampling_producer(
      self,
      sampler_input: Union[NodeSamplerInput, EdgeSamplerInput, RemoteSamplerInput],
      sampling_config: SamplingConfig,
      worker_options: RemoteDistSamplingWorkerOptions,
  ) -> int:
    r""" Create and initialize an instance of ``DistMpSamplingProducer``
    with a group of subprocesses for distributed sampling. Producers are
    deduplicated by ``worker_options.worker_key`` — a second request with
    the same key returns the already-created producer's id.

    Args:
      sampler_input (NodeSamplerInput or EdgeSamplerInput): The input data
        for sampling.
      sampling_config (SamplingConfig): Configuration of sampling meta info.
      worker_options (RemoteDistSamplingWorkerOptions): Options for
        launching remote sampling workers by this server.

    Returns:
      A unique id of created sampling producer on this server.
    """
    if isinstance(sampler_input, RemoteSamplerInput):
      sampler_input = sampler_input.to_local_sampler_input(dataset=self.dataset)
    with self._lock:
      producer_id = self._worker_key2producer_id.get(worker_options.worker_key)
      if producer_id is None:
        producer_id = self._cur_producer_idx
        self._worker_key2producer_id[worker_options.worker_key] = producer_id
        self._cur_producer_idx += 1
        buffer = ShmChannel(
          worker_options.buffer_capacity, worker_options.buffer_size
        )
        producer = DistMpSamplingProducer(
          self.dataset, sampler_input, sampling_config, worker_options, buffer
        )
        producer.init()
        self._producer_pool[producer_id] = producer
        self._msg_buffer_pool[producer_id] = buffer
        self._epoch[producer_id] = -1
    return producer_id

  def destroy_sampling_producer(self, producer_id: int):
    r""" Shutdown and destroy a sampling producer managed by this server
    with its producer id.
    """
    with self._lock:
      producer = self._producer_pool.get(producer_id, None)
      if producer is not None:
        producer.shutdown()
        self._producer_pool.pop(producer_id)
        self._msg_buffer_pool.pop(producer_id)
        self._epoch.pop(producer_id)
        # BUGFIX: also drop the worker-key mapping, otherwise a later
        # `create_sampling_producer` with the same key would return this
        # (now destroyed) producer id.
        stale_keys = [
          k for k, v in self._worker_key2producer_id.items()
          if v == producer_id
        ]
        for k in stale_keys:
          self._worker_key2producer_id.pop(k)

  def start_new_epoch_sampling(self, producer_id: int, epoch: int):
    r""" Start a new epoch of sampling tasks for a specific sampling
    producer with its producer id. Duplicate or stale epoch requests
    (``epoch`` <= the last started epoch) are ignored.
    """
    with self._lock:
      cur_epoch = self._epoch[producer_id]
      if cur_epoch < epoch:
        self._epoch[producer_id] = epoch
        producer = self._producer_pool.get(producer_id, None)
        if producer is not None:
          producer.produce_all()

  def fetch_one_sampled_message(self, producer_id: int):
    r""" Fetch a sampled message from the buffer of a specific sampling
    producer with its producer id.

    Returns:
      A tuple ``(message, end_of_epoch)``; ``message`` is ``None`` when
      the producer id is unknown or the epoch is complete.
    """
    producer = self._producer_pool.get(producer_id, None)
    if producer is None:
      # BUGFIX: the original warning was a plain string literal, so the
      # '{producer_id}' placeholder was never interpolated.
      warnings.warn(f'invalid producer_id {producer_id}')
      return None, False
    if producer.is_all_sampling_completed_and_consumed():
      return None, True
    buffer = self._msg_buffer_pool.get(producer_id, None)
    while True:
      try:
        msg = buffer.recv(timeout_ms=500)
        return msg, False
      except QueueTimeoutError:
        # Timed out: either sampling finished while we waited, or more
        # messages are still on the way — re-check and retry.
        if producer.is_all_sampling_completed():
          return None, True
# Process-wide singleton: set by `init_server`, cleared by
# `wait_and_shutdown_server`.
_dist_server: Optional[DistServer] = None
r""" ``DistServer`` instance of the current process.
"""
def get_server() -> DistServer:
  r""" Return the ``DistServer`` instance of the current process
  (``None`` until ``init_server`` has been called).
  """
  return _dist_server
def init_server(num_servers: int, server_rank: int, dataset: DistDataset,
                master_addr: str, master_port: int, num_clients: int = 0,
                num_rpc_threads: int = 16, request_timeout: int = 180,
                server_group_name: Optional[str] = None,
                is_dynamic: bool = False):
  r""" Initialize the current process as a server and establish connections
  with all other servers and clients. Note that this method should be called
  only in the server-client distribution mode.

  Args:
    num_servers (int): Number of processes participating in the server group.
    server_rank (int): Rank of the current process within the server group
      (it should be a number between 0 and ``num_servers``-1).
    dataset (DistDataset): The ``DistDataset`` object of a partition of graph
      data and feature data, along with distributed partition book info.
    master_addr (str): The master TCP address for RPC connection between all
      servers and clients, the value of this parameter should be same for all
      servers and clients.
    master_port (int): The master TCP port for RPC connection between all
      servers and clients, the value of this parameter should be same for all
      servers and clients.
    num_clients (int): Number of processes participating in the client group.
      If ``is_dynamic`` is ``True``, this parameter will be ignored.
    num_rpc_threads (int): The number of RPC worker threads used for the
      current server to respond remote requests. (Default: ``16``).
    request_timeout (int): The max timeout seconds for remote requests,
      otherwise an exception will be raised. (Default: ``180``).
    server_group_name (str): A unique name of the server group that current
      process belongs to. If set to ``None``, a default name will be used.
      (Default: ``None``).
    is_dynamic (bool): Whether the world size is dynamic. (Default: ``False``).
  """
  if server_group_name:
    # Presumably RPC worker names disallow '-' — normalize to '_'.
    # TODO(review): confirm against the RPC naming rules.
    server_group_name = server_group_name.replace('-', '_')
  _set_server_context(num_servers, server_rank, server_group_name, num_clients)
  global _dist_server
  _dist_server = DistServer(dataset=dataset)
  init_rpc(master_addr, master_port, num_rpc_threads, request_timeout,
           is_dynamic=is_dynamic)
def wait_and_shutdown_server():
  r""" Block until every client has been shut down, then shut down the
  server on the current process and destroy all RPC connections.
  """
  ctx = get_context()
  # Guard: nothing to do if this process never became a server.
  if ctx is None:
    logging.warning("'wait_and_shutdown_server': try to shutdown server when "
                    "the current process has not been initialized as a server.")
    return
  if not ctx.is_server():
    raise RuntimeError(f"'wait_and_shutdown_server': role type of "
                       f"the current process context is not a server, "
                       f"got {ctx.role}.")
  global _dist_server
  _dist_server.wait_for_exit()
  _dist_server.shutdown()
  _dist_server = None
  barrier()
  shutdown_rpc()
def _call_func_on_server(func, *args, **kwargs):
r""" A callee entry for remote requests on the server side.
"""
if not callable(func):
logging.warning(f"'_call_func_on_server': receive a non-callable "
f"function target {func}")
return None
server = get_server()
if hasattr(server, func.__name__):
return func(server, *args, **kwargs)
return func(*args, **kwargs)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/distributed/dist_loader.py | graphlearn_torch/python/distributed/dist_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List, Optional, Union
import concurrent
import torch
from torch_geometric.data import Data, HeteroData
from ..channel import SampleMessage, ShmChannel, RemoteReceivingChannel
from ..loader import to_data, to_hetero_data
from ..sampler import (
NodeSamplerInput, EdgeSamplerInput, RemoteSamplerInput, SamplerOutput,
HeteroSamplerOutput, SamplingConfig, SamplingType
)
from ..typing import (NodeType, EdgeType, as_str, reverse_edge_type)
from ..utils import get_available_device, ensure_device, python_exit_status
from .dist_client import request_server
from .dist_context import get_context
from .dist_dataset import DistDataset
from .dist_options import (
CollocatedDistSamplingWorkerOptions,
MpDistSamplingWorkerOptions,
RemoteDistSamplingWorkerOptions,
AllDistSamplingWorkerOptions,
)
from .dist_sampling_producer import (
DistMpSamplingProducer, DistCollocatedSamplingProducer
)
from .dist_server import DistServer
from .rpc import rpc_is_initialized
class DistLoader(object):
  r""" Generic data loader base that performs distributed sampling for
  mini-batch GNN training on large-scale graphs.

  The sampling backend is selected by the type of ``worker_options``:

  * ``CollocatedDistSamplingWorkerOptions`` (or ``None``): one collocated
    sampler runs synchronously on the current process (non-server mode,
    local ``data`` required).
  * ``MpDistSamplingWorkerOptions``: samplers run on spawned subprocesses
    and stream results through a shared-memory channel (non-server mode,
    local ``data`` required).
  * ``RemoteDistSamplingWorkerOptions``: samplers run on remote server
    nodes and stream results through a remote receiving channel
    (server-client mode; the current process must be a client).

  Args:
    data (DistDataset, optional): The ``DistDataset`` object of a
      partition of graph data and feature data, along with distributed
      partition books. Must be provided in non-server distribution mode.
    input_data: The seeds (nodes or edges, possibly remote) for which
      neighbors or subgraphs are sampled to create mini-batches.
    sampling_config (SamplingConfig): The configuration info for sampling.
    to_device (torch.device, optional): Target device for sampled
      results; defaults to the current cuda device if available,
      otherwise CPU. (default: ``None``)
    worker_options (optional): See above. (default: ``None``)
  """
  def __init__(
    self,
    data: Optional[DistDataset],
    input_data: Union[NodeSamplerInput, EdgeSamplerInput, RemoteSamplerInput,
                      List[RemoteSamplerInput]],
    sampling_config: SamplingConfig,
    to_device: Optional[torch.device] = None,
    worker_options: Optional[AllDistSamplingWorkerOptions] = None
  ):
    self.data = data
    self.input_data = input_data
    # Mirror the sampling config fields as attributes for convenience.
    self.sampling_type = sampling_config.sampling_type
    self.num_neighbors = sampling_config.num_neighbors
    self.batch_size = sampling_config.batch_size
    self.shuffle = sampling_config.shuffle
    self.drop_last = sampling_config.drop_last
    self.with_edge = sampling_config.with_edge
    self.with_weight = sampling_config.with_weight
    self.collect_features = sampling_config.collect_features
    self.edge_dir = sampling_config.edge_dir
    self.sampling_config = sampling_config
    self.to_device = get_available_device(to_device)
    self.worker_options = worker_options
    self._shutdowned = False
    if self.worker_options is None:
      self.worker_options = CollocatedDistSamplingWorkerOptions()
    self._is_collocated_worker = isinstance(
      self.worker_options, CollocatedDistSamplingWorkerOptions)
    self._is_mp_worker = isinstance(
      self.worker_options, MpDistSamplingWorkerOptions)
    self._is_remote_worker = isinstance(
      self.worker_options, RemoteDistSamplingWorkerOptions)
    if self.data is not None:
      self.num_data_partitions = self.data.num_partitions
      self.data_partition_idx = self.data.partition_idx
      self._set_ntypes_and_etypes(
        self.data.get_node_types(), self.data.get_edge_types())
    self._num_recv = 0
    self._epoch = 0
    ctx = get_context()
    if ctx is None:
      raise RuntimeError(f"'{self.__class__.__name__}': the distributed "
                         f"context of has not been initialized.")
    if self._is_remote_worker:
      # --- Remote sampling workers on server nodes ---
      if not ctx.is_client():
        raise RuntimeError(f"'{self.__class__.__name__}': `DistNeighborLoader` "
                           f"must be used on a client worker process.")
      # End of an epoch is signaled by the servers, not by a local count.
      self._num_expected = float('inf')
      self._with_channel = True
      opts = self.worker_options
      self._server_rank_list = opts.server_rank \
        if isinstance(opts.server_rank, List) else [opts.server_rank]
      self._input_data_list = self.input_data \
        if isinstance(self.input_data, List) else [self.input_data]
      self._input_type = self._input_data_list[0].input_type
      self.num_data_partitions, self.data_partition_idx, ntypes, etypes = \
        request_server(self._server_rank_list[0], DistServer.get_dataset_meta)
      self._set_ntypes_and_etypes(ntypes, etypes)
      self._producer_id_list = []
      # NOTE(review): rebinding the loop variable here does not modify
      # `_input_data_list` — confirm whether the CPU move is intended to
      # take effect on the submitted inputs.
      for input_data in self._input_data_list:
        if not isinstance(input_data, RemoteSamplerInput):
          input_data = input_data.to(torch.device('cpu'))
      # Create one producer per server, concurrently.
      with concurrent.futures.ThreadPoolExecutor() as executor:
        pending = [
          executor.submit(request_server, server_rank,
                          DistServer.create_sampling_producer,
                          input_data, self.sampling_config,
                          self.worker_options)
          for server_rank, input_data in zip(self._server_rank_list,
                                             self._input_data_list)
        ]
        for fut in pending:
          self._producer_id_list.append(fut.result())
      self._channel = RemoteReceivingChannel(
        self._server_rank_list, self._producer_id_list,
        self.worker_options.prefetch_size
      )
    else:
      # --- Local sampling (collocated or multiprocessing) ---
      self._input_len = len(self.input_data)
      self._input_type = self.input_data.input_type
      self._num_expected = self._input_len // self.batch_size
      if not self.drop_last and self._input_len % self.batch_size != 0:
        self._num_expected += 1
      if self._is_collocated_worker:
        if not ctx.is_worker():
          raise RuntimeError(f"'{self.__class__.__name__}': only supports "
                             f"launching a collocated sampler with a non-server "
                             f"distribution mode, current role of distributed "
                             f"context is {ctx.role}.")
        if self.data is None:
          raise ValueError(f"'{self.__class__.__name__}': missing input dataset "
                           f"when launching a collocated sampler.")
        self._with_channel = False
        self._collocated_producer = DistCollocatedSamplingProducer(
          self.data, self.input_data, self.sampling_config,
          self.worker_options, self.to_device
        )
        self._collocated_producer.init()
      elif self._is_mp_worker:
        if not ctx.is_worker():
          raise RuntimeError(f"'{self.__class__.__name__}': only supports "
                             f"launching multiprocessing sampling workers with "
                             f"a non-server distribution mode, current role of "
                             f"distributed context is {ctx.role}.")
        if self.data is None:
          raise ValueError(f"'{self.__class__.__name__}': missing input dataset "
                           f"when launching multiprocessing sampling workers.")
        self._with_channel = True
        self.worker_options._set_worker_ranks(ctx)
        self._channel = ShmChannel(self.worker_options.channel_capacity,
                                   self.worker_options.channel_size)
        if self.worker_options.pin_memory:
          self._channel.pin_memory()
        self._mp_producer = DistMpSamplingProducer(
          self.data, self.input_data, self.sampling_config,
          self.worker_options, self._channel
        )
        self._mp_producer.init()
      else:
        raise ValueError(f"'{self.__class__.__name__}': found invalid "
                         f"worker options type '{type(worker_options)}'")

  def __del__(self):
    # Skip cleanup while the interpreter itself is shutting down.
    if python_exit_status is True or python_exit_status is None:
      return
    self.shutdown()

  def shutdown(self):
    if self._shutdowned:
      return
    if self._is_collocated_worker:
      self._collocated_producer.shutdown()
    elif self._is_mp_worker:
      self._mp_producer.shutdown()
    else:
      # Remote mode: ask each server to destroy its producer, but only
      # while the RPC layer is still alive.
      if rpc_is_initialized() is True:
        for server_rank, producer_id in zip(self._server_rank_list,
                                            self._producer_id_list):
          request_server(server_rank, DistServer.destroy_sampling_producer,
                         producer_id)
    self._shutdowned = True

  def __next__(self):
    if self._num_recv == self._num_expected:
      raise StopIteration
    raw = self._channel.recv() if self._with_channel \
      else self._collocated_producer.sample()
    batch = self._collate_fn(raw)
    self._num_recv += 1
    return batch

  def __iter__(self):
    self._num_recv = 0
    if self._is_collocated_worker:
      self._collocated_producer.reset()
    elif self._is_mp_worker:
      self._mp_producer.produce_all()
    else:
      for server_rank, producer_id in zip(self._server_rank_list,
                                          self._producer_id_list):
        request_server(server_rank, DistServer.start_new_epoch_sampling,
                       producer_id, self._epoch)
      self._channel.reset()
    self._epoch += 1
    return self

  def _set_ntypes_and_etypes(self, node_types: List[NodeType],
                             edge_types: List[EdgeType]):
    # Cache graph types plus the mapping from sampled edge-type strings to
    # the (possibly reversed) edge types used in the output data.
    self._node_types = node_types
    self._edge_types = edge_types
    self._reversed_edge_types = []
    self._etype_str_to_rev = {}
    if self._edge_types is not None:
      for etype in self._edge_types:
        rev_etype = reverse_edge_type(etype)
        if self.edge_dir == 'out':
          self._reversed_edge_types.append(rev_etype)
          self._etype_str_to_rev[as_str(etype)] = rev_etype
        elif self.edge_dir == 'in':
          self._reversed_edge_types.append(etype)
          self._etype_str_to_rev[as_str(rev_etype)] = etype

  def _collate_fn(
    self,
    msg: SampleMessage
  ) -> Union[Data, HeteroData]:
    r""" Collate a sampled message into a PyG ``Data``/``HeteroData``."""
    ensure_device(self.to_device)
    is_hetero = bool(msg['#IS_HETERO'])
    # Extract metadata entries (keys prefixed with '#META.').
    metadata = {}
    for key in msg.keys():
      if key.startswith('#META.'):
        metadata[str(key[6:])] = msg[key].to(self.to_device)
    if len(metadata) == 0:
      metadata = None
    if is_hetero:
      # --- Heterogeneous sampling results ---
      node_dict, row_dict, col_dict, edge_dict = {}, {}, {}, {}
      nfeat_dict, efeat_dict = {}, {}
      num_sampled_nodes_dict, num_sampled_edges_dict = {}, {}
      for ntype in self._node_types:
        ids_key = f'{as_str(ntype)}.ids'
        if ids_key in msg:
          node_dict[ntype] = msg[ids_key].to(self.to_device)
        nfeat_key = f'{as_str(ntype)}.nfeats'
        if nfeat_key in msg:
          nfeat_dict[ntype] = msg[nfeat_key].to(self.to_device)
        num_sampled_nodes_key = f'{as_str(ntype)}.num_sampled_nodes'
        if num_sampled_nodes_key in msg:
          num_sampled_nodes_dict[ntype] = msg[num_sampled_nodes_key]
      for etype_str, rev_etype in self._etype_str_to_rev.items():
        rows_key = f'{etype_str}.rows'
        cols_key = f'{etype_str}.cols'
        if rows_key in msg:
          # The edge index should be reversed.
          row_dict[rev_etype] = msg[cols_key].to(self.to_device)
          col_dict[rev_etype] = msg[rows_key].to(self.to_device)
        eids_key = f'{etype_str}.eids'
        if eids_key in msg:
          edge_dict[rev_etype] = msg[eids_key].to(self.to_device)
        num_sampled_edges_key = f'{etype_str}.num_sampled_edges'
        if num_sampled_edges_key in msg:
          num_sampled_edges_dict[rev_etype] = msg[num_sampled_edges_key]
        efeat_key = f'{etype_str}.efeats'
        if efeat_key in msg:
          efeat_dict[rev_etype] = msg[efeat_key].to(self.to_device)
      if len(nfeat_dict) == 0:
        nfeat_dict = None
      if len(efeat_dict) == 0:
        efeat_dict = None
      if self.sampling_config.sampling_type in [SamplingType.NODE,
                                                SamplingType.SUBGRAPH]:
        batch_key = f'{self._input_type}.batch'
        if msg.get(batch_key) is not None:
          batch_dict = {
            self._input_type: msg[f'{self._input_type}.batch'].to(self.to_device)
          }
        else:
          # Seed nodes always come first in the sampled id list.
          batch_dict = {
            self._input_type: node_dict[self._input_type][:self.batch_size]
          }
        batch_labels_key = f'{self._input_type}.nlabels'
        if batch_labels_key in msg:
          batch_labels = msg[batch_labels_key].to(self.to_device)
        else:
          batch_labels = None
        batch_label_dict = {self._input_type: batch_labels}
      else:
        batch_dict = {}
        batch_label_dict = {}
      output = HeteroSamplerOutput(node_dict, row_dict, col_dict,
                                   edge_dict if len(edge_dict) else None,
                                   batch_dict,
                                   num_sampled_nodes=num_sampled_nodes_dict,
                                   num_sampled_edges=num_sampled_edges_dict,
                                   edge_types=self._reversed_edge_types,
                                   input_type=self._input_type,
                                   device=self.to_device,
                                   metadata=metadata)
      res_data = to_hetero_data(
        output, batch_label_dict, nfeat_dict, efeat_dict, self.edge_dir)
    else:
      # --- Homogeneous sampling results ---
      ids = msg['ids'].to(self.to_device)
      rows = msg['rows'].to(self.to_device)
      cols = msg['cols'].to(self.to_device)
      eids = msg['eids'].to(self.to_device) if 'eids' in msg else None
      num_sampled_nodes = msg['num_sampled_nodes'] if 'num_sampled_nodes' in msg else None
      num_sampled_edges = msg['num_sampled_edges'] if 'num_sampled_edges' in msg else None
      nfeats = msg['nfeats'].to(self.to_device) if 'nfeats' in msg else None
      efeats = msg['efeats'].to(self.to_device) if 'efeats' in msg else None
      if self.sampling_config.sampling_type in [SamplingType.NODE,
                                                SamplingType.SUBGRAPH]:
        if msg.get('batch') is not None:
          batch = msg['batch'].to(self.to_device)
        else:
          batch = ids[:self.batch_size]
        batch_labels = msg['nlabels'].to(self.to_device) if 'nlabels' in msg else None
      else:
        batch = None
        batch_labels = None
      # The edge index should be reversed.
      output = SamplerOutput(ids, cols, rows, eids, batch,
                             num_sampled_nodes, num_sampled_edges,
                             device=self.to_device, metadata=metadata)
      res_data = to_data(output, batch_labels, nfeats, efeats)
    return res_data
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/topo.py | graphlearn_torch/python/utils/topo.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Optional, Tuple
import torch
import torch_sparse
def ptr2ind(ptr: torch.Tensor) -> torch.Tensor:
  r""" Expand a CSR-style index pointer tensor into per-element indices.

  Position ``i`` is repeated ``ptr[i+1] - ptr[i]`` times, undoing the
  compression performed by the index pointer.
  """
  counts = ptr[1:] - ptr[:-1]
  positions = torch.arange(ptr.numel() - 1, device=ptr.device)
  return torch.repeat_interleave(positions, counts)
def coo_to_csr(
  row: torch.Tensor,
  col: torch.Tensor,
  edge_id: Optional[torch.Tensor] = None,
  edge_weight: Optional[torch.Tensor] = None,
  node_sizes: Optional[Tuple[int, int]] = None
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
  r""" Transform an edge index from COO to CSR format.

  Args:
    row (torch.Tensor): The row indices.
    col (torch.Tensor): The column indices.
    edge_id (torch.Tensor, optional): The edge ids corresponding to the input
      edge index.
    edge_weight (torch.Tensor, optional): The edge weights corresponding to the
      input edge index.
    node_sizes (Tuple[int, int], optional): The number of nodes in row and col.

  Returns:
    ``(rowptr, col, edge_ids, edge_weights)`` where ``edge_ids`` /
    ``edge_weights`` are permuted to the CSR edge order (``None`` when the
    corresponding input is ``None``).
  """
  if node_sizes is None:
    # Infer node counts from the largest id seen on each side.
    node_sizes = (int(row.max()) + 1, int(col.max()) + 1)
  assert len(node_sizes) == 2
  assert row.numel() == col.numel()
  if edge_id is not None:
    assert edge_id.numel() == row.numel()
  # torch_sparse sorts the edges into CSR order; attaching edge_id as the
  # value lets us recover the id permutation from storage.value().
  adj_t = torch_sparse.SparseTensor(
    row=row, col=col, value=edge_id, sparse_sizes=node_sizes
  )
  edge_ids, edge_weights = adj_t.storage.value(), None
  if edge_weight is not None:
    assert edge_weight.numel() == row.numel()
    # Second pass with weights as the value to obtain the permuted weights.
    adj_w = torch_sparse.SparseTensor(
      row=row, col=col, value=edge_weight, sparse_sizes=node_sizes
    )
    edge_weights = adj_w.storage.value()
  return adj_t.storage.rowptr(), adj_t.storage.col(), edge_ids, edge_weights
def coo_to_csc(
  row: torch.Tensor,
  col: torch.Tensor,
  edge_id: Optional[torch.Tensor] = None,
  edge_weight: Optional[torch.Tensor] = None,
  node_sizes: Optional[Tuple[int, int]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
  r""" Transform an edge index from COO to CSC format.

  Implemented by running :func:`coo_to_csr` on the transposed graph
  (row/col swapped, node sizes swapped accordingly).

  Args:
    row (torch.Tensor): The row indices.
    col (torch.Tensor): The column indices.
    edge_id (torch.Tensor, optional): The edge ids corresponding to the input
      edge index.
    edge_weight (torch.Tensor, optional): The edge weights corresponding to
      the input edge index.
    node_sizes (Tuple[int, int], optional): The number of nodes in row and col.

  Returns:
    ``(row, colptr, edge_ids, edge_weights)`` of the CSC representation.
  """
  swapped_sizes = None if node_sizes is None else (node_sizes[1], node_sizes[0])
  colptr, out_row, out_edge_id, out_edge_weight = coo_to_csr(
    col, row, edge_id, edge_weight, swapped_sizes
  )
  return out_row, colptr, out_edge_id, out_edge_weight
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/exit_status.py | graphlearn_torch/python/utils/exit_status.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import atexit
python_exit_status = False
r"""Whether Python is shutting down. This flag is guaranteed to be set before
the Python core library resources are freed, but Python may already be exiting
for some time when this is set.
Hook to set this flag is `_set_python_exit_flag`, and is same as used in
Pytorch's dataloader:
https://github.com/pytorch/pytorch/blob/f1a6f32b72b7c2b73277f89bbf7e7459a400d80a/torch/utils/data/_utils/__init__.py
"""
def _set_python_exit_flag():
  # atexit hook: record that the interpreter is shutting down so background
  # workers can skip cleanup that depends on core resources still existing.
  global python_exit_status
  python_exit_status = True
atexit.register(_set_python_exit_flag)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/singleton.py | graphlearn_torch/python/utils/singleton.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
def singleton(cls):
  r""" Class decorator that turns ``cls`` into a singleton.

  The returned factory instantiates ``cls`` (with no arguments) on first
  call and hands back that cached instance on every later call.
  """
  _instances = {}
  def _get_instance():
    try:
      return _instances[cls]
    except KeyError:
      _instances[cls] = cls()
      return _instances[cls]
  return _get_instance
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/common.py | graphlearn_torch/python/utils/common.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import socket
from typing import Any, Dict, Callable, Optional, Literal
from ..typing import reverse_edge_type
from .tensor import id2idx
import numpy
import random
import torch
import pickle
def ensure_dir(dir_path: str):
  r""" Create ``dir_path`` (including parents) if it does not already exist.

  Uses ``exist_ok=True`` instead of a check-then-create sequence so that a
  concurrent creation of the same directory cannot raise
  ``FileExistsError``. (If ``dir_path`` exists as a regular file, this now
  raises instead of silently doing nothing — callers expect a directory.)
  """
  os.makedirs(dir_path, exist_ok=True)
def seed_everything(seed: int):
  r"""Sets the seed for generating random numbers in :pytorch:`PyTorch`,
  :obj:`numpy` and :python:`Python` (CPU and all visible CUDA devices).

  Args:
    seed (int): The desired seed.
  """
  for seeder in (random.seed, numpy.random.seed,
                 torch.manual_seed, torch.cuda.manual_seed_all):
    seeder(seed)
def merge_dict(in_dict: Dict[Any, Any], out_dict: Dict[Any, Any]):
  r""" Append each value of ``in_dict`` onto the list kept under the same
  key in ``out_dict``, creating the list on first sight of a key.
  """
  for key, value in in_dict.items():
    out_dict.setdefault(key, []).append(value)
def count_dict(in_dict: Dict[Any, Any], out_dict: Dict[Any, Any], target_len):
  r""" Record ``len(v)`` for every ``(k, v)`` of ``in_dict`` as the
  ``target_len``-th entry of ``out_dict[k]``, zero-padding earlier missing
  positions so each list reaches length ``target_len``.
  """
  for key, value in in_dict.items():
    counts = out_dict.setdefault(key, [])
    padding = target_len - len(counts) - 1
    if padding > 0:
      counts.extend([0] * padding)
    counts.append(len(value))
def get_free_port(host: str = 'localhost') -> int:
  r""" Ask the OS for a TCP port that is currently unused on ``host``.

  The socket is closed before returning, so the port is only *likely* to
  still be free when the caller binds it.
  """
  with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.bind((host, 0))
    return sock.getsockname()[1]
def index_select(data, index):
if data is None:
return None
if isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k] = index_select(v, index)
return new_data
if isinstance(data, list):
new_data = []
for v in data:
new_data.append(index_select(v, index))
return new_data
if isinstance(data, tuple):
return tuple(index_select(list(data), index))
if isinstance(index, tuple):
start, end = index
return data[start:end]
return data[index]
def merge_hetero_sampler_output(
  in_sample: Any, out_sample: Any, device,
  edge_dir: Literal['in', 'out']='out'):
  r""" Merge heterogeneous sampler output ``in_sample`` into ``out_sample``
  and return the merged ``out_sample``.

  Both samples are first converted from local (per-type) indices to global
  node ids, their node/row/col/edge dicts are concatenated, and row/col are
  re-localized against the merged node dict. For 'out' sampling, edge types
  are reversed on output (message passing direction is opposite to the
  sampling direction).
  """
  def subid2gid(sample):
    # Replace local row/col indices by global node ids; k[0]/k[-1] are the
    # src/dst node types of edge type ``k``.
    for k, v in sample.row.items():
      sample.row[k] = sample.node[k[0]][v]
    for k, v in sample.col.items():
      sample.col[k] = sample.node[k[-1]][v]
  def merge_tensor_dict(in_dict, out_dict, unique=False):
    # Concatenate (and optionally dedup) tensors under matching keys.
    for k, v in in_dict.items():
      vals = out_dict.get(k, torch.tensor([], device=device))
      out_dict[k] = torch.cat((vals, v)).unique() if unique \
        else torch.cat((vals, v))
  subid2gid(in_sample)
  subid2gid(out_sample)
  merge_tensor_dict(in_sample.node, out_sample.node, unique=True)
  merge_tensor_dict(in_sample.row, out_sample.row)
  merge_tensor_dict(in_sample.col, out_sample.col)
  # Re-localize global row/col ids against the merged, deduped node dicts.
  for k, v in out_sample.row.items():
    out_sample.row[k] = id2idx(out_sample.node[k[0]])[v.to(torch.int64)]
  for k, v in out_sample.col.items():
    out_sample.col[k] = id2idx(out_sample.node[k[-1]])[v.to(torch.int64)]
  # if in_sample.batch is not None and out_sample.batch is not None:
  #   merge_tensor_dict(in_sample.batch, out_sample.batch)
  if in_sample.edge is not None and out_sample.edge is not None:
    merge_tensor_dict(in_sample.edge, out_sample.edge, unique=False)
  if out_sample.edge_types is not None and in_sample.edge_types is not None:
    out_sample.edge_types = list(set(out_sample.edge_types) | set(in_sample.edge_types))
    if edge_dir == 'out':
      # NOTE(review): mirrors format_hetero_sampler_output — self-loop edge
      # types are kept as-is, all others are reversed.
      out_sample.edge_types = [
        reverse_edge_type(etype) if etype[0] != etype[-1] else etype
        for etype in out_sample.edge_types
      ]
  return out_sample
def format_hetero_sampler_output(in_sample: Any,
                                 edge_dir: Literal['in', 'out'] = 'out'):
  r""" Deduplicate the per-type node ids of ``in_sample`` in place and, for
  'out' sampling, reverse its non-self-loop edge types (to match PyG's
  message passing direction). Returns ``in_sample``.

  Args:
    in_sample: A heterogeneous sampler output with ``node`` (dict of id
      tensors) and ``edge_types`` attributes.
    edge_dir: Sampling direction; edge types are reversed only for 'out'.

  BUG FIX: the signature previously read ``edge_dir=Literal['in', 'out']``,
  assigning the ``Literal`` *typing object* as the default value (so the
  ``edge_dir == 'out'`` check was always False by default). It is now a
  proper annotation with default ``'out'``, matching
  ``merge_hetero_sampler_output``.
  """
  for k in in_sample.node.keys():
    in_sample.node[k] = in_sample.node[k].unique()
  if in_sample.edge_types is not None:
    if edge_dir == 'out':
      in_sample.edge_types = [
        reverse_edge_type(etype) if etype[0] != etype[-1] else etype
        for etype in in_sample.edge_types
      ]
  return in_sample
# Append a tensor to a file using pickle.
def append_tensor_to_file(filename, tensor):
  r""" Pickle ``tensor`` and append it to ``filename`` (paired with
  :func:`load_and_concatenate_tensors` for reading back).
  """
  try:
    # Append in binary mode so successive dumps accumulate in one file.
    with open(filename, 'ab') as f:
      pickle.dump(tensor, f)
  except Exception as e:
    # NOTE(review): failures are only printed, never raised — presumably a
    # deliberate best-effort write; confirm callers do not rely on success.
    print('Error:', e)
# Load a file containing tensors and concatenate them into a single tensor.
def load_and_concatenate_tensors(filename, device):
  r""" Read every tensor pickled into ``filename`` (see
  :func:`append_tensor_to_file`) and concatenate them along dim 0 into one
  pre-allocated tensor on ``device``.

  NOTE(review): ``pickle.load`` is unsafe on untrusted files — only use on
  files this process (or a trusted peer) wrote. Assumes the file contains at
  least one tensor and all tensors share trailing dims/dtype — TODO confirm.
  """
  # Read pickled tensors one by one until EOF.
  with open(filename, 'rb') as f:
    tensor_list = []
    while True:
      try:
        tensor = pickle.load(f)
        tensor_list.append(tensor)
      except EOFError:
        break
  # Pre-allocate the combined tensor to avoid a torch.cat copy.
  combined_tensor = torch.empty((sum(t.shape[0] for t in tensor_list),
    *tensor_list[0].shape[1:]), dtype=tensor_list[0].dtype, device=device)
  # Copy each chunk into its slice of the combined tensor.
  start_idx = 0
  for tensor in tensor_list:
    end_idx = start_idx + tensor.shape[0]
    combined_tensor[start_idx:end_idx] = tensor.to(device)
    start_idx = end_idx
  return combined_tensor
## Default function to select ids in `srcs` that belong to a specific partition.
def default_id_select(srcs, p_mask, node_pb=None):
  r""" Keep only the entries of ``srcs`` whose ``p_mask`` bit is set.
  ``node_pb`` is accepted for interface compatibility and ignored.
  """
  return srcs.masked_select(p_mask)
## Default function to filter src ids in a specific partition from the partition book.
def default_id_filter(node_pb, partition_idx):
  r""" Return the indices of ``node_pb`` entries equal to ``partition_idx``."""
  return (node_pb == partition_idx).nonzero(as_tuple=True)[0]
def save_ckpt(
  ckpt_seq: int,
  ckpt_dir: str,
  model: torch.nn.Module,
  optimizer: Optional[torch.optim.Optimizer] = None,
  epoch: float = 0,
):
  """
  Saves a checkpoint of the model's state.

  Parameters:
    ckpt_seq (int): The sequence number of the checkpoint.
    ckpt_dir (str): The directory where the checkpoint will be saved.
    model (torch.nn.Module): The model to be saved.
    optimizer (Optional[torch.optim.Optimizer]): The optimizer, if any.
    epoch (float): The current epoch. Default is 0.
  """
  # exist_ok avoids the check-then-create race of the old isdir() guard.
  os.makedirs(ckpt_dir, exist_ok=True)
  ckpt_path = os.path.join(ckpt_dir, f"model_seq_{ckpt_seq}.ckpt")
  ckpt = {
    'seq': ckpt_seq,
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
  }
  # Explicit None check: truthiness of an Optimizer is not the contract here.
  if optimizer is not None:
    ckpt['optimizer_state_dict'] = optimizer.state_dict()
  torch.save(ckpt, ckpt_path)
def load_ckpt(
  ckpt_seq: int,
  ckpt_dir: str,
  model: torch.nn.Module,
  optimizer: Optional[torch.optim.Optimizer] = None,
) -> float:
  """
  Loads a checkpoint of the model's state, returns the epoch of the
  checkpoint, or ``-1`` if the checkpoint file does not exist.

  Parameters:
    ckpt_seq (int): The sequence number of the checkpoint.
    ckpt_dir (str): The directory the checkpoint is loaded from.
    model (torch.nn.Module): The model whose state is restored in place.
    optimizer (Optional[torch.optim.Optimizer]): If given, its state is
      restored too (the checkpoint must then contain an optimizer state).
  """
  ckpt_path = os.path.join(ckpt_dir, f"model_seq_{ckpt_seq}.ckpt")
  try:
    ckpt = torch.load(ckpt_path)
  except FileNotFoundError:
    return -1
  model.load_state_dict(ckpt['model_state_dict'])
  # Default to 0 (save_ckpt's default) instead of returning None for
  # checkpoints written without an 'epoch' field — the declared return
  # type is float.
  epoch = ckpt.get('epoch', 0)
  if optimizer is not None:
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
  return epoch
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/units.py | graphlearn_torch/python/utils/units.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Binary (power-of-1024) multipliers for human-readable sizes.
UNITS = {
  # Two-letter suffixes.
  "KB": 2**10,
  "MB": 2**20,
  "GB": 2**30,
  # Single-letter shorthands.
  "K": 2**10,
  "M": 2**20,
  "G": 2**30,
}

def parse_size(sz) -> int:
  r""" Parse a human-readable size into a number of bytes.

  Accepts a plain ``int``/``float`` (truncated), or a string such as
  ``"512"``, ``"4K"`` or ``"1.5GB"``. Suffixes are case-insensitive and use
  binary multiples (see ``UNITS``); surrounding whitespace is ignored.

  Raises:
    ValueError: if ``sz`` is not a recognized size.
  """
  if isinstance(sz, int):
    return sz
  if isinstance(sz, float):
    return int(sz)
  if isinstance(sz, str):
    text = sz.strip().upper()
    # Try longer suffixes first so e.g. "KB" can never be shadowed by "K"
    # regardless of dict/sort order.
    for suf in sorted(UNITS, key=len, reverse=True):
      if text.endswith(suf):
        return int(float(text[:-len(suf)]) * UNITS[suf])
    # Generalization: a bare numeric string is a plain byte count
    # (previously this raised).
    try:
      return int(float(text))
    except ValueError:
      pass
  raise ValueError(f"invalid size: {sz}")
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/__init__.py | graphlearn_torch/python/utils/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .common import *
from .device import *
from .exit_status import *
from .mixin import *
from .singleton import *
from .tensor import *
from .topo import *
from .units import *
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/build_glt.py | graphlearn_torch/python/utils/build_glt.py | import os
import platform
import shutil
import subprocess
from torch.utils.cpp_extension import CppExtension
def glt_v6d_ext_module(
  name: str,
  root_path: str,
):
  r""" Build the setuptools ``CppExtension`` for the vineyard (v6d) bindings.

  Args:
    name: Importable name of the produced extension module.
    root_path: Repository root; used as an include dir and to locate the
      C++ sources.
  """
  include_dirs = []
  library_dirs = []
  libraries = []
  extra_cxx_flags = []
  include_dirs.append(root_path)
  # We assume that glt_v6d is built in a graphscope environment; pick up
  # vineyard headers/libs from GRAPHSCOPE_HOME when it is set.
  if 'GRAPHSCOPE_HOME' in os.environ:
    include_dirs.append(os.environ['GRAPHSCOPE_HOME'] + '/include' + '/vineyard')
    include_dirs.append(os.environ['GRAPHSCOPE_HOME'] + '/include' + '/vineyard/contrib')
    include_dirs.append(os.environ['GRAPHSCOPE_HOME'] + '/include')
    library_dirs.append(os.environ['GRAPHSCOPE_HOME'] + '/lib')
  if platform.system() == 'Darwin':
    # macOS: resolve the Homebrew prefix and use its include/lib trees.
    homebrew_prefix = subprocess.check_output([shutil.which('brew'), '--prefix']) \
      .decode('utf-8', errors='ignore').strip()
    include_dirs.append(os.path.join(homebrew_prefix, 'include'))
    include_dirs.append(os.path.join(homebrew_prefix, 'include', 'vineyard'))
    include_dirs.append(os.path.join(homebrew_prefix, 'include', 'vineyard', 'contrib'))
    library_dirs.append(os.path.join(homebrew_prefix, 'lib'))
  if platform.system() == "Linux":
    # NOTE(review): hard-coded x86_64 OpenMPI include path — confirm for
    # non-x86 Linux hosts.
    include_dirs.append('/usr/lib/x86_64-linux-gnu/openmpi/include')
    library_dirs.append('/usr/local/lib')
  libraries.append('pthread')
  libraries.append('mpi')
  libraries.append('glog')
  libraries.append('vineyard_basic')
  libraries.append('vineyard_client')
  libraries.append('vineyard_graph')
  libraries.append('vineyard_io')
  extra_cxx_flags.append('-std=c++17')
  # All v6d C++ sources plus the pybind entry point.
  sources = [os.path.join(root_path, 'graphlearn_torch/python/py_export_v6d.cc')]
  import glob
  sources += glob.glob(
    os.path.join(root_path, 'graphlearn_torch/v6d/**.cc'), recursive=True
  )
  extra_link_args = []
  define_macros = [('WITH_VINEYARD', 'ON')]
  undef_macros = []
  return CppExtension(
    name,
    sources,
    extra_link_args=extra_link_args,
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    libraries=libraries,
    extra_compile_args={
      'cxx': extra_cxx_flags,
    },
    define_macros=define_macros,
    undef_macros=undef_macros,
  )
def glt_ext_module(
  name: str,
  root_path: str,
  with_cuda: bool = False,
  release: bool = False
):
  r""" Build the setuptools ``CppExtension`` for the core glt C++ sources.

  Args:
    name: Importable name of the produced extension module.
    root_path: Repository root; used as an include dir and to locate sources.
    with_cuda: Also compile the ``.cu`` sources and define ``WITH_CUDA``.
    release: Use the full multi-arch ``nvcc`` gencode list (release wheels).
  """
  include_dirs = []
  library_dirs = []
  libraries = []
  extra_cxx_flags = []
  extra_link_args = []
  define_macros = []
  undef_macros = []
  include_dirs.append(root_path)
  if with_cuda:
    include_dirs.append('/usr/local/cuda/include')
    # BUG FIX: was "'/usr/local/cuda' + 'lib64'", i.e. '/usr/local/cudalib64'
    # (missing path separator), so the CUDA lib dir was never on the link path.
    library_dirs.append('/usr/local/cuda/lib64')
  extra_cxx_flags.append('-std=c++17')
  # Core C++ sources plus the pybind entry point.
  sources = [os.path.join(root_path, 'graphlearn_torch/python/py_export_glt.cc')]
  import glob
  sources += glob.glob(
    os.path.join(root_path, 'graphlearn_torch/csrc/**/**.cc'), recursive=True
  )
  if with_cuda:
    sources += glob.glob(
      os.path.join(root_path, 'graphlearn_torch/csrc/**/**.cu'), recursive=True
    )
    define_macros.append(('WITH_CUDA', 'ON'))
  else:
    undef_macros.append('WITH_CUDA')
  if release:
    # Release builds target a wide range of SM architectures.
    nvcc_flags = [
      '-O3', '--expt-extended-lambda', '-lnuma', '-arch=sm_50',
      '-gencode=arch=compute_50,code=sm_50',
      '-gencode=arch=compute_52,code=sm_52',
      '-gencode=arch=compute_60,code=sm_60',
      '-gencode=arch=compute_61,code=sm_61',
      '-gencode=arch=compute_70,code=sm_70',
      '-gencode=arch=compute_75,code=sm_75',
      '-gencode=arch=compute_75,code=compute_75'
    ]
  else:
    nvcc_flags = ['-O3', '--expt-extended-lambda', '-lnuma']
  return CppExtension(
    name,
    sources,
    extra_link_args=extra_link_args,
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    libraries=libraries,
    extra_compile_args={
      'cxx': extra_cxx_flags,
      'nvcc': nvcc_flags,
    },
    define_macros=define_macros,
    undef_macros=undef_macros,
  )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/tensor.py | graphlearn_torch/python/utils/tensor.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, List, Union
import numpy
import torch
def tensor_equal_with_device(lhs: torch.Tensor, rhs: torch.Tensor):
  r""" Return ``True`` iff the two tensors live on the same device *and*
  hold identical data.
  """
  return lhs.device == rhs.device and torch.equal(lhs, rhs)
def id2idx(ids: Union[List[int], torch.Tensor]):
  r""" Build a dense lookup tensor mapping each id to its position in
  ``ids``.

  The result has length ``max(ids) + 1``; positions for ids not present in
  ``ids`` remain ``0``.
  """
  if not isinstance(ids, torch.Tensor):
    ids = torch.tensor(ids, dtype=torch.int64)
  ids = ids.to(torch.int64)
  mapping = torch.zeros(int(ids.max()) + 1, dtype=torch.int64,
                        device=ids.device)
  mapping[ids] = torch.arange(ids.size(0), dtype=torch.int64,
                              device=ids.device)
  return mapping
def convert_to_tensor(data: Any, dtype: torch.dtype = None):
  r""" Recursively convert numpy arrays inside ``data`` to torch tensors.

  Dict/list/tuple containers are rebuilt with the same structure; existing
  tensors are cast to ``dtype`` when one is given; any other leaf is
  returned unchanged.
  """
  if isinstance(data, dict):
    return {key: convert_to_tensor(val, dtype) for key, val in data.items()}
  if isinstance(data, list):
    return [convert_to_tensor(val, dtype) for val in data]
  if isinstance(data, tuple):
    return tuple(convert_to_tensor(list(data), dtype))
  if isinstance(data, torch.Tensor):
    return data if dtype is None else data.type(dtype)
  if isinstance(data, numpy.ndarray):
    converted = torch.from_numpy(data)
    return converted if dtype is None else converted.type(dtype)
  return data
def apply_to_all_tensor(data: Any, tensor_method, *args, **kwargs):
  r""" Recursively apply ``tensor_method`` to every tensor found in
  ``data``.

  Container structure (dict/list/tuple) is preserved; non-tensor leaves are
  returned untouched.
  """
  def recurse(value):
    return apply_to_all_tensor(value, tensor_method, *args, **kwargs)
  if isinstance(data, dict):
    return {key: recurse(val) for key, val in data.items()}
  if isinstance(data, list):
    return [recurse(val) for val in data]
  if isinstance(data, tuple):
    return tuple(recurse(val) for val in data)
  if isinstance(data, torch.Tensor):
    return tensor_method(data, *args, **kwargs)
  return data
def share_memory(data: Any):
  r""" Move every tensor contained in ``data`` into shared memory by
  recursively applying ``Tensor.share_memory_`` (in-place; the returned
  structure mirrors the input).
  """
  return apply_to_all_tensor(data, torch.Tensor.share_memory_)
def squeeze(data: Any):
  r""" Squeeze all singleton dimensions of every tensor contained in
  ``data`` via :func:`apply_to_all_tensor`; containers are rebuilt with the
  same structure.
  """
  return apply_to_all_tensor(data, torch.Tensor.squeeze)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/mixin.py | graphlearn_torch/python/utils/mixin.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class CastMixin:
  r""" This class is same as PyG's :class:`~torch_geometric.utils.CastMixin`:
  https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/utils/mixin.py
  """
  @classmethod
  def cast(cls, *args, **kwargs):
    # Single positional argument: coerce it rather than constructing anew.
    if len(args) == 1 and not kwargs:
      value = args[0]
      if value is None:
        return None
      if isinstance(value, CastMixin):
        return value              # already the right kind of object
      if isinstance(value, (tuple, list)):
        return cls(*value)        # positional construction
      if isinstance(value, dict):
        return cls(**value)       # keyword construction
    return cls(*args, **kwargs)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/utils/device.py | graphlearn_torch/python/utils/device.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
from typing import Optional
import torch
def get_available_device(device: Optional[torch.device] = None) -> torch.device:
  r""" Resolve a device to use. If ``device`` is not ``None`` it is returned
  (normalized through :class:`torch.device`); otherwise the current CUDA
  device is preferred when available, falling back to CPU.
  """
  if device is None:
    if torch.cuda.is_available():
      return torch.device('cuda', torch.cuda.current_device())
    return torch.device('cpu')
  return torch.device(device)
_cuda_device_assign_lock = threading.RLock()
_cuda_device_rank = 0
def assign_device():
  r""" Assign a device to use; a CUDA device is preferred if available.

  CUDA devices are handed out round-robin across calls, using the
  module-level ``_cuda_device_rank`` counter guarded by
  ``_cuda_device_assign_lock``.
  """
  if torch.cuda.is_available():
    global _cuda_device_rank
    with _cuda_device_assign_lock:
      # Take the current rank and advance the counter, wrapping at the
      # number of visible CUDA devices.
      device_rank = _cuda_device_rank
      _cuda_device_rank = (_cuda_device_rank + 1) % torch.cuda.device_count()
    return torch.device('cuda', device_rank)
  return torch.device('cpu')
def ensure_device(device: torch.device):
  r""" Switch the active CUDA device to ``device`` when it is a CUDA device
  other than the current one; no-op for CPU devices.
  """
  if device.type != 'cuda':
    return
  if device.index != torch.cuda.current_device():
    torch.cuda.set_device(device)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/sampler/neighbor_sampler.py | graphlearn_torch/python/sampler/neighbor_sampler.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
from typing import Dict, Optional, Union, Literal
import torch
import threading
from .. import py_graphlearn_torch as pywrap
from ..data import Graph
from ..typing import NodeType, EdgeType, NumNeighbors, reverse_edge_type
from ..utils import (
merge_dict, merge_hetero_sampler_output, format_hetero_sampler_output,
id2idx, count_dict
)
from .base import (
BaseSampler, EdgeIndex,
NodeSamplerInput, EdgeSamplerInput,
SamplerOutput, HeteroSamplerOutput, NeighborOutput,
)
from .negative_sampler import RandomNegativeSampler
class NeighborSampler(BaseSampler):
r""" Neighbor Sampler.
"""
  def __init__(self,
               graph: Union[Graph, Dict[EdgeType, Graph]],
               num_neighbors: Optional[NumNeighbors] = None,
               device: torch.device=torch.device('cuda:0'),
               with_edge: bool=False,
               with_neg: bool=False,
               with_weight: bool=False,
               strategy: str = 'random',
               edge_dir: Literal['in', 'out'] = 'out',
               seed: int = None):
    r""" Args:
      graph: A homogeneous ``Graph``, or a dict mapping each ``EdgeType``
        to its ``Graph`` for the heterogeneous case.
      num_neighbors: Fan-out per hop; forwarded to
        ``_set_num_neighbors_and_num_hops``.
      device: Sampling device; forced to CPU when the stored graph(s) are
        in 'CPU' mode.
      with_edge: Whether sampled edge ids are also returned.
      with_neg: Whether a negative sampler should be (lazily) prepared.
      with_weight: Whether CPU sampling is edge-weighted.
      strategy: Sampling strategy name (stored as-is).
      edge_dir: Sampling direction, 'in' or 'out'.
      seed: Optional seed for the native random samplers.
    """
    self.graph = graph
    self.num_neighbors = num_neighbors
    self.device = device
    self.with_edge = with_edge
    self.with_neg = with_neg
    self.with_weight = with_weight
    self.strategy = strategy
    self.edge_dir = edge_dir
    # Native sampler handles are created lazily (see lazy_init_*),
    # guarded by ``_sampler_lock``.
    self._subgraph_op = None
    self._sampler = None
    self._neg_sampler = None
    self._inducer = None
    self._sampler_lock = threading.Lock()
    self.is_sampler_initialized = False
    self.is_neg_sampler_initialized = False
    if seed is not None:
      pywrap.RandomSeedManager.getInstance().setSeed(seed)
    if isinstance(self.graph, Graph):  # homo
      self._g_cls = 'homo'
      if self.graph.mode == 'CPU':
        self.device = torch.device('cpu')
    else:  # hetero
      self._g_cls = 'hetero'
      # Collect all edge types and the node types they connect.
      self.edge_types = []
      self.node_types = set()
      for etype, graph in self.graph.items():
        self.edge_types.append(etype)
        self.node_types.add(etype[0])
        self.node_types.add(etype[2])
      # Fall back to CPU sampling when the stored graphs are CPU-mode.
      if self.graph[self.edge_types[0]].mode == 'CPU':
        self.device = torch.device('cpu')
    self._set_num_neighbors_and_num_hops(self.num_neighbors)
  @property
  def subgraph_op(self):
    # Lazily constructed native subgraph operator (CPU or CUDA).
    self.lazy_init_subgraph_op()
    return self._subgraph_op
  def lazy_init_sampler(self):
    # Lazily create the native sampler(s) on first use, with a
    # check / lock / re-check sequence for thread safety.
    if not self.is_sampler_initialized:
      with self._sampler_lock:
        if self._sampler is None:  # re-check under the lock
          if self._g_cls == 'homo':
            if self.device.type == 'cuda':
              self._sampler = pywrap.CUDARandomSampler(self.graph.graph_handler)
            elif self.with_weight == False:
              self._sampler = pywrap.CPURandomSampler(self.graph.graph_handler)
            else:
              # Weighted sampling is only available on CPU here.
              self._sampler = pywrap.CPUWeightedSampler(self.graph.graph_handler)
            self.is_sampler_initialized = True
          else:  # hetero: one native sampler per edge type
            self._sampler = {}
            for etype, g in self.graph.items():
              # NOTE(review): the homo branch tests ``device.type == 'cuda'``
              # while this one tests ``device != torch.device('cpu')`` —
              # confirm the two are meant to be equivalent.
              if self.device != torch.device('cpu'):
                self._sampler[etype] = pywrap.CUDARandomSampler(g.graph_handler)
              elif self.with_weight == False:
                self._sampler[etype] = pywrap.CPURandomSampler(g.graph_handler)
              else:
                self._sampler[etype] = pywrap.CPUWeightedSampler(g.graph_handler)
            self.is_sampler_initialized = True
  def lazy_init_neg_sampler(self):
    # Lazily create the negative sampler(s); no-op unless ``with_neg``.
    if not self.is_neg_sampler_initialized and self.with_neg:
      with self._sampler_lock:
        if self._neg_sampler is None:  # re-check under the lock
          if self._g_cls == 'homo':
            self._neg_sampler = RandomNegativeSampler(
              graph=self.graph,
              mode=self.device.type.upper(),
              edge_dir=self.edge_dir
            )
            self.is_neg_sampler_initialized = True
          else:  # hetero: one negative sampler per edge type
            self._neg_sampler = {}
            for etype, g in self.graph.items():
              self._neg_sampler[etype] = RandomNegativeSampler(
                graph=g,
                mode=self.device.type.upper(),
                edge_dir=self.edge_dir
              )
            self.is_neg_sampler_initialized = True
  def lazy_init_subgraph_op(self):
    # Lazily create the subgraph operator with a check / lock / re-check
    # sequence; CUDA or CPU variant depending on the sampling device.
    if self._subgraph_op is None:
      with self._sampler_lock:
        if self._subgraph_op is None:
          if self.device.type == 'cuda':
            self._subgraph_op = pywrap.CUDASubGraphOp(self.graph.graph_handler)
          else:
            self._subgraph_op = pywrap.CPUSubGraphOp(self.graph.graph_handler)
  def sample_one_hop(
    self,
    input_seeds: torch.Tensor,
    req_num: int,
    etype: EdgeType = None
  ) -> NeighborOutput:
    r""" Sample up to ``req_num`` neighbors for every seed node.

    Args:
      input_seeds: Seed node ids (moved to the sampling device).
      req_num: Requested number of neighbors per seed.
      etype: Edge type to sample along (heterogeneous graphs only).

    Returns:
      A ``NeighborOutput`` holding the flat neighbor ids, the per-seed
      neighbor counts, and — when ``with_edge`` is set — the edge ids.
    """
    self.lazy_init_sampler()
    # Hetero samplers are keyed by edge type; homo has a single sampler.
    sampler = self._sampler[etype] if etype is not None else self._sampler
    input_seeds = input_seeds.to(self.device)
    edge_ids = None
    if not self.with_edge:
      nbrs, nbrs_num = sampler.sample(input_seeds, req_num)
    else:
      nbrs, nbrs_num, edge_ids = sampler.sample_with_edge(input_seeds, req_num)
    if nbrs.numel() == 0:
      # No neighbors at all: normalize to empty int64 tensors / zero counts.
      nbrs = torch.tensor([], dtype=torch.int64 ,device=self.device)
      nbrs_num = torch.zeros_like(input_seeds, dtype=torch.int64, device=self.device)
      edge_ids = torch.tensor([], device=self.device, dtype=torch.int64) \
        if self.with_edge else None
    return NeighborOutput(nbrs, nbrs_num, edge_ids)
  def sample_from_nodes(
    self,
    inputs: NodeSamplerInput,
    **kwargs
  ) -> Union[HeteroSamplerOutput, SamplerOutput]:
    r""" Perform multi-hop neighbor sampling starting from ``inputs.node``,
    dispatching to the homogeneous or heterogeneous implementation based on
    how this sampler was constructed.
    """
    inputs = NodeSamplerInput.cast(inputs)
    input_seeds = inputs.node.to(self.device)
    input_type = inputs.input_type
    if self._g_cls == 'hetero':
      # Heterogeneous sampling needs the seed node type.
      assert input_type is not None
      output = self._hetero_sample_from_nodes({input_type: input_seeds})
    else:
      output = self._sample_from_nodes(input_seeds)
    return output
def _sample_from_nodes(
self,
input_seeds: torch.Tensor
) -> SamplerOutput:
r""" Sample on homogenous graphs and induce COO format subgraph.
Note that messages in PyG are passed from src to dst. In 'out' direction,
we sample src's out neighbors and induce [src_index, dst_index] subgraphs.
The direction of sampling is opposite to the direction of message passing.
To be consistent with the semantics of PyG, the final edge index is
transpose to [dst_index, src_index]. In 'in' direction, we don't need to
reverse it.
"""
out_nodes, out_rows, out_cols, out_edges = [], [], [], []
num_sampled_nodes, num_sampled_edges = [], []
inducer = self.get_inducer(input_seeds.numel())
srcs = inducer.init_node(input_seeds)
batch = srcs
num_sampled_nodes.append(input_seeds.numel())
out_nodes.append(srcs)
for req_num in self.num_neighbors:
out_nbrs = self.sample_one_hop(srcs, req_num)
if out_nbrs.nbr.numel() == 0:
break
nodes, rows, cols = inducer.induce_next(
srcs, out_nbrs.nbr, out_nbrs.nbr_num)
out_nodes.append(nodes)
out_rows.append(rows)
out_cols.append(cols)
if out_nbrs.edge is not None:
out_edges.append(out_nbrs.edge)
num_sampled_nodes.append(nodes.size(0))
num_sampled_edges.append(cols.size(0))
srcs = nodes
return SamplerOutput(
node=torch.cat(out_nodes),
row=torch.cat(out_cols) if len(out_cols) > 0 else torch.tensor(out_cols),
col=torch.cat(out_rows) if len(out_rows) > 0 else torch.tensor(out_rows),
edge=(torch.cat(out_edges) if out_edges else None),
batch=batch,
num_sampled_nodes=num_sampled_nodes,
num_sampled_edges=num_sampled_edges,
device=self.device
)
def _hetero_sample_from_nodes(
self,
input_seeds_dict: Dict[NodeType, torch.Tensor],
) -> HeteroSamplerOutput:
r""" Sample on heterogenous graphs and induce COO format subgraph dict.
Note that messages in PyG are passed from src to dst. In 'out' direction,
we sample src's out neighbors and induce [src_index, dst_index] subgraphs.
The direction of sampling is opposite to the direction of message passing.
To be consistent with the semantics of PyG, the final edge index is transpose to
[dst_index, src_index] and edge_type is reversed as well. For example,
given the edge_type (u, u2i, i), we sample by meta-path u->i, but return
edge_index_dict {(i, rev_u2i, u) : [i, u]}. In 'in' direction, we don't need to
reverse it.
"""
# sample neighbors hop by hop.
max_input_batch_size = max([t.numel() for t in input_seeds_dict.values()])
inducer = self.get_inducer(max_input_batch_size)
src_dict = inducer.init_node(input_seeds_dict)
batch = src_dict
out_nodes, out_rows, out_cols, out_edges = {}, {}, {}, {}
num_sampled_nodes, num_sampled_edges = {}, {}
merge_dict(src_dict, out_nodes)
count_dict(src_dict, num_sampled_nodes, 1)
for i in range(self.num_hops):
nbr_dict, edge_dict = {}, {}
for etype in self.edge_types:
req_num = self.num_neighbors[etype][i]
# out sampling needs dst_type==seed_type, in sampling needs src_type==seed_type
if self.edge_dir == 'in':
src = src_dict.get(etype[-1], None)
if src is not None and src.numel() > 0:
output = self.sample_one_hop(src, req_num, etype)
if output.nbr.numel() == 0:
continue
nbr_dict[reverse_edge_type(etype)] = [src, output.nbr, output.nbr_num]
if output.edge is not None:
edge_dict[reverse_edge_type(etype)] = output.edge
elif self.edge_dir == 'out':
src = src_dict.get(etype[0], None)
if src is not None and src.numel() > 0:
output = self.sample_one_hop(src, req_num, etype)
if output.nbr.numel() == 0:
continue
nbr_dict[etype] = [src, output.nbr, output.nbr_num]
if output.edge is not None:
edge_dict[etype] = output.edge
if len(nbr_dict) == 0:
continue
nodes_dict, rows_dict, cols_dict = inducer.induce_next(nbr_dict)
merge_dict(nodes_dict, out_nodes)
merge_dict(rows_dict, out_rows)
merge_dict(cols_dict, out_cols)
merge_dict(edge_dict, out_edges)
count_dict(nodes_dict, num_sampled_nodes, i + 2)
count_dict(cols_dict, num_sampled_edges, i + 1)
src_dict = nodes_dict
for etype, rows in out_rows.items():
out_rows[etype] = torch.cat(rows)
out_cols[etype] = torch.cat(out_cols[etype])
if self.with_edge:
out_edges[etype] = torch.cat(out_edges[etype])
res_rows, res_cols, res_edges = {}, {}, {}
for etype, rows in out_rows.items():
rev_etype = reverse_edge_type(etype)
res_rows[rev_etype] = out_cols[etype]
res_cols[rev_etype] = rows
if self.with_edge:
res_edges[rev_etype] = out_edges[etype]
return HeteroSamplerOutput(
node={k : torch.cat(v) for k, v in out_nodes.items()},
row=res_rows,
col=res_cols,
edge=(res_edges if len(res_edges) else None),
batch=batch,
num_sampled_nodes={k : torch.tensor(v, device=self.device)
for k, v in num_sampled_nodes.items()},
num_sampled_edges={
reverse_edge_type(k) : torch.tensor(v, device=self.device)
for k, v in num_sampled_edges.items()},
edge_types=self.edge_types,
device=self.device
)
def sample_from_edges(
self,
inputs: EdgeSamplerInput,
**kwargs,
) -> Union[HeteroSamplerOutput, SamplerOutput]:
r"""Performs sampling from an edge sampler input, leveraging a sampling
function of the same signature as `node_sample`.
Note that in out-edge sampling, we reverse the direction of src and dst
for the output so that features of the sampled nodes during training can
be aggregated from k-hop to (k-1)-hop nodes.
"""
src = inputs.row.to(self.device)
dst = inputs.col.to(self.device)
edge_label = None if inputs.label is None else inputs.label.to(self.device)
input_type = inputs.input_type
neg_sampling = inputs.neg_sampling
num_pos = src.numel()
num_neg = 0
# Negative Sampling
self.lazy_init_neg_sampler()
if neg_sampling is not None:
# When we are doing negative sampling, we append negative information
# of nodes/edges to `src`, `dst`.
# Later on, we can easily reconstruct what belongs to positive and
# negative examples by slicing via `num_pos`.
num_neg = math.ceil(num_pos * neg_sampling.amount)
if neg_sampling.is_binary():
# In the "binary" case, we randomly sample negative pairs of nodes.
if input_type is not None:
neg_pair = self._neg_sampler[input_type].sample(num_neg)
else:
neg_pair = self._neg_sampler.sample(num_neg)
src_neg, dst_neg = neg_pair[0], neg_pair[1]
src = torch.cat([src, src_neg], dim=0)
dst = torch.cat([dst, dst_neg], dim=0)
if edge_label is None:
edge_label = torch.ones(num_pos, device=self.device)
size = (src_neg.size()[0], ) + edge_label.size()[1:]
edge_neg_label = edge_label.new_zeros(size)
edge_label = torch.cat([edge_label, edge_neg_label])
elif neg_sampling.is_triplet():
# TODO: make triplet negative sampling strict.
# In the "triplet" case, we randomly sample negative destinations
# in a "non-strict" manner.
assert num_neg % num_pos == 0
if input_type is not None:
neg_pair = self._neg_sampler[input_type].sample(num_neg, padding=True)
else:
neg_pair = self._neg_sampler.sample(num_neg, padding=True)
dst_neg = neg_pair[1]
dst = torch.cat([dst, dst_neg], dim=0)
assert edge_label is None
# Neighbor Sampling
if input_type is not None: # hetero
if input_type[0] != input_type[-1]: # Two distinct node types:
src_seed, dst_seed = src, dst
src, inverse_src = src.unique(return_inverse=True)
dst, inverse_dst = dst.unique(return_inverse=True)
seed_dict = {input_type[0]: src, input_type[-1]: dst}
else: # Only a single node type: Merge both source and destination.
seed = torch.cat([src, dst], dim=0)
seed, inverse_seed = seed.unique(return_inverse=True)
seed_dict = {input_type[0]: seed}
temp_out = []
for it, node in seed_dict.items():
seeds = NodeSamplerInput(node=node, input_type=it)
temp_out.append(self.sample_from_nodes(seeds))
if len(temp_out) == 2:
out = merge_hetero_sampler_output(temp_out[0],
temp_out[1],
device=self.device,
edge_dir=self.edge_dir)
else:
out = format_hetero_sampler_output(temp_out[0], edge_dir=self.edge_dir)
# edge_label
if neg_sampling is None or neg_sampling.is_binary():
if input_type[0] != input_type[-1]:
inverse_src = id2idx(out.node[input_type[0]])[src_seed]
inverse_dst = id2idx(out.node[input_type[-1]])[dst_seed]
edge_label_index = torch.stack([
inverse_src,
inverse_dst,
], dim=0)
else:
edge_label_index = inverse_seed.view(2, -1)
out.metadata = {'edge_label_index': edge_label_index,
'edge_label': edge_label}
out.input_type = input_type
elif neg_sampling.is_triplet():
if input_type[0] != input_type[-1]:
inverse_src = id2idx(out.node[input_type[0]])[src_seed]
inverse_dst = id2idx(out.node[input_type[-1]])[dst_seed]
src_index = inverse_src
dst_pos_index = inverse_dst[:num_pos]
dst_neg_index = inverse_dst[num_pos:]
else:
src_index = inverse_seed[:num_pos]
dst_pos_index = inverse_seed[num_pos:2 * num_pos]
dst_neg_index = inverse_seed[2 * num_pos:]
dst_neg_index = dst_neg_index.view(num_pos, -1).squeeze(-1)
out.metadata = {'src_index': src_index,
'dst_pos_index': dst_pos_index,
'dst_neg_index': dst_neg_index}
out.input_type = input_type
else: #homo
seed = torch.cat([src, dst], dim=0)
seed, inverse_seed = seed.unique(return_inverse=True)
out = self.sample_from_nodes(seed)
# edge_label
if neg_sampling is None or neg_sampling.is_binary():
edge_label_index = inverse_seed.view(2, -1)
out.metadata = {'edge_label_index': edge_label_index,
'edge_label': edge_label}
elif neg_sampling.is_triplet():
src_index = inverse_seed[:num_pos]
dst_pos_index = inverse_seed[num_pos:2 * num_pos]
dst_neg_index = inverse_seed[2 * num_pos:]
dst_neg_index = dst_neg_index.view(num_pos, -1).squeeze(-1)
out.metadata = {'src_index': src_index,
'dst_pos_index': dst_pos_index,
'dst_neg_index': dst_neg_index}
return out
def sample_pyg_v1(self, ids: torch.Tensor):
r""" Sample multi-hop neighbors and organize results to PyG's `EdgeIndex`.
Args:
ids: input ids, 1D tensor.
The sampled results that is the same as PyG's `NeighborSampler`(PyG v1)
"""
ids = ids.to(self.device)
adjs = []
srcs = ids
out_ids = ids
batch_size = 0
inducer = self.get_inducer(srcs.numel())
for i, req_num in enumerate(self.num_neighbors):
srcs = inducer.init_node(srcs)
batch_size = srcs.numel() if i == 0 else batch_size
out_nbrs = self.sample_one_hop(srcs, req_num)
nodes, rows, cols = \
inducer.induce_next(srcs, out_nbrs.nbr, out_nbrs.nbr_num)
edge_index = torch.stack([cols, rows]) # we use csr instead of csc in PyG.
out_ids = torch.cat([srcs, nodes])
adj_size = torch.LongTensor([out_ids.size(0), srcs.size(0)])
adjs.append(EdgeIndex(edge_index, out_nbrs.edge, adj_size))
srcs = out_ids
return batch_size, out_ids, adjs[::-1]
def subgraph(
self,
inputs: NodeSamplerInput,
) -> SamplerOutput:
self.lazy_init_subgraph_op()
inputs = NodeSamplerInput.cast(inputs)
input_seeds = inputs.node.to(self.device)
if self.num_neighbors is not None:
nodes = [input_seeds]
for num in self.num_neighbors:
nbr = self.sample_one_hop(nodes[-1], num).nbr
nodes.append(torch.unique(nbr))
nodes, mapping = torch.cat(nodes).unique(return_inverse=True)
else:
nodes, mapping = torch.unique(input_seeds, return_inverse=True)
subgraph = self._subgraph_op.node_subgraph(nodes, self.with_edge)
return SamplerOutput(
node=subgraph.nodes,
# The edge index should be reversed.
row=subgraph.cols,
col=subgraph.rows,
edge=subgraph.eids if self.with_edge else None,
device=self.device,
metadata=mapping[:input_seeds.numel()])
def sample_prob(
self,
inputs: NodeSamplerInput,
node_cnt: Union[int, Dict[NodeType, int]]
) -> Union[torch.Tensor, Dict[NodeType, torch.Tensor]]:
r""" Get the probability of each node being sampled.
"""
self.lazy_init_sampler()
inputs = NodeSamplerInput.cast(inputs)
input_seeds = inputs.node.to(self.device)
input_type = inputs.input_type
if self._g_cls == 'hetero':
assert input_type is not None
output = self._hetero_sample_prob({input_type : input_seeds}, node_cnt)
else:
output = self._sample_prob(input_seeds, node_cnt)
return output
def _sample_prob(
self,
input_seeds: torch.Tensor,
node_cnt: int
) -> torch.Tensor:
last_prob = \
torch.ones(node_cnt, device=self.device, dtype=torch.float32) * 0.01
last_prob[input_seeds] = 1
for req in self.num_neighbors:
cur_prob = torch.zeros(node_cnt, device=self.device, dtype=torch.float32)
self._sampler.cal_nbr_prob(
req, last_prob, last_prob, self.graph.graph_handler, cur_prob
)
last_prob = cur_prob
return last_prob
def _hetero_sample_prob(
self,
input_seeds_dict: Dict[NodeType, torch.Tensor],
node_dict: Dict[NodeType, int]
) -> Dict[NodeType, torch.Tensor]:
probs = {}
for ntype in node_dict.keys():
probs[ntype] = []
# calculate probs for each subgraph
for i in range(self.num_hops):
for etype in self.edge_types:
req = self.num_neighbors[etype][i]
# homogenous subgraph case
if etype[0] == etype[2]:
if len(probs[etype[0]]) == 0:
last_prob = torch.ones(node_dict[etype[0]].size(0),
device=self.device,
dtype=torch.float32) * 0.005
last_prob[input_seeds_dict[etype[0]]] = 1
else:
last_prob = self.aggregate_prob(probs[etype[0]],
node_dict[etype[0]].size(0),
device=self.device)
cur_prob = torch.zeros(node_dict[etype[0]].size(0),
device=self.device,
dtype=torch.float32)
self._sampler[etype].cal_nbr_prob(
req, last_prob, last_prob,
self._graph_dict[etype].graph_handler, cur_prob
)
last_prob = cur_prob
probs[etype[0]].append(last_prob)
# hetero bipartite graph case
else:
if len(probs[etype[0]]) == 0:
last_prob = torch.ones(node_dict[etype[0]].size(0),
device=self.device,
dtype=torch.float32) * 0.005
last_prob[input_seeds_dict[etype[0]]] = 1
else:
last_prob = self.aggregate_prob(probs[etype[0]],
node_dict[etype[0]].size(0),
device=self.device)
etypes = [nbr_etype
for nbr_etype in self.edge_types
if nbr_etype[0] == etype[2]]
temp_probs = []
# prepare nbr_prob
if len(probs[etype[2]]) == 0:
nbr_prob = torch.ones(node_dict[etype[2]].size(0),
device=self.device,
dtype=torch.float32) * 0.005
if etype[2] in input_seeds_dict:
nbr_prob[input_seeds_dict[etype[2]]] = 1
else:
nbr_prob = self.aggregate_prob(probs[etype[2]],
node_dict[etype[2]].size(0),
device=self.device)
for nbr_etype in etypes:
cur_prob = torch.zeros(node_dict[etype[0]].size(0),
device=self.device,
dtype=torch.float32)
self._sampler[etype].cal_nbr_prob(
req, last_prob, nbr_prob,
self._graph_dict[nbr_etype].graph_handler, cur_prob
)
last_prob = cur_prob
temp_probs.append(last_prob)
# aggregate prob for the bipartite graph
# with #{subgraphs where the neighbours are}
sub_temp_prob = self.aggregate_prob(temp_probs,
node_dict[etype[0]].size(0),
device=self.device)
probs[etype[0]].append(sub_temp_prob)
# aggregate probs from each subgraph
# with #{subgraphs}
for ntype, prob in probs.items():
res = self.aggregate_prob(
prob, node_dict[ntype].size(0), device=self.device)
if i == self.num_hops - 1:
probs[ntype] = res
else:
probs[ntype] = [res]
return probs
def get_inducer(self, input_batch_size: int):
if self._inducer is None:
self._inducer = self.create_inducer(input_batch_size)
return self._inducer
def create_inducer(self, input_batch_size: int):
max_num_nodes = self._max_sampled_nodes(input_batch_size)
if self.device.type == 'cuda':
if self._g_cls == 'homo':
inducer = pywrap.CUDAInducer(max_num_nodes)
else:
inducer = pywrap.CUDAHeteroInducer(max_num_nodes)
else:
if self._g_cls == 'homo':
inducer = pywrap.CPUInducer(max_num_nodes)
else:
inducer = pywrap.CPUHeteroInducer(max_num_nodes)
return inducer
def _set_num_neighbors_and_num_hops(self, num_neighbors):
if isinstance(num_neighbors, (list, tuple)):
num_neighbors = {key: num_neighbors for key in self.edge_types}
assert isinstance(num_neighbors, dict)
self.num_neighbors = num_neighbors
# Add at least one element to the list to ensure `max` is well-defined
self.num_hops = max([0] + [len(v) for v in num_neighbors.values()])
for key, value in self.num_neighbors.items():
if len(value) != self.num_hops:
raise ValueError(f"Expected the edge type {key} to have "
f"{self.num_hops} entries (got {len(value)})")
def _max_sampled_nodes(
self,
input_batch_size: int,
) -> Union[int, Dict[str, int]]:
if self._g_cls == 'homo':
res = [input_batch_size]
for num in self.num_neighbors:
res.append(res[-1] * num)
return sum(res)
res = {k : [] for k in self.node_types}
for etype, num_list in self.num_neighbors.items():
tmp_res = [input_batch_size]
for num in num_list:
tmp_res.append(tmp_res[-1] * num)
res[etype[0]].extend(tmp_res)
res[etype[2]].extend(tmp_res)
return {k : sum(v) for k, v in res.items()}
def _aggregate_prob(self, probs, node_num, device):
"""
Aggregate probs from each subgraph
p = 1 - ((1-p_0)(1-p_1)...(1-p_k))**(1/k)
where k := #{subgraphs}
"""
res = torch.ones(node_num, device=device, dtype=torch.float32)
for temp_prob in probs:
# to avoid the case that p_i=1 causes p=1 s.t the whole importance won't
# be decided by one term.
res *= (1 + .002 - temp_prob)
res = 1 - res ** (1/len(probs))
return res.clamp(min=0.0)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/sampler/__init__.py | graphlearn_torch/python/sampler/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .base import *
from .negative_sampler import *
from .neighbor_sampler import * | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/sampler/base.py | graphlearn_torch/python/sampler/base.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from abc import ABC, abstractmethod
import math
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union, Literal
import torch
from ..typing import NodeType, EdgeType, NumNeighbors, Split
from ..utils import CastMixin
class EdgeIndex(NamedTuple):
r""" PyG's :class:`~torch_geometric.loader.EdgeIndex` used in old data loader
:class:`~torch_geometric.loader.NeighborSampler`:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/loader/neighbor_sampler.py
"""
edge_index: torch.Tensor
e_id: Optional[torch.Tensor]
size: Tuple[int, int]
def to(self, *args, **kwargs):
edge_index = self.edge_index.to(*args, **kwargs)
e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None
return EdgeIndex(edge_index, e_id, self.size)
@dataclass
class NodeSamplerInput(CastMixin):
r""" The sampling input of
:meth:`~graphlearn_torch.sampler.BaseSampler.sample_from_nodes`.
This class corresponds to :class:`~torch_geometric.sampler.NodeSamplerInput`:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/sampler/base.py
Args:
node (torch.Tensor): The indices of seed nodes to start sampling from.
input_type (str, optional): The input node type (in case of sampling in
a heterogeneous graph). (default: :obj:`None`).
"""
node: torch.Tensor
input_type: Optional[NodeType] = None
def __getitem__(self, index: Union[torch.Tensor, Any]) -> 'NodeSamplerInput':
if not isinstance(index, torch.Tensor):
index = torch.tensor(index, dtype=torch.long)
index = index.to(self.node.device)
return NodeSamplerInput(self.node[index], self.input_type)
def __len__(self):
return self.node.numel()
def share_memory(self):
self.node.share_memory_()
return self
def to(self, device: torch.device):
self.node.to(device)
return self
class NegativeSamplingMode(Enum):
# 'binary': Randomly sample negative edges in the graph.
binary = 'binary'
# 'triplet': Randomly sample negative destination nodes for each positive
# source node.
triplet = 'triplet'
@dataclass
class NegativeSampling(CastMixin):
r"""The negative sampling configuration of a
:class:`~torch_geometric.sampler.BaseSampler` when calling
:meth:`~torch_geometric.sampler.BaseSampler.sample_from_edges`.
Args:
mode (str): The negative sampling mode
(:obj:`"binary"` or :obj:`"triplet"`).
If set to :obj:`"binary"`, will randomly sample negative links
from the graph.
If set to :obj:`"triplet"`, will randomly sample negative
destination nodes for each positive source node.
amount (int or float, optional): The ratio of sampled negative edges to
the number of positive edges. (default: :obj:`1`)
weight (torch.Tensor, optional): A node-level vector determining the
sampling of nodes. Does not necessariyl need to sum up to one.
If not given, negative nodes will be sampled uniformly.
(default: :obj:`None`)
"""
mode: NegativeSamplingMode
amount: Union[int, float] = 1
weight: Optional[torch.Tensor] = None
def __init__(
self,
mode: Union[NegativeSamplingMode, str],
amount: Union[int, float] = 1,
weight: Optional[torch.Tensor] = None,
):
self.mode = NegativeSamplingMode(mode)
self.amount = amount
self.weight = weight
if self.amount <= 0:
raise ValueError(f"The attribute 'amount' needs to be positive "
f"for '{self.__class__.__name__}' "
f"(got {self.amount})")
if self.is_triplet():
if self.amount != math.ceil(self.amount):
raise ValueError(f"The attribute 'amount' needs to be an "
f"integer for '{self.__class__.__name__}' "
f"with 'triplet' negative sampling "
f"(got {self.amount}).")
self.amount = math.ceil(self.amount)
def is_binary(self) -> bool:
return self.mode == NegativeSamplingMode.binary
def is_triplet(self) -> bool:
return self.mode == NegativeSamplingMode.triplet
def share_memory(self):
if self.weight is not None:
self.weight.share_memory_()
return self
def to(self, device: torch.device):
if self.weight is not None:
self.weight.to(device)
return self
@dataclass
class EdgeSamplerInput(CastMixin):
r""" The sampling input of
:meth:`~graphlearn_torch.sampler.BaseSampler.sample_from_edges`.
This class corresponds to :class:`~torch_geometric.sampler.EdgeSamplerInput`:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/sampler/base.py
Args:
row (torch.Tensor): The source node indices of seed links to start
sampling from.
col (torch.Tensor): The destination node indices of seed links to start
sampling from.
label (torch.Tensor, optional): The label for the seed links.
(default: :obj:`None`).
input_type (Tuple[str, str, str], optional): The input edge type (in
case of sampling in a heterogeneous graph). (default: :obj:`None`).
"""
row: torch.Tensor
col: torch.Tensor
label: Optional[torch.Tensor] = None
input_type: Optional[EdgeType] = None
neg_sampling: Optional[NegativeSampling] = None
def __getitem__(self, index: Union[torch.Tensor, Any]) -> 'EdgeSamplerInput':
if not isinstance(index, torch.Tensor):
index = torch.tensor(index, dtype=torch.long)
index = index.to(self.row.device)
return EdgeSamplerInput(
self.row[index],
self.col[index],
self.label[index] if self.label is not None else None,
self.input_type,
self.neg_sampling
)
def __len__(self):
return self.row.numel()
def share_memory(self):
self.row.share_memory_()
self.col.share_memory_()
if self.label is not None:
self.label.share_memory_()
if self.label is not None:
self.neg_sampling.share_memory()
return self
def to(self, device: torch.device):
self.row.to(device)
self.col.to(device)
if self.label is not None:
self.label.to(device)
if self.label is not None:
self.neg_sampling.to(device)
return self
@dataclass
class SamplerOutput(CastMixin):
r""" The sampling output of a :class:`~graphlearn_torch.sampler.BaseSampler` on
homogeneous graphs.
This class corresponds to :class:`~torch_geometric.sampler.SamplerOutput`:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/sampler/base.py
Args:
node (torch.Tensor): The sampled nodes in the original graph.
row (torch.Tensor): The source node indices of the sampled subgraph.
Indices must be re-indexed to :obj:`{ 0, ..., num_nodes - 1 }`
corresponding to the nodes in the :obj:`node` tensor.
col (torch.Tensor): The destination node indices of the sampled subgraph.
Indices must be re-indexed to :obj:`{ 0, ..., num_nodes - 1 }`
corresponding to the nodes in the :obj:`node` tensor.
edge (torch.Tensor, optional): The sampled edges in the original graph.
This tensor is used to obtain edge features from the original
graph. If no edge attributes are present, it may be omitted.
batch (torch.Tensor, optional): The vector to identify the seed node
for each sampled node. Can be present in case of disjoint subgraph
sampling per seed node. (default: :obj:`None`).
device (torch.device, optional): The device that all data of this output
resides in. (default: :obj:`None`).
metadata: (Any, optional): Additional metadata information.
(default: :obj:`None`).
"""
node: torch.Tensor
row: torch.Tensor
col: torch.Tensor
edge: Optional[torch.Tensor] = None
batch: Optional[torch.Tensor] = None
num_sampled_nodes: Optional[Union[List[int], torch.Tensor]] = None
num_sampled_edges: Optional[Union[List[int], torch.Tensor]] = None
device: Optional[torch.device] = None
metadata: Optional[Any] = None
@dataclass
class HeteroSamplerOutput(CastMixin):
r""" The sampling output of a :class:`~graphlearn_torch.sampler.BaseSampler` on
heterogeneous graphs.
This class corresponds to
:class:`~torch_geometric.sampler.HeteroSamplerOutput`:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/sampler/base.py
Args:
node (Dict[str, torch.Tensor]): The sampled nodes in the original graph
for each node type.
row (Dict[Tuple[str, str, str], torch.Tensor]): The source node indices
of the sampled subgraph for each edge type. Indices must be re-indexed
to :obj:`{ 0, ..., num_nodes - 1 }` corresponding to the nodes in the
:obj:`node` tensor of the source node type.
col (Dict[Tuple[str, str, str], torch.Tensor]): The destination node
indices of the sampled subgraph for each edge type. Indices must be
re-indexed to :obj:`{ 0, ..., num_nodes - 1 }` corresponding to the nodes
in the :obj:`node` tensor of the destination node type.
edge (Dict[Tuple[str, str, str], torch.Tensor], optional): The sampled
edges in the original graph for each edge type. This tensor is used to
obtain edge features from the original graph. If no edge attributes are
present, it may be omitted. (default: :obj:`None`).
batch (Dict[str, torch.Tensor], optional): The vector to identify the
seed node for each sampled node for each node type. Can be present
in case of disjoint subgraph sampling per seed node.
(default: :obj:`None`).
edge_types: (List[Tuple[str, str, str]], optional): The list of edge types
of the sampled subgraph. (default: :obj:`None`).
input_type: (Union[NodeType, EdgeType], optional): The input type of seed
nodes or edge_label_index.
(default: :obj:`None`).
device (torch.device, optional): The device that all data of this output
resides in. (default: :obj:`None`).
metadata: (Any, optional): Additional metadata information.
(default: :obj:`None`)
"""
node: Dict[NodeType, torch.Tensor]
row: Dict[EdgeType, torch.Tensor]
col: Dict[EdgeType, torch.Tensor]
edge: Optional[Dict[EdgeType, torch.Tensor]] = None
batch: Optional[Dict[NodeType, torch.Tensor]] = None
num_sampled_nodes: Optional[Dict[NodeType, Union[List[int], torch.Tensor]]] = None
num_sampled_edges: Optional[Dict[EdgeType, Union[List[int], torch.Tensor]]] = None
edge_types: Optional[List[EdgeType]] = None
input_type: Optional[Union[NodeType, EdgeType]] = None
device: Optional[torch.device] = None
metadata: Optional[Any] = None
def get_edge_index(self):
edge_index = {k: torch.stack([v, self.col[k]]) for k, v in self.row.items()}
if self.edge_types is not None:
for etype in self.edge_types:
if edge_index.get(etype, None) is None:
edge_index[etype] = \
torch.empty((2, 0), dtype=torch.long).to(self.device)
return edge_index
@dataclass
class NeighborOutput(CastMixin):
r""" The output of sampled neighbor results for a single hop sampling.
Args:
nbr (torch.Tensor): A 1D tensor of all sampled neighborhood node ids.
nbr_num (torch.Tensor): A 1D tensor that identify the number of
neighborhood nodes for each source nodes. Must be the same length as
the source nodes of this sampling hop.
nbr_num (torch.Tensor, optional): The edge ids corresponding to the sampled
edges (from source node to the sampled neighborhood node). Should be the
same length as :obj:`nbr` if provided.
"""
nbr: torch.Tensor
nbr_num: torch.Tensor
edge: Optional[torch.Tensor]
def to(self, device: torch.device):
return NeighborOutput(
nbr=self.nbr.to(device),
nbr_num=self.nbr_num.to(device),
edge=(self.edge.to(device) if self.edge is not None else None)
)
class SamplingType(Enum):
r""" Enum class for sampling types.
"""
NODE = 0
LINK = 1
SUBGRAPH = 2
RANDOM_WALK = 3
@dataclass
class SamplingConfig:
r""" Configuration info for sampling.
"""
sampling_type: SamplingType
num_neighbors: Optional[NumNeighbors]
batch_size: int
shuffle: bool
drop_last: bool
with_edge: bool
collect_features: bool
with_neg: bool
with_weight: bool
edge_dir: Literal['in', 'out']
seed: int
class BaseSampler(ABC):
r""" A base class that initializes a graph sampler and provides
:meth:`sample_from_nodes` and :meth:`sample_from_edges` routines.
This class corresponds to :class:`~torch_geometric.sampler.BaseSampler`:
https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/sampler/base.py
"""
@abstractmethod
def sample_from_nodes(
self,
inputs: NodeSamplerInput,
**kwargs
) -> Union[HeteroSamplerOutput, SamplerOutput]:
r""" Performs sampling from the nodes specified in :obj:`inputs`,
returning a sampled subgraph(egograph) in the specified output format.
Args:
inputs (torch.Tensor): The input data with node indices to start
sampling from.
"""
@abstractmethod
def sample_from_edges(
self,
inputs: EdgeSamplerInput,
**kwargs,
) -> Union[HeteroSamplerOutput, SamplerOutput]:
r""" Performs sampling from the edges specified in :obj:`inputs`,
returning a sampled subgraph(egograph) in the specified output format.
Args:
inputs (EdgeSamplerInput): The input data for sampling from edges
including the (1) source node indices, the (2) destination node
indices, the (3) optional edge labels and the (4) input edge type.
"""
@abstractmethod
def subgraph(
self,
inputs: NodeSamplerInput,
) -> SamplerOutput:
r""" Induce an enclosing subgraph based on inputs and their neighbors(if
num_neighbors is not None).
Args:
inputs (torch.Tensor): The input data with node indices to induce subgraph
from.
Returns:
The sampled unique nodes, relabeled rows and cols, original edge_ids,
and a mapping from indices in `inputs` to new indices in output nodes,
i.e. nodes[mapping] = inputs.
"""
class RemoteSamplerInput(ABC):
"""A base class that provides the `to_local_sampler_input` method for the server
to obtain the sampler input.
"""
@abstractmethod
def to_local_sampler_input(
self,
dataset,
**kwargs
) -> Union[NodeSamplerInput, EdgeSamplerInput]:
r"""
Abstract method to convert the sampler input to local format.
"""
class RemoteNodePathSamplerInput(RemoteSamplerInput):
r"""RemoteNodePathSamplerInput passes the node path to the server, where the server
can load node seeds from it.
"""
def __init__(self, node_path: str, input_type: str ) -> None:
self.node_path = node_path
self.input_type = input_type
def to_local_sampler_input(
self,
dataset,
**kwargs,
) -> NodeSamplerInput:
node = torch.load(self.node_path)
return NodeSamplerInput(node=node, input_type=self.input_type)
class RemoteNodeSplitSamplerInput(RemoteSamplerInput):
  r""" A remote sampler input that carries a split category; the server
  resolves the corresponding seed node indices from its local dataset.
  """

  def __init__(self, split: Split, input_type: str) -> None:
    self.split = split
    self.input_type = input_type

  def to_local_sampler_input(
      self,
      dataset,
      **kwargs,
  ) -> NodeSamplerInput:
    r""" Resolve the seed node indices for ``self.split`` from ``dataset``.

    Args:
      dataset: The server-side dataset exposing ``train_idx``, ``val_idx``
        and ``test_idx`` attributes (each a tensor, or a dict keyed by node
        type for heterogeneous graphs).

    Returns:
      A :class:`NodeSamplerInput` wrapping the resolved indices.

    Raises:
      ValueError: If ``self.split`` is not one of ``Split.train``,
        ``Split.valid`` or ``Split.test``.
    """
    if self.split == Split.train:
      idx = dataset.train_idx
    elif self.split == Split.valid:
      idx = dataset.val_idx
    elif self.split == Split.test:
      idx = dataset.test_idx
    else:
      # Fail loudly: previously an unknown split fell through and raised an
      # opaque NameError on the unbound `idx` below.
      raise ValueError(f"invalid split category: {self.split}")
    if isinstance(idx, dict):
      # Heterogeneous split: pick the indices for this input's node type.
      idx = idx[self.input_type]
    return NodeSamplerInput(node=idx, input_type=self.input_type)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/sampler/negative_sampler.py | graphlearn_torch/python/sampler/negative_sampler.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
from .. import py_graphlearn_torch as pywrap
class RandomNegativeSampler(object):
  r""" Random negative sampler.

  Args:
    graph: A ``graphlearn_torch.data.Graph`` object.
    mode: Execution mode of sampling, 'CUDA' means sampling on
      GPU, any other value means sampling on CPU. (default: 'CUDA')
    edge_dir: The direction of edges to be sampled, 'out' or 'in';
      determines the order of rows and columns returned. (default: 'out')

  Raises:
    ValueError: If ``edge_dir`` is neither 'out' nor 'in'.
  """

  def __init__(self, graph, mode='CUDA', edge_dir='out'):
    # Validate eagerly: an invalid `edge_dir` would otherwise only surface
    # later as an UnboundLocalError on `rows`/`cols` inside `sample()`.
    if edge_dir not in ('out', 'in'):
      raise ValueError(
        f"invalid edge_dir '{edge_dir}', expected 'out' or 'in'")
    self._mode = mode
    self.edge_dir = edge_dir
    if mode == 'CUDA':
      self._sampler = pywrap.CUDARandomNegativeSampler(graph.graph_handler)
    else:
      # Any non-'CUDA' mode falls back to the CPU implementation.
      self._sampler = pywrap.CPURandomNegativeSampler(graph.graph_handler)

  def sample(self, req_num, trials_num=5, padding=False):
    r""" Negative sampling.

    Args:
      req_num: The (maximum) number of negative samples requested.
      trials_num: The number of trials for negative sampling. (default: 5)
      padding: Whether to pad the result up to ``req_num``. If True and
        fewer than ``req_num`` true negatives are found after
        ``trials_num`` trials, random edges (non-strict negatives) are
        used to fill the remainder. (default: False)

    Returns:
      A 2 x N tensor of negative edge_index (non-strict when ``padding``
      is True), with row/column order following ``edge_dir``.
    """
    if self.edge_dir == 'out':
      rows, cols = self._sampler.sample(req_num, trials_num, padding)
    else:
      # edge_dir == 'in' (validated in __init__): swap the orientation of
      # the pair returned by the underlying sampler.
      cols, rows = self._sampler.sample(req_num, trials_num, padding)
    return torch.stack([rows, cols], dim=0)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/graph.py | graphlearn_torch/python/data/graph.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from multiprocessing.reduction import ForkingPickler
from typing import Optional, Tuple, Union, Literal
import torch
import warnings
from .. import py_graphlearn_torch as pywrap
from ..typing import TensorDataType
from ..utils import (
convert_to_tensor, share_memory, ptr2ind, coo_to_csr, coo_to_csc
)
class Topology(object):
  r""" Graph topology with support for CSC and CSR formats.

  The topology is stored internally in a single compressed layout ('CSR'
  or 'CSC', selected by ``layout``); the constructor converts whatever the
  caller provides into that layout.

  Args:
    edge_index (a 2D torch.Tensor or numpy.ndarray, or a tuple): The edge
      index for graph topology, in the order of first row and then column.
      For 'CSR' input the row entry is the compressed indptr array; for
      'CSC' input the column entry is.
    edge_ids (torch.Tensor or numpy.ndarray, optional): The edge ids for
      graph edges. If set to ``None``, it will be aranged by the edge size.
      (default: ``None``)
    edge_weights (torch.Tensor or numpy.ndarray, optional): The edge weights
      for graph edges. If set to ``None``, it will be None.
      (default: ``None``)
    input_layout (str): The edge layout representation for the input edge
      index, should be 'COO' (rows and cols uncompressed), 'CSR' (rows
      compressed) or 'CSC' (columns compressed). (default: 'COO')
    layout ('CSR' or 'CSC'): The target edge layout representation for
      the output. (default: 'CSR')
  """
  def __init__(self,
               edge_index: Union[TensorDataType,
                                 Tuple[TensorDataType, TensorDataType]],
               edge_ids: Optional[TensorDataType] = None,
               edge_weights: Optional[TensorDataType] = None,
               input_layout: str = 'COO',
               layout: Literal['CSR', 'CSC'] = 'CSR'):
    edge_index = convert_to_tensor(edge_index, dtype=torch.int64)
    # For compressed input layouts one of (row, col) is actually an indptr
    # array rather than per-edge indices.
    row, col = edge_index[0], edge_index[1]
    input_layout = str(input_layout).upper()
    if input_layout == 'COO':
      assert row.numel() == col.numel()
      num_edges = row.numel()
    elif input_layout == 'CSR':
      # row is compressed (indptr); the edge count comes from the indices.
      num_edges = col.numel()
    elif input_layout == 'CSC':
      # col is compressed (indptr); the edge count comes from the indices.
      num_edges = row.numel()
    else:
      raise RuntimeError(f"'{self.__class__.__name__}': got "
                         f"invalid edge layout {input_layout}")
    edge_ids = convert_to_tensor(edge_ids, dtype=torch.int64)
    if edge_ids is None:
      # Default edge ids: 0..num_edges-1 on the same device as the input.
      edge_ids = torch.arange(num_edges, dtype=torch.int64, device=row.device)
    else:
      assert edge_ids.numel() == num_edges
    edge_weights = convert_to_tensor(edge_weights, dtype=torch.float)
    if edge_weights is not None:
      assert edge_weights.numel() == num_edges
    self._layout = layout
    # NOTE(review): unlike `input_layout`, `layout` is not upper-cased, so a
    # lowercase target layout would silently skip both conversion branches
    # below — confirm callers always pass 'CSR'/'CSC'.
    if input_layout == layout:
      # Already in the target layout; just bind the fields directly.
      if input_layout == 'CSC':
        self._indices, self._indptr = row, col
      elif input_layout == 'CSR':
        self._indptr, self._indices = row, col
      self._edge_ids = edge_ids
      self._edge_weights = edge_weights
      return
    elif input_layout == 'CSC':
      # Decompress the indptr back to per-edge column indices (COO).
      col = ptr2ind(col)
    elif input_layout == 'CSR':
      # Decompress the indptr back to per-edge row indices (COO).
      row = ptr2ind(row)
    # COO format data is prepared.
    if layout == 'CSR':
      self._indptr, self._indices, self._edge_ids, self._edge_weights = \
        coo_to_csr(row, col, edge_id=edge_ids, edge_weight=edge_weights)
    elif layout == 'CSC':
      self._indices, self._indptr, self._edge_ids, self._edge_weights = \
        coo_to_csc(row, col, edge_id=edge_ids, edge_weight=edge_weights)

  def to_coo(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    r""" Convert to COO format.

    Returns:
      row indice tensor, column indice tensor, edge id tensor,
      edge weight tensor.
    """
    if self._layout == 'CSR':
      return ptr2ind(self._indptr), self._indices, \
        self._edge_ids, self._edge_weights
    elif self._layout == 'CSC':
      return self._indices, ptr2ind(self._indptr), \
        self._edge_ids, self._edge_weights

  def to_csc(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    r""" Convert to CSC format.

    Returns:
      row indice tensor, column ptr tensor, edge id tensor,
      edge weight tensor.
    """
    if self._layout == 'CSR':
      # Go through COO as the intermediate representation.
      row, col, edge_id, edge_weights = self.to_coo()
      return coo_to_csc(row, col, edge_id=edge_id, edge_weight=edge_weights)
    elif self._layout == 'CSC':
      return self._indices, self._indptr, self._edge_ids, self._edge_weights

  def to_csr(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    r""" Convert to CSR format.

    Returns:
      row ptr tensor, column indice tensor, edge id tensor,
      edge weight tensor.
    """
    if self._layout == 'CSR':
      return self._indptr, self._indices, self._edge_ids, self._edge_weights
    elif self._layout == 'CSC':
      # Go through COO as the intermediate representation.
      row, col, edge_ids, edge_weights = self.to_coo()
      return coo_to_csr(row, col, edge_id=edge_ids, edge_weight=edge_weights)

  @property
  def indptr(self):
    r""" Compressed pointer array of the stored layout. """
    return self._indptr

  @property
  def indices(self):
    r""" Per-edge index array of the stored layout. """
    return self._indices

  @property
  def edge_ids(self):
    r""" local edge ids.
    """
    return self._edge_ids

  @property
  def edge_weights(self):
    r""" local edge weights.
    """
    return self._edge_weights

  @property
  def degrees(self):
    r""" Per-row (CSR) / per-column (CSC) edge counts. """
    return self._indptr[1:] - self._indptr[:-1]

  @property
  def row_count(self):
    r""" Number of compressed entries (indptr length minus one). """
    return self._indptr.shape[0] - 1

  @property
  def edge_count(self):
    r""" Total number of edges. """
    return self._indices.shape[0]

  def share_memory_(self):
    r""" Move all topology tensors into shared memory (in place) so the
    topology can be shared across processes. """
    self._indptr = share_memory(self._indptr)
    self._indices = share_memory(self._indices)
    self._edge_ids = share_memory(self._edge_ids)
    self._edge_weights = share_memory(self._edge_weights)

  def __getitem__(self, key):
    # Dict-like read access to attributes; returns None for missing keys.
    return getattr(self, key, None)

  def __setitem__(self, key, value):
    # Dict-like write access to attributes.
    setattr(self, key, value)
class Graph(object):
  r""" A graph object used for graph operations such as sampling.

  There are three modes supported:
  1.'CPU': graph data are stored in the CPU memory and graph
    operations are also executed on CPU.
  2.'ZERO_COPY': graph data are stored in the pinned CPU memory and graph
    operations are executed on GPU.
  3.'CUDA': graph data are stored in the GPU memory and graph operations
    are executed on GPU.

  Args:
    topo (Topology): An instance of ``Topology`` with graph topology data.
    mode (str): The graph operation mode, must be 'CPU', 'ZERO_COPY' or
      'CUDA'. (Default: 'ZERO_COPY').
    device (int, optional): The target cuda device rank to perform graph
      operations. Note that this parameter will be ignored if the graph
      mode is set to 'CPU'. The value of ``torch.cuda.current_device()``
      will be used if set to ``None``. (Default: ``None``).
  """
  def __init__(self, topo: Topology, mode = 'ZERO_COPY',
               device: Optional[int] = None):
    self.topo = topo
    # Move topology tensors into shared memory so this graph can be shared
    # across processes (see `share_ipc` and the ForkingPickler hooks).
    self.topo.share_memory_()
    self.mode = mode.upper()
    self.device = device
    if self.mode != 'CPU' and self.device is not None:
      self.device = int(self.device)
      assert (
        self.device >= 0 and self.device < torch.cuda.device_count()
      ), f"'{self.__class__.__name__}': invalid device rank {self.device}"
    # Underlying pywrap.Graph, created lazily on first use.
    self._graph = None

  def lazy_init(self):
    r""" Build the underlying graph object on first access (idempotent). """
    if self._graph is not None:
      return
    self._graph = pywrap.Graph()
    indptr = self.topo.indptr
    indices = self.topo.indices
    # The C++ side expects concrete tensors, so substitute empty ones for
    # missing edge ids / weights.
    if self.topo.edge_ids is not None:
      edge_ids = self.topo.edge_ids
    else:
      edge_ids = torch.empty(0)
    if self.topo.edge_weights is not None:
      edge_weights = self.topo.edge_weights
    else:
      edge_weights = torch.empty(0)
    if self.mode == 'CPU':
      self._graph.init_cpu_from_csr(indptr, indices, edge_ids, edge_weights)
    else:
      if self.device is None:
        self.device = torch.cuda.current_device()
      # NOTE(review): edge_weights are only forwarded in 'CPU' mode; the
      # CUDA initializers below do not receive them — confirm whether
      # weighted operations are supported on GPU.
      if self.mode == 'CUDA':
        self._graph.init_cuda_from_csr(
          indptr, indices, self.device, pywrap.GraphMode.DMA, edge_ids
        )
      elif self.mode == 'ZERO_COPY':
        self._graph.init_cuda_from_csr(
          indptr, indices, self.device, pywrap.GraphMode.ZERO_COPY, edge_ids
        )
      else:
        raise ValueError(f"'{self.__class__.__name__}': "
                         f"invalid mode {self.mode}")

  def export_topology(self):
    r""" Return the raw (indptr, indices, edge_ids) topology tensors. """
    return self.topo.indptr, self.topo.indices, self.topo.edge_ids

  def share_ipc(self):
    r""" Create ipc handle for multiprocessing.

    Returns:
      A tuple of topo and graph mode.
    """
    return self.topo, self.mode

  @classmethod
  def from_ipc_handle(cls, ipc_handle):
    r""" Create from an ipc handle produced by :meth:`share_ipc`.
    The device is intentionally left unset and re-resolved lazily in the
    receiving process.
    """
    topo, mode = ipc_handle
    return cls(topo, mode, device=None)

  @property
  def row_count(self):
    self.lazy_init()
    return self._graph.get_row_count()

  @property
  def col_count(self):
    self.lazy_init()
    return self._graph.get_col_count()

  @property
  def edge_count(self):
    self.lazy_init()
    return self._graph.get_edge_count()

  @property
  def graph_handler(self):
    r""" Get a pointer to the underlying graph object for graph operations
    such as sampling.
    """
    self.lazy_init()
    return self._graph
## Pickling Registration
def rebuild_graph(ipc_handle):
  r""" Unpickling hook: reconstruct a ``Graph`` from its ipc handle. """
  return Graph.from_ipc_handle(ipc_handle)

def reduce_graph(graph: Graph):
  r""" Pickling hook: reduce a ``Graph`` to its ipc handle plus the
  function that rebuilds it on the receiving side. """
  return rebuild_graph, (graph.share_ipc(),)

ForkingPickler.register(Graph, reduce_graph)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/table_dataset.py | graphlearn_torch/python/data/table_dataset.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import datetime
from multiprocessing.reduction import ForkingPickler
import numpy as np
import torch
import time
try:
import common_io
except ImportError:
pass
from .dataset import Dataset
class TableDataset(Dataset):
  # A Dataset that is populated by streaming ODPS tables through the
  # `common_io` table reader.
  def load(self,
           edge_tables=None,
           node_tables=None,
           graph_mode='ZERO_COPY',
           sort_func=None,
           split_ratio=0.0,
           device_group_list=None,
           directed=True,
           reader_threads=10,
           reader_capacity=10240,
           reader_batch_size=1024,
           label=None,
           device=None,
           **kwargs):
    """ Creates `Dataset` from ODPS tables.

    Args:
      edge_tables: A dict({edge_type : odps_table}) denoting each
        bipartite graph input table of heterogeneous graph, where edge_type
        is a tuple of (src_type, edge_type, dst_type).
      node_tables: A dict({node_type(str) : odps_table}) denoting each
        input node table.
      graph_mode: mode in graphlearn_torch's `Graph`, 'CPU', 'ZERO_COPY'
        or 'CUDA'.
      sort_func: function for feature reordering, return feature data
        (2D tenosr) and a map (1D tensor) from id to index.
      split_ratio: The proportion of data allocated to the GPU, between
        0 and 1.
      device_group_list: A list of `DeviceGroup`. Each DeviceGroup must have
        the same size. A group of GPUs with peer-to-peer access to each
        other should be set in the same device group for high feature
        collection performance.
      directed: A Boolean value indicating whether the graph topology is
        directed.
      reader_threads: The number of threads of table reader.
      reader_capacity: The capacity of table reader.
      reader_batch_size: The number of records read at once.
      label: A CPU torch.Tensor(homo) or a Dict[NodeType, torch.Tensor]
        (hetero) with the label data for graph nodes.
      device: The target cuda device rank to perform graph operations and
        feature lookups.
    """
    assert isinstance(edge_tables, dict)
    assert isinstance(node_tables, dict)
    edge_index, feature = {}, {}
    # A single entry means a homogeneous graph: plain tensors are stored
    # instead of per-type dicts.
    edge_hetero = (len(edge_tables) > 1)
    node_hetero = (len(node_tables) > 1)
    print("Start Loading edge and node tables...")
    step = 0
    start_time = time.time()
    for e_type, table in edge_tables.items():
      edge_list = []
      reader = common_io.table.TableReader(table,
                                           num_threads=reader_threads,
                                           capacity=reader_capacity)
      while True:
        try:
          data = reader.read(reader_batch_size, allow_smaller_final_batch=True)
          edge_list.extend(data)
          step += 1
        except common_io.exception.OutOfRangeException:
          # End of table: close the reader and stop this loop.
          reader.close()
          break
        # Periodic progress logging (every 1000 batches).
        # NOTE(review): `step` is cumulative across edge tables, so the
        # "edges" count printed here mixes all edge types.
        if step % 1000 == 0:
          print(f"{datetime.datetime.now()}: load "
                f"{step * reader_batch_size} edges.")
      # Each record is (src_id, dst_id, ...); build a 2 x E int64 array.
      rows = [e[0] for e in edge_list]
      cols = [e[1] for e in edge_list]
      edge_array = np.stack([np.array(rows, dtype=np.int64),
                             np.array(cols, dtype=np.int64)])
      if edge_hetero:
        edge_index[e_type] = edge_array
      else:
        edge_index = edge_array
      # Free the per-table intermediates early to bound peak memory.
      del rows
      del cols
      del edge_list
    step = 0
    for n_type, table in node_tables.items():
      feature_list = []
      reader = common_io.table.TableReader(table,
                                           num_threads=reader_threads,
                                           capacity=reader_capacity)
      while True:
        try:
          data = reader.read(reader_batch_size, allow_smaller_final_batch=True)
          feature_list.extend(data)
          step += 1
        except common_io.exception.OutOfRangeException:
          reader.close()
          break
        if step % 1000 == 0:
          print(f"{datetime.datetime.now()}: load "
                f"{step * reader_batch_size} nodes.")
      # Records are (node_id, feature_string); reorder rows so that row i
      # holds the features of node id i (assumes ids are 0..N-1 —
      # TODO confirm).
      ids = torch.tensor([feat[0] for feat in feature_list], dtype=torch.long)
      _, original_index = torch.sort(ids)
      # Feature column is a ':'-separated list of floats, possibly as bytes.
      if isinstance(feature_list[0][1], bytes):
        float_feat= [
          list(map(float, feat[1].decode().split(':')))
          for feat in feature_list
        ]
      else:
        float_feat= [
          list(map(float, feat[1].split(':')))
          for feat in feature_list
        ]
      if node_hetero:
        feature[n_type] = torch.tensor(float_feat)[original_index]
      else:
        feature = torch.tensor(float_feat)[original_index]
      del ids
      del original_index
      del float_feat
      del feature_list
    load_time = (time.time() - start_time) / 60
    print(f'Loading table completed in {load_time:.2f} minutes.')
    # Hand the assembled tensors to the base-class initializers.
    self.init_graph(edge_index=edge_index,
                    edge_ids=None,
                    edge_weights=None,
                    layout='COO',
                    graph_mode=graph_mode,
                    directed=directed,
                    device=device)
    self.init_node_features(feature, None, sort_func, split_ratio,
                            device_group_list, device)
    self.init_node_labels(label)
## Pickling Registration
def rebuild_table_dataset(ipc_handle):
  r""" Unpickling hook: reconstruct a ``TableDataset`` from its ipc
  handle. """
  return TableDataset.from_ipc_handle(ipc_handle)

def reduce_table_dataset(dataset: TableDataset):
  r""" Pickling hook: reduce a ``TableDataset`` to its ipc handle plus the
  function that rebuilds it on the receiving side. """
  return rebuild_table_dataset, (dataset.share_ipc(),)

ForkingPickler.register(TableDataset, reduce_table_dataset)
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/unified_tensor.py | graphlearn_torch/python/data/unified_tensor.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List
import torch
from .. import py_graphlearn_torch as pywrap
class UnifiedTensor(object):
  r""" A tensor abstraction that unifies CPU and multi-GPU storage for
  direct GPU access. CPU-resident parts are pinned so the GPU can read
  them via ZERO-COPY; GPU-resident parts require peer-to-peer access
  between devices (such as NVLink).

  Args:
    current_device (int): The GPU device on which the underlying cuda
      lookup kernels are launched.
    dtype (torch.dtype): The data type of the tensor elements.
  """

  def __init__(self, current_device: int, dtype: torch.dtype = torch.float32):
    self.current_device = current_device
    self.dtype = dtype
    self.unified_tensor = pywrap.UnifiedTensor(current_device, dtype)
    # CPU-resident part of the data (kept around for ipc sharing).
    self.cpu_part = None

  def __getitem__(self, ids):
    # Indices must live on the lookup device before gathering.
    return self.unified_tensor[ids.to(self.current_device)]

  def append_shared_tensor(self, shared_tensor):
    r""" Append a ``pywrap.SharedTensor`` — a GPU tensor that can be
    shared with other GPUs — to the underlying storage. """
    self.unified_tensor.append_shared_tensor(shared_tensor)

  def append_cpu_tensor(self, cpu_tensor: torch.Tensor):
    r""" Append a CPU tensor; it will be kept in pinned memory to enable
    ZERO-COPY reads. """
    self.unified_tensor.append_cpu_tensor(cpu_tensor)

  def init_from(self, tensors: List[torch.Tensor], tensor_devices: List[int]):
    r""" Initialize storage from a list of CPU tensors.

    Args:
      tensors: CPU tensors holding the data to be placed on the various
        GPUs and on CPU.
      tensor_devices: Placement for each tensor: -1 means CPU, a value
        >= 0 names a GPU. Must be index-aligned with ``tensors``.
    """
    self.unified_tensor.init_from(tensors, tensor_devices)

  @property
  def shape(self):
    return self.unified_tensor.shape()

  @property
  def device(self):
    return self.unified_tensor.device()

  @property
  def numel(self):
    return self.unified_tensor.numel()

  def size(self, dim):
    return self.unified_tensor.size(dim)

  def stride(self, dim):
    return self.unified_tensor.stride(dim)

  def share_ipc(self):
    r""" Export ipc handles for multiprocessing.

    Returns:
      A tuple of (list of cuda ipc handles, cpu part tensor).
    """
    handles = [
      shared.share_cuda_ipc()
      for shared in self.unified_tensor.share_cuda_ipc()
    ]
    return handles, self.cpu_part

  def from_ipc_handle(self, cuda_ipc_list, cpu_part):
    r""" Populate this instance from ipc handles.

    Args:
      cuda_ipc_list: CUDA ipc handles, in the same order as the devices.
      cpu_part: The CPU torch.Tensor part, or ``None``.
    """
    for handle in cuda_ipc_list:
      shared = pywrap.SharedTensor()
      shared.from_cuda_ipc(handle)
      self.unified_tensor.append_shared_tensor(shared)
    if cpu_part is not None and cpu_part.numel() > 0:
      self.cpu_part = cpu_part
      self.unified_tensor.append_cpu_tensor(cpu_part)

  @classmethod
  def new_from_ipc(cls, ipc_handles, current_device: int, dtype: torch.dtype):
    r""" Build a new ``UnifiedTensor`` from ipc handles.

    Args:
      ipc_handles: A (cuda ipc list, cpu part tensor) pair as produced by
        :meth:`share_ipc`.
      current_device (int): The GPU device on which lookup kernels are
        launched.
      dtype (torch.dtype): The data type of the tensor elements.

    Returns:
      A ``UnifiedTensor`` instance.
    """
    cuda_ipc_list, cpu_part = ipc_handles
    instance = cls(current_device, dtype)
    instance.from_ipc_handle(cuda_ipc_list, cpu_part)
    return instance
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/dataset.py | graphlearn_torch/python/data/dataset.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from multiprocessing.reduction import ForkingPickler
from typing import Dict, List, Optional, Union, Literal, Tuple
from collections.abc import Sequence
import torch
from ..typing import NodeType, EdgeType, TensorDataType, NodeLabel, NodeIndex
from ..utils import convert_to_tensor, share_memory, squeeze
from .feature import DeviceGroup, Feature
from .graph import Topology, Graph
class Dataset(object):
r""" A dataset manager for all graph topology and feature data.
"""
def __init__(
    self,
    graph: Union[Graph, Dict[EdgeType, Graph]] = None,
    node_features: Union[Feature, Dict[NodeType, Feature]] = None,
    edge_features: Union[Feature, Dict[EdgeType, Feature]] = None,
    node_labels: NodeLabel = None,
    edge_dir: Literal['in', 'out'] = 'out',
    node_split: Tuple[NodeIndex, NodeIndex, NodeIndex] = None,
):
  r""" Build a dataset from pre-constructed graph/feature/label storages.

  Args:
    graph: A ``Graph`` (homogeneous) or a dict of per-edge-type graphs.
    node_features: A ``Feature`` (homo) or per-node-type dict of features.
    edge_features: A ``Feature`` (homo) or per-edge-type dict of features.
    node_labels: Node label data; converted to tensor(s) and squeezed.
    edge_dir: Direction of the stored edges, 'in' or 'out'.
    node_split: Optional (train, val, test) node index triple.
  """
  self.graph = graph
  self.node_features = node_features
  self.edge_features = edge_features
  self.node_labels = squeeze(convert_to_tensor(node_labels))
  self.edge_dir = edge_dir
  if node_split is None:
    self.train_idx = self.val_idx = self.test_idx = None
  else:
    self.train_idx, self.val_idx, self.test_idx = squeeze(
      convert_to_tensor(node_split))
def init_graph(
    self,
    edge_index: Union[TensorDataType, Dict[EdgeType, TensorDataType]] = None,
    edge_ids: Union[TensorDataType, Dict[EdgeType, TensorDataType]] = None,
    edge_weights: Union[TensorDataType, Dict[EdgeType, TensorDataType]] = None,
    layout: Union[str, Dict[EdgeType, str]] = 'COO',
    graph_mode: str = 'ZERO_COPY',
    directed: bool = False,
    device: Optional[int] = None
):
  r""" Initialize the graph storage and build the object of `Graph`.

  Args:
    edge_index (torch.Tensor or numpy.ndarray): Edge index for graph topo,
      2D CPU tensor/numpy.ndarray(homo). A dict should be provided for
      heterogenous graph. (default: ``None``)
    edge_ids (torch.Tensor or numpy.ndarray): Edge ids for graph edges, A
      CPU tensor (homo) or a Dict[EdgeType, torch.Tensor](hetero).
      (default: ``None``)
    edge_weights (torch.Tensor or numpy.ndarray): Edge weights for graph
      edges, A CPU tensor (homo) or a Dict[EdgeType, torch.Tensor](hetero).
      (default: ``None``)
    layout (str): The edge layout representation for the input edge index,
      should be 'COO', 'CSR' or 'CSC'. (default: 'COO')
    graph_mode (str): Mode in graphlearn_torch's ``Graph``, 'CPU',
      'ZERO_COPY' or 'CUDA'. (default: 'ZERO_COPY')
    directed (bool): A Boolean value indicating whether the graph topology
      is directed. (default: ``False``)
    device (torch.device): The target cuda device rank used for graph
      operations when graph mode is not "CPU". (default: ``None``)
  """
  edge_index = convert_to_tensor(edge_index, dtype=torch.int64)
  edge_ids = convert_to_tensor(edge_ids, dtype=torch.int64)
  edge_weights = convert_to_tensor(edge_weights, dtype=torch.float)
  self._directed = directed
  if edge_index is not None:
    if isinstance(edge_index, dict):
      # heterogeneous: build one Topology/Graph per edge type.
      if edge_ids is not None:
        assert isinstance(edge_ids, dict)
      else:
        edge_ids = {}
      if edge_weights is not None:
        assert isinstance(edge_weights, dict)
      else:
        edge_weights = {}
      if not isinstance(layout, dict):
        # Broadcast a single layout string to every edge type.
        layout = {etype: layout for etype in edge_index.keys()}
      topo_dict = {}
      for etype, e_idx in edge_index.items():
        topo_dict[etype] = Topology(
          edge_index=e_idx,
          edge_ids=edge_ids.get(etype, None),
          edge_weights=edge_weights.get(etype, None),
          input_layout=layout[etype],
          # 'out' edges are compressed by source row (CSR); 'in' edges by
          # destination column (CSC).
          layout='CSR' if self.edge_dir == 'out' else 'CSC',
        )
      self.graph = {}
      for etype, topo in topo_dict.items():
        g = Graph(topo, graph_mode, device)
        # Materialize the underlying graph eagerly.
        g.lazy_init()
        self.graph[etype] = g
    else:
      # homogeneous.
      topo = Topology(edge_index, edge_ids, edge_weights, input_layout=layout,
                      layout='CSR' if self.edge_dir == 'out' else 'CSC')
      self.graph = Graph(topo, graph_mode, device)
      self.graph.lazy_init()
def random_node_split(
    self,
    num_val: Union[float, int],
    num_test: Union[float, int],
):
  r"""Performs a node-level random split by adding :obj:`train_idx`,
  :obj:`val_idx` and :obj:`test_idx` attributes to the
  :class:`~graphlearn_torch.data.Dataset` object. All nodes except
  those in the validation and test sets will be used for training.

  Args:
    num_val (int or float): The number of validation samples. If float,
      it represents the ratio of samples to include in the validation set.
    num_test (int or float): The number of test samples in case of
      :obj:`"train_rest"` and :obj:`"random"` split. If float, it
      represents the ratio of samples to include in the test set.
  """
  if isinstance(self.node_labels, dict):
    # Heterogeneous: split each node type independently.
    train_idx, val_idx, test_idx = {}, {}, {}
    for ntype, labels in self.node_labels.items():
      train_idx[ntype], val_idx[ntype], test_idx[ntype] = \
        random_split(labels.shape[0], num_val, num_test)
  else:
    train_idx, val_idx, test_idx = random_split(
      self.node_labels.shape[0], num_val, num_test)
  self.init_node_split((train_idx, val_idx, test_idx))
def load_vineyard(
    self,
    vineyard_id: str,
    vineyard_socket: str,
    edges: List[EdgeType],
    edge_weights: Dict[EdgeType, str] = None,
    node_features: Dict[NodeType, List[str]] = None,
    edge_features: Dict[EdgeType, List[str]] = None,
    node_labels: Dict[NodeType, str] = None,
):
  r""" Populate this dataset from a graph stored in vineyard.

  Args:
    vineyard_id: The vineyard object id of the graph.
    vineyard_socket: The vineyard IPC socket path.
    edges: Edge types to load, each a (src_type, edge_type, dst_type)
      tuple. A single entry with src_type == dst_type is treated as a
      homogeneous graph.
    edge_weights: Optional mapping from edge type to the property name
      holding its weights.
    node_features: Optional mapping from node type to feature property
      names.
    edge_features: Optional mapping from edge type to feature property
      names.
    node_labels: Optional mapping from node type to its label property
      name.
  """
  # TODO(hongyi): GPU support
  is_homo = len(edges) == 1 and edges[0][0] == edges[0][2]
  # Imported lazily so that vineyard is only required when this path is
  # actually used.
  from .vineyard_utils import \
    vineyard_to_csr, load_vertex_feature_from_vineyard, \
    load_edge_feature_from_vineyard, VineyardGid2Lid
  _edge_index = {}
  _edge_ids = {}
  _edge_weights = {}
  layout = {}
  for etype in edges:
    # 'out' edges are compressed from the source side, 'in' edges from the
    # destination side.
    src_ntype = etype[0] if self.edge_dir == "out" else etype[2]
    indptr, indices, edge_id = vineyard_to_csr(vineyard_socket, \
      vineyard_id, src_ntype, etype[1], self.edge_dir, True)
    # init_graph expects (row, col); for CSR row is the indptr, for CSC
    # col is.
    _edge_index[etype] = (indptr, indices) if self.edge_dir == \
      "out" else (indices, indptr)
    _edge_ids[etype] = edge_id
    layout[etype] = "CSR" if self.edge_dir == "out" else "CSC"
    if edge_weights:
      etype_edge_weights_label_name = edge_weights.get(etype)
      if etype_edge_weights_label_name:
        # Weights are loaded as an (E, 1) feature and squeezed to 1D.
        _edge_weights[etype] = torch.squeeze(
          load_edge_feature_from_vineyard(vineyard_socket, vineyard_id, \
            [etype_edge_weights_label_name], etype[1]))
  if is_homo:
    # Collapse the per-type dicts to plain values for homogeneous graphs.
    # NOTE(review): `ntype` actually holds the full edge-type tuple here.
    ntype = edges[0]
    _edge_index = _edge_index[ntype]
    _edge_ids = _edge_ids[ntype]
    _edge_weights = _edge_weights.get(ntype)
    layout = "CSR" if self.edge_dir == "out" else "CSC"
  self.init_graph(edge_index=_edge_index, edge_ids=_edge_ids, \
    layout=layout, graph_mode='CPU', edge_weights=_edge_weights)
  # load node features
  if node_features:
    node_feature_data = {}
    id2idx = {}
    for ntype, property_names in node_features.items():
      node_feature_data[ntype] = \
        load_vertex_feature_from_vineyard(vineyard_socket, vineyard_id, property_names, ntype)
      # Map vineyard global ids to local indices for feature lookups.
      id2idx[ntype] = VineyardGid2Lid(vineyard_socket, vineyard_id, ntype)
    if is_homo:
      node_feature_data = node_feature_data[edges[0][0]]
      id2idx = VineyardGid2Lid(vineyard_socket, vineyard_id, edges[0][0])
    self.init_node_features(node_feature_data=node_feature_data, id2idx=id2idx, with_gpu=False)
  # load edge features
  if edge_features:
    edge_feature_data = {}
    # NOTE(review): unclear why `edge_features` may arrive as a tuple —
    # confirm against callers.
    if isinstance(edge_features, tuple):
      edge_features = edge_features[0]
    for etype, property_names in edge_features.items():
      edge_feature_data[etype] = \
        load_edge_feature_from_vineyard(vineyard_socket, vineyard_id, property_names, etype[1])
    if is_homo:
      edge_feature_data = edge_feature_data[edges[0]]
    self.init_edge_features(edge_feature_data=edge_feature_data, with_gpu=False)
  # load node labels
  if node_labels:
    node_label_data = {}
    id2idx = {}
    for ntype, label_property_name in node_labels.items():
      node_label_data[ntype] = \
        load_vertex_feature_from_vineyard(vineyard_socket, vineyard_id, [label_property_name], ntype)
      id2idx[ntype] = VineyardGid2Lid(vineyard_socket, vineyard_id, ntype)
    if is_homo:
      node_label_data = node_label_data[edges[0][0]]
      id2idx = VineyardGid2Lid(vineyard_socket, vineyard_id, edges[0][0])
    self.init_node_labels(node_label_data=node_label_data, id2idx=id2idx)
def init_node_features(
  self,
  node_feature_data: Union[TensorDataType, Dict[NodeType, TensorDataType]] = None,
  id2idx: Union[TensorDataType, Dict[NodeType, TensorDataType],
                Sequence, Dict[NodeType, Sequence]] = None,
  sort_func = None,
  split_ratio: Union[float, Dict[NodeType, float]] = 0.0,
  device_group_list: Optional[List[DeviceGroup]] = None,
  device: Optional[int] = None,
  with_gpu: bool = True,
  dtype: Optional[torch.dtype] = None
):
  r""" Initialize the node feature storage.

  Args:
    node_feature_data (torch.Tensor or numpy.ndarray): A tensor of the raw
      node feature data, should be a dict for heterogeneous graph nodes.
      (default: ``None``)
    id2idx (torch.Tensor or numpy.ndarray): A tensor that maps node id to
      index, should be a dict for heterogeneous graph nodes.
      (default: ``None``)
    sort_func: Function for reordering node features. Currently, only features
      of homogeneous nodes are supported to reorder. (default: ``None``)
    split_ratio (float): The proportion (between 0 and 1) of node feature data
      allocated to the GPU, should be a dict for heterogeneous graph nodes.
      (default: ``0.0``)
    device_group_list (List[DeviceGroup]): A list of device groups used for
      node feature lookups, the GPU part of feature data will be replicated on
      each device group in this list during the initialization. GPUs with
      peer-to-peer access to each other should be set in the same device
      group properly. (default: ``None``)
    device (torch.device): The target cuda device rank used for node feature
      lookups when the GPU part is not None. (default: ``None``)
    with_gpu (bool): A Boolean value indicating whether the ``Feature`` uses
      ``UnifiedTensor``. If True, it means ``Feature`` consists of
      ``UnifiedTensor``, otherwise ``Feature`` is PyTorch CPU Tensor and
      ``split_ratio``, ``device_group_list`` and ``device`` will be invalid.
      (default: ``True``)
    dtype (torch.dtype): The data type of node feature elements, if not
      specified, it will be automatically inferred. (Default: ``None``).
  """
  if node_feature_data is not None:
    node_feature_data = convert_to_tensor(node_feature_data, dtype)
    id2idx = convert_to_tensor(id2idx)
    # Only reorder when the caller did not supply an explicit id mapping:
    # ``sort_func`` produces both the reordered features and a new id2idx.
    if id2idx is None and sort_func is not None:
      if isinstance(node_feature_data, dict):
        logging.warning("'%s': reordering heterogenous graph node features "
                        "is not supported now.", self.__class__.__name__)
      elif self.graph is not None:
        # reorder node features of homogeneous graph.
        assert isinstance(self.graph, Graph)
        if self._directed is None or not self._directed:
          topo_rev = self.graph.topo
        else:
          # For a directed graph, sort on the reversed topology (COO with
          # row/col swapped) — presumably so that in-degrees drive the
          # ordering; confirm against ``sort_func`` implementations.
          row, col, eids, weights = self.graph.topo.to_coo()
          topo_rev = Topology((col, row), eids, weights, input_layout='COO',
                              layout='CSR' if self.edge_dir == 'out' else 'CSC')
        node_feature_data, id2idx = \
          sort_func(node_feature_data, split_ratio, topo_rev)
    self.node_features = _build_features(
      node_feature_data, id2idx, split_ratio,
      device_group_list, device, with_gpu, dtype
    )
def init_edge_features(
  self,
  edge_feature_data: Union[TensorDataType, Dict[EdgeType, TensorDataType]] = None,
  id2idx: Union[TensorDataType, Dict[EdgeType, TensorDataType]] = None,
  split_ratio: Union[float, Dict[EdgeType, float]] = 0.0,
  device_group_list: Optional[List[DeviceGroup]] = None,
  device: Optional[int] = None,
  with_gpu: bool = True,
  dtype: Optional[torch.dtype] = None
):
  r""" Initialize the edge feature storage.

  Args:
    edge_feature_data (torch.Tensor or numpy.ndarray): Raw edge feature
      data; a dict keyed by edge type for heterogeneous graphs.
      (default: ``None``)
    id2idx (torch.Tensor or numpy.ndarray): Maps edge id to row index in
      the feature tensor; a dict for heterogeneous graphs.
      (default: ``None``)
    split_ratio (float): Proportion (between 0 and 1) of edge feature data
      placed on the GPU; a dict for heterogeneous graphs. (default: ``0.0``)
    device_group_list (List[DeviceGroup]): Device groups used for edge
      feature lookups; the GPU part is replicated on every group. GPUs with
      peer-to-peer access should share a group. (default: ``None``)
    device (torch.device): Target cuda device rank for lookups when a GPU
      part exists. (default: ``None``)
    with_gpu (bool): When True the ``Feature`` is backed by ``UnifiedTensor``;
      otherwise it is a plain CPU tensor and ``split_ratio``,
      ``device_group_list`` and ``device`` are invalid. (default: ``True``)
    dtype (torch.dtype): Element dtype; inferred when not given.
      (Default: ``None``).
  """
  if edge_feature_data is None:
    return
  self.edge_features = _build_features(
    convert_to_tensor(edge_feature_data, dtype),
    convert_to_tensor(id2idx),
    split_ratio, device_group_list, device, with_gpu, dtype
  )
def init_node_labels(
  self,
  node_label_data: Union[TensorDataType, Dict[NodeType, TensorDataType]] = None,
  id2idx: Union[TensorDataType, Dict[NodeType, TensorDataType], \
                Sequence, Dict[NodeType, Sequence]] = None
):
  r""" Initialize the node label storage.

  Args:
    node_label_data (torch.Tensor or numpy.ndarray): Raw node label data;
      a dict keyed by node type for heterogeneous graphs. (default: ``None``)
    id2idx (torch.Tensor or numpy.ndarray): Maps a global node id to its
      local index; ``None`` for a GLT (non-v6d) graph. (default: ``None``)
  """
  if node_label_data is None:
    return
  if id2idx is None:
    # GLT (non-v6d) graph: labels are already local, store them directly.
    self.node_labels = squeeze(convert_to_tensor(node_label_data))
    return
  # v6d graph: labels are partitioned into fragments and are handled in the
  # same approach as a distributed feature (CPU-only, no GPU split).
  labels = convert_to_tensor(node_label_data, dtype=torch.int64)
  self.node_labels = _build_features(labels, convert_to_tensor(id2idx),
                                     0.0, None, None, False, None)
def init_node_split(
  self,
  node_split: Tuple[NodeIndex, NodeIndex, NodeIndex] = None,
):
  r"""Initialize the node split.

  Args:
    node_split (tuple): The (train, validation, test) node index tensors.
      (default: ``None``)
  """
  if node_split is None:
    return
  train, val, test = squeeze(convert_to_tensor(node_split))
  self.train_idx = train
  self.val_idx = val
  self.test_idx = test
def share_ipc(self):
  """Move label and split tensors into shared memory and return the
  pickle-able state consumed by :meth:`from_ipc_handle`."""
  for attr in ('node_labels', 'train_idx', 'val_idx', 'test_idx'):
    setattr(self, attr, share_memory(getattr(self, attr)))
  return (self.graph, self.node_features, self.edge_features,
          self.node_labels, self.edge_dir,
          (self.train_idx, self.val_idx, self.test_idx))
@classmethod
def from_ipc_handle(cls, ipc_handle):
  """Rebuild a dataset from the tuple produced by :meth:`share_ipc`."""
  graph, node_feats, edge_feats, labels, edge_dir, split = ipc_handle
  return cls(graph, node_feats, edge_feats, labels, edge_dir, split)
def get_graph(self, etype: Optional[EdgeType] = None):
  """Return the homogeneous graph, or the graph of *etype* when the
  dataset stores a per-edge-type dict; ``None`` if no graph is set."""
  stored = self.graph
  if isinstance(stored, Graph):
    return stored
  if isinstance(stored, dict):
    assert etype is not None
    return stored.get(etype, None)
  return None
def get_node_types(self):
  """Return the node types of a heterogeneous graph (cached), or ``None``
  for a homogeneous graph."""
  if not isinstance(self.graph, dict):
    return None
  if not hasattr(self, '_node_types'):
    # Node types are the source/destination endpoints of every edge type.
    self._node_types = list({
      endpoint for etype in self.graph for endpoint in (etype[0], etype[2])
    })
  return self._node_types
def get_edge_types(self):
  """Return the edge types of a heterogeneous graph (cached), or ``None``
  for a homogeneous graph."""
  if not isinstance(self.graph, dict):
    return None
  if not hasattr(self, '_edge_types'):
    self._edge_types = list(self.graph)
  return self._edge_types
def get_node_feature(self, ntype: Optional[NodeType] = None):
  """Return the node feature store, selected by *ntype* when the dataset
  keeps a per-node-type dict; ``None`` if no features are set."""
  feats = self.node_features
  if isinstance(feats, Feature):
    return feats
  if isinstance(feats, dict):
    assert ntype is not None
    return feats.get(ntype, None)
  return None
def get_edge_feature(self, etype: Optional[EdgeType] = None):
  """Return the edge feature store, selected by *etype* when the dataset
  keeps a per-edge-type dict; ``None`` if no features are set."""
  feats = self.edge_features
  if isinstance(feats, Feature):
    return feats
  if isinstance(feats, dict):
    assert etype is not None
    return feats.get(etype, None)
  return None
def get_node_label(self, ntype: Optional[NodeType] = None):
  """Return node labels, selected by *ntype* when stored per node type;
  ``None`` if no labels are set."""
  labels = self.node_labels
  if isinstance(labels, (Feature, torch.Tensor)):
    return labels
  if isinstance(labels, dict):
    assert ntype is not None
    return labels.get(ntype, None)
  return None
def __getitem__(self, key):
  # Dict-style read access to dataset attributes; unknown keys yield None.
  return getattr(self, key, None)
def __setitem__(self, key, value):
  # Dict-style write access: stores ``value`` as an attribute named ``key``.
  setattr(self, key, value)
def _build_features(feature_data, id2idx, split_ratio,
                    device_group_list, device, with_gpu, dtype):
  r""" Build ``Feature`` stores for node/edge feature data.

  Returns a single ``Feature`` for homogeneous input, a dict of ``Feature``
  keyed by node/edge type for heterogeneous input, or ``None`` when
  *feature_data* is ``None``.
  """
  if feature_data is None:
    return None
  if not isinstance(feature_data, dict):
    # Homogeneous: one Feature store over the whole tensor.
    return Feature(
      feature_data, id2idx, float(split_ratio),
      device_group_list, device, with_gpu,
      dtype if dtype is not None else feature_data.dtype
    )
  # Heterogeneous: one Feature store per graph type.
  if not isinstance(split_ratio, dict):
    split_ratio = {gtype: float(split_ratio) for gtype in feature_data}
  if id2idx is None:
    id2idx = {}
  else:
    assert isinstance(id2idx, dict)
  return {
    gtype: Feature(
      feat, id2idx.get(gtype, None), split_ratio.get(gtype, 0.0),
      device_group_list, device, with_gpu,
      dtype if dtype is not None else feat.dtype
    )
    for gtype, feat in feature_data.items()
  }
## Pickling Registration

def rebuild_dataset(ipc_handle):
  # Inverse of ``reduce_dataset``: restore a Dataset in the child process.
  return Dataset.from_ipc_handle(ipc_handle)

def reduce_dataset(dataset: Dataset):
  # Teach multiprocessing how to pickle a Dataset via its shared-memory state.
  return (rebuild_dataset, (dataset.share_ipc(),))

ForkingPickler.register(Dataset, reduce_dataset)
def random_split(
  num_total: int,
  num_val: Union[float, int],
  num_test: Union[float, int],
):
  r"""Randomly split ``range(num_total)`` into train/val/test index tensors.

  Args:
    num_total: Total number of indices to split.
    num_val: Validation size, an absolute count (int) or a fraction of
      ``num_total`` (float).
    num_test: Test size, with the same int/float convention.

  Returns:
    ``(train_idx, val_idx, test_idx)`` — disjoint 1-D LongTensors covering
    ``range(num_total)``; train gets the remainder.
  """
  if isinstance(num_val, float):
    num_val = round(num_total * num_val)
  if isinstance(num_test, float):
    num_test = round(num_total * num_test)
  shuffled = torch.randperm(num_total)
  val_idx = shuffled[:num_val].clone()
  test_idx = shuffled[num_val:num_val + num_test].clone()
  train_idx = shuffled[num_val + num_test:].clone()
  return train_idx, val_idx, test_idx
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/vineyard_utils.py | graphlearn_torch/python/data/vineyard_utils.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
try:
import torch
from typing import Dict
from collections.abc import Sequence
from .. import py_graphlearn_torch_vineyard as pywrap
except ImportError:
pass
from ..partition import PartitionBook
def vineyard_to_csr(sock, fid, v_label_name, e_label_name, edge_dir, haseid=0):
  '''Read a graph fragment from vineyard in CSR form.

  Wraps ``pywrap.vineyard_to_csr`` and returns ``(indptr, indices)``,
  plus an ``edge_id`` tensor when ``haseid`` is non-zero.
  '''
  return pywrap.vineyard_to_csr(sock, fid, v_label_name, e_label_name, edge_dir, haseid)
def load_vertex_feature_from_vineyard(sock, fid, vcols, v_label_name):
  '''Read the vertex feature columns ``vcols`` of label ``v_label_name``
  from the vineyard fragment ``fid``; returns a torch.Tensor.

  Note: the last two arguments are passed to pywrap in swapped order
  (label name before column list).
  '''
  return pywrap.load_vertex_feature_from_vineyard(sock, fid, v_label_name, vcols)
def load_edge_feature_from_vineyard(sock, fid, ecols, e_label_name):
  '''Read the edge feature columns ``ecols`` of label ``e_label_name``
  from the vineyard fragment ``fid``; returns a torch.Tensor.

  Note: the last two arguments are passed to pywrap in swapped order
  (label name before column list).
  '''
  return pywrap.load_edge_feature_from_vineyard(sock, fid, e_label_name, ecols)
def get_fid_from_gid(gid):
  '''Return the fragment id encoded in the global vertex id ``gid``
  (thin wrapper over ``pywrap.get_fid_from_gid``).
  '''
  return pywrap.get_fid_from_gid(gid)
def get_frag_vertex_offset(sock, fid, v_label_name):
  '''Return the starting (global) vertex id of label ``v_label_name`` in
  fragment ``fid`` (wraps ``pywrap.get_frag_vertex_offset``).
  '''
  return pywrap.get_frag_vertex_offset(sock, fid, v_label_name)
def get_frag_vertex_num(sock, fid, v_label_name):
  '''Return the number of vertices of label ``v_label_name`` in fragment
  ``fid`` (wraps ``pywrap.get_frag_vertex_num``).
  '''
  return pywrap.get_frag_vertex_num(sock, fid, v_label_name)
class VineyardPartitionBook(PartitionBook):
  """Partition book backed by a vineyard fragment.

  Maps global vertex ids to partition ids by resolving each gid's fragment
  id and, when *fid2pid* is given, translating fragment ids to partition
  ids through that mapping.
  """
  def __init__(self, sock, obj_id, v_label_name, fid2pid: Dict=None):
    self._sock = sock
    self._obj_id = obj_id
    self._v_label_name = v_label_name
    # Fragment handle is created lazily on the first ``gid2fid`` call.
    self._frag = None
    self._offset = get_frag_vertex_offset(sock, obj_id, v_label_name)
    # TODO: optimise this query process if too slow
    self._fid2pid = fid2pid

  def __getitem__(self, gids) -> torch.Tensor:
    fids = self.gid2fid(gids)
    if self._fid2pid is None:
      return fids.to(torch.int32)
    mapped = torch.tensor([self._fid2pid[fid] for fid in fids])
    return mapped.to(torch.int32)

  @property
  def device(self):
    return torch.device('cpu')

  @property
  def offset(self):
    return self._offset

  def gid2fid(self, gids):
    '''Resolve the fragment id of every global id in ``gids``.'''
    if self._frag is None:
      self._frag = pywrap.VineyardFragHandle(self._sock, self._obj_id)
    return self._frag.get_fid_from_gid(gids.tolist())
class VineyardGid2Lid(Sequence):
  # Sequence-like mapping from global vertex ids to fragment-local ids:
  # local id = gid - fragment vertex offset; ``len`` is the fragment's
  # vertex count for the given label.
  def __init__(self, sock, fid, v_label_name):
    self._offset = get_frag_vertex_offset(sock, fid, v_label_name)
    self._vnum = get_frag_vertex_num(sock, fid, v_label_name)

  def __getitem__(self, gids):
    # Works elementwise for tensors/arrays as well as single ints.
    return gids - self._offset

  def __len__(self):
    return self._vnum
def v6d_id_select(srcs, p_mask, node_pb: PartitionBook):
  '''Return the local offsets, within *node_pb*'s partition, of the inner
  vertices of *srcs* selected by the boolean mask *p_mask*.
  '''
  selected_gids = torch.masked_select(srcs, p_mask)
  return selected_gids - node_pb.offset
def v6d_id_filter(node_pb: VineyardPartitionBook, partition_idx):
  '''
  Select the inner vertices that belong to a specific partition
  '''
  # NOTE(review): ``partition_idx`` is unused — the fragment handle derived
  # from ``node_pb`` already scopes the query to one fragment's inner
  # vertices; confirm whether the parameter was meant to filter further.
  frag = pywrap.VineyardFragHandle(node_pb._sock, node_pb._obj_id)
  inner_vertices = frag.get_inner_vertices(node_pb._v_label_name)
  return inner_vertices
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/__init__.py | graphlearn_torch/python/data/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .dataset import *
from .feature import *
from .graph import *
from .table_dataset import *
from .reorder import *
from .unified_tensor import *
from .vineyard_utils import *
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/reorder.py | graphlearn_torch/python/data/reorder.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
def sort_by_in_degree(cpu_tensor, shuffle_ratio, topo):
  """Reorder the first ``topo.row_count`` rows of *cpu_tensor* by descending
  degree, optionally shuffling the hottest ``shuffle_ratio`` fraction.

  Returns ``(reordered_tensor, old2new)`` where ``old2new[i]`` is the new
  row index of original row ``i``; returns ``(cpu_tensor, None)`` unchanged
  when *topo* is ``None``. Rows beyond ``row_count`` keep their positions.
  """
  if topo is None:
    return cpu_tensor, None
  row_count = topo.row_count
  assert cpu_tensor.size(0) >= row_count
  num_shuffled = int(row_count * shuffle_ratio)
  # Row order: highest degree first, then a random permutation of the head.
  _, old_idx = torch.sort(topo.degrees, descending=True)
  old_idx[:num_shuffled] = old_idx[torch.randperm(num_shuffled)]
  reordered = cpu_tensor[old_idx]
  if cpu_tensor.size(0) > row_count:
    cpu_tensor = torch.cat([reordered, cpu_tensor[row_count:]], dim=0)
  else:
    cpu_tensor = reordered
  old2new = torch.arange(cpu_tensor.size(0), dtype=torch.long)
  old2new[old_idx] = torch.arange(row_count, dtype=torch.long)
  return cpu_tensor, old2new
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/data/feature.py | graphlearn_torch/python/data/feature.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
from multiprocessing.reduction import ForkingPickler
from typing import List, Optional, Union
from collections.abc import Sequence
import torch
from ..typing import TensorDataType
from ..utils import convert_to_tensor, share_memory
from .unified_tensor import UnifiedTensor
# Serializes lazy IPC re-initialization of ``Feature`` instances across threads.
scope_lock = threading.Lock()
class DeviceGroup(object):
  r"""A group of GPUs with peer-to-peer access (NVLinks) to each other.

  Args:
    group_id: Integer rank identifying this device group.
    device_list: The devices that can access each other peer-to-peer.
  """
  def __init__(self, group_id: int, device_list: List[torch.device]):
    self.group_id = group_id
    self.device_list = device_list

  @property
  def size(self):
    # Current number of devices in the group.
    return len(self.device_list)
class Feature(object):
  r""" A class for feature storage and lookup with hardware topology awareness
  and high performance.

  According to ``split_ratio``, ``Feature`` splits the feature data into the
  GPU part and the CPU part(ZERO-COPY), and the GPU part is replicated between
  all device groups in the input device group list. Each GPU can p2p access data
  on other GPUs in the same ``DeviceGroup`` it belongs to, and can access data
  on CPU part with zero copy.

  Args:
    feature_tensor (torch.Tensor or numpy.ndarray): A CPU tensor of the raw
      feature data.
    id2index (torch.Tensor, optional):: A tensor mapping the node id to the
      index in the raw cpu feature tensor. If the feature data in the input
      ``feature_tensor`` are not consecutive and ordered by node ids, this
      parameter should be provided. (Default: ``None``).
    split_ratio (float): The proportion of feature data allocated to the GPU,
      between 0 and 1. (Default: ``0.0``).
    device_group_list (List[DeviceGroup], optional): A list of device groups
      used for feature lookups, the GPU part of feature data will be replicated
      on each device group in this list during the initialization. GPUs with
      peer-to-peer access to each other should be set in the same device group
      properly. Note that this parameter will be ignored if the ``split_ratio``
      set to zero. If set to ``None``, the GPU part will be replicated on all
      available GPUs got by ``torch.cuda.device_count()``, and each GPU device
      is an independent group. (Default: ``None``).
    device (int, optional): The target cuda device rank to perform feature
      lookups with the GPU part on the current ``Feature`` instance.
      The value of ``torch.cuda.current_device()`` will be used if set to
      ``None``(Default: ``None``).
    with_gpu (bool): A Boolean value indicating whether the ``Feature`` uses
      ``UnifiedTensor``. If True, it means ``Feature`` consists of
      ``UnifiedTensor``, otherwise ``Feature`` is PyTorch CPU Tensor and
      ``split_ratio``, ``device_group_list`` and ``device`` will be invalid.
      (Default: ``True``).
    dtype (torch.dtype): The data type of feature elements.
      (Default: ``torch.float32``).

  Example:
    >>> feat_tensor, id2index = sort_by_in_degree(feat_tensor, topo)
    >>> # suppose you have 8 GPUs.
    >>> # if there is no NVLink.
    >>> device_groups = [DeviceGroup(i, [i]) for i in range(8)]
    >>> # if there are NVLinks between GPU0-3 and GPU4-7.
    >>> device_groups = [DeviceGroup(0, [0,1,2,3]), DeviceGroup(1, [4,5,6,7])]
    >>> # Split the cpu feature tensor, of which the GPU part accounts for 60%.
    >>> # Launch the GPU kernel on device 0 for this ``Feature`` instance.
    >>> feature = Feature(feat_tensor, id2index, 0.6, device_groups, 0)
    >>> out = feature[input]

  TODO(baole): Support to automatically find suitable GPU groups. For now,
  you can use ``nvidia-smi topo -m`` to find the right groups.
  """
  def __init__(self,
               feature_tensor: TensorDataType,
               id2index: Optional[Union[torch.Tensor, Sequence]] = None,
               split_ratio: float = 0.0,
               device_group_list: Optional[List[DeviceGroup]] = None,
               device: Optional[int] = None,
               with_gpu: Optional[bool] = True,
               dtype: torch.dtype = torch.float32):
    self.feature_tensor = convert_to_tensor(feature_tensor, dtype)
    self.id2index = convert_to_tensor(id2index, dtype=torch.int64)
    self.split_ratio = float(split_ratio)
    self.device_group_list = device_group_list
    self.device = device
    self.with_gpu = with_gpu
    self.dtype = dtype
    # device rank -> group id, built from ``device_group_list``.
    self._device2group = {}
    # group id -> UnifiedTensor holding that group's GPU/CPU parts.
    self._unified_tensors = {}
    # Device-resident copy of ``id2index``, created on first lookup.
    self._cuda_id2index = None
    # Set only on instances rebuilt from an IPC handle (lazy init).
    self._ipc_handle = None
    self._cuda_ipc_handle_dict = None

    if self.feature_tensor is not None:
      # Keep the CPU tensor in shared memory so it can be pickled cheaply.
      self.feature_tensor = share_memory(self.feature_tensor.cpu())

    if self.with_gpu:
      if self.device_group_list is None:
        # Default: every visible GPU is its own single-device group.
        self.device_group_list = [
          DeviceGroup(i, [i]) for i in range(torch.cuda.device_count())]
      self._device2group = {}
      # All groups must have the same size so the GPU part splits evenly.
      group_size = self.device_group_list[0].size
      for dg in self.device_group_list:
        assert group_size == dg.size
        for d in dg.device_list:
          self._device2group[d] = dg.group_id
      if self.feature_tensor is not None:
        self._split_and_init()

  def __getitem__(self, ids: torch.Tensor):
    r""" Perform feature lookups with GPU part and CPU part.
    """
    if not self.with_gpu:
      return self.cpu_get(ids)
    self.lazy_init_with_ipc_handle()
    ids = ids.to(self.device)
    if self.id2index is not None:
      # Remap raw ids to row indices, using a device-side copy of the map.
      if self._cuda_id2index is None:
        self._cuda_id2index = self.id2index.to(self.device)
      ids = self._cuda_id2index[ids]
    group_id = self._device2group[self.device]
    unified_tensor = self._unified_tensors[group_id]
    return unified_tensor[ids]

  def cpu_get(self, ids: torch.Tensor):
    r""" Perform feature lookups only with CPU feature tensor.
    """
    self.lazy_init_with_ipc_handle()
    ids = ids.to('cpu')
    if self.id2index is not None:
      ids = self.id2index[ids]
    return self.feature_tensor[ids]

  def _check_and_set_device(self):
    # Resolve ``self.device`` to a concrete, valid cuda device rank.
    if self.device is None:
      self.device = torch.cuda.current_device()
    else:
      self.device = int(self.device)
      assert (
        self.device >= 0 and self.device < torch.cuda.device_count()
      ), f"'{self.__class__.__name__}': invalid device rank {self.device}"

  def _split(self, feature_tensor: torch.Tensor):
    # Split rows into (GPU part, CPU part) according to ``split_ratio``.
    device_part_size = int(feature_tensor.shape[0] * self.split_ratio)
    return feature_tensor[:device_part_size], feature_tensor[device_part_size:]

  def _split_and_init(self):
    r""" Split cpu feature tensor and initialize GPU part and CPU part.
    """
    self._check_and_set_device()
    device_part, cpu_part = self._split(self.feature_tensor)
    if device_part.shape[0] > 0: # GPU part
      for group in self.device_group_list:
        # Shard the GPU part evenly across the group's devices; the last
        # device takes any remainder rows.
        block_size = device_part.shape[0] // group.size
        unified_tensor = UnifiedTensor(group.device_list[0], self.dtype)
        tensors, tensor_devices = [], []
        cur_pos = 0
        for idx, device in enumerate(group.device_list):
          if idx == group.size - 1:
            tensors.append(device_part[cur_pos:])
          else:
            tensors.append(device_part[cur_pos:cur_pos + block_size])
          cur_pos += block_size
          tensor_devices.append(device)
        unified_tensor.init_from(tensors, tensor_devices)
        self._unified_tensors[group.group_id] = unified_tensor
    if cpu_part.numel() > 0: # CPU part
      # Attach the zero-copy CPU remainder to this device's group tensor.
      group_id = self._device2group[self.device]
      unified_tensor = self._unified_tensors.get(group_id, None)
      if unified_tensor is None:
        unified_tensor = UnifiedTensor(group_id, self.dtype)
      unified_tensor.append_cpu_tensor(cpu_part)
      self._unified_tensors[group_id] = unified_tensor

  def share_ipc(self):
    r""" Create ipc handle for multiprocessing.
    """
    # Instances rebuilt from an IPC handle just forward it unchanged.
    if self._ipc_handle is not None:
      return self._ipc_handle
    if self.id2index is not None and isinstance(self.id2index, torch.Tensor):
      self.id2index = self.id2index.cpu()
      self.id2index.share_memory_()
    if self._cuda_ipc_handle_dict is None:
      self._cuda_ipc_handle_dict = {}
      for group_id, tensor in self._unified_tensors.items():
        self._cuda_ipc_handle_dict[group_id] = tensor.share_ipc()[0]
    return (
      self.feature_tensor,
      self.id2index,
      self.split_ratio,
      self.device_group_list,
      self._cuda_ipc_handle_dict,
      self.with_gpu,
      self.dtype
    )

  @classmethod
  def from_ipc_handle(cls, ipc_handle):
    # Build a lightweight instance now; the heavy tensor state is attached
    # lazily via ``lazy_init_with_ipc_handle`` on first access.
    _, _, split_ratio, device_group_list, _, with_gpu, dtype = ipc_handle
    feature = cls(None, None, split_ratio, device_group_list,
                  with_gpu=with_gpu, dtype=dtype)
    feature._ipc_handle = ipc_handle
    return feature

  def lazy_init_with_ipc_handle(self):
    if self._ipc_handle is None:
      return
    with scope_lock:
      # Re-check under the lock so only one thread performs the rebuild.
      if self._ipc_handle is None:
        return
      self.feature_tensor, self.id2index, _, _, self._cuda_ipc_handle_dict, _, _ \
        = self._ipc_handle
      if not self.with_gpu:
        self._ipc_handle = None
        return
      self._check_and_set_device()
      _, cpu_part = self._split(self.feature_tensor)
      group_id = self._device2group[self.device]
      self._unified_tensors[group_id] = UnifiedTensor.new_from_ipc(
        ipc_handles=(self._cuda_ipc_handle_dict.get(group_id, []), cpu_part),
        current_device=self.device,
        dtype=self.dtype
      )
      self._ipc_handle = None

  @property
  def shape(self):
    # Shape of the full (CPU) feature tensor.
    self.lazy_init_with_ipc_handle()
    return self.feature_tensor.shape

  def size(self, dim):
    # Size of the full (CPU) feature tensor along ``dim``.
    self.lazy_init_with_ipc_handle()
    return self.feature_tensor.size(dim)
## Pickling Registration

def rebuild_feature(ipc_handle):
  # Inverse of ``reduce_feature``: restore a Feature in the child process.
  return Feature.from_ipc_handle(ipc_handle)

def reduce_feature(feature: Feature):
  # Teach multiprocessing how to pickle a Feature via its IPC handle.
  return (rebuild_feature, (feature.share_ipc(),))

ForkingPickler.register(Feature, reduce_feature)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/channel/shm_channel.py | graphlearn_torch/python/channel/shm_channel.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Union
from .. import py_graphlearn_torch as pywrap
from ..utils import parse_size
from .base import SampleMessage, ChannelBase
class ShmChannel(ChannelBase):
  r""" A communication channel for sample messages, backed by the
  shared-memory queue implemented in the underlying c++ lib.

  The underlying shared-memory buffer is pinnable, which achieves better
  performance when the consumer copies data from the channel to a GPU.

  Args:
    capacity: The max number of buffered sample messages in the channel.
    shm_size: The size (bytes, or a string such as ``'256MB'``) allocated
      for the underlying shared memory.

  ``send`` is limited by both ``capacity`` and ``shm_size``: when either
  the buffered-message count or the used buffer memory reaches its limit,
  the call blocks until the consumer releases space.
  """
  def __init__(self,
               capacity: int=128,
               shm_size: Union[str, int]='256MB'):
    assert capacity > 0
    self._queue = pywrap.SampleQueue(capacity, parse_size(shm_size))

  def pin_memory(self):
    r""" Pin the underlying shared-memory buffer.
    """
    self._queue.pin_memory()

  def empty(self) -> bool:
    r""" Whether the queue currently holds no messages.
    """
    return self._queue.empty()

  def send(self, msg: SampleMessage, **kwargs):
    self._queue.send(msg)

  def recv(self, timeout_ms=None, **kwargs) -> SampleMessage:
    effective_timeout = 0 if timeout_ms is None else timeout_ms
    return self._queue.receive(timeout_ms=effective_timeout)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/channel/remote_channel.py | graphlearn_torch/python/channel/remote_channel.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import queue
import torch
from .base import SampleMessage, ChannelBase
from typing import Union, List
class RemoteReceivingChannel(ChannelBase):
  r""" A pull-based receiving channel that can fetch sampled messages
  from remote sampling servers.

  Up to ``prefetch_size`` asynchronous requests per server are kept in
  flight; completed responses are buffered into a local queue and consumed
  by :meth:`recv`.

  Args:
    server_rank (int or List[int]): The ranks of target servers to fetch
      sampled messages from.
    producer_id (int or List[int]): The sequence ids of the sampling
      producers created on the target servers (aligned with
      ``server_rank``).
    prefetch_size (int): The number of messages to prefetch for every server.
      (Default ``2``).
  """
  def __init__(
    self,
    server_rank: Union[int, List[int]],
    producer_id: Union[int, List[int]],
    prefetch_size: int = 2
  ):
    # Normalize scalar inputs to lists so single- and multi-server setups
    # share one code path.
    self.server_rank_list = server_rank if isinstance(server_rank,
                                                      List) else [server_rank]
    self.producer_id_list = producer_id if isinstance(producer_id,
                                                      List) else [producer_id]
    self.prefetch_size = prefetch_size
    assert len(self.server_rank_list) == len(self.producer_id_list)
    # Per-server counters of issued requests vs. consumed responses; their
    # difference is the number of requests currently in flight.
    self.num_request_list = [0] * len(self.server_rank_list)
    self.num_received_list = [0] * len(self.server_rank_list)
    # Per-server end-of-epoch flags; the epoch only ends globally once
    # every server has reported end-of-epoch.
    self.server_end_of_epoch = [False] * len(self.server_rank_list)
    self.global_end_of_epoch = False
    # Buffer of (msg, end_of_epoch, local_server_idx) tuples filled by the
    # RPC completion callbacks created in `_request_some`.
    self.queue = queue.Queue(maxsize=self.prefetch_size * len(self.server_rank_list))

  def reset(self):
    r""" Reset all states to start a new epoch consuming.
    """
    # Discard messages that have not been consumed.
    while not self.queue.empty():
      _ = self.queue.get()
    self.server_end_of_epoch = [False] * len(self.server_rank_list)
    self.num_request_list = [0] * len(self.server_rank_list)
    self.num_received_list = [0] * len(self.server_rank_list)
    self.global_end_of_epoch = False

  def send(self, msg: SampleMessage, **kwargs):
    # This channel is receive-only.
    raise RuntimeError(
      f"'{self.__class__.__name__}': cannot send "
      f"message with a receiving channel."
    )

  def recv(self, **kwargs) -> SampleMessage:
    r""" Return the next sampled message; raises ``StopIteration`` once
    every server has finished the epoch and all responses are consumed.
    """
    if self.global_end_of_epoch:
      if self._all_received():
        raise StopIteration
    else:
      # Top up in-flight requests before blocking on the queue.
      self._request_some()
    msg, end_of_epoch, local_server_idx = self.queue.get()
    self.num_received_list[local_server_idx] += 1
    # server guarantees that when end_of_epoch is true, msg must be None
    while end_of_epoch:
      self.server_end_of_epoch[local_server_idx] = True
      if sum(self.server_end_of_epoch) == len(self.server_rank_list):
        self.global_end_of_epoch = True
        if self._all_received():
          raise StopIteration
      # Keep draining already-requested responses until a real message
      # (from a still-active server) arrives.
      msg, end_of_epoch, local_server_idx = self.queue.get()
      self.num_received_list[local_server_idx] += 1
    return msg

  def _all_received(self):
    # True when every issued request has produced a consumed response.
    return sum(self.num_received_list) == sum(self.num_request_list)

  def _request_some(self):
    # Issue async RPCs so that each unfinished server has `prefetch_size`
    # requests in flight (received + prefetch - requested new ones).
    def on_done(f: torch.futures.Future, local_server_idx):
      try:
        msg, end_of_epoch = f.wait()
        self.queue.put((msg, end_of_epoch, local_server_idx))
      except Exception as e:
        # NOTE(review): a failed future is only logged and never enqueued,
        # so a consumer blocked in `recv` keeps waiting for other responses.
        logging.error("broken future of receiving remote messages: %s", e)

    def create_callback(local_server_idx):
      # Bind the loop variable at closure-creation time to avoid the
      # classic late-binding pitfall.
      def callback(f):
        on_done(f, local_server_idx)
      return callback

    # Imported lazily to avoid a circular dependency with the
    # distributed package.
    from ..distributed import async_request_server, DistServer
    for local_server_idx, server_rank in enumerate(self.server_rank_list):
      if not self.server_end_of_epoch[local_server_idx]:
        for _ in range(
          self.num_received_list[local_server_idx] +
          self.prefetch_size -
          self.num_request_list[local_server_idx]
        ):
          fut = async_request_server(
            server_rank, DistServer.fetch_one_sampled_message,
            self.producer_id_list[local_server_idx]
          )
          cb = create_callback(local_server_idx)
          fut.add_done_callback(cb)
          self.num_request_list[local_server_idx] += 1
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/channel/mp_channel.py | graphlearn_torch/python/channel/mp_channel.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch.multiprocessing as mp
from .base import SampleMessage, ChannelBase
class MpChannel(ChannelBase):
  r""" A simple multiprocessing channel backed by a
  ``torch.multiprocessing.Queue`` created from the ``'spawn'`` context.

  Args:
    The input arguments should be consistent with
    `torch.multiprocessing.Queue`.
  """
  def __init__(self, **kwargs):
    spawn_ctx = mp.get_context('spawn')
    self._queue = spawn_ctx.Queue(**kwargs)

  def send(self, msg: SampleMessage, **kwargs):
    r""" Put a sample message into the underlying queue. """
    self._queue.put(msg, **kwargs)

  def recv(self, **kwargs) -> SampleMessage:
    r""" Take the next sample message from the underlying queue. """
    return self._queue.get(**kwargs)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/channel/__init__.py | graphlearn_torch/python/channel/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .base import SampleMessage, ChannelBase, QueueTimeoutError
from .mp_channel import MpChannel
from .shm_channel import ShmChannel
from .remote_channel import RemoteReceivingChannel
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/channel/base.py | graphlearn_torch/python/channel/base.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from abc import ABC, abstractmethod
from typing import Dict
import torch
from .. import py_graphlearn_torch as pywrap
# Timeout error raised by the underlying C++ queue bindings, re-exported
# so Python callers can catch it without importing the pywrap module.
QueueTimeoutError = pywrap.QueueTimeoutError

# A `SampleMessage` contains all possible results from a sampler, including
# subgraph data, features and user defined metas.
SampleMessage = Dict[str, torch.Tensor]
class ChannelBase(ABC):
  r""" Abstract base of message channels between sampling producers and
  consumers, defining the :meth:`send` / :meth:`recv` contract.
  """
  @abstractmethod
  def send(self, msg: SampleMessage, **kwargs):
    r""" Push a sample message into the channel; implementations decide
    how the message data is transported and stored.

    Args:
      msg: The sample message to send.
    """

  @abstractmethod
  def recv(self, **kwargs) -> SampleMessage:
    r""" Pop the next sample message from the channel.
    """
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/partition/frequency_partitioner.py | graphlearn_torch/python/partition/frequency_partitioner.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List, Dict, Optional, Tuple, Union
import torch
from ..typing import NodeType, EdgeType, TensorDataType
from ..utils import parse_size
from .base import PartitionerBase, PartitionBook
class FrequencyPartitioner(PartitionerBase):
  r""" Frequency-based partitioner for graph topology and features.

  Nodes are assigned according to their access distribution (``probs``)
  on each partition: a node tends to be placed (and cached) on the
  partition that accesses it most and the others least.

  Args:
    output_dir: The output root directory for partitioned results.
    num_parts: Number of partitions.
    num_nodes: Number of graph nodes, should be a dict for hetero data.
    edge_index: The edge index data of graph edges, should be a dict
      for hetero data.
    probs: The node access distribution on each partition, should be a
      dict for hetero data.
    node_feat: The node feature data, should be a dict for hetero data.
    node_feat_dtype: The data type of node features.
    edge_feat: The edge feature data, should be a dict for hetero data.
    edge_feat_dtype: The data type of edge features.
    edge_weights: The edge weights, should be a dict for hetero data.
    edge_assign_strategy: The assignment strategy when partitioning edges,
      should be 'by_src' or 'by_dst'.
    cache_memory_budget: The memory budget (in bytes) for cached node features
      per partition for each node type, should be a dict for hetero data.
    cache_ratio: The proportion to cache node features per partition for each
      node type, should be a dict for hetero data.
    chunk_size: The chunk size for partitioning.

  Note that if both `cache_memory_budget` and `cache_ratio` are provided,
  the metric that caches the smaller number of features will be used.
  If both of them set to empty dict, the feature cache will be turned off.
  """
  def __init__(
    self,
    output_dir: str,
    num_parts: int,
    num_nodes: Union[int, Dict[NodeType, int]],
    edge_index: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
    probs: Union[List[torch.Tensor], Dict[NodeType, List[torch.Tensor]]],
    node_feat: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
    node_feat_dtype: torch.dtype = torch.float32,
    edge_feat: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_feat_dtype: torch.dtype = torch.float32,
    edge_weights: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_assign_strategy: str = 'by_src',
    cache_memory_budget: Union[int, Dict[NodeType, int]] = None,
    cache_ratio: Union[float, Dict[NodeType, float]] = None,
    chunk_size: int = 10000,
  ):
    super().__init__(output_dir, num_parts, num_nodes, edge_index, node_feat,
                     node_feat_dtype, edge_feat, edge_feat_dtype, edge_weights,
                     edge_assign_strategy, chunk_size)
    self.probs = probs
    if self.node_feat is not None:
      # Bytes per feature row, used by `_cache_node` to turn a memory
      # budget into a number of cacheable features. Only computed when
      # node features exist (the only path that calls `_cache_node`).
      if 'hetero' == self.data_cls:
        self.per_feature_bytes = {}
        for ntype, feat in self.node_feat.items():
          assert len(feat.shape) == 2
          self.per_feature_bytes[ntype] = feat.shape[1] * feat.element_size()
        assert isinstance(self.probs, dict)
        for ntype, prob_list in self.probs.items():
          assert ntype in self.node_types
          assert len(prob_list) == self.num_parts
      else:
        assert len(self.node_feat.shape) == 2
        self.per_feature_bytes = (self.node_feat.shape[1] *
                                  self.node_feat.element_size())
        assert len(self.probs) == self.num_parts
    # A "blob" is the node range handled per outer iteration of
    # `_partition_node`: each of the `num_parts` partitions picks at most
    # `chunk_size` nodes from it.
    self.blob_size = self.chunk_size * self.num_parts
    if cache_memory_budget is None:
      self.cache_memory_budget = {} if 'hetero' == self.data_cls else 0
    else:
      self.cache_memory_budget = cache_memory_budget
    if cache_ratio is None:
      self.cache_ratio = {} if 'hetero' == self.data_cls else 0.0
    else:
      self.cache_ratio = cache_ratio

  def _get_chunk_probs_sum(
    self,
    chunk: torch.Tensor,
    probs: List[torch.Tensor]
  ) -> List[torch.Tensor]:
    r""" Helper function for partitioning a certain type of node to
    calculate hotness and difference between partitions.

    For partition ``p`` the score of each node in ``chunk`` is
    ``num_parts * probs[p] - sum(probs[q] for q != p)`` plus a small
    epsilon: nodes accessed often by ``p`` and rarely by the other
    partitions score highest.
    """
    chunk_probs_sum = [
      (torch.zeros(chunk.size(0)) + 1e-6)
      for _ in range(self.num_parts)
    ]
    for src_rank in range(self.num_parts):
      for dst_rank in range(self.num_parts):
        if dst_rank == src_rank:
          chunk_probs_sum[src_rank] += probs[dst_rank][chunk] * self.num_parts
        else:
          chunk_probs_sum[src_rank] -= probs[dst_rank][chunk]
    return chunk_probs_sum

  def _partition_node(
    self,
    ntype: Optional[NodeType] = None
  ) -> Tuple[List[torch.Tensor], PartitionBook]:
    # Assign nodes blob by blob: within each blob of up to `blob_size`
    # consecutive node ids, partitions take turns picking their
    # `chunk_size` highest-scoring remaining nodes.
    if 'hetero' == self.data_cls:
      assert ntype is not None
      node_num = self.num_nodes[ntype]
      probs = self.probs[ntype]
    else:
      node_num = self.num_nodes
      probs = self.probs
    # NOTE(review): `chunk_num` is derived from `chunk_size` but each
    # iteration consumes up to `blob_size` nodes, so trailing iterations
    # operate on empty chunks (harmless, just wasted work).
    chunk_num = (node_num + self.chunk_size - 1) // self.chunk_size
    res = [[] for _ in range(self.num_parts)]
    current_chunk_start_pos = 0
    current_partition_idx = 0
    for _ in range(chunk_num):
      current_chunk_end_pos = min(node_num,
                                  current_chunk_start_pos + self.blob_size)
      current_chunk_size = current_chunk_end_pos - current_chunk_start_pos
      chunk = torch.arange(current_chunk_start_pos, current_chunk_end_pos,
                           dtype=torch.long)
      chunk_probs_sum = self._get_chunk_probs_sum(chunk, probs)
      assigned_node_size = 0
      per_partition_size = self.chunk_size
      # Rotate the starting partition across blobs so that no single
      # partition always gets first pick.
      for partition_idx in range(current_partition_idx,
                                 current_partition_idx + self.num_parts):
        partition_idx = partition_idx % self.num_parts
        actual_per_partition_size = min(per_partition_size,
                                        chunk.size(0) - assigned_node_size)
        _, sorted_res_order = torch.sort(chunk_probs_sum[partition_idx],
                                         descending=True)
        pick_chunk_part = sorted_res_order[:actual_per_partition_size]
        pick_ids = chunk[pick_chunk_part]
        res[partition_idx].append(pick_ids)
        # Overwrite the picked nodes with a sentinel score so that later
        # partitions in this blob do not pick them again.
        for idx in range(self.num_parts):
          chunk_probs_sum[idx][pick_chunk_part] = -self.num_parts
        assigned_node_size += actual_per_partition_size
      current_partition_idx += 1
      current_chunk_start_pos += current_chunk_size
    # Build the id -> partition lookup table from the per-partition picks.
    partition_book = torch.zeros(node_num, dtype=torch.long)
    partition_results = []
    for partition_idx in range(self.num_parts):
      partition_ids = torch.cat(res[partition_idx])
      partition_results.append(partition_ids)
      partition_book[partition_ids] = partition_idx
    return partition_results, partition_book

  def _cache_node(
    self,
    ntype: Optional[NodeType] = None
  ) -> List[Optional[torch.Tensor]]:
    # Select the hottest nodes per partition for feature caching, bounded
    # by the memory budget and/or the cache ratio.
    if 'hetero' == self.data_cls:
      assert ntype is not None
      probs = self.probs[ntype]
      per_feature_bytes = self.per_feature_bytes[ntype]
      cache_memory_budget = self.cache_memory_budget.get(ntype, 0)
      cache_ratio = self.cache_ratio.get(ntype, 0.0)
    else:
      probs = self.probs
      per_feature_bytes = self.per_feature_bytes
      cache_memory_budget = self.cache_memory_budget
      cache_ratio = self.cache_ratio
    cache_memory_budget_bytes = parse_size(cache_memory_budget)
    # The +1e-6 guards against a zero division for zero-width features.
    cache_num_by_memory = int(cache_memory_budget_bytes /
                              (per_feature_bytes + 1e-6))
    cache_num_by_memory = min(cache_num_by_memory, probs[0].size(0))
    cache_num_by_ratio = int(probs[0].size(0) * min(cache_ratio, 1.0))
    # A zero count means that constraint is unset: fall back to the other
    # one; when both are set, the smaller count wins.
    if cache_num_by_memory == 0:
      cache_num = cache_num_by_ratio
    elif cache_num_by_ratio == 0:
      cache_num = cache_num_by_memory
    else:
      cache_num = min(cache_num_by_memory, cache_num_by_ratio)
    cache_results = [None] * self.num_parts
    if cache_num > 0:
      for partition_idx in range(self.num_parts):
        # Cache the nodes each partition accesses most frequently.
        _, prev_order = torch.sort(probs[partition_idx], descending=True)
        cache_results[partition_idx] = prev_order[:cache_num]
    return cache_results
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/partition/__init__.py | graphlearn_torch/python/partition/__init__.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .base import *
from .frequency_partitioner import FrequencyPartitioner
from .partition_book import *
from .random_partitioner import RandomPartitioner
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/partition/base.py | graphlearn_torch/python/partition/base.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pickle
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple, Union
import torch
from ..typing import (
NodeType, EdgeType, as_str, TensorDataType,
GraphPartitionData, HeteroGraphPartitionData,
FeaturePartitionData, HeteroFeaturePartitionData,
)
from ..utils import convert_to_tensor, ensure_dir, id2idx, append_tensor_to_file, load_and_concatenate_tensors
class PartitionBook(object):
  r""" Abstract lookup table that maps global ids to the indices of the
  partitions owning them.
  """
  @abstractmethod
  def __getitem__(self, indices):
    pass

  @property
  def offset(self):
    # Default global-id offset of this book; subclasses may override.
    return 0
# Per-type partition-book mappings used for heterogeneous graphs.
HeteroNodePartitionDict = Dict[NodeType, PartitionBook]
HeteroEdgePartitionDict = Dict[EdgeType, PartitionBook]
def save_meta(
  output_dir: str,
  num_parts: int,
  data_cls: str = 'homo',
  node_types: Optional[List[NodeType]] = None,
  edge_types: Optional[List[EdgeType]] = None,
):
  r""" Persist partitioning meta info as a pickled dict in a file named
  ``META`` directly under the output directory.
  """
  meta_path = os.path.join(output_dir, 'META')
  with open(meta_path, 'wb') as outfile:
    pickle.dump(
      {
        'num_parts': num_parts,
        'data_cls': data_cls,
        'node_types': node_types,
        'edge_types': edge_types
      },
      outfile, pickle.HIGHEST_PROTOCOL)
def save_node_pb(
  output_dir: str,
  node_pb: PartitionBook,
  ntype: Optional[NodeType] = None
):
  r""" Save a partition book of graph nodes into the output directory.

  For heterogeneous data (``ntype`` given) the book goes under a
  ``node_pb/`` subdirectory named by node type; otherwise it is stored
  directly as ``node_pb.pt``.
  """
  if ntype is None:
    target = os.path.join(output_dir, 'node_pb.pt')
  else:
    folder = os.path.join(output_dir, 'node_pb')
    ensure_dir(folder)
    target = os.path.join(folder, f'{as_str(ntype)}.pt')
  torch.save(node_pb, target)
def save_edge_pb(
  output_dir: str,
  edge_pb: PartitionBook,
  etype: Optional[EdgeType] = None
):
  r""" Save a partition book of graph edges into the output directory.

  For heterogeneous data (``etype`` given) the book goes under an
  ``edge_pb/`` subdirectory named by edge type; otherwise it is stored
  directly as ``edge_pb.pt``.
  """
  if etype is None:
    target = os.path.join(output_dir, 'edge_pb.pt')
  else:
    folder = os.path.join(output_dir, 'edge_pb')
    ensure_dir(folder)
    target = os.path.join(folder, f'{as_str(etype)}.pt')
  torch.save(edge_pb, target)
def save_graph_cache(
  output_dir: str,
  graph_partition_list: List[GraphPartitionData],
  etype: Optional[EdgeType] = None,
  with_edge_feat: bool = False
):
  r""" Save full graph topology into the output directory.

  The edge lists of all partitions are concatenated and written as one
  cached graph under ``graph/`` (or ``graph/<etype>/`` for hetero data).
  Edge ids are persisted only when ``with_edge_feat`` is true, since they
  are needed to look up edge features.
  """
  if len(graph_partition_list) == 0:
    return
  subdir = os.path.join(output_dir, 'graph')
  if etype is not None:
    subdir = os.path.join(subdir, as_str(etype))
  ensure_dir(subdir)
  # Concatenate per-partition edge lists into the full topology.
  rows = torch.cat([graph_partition.edge_index[0] for graph_partition in graph_partition_list])
  cols = torch.cat([graph_partition.edge_index[1] for graph_partition in graph_partition_list])
  weights = None
  if graph_partition_list[0].weights is not None:
    # Assumes every partition carries weights when the first one does.
    weights = torch.cat([graph_partition.weights for graph_partition in graph_partition_list])
  torch.save(rows, os.path.join(subdir, 'rows.pt'))
  torch.save(cols, os.path.join(subdir, 'cols.pt'))
  if with_edge_feat:
    edge_ids = torch.cat([graph_partition.eids for graph_partition in graph_partition_list])
    torch.save(edge_ids, os.path.join(subdir, 'eids.pt'))
  if weights is not None:
    torch.save(weights, os.path.join(subdir, 'weights.pt'))
def save_graph_partition(
  output_dir: str,
  partition_idx: int,
  graph_partition: GraphPartitionData,
  etype: Optional[EdgeType] = None
):
  r""" Save one graph topology partition (rows, cols, eids and optional
  edge weights) under ``part{partition_idx}/graph`` in the output
  directory (with an edge-type subdirectory for hetero data).
  """
  target_dir = os.path.join(output_dir, f'part{partition_idx}', 'graph')
  if etype is not None:
    target_dir = os.path.join(target_dir, as_str(etype))
  ensure_dir(target_dir)
  rows, cols = graph_partition.edge_index
  tensors = {'rows.pt': rows, 'cols.pt': cols, 'eids.pt': graph_partition.eids}
  if graph_partition.weights is not None:
    tensors['weights.pt'] = graph_partition.weights
  for fname, tensor in tensors.items():
    torch.save(tensor, os.path.join(target_dir, fname))
def save_feature_partition(
  output_dir: str,
  partition_idx: int,
  feature_partition: FeaturePartitionData,
  group: str = 'node_feat',
  graph_type: Optional[Union[NodeType, EdgeType]] = None
):
  r""" Save a feature partition (features, their global ids, and the
  optional hot-feature cache) into the output directory.
  """
  target_dir = os.path.join(output_dir, f'part{partition_idx}', group)
  if graph_type is not None:
    target_dir = os.path.join(target_dir, as_str(graph_type))
  ensure_dir(target_dir)
  append_tensor_to_file(os.path.join(target_dir, 'feats.pkl'), feature_partition.feats)
  append_tensor_to_file(os.path.join(target_dir, 'ids.pkl'), feature_partition.ids)
  if feature_partition.cache_feats is not None:
    torch.save(feature_partition.cache_feats, os.path.join(target_dir, 'cache_feats.pt'))
    torch.save(feature_partition.cache_ids, os.path.join(target_dir, 'cache_ids.pt'))
def save_feature_partition_chunk(
  output_dir: str,
  partition_idx: int,
  feature_partition: FeaturePartitionData,
  group: str = 'node_feat',
  graph_type: Optional[Union[NodeType, EdgeType]] = None
):
  r""" Append one chunk of a feature partition (features and their global
  ids) to the pickle files of partition ``partition_idx`` in the output
  directory.
  """
  target_dir = os.path.join(output_dir, f'part{partition_idx}', group)
  if graph_type is not None:
    target_dir = os.path.join(target_dir, as_str(graph_type))
  ensure_dir(target_dir)
  append_tensor_to_file(os.path.join(target_dir, 'feats.pkl'), feature_partition.feats)
  append_tensor_to_file(os.path.join(target_dir, 'ids.pkl'), feature_partition.ids)
def save_feature_partition_cache(
  output_dir: str,
  partition_idx: int,
  feature_partition: FeaturePartitionData,
  group: str = 'node_feat',
  graph_type: Optional[Union[NodeType, EdgeType]] = None
):
  r""" Persist the hot-feature cache of a partition, if present, into the
  output directory.
  """
  target_dir = os.path.join(output_dir, f'part{partition_idx}', group)
  if graph_type is not None:
    target_dir = os.path.join(target_dir, as_str(graph_type))
  ensure_dir(target_dir)
  if feature_partition.cache_feats is None:
    return
  torch.save(feature_partition.cache_feats, os.path.join(target_dir, 'cache_feats.pt'))
  torch.save(feature_partition.cache_ids, os.path.join(target_dir, 'cache_ids.pt'))
class PartitionerBase(ABC):
  r""" Base class for partitioning graphs and features.

  Subclasses decide how nodes are assigned to partitions
  (:meth:`_partition_node`) and which node features are cached
  (:meth:`_cache_node`); this base class implements chunked edge
  partitioning, feature extraction and persistence on top of those
  decisions.

  Args:
    output_dir: The output root directory for partitioned results.
    num_parts: Number of partitions, must be greater than 1.
    num_nodes: Number of graph nodes, should be a dict for hetero data.
    edge_index: The edge index data of graph edges, should be a dict
      for hetero data.
    node_feat: The node feature data, should be a dict for hetero data.
    node_feat_dtype: The data type of node features.
    edge_feat: The edge feature data, should be a dict for hetero data.
    edge_feat_dtype: The data type of edge features.
    edge_weights: The edge weights, should be a dict for hetero data.
    edge_assign_strategy: The assignment strategy when partitioning edges,
      should be 'by_src' or 'by_dst'.
    chunk_size: The chunk size for partitioning.
  """
  def __init__(
    self,
    output_dir: str,
    num_parts: int,
    num_nodes: Union[int, Dict[NodeType, int]],
    edge_index: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
    node_feat: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
    node_feat_dtype: torch.dtype = torch.float32,
    edge_feat: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_feat_dtype: torch.dtype = torch.float32,
    edge_weights: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_assign_strategy: str = 'by_src',
    chunk_size: int = 10000,
  ):
    self.output_dir = output_dir
    ensure_dir(self.output_dir)
    self.num_parts = num_parts
    assert self.num_parts > 1
    self.num_nodes = num_nodes
    self.edge_index = convert_to_tensor(edge_index, dtype=torch.int64)
    self.node_feat = convert_to_tensor(node_feat, dtype=node_feat_dtype)
    self.edge_feat = convert_to_tensor(edge_feat, dtype=edge_feat_dtype)
    self.edge_weights = convert_to_tensor(edge_weights, dtype=torch.float32)
    # Node partition books produced so far, keyed by node type.
    # Fix: previously never initialized, so the incremental helpers
    # `_process_node` / `_process_edge` raised AttributeError.
    self.node_pb_dict = {}
    if isinstance(self.num_nodes, dict):
      # Heterogeneous input: all per-type containers must be dicts.
      assert isinstance(self.edge_index, dict)
      assert isinstance(self.node_feat, dict) or self.node_feat is None
      assert isinstance(self.edge_feat, dict) or self.edge_feat is None
      self.data_cls = 'hetero'
      self.node_types = list(self.num_nodes.keys())
      self.edge_types = list(self.edge_index.keys())
      self.num_edges = {}
      for etype, index in self.edge_index.items():
        self.num_edges[etype] = len(index[0])
    else:
      self.data_cls = 'homo'
      self.node_types = None
      self.edge_types = None
      self.num_edges = len(self.edge_index[0])
    self.edge_assign_strategy = edge_assign_strategy.lower()
    assert self.edge_assign_strategy in ['by_src', 'by_dst']
    self.chunk_size = chunk_size

  def get_edge_index(self, etype: Optional[EdgeType] = None):
    r""" Return the edge index for ``etype`` (hetero) or the whole graph.
    """
    if 'hetero' == self.data_cls:
      assert etype is not None
      return self.edge_index[etype]
    return self.edge_index

  def get_node_feat(self, ntype: Optional[NodeType] = None):
    r""" Return node features for ``ntype`` (hetero) or the whole graph,
    or ``None`` when no node features were supplied.
    """
    if self.node_feat is None:
      return None
    if 'hetero' == self.data_cls:
      assert ntype is not None
      return self.node_feat[ntype]
    return self.node_feat

  def get_edge_feat(self, etype: Optional[EdgeType] = None):
    r""" Return edge features for ``etype`` (hetero) or the whole graph,
    or ``None`` when no edge features were supplied.
    """
    if self.edge_feat is None:
      return None
    if 'hetero' == self.data_cls:
      assert etype is not None
      return self.edge_feat[etype]
    return self.edge_feat

  @abstractmethod
  def _partition_node(
    self,
    ntype: Optional[NodeType] = None
  ) -> Tuple[List[torch.Tensor], PartitionBook]:
    r""" Partition graph nodes of a specify node type, needs to be overwritten.

    Args:
      ntype (str): The type for input nodes, must be provided for heterogeneous
        graph. (default: ``None``)

    Returns:
      List[torch.Tensor]: The list of partitioned nodes ids.
      PartitionBook: The partition book of graph nodes.
    """

  @abstractmethod
  def _cache_node(
    self,
    ntype: Optional[NodeType] = None
  ) -> List[Optional[torch.Tensor]]:
    r""" Do feature caching and get cached results of a specify
    node type, needs to be overwritten.

    Returns:
      List[Optional[torch.Tensor]]: list of node ids need to be cached on
        each partition.
    """

  def _partition_graph(
    self,
    node_pb: Union[PartitionBook, Dict[NodeType, PartitionBook]],
    etype: Optional[EdgeType] = None
  ) -> Tuple[List[GraphPartitionData], PartitionBook]:
    r""" Partition graph topology of a specified edge type. Edges follow
    their source (``by_src``) or destination (``by_dst``) node's partition.

    Args:
      node_pb (PartitionBook or Dict[NodeType, PartitionBook]): The partition
        books of graph nodes.
      etype (Tuple[str, str, str]): The type for input edges, must be provided
        for heterogeneous graph. (default: ``None``)

    Returns:
      List[GraphPartitionData]: A list of graph data for each partition.
      PartitionBook: The partition book of graph edges.
    """
    edge_index = self.get_edge_index(etype)
    rows, cols = edge_index[0], edge_index[1]
    edge_num = len(rows)
    eids = torch.arange(edge_num, dtype=torch.int64)
    weights = self.edge_weights[etype] if isinstance(self.edge_weights, dict) \
      else self.edge_weights
    # Pick which endpoint's partition book decides each edge's partition.
    if 'hetero' == self.data_cls:
      assert etype is not None
      assert isinstance(node_pb, dict)
      src_ntype, _, dst_ntype = etype
      if 'by_src' == self.edge_assign_strategy:
        target_node_pb = node_pb[src_ntype]
        target_indices = rows
      else:
        target_node_pb = node_pb[dst_ntype]
        target_indices = cols
    else:
      target_node_pb = node_pb
      target_indices = rows if 'by_src' == self.edge_assign_strategy else cols
    # Assign edges chunk by chunk to bound peak memory usage.
    chunk_num = (edge_num + self.chunk_size - 1) // self.chunk_size
    chunk_start_pos = 0
    res = [[] for _ in range(self.num_parts)]
    for _ in range(chunk_num):
      chunk_end_pos = min(edge_num, chunk_start_pos + self.chunk_size)
      current_chunk_size = chunk_end_pos - chunk_start_pos
      chunk_idx = torch.arange(current_chunk_size, dtype=torch.long)
      chunk_rows = rows[chunk_start_pos:chunk_end_pos]
      chunk_cols = cols[chunk_start_pos:chunk_end_pos]
      chunk_eids = eids[chunk_start_pos:chunk_end_pos]
      if weights is not None:
        chunk_weights = weights[chunk_start_pos:chunk_end_pos]
      chunk_target_indices = target_indices[chunk_start_pos:chunk_end_pos]
      chunk_partition_idx = target_node_pb[chunk_target_indices]
      for pidx in range(self.num_parts):
        mask = (chunk_partition_idx == pidx)
        idx = torch.masked_select(chunk_idx, mask)
        res[pidx].append(GraphPartitionData(
          edge_index=(chunk_rows[idx], chunk_cols[idx]),
          eids=chunk_eids[idx],
          weights=chunk_weights[idx] if weights is not None else None
        ))
      chunk_start_pos += current_chunk_size
    # Merge per-chunk pieces and build the edge-id -> partition book.
    partition_book = torch.zeros(edge_num, dtype=torch.long)
    partition_results = []
    for pidx in range(self.num_parts):
      p_rows = torch.cat([r.edge_index[0] for r in res[pidx]])
      p_cols = torch.cat([r.edge_index[1] for r in res[pidx]])
      p_eids = torch.cat([r.eids for r in res[pidx]])
      if weights is not None:
        p_weights = torch.cat([r.weights for r in res[pidx]])
      partition_book[p_eids] = pidx
      partition_results.append(GraphPartitionData(
        edge_index=(p_rows, p_cols),
        eids=p_eids,
        weights=p_weights if weights is not None else None
      ))
    return partition_results, partition_book

  def _partition_and_save_node_feat(
    self,
    node_ids_list: List[torch.Tensor],
    ntype: Optional[NodeType] = None,
  ):
    r""" Partition node features by the partitioned node results, and calculate
    the cached nodes if needed.
    """
    node_feat = self.get_node_feat(ntype)
    if node_feat is None:
      return
    cache_node_ids_list = self._cache_node(ntype)
    for pidx in range(self.num_parts):
      # save partitioned node feature cache
      cache_n_ids = cache_node_ids_list[pidx]
      p_node_cache_feat = FeaturePartitionData(
        feats=None,
        ids=None,
        cache_feats=(node_feat[cache_n_ids] if cache_n_ids is not None else None),
        cache_ids=cache_n_ids
      )
      save_feature_partition_cache(self.output_dir, pidx, p_node_cache_feat,
                                   group='node_feat', graph_type=ntype)
      # save partitioned node feature chunk by chunk
      # NOTE(review): `torch.chunk` with chunks=0 raises, so a partition
      # that received zero nodes would fail here — confirm upstream that
      # every partition always gets at least one node.
      n_ids = node_ids_list[pidx]
      n_ids_chunks = torch.chunk(n_ids, chunks=((n_ids.shape[0] + self.chunk_size - 1) // self.chunk_size))
      for chunk in n_ids_chunks:
        p_node_feat_chunk = FeaturePartitionData(
          feats=node_feat[chunk],
          ids=chunk.clone(),
          cache_feats=None,
          cache_ids=None
        )
        save_feature_partition_chunk(self.output_dir, pidx, p_node_feat_chunk,
                                     group='node_feat', graph_type=ntype)

  def _partition_and_save_edge_feat(
    self,
    graph_list: List[GraphPartitionData],
    etype: Optional[EdgeType] = None
  ):
    r""" Partition edge features by the partitioned edge results.
    """
    edge_feat = self.get_edge_feat(etype)
    if edge_feat is None:
      return
    for pidx in range(self.num_parts):
      eids = graph_list[pidx].eids
      eids_chunks = torch.chunk(
        eids, chunks=((eids.shape[0] + self.chunk_size - 1) // self.chunk_size)
      )
      for chunk in eids_chunks:
        p_edge_feat_chunk = FeaturePartitionData(
          feats=edge_feat[chunk],
          ids=chunk.clone(),
          cache_feats=None,
          cache_ids=None
        )
        save_feature_partition_chunk(self.output_dir, pidx, p_edge_feat_chunk,
                                     group='edge_feat', graph_type=etype)

  def _process_node(self, ntype, with_feature):
    r""" Partition and persist nodes (and optionally features) of one node
    type, recording its partition book for later edge processing.
    """
    node_ids_list, node_pb = self._partition_node(ntype)
    save_node_pb(self.output_dir, node_pb, ntype)
    self.node_pb_dict[ntype] = node_pb
    if with_feature:
      self._partition_and_save_node_feat(node_ids_list, ntype)

  def _process_edge(self, etype, with_feature):
    r""" Partition and persist edges (and optionally features) of one edge
    type; requires `_process_node` to have been called for its endpoints.
    """
    graph_list, edge_pb = self._partition_graph(self.node_pb_dict, etype)
    save_edge_pb(self.output_dir, edge_pb, etype)
    for pidx in range(self.num_parts):
      save_graph_partition(self.output_dir, pidx, graph_list[pidx], etype)
    if with_feature:
      self._partition_and_save_edge_feat(graph_list, etype)

  def partition(self, with_feature=True, graph_caching=False):
    r""" Partition graph and feature data into different parts.

    Args:
      with_feature (bool): A flag indicating if the feature should be
        partitioned with the graph (default: ``True``).
      graph_caching (bool): A flag indicating if the full graph topology
        will be saved (default: ``False``).

    The output directory of partitioned graph data will be like:

    * homogeneous

      root_dir/
      |-- META
      |-- node_pb.pt
      |-- edge_pb.pt
      |-- part0/
          |-- graph/
              |-- rows.pt
              |-- cols.pt
              |-- eids.pt
              |-- weights.pt (optional)
          |-- node_feat/
              |-- feats.pkl
              |-- ids.pkl
              |-- cache_feats.pt (optional)
              |-- cache_ids.pt (optional)
          |-- edge_feat/
              |-- feats.pkl
              |-- ids.pkl
              |-- cache_feats.pt (optional)
              |-- cache_ids.pt (optional)
      |-- part1/
          |-- graph/
              ...
          |-- node_feat/
              ...
          |-- edge_feat/
              ...

    * heterogeneous

      root_dir/
      |-- META
      |-- node_pb/
          |-- ntype1.pt
          |-- ntype2.pt
      |-- edge_pb/
          |-- etype1.pt
          |-- etype2.pt
      |-- part0/
          |-- graph/
              |-- etype1/
                  |-- rows.pt
                  |-- cols.pt
                  |-- eids.pt
                  |-- weights.pt
              |-- etype2/
                  ...
          |-- node_feat/
              |-- ntype1/
                  |-- feats.pkl
                  |-- ids.pkl
                  |-- cache_feats.pt (optional)
                  |-- cache_ids.pt (optional)
              |-- ntype2/
                  ...
          |-- edge_feat/
              |-- etype1/
                  |-- feats.pkl
                  |-- ids.pkl
                  |-- cache_feats.pt (optional)
                  |-- cache_ids.pt (optional)
              |-- etype2/
                  ...
      |-- part1/
          |-- graph/
              ...
          |-- node_feat/
              ...
          |-- edge_feat/
              ...
    """
    if 'hetero' == self.data_cls:
      node_pb_dict = {}
      for ntype in self.node_types:
        node_ids_list, node_pb = self._partition_node(ntype)
        save_node_pb(self.output_dir, node_pb, ntype)
        node_pb_dict[ntype] = node_pb
        if with_feature:
          self._partition_and_save_node_feat(node_ids_list, ntype)
      for etype in self.edge_types:
        graph_list, edge_pb = self._partition_graph(node_pb_dict, etype)
        edge_feat = self.get_edge_feat(etype)
        with_edge_feat = (edge_feat is not None)
        if graph_caching:
          # The edge pb is only needed to locate edge features.
          if with_edge_feat:
            save_edge_pb(self.output_dir, edge_pb, etype)
          save_graph_cache(self.output_dir, graph_list, etype, with_edge_feat)
        else:
          save_edge_pb(self.output_dir, edge_pb, etype)
          for pidx in range(self.num_parts):
            save_graph_partition(self.output_dir, pidx, graph_list[pidx], etype)
        if with_feature:
          self._partition_and_save_edge_feat(graph_list, etype)
    else:
      node_ids_list, node_pb = self._partition_node()
      save_node_pb(self.output_dir, node_pb)
      if with_feature:
        self._partition_and_save_node_feat(node_ids_list)
      graph_list, edge_pb = self._partition_graph(node_pb)
      edge_feat = self.get_edge_feat()
      with_edge_feat = (edge_feat is not None)
      if graph_caching:
        if with_edge_feat:
          save_edge_pb(self.output_dir, edge_pb)
        # Fix: `with_edge_feat` was previously passed positionally into
        # the `etype` parameter of `save_graph_cache` (the hetero branch
        # passes four positionals), so edge ids were never cached and the
        # subdir was suffixed with the stringified bool.
        save_graph_cache(self.output_dir, graph_list,
                         with_edge_feat=with_edge_feat)
      else:
        save_edge_pb(self.output_dir, edge_pb)
        for pidx in range(self.num_parts):
          save_graph_partition(self.output_dir, pidx, graph_list[pidx])
      if with_feature:
        self._partition_and_save_edge_feat(graph_list)
    # save meta.
    save_meta(self.output_dir, self.num_parts, self.data_cls,
              self.node_types, self.edge_types)
def build_partition_feature(
  root_dir: str,
  partition_idx: int,
  chunk_size: int = 10000,
  node_feat: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
  node_feat_dtype: torch.dtype = torch.float32,
  edge_feat: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
  edge_feat_dtype: torch.dtype = torch.float32):
  r""" In the case that the graph topology is partitioned, but the feature
  partitioning is not executed. This method extracts and persists the
  feature data for a specific partition.

  Args:
    root_dir (str): The root directory for saved partition files.
    partition_idx (int): The partition idx.
    chunk_size: The chunk size for partitioning.
    node_feat: The node feature data, should be a dict for hetero data.
      If ``None``, node feature partitioning is skipped.
    node_feat_dtype: The data type of node features.
    edge_feat: The edge feature data, should be a dict for hetero data.
      If ``None``, edge feature partitioning is skipped.
    edge_feat_dtype: The data type of edge features.
  """
  with open(os.path.join(root_dir, 'META'), 'rb') as infile:
    meta = pickle.load(infile)
  num_partitions = meta['num_parts']
  assert partition_idx >= 0
  assert partition_idx < num_partitions
  partition_dir = os.path.join(root_dir, f'part{partition_idx}')
  assert os.path.exists(partition_dir)
  graph_dir = os.path.join(partition_dir, 'graph')
  device = torch.device('cpu')
  node_feat = convert_to_tensor(node_feat, dtype=node_feat_dtype)
  edge_feat = convert_to_tensor(edge_feat, dtype=edge_feat_dtype)

  def _save_chunks(ids, feat, group, graph_type):
    # Persist the features of `ids` (rows gathered from `feat`) as a
    # sequence of fixed-size chunks without any cached rows.
    if ids.shape[0] == 0:
      # BUG FIX: torch.chunk raises when asked for zero chunks; an empty
      # partition simply has nothing to persist.
      return
    ids_chunks = torch.chunk(
      ids, chunks=((ids.shape[0] + chunk_size - 1) // chunk_size))
    for chunk in ids_chunks:
      p_feat_chunk = FeaturePartitionData(
        feats=feat[chunk],
        ids=chunk.clone(),
        cache_feats=None,
        cache_ids=None
      )
      save_feature_partition_chunk(root_dir, partition_idx, p_feat_chunk,
                                   group=group, graph_type=graph_type)

  # homogenous
  if meta['data_cls'] == 'homo':
    # step 1: build and persist the node feature partition
    # BUG FIX: `node_feat` is documented as optional but the original code
    # indexed it unconditionally and crashed on None.
    if node_feat is not None:
      # node_pb.pt maps each global node id to its owning partition.
      node_pb = torch.load(os.path.join(root_dir, 'node_pb.pt'),
                           map_location=device)
      node_num = node_pb.size(0)
      ids = torch.arange(node_num, dtype=torch.int64)
      n_ids = torch.masked_select(ids, node_pb == partition_idx)
      _save_chunks(n_ids, node_feat, 'node_feat', None)
    # step 2: build and persist the edge feature partition
    if edge_feat is None:
      return
    # The partitioned topology provides the edge ids owned by this partition.
    graph = load_graph_partition_data(graph_dir, device)
    _save_chunks(graph.eids, edge_feat, 'edge_feat', None)
  # heterogenous
  else:
    # step 1: build and persist the node feature partition
    if node_feat is not None:
      node_pb_dir = os.path.join(root_dir, 'node_pb')
      for ntype, feat in node_feat.items():
        node_pb = torch.load(
          os.path.join(node_pb_dir, f'{as_str(ntype)}.pt'),
          map_location=device)
        node_num = node_pb.size(0)
        ids = torch.arange(node_num, dtype=torch.int64)
        n_ids = torch.masked_select(ids, node_pb == partition_idx)
        _save_chunks(n_ids, feat, 'node_feat', ntype)
    # step 2: build and persist the edge feature partition
    if edge_feat is None:
      return
    for etype, feat in edge_feat.items():
      graph = load_graph_partition_data(
        os.path.join(graph_dir, as_str(etype)), device)
      _save_chunks(graph.eids, feat, 'edge_feat', etype)
def load_graph_partition_data(
  graph_data_dir: str,
  device: torch.device
) -> GraphPartitionData:
  r""" Load the graph topology of a single partition from the specified
  directory, or return ``None`` if the directory does not exist.

  ``rows.pt`` and ``cols.pt`` are mandatory; the edge id and edge weight
  tensors are loaded only when present on disk.
  """
  if not os.path.exists(graph_data_dir):
    return None

  def _load_optional(file_name):
    # Load a tensor file from this partition directory if it exists.
    file_path = os.path.join(graph_data_dir, file_name)
    if not os.path.exists(file_path):
      return None
    return torch.load(file_path, map_location=device)

  rows = torch.load(os.path.join(graph_data_dir, 'rows.pt'),
                    map_location=device)
  cols = torch.load(os.path.join(graph_data_dir, 'cols.pt'),
                    map_location=device)
  return GraphPartitionData(
    edge_index=(rows, cols),
    eids=_load_optional('eids.pt'),
    weights=_load_optional('weights.pt')
  )
def load_feature_partition_data(
  feature_data_dir: str,
  device: torch.device
) -> FeaturePartitionData:
  r""" Load the feature data of a single partition from the specified
  directory, or return ``None`` if the directory does not exist.

  Cached features are attached only when both cache files
  (``cache_feats.pt`` and ``cache_ids.pt``) are present.
  """
  if not os.path.exists(feature_data_dir):
    return None
  # Features and their global ids are stored as pickled tensor chunks.
  feats = load_and_concatenate_tensors(
    os.path.join(feature_data_dir, 'feats.pkl'), device)
  ids = load_and_concatenate_tensors(
    os.path.join(feature_data_dir, 'ids.pkl'), device)
  cache_feats, cache_ids = None, None
  feats_cache_path = os.path.join(feature_data_dir, 'cache_feats.pt')
  ids_cache_path = os.path.join(feature_data_dir, 'cache_ids.pt')
  if os.path.exists(feats_cache_path) and os.path.exists(ids_cache_path):
    cache_feats = torch.load(feats_cache_path, map_location=device)
    cache_ids = torch.load(ids_cache_path, map_location=device)
  return FeaturePartitionData(
    feats=feats, ids=ids, cache_feats=cache_feats, cache_ids=cache_ids)
def load_partition(
  root_dir: str,
  partition_idx: int,
  graph_caching: bool = False,
  device: torch.device = torch.device('cpu')
) -> Union[Tuple[int, int,
                 GraphPartitionData,
                 Optional[FeaturePartitionData],
                 Optional[FeaturePartitionData],
                 PartitionBook,
                 PartitionBook],
           Tuple[int, int,
                 HeteroGraphPartitionData,
                 Optional[HeteroFeaturePartitionData],
                 Optional[HeteroFeaturePartitionData],
                 HeteroNodePartitionDict,
                 HeteroEdgePartitionDict]]:
  r""" Load one saved partition from disk.

  Args:
    root_dir (str): The root directory for saved files.
    partition_idx (int): The partition idx to load.
    graph_caching (bool): Whether to load the entire (shared) graph
      topology instead of this partition's own topology.
    device (torch.device): The device where loaded graph partition data
      locates.

  Returns:
    int: Number of all partitions.
    int: The current partition idx.
    GraphPartitionData/HeteroGraphPartitionData: graph partition data.
    FeaturePartitionData/HeteroFeaturePartitionData: node feature partition
      data, optional.
    FeaturePartitionData/HeteroFeaturePartitionData: edge feature partition
      data, optional.
    PartitionBook/HeteroNodePartitionDict: node partition book.
    PartitionBook/HeteroEdgePartitionDict: edge partition book.
  """
  with open(os.path.join(root_dir, 'META'), 'rb') as meta_file:
    meta = pickle.load(meta_file)
  num_partitions = meta['num_parts']
  assert 0 <= partition_idx < num_partitions
  partition_dir = os.path.join(root_dir, f'part{partition_idx}')
  assert os.path.exists(partition_dir)
  # A cached graph lives at the root and is shared by all partitions.
  graph_root = root_dir if graph_caching else partition_dir
  graph_dir = os.path.join(graph_root, 'graph')
  node_feat_dir = os.path.join(partition_dir, 'node_feat')
  edge_feat_dir = os.path.join(partition_dir, 'edge_feat')

  # homogenous
  if meta['data_cls'] == 'homo':
    return (
      num_partitions,
      partition_idx,
      load_graph_partition_data(graph_dir, device),
      load_feature_partition_data(node_feat_dir, device),
      load_feature_partition_data(edge_feat_dir, device),
      torch.load(os.path.join(root_dir, 'node_pb.pt'), map_location=device),
      torch.load(os.path.join(root_dir, 'edge_pb.pt'), map_location=device)
    )

  # heterogenous: per-type sub-directories named by as_str(type).
  graph_dict = {
    etype: load_graph_partition_data(
      os.path.join(graph_dir, as_str(etype)), device)
    for etype in meta['edge_types']
  }
  node_feat_dict = {}
  for ntype in meta['node_types']:
    feat_pdata = load_feature_partition_data(
      os.path.join(node_feat_dir, as_str(ntype)), device)
    if feat_pdata is not None:
      node_feat_dict[ntype] = feat_pdata
  edge_feat_dict = {}
  for etype in meta['edge_types']:
    feat_pdata = load_feature_partition_data(
      os.path.join(edge_feat_dir, as_str(etype)), device)
    if feat_pdata is not None:
      edge_feat_dict[etype] = feat_pdata
  node_pb_dir = os.path.join(root_dir, 'node_pb')
  node_pb_dict = {
    ntype: torch.load(
      os.path.join(node_pb_dir, f'{as_str(ntype)}.pt'), map_location=device)
    for ntype in meta['node_types']
  }
  # Edge partition books are optional per edge type (absent when the graph
  # topology was saved as a shared cache).
  edge_pb_dict = {}
  edge_pb_dir = os.path.join(root_dir, 'edge_pb')
  for etype in meta['edge_types']:
    edge_pb_file = os.path.join(edge_pb_dir, f'{as_str(etype)}.pt')
    if os.path.exists(edge_pb_file):
      edge_pb_dict[etype] = torch.load(edge_pb_file, map_location=device)
  return (
    num_partitions,
    partition_idx,
    graph_dict,
    node_feat_dict or None,  # empty dict means no node features on disk
    edge_feat_dict or None,  # empty dict means no edge features on disk
    node_pb_dict,
    edge_pb_dict
  )
def cat_feature_cache(
  partition_idx: int,
  feat_pdata: FeaturePartitionData,
  feat_pb: PartitionBook
) -> Tuple[float, torch.Tensor, torch.Tensor, PartitionBook]:
  r""" Merge a partition's cached features with its own features into a
  single deduplicated feature tensor.

  The cached rows are placed in front of the partition's own rows. If
  ``feat_pdata`` carries no cache, the original data is returned unchanged
  with a zero cache ratio.

  Returns:
    float: The proportion of cached rows in the merged feature tensor.
    torch.Tensor: The merged features, cached rows first.
    torch.Tensor: Mapping from a global id to its local row index in the
      merged features.
    PartitionBook: Partition book updated so that cached ids now map to
      ``partition_idx``.
  """
  own_feats, own_ids = feat_pdata.feats, feat_pdata.ids
  cached_feats, cached_ids = feat_pdata.cache_feats, feat_pdata.cache_ids
  if cached_feats is None or cached_ids is None:
    # No cache attached: pass everything through untouched.
    return 0.0, own_feats, id2idx(own_ids), feat_pb
  device = own_feats.device
  cached_num = cached_ids.size(0)
  cache_ratio = cached_num / (cached_num + own_ids.size(0))
  # Cached rows come first in the merged tensor.
  merged_feats = torch.cat([cached_feats, own_feats])
  # Dense lookup table from global id to local row index.
  table_size = max(torch.max(cached_ids).item(), torch.max(own_ids).item()) + 1
  nid2idx = torch.zeros(table_size, dtype=torch.int64, device=device)
  nid2idx[own_ids] = (
    torch.arange(own_ids.size(0), dtype=torch.int64, device=device) +
    cached_num
  )
  # Written after own ids so that an id present in both maps to its cached row.
  nid2idx[cached_ids] = torch.arange(cached_num, dtype=torch.int64,
                                     device=device)
  # Cached ids are now served locally, so they belong to this partition.
  new_feat_pb = feat_pb.clone()
  new_feat_pb[cached_ids] = partition_idx
  return cache_ratio, merged_feats, nid2idx, new_feat_pb
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/partition/partition_book.py | graphlearn_torch/python/partition/partition_book.py | import torch
from typing import List, Tuple
from .base import PartitionBook
class RangePartitionBook(PartitionBook):
  r"""A partition book for IDs that are laid out in consecutive ranges.

  Each partition owns one half-open ID range ``[start, end)``, and the
  ranges must be contiguous: every range starts exactly where the previous
  one ends.

  Args:
    partition_ranges (List[Tuple[int, int]]): The ``(start, end)`` range of
      every partition, ordered by partition index.
    partition_idx (int): The index of the current partition.

  Example:
    >>> range_pb = RangePartitionBook([(0, 10), (10, 20), (20, 30)], 1)
    >>> range_pb[torch.tensor([0, 5, 10, 15, 20, 25])]
    tensor([0, 0, 1, 1, 2, 2])
  """
  def __init__(self, partition_ranges: List[Tuple[int, int]], partition_idx: int):
    if any(start >= end for start, end in partition_ranges):
      raise ValueError("All partition ranges must have start < end")
    if any(prev[1] != curr[0] for prev, curr in
           zip(partition_ranges[:-1], partition_ranges[1:])):
      raise ValueError("Partition ranges must be continuous")
    # Only the exclusive upper bounds are needed for partition lookups.
    self.partition_bounds = torch.tensor(
      [end for _, end in partition_ranges], dtype=torch.long)
    self.partition_idx = partition_idx
    # Local indices are global ids shifted by this partition's start offset.
    self._id2index = OffsetId2Index(partition_ranges[partition_idx][0])

  def __getitem__(self, indices: torch.Tensor) -> torch.Tensor:
    # An id belongs to the first partition whose upper bound exceeds it.
    return torch.searchsorted(self.partition_bounds, indices, right=True)

  @property
  def device(self):
    return self.partition_bounds.device

  @property
  def id2index(self):
    return self._id2index

  def id_filter(self, node_pb: PartitionBook, partition_idx: int):
    # All ids owned by `partition_idx`: [previous bound, own bound).
    if partition_idx > 0:
      start = self.partition_bounds[partition_idx - 1]
    else:
      start = 0
    end = self.partition_bounds[partition_idx]
    return torch.arange(start, end)
class OffsetId2Index:
  r"""Maps global IDs to local indices by subtracting a fixed offset."""

  def __init__(self, offset: int):
    self.offset = offset

  def __getitem__(self, ids: torch.Tensor) -> torch.Tensor:
    return ids - self.offset

  def to(self, device):
    # Stateless w.r.t. device: the output always follows the input ids.
    return self
class GLTPartitionBook(PartitionBook, torch.Tensor):
  r""" A partition book of graph nodes or edges.

  Stored as a plain ``torch.Tensor`` in which position ``i`` holds the
  partition index owning node/edge id ``i``.
  """
  def __getitem__(self, indices) -> torch.Tensor:
    # Delegate directly to the raw tensor lookup, bypassing whatever
    # __getitem__ the PartitionBook base may declare.
    return torch.Tensor.__getitem__(self, indices)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/graphlearn_torch/python/partition/random_partitioner.py | graphlearn_torch/python/partition/random_partitioner.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List, Dict, Optional, Tuple, Union
import torch
from ..typing import NodeType, EdgeType, TensorDataType
from .base import PartitionerBase, PartitionBook
# Implementation of a random partitioner.
class RandomPartitioner(PartitionerBase):
  r""" Partitioner that assigns graph nodes to partitions uniformly at
  random; edges and features are then partitioned by the base class
  according to that assignment.

  Args:
    output_dir: The output root directory for partitioned results.
    num_parts: Number of partitions.
    num_nodes: Number of graph nodes, should be a dict for hetero data.
    edge_index: The edge index data of graph edges, should be a dict
      for hetero data.
    node_feat: The node feature data, should be a dict for hetero data.
    node_feat_dtype: The data type of node features.
    edge_feat: The edge feature data, should be a dict for hetero data.
    edge_feat_dtype: The data type of edge features.
    edge_weights: The edge weight data, should be a dict for hetero data.
    edge_assign_strategy: The assignment strategy when partitioning edges,
      should be 'by_src' or 'by_dst'.
    chunk_size: The chunk size for partitioning.
  """
  def __init__(
    self,
    output_dir: str,
    num_parts: int,
    num_nodes: Union[int, Dict[NodeType, int]],
    edge_index: Union[TensorDataType, Dict[EdgeType, TensorDataType]],
    node_feat: Optional[Union[TensorDataType, Dict[NodeType, TensorDataType]]] = None,
    node_feat_dtype: torch.dtype = torch.float32,
    edge_feat: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_feat_dtype: torch.dtype = torch.float32,
    edge_weights: Optional[Union[TensorDataType, Dict[EdgeType, TensorDataType]]] = None,
    edge_assign_strategy: str = 'by_src',
    chunk_size: int = 10000,
  ):
    super().__init__(output_dir, num_parts, num_nodes, edge_index, node_feat,
                     node_feat_dtype, edge_feat, edge_feat_dtype, edge_weights,
                     edge_assign_strategy, chunk_size)

  def _partition_node(
    self,
    ntype: Optional[NodeType] = None
  ) -> Tuple[List[torch.Tensor], PartitionBook]:
    # Resolve the node count for the requested type (hetero) or the
    # whole graph (homo).
    if self.data_cls == 'hetero':
      assert ntype is not None
      node_num = self.num_nodes[ntype]
    else:
      node_num = self.num_nodes
    ids = torch.arange(node_num, dtype=torch.int64)
    # Shuffling a round-robin assignment gives a uniformly random yet
    # near-balanced partition book.
    shuffle = torch.randperm(node_num)
    partition_book = (ids % self.num_parts)[shuffle]
    partition_results = [
      torch.masked_select(ids, partition_book == pidx)
      for pidx in range(self.num_parts)
    ]
    return partition_results, partition_book

  def _cache_node(
    self,
    ntype: Optional[NodeType] = None
  ) -> List[Optional[torch.Tensor]]:
    # Random partitioning performs no hot-node caching.
    return [None] * self.num_parts
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_partition.py | test/python/test_partition.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import unittest
import torch
from graphlearn_torch.typing import *
from graphlearn_torch.partition import *
class PartitionTestCase(unittest.TestCase):
  """Tests for the random/frequency partitioners and partition books."""

  def _create_edge_index(self, src_num, dst_num, degree):
    # Build a synthetic edge index where every source node v links to
    # `degree` consecutive destinations starting at (v + 1) % dst_num.
    rows = []
    cols = []
    for v in range(src_num):
      rows.extend([v for _ in range(degree)])
      cols.extend([((v + i + 1) % dst_num) for i in range(degree)])
    return torch.tensor([rows, cols], dtype=torch.int64)

  def _check_dir_and_del(self, dir):
    # Remove a stale output directory left by a previous (failed) run.
    if os.path.exists(dir):
      try:
        shutil.rmtree(dir)
      except OSError as e:
        print(f'Error when deleting {dir}: {e}')

  def test_random_homo_partition(self):
    # Partition a homogeneous graph of 20 nodes / 40 edges into 4 parts
    # and verify topology, features, weights and partition books.
    dir = 'random_homo_partition_ut'
    self._check_dir_and_del(dir)
    nparts = 4
    node_num = 20
    edge_index = self._create_edge_index(node_num, node_num, 2)
    edge_num = len(edge_index[0])
    # Row i of the feature tensors equals i, so features identify their id.
    node_feat = torch.stack(
      [torch.ones(10) * i for i in range(node_num)], dim=0
    )
    edge_feat = torch.stack(
      [torch.ones(2) * i for i in range(edge_num)], dim=0
    )
    # Edge weight i equals edge id i, letting weights be checked below.
    edge_weights = torch.arange(0, edge_num, dtype=torch.float)
    random_partitioner = RandomPartitioner(
      dir, nparts, node_num, edge_index, node_feat=node_feat,
      edge_feat=edge_feat, edge_weights=edge_weights, chunk_size=3)
    random_partitioner.partition()
    for pidx in range(nparts):
      _, _, p_graph, p_node_feat, p_edge_feat, node_pb, edge_pb = \
        load_partition(dir, pidx)
      # node: 20 nodes over 4 parts -> exactly 5 nodes per partition
      node_ids = torch.unique(p_graph.edge_index[0])
      self.assertEqual(node_ids.size(0), 5)
      self.assertTrue(torch.equal(torch.sort(node_ids)[0],
                                  torch.sort(p_node_feat.ids)[0]))
      expect_node_pids = torch.ones(5, dtype=torch.int64) * pidx
      self.assertTrue(torch.equal(node_pb[node_ids], expect_node_pids))
      self.assertEqual(p_node_feat.feats.size(0), 5)
      self.assertEqual(p_node_feat.feats.size(1), 10)
      self.assertEqual(p_node_feat.ids.size(0), 5)
      self.assertTrue(p_node_feat.cache_feats is None)
      self.assertTrue(p_node_feat.cache_ids is None)
      for idx, n_id in enumerate(p_node_feat.ids):
        self.assertTrue(torch.equal(p_node_feat.feats[idx], node_feat[n_id]))
      # edge & weight: 40 edges over 4 parts -> 10 edges per partition
      edge_ids = p_graph.eids
      edge_weight = p_graph.weights
      self.assertEqual(edge_ids.size(0), 10)
      self.assertTrue(torch.equal(torch.sort(edge_ids)[0],
                                  torch.sort(p_edge_feat.ids)[0]))
      self.assertTrue(torch.allclose(edge_ids.to(torch.float), edge_weight))
      expect_edge_pids = torch.ones(10, dtype=torch.int64) * pidx
      self.assertTrue(torch.equal(edge_pb[edge_ids], expect_edge_pids))
      self.assertEqual(p_edge_feat.feats.size(0), 10)
      self.assertEqual(p_edge_feat.feats.size(1), 2)
      self.assertEqual(p_edge_feat.ids.size(0), 10)
      self.assertTrue(p_edge_feat.cache_feats is None)
      self.assertTrue(p_edge_feat.cache_ids is None)
      for idx, e_id in enumerate(p_edge_feat.ids):
        self.assertTrue(torch.equal(p_edge_feat.feats[idx], edge_feat[e_id]))
    shutil.rmtree(dir)

  def test_random_hetero_partition(self):
    # Same as the homo test but with 'user'/'item' nodes and two edge types.
    dir = 'random_hetero_partition_ut'
    self._check_dir_and_del(dir)
    nparts = 4
    user_num = 20
    item_num = 12
    node_num_dict = {'user': user_num, 'item': item_num}
    u2i_type = ('user', 'u2i', 'item')
    i2i_type = ('item', 'i2i', 'item')
    edge_index_dict = {
      u2i_type: self._create_edge_index(user_num, item_num, 2),
      i2i_type: self._create_edge_index(item_num, item_num, 2)
    }
    u2i_num = len(edge_index_dict[u2i_type][0])
    i2i_num = len(edge_index_dict[i2i_type][0])
    # Feature row values encode the id (users: i, items: 2*i).
    user_feats = [torch.ones(10, dtype=torch.float) * i
                  for i in range(user_num)]
    item_feats = [torch.ones(10, dtype=torch.float) * 2 * i
                  for i in range(item_num)]
    node_feat_dict = {
      'user': torch.stack(user_feats, dim=0),
      'item': torch.stack(item_feats, dim=0)
    }
    u2i_feats = [torch.ones(2, dtype=torch.float) * i
                 for i in range(u2i_num)]
    i2i_feats = [torch.ones(2, dtype=torch.float) * 2 * i
                 for i in range(i2i_num)]
    edge_feat_dict = {
      u2i_type: torch.stack(u2i_feats, dim=0),
      i2i_type: torch.stack(i2i_feats, dim=0)
    }
    # Per-type edge weight i equals edge id i.
    u2i_weights = torch.arange(0, edge_index_dict[u2i_type].size(1), dtype=torch.float)
    i2i_weights = torch.arange(0, edge_index_dict[i2i_type].size(1), dtype=torch.float)
    edge_weight_dict = {
      u2i_type: u2i_weights,
      i2i_type: i2i_weights
    }
    random_partitioner = RandomPartitioner(
      dir, nparts, node_num_dict, edge_index_dict, node_feat=node_feat_dict,
      edge_feat=edge_feat_dict, edge_weights=edge_weight_dict, chunk_size=3
    )
    random_partitioner.partition()
    for pidx in range(nparts):
      (
        _, _,
        p_graph_dict, p_node_feat_dict, p_edge_feat_dict,
        node_pb_dict, edge_pb_dict
      ) = load_partition(dir, pidx)
      # user: 20 users over 4 parts -> 5 per partition
      user_ids = torch.unique(p_graph_dict[u2i_type].edge_index[0])
      self.assertEqual(user_ids.size(0), 5)
      self.assertTrue(torch.equal(torch.sort(user_ids)[0],
                                  torch.sort(p_node_feat_dict['user'].ids)[0]))
      expect_user_pids = torch.ones(5, dtype=torch.int64) * pidx
      self.assertTrue(torch.equal(node_pb_dict['user'][user_ids],
                                  expect_user_pids))
      self.assertEqual(p_node_feat_dict['user'].feats.size(0), 5)
      self.assertEqual(p_node_feat_dict['user'].feats.size(1), 10)
      self.assertEqual(p_node_feat_dict['user'].ids.size(0), 5)
      self.assertTrue(p_node_feat_dict['user'].cache_feats is None)
      self.assertTrue(p_node_feat_dict['user'].cache_ids is None)
      for idx, user_id in enumerate(p_node_feat_dict['user'].ids):
        self.assertTrue(torch.equal(p_node_feat_dict['user'].feats[idx],
                                    node_feat_dict['user'][user_id]))
      # item: 12 items over 4 parts -> 3 per partition
      item_ids = torch.unique(p_graph_dict[i2i_type].edge_index[0])
      self.assertEqual(item_ids.size(0), 3)
      self.assertTrue(torch.equal(torch.sort(item_ids)[0],
                                  torch.sort(p_node_feat_dict['item'].ids)[0]))
      expect_item_pids = torch.ones(3, dtype=torch.int64) * pidx
      self.assertTrue(torch.equal(node_pb_dict['item'][item_ids],
                                  expect_item_pids))
      self.assertEqual(p_node_feat_dict['item'].feats.size(0), 3)
      self.assertEqual(p_node_feat_dict['item'].feats.size(1), 10)
      self.assertEqual(p_node_feat_dict['item'].ids.size(0), 3)
      self.assertTrue(p_node_feat_dict['item'].cache_feats is None)
      self.assertTrue(p_node_feat_dict['item'].cache_ids is None)
      for idx, item_id in enumerate(p_node_feat_dict['item'].ids):
        self.assertTrue(torch.equal(p_node_feat_dict['item'].feats[idx],
                                    node_feat_dict['item'][item_id]))
      # u2i: 40 edges over 4 parts -> 10 per partition
      p_u2i_eids = p_graph_dict[u2i_type].eids
      p_u2i_weights = p_graph_dict[u2i_type].weights
      self.assertEqual(p_u2i_eids.size(0), 10)
      self.assertTrue(torch.allclose(p_u2i_eids.to(torch.float), p_u2i_weights))
      expect_u2i_pids = torch.ones(10, dtype=torch.long) * pidx
      self.assertTrue(torch.equal(edge_pb_dict[u2i_type][p_u2i_eids],
                                  expect_u2i_pids))
      self.assertEqual(p_edge_feat_dict[u2i_type].feats.size(0), 10)
      self.assertEqual(p_edge_feat_dict[u2i_type].feats.size(1), 2)
      self.assertEqual(p_edge_feat_dict[u2i_type].ids.size(0), 10)
      self.assertTrue(p_edge_feat_dict[u2i_type].cache_feats is None)
      self.assertTrue(p_edge_feat_dict[u2i_type].cache_ids is None)
      for idx, e_id in enumerate(p_edge_feat_dict[u2i_type].ids):
        self.assertTrue(torch.equal(p_edge_feat_dict[u2i_type].feats[idx],
                                    edge_feat_dict[u2i_type][e_id]))
      # i2i: 24 edges over 4 parts -> 6 per partition
      p_i2i_eids = p_graph_dict[i2i_type].eids
      p_i2i_weights = p_graph_dict[i2i_type].weights
      self.assertEqual(p_i2i_eids.size(0), 6)
      self.assertTrue(torch.allclose(p_i2i_eids.to(torch.float), p_i2i_weights))
      expect_i2i_pids = torch.ones(6, dtype=torch.long) * pidx
      self.assertTrue(torch.equal(edge_pb_dict[i2i_type][p_i2i_eids],
                                  expect_i2i_pids))
      self.assertEqual(p_edge_feat_dict[i2i_type].feats.size(0), 6)
      self.assertEqual(p_edge_feat_dict[i2i_type].feats.size(1), 2)
      self.assertEqual(p_edge_feat_dict[i2i_type].ids.size(0), 6)
      self.assertTrue(p_edge_feat_dict[i2i_type].cache_feats is None)
      self.assertTrue(p_edge_feat_dict[i2i_type].cache_ids is None)
      for idx, e_id in enumerate(p_edge_feat_dict[i2i_type].ids):
        self.assertTrue(torch.equal(p_edge_feat_dict[i2i_type].feats[idx],
                                    edge_feat_dict[i2i_type][e_id]))
    shutil.rmtree(dir)

  def test_frequency_partition(self):
    # Frequency partitioner: hot nodes get cached, so cache_feats/cache_ids
    # must be populated and sorted by descending access probability.
    dir = 'frequency_partition_ut'
    self._check_dir_and_del(dir)
    nparts = 4
    node_num = 20
    edge_index = self._create_edge_index(node_num, node_num, 2)
    edge_num = len(edge_index[0])
    node_feat = torch.stack(
      [torch.ones(10) * i for i in range(node_num)], dim=0
    )
    # Random per-partition access probabilities drive the caching order.
    node_probs = [torch.rand(node_num) for _ in range(nparts)]
    # Budget sized to hold exactly 4 feature rows per partition.
    cache_budget_bytes = 4 * node_feat.size(1) * node_feat.element_size()
    edge_feat = torch.stack(
      [torch.ones(2) * i for i in range(edge_num)], dim=0
    )
    edge_weights = torch.arange(0, edge_num, dtype=torch.float)
    freq_partitioner = FrequencyPartitioner(
      dir, nparts, node_num, edge_index, node_probs,
      node_feat=node_feat, edge_feat=edge_feat, edge_weights=edge_weights,
      cache_memory_budget=cache_budget_bytes,
      chunk_size=3)
    freq_partitioner.partition()
    all_node_ids = []
    all_edge_ids = []
    for pidx in range(nparts):
      _, _, p_graph, p_node_feat, p_edge_feat, node_pb, edge_pb = \
        load_partition(dir, pidx)
      node_ids = torch.unique(p_graph.edge_index[0])
      self.assertTrue(torch.equal(torch.sort(node_ids)[0],
                                  torch.sort(p_node_feat.ids)[0]))
      all_node_ids.append(node_ids)
      expect_node_pids = torch.ones(node_ids.size(0), dtype=torch.int64) * pidx
      self.assertTrue(torch.equal(node_pb[node_ids], expect_node_pids))
      self.assertTrue(p_node_feat.cache_feats is not None)
      self.assertTrue(p_node_feat.cache_ids is not None)
      # Cached ids must be ordered by non-increasing access probability.
      for idx in range(p_node_feat.cache_ids.size(0) - 1):
        self.assertGreaterEqual(
          node_probs[pidx][p_node_feat.cache_ids[idx]].item(),
          node_probs[pidx][p_node_feat.cache_ids[idx + 1]].item()
        )
      # edge & weight
      edge_ids = p_graph.eids
      edge_weights = p_graph.weights
      self.assertTrue(torch.equal(torch.sort(edge_ids)[0],
                                  torch.sort(p_edge_feat.ids)[0]))
      self.assertTrue(torch.allclose(edge_ids.to(torch.float), edge_weights))
      all_edge_ids.append(edge_ids)
      expect_edge_pids = torch.ones(edge_ids.size(0), dtype=torch.int64) * pidx
      self.assertTrue(torch.equal(edge_pb[edge_ids], expect_edge_pids))
      self.assertTrue(p_edge_feat.cache_feats is None)
      self.assertTrue(p_edge_feat.cache_ids is None)
    # Every node/edge id must appear in exactly one partition.
    all_node_ids = torch.cat(all_node_ids)
    self.assertTrue(torch.equal(torch.sort(all_node_ids)[0],
                                torch.arange(node_num)))
    all_edge_ids = torch.cat(all_edge_ids)
    self.assertTrue(torch.equal(torch.sort(all_edge_ids)[0],
                                torch.arange(edge_num)))
    shutil.rmtree(dir)

  def test_cat_feature_cache(self):
    # Cached ids [3, 4] must occupy local rows 0 and 1 of the merged
    # features, and the new partition book must claim them for partition 0.
    feat_pdata = FeaturePartitionData(
      feats=torch.rand(4, 10),
      ids=torch.tensor([0, 2, 4, 6], dtype=torch.long),
      cache_feats=torch.rand(2, 10),
      cache_ids=torch.tensor([3, 4], dtype=torch.long)
    )
    feat_pb = torch.tensor([0, 1, 0, 1, 0, 1, 0, 1], dtype=torch.long)
    res = cat_feature_cache(0, feat_pdata, feat_pb)
    cache_ratio, new_feats, id2idx, new_feat_pb = res
    self.assertEqual(cache_ratio, 2 / 6)
    self.assertEqual(new_feats.size(0), 6)
    self.assertEqual(id2idx[3].item(), 0)
    self.assertEqual(id2idx[4].item(), 1)
    self.assertEqual(new_feat_pb[3].item(), 0)

  def test_range_partition_book(self):
    # Lookups, validation errors, id2index conversion and id_filter for
    # range-based partition books.
    partition_ranges = [(0, 10), (10, 20), (20, 30)]
    range_pb = RangePartitionBook(partition_ranges, 1)
    indices = torch.tensor([0, 5, 10, 15, 20, 25, 29])
    self.assertTrue(torch.equal(range_pb[indices], torch.tensor([0, 0, 1, 1, 2, 2, 2])))
    with self.assertRaises(ValueError):
      RangePartitionBook([(0, 10), (11, 20), (20, 30)], 1)  # gap at 10..11
    with self.assertRaises(ValueError):
      RangePartitionBook([(0, 10), (10, 5), (20, 30)], 1)  # start >= end
    id2idx = range_pb.id2index
    self.assertTrue(torch.equal(id2idx[torch.arange(10, 20)], torch.arange(10)))
    self.assertTrue(torch.equal(range_pb.id_filter(range_pb, 1), torch.arange(10, 20)))
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_feature.py | test/python/test_feature.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
from graphlearn_torch.data import Topology, DeviceGroup, Feature, sort_by_in_degree
from graphlearn_torch.utils import tensor_equal_with_device
class FeatureTestCase(unittest.TestCase):
  """Tests for Feature lookup under different GPU/CPU split ratios.

  NOTE(review): all fixtures place tensors on 'cuda:0', so this suite
  requires a CUDA device to run.
  """

  def setUp(self):
    # 384x128 feature tensor: rows [0,128) hold 1s, [128,256) hold 2s,
    # [256,384) hold 3s — so a row's value identifies its band.
    tensor = torch.ones(128, 128, dtype=torch.float32)
    self.tensor = torch.cat([tensor, tensor*2, tensor*3], 0)
    # Random topology used only to derive in-degrees for feature sorting.
    rows = torch.cat([torch.arange(128*3),
                      torch.randint(128, (128*3,)),
                      torch.randint(128*2, (128*3,))])
    cols = torch.cat([torch.randint(128*3, (128*3,)),
                      torch.randint(128*3, (128*3,)),
                      torch.randint(128*3, (128*3,))])
    self.csr_topo = Topology(edge_index=torch.stack([rows, cols]))
    # Two query ids per band; expected result is two rows of 1s, 2s, 3s.
    self.input = torch.tensor([10, 20, 200, 210, 300, 310], dtype=torch.int64,
                              device= torch.device('cuda:0'))
    attr = torch.ones(2, 128, dtype=torch.float32,
                      device= torch.device('cuda:0'))
    self.res = torch.cat((attr, attr*2, attr*3), 0)

  def test_feature_without_degree_sort(self):
    # Half GPU / half CPU split, no id reordering.
    device_group_list = [DeviceGroup(0, [0])]
    feature = Feature(
      feature_tensor=self.tensor.clone(), split_ratio=0.5,
      device_group_list=device_group_list, device=0)
    self.assertEqual(list(feature.shape), [128*3, 128])
    self.assertTrue(tensor_equal_with_device(feature[self.input], self.res))

  def test_feature_with_degree_sort(self):
    # Half split with features pre-sorted by in-degree via id2index.
    device_group_list = [DeviceGroup(0, [0])]
    cpu_tensor, id2index = sort_by_in_degree(
      self.tensor.clone(), 0.5, self.csr_topo)
    feature = Feature(
      feature_tensor=cpu_tensor, id2index=id2index, split_ratio=0.5,
      device_group_list=device_group_list, device=0)
    self.assertEqual(list(feature.shape), [128*3, 128])
    self.assertTrue(tensor_equal_with_device(feature[self.input], self.res))

  def test_feature_with_degree_sort_pin(self):
    # split_ratio 0.0 with default device args: pinned host memory path.
    cpu_tensor, id2index = sort_by_in_degree(
      self.tensor.clone(), 0.0, self.csr_topo)
    feature = Feature(feature_tensor=cpu_tensor, id2index=id2index)
    self.assertEqual(list(feature.shape), [128*3, 128])
    self.assertTrue(tensor_equal_with_device(feature[self.input], self.res))

  def test_feature_with_degree_sort_cpu(self):
    # Pure CPU storage; the expected result must also live on CPU.
    cpu_tensor, id2index = sort_by_in_degree(
      self.tensor.clone(), 0.0, self.csr_topo)
    feature = Feature(feature_tensor=cpu_tensor, id2index=id2index,
                      with_gpu=False)
    self.assertEqual(list(feature.shape), [128*3, 128])
    self.assertTrue(tensor_equal_with_device(feature[self.input], self.res.cpu()))

  def test_feature_with_degree_sort_gpu(self):
    # split_ratio 1.0: the whole feature tensor resides on the GPU.
    device_group_list = [DeviceGroup(0, [0])]
    cpu_tensor, id2index = sort_by_in_degree(
      self.tensor.clone(), 1.0, self.csr_topo)
    feature = Feature(
      feature_tensor=cpu_tensor, id2index=id2index, split_ratio=1.0,
      device_group_list=device_group_list, device=0)
    self.assertEqual(list(feature.shape), [128*3, 128])
    self.assertTrue(tensor_equal_with_device(feature[self.input], self.res))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_dist_random_partitioner.py | test/python/test_dist_random_partitioner.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import time
import unittest
import torch
import torch.multiprocessing as mp
import graphlearn_torch as glt
# for hetero dataset
user_ntype = 'user'
item_ntype = 'item'
u2i_etype = ('user', 'u2i', 'item')
i2i_etype = ('item', 'i2i', 'item')
def _get_part_of_node(rank, world_size, num_nodes, nfeat_num=10):
per_num_nodes = num_nodes // world_size
nids, nfeat = [], []
for v in range(per_num_nodes * rank, min(num_nodes, per_num_nodes * (rank + 1))):
nids.append(v)
nfeat.append([v for _ in range(nfeat_num)])
node_ids = torch.tensor(nids, dtype=torch.int64)
node_feat = torch.tensor(nfeat, dtype=torch.float32)
return node_ids, node_feat
def _get_part_of_edge(rank, world_size, num_src, num_dst, degree=2, efeat_num=5):
per_num_src = num_src // world_size
rows, cols, eids, efeat = [], [], [], []
for v in range(per_num_src * rank, min(num_src, per_num_src * (rank + 1))):
rows.extend([v for _ in range(degree)])
cols.extend([((v + i + 1) % num_dst) for i in range(degree)])
eids.extend([(v * degree + i) for i in range(degree)])
efeat.extend([[(v * degree + i) for _ in range(efeat_num)] for i in range(degree)])
edge_index = torch.tensor([rows, cols], dtype=torch.int64)
edge_ids = torch.tensor(eids, dtype=torch.int64)
edge_feat = torch.tensor(efeat, dtype=torch.float32)
return edge_index, edge_ids, edge_feat
def _get_part_of_graph(rank, world_size, graph_type='homo'):
  """Assemble this rank's slice of the test graph.

  For ``graph_type == 'homo'`` a single 100-node graph is produced; any
  other value yields the user/item heterogeneous graph (100 users, 60
  items) keyed by node/edge type.

  Returns:
    Tuple of (num_nodes, node_ids, node_feat, edge_index, edge_ids,
    edge_feat) — plain tensors for 'homo', per-type dicts otherwise.
  """
  if graph_type == 'homo':
    node_ids, node_feat = _get_part_of_node(rank, world_size, 100)
    edge_index, edge_ids, edge_feat = _get_part_of_edge(rank, world_size, 100, 100)
    return 100, node_ids, node_feat, edge_index, edge_ids, edge_feat
  num_nodes = {user_ntype: 100, item_ntype: 60}
  node_ids, node_feat = {}, {}
  edge_index, edge_ids, edge_feat = {}, {}, {}
  for ntype, count in ((user_ntype, 100), (item_ntype, 60)):
    node_ids[ntype], node_feat[ntype] = \
      _get_part_of_node(rank, world_size, count)
  for etype, (num_src, num_dst) in ((u2i_etype, (100, 60)),
                                    (i2i_etype, (60, 60))):
    edge_index[etype], edge_ids[etype], edge_feat[etype] = \
      _get_part_of_edge(rank, world_size, num_src, num_dst)
  return num_nodes, node_ids, node_feat, edge_index, edge_ids, edge_feat
def _check_partition(dir, pidx, num_parts, graph_type='homo'):
  """Load partition ``pidx`` from ``dir`` and verify its contents.

  Verifies that the loaded partition holds exactly half of each node/edge
  set (the hard-coded 50/30 counts assume the 2-partition setup used by
  these tests), that the partition books map every local id back to
  ``pidx``, and that each feature row equals its id broadcast across the
  feature dimension — the pattern produced by ``_get_part_of_node`` /
  ``_get_part_of_edge``.

  Args:
    dir: Root directory the partitioner wrote its output to.
    pidx: Index of the partition to load and check.
    num_parts: Expected total number of partitions.
    graph_type: 'homo' for the homogeneous graph; any other value selects
      the user/item heterogeneous checks.
  """
  loaded_num_parts, _, graph, node_feat, edge_feat, node_pb, edge_pb = \
    glt.partition.load_partition(dir, pidx)
  # A standalone TestCase instance gives access to assert* helpers outside
  # of a test-method context (this runs inside spawned worker processes).
  tc = unittest.TestCase()
  tc.assertEqual(loaded_num_parts, num_parts)
  if graph_type == 'homo':
    # Local node ids are recovered from the edge source column.
    node_ids = torch.unique(graph.edge_index[0])
    tc.assertEqual(node_ids.size(0), 50)
    tc.assertTrue(torch.equal(torch.sort(node_ids)[0],
                              torch.sort(node_feat.ids)[0]))
    expect_node_pidx = torch.ones(50, dtype=torch.int64) * pidx
    tc.assertTrue(torch.equal(node_pb[node_ids], expect_node_pidx))
    tc.assertEqual(node_feat.feats.size(0), 50)
    tc.assertEqual(node_feat.feats.size(1), 10)
    tc.assertTrue(node_feat.cache_feats is None)
    tc.assertTrue(node_feat.cache_ids is None)
    # Each feature row must be its node id repeated across 10 columns.
    for idx, n_id in enumerate(node_feat.ids):
      tc.assertTrue(torch.equal(node_feat.feats[idx],
                                torch.ones(10, dtype=torch.float32) * n_id))
    edge_ids = graph.eids
    tc.assertEqual(edge_ids.size(0), 50 * 2)
    tc.assertTrue(torch.equal(torch.sort(edge_ids)[0],
                              torch.sort(edge_feat.ids)[0]))
    expect_edge_pidx = torch.ones(50 * 2, dtype=torch.int64) * pidx
    tc.assertTrue(torch.equal(edge_pb[edge_ids], expect_edge_pidx))
    tc.assertEqual(edge_feat.feats.size(0), 50 * 2)
    tc.assertEqual(edge_feat.feats.size(1), 5)
    tc.assertTrue(edge_feat.cache_feats is None)
    tc.assertTrue(edge_feat.cache_ids is None)
    # Each edge-feature row must be its edge id repeated across 5 columns.
    for idx, e_id in enumerate(edge_feat.ids):
      tc.assertTrue(torch.equal(edge_feat.feats[idx],
                                torch.ones(5, dtype=torch.float32) * e_id))
  else:
    # user nodes (100 total, 50 per partition)
    user_ids = torch.unique(graph[u2i_etype].edge_index[0])
    tc.assertEqual(user_ids.size(0), 50)
    tc.assertTrue(torch.equal(torch.sort(user_ids)[0],
                              torch.sort(node_feat[user_ntype].ids)[0]))
    expect_user_pidx = torch.ones(50, dtype=torch.int64) * pidx
    tc.assertTrue(torch.equal(node_pb[user_ntype][user_ids], expect_user_pidx))
    tc.assertEqual(node_feat[user_ntype].feats.size(0), 50)
    tc.assertEqual(node_feat[user_ntype].feats.size(1), 10)
    tc.assertTrue(node_feat[user_ntype].cache_feats is None)
    tc.assertTrue(node_feat[user_ntype].cache_ids is None)
    for idx, user_id in enumerate(node_feat[user_ntype].ids):
      tc.assertTrue(torch.equal(node_feat[user_ntype].feats[idx],
                                torch.ones(10, dtype=torch.float32) * user_id))
    # item nodes (60 total, 30 per partition)
    item_ids = torch.unique(graph[i2i_etype].edge_index[0])
    tc.assertEqual(item_ids.size(0), 30)
    tc.assertTrue(torch.equal(torch.sort(item_ids)[0],
                              torch.sort(node_feat[item_ntype].ids)[0]))
    expect_item_pidx = torch.ones(30, dtype=torch.int64) * pidx
    tc.assertTrue(torch.equal(node_pb[item_ntype][item_ids], expect_item_pidx))
    tc.assertEqual(node_feat[item_ntype].feats.size(0), 30)
    tc.assertEqual(node_feat[item_ntype].feats.size(1), 10)
    tc.assertTrue(node_feat[item_ntype].cache_feats is None)
    tc.assertTrue(node_feat[item_ntype].cache_ids is None)
    for idx, item_id in enumerate(node_feat[item_ntype].ids):
      tc.assertTrue(torch.equal(node_feat[item_ntype].feats[idx],
                                torch.ones(10, dtype=torch.float32) * item_id))
    # u2i edges (degree 2 per user -> 100 per partition)
    u2i_eids = graph[u2i_etype].eids
    tc.assertEqual(u2i_eids.size(0), 50 * 2)
    expect_u2i_pidx = torch.ones(50 * 2, dtype=torch.int64) * pidx
    tc.assertTrue(torch.equal(edge_pb[u2i_etype][u2i_eids], expect_u2i_pidx))
    tc.assertEqual(edge_feat[u2i_etype].feats.size(0), 50 * 2)
    tc.assertEqual(edge_feat[u2i_etype].feats.size(1), 5)
    tc.assertTrue(edge_feat[u2i_etype].cache_feats is None)
    tc.assertTrue(edge_feat[u2i_etype].cache_ids is None)
    for idx, u2i_eid in enumerate(edge_feat[u2i_etype].ids):
      tc.assertTrue(torch.equal(edge_feat[u2i_etype].feats[idx],
                                torch.ones(5, dtype=torch.float32) * u2i_eid))
    # i2i edges (degree 2 per item -> 60 per partition)
    i2i_eids = graph[i2i_etype].eids
    tc.assertEqual(i2i_eids.size(0), 30 * 2)
    expect_i2i_pidx = torch.ones(30 * 2, dtype=torch.int64) * pidx
    tc.assertTrue(torch.equal(edge_pb[i2i_etype][i2i_eids], expect_i2i_pidx))
    tc.assertEqual(edge_feat[i2i_etype].feats.size(0), 30 * 2)
    tc.assertEqual(edge_feat[i2i_etype].feats.size(1), 5)
    tc.assertTrue(edge_feat[i2i_etype].cache_feats is None)
    tc.assertTrue(edge_feat[i2i_etype].cache_ids is None)
    for idx, i2i_eid in enumerate(edge_feat[i2i_etype].ids):
      tc.assertTrue(torch.equal(edge_feat[i2i_etype].feats[idx],
                                torch.ones(5, dtype=torch.float32) * i2i_eid))
def run_dist_partitioner(rank, world_size, master_addr, master_port,
                         root_dir, graph_type='homo'):
  """Worker entry point: partition this rank's graph slice, then verify it.

  Spawned once per partition via ``mp.spawn``. Each worker builds its local
  chunk of the test graph, runs the distributed random partitioner into its
  own output directory, and checks the written partition.

  Args:
    rank: This worker's rank (also its partition index).
    world_size: Total number of workers/partitions.
    master_addr: RPC master address shared by all workers.
    master_port: RPC master port shared by all workers.
    root_dir: Parent directory; this worker writes under ``root_dir/p<rank>``.
    graph_type: 'homo' or the heterogeneous user/item graph.
  """
  num_nodes, node_ids, node_feat, edge_index, edge_ids, edge_feat = \
    _get_part_of_graph(rank, world_size, graph_type)
  output_dir = os.path.join(root_dir, f'p{rank}')
  # NOTE(review): edge_ids is passed twice — presumably once as the graph's
  # edge ids and once as the edge-feature ids, matching the partitioner's
  # positional signature. Confirm against DistRandomPartitioner.__init__.
  dist_partitioner = glt.distributed.DistRandomPartitioner(
    output_dir, num_nodes, edge_index, edge_ids, node_feat, node_ids,
    edge_feat, edge_ids, num_parts=world_size, current_partition_idx=rank,
    chunk_size=7, master_addr=master_addr, master_port=master_port,
    num_rpc_threads=4
  )
  dist_partitioner.partition()
  _check_partition(output_dir, rank, world_size, graph_type)
class DistRandomPartitionerTestCase(unittest.TestCase):
  """End-to-end tests for glt.distributed.DistRandomPartitioner.

  The homogeneous and heterogeneous tests were copy-pastes differing only
  in the output directory and graph type; the shared launch logic now lives
  in a single private helper.
  """

  def _run_partitioners(self, root_dir, graph_type):
    """Spawn two partitioner workers, wait for them, then clean up outputs.

    Args:
      root_dir: Directory the workers write their partitions under; removed
        after the run.
      graph_type: 'homo' or 'hetero', forwarded to run_dist_partitioner.
    """
    master_addr = 'localhost'
    master_port = glt.utils.get_free_port(master_addr)
    time.sleep(1)  # brief pause so the freshly probed port settles before use
    n_partitioners = 2
    mp.spawn(
      run_dist_partitioner,
      args=(n_partitioners, master_addr, master_port, root_dir, graph_type),
      nprocs=n_partitioners,
      join=True,
    )
    shutil.rmtree(root_dir)

  def test_with_homo_graph(self):
    self._run_partitioners('dist_random_partitioner_ut_homo', 'homo')

  def test_with_hetero_graph(self):
    self._run_partitioners('dist_random_partitioner_ut_hetero', 'hetero')
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_subgraph.py | test/python/test_subgraph.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
from graphlearn_torch.data import Topology, Graph
from graphlearn_torch.sampler import NeighborSampler
from graphlearn_torch.utils import tensor_equal_with_device
class RandomSamplerTestCase(unittest.TestCase):
  """Subgraph extraction tests for NeighborSampler on CPU and CUDA."""

  def setUp(self):
    """Build a tiny CSR graph plus the expected subgraph fixtures.

    Adjacency matrix of the input graph (rows = src, cols = dst):
      1 1 0 0 0 0
      0 1 0 1 0 0
      0 0 1 0 1 0
      0 0 0 0 0 1
    """
    indptr = torch.tensor([0, 2, 4, 6, 7], dtype=torch.int64)
    indices = torch.tensor([0, 1, 1, 3, 2, 4, 5], dtype=torch.int64)
    self.csr_topo = Topology(edge_index=(indptr, indices), input_layout='CSR')
    # Case 1: duplicated seeds, node-induced subgraph.
    self.input_seeds1 = torch.tensor([0, 2, 1, 2, 4], dtype=torch.int64)
    self.nodes1 = torch.tensor([0, 1, 2, 4], dtype=torch.int64)
    self.mapping1 = torch.tensor([0, 2, 1, 2, 3], dtype=torch.int64)
    self.rows1 = torch.tensor([0, 0, 1, 2, 2], dtype=torch.int64)
    self.cols1 = torch.tensor([0, 1, 1, 2, 3], dtype=torch.int64)
    self.eids1 = torch.tensor([0, 1, 2, 4, 5], dtype=torch.int64)
    # Case 2: two-hop expansion that reaches every node in the graph.
    self.input_seeds2 = torch.tensor([0, 1, 2, 3], dtype=torch.int64)
    self.nodes2 = torch.tensor([0, 1, 2, 3, 4, 5], dtype=torch.int64)
    self.mapping2 = torch.tensor([0, 1, 2, 3], dtype=torch.int64)
    self.rows2 = torch.tensor([0, 0, 1, 1, 2, 2, 3], dtype=torch.int64)
    self.cols2 = torch.tensor([0, 1, 1, 3, 2, 4, 5], dtype=torch.int64)
    self.eids2 = torch.tensor([0, 1, 2, 3, 4, 5, 6], dtype=torch.int64)

  def _verify(self, subgraph, nodes, mapping, rows, cols, eids):
    """Assert the subgraph matches the expected fixtures.

    Note the deliberate swap: ``subgraph.row`` is checked against the
    fixture ``cols`` and ``subgraph.col`` against ``rows``, mirroring the
    expectations of the original assertions.
    """
    self.assertTrue(tensor_equal_with_device(subgraph.node, nodes))
    self.assertTrue(tensor_equal_with_device(subgraph.metadata, mapping))
    self.assertTrue(tensor_equal_with_device(subgraph.row, cols))
    self.assertTrue(tensor_equal_with_device(subgraph.col, rows))
    self.assertTrue(tensor_equal_with_device(subgraph.edge, eids))

  def test_cpu_node_subgraph(self):
    g = Graph(self.csr_topo, mode='CPU')
    sampler = NeighborSampler(g, device=torch.device('cpu'), with_edge=True)
    self._verify(sampler.subgraph(self.input_seeds1), self.nodes1,
                 self.mapping1, self.rows1, self.cols1, self.eids1)

  def test_cpu_khop_subgraph(self):
    g = Graph(self.csr_topo, mode='CPU')
    sampler = NeighborSampler(g, device=torch.device('cpu'),
                              num_neighbors=[-1, -1], with_edge=True)
    self._verify(sampler.subgraph(self.input_seeds2), self.nodes2,
                 self.mapping2, self.rows2, self.cols2, self.eids2)

  def test_cuda_node_subgraph(self):
    g = Graph(self.csr_topo, mode='CUDA')
    sampler = NeighborSampler(g, device=torch.device('cuda:0'), with_edge=True)
    self._verify(sampler.subgraph(self.input_seeds1), self.nodes1.to(0),
                 self.mapping1.to(0), self.rows1.to(0), self.cols1.to(0),
                 self.eids1.to(0))

  def test_cuda_khop_subgraph(self):
    g = Graph(self.csr_topo, mode='CUDA')
    sampler = NeighborSampler(g, device=torch.device('cuda:0'),
                              num_neighbors=[-1, -1], with_edge=True)
    self._verify(sampler.subgraph(self.input_seeds2), self.nodes2.to(0),
                 self.mapping2.to(0), self.rows2.to(0), self.cols2.to(0),
                 self.eids2.to(0))
if __name__ == "__main__":
unittest.main() | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_dist_link_loader.py | test/python/test_dist_link_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import unittest
import torch
import graphlearn_torch as glt
from dist_test_utils import *
from dist_test_utils import _prepare_dataset, _prepare_hetero_dataset
from parameterized import parameterized
def _check_sample_result(data, edge_dir='out'):
  """Validate a sampled homogeneous batch from DistLinkNeighborLoader.

  The negative-sampling mode is detected from the batch itself: a
  ``src_index`` field marks triplet sampling, otherwise binary sampling is
  assumed. Positive pairs must follow the fixture ring topology where node
  v links to (v+1)%40 and (v+2)%40 (40 is presumably ``vnum_total`` from
  dist_test_utils — the 'in'-direction hetero checker uses that constant).

  Args:
    data: Sampled batch with local-to-global ``node`` mapping.
    edge_dir: Unused here; kept so this function matches the ``check_fn``
      signature used by run_test_as_worker.
  """
  tc = unittest.TestCase()
  if 'src_index' in data:
    # triplet negative sampling
    tc.assertEqual(data.src_index.size(0), 5)
    tc.assertEqual(data.dst_pos_index.size(0), 5)
    tc.assertEqual(data.dst_neg_index.size(0), 5)
    tc.assertTrue(data.edge_attr is not None)
    # Map local indices back to global node ids before checking topology.
    pos_index = torch.stack(
      (data.node[data.src_index],
       data.node[data.dst_pos_index]
      ))
    # '+' on boolean tensors acts as logical OR here.
    tc.assertTrue(torch.all(
      ((pos_index[0]+1)%40==pos_index[1]) + ((pos_index[0]+2)%40==pos_index[1])
    ))
  else:
    # binary negative sampling
    tc.assertEqual(data.edge_label_index.size(1), 10)
    tc.assertEqual(data.edge_label.size(0), 10)
    tc.assertTrue(data.edge_attr is not None)
    tc.assertEqual(max(data.edge_label), 1)
    # First 5 label edges are positives (batch_size=5); the rest negatives.
    out_index = data.edge_label_index
    pos_index = torch.stack(
      (data.node[out_index[0,:5]],
       data.node[out_index[1,:5]]
      ))
    sub_edge_index = torch.stack((data.node[data.edge_index[0]],
                                  data.node[data.edge_index[1]]))
    tc.assertTrue(torch.all(
      ((pos_index[1]+1)%40==pos_index[0]) + ((pos_index[1]+2)%40==pos_index[0])
    ))
    # Every sampled message-passing edge must also exist in the fixture ring.
    tc.assertTrue(torch.all(
      ((sub_edge_index[1]+1)%40==sub_edge_index[0]) +
      ((sub_edge_index[1]+2)%40==sub_edge_index[0])
    ))
def _check_hetero_sample_result(data, edge_dir='out'):
  """Validate a sampled heterogeneous batch from DistLinkNeighborLoader.

  Branches on the sampling edge direction and on the negative-sampling
  mode; the latter is detected from the number of attributes on the user
  node store (more than two appears to indicate triplet sampling, which
  attaches src/dst index fields — confirm against the loader). Positive
  pairs and sampled edges must follow the fixture topology where source v
  connects to destinations (v+1) and (v+2) modulo the node count
  (hard-coded 40 in the 'out' branch, ``vnum_total`` in the 'in' branch).

  Args:
    data: Sampled HeteroData-like batch with per-type ``node`` mappings.
    edge_dir: 'out' or 'in' — must match the direction the dataset was
      built with, since it selects which edge types carry the results.
  """
  tc = unittest.TestCase()
  if edge_dir == 'out':
    if len(data[user_ntype]) > 2:
      # triplet negative sampling ('out': results on the reversed u2i etype)
      tc.assertEqual(data[user_ntype].node.size(0), 5)
      tc.assertEqual(data[user_ntype].src_index.size(0), 5)
      tc.assertEqual(data[item_ntype].dst_pos_index.size(0), 5)
      tc.assertEqual(data[item_ntype].dst_neg_index.size(0), 5)
      tc.assertEqual(data[item_ntype].dst_neg_index.size(1), 2)
      tc.assertEqual(data[rev_u2i_etype].edge.size(0), 10)
      tc.assertTrue(data[rev_u2i_etype].edge_attr is not None)
      tc.assertTrue(data[i2i_etype].edge_attr is not None)
      tc.assertLess(max(data[user_ntype].src_index), 5)
      tc.assertLess(max(data[rev_u2i_etype].edge_index[1]), 5)
      # Map local indices to global ids before checking the topology.
      pos_index = torch.stack(
        (data[user_ntype].node[data[user_ntype].src_index],
         data[item_ntype].node[data[item_ntype].dst_pos_index]
        ))
      # '+' on boolean tensors acts as logical OR here (same below).
      tc.assertTrue(torch.all(
        ((pos_index[0]+1)%40==pos_index[1]) + ((pos_index[0]+2)%40==pos_index[1])
      ))
    else:
      # binary negative sampling ('out')
      tc.assertLessEqual(data[user_ntype].node.size(0), 10)
      tc.assertEqual(data[rev_u2i_etype].edge_label_index.size(0), 2)
      tc.assertEqual(data[rev_u2i_etype].edge_label_index.size(1), 10)
      tc.assertEqual(data[rev_u2i_etype].edge_label.size(0), 10)
      tc.assertEqual(max(data[rev_u2i_etype].edge_label), 1)
      tc.assertTrue(data[rev_u2i_etype].edge_attr is not None)
      tc.assertTrue(data[i2i_etype].edge_attr is not None)
      # First half of the label edges are positives, second half negatives.
      out_index = data[rev_u2i_etype].edge_label_index
      pos_index = torch.stack(
        (data[item_ntype].node[out_index[0,:int(out_index.size(1)/2)]],
         data[user_ntype].node[out_index[1,:int(out_index.size(1)/2)]])
      )
      neg_index = torch.stack(
        (data[item_ntype].node[out_index[0,int(out_index.size(1)/2):]],
         data[user_ntype].node[out_index[1,int(out_index.size(1)/2):]])
      )
      tc.assertTrue(torch.all(
        ((pos_index[1]+1)%40==pos_index[0]) + ((pos_index[1]+2)%40==pos_index[0])
      ))
      tc.assertEqual(neg_index.size(0), pos_index.size(0))
      tc.assertEqual(neg_index.size(1), pos_index.size(1))
      # All sampled message-passing edges must exist in the fixture graph.
      sub_edge_index = data[rev_u2i_etype].edge_index
      glob_edge_index = torch.stack((data[item_ntype].node[sub_edge_index[0]],
                                     data[user_ntype].node[sub_edge_index[1]]))
      tc.assertTrue(torch.all(
        ((glob_edge_index[1]+1)%40==glob_edge_index[0]) +
        ((glob_edge_index[1]+2)%40==glob_edge_index[0])
      ))
  elif edge_dir == 'in':
    if len(data[user_ntype]) > 2:
      # triplet negative sampling ('in': results on the forward u2i etype)
      tc.assertTrue(data[u2i_etype].edge_attr.size(1), 10)
      tc.assertTrue(data[i2i_etype].edge_attr.size(1), 5)
      tc.assertEqual(data[user_ntype].src_index.size(0), 5)
      tc.assertEqual(data[item_ntype].dst_pos_index.size(0), 5)
      tc.assertEqual(data[item_ntype].dst_neg_index.size(0), 5)
      tc.assertEqual(data[item_ntype].dst_neg_index.size(1), 2)
      u2i_row = data[user_ntype].node[data[u2i_etype].edge_index[0]]
      u2i_col = data[item_ntype].node[data[u2i_etype].edge_index[1]]
      i2i_row = data[item_ntype].node[data[i2i_etype].edge_index[0]]
      i2i_col = data[item_ntype].node[data[i2i_etype].edge_index[1]]
      pos_index = torch.stack(
        (data[user_ntype].node[data[user_ntype].src_index],
         data[item_ntype].node[data[item_ntype].dst_pos_index])
      )
      tc.assertTrue(torch.all(
        ((u2i_row+1)%vnum_total == u2i_col) + ((u2i_row+2)%vnum_total == u2i_col)
      ))
      tc.assertTrue(torch.all(
        ((i2i_row+1)%vnum_total == i2i_col) + ((i2i_row+2)%vnum_total == i2i_col)
      ))
      tc.assertTrue(torch.all(
        ((pos_index[0]+1)%vnum_total==pos_index[1]) + ((pos_index[0]+2)%vnum_total==pos_index[1])
      ))
    else:
      # binary negative sampling ('in')
      tc.assertEqual(max(data[u2i_etype].edge_label), 1)
      tc.assertTrue(data[u2i_etype].edge_attr.size(1), 10)
      tc.assertTrue(data[i2i_etype].edge_attr.size(1), 5)
      tc.assertEqual(data[u2i_etype].edge_label_index.size(0), 2)
      tc.assertEqual(data[u2i_etype].edge_label_index.size(1), 10)
      tc.assertEqual(data[u2i_etype].edge_label.size(0), 10)
      u2i_row = data[user_ntype].node[data[u2i_etype].edge_index[0]]
      u2i_col = data[item_ntype].node[data[u2i_etype].edge_index[1]]
      i2i_row = data[item_ntype].node[data[i2i_etype].edge_index[0]]
      i2i_col = data[item_ntype].node[data[i2i_etype].edge_index[1]]
      # First half of the label edges are positives, second half negatives.
      out_index = data[u2i_etype].edge_label_index
      pos_index = torch.stack(
        (data[user_ntype].node[out_index[0,:int(out_index.size(1)/2)]],
         data[item_ntype].node[out_index[1,:int(out_index.size(1)/2)]])
      )
      neg_index = torch.stack(
        (data[user_ntype].node[out_index[0,int(out_index.size(1)/2):]],
         data[item_ntype].node[out_index[1,int(out_index.size(1)/2):]])
      )
      tc.assertTrue(torch.all(
        ((u2i_row+1)%vnum_total == u2i_col) + ((u2i_row+2)%vnum_total == u2i_col)
      ))
      tc.assertTrue(torch.all(
        ((i2i_row+1)%vnum_total == i2i_col) + ((i2i_row+2)%vnum_total == i2i_col)
      ))
      tc.assertTrue(torch.all(
        ((pos_index[0]+1)%vnum_total==pos_index[1]) +
        ((pos_index[0]+2)%vnum_total==pos_index[1])
      ))
      tc.assertEqual(neg_index.size(0), pos_index.size(0))
      tc.assertEqual(neg_index.size(1), pos_index.size(1))
      sub_edge_index = data[u2i_etype].edge_index
      glob_edge_index = torch.stack((data[user_ntype].node[sub_edge_index[0]],
                                     data[item_ntype].node[sub_edge_index[1]]))
      tc.assertTrue(torch.all(
        ((glob_edge_index[0]+1)%vnum_total==glob_edge_index[1]) +
        ((glob_edge_index[0]+2)%vnum_total==glob_edge_index[1])
      ))
def run_test_as_worker(world_size: int, rank: int,
                       master_port: int, sampling_master_port: int,
                       dataset: glt.distributed.DistDataset,
                       neg_sampling: glt.sampler.NegativeSampling,
                       input_edges: glt.InputEdges, check_fn,
                       collocated = False, edge_dir='out'):
  """Trainer-process entry point for the worker-mode loader tests.

  Joins the worker group, sets up RPC, builds a DistLinkNeighborLoader on
  this rank's partition, and validates every produced batch with
  ``check_fn`` over two epochs.

  Args:
    world_size: Number of trainer processes.
    rank: This trainer's rank.
    master_port: Port for the worker-group RPC master.
    sampling_master_port: Port used by the sampling worker group.
    dataset: This rank's partition of the distributed dataset.
    neg_sampling: Negative-sampling configuration for link sampling.
    input_edges: Seed edges assigned to this rank.
    check_fn: Callable(batch, edge_dir) that asserts batch correctness.
    collocated: If True, sample in-process (collocated workers); otherwise
      spawn a separate multiprocessing sampling worker pool.
    edge_dir: Sampling edge direction ('out' or 'in').
  """
  # Initialize worker group context
  glt.distributed.init_worker_group(
    world_size, rank, 'dist-neighbor-loader-test'
  )
  dist_context = glt.distributed.get_context()
  # Init RPC
  glt.distributed.init_rpc(
    master_addr='localhost',
    master_port=master_port,
    num_rpc_threads=1,
    rpc_timeout=30
  )
  # dist loader: choose collocated vs. multiprocessing sampling workers.
  if collocated:
    worker_options = glt.distributed.CollocatedDistSamplingWorkerOptions(
      master_addr='localhost',
      master_port=sampling_master_port,
      rpc_timeout=10
    )
  else:
    worker_options = glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=sampling_nprocs,
      # Round-robin the sampling workers over the available CUDA devices.
      worker_devices=[torch.device('cuda', i % device_num)
                      for i in range(sampling_nprocs)],
      worker_concurrency=2,
      master_addr='localhost',
      master_port=sampling_master_port,
      rpc_timeout=10,
      num_rpc_threads=2,
      pin_memory=True
    )
  dist_loader = glt.distributed.DistLinkNeighborLoader(
    data=dataset,
    num_neighbors=[2, 1],
    edge_label_index=input_edges,
    neg_sampling=neg_sampling,
    batch_size=5,
    shuffle=True,
    drop_last=False,
    with_edge=True,
    edge_dir=edge_dir,
    collect_features=True,
    to_device=torch.device('cuda', rank % device_num),
    worker_options=worker_options
  )
  # run testing: two epochs, validating every batch.
  for epoch in range(0, 2):
    for res in dist_loader:
      check_fn(res, edge_dir)
      time.sleep(0.1)
    # Keep trainers in lockstep between epochs.
    glt.distributed.barrier()
    print(f'[Trainer {dist_context.rank}] epoch {epoch} finished.')
  dist_loader.shutdown()
def run_test_as_server(num_servers: int, num_clients: int, server_rank: int,
                       master_port: int, dataset: glt.distributed.DistDataset):
  """Server-process entry point for the remote (server-client) mode test.

  Initializes a glt server hosting ``dataset`` and then blocks in
  ``wait_and_shutdown_server`` until the clients have finished.

  Args:
    num_servers: Total number of server processes.
    num_clients: Total number of client processes.
    server_rank: This server's rank.
    master_port: RPC master port shared with the clients.
    dataset: The graph partition this server serves.
  """
  print(f'[Server {server_rank}] Initializing server ...')
  glt.distributed.init_server(
    num_servers=num_servers,
    num_clients=num_clients,
    server_rank=server_rank,
    dataset=dataset,
    master_addr='localhost',
    master_port=master_port,
    request_timeout=30,
    num_rpc_threads=2,
    server_group_name='dist-remote-sampling-test-server'
  )
  print(f'[Server {server_rank}] Waiting for exit ...')
  glt.distributed.wait_and_shutdown_server()
  print(f'[Server {server_rank}] Exited ...')
def run_test_as_client(num_servers: int, num_clients: int, client_rank: int,
                       master_port: int, sampling_master_port: int,
                       neg_sampling: glt.sampler.NegativeSampling,
                       input_edges: glt.InputEdges, check_fn):
  """Client-process entry point for the remote (server-client) mode test.

  Connects to the server group, builds a DistLinkNeighborLoader backed by
  remote sampling workers on one server, and validates every produced
  batch with ``check_fn`` over two epochs.

  Args:
    num_servers: Total number of server processes.
    num_clients: Total number of client processes.
    client_rank: This client's rank.
    master_port: RPC master port shared with the servers.
    sampling_master_port: Port used by the remote sampling worker group.
    neg_sampling: Negative-sampling configuration for link sampling.
    input_edges: Seed edges assigned to this client.
    check_fn: Callable(batch) that asserts batch correctness.
  """
  print(f'[Client {client_rank}] Initializing client ...')
  glt.distributed.init_client(
    num_servers=num_servers,
    num_clients=num_clients,
    client_rank=client_rank,
    master_addr='localhost',
    master_port=master_port,
    num_rpc_threads=1,
    client_group_name='dist-remote-sampling-test-client'
  )
  print(f'[Client {client_rank}] Creating DistLinkNeighborLoader ...')
  # Each client pins its sampling workload to one server, round-robin.
  target_server_rank = client_rank % num_servers
  options = glt.distributed.RemoteDistSamplingWorkerOptions(
    server_rank=target_server_rank,
    num_workers=sampling_nprocs,
    worker_devices=[torch.device('cuda', i % device_num)
                    for i in range(sampling_nprocs)],
    worker_concurrency=2,
    master_addr='localhost',
    master_port=sampling_master_port,
    rpc_timeout=10,
    num_rpc_threads=2,
    prefetch_size=4
  )
  # data=None: the graph lives on the server side in remote mode.
  dist_loader = glt.distributed.DistLinkNeighborLoader(
    data=None,
    num_neighbors=[2, 1],
    edge_label_index=input_edges,
    neg_sampling=neg_sampling,
    batch_size=5,
    shuffle=True,
    drop_last=False,
    with_edge=True,
    edge_dir='out',
    collect_features=True,
    to_device=torch.device('cuda', client_rank % device_num),
    worker_options=options
  )
  print(f'[Client {client_rank}] Running tests ...')
  for epoch in range(0, 2):
    for res in dist_loader:
      check_fn(res)
      time.sleep(0.1)
    # Keep clients in lockstep between epochs.
    glt.distributed.barrier()
    print(f'[Client {client_rank}] epoch {epoch} finished.')
  print(f'[Client {client_rank}] Shutdowning ...')
  glt.distributed.shutdown_client()
  print(f'[Client {client_rank}] Exited ...')
class DistLinkNeighborLoaderTestCase(unittest.TestCase):
  """Integration tests for DistLinkNeighborLoader.

  Spawns a pair of trainer processes (plus server/client processes for
  remote mode) and checks the sampled batches. The six worker-mode tests
  were near-identical copy-pastes of the same spawn/start/join sequence;
  that sequence now lives in the private ``_launch_worker_pair`` helper.
  Public test method names and behavior are unchanged.
  """

  def setUp(self):
    # Two-partition homogeneous datasets (hash- and range-partitioned).
    self.dataset0 = _prepare_dataset(rank=0)
    self.dataset1 = _prepare_dataset(rank=1)
    self.range_partition_dataset0 = _prepare_dataset(rank=0, is_range_partition=True)
    self.range_partition_dataset1 = _prepare_dataset(rank=1, is_range_partition=True)
    # Seed edges: rank 0 owns (v, v+1) for the first half of the node ids,
    # rank 1 the second half, wrapping modulo vnum_total.
    self.input_edges0 = torch.stack(
      (torch.arange(vnum_per_partition), torch.arange(vnum_per_partition)+1)
    ).to(dtype=torch.long)
    self.input_edges1 = torch.stack(
      (torch.arange(vnum_per_partition)+vnum_per_partition,
       (torch.arange(vnum_per_partition)+vnum_per_partition+1)%vnum_total)
    ).to(dtype=torch.long)
    # Heterogeneous datasets, one pair per sampling direction.
    self.out_hetero_dataset0 = _prepare_hetero_dataset(rank=0, edge_dir='out')
    self.out_hetero_dataset1 = _prepare_hetero_dataset(rank=1, edge_dir='out')
    self.in_hetero_dataset0 = _prepare_hetero_dataset(rank=0, edge_dir='in')
    self.in_hetero_dataset1 = _prepare_hetero_dataset(rank=1, edge_dir='in')
    self.hetero_input_edges0 = (u2i_etype, self.input_edges0)
    self.hetero_input_edges1 = (u2i_etype, self.input_edges1)
    self.bin_neg_sampling = glt.sampler.NegativeSampling('binary')
    self.tri_neg_sampling = glt.sampler.NegativeSampling('triplet', amount=2)
    self.master_port = glt.utils.get_free_port()
    self.sampling_master_port = glt.utils.get_free_port()

  def _get_homo_datasets(self, is_range_partition):
    """Pick the hash- or range-partitioned homogeneous dataset pair."""
    if is_range_partition:
      return (self.range_partition_dataset0, self.range_partition_dataset1)
    return (self.dataset0, self.dataset1)

  def _launch_worker_pair(self, datasets, neg_sampling, input_edges,
                          check_fn, collocated, edge_dir='out'):
    """Spawn one trainer process per partition and wait for both to finish.

    Args:
      datasets: (rank0_dataset, rank1_dataset) pair.
      neg_sampling: NegativeSampling config shared by both trainers.
      input_edges: (rank0_edges, rank1_edges) seed-edge pair.
      check_fn: Per-batch validation callback.
      collocated: Use collocated sampling instead of mp sampling workers.
      edge_dir: Sampling edge direction ('out' or 'in').
    """
    mp_context = torch.multiprocessing.get_context('spawn')
    workers = [
      mp_context.Process(
        target=run_test_as_worker,
        args=(2, rank, self.master_port, self.sampling_master_port,
              datasets[rank], neg_sampling, input_edges[rank],
              check_fn, collocated, edge_dir)
      )
      for rank in (0, 1)
    ]
    for w in workers:
      w.start()
    for w in workers:
      w.join()

  @parameterized.expand([
    (True),
    (False),
  ])
  def test_homo_out_sample_collocated(self, is_range_partition):
    print("\n--- DistLinkNeighborLoader Test (homogeneous, collocated) ---")
    self._launch_worker_pair(
      self._get_homo_datasets(is_range_partition), self.bin_neg_sampling,
      (self.input_edges0, self.input_edges1), _check_sample_result,
      collocated=True)

  @parameterized.expand([
    (True),
    (False),
  ])
  def test_homo_out_sample_mp(self, is_range_partition):
    print("\n--- DistLinkNeighborLoader Test (homogeneous, multiprocessing) ---")
    self._launch_worker_pair(
      self._get_homo_datasets(is_range_partition), self.tri_neg_sampling,
      (self.input_edges0, self.input_edges1), _check_sample_result,
      collocated=False)

  def test_hetero_out_sample_collocated(self):
    print("\n--- DistLinkNeighborLoader Test (heterogeneous, collocated) ---")
    self._launch_worker_pair(
      (self.out_hetero_dataset0, self.out_hetero_dataset1),
      self.tri_neg_sampling,
      (self.hetero_input_edges0, self.hetero_input_edges1),
      _check_hetero_sample_result, collocated=True)

  def test_hetero_out_sample_mp(self):
    print("\n--- DistLinkNeighborLoader Test (heterogeneous, multiprocessing) ---")
    self._launch_worker_pair(
      (self.out_hetero_dataset0, self.out_hetero_dataset1),
      self.bin_neg_sampling,
      (self.hetero_input_edges0, self.hetero_input_edges1),
      _check_hetero_sample_result, collocated=False)

  def test_hetero_in_sample_collocated(self):
    print("\n--- DistLinkNeighborLoader Test (in-sample, heterogeneous, collocated) ---")
    self._launch_worker_pair(
      (self.in_hetero_dataset0, self.in_hetero_dataset1),
      self.tri_neg_sampling,
      (self.hetero_input_edges0, self.hetero_input_edges1),
      _check_hetero_sample_result, collocated=True, edge_dir='in')

  def test_hetero_in_sample_mp(self):
    print("\n--- DistLinkNeighborLoader Test (in-sample, heterogeneous, multiprocessing) ---")
    self._launch_worker_pair(
      (self.in_hetero_dataset0, self.in_hetero_dataset1),
      self.bin_neg_sampling,
      (self.hetero_input_edges0, self.hetero_input_edges1),
      _check_hetero_sample_result, collocated=False, edge_dir='in')

  def test_remote_mode(self):
    print("\n--- DistLinkNeighborLoader Test (server-client mode, remote) ---")
    mp_context = torch.multiprocessing.get_context('spawn')
    datasets = (self.dataset0, self.dataset1)
    input_edges = (self.input_edges0, self.input_edges1)
    servers = [
      mp_context.Process(
        target=run_test_as_server,
        args=(2, 2, rank, self.master_port, datasets[rank]))
      for rank in (0, 1)
    ]
    clients = [
      mp_context.Process(
        target=run_test_as_client,
        args=(2, 2, rank, self.master_port, self.sampling_master_port,
              self.bin_neg_sampling, input_edges[rank], _check_sample_result))
      for rank in (0, 1)
    ]
    # Start servers first (matching the original ordering), then clients.
    procs = servers + clients
    for p in procs:
      p.start()
    for p in procs:
      p.join()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_unified_tensor.py | test/python/test_unified_tensor.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
from graphlearn_torch.data import UnifiedTensor
from graphlearn_torch.utils import tensor_equal_with_device
class UnifiedTensorTestCase(unittest.TestCase):
  """Exercise UnifiedTensor gathers across integer and floating dtypes."""

  def test_unified_tensor_with_int_types(self):
    for dt in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
      self._test_with_dtype(dt)

  def test_unified_tensor_with_float_types(self):
    for dt in (torch.float16, torch.float32, torch.float64, torch.bfloat16,
               torch.complex64, torch.complex128):
      self._test_with_dtype(dt)

  def _test_with_dtype(self, dtype: torch.dtype):
    # Two 128x128 CPU blocks: the first mapped to GPU 0, the second kept on
    # host memory (device id -1) so the gather spans both placements.
    block_ones = torch.ones(128, 128, dtype=dtype)
    block_twos = block_ones * 2
    unified = UnifiedTensor(0, dtype)
    unified.init_from([block_ones, block_twos], [0, -1])
    self.assertEqual(unified.shape, [256, 128])
    cuda0 = torch.device('cuda:0')
    # Rows 10/20 fall in the first block (all ones), rows 200/210 in the
    # second (all twos).
    indices = torch.tensor([10, 20, 200, 210], dtype=torch.int64, device=cuda0)
    ones_rows = torch.ones(2, 128, dtype=dtype, device=cuda0)
    expected = torch.cat((ones_rows, ones_rows * 2), 0)
    gathered = unified[indices]
    self.assertEqual(expected.dtype, gathered.dtype)
    self.assertTrue(tensor_equal_with_device(expected, gathered))
if __name__ == "__main__":
  # Allow running this test module directly: `python test_unified_tensor.py`.
  unittest.main()
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_sample_prob.py | test/python/test_sample_prob.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import os.path as osp
import unittest
import torch
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from graphlearn_torch.data import Topology, Graph
from graphlearn_torch.sampler import NeighborSampler, NodeSamplerInput
class SampleProbTestCase(unittest.TestCase):
  """Checks NeighborSampler.sample_prob on the OGB-MAG heterogeneous graph.

  Both methods are skipped by default because setUp downloads the full
  OGB-MAG archives, which is too slow for regular CI runs.
  """
  @unittest.skip("Download too long")
  def setUp(self):
    # Download the preprocessed OGB-MAG data into <repo>/../data/mag/raw.
    # NOTE(review): shell string concatenation via os.system — acceptable
    # here since the URLs are hard-coded, but subprocess.run would be safer.
    path = osp.join(osp.dirname(osp.realpath(__file__)), '../../../data/')
    os.system('mkdir '+path+ ' && wget -P '+path+'mag/raw \
      https://graphlearn.oss-cn-hangzhou.aliyuncs.com/data/github/mag.zip \
      && wget -P '+path+ 'mag/raw \
      https://graphlearn.oss-cn-hangzhou.aliyuncs.com/data/github/mag_metapath2vec_emb.zip'
    )
    transform = T.ToUndirected(merge=True)
    dataset = OGB_MAG(path, preprocess='metapath2vec', transform=transform)
    data = dataset[0]
    # init graphlearn_torch Dataset.
    edge_dict, self.node_dict, csr_dict, self.graph_dict = {}, {}, {}, {}
    self.req_nums, self.ids = {}, {}
    for etype in data.edge_types:
      edge_dict[etype] = data[etype]['edge_index']
    for ntype in data.node_types:
      # Dense id range [0, num_nodes) per node type.
      self.node_dict[ntype] = torch.tensor(list(range(len(data[ntype].x))))
    # Seed nodes: 5 random ids of the first node type.
    self.input_type = data.node_types[0]
    self.ids = torch.randperm(self.node_dict[self.input_type].size(0))[:5]
    for etype, eidx in edge_dict.items():
      csr_dict[etype] = Topology(edge_index=eidx)
      self.graph_dict[etype] = Graph(csr_topo=csr_dict[etype])
      # Two-hop fanout of 5 neighbors per hop for every edge type.
      self.req_nums[etype] = [5, 5]
      # print(f"{etype}: #row={self.graph_dict[etype].row_count} \
      #       #edge={self.graph_dict[etype].edge_count} \
      #       #col={self.graph_dict[etype].col_count}")
  @unittest.skip("Download too long")
  def test_sample_prob(self):
    # sample_prob should return one probability entry per node, for every
    # node type in the graph.
    sampler = NeighborSampler(self.graph_dict, self.req_nums)
    print("loading done!")
    inputs = NodeSamplerInput(
      node=self.ids,
      input_type=self.input_type
    )
    probs = sampler.sample_prob(inputs, self.node_dict)
    print(probs)
    assert(probs['paper'].size(0) == self.node_dict['paper'].size(0))
    assert(probs['author'].size(0) == self.node_dict['author'].size(0))
    assert(probs['field_of_study'].size(0) ==
           self.node_dict['field_of_study'].size(0))
    assert(probs['institution'].size(0) == self.node_dict['institution'].size(0))
if __name__ == "__main__":
  # Allow running this test module directly: `python test_sample_prob.py`.
  unittest.main()
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_hetero_neighbor_sampler.py | test/python/test_hetero_neighbor_sampler.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import unittest
import torch
import graphlearn_torch as glt
class RandomSamplerTestCase(unittest.TestCase):
  """NeighborSampler tests on a deterministic two-type ('user'/'item') graph.

  setUp builds ring-shaped topologies so every test can predict the legal
  neighbor set exactly: each 'user' v links to items (v+1)%40 and (v+2)%40,
  and each 'item' v links to items (v+2)%40 and (v+3)%40. Edge ids are
  v*degree+i, so the out-edges of node v are exactly {2v, 2v+1}.
  """
  def setUp(self):
    # options for dataset generation
    self.vnum_total = 40
    self.degree = 2
    self.enum_total = 80
    # for hetero dataset
    self.user_ntype = 'user'
    self.item_ntype = 'item'
    self.u2i_etype = ('user', 'u2i', 'item')
    self.i2i_etype = ('item', 'i2i', 'item')
    self.rev_u2i_etype = ('item', 'rev_u2i', 'user')
    # graph
    user_nodes, u2i_rows, u2i_cols, u2i_eids = [], [], [], []
    for v in range(self.vnum_total):
      user_nodes.append(v)
      u2i_rows.extend([v for _ in range(self.degree)])
      u2i_cols.extend([((v + i + 1) % self.vnum_total) for i in range(self.degree)])
      u2i_eids.extend([(v * self.degree + i) for i in range(self.degree)])
    u2i_edge_index = torch.tensor([u2i_rows, u2i_cols], dtype=torch.int64)
    u2i_edge_ids = torch.tensor(u2i_eids, dtype=torch.int64)
    # Weights alternate by edge-id parity: even ids get 0.5, odd ids 1.5.
    u2i_edge_weights = (u2i_edge_ids % 2).to(torch.float) + .5
    u2i_csr_topo = glt.data.Topology(
      edge_index=u2i_edge_index, edge_ids=u2i_edge_ids)
    u2i_csc_topo = glt.data.Topology(
      edge_index=u2i_edge_index, edge_ids=u2i_edge_ids, layout='CSC')
    u2i_weighted_csr_topo = glt.data.Topology(
      edge_index=u2i_edge_index,
      edge_ids=u2i_edge_ids, edge_weights=u2i_edge_weights)
    u2i_graph = glt.data.Graph(u2i_csr_topo, 'ZERO_COPY', device=0)
    u2i_in_graph = glt.data.Graph(u2i_csc_topo, 'ZERO_COPY', device=0)
    u2i_weighted_graph = glt.data.Graph(u2i_weighted_csr_topo, 'CPU')
    item_nodes, i2i_rows, i2i_cols, i2i_eids = [], [], [], []
    for v in range(self.vnum_total):
      item_nodes.append(v)
      i2i_rows.extend([v for _ in range(self.degree)])
      i2i_cols.extend([((v + i + 2) % self.vnum_total) for i in range(self.degree)])
      i2i_eids.extend([(v * self.degree + i) for i in range(self.degree)])
    i2i_edge_index = torch.tensor([i2i_rows, i2i_cols], dtype=torch.int64)
    i2i_edge_ids = torch.tensor(i2i_eids, dtype=torch.int64)
    i2i_edge_weights = (i2i_edge_ids % 2).to(torch.float) + .5
    i2i_csr_topo = glt.data.Topology(edge_index=i2i_edge_index, edge_ids=i2i_edge_ids)
    i2i_csc_topo = glt.data.Topology(
      edge_index=i2i_edge_index, edge_ids=i2i_edge_ids, layout='CSC'
    )
    i2i_weighted_csr_topo = glt.data.Topology(
      edge_index=i2i_edge_index,
      edge_ids=i2i_edge_ids, edge_weights=i2i_edge_weights)
    i2i_graph = glt.data.Graph(i2i_csr_topo, 'ZERO_COPY', device=0)
    i2i_in_graph = glt.data.Graph(i2i_csc_topo, 'ZERO_COPY', device=0)
    i2i_weighted_graph = glt.data.Graph(i2i_weighted_csr_topo, 'CPU')
    # CSR graphs for out-sampling, CSC graphs for in-sampling, and CPU
    # weighted graphs for the weighted-sampling tests.
    self.graph_dict = {
      self.u2i_etype: u2i_graph,
      self.i2i_etype: i2i_graph
    }
    self.graph_in_dict = {
      self.u2i_etype: u2i_in_graph,
      self.i2i_etype: i2i_in_graph
    }
    self.weighted_graph_dict = {
      self.u2i_etype: u2i_weighted_graph,
      self.i2i_etype: i2i_weighted_graph
    }
    # feature
    device_group_list = [glt.data.DeviceGroup(0, [0])]
    split_ratio = 0.2
    user_nfeat = torch.zeros(len(user_nodes), 512, dtype=torch.float32)
    user_nfeat_id2idx = glt.utils.id2idx(user_nodes)
    user_feature = glt.data.Feature(user_nfeat, user_nfeat_id2idx,
                                    split_ratio, device_group_list, device=0)
    item_nfeat = torch.ones(len(item_nodes), 256, dtype=torch.float32) + 1
    item_nfeat_id2idx = glt.utils.id2idx(item_nodes)
    item_feature = glt.data.Feature(item_nfeat, item_nfeat_id2idx,
                                    split_ratio, device_group_list, device=0)
    self.node_feature_dict = {
      self.user_ntype: user_feature,
      self.item_ntype: item_feature
    }
    u2i_efeat = torch.ones(len(u2i_eids), 10, dtype=torch.float32) + 1
    u2i_efeat_id2idx = glt.utils.id2idx(u2i_eids)
    u2i_feature = glt.data.Feature(u2i_efeat, u2i_efeat_id2idx,
                                   split_ratio, device_group_list, device=0)
    i2i_efeat = torch.ones(len(i2i_eids), 5, dtype=torch.float32) + 3
    i2i_efeat_id2idx = glt.utils.id2idx(i2i_eids)
    i2i_feature = glt.data.Feature(i2i_efeat, i2i_efeat_id2idx,
                                   split_ratio, device_group_list, device=0)
    self.edge_feature_dict = {
      self.u2i_etype: u2i_feature,
      self.i2i_etype: i2i_feature
    }
    # node label
    self.node_label_dict = {
      self.user_ntype: torch.arange(self.vnum_total),
      self.item_ntype: torch.arange(self.vnum_total)
    }
  def test_hetero_sample_from_nodes(self):
    """2-hop out-sampling from 'user' seeds: sampled edges must obey the
    ring construction, and the full fanout requests all seed out-edges."""
    node_sampler = glt.sampler.NeighborSampler(
      graph=self.graph_dict,
      num_neighbors=[2,1],
      with_edge=True,
      edge_dir='out',
    )
    sampler_input = glt.sampler.NodeSamplerInput(
      node=torch.tensor([1,5,9,13,17,21,25,29]), input_type=self.user_ntype)
    sample_out = node_sampler.sample_from_nodes(sampler_input)
    # Map local row/col indices back to global node ids before checking.
    base_homo_edge_index = torch.stack((
      sample_out.node['item'][sample_out.row[self.i2i_etype]],
      sample_out.node['item'][sample_out.col[self.i2i_etype]]
    ))
    base_hetero_edge_index = torch.stack((
      sample_out.node['item'][sample_out.row[self.rev_u2i_etype]],
      sample_out.node['user'][sample_out.col[self.rev_u2i_etype]]
    ))
    self.assertTrue(torch.all(
      ((base_homo_edge_index[1]+2)%40==base_homo_edge_index[0]) +
      ((base_homo_edge_index[1]+3)%40==base_homo_edge_index[0])
    ))
    self.assertTrue(torch.all(
      ((base_hetero_edge_index[1]+1)%40==base_hetero_edge_index[0]) +
      ((base_hetero_edge_index[1]+2)%40==base_hetero_edge_index[0])
    ))
    # Out-edge ids of user v are exactly {2v, 2v+1}.
    base_edge_ids = torch.cat(
      (sample_out.node['user'] * 2, sample_out.node['user'] * 2 + 1)
    ).unique()
    self.assertTrue(glt.utils.tensor_equal_with_device(
      base_edge_ids, sample_out.edge[self.rev_u2i_etype].unique())
    )
  def test_weighted_hetero_sample_from_nodes(self):
    """Weighted 1-hop sampling on CPU: odd edge ids (weight 1.5) should be
    drawn roughly three times as often as even ids (weight 0.5)."""
    node_out_sampler = glt.sampler.NeighborSampler(
      graph=self.weighted_graph_dict,
      device='CPU',
      num_neighbors=[1],
      with_edge=True,
      with_weight=True,
      edge_dir='out',
    )
    sampler_out_input = glt.sampler.NodeSamplerInput(
      node=torch.tensor([1,5,9,13,21,29,37,38]), input_type=self.user_ntype)
    stats = torch.zeros(80)
    for _ in range(1000):
      sample_out = node_out_sampler.sample_from_nodes(
        sampler_out_input, device=torch.device('cpu'))
      edges = sample_out.edge[('item', 'rev_u2i', 'user')]
      stats.scatter_add_(0, edges, torch.ones(80))
    # with high probability
    self.assertTrue(stats[2] < 350 and stats[10] < 350 and stats[18] < 350 and \
                    stats[26] < 350 and stats[42] < 350 and stats[58] < 350 \
                    and stats[74] < 350 and stats[76] < 350)
    self.assertTrue(stats[3] > 650 and stats[11] > 650 and stats[19] > 650 and \
                    stats[27] > 650 and stats[43] > 650 and stats[59] > 650 \
                    and stats[75] > 650 and stats[77] > 650)
    self.assertEqual(sum(stats), 8000)
  def test_dict_sample_from_nodes(self):
    """Per-edge-type fanout dict: u2i only at hop 0, i2i only at hop 1."""
    node_out_sampler = glt.sampler.NeighborSampler(
      graph=self.weighted_graph_dict,
      device='CPU',
      num_neighbors={('user', 'u2i', 'item'): [1,0],
                     ('item', 'i2i', 'item'): [0,1]},
      with_edge=True,
      with_weight=True,
      edge_dir='out',
    )
    sampler_out_input = glt.sampler.NodeSamplerInput(
      node=torch.tensor([1,5,9,13,21,29,37,38]), input_type=self.user_ntype)
    sample_out = node_out_sampler.sample_from_nodes(
      sampler_out_input, device=torch.device('cpu'))
    assert sample_out.edge[self.rev_u2i_etype].size(0) > 0
    assert sample_out.edge[self.i2i_etype].size(0) > 0
    assert sample_out.num_sampled_nodes['item'].size(0) == 3
    assert sample_out.num_sampled_nodes['user'].size(0) == 1
    assert sample_out.num_sampled_edges[self.rev_u2i_etype].size(0) == 1
    assert sample_out.num_sampled_edges[self.i2i_etype].size(0) == 2
  def test_hetero_insample_from_items(self):
    """In-direction sampling over the CSC graphs: 'user' seeds have no
    in-edges, while 'item' seeds reach both 'user' and 'item' sources."""
    node_sampler = glt.sampler.NeighborSampler(
      graph=self.graph_in_dict,
      num_neighbors=[1],
      with_edge=True,
      edge_dir='in'
    )
    # sample from 'user' can't get other nodes
    sampler_input = glt.sampler.NodeSamplerInput(
      node=torch.tensor([1,5,9,13,17,21,25,29]), input_type=self.user_ntype)
    sample_out = node_sampler.sample_from_nodes(sampler_input)
    self.assertTrue(len(sample_out.num_sampled_edges) == 0 and \
                    sample_out.node['user'].numel() == 8)
    # sampler from 'item', we can get 'user' and 'item'
    sampler_input = glt.sampler.NodeSamplerInput(
      node=torch.tensor([1,5,9,13,17,21,25,29]), input_type=self.item_ntype)
    sample_out = node_sampler.sample_from_nodes(sampler_input)
    base_homo_edge_index = torch.stack((
      sample_out.node['item'][sample_out.row[self.i2i_etype]],
      sample_out.node['item'][sample_out.col[self.i2i_etype]]
    ))
    base_hetero_edge_index = torch.stack((
      sample_out.node['user'][sample_out.row[self.u2i_etype]],
      sample_out.node['item'][sample_out.col[self.u2i_etype]]
    ))
    self.assertTrue(torch.all(
      ((base_homo_edge_index[0]+2)%40==base_homo_edge_index[1]) +
      ((base_homo_edge_index[0]+3)%40==base_homo_edge_index[1])
    ))
    self.assertTrue(torch.all(
      ((base_hetero_edge_index[0]+1)%40==base_hetero_edge_index[1]) +
      ((base_hetero_edge_index[0]+2)%40==base_hetero_edge_index[1])
    ))
  def test_hetero_sample_from_edges(self):
    """Edge-seeded sampling with binary and triplet negative sampling.

    Binary mode labels the 9 seed edges 1 and the 9 negatives 0; triplet
    mode returns src/dst_pos/dst_neg index tensors (2 negatives per seed).
    """
    edge_sampler = glt.sampler.NeighborSampler(
      graph=self.graph_dict,
      num_neighbors=[2,1],
      with_edge=True,
      with_neg=True
    )
    bin_neg_sampling = glt.sampler.NegativeSampling(mode='binary')
    tri_neg_sampling = glt.sampler.NegativeSampling(mode='triplet', amount=2)
    bin_sampler_input = glt.sampler.EdgeSamplerInput(
      row=torch.tensor([1, 3, 4, 7, 12, 18, 27, 32, 38], device=0),
      col=torch.tensor([2, 5, 5, 8, 13, 20, 29, 33, 0], device=0),
      input_type=self.u2i_etype,
      neg_sampling=bin_neg_sampling
    )
    tri_sampler_input = glt.sampler.EdgeSamplerInput(
      row=torch.tensor([1, 3, 4, 7, 12, 18, 27, 32, 38], device=0),
      col=torch.tensor([2, 5, 5, 8, 13, 20, 29, 33, 0], device=0),
      input_type=self.u2i_etype,
      neg_sampling=tri_neg_sampling
    )
    # check binary cases
    bin_sampler_out = edge_sampler.sample_from_edges(bin_sampler_input)
    base_homo_edge_index = torch.stack((
      bin_sampler_out.node['item'][bin_sampler_out.row[self.i2i_etype]],
      bin_sampler_out.node['item'][bin_sampler_out.col[self.i2i_etype]]
    ))
    base_hetero_edge_index = torch.stack((
      bin_sampler_out.node['item'][bin_sampler_out.row[self.rev_u2i_etype]],
      bin_sampler_out.node['user'][bin_sampler_out.col[self.rev_u2i_etype]]
    ))
    self.assertTrue(torch.all(
      ((base_homo_edge_index[1]+2)%40==base_homo_edge_index[0]) +
      ((base_homo_edge_index[1]+3)%40==base_homo_edge_index[0])
    ))
    self.assertTrue(torch.all(
      ((base_hetero_edge_index[1]+1)%40==base_hetero_edge_index[0]) +
      ((base_hetero_edge_index[1]+2)%40==base_hetero_edge_index[0])
    ))
    base_edge_ids = torch.cat(
      (bin_sampler_out.node['user'] * 2, bin_sampler_out.node['user'] * 2 + 1)
    ).unique()
    self.assertTrue(glt.utils.tensor_equal_with_device(
      base_edge_ids, bin_sampler_out.edge[self.rev_u2i_etype].unique())
    )
    # First 9 labels are the positive seed edges, last 9 the negatives.
    self.assertTrue(glt.utils.tensor_equal_with_device(
      bin_sampler_out.metadata['edge_label'][:9],
      torch.ones(9, dtype=torch.float, device=0))
    )
    self.assertTrue(glt.utils.tensor_equal_with_device(
      bin_sampler_out.metadata['edge_label'][9:],
      torch.zeros(9, dtype=torch.float, device=0))
    )
    base_edge_label_index = torch.stack((
      bin_sampler_input.row, bin_sampler_input.col
    ))
    pos_index = torch.stack((
      bin_sampler_out.node['user'][bin_sampler_out.metadata['edge_label_index'][0,:9]],
      bin_sampler_out.node['item'][bin_sampler_out.metadata['edge_label_index'][1,:9]]
    ))
    self.assertTrue(glt.utils.tensor_equal_with_device(
      base_edge_label_index, pos_index
    ))
    # Negatives must not coincide with any true u2i edge of the ring.
    neg_index = torch.stack((
      bin_sampler_out.node['user'][bin_sampler_out.metadata['edge_label_index'][0,9:]],
      bin_sampler_out.node['item'][bin_sampler_out.metadata['edge_label_index'][1,9:]]
    ))
    self.assertFalse(torch.any(
      ((neg_index[0]+1)%40==neg_index[1]) + ((neg_index[0]+2)%40==neg_index[1])
    ))
    # check triplet cases
    tri_sampler_out = edge_sampler.sample_from_edges(tri_sampler_input)
    base_homo_edge_index = torch.stack((
      tri_sampler_out.node['item'][tri_sampler_out.row[self.i2i_etype]],
      tri_sampler_out.node['item'][tri_sampler_out.col[self.i2i_etype]]
    ))
    base_hetero_edge_index = torch.stack((
      tri_sampler_out.node['item'][tri_sampler_out.row[self.rev_u2i_etype]],
      tri_sampler_out.node['user'][tri_sampler_out.col[self.rev_u2i_etype]]
    ))
    self.assertTrue(torch.all(
      ((base_homo_edge_index[1]+2)%40==base_homo_edge_index[0]) +
      ((base_homo_edge_index[1]+3)%40==base_homo_edge_index[0])
    ))
    self.assertTrue(torch.all(
      ((base_hetero_edge_index[1]+1)%40==base_hetero_edge_index[0]) +
      ((base_hetero_edge_index[1]+2)%40==base_hetero_edge_index[0])
    ))
    base_edge_ids = torch.cat(
      (tri_sampler_out.node['user'] * 2, tri_sampler_out.node['user'] * 2 + 1)
    ).unique()
    self.assertTrue(glt.utils.tensor_equal_with_device(
      base_edge_ids, tri_sampler_out.edge[self.rev_u2i_etype].unique())
    )
    base_src_index = tri_sampler_out.node['user'][
      tri_sampler_out.metadata['src_index']]
    base_dst_index = tri_sampler_out.node['item'][
      tri_sampler_out.metadata['dst_pos_index']]
    self.assertTrue(glt.utils.tensor_equal_with_device(
      base_src_index, tri_sampler_input.row
    ))
    self.assertTrue(glt.utils.tensor_equal_with_device(
      base_dst_index, tri_sampler_input.col
    ))
    # amount=2 negatives per each of the 9 seed edges.
    self.assertEqual(
      tri_sampler_out.metadata['dst_neg_index'].size(), torch.Size([9, 2])
    )
if __name__ == "__main__":
  # Allow running this test module directly: `python <this_file>.py`.
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_dist_neighbor_loader.py | test/python/test_dist_neighbor_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import unittest
import os
import torch
import graphlearn_torch as glt
from dist_test_utils import *
from dist_test_utils import _prepare_dataset, _prepare_hetero_dataset, vnum_per_partition
from parameterized import parameterized
from typing import List, Optional
def _check_sample_result(data, edge_dir):
  """Validate one homogeneous mini-batch produced by DistNeighborLoader.

  `edge_dir` is accepted for signature parity with the hetero checker but
  is not consulted here. Expected values rely on how dist_test_utils builds
  the dataset: node feature rows repeat the node id, edge feature rows
  repeat the edge id, and edges connect v to (v+1)%n or (v+2)%n
  (apparently — confirm against _prepare_dataset).
  """
  tc = unittest.TestCase()
  tc.assertEqual(data.batch_size, 5)
  device = data.node.device
  # Labels equal the global node ids.
  label = torch.arange(vnum_total).to(device)
  tc.assertTrue(glt.utils.tensor_equal_with_device(data.y, label[data.node]))
  # Each node feature row is its node id repeated 512 times.
  for i, v in enumerate(data.node):
    expect_feat = torch.tensor([v] * 512, device=device, dtype=torch.float32)
    tc.assertTrue(glt.utils.tensor_equal_with_device(data.x[i], expect_feat))
  tc.assertTrue(data.edge is not None)
  tc.assertTrue(data.edge_attr is not None)
  # Each edge feature row is its edge id repeated 10 times.
  for i, e in enumerate(data.edge):
    expect_feat = torch.tensor([e] * 10, device=device, dtype=torch.float32)
    tc.assertTrue(glt.utils.tensor_equal_with_device(data.edge_attr[i], expect_feat))
  # Translate local edge indices to global ids and verify ring structure.
  rows = data.node[data.edge_index[0]]
  cols = data.node[data.edge_index[1]]
  for i in range(rows.size(0)):
    tc.assertTrue(
      int(rows[i]) == ((int(cols[i]) + 1) % vnum_total) or
      int(rows[i]) == ((int(cols[i]) + 2) % vnum_total)
    )
  # Layer bookkeeping: 5 seeds plus two non-empty sampled hops,
  # and 10 seed-hop edges plus a non-empty second hop.
  tc.assertEqual(data.num_sampled_nodes[0].item(), 5)
  tc.assertEqual(data.num_sampled_nodes.size(0), 3)
  tc.assertNotEqual(data.num_sampled_nodes[1].item(), 0)
  tc.assertNotEqual(data.num_sampled_nodes[2].item(), 0)
  tc.assertEqual(data.num_sampled_edges[0].item(), 10)
  tc.assertEqual(data.num_sampled_edges.size(0), 2)
  tc.assertNotEqual(data.num_sampled_edges[1].item(), 0)
def _check_hetero_sample_result(data, edge_dir):
  """Validate one heterogeneous mini-batch produced by DistNeighborLoader.

  Args:
    data: the sampled hetero batch (PyG HeteroData-style).
    edge_dir: 'out' batches are seeded from 'user' nodes and carry reversed
      edge types ('rev_u2i'); 'in' batches keep the original edge types.

  Expected feature values mirror the construction in dist_test_utils:
  node/edge feature rows depend on id parity, and edges follow the ring
  layout u->(u+1|u+2)%n for u2i and i->(i+2|i+3)%n for i2i (apparently —
  confirm against _prepare_hetero_dataset).
  """
  tc = unittest.TestCase()
  if edge_dir == 'out':
    tc.assertEqual(data[user_ntype].batch_size, 5)
    device = data[user_ntype].node.device
    # 'user' labels equal the global node ids.
    user_label = torch.arange(vnum_total).to(device)
    tc.assertTrue(glt.utils.tensor_equal_with_device(
      data[user_ntype].y, user_label[data[user_ntype].node]
    ))
    # Node features depend only on id parity: user rows are 0/1,
    # item rows are 0/2.
    for i, v in enumerate(data[user_ntype].node):
      expect_feat = int(v) % 2 + torch.zeros(512, device=device, dtype=torch.float32)
      tc.assertTrue(glt.utils.tensor_equal_with_device(
        data.x_dict[user_ntype][i], expect_feat
      ))
    for i, v in enumerate(data[item_ntype].node):
      expect_feat = (int(v) % 2) * 2 + torch.zeros(
        256, device=device, dtype=torch.float32
      )
      tc.assertTrue(glt.utils.tensor_equal_with_device(
        data.x_dict[item_ntype][i], expect_feat
      ))
    rev_u2i_etype = glt.reverse_edge_type(u2i_etype)
    rev_i2i_etype = glt.reverse_edge_type(i2i_etype)
    tc.assertTrue(data[rev_u2i_etype].edge is not None)
    tc.assertTrue(data[rev_u2i_etype].edge_attr is not None)
    # Edge features depend on the parity of the source node (eid // degree).
    for i, e in enumerate(data[rev_u2i_etype].edge):
      expect_feat = ((int(e) // degree) % 2) + torch.ones(
        10, device=device, dtype=torch.float32
      )
      tc.assertTrue(glt.utils.tensor_equal_with_device(
        data.edge_attr_dict[rev_u2i_etype][i], expect_feat
      ))
    tc.assertTrue(data[rev_i2i_etype].edge is not None)
    tc.assertTrue(data[rev_i2i_etype].edge_attr is not None)
    for i, e in enumerate(data[rev_i2i_etype].edge):
      expect_feat = ((int(e) // degree) % 2) * 2 + torch.ones(
        5, device=device, dtype=torch.float32
      )
      tc.assertTrue(glt.utils.tensor_equal_with_device(
        data.edge_attr_dict[rev_i2i_etype][i], expect_feat
      ))
    # Map local indices back to global ids and verify the ring layout.
    rev_u2i_rows = data[item_ntype].node[data.edge_index_dict[rev_u2i_etype][0]]
    rev_u2i_cols = data[user_ntype].node[data.edge_index_dict[rev_u2i_etype][1]]
    tc.assertEqual(rev_u2i_rows.size(0), rev_u2i_cols.size(0))
    for i in range(rev_u2i_rows.size(0)):
      tc.assertTrue(
        (int(rev_u2i_rows[i]) == ((int(rev_u2i_cols[i]) + 1) % vnum_total) or
         int(rev_u2i_rows[i]) == ((int(rev_u2i_cols[i]) + 2) % vnum_total))
      )
    rev_i2i_rows = data[item_ntype].node[data.edge_index_dict[rev_i2i_etype][0]]
    rev_i2i_cols = data[item_ntype].node[data.edge_index_dict[rev_i2i_etype][1]]
    tc.assertEqual(rev_i2i_rows.size(0), rev_i2i_cols.size(0))
    for i in range(rev_i2i_rows.size(0)):
      tc.assertTrue(
        int(rev_i2i_rows[i]) == ((int(rev_i2i_cols[i]) + 2) % vnum_total) or
        int(rev_i2i_rows[i]) == ((int(rev_i2i_cols[i]) + 3) % vnum_total)
      )
    # Per-layer bookkeeping: 5 'user' seeds, items only at later hops.
    tc.assertEqual(data.num_sampled_nodes['item'][0].item(), 0)
    tc.assertNotEqual(data.num_sampled_nodes['item'][1].item(), 0)
    tc.assertNotEqual(data.num_sampled_nodes['item'][2].item(), 0)
    tc.assertEqual(data.num_sampled_nodes['user'][0].item(), 5)
    tc.assertEqual(data.num_sampled_nodes['user'][1].item(), 0)
    tc.assertEqual(data.num_sampled_nodes['user'][2].item(), 0)
    tc.assertEqual(data.num_sampled_edges['item', 'rev_u2i', 'user'][0].item(), 10)
    tc.assertEqual(data.num_sampled_edges['item', 'rev_u2i', 'user'][1].item(), 0)
    tc.assertEqual(data.num_sampled_edges['item', 'i2i', 'item'][0].item(), 0)
    tc.assertNotEqual(data.num_sampled_edges['item', 'i2i', 'item'][1].item(), 0)
  else:
    tc.assertEqual(data['num_sampled_nodes']['item'].size(0), 3)
    tc.assertEqual(data['num_sampled_nodes']['user'].size(0), 3)
    tc.assertEqual(
      data['num_sampled_edges'][('user', 'u2i', 'item')].size(0), 2)
    tc.assertEqual(
      data['num_sampled_edges'][('item', 'i2i', 'item')].size(0), 2)
    # BUGFIX: these two checks used tc.assertTrue(x, 10) / tc.assertTrue(x, 5),
    # where the second positional argument of assertTrue is the failure
    # *message* — any non-zero size passed. Assert the feature dims for real.
    tc.assertEqual(data[('user', 'u2i', 'item')].edge_attr.size(1), 10)
    tc.assertEqual(data[('item', 'i2i', 'item')].edge_attr.size(1), 5)
    u2i_row = data['user'].node[data[('user', 'u2i', 'item')].edge_index[0]]
    u2i_col = data['item'].node[data[('user', 'u2i', 'item')].edge_index[1]]
    i2i_row = data['item'].node[data[('item', 'i2i', 'item')].edge_index[0]]
    i2i_col = data['item'].node[data[('item', 'i2i', 'item')].edge_index[1]]
    tc.assertEqual(u2i_row.size(0), u2i_col.size(0))
    # BUGFIX: previously compared i2i_row.size(0) against itself, which is
    # vacuously true; compare row count against column count as intended.
    tc.assertEqual(i2i_row.size(0), i2i_col.size(0))
    tc.assertTrue(torch.all(
      ((u2i_row+2)%vnum_total == u2i_col) + ((u2i_row+1)%vnum_total == u2i_col))
    )
    tc.assertTrue(torch.all(
      ((i2i_row+2)%vnum_total == i2i_col) + ((i2i_row+3)%vnum_total == i2i_col))
    )
def run_test_as_worker(world_size: int, rank: int,
                       master_port: int, sampling_master_port: int,
                       dataset: glt.distributed.DistDataset,
                       input_nodes: glt.InputNodes, check_fn,
                       collocated = False, edge_dir='out'):
  """Entry point for one trainer process in the worker-mode loader tests.

  Initializes the distributed worker group and RPC, builds a
  DistNeighborLoader over `dataset`, and runs `check_fn` on every batch
  for two epochs.

  Args:
    world_size / rank: trainer group size and this process's rank.
    master_port: port for the trainer RPC group.
    sampling_master_port: port for the sampling-worker RPC group.
    dataset: this rank's partition of the distributed dataset.
    input_nodes: seed nodes assigned to this rank.
    check_fn: callback(batch, edge_dir) that asserts batch correctness.
    collocated: True -> sample in-process; False -> spawn mp sampling workers.
    edge_dir: neighbor sampling direction ('out' or 'in').
  """
  # Initialize worker group context
  glt.distributed.init_worker_group(
    world_size, rank, 'dist-neighbor-loader-test'
  )
  dist_context = glt.distributed.get_context()
  # Init RPC
  glt.distributed.init_rpc(
    master_addr='localhost',
    master_port=master_port,
    num_rpc_threads=1,
    rpc_timeout=30
  )
  # dist loader
  if collocated:
    # Sampling happens inside this trainer process.
    worker_options = glt.distributed.CollocatedDistSamplingWorkerOptions(
      master_addr='localhost',
      master_port=sampling_master_port,
      rpc_timeout=10
    )
  else:
    # Sampling is delegated to `sampling_nprocs` subprocesses, spread
    # round-robin over the available CUDA devices.
    worker_options = glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=sampling_nprocs,
      worker_devices=[torch.device('cuda', i % device_num)
                      for i in range(sampling_nprocs)],
      worker_concurrency=2,
      master_addr='localhost',
      master_port=sampling_master_port,
      rpc_timeout=10,
      num_rpc_threads=2,
      pin_memory=True
    )
  dist_loader = glt.distributed.DistNeighborLoader(
    data=dataset,
    num_neighbors=[2, 2],
    input_nodes=input_nodes,
    batch_size=5,
    shuffle=True,
    drop_last=False,
    with_edge=True,
    edge_dir=edge_dir,
    collect_features=True,
    to_device=torch.device('cuda', rank % device_num),
    worker_options=worker_options
  )
  # run testing
  for epoch in range(0, 2):
    for res in dist_loader:
      check_fn(res, edge_dir)
      time.sleep(0.1)
    # Keep all trainers in lockstep between epochs.
    glt.distributed.barrier()
    print(f'[Trainer {dist_context.rank}] epoch {epoch} finished.')
  dist_loader.shutdown()
def run_test_as_server(num_servers: int, num_clients: int, server_rank: List[int],
                       master_port: int, dataset: glt.distributed.DistDataset,
                       is_dynamic: bool = False):
  """Boot one sampling server and block until every client has exited."""
  print(f'[Server {server_rank}] Initializing server ...')
  server_kwargs = dict(
    num_servers=num_servers,
    num_clients=num_clients,
    server_rank=server_rank,
    dataset=dataset,
    master_addr='localhost',
    master_port=master_port,
    request_timeout=30,
    num_rpc_threads=2,
    server_group_name='dist_remote_sampling_test_server',
    is_dynamic=is_dynamic,
  )
  glt.distributed.init_server(**server_kwargs)
  print(f'[Server {server_rank}] Waiting for exit ...')
  glt.distributed.wait_and_shutdown_server()
  print(f'[Server {server_rank}] Exited ...')
def run_test_as_client(num_servers: int, num_clients: int, client_rank: int, server_rank: Optional[List[int]],
                       master_port: int, sampling_master_port: int,
                       input_nodes: glt.InputNodes, check_fn, edge_dir='out',
                       is_dynamic: bool = False):
  """Connect to the sampling servers, stream batches, and validate them."""
  print(f'[Client {client_rank}] Initializing client ...')
  glt.distributed.init_client(
    num_servers=num_servers,
    num_clients=num_clients,
    client_rank=client_rank,
    master_addr='localhost',
    master_port=master_port,
    num_rpc_threads=1,
    client_group_name='dist_remote_sampling_test_client',
    is_dynamic=is_dynamic
  )
  print(f'[Client {client_rank}] Creating DistNeighborLoader ...')
  # A None server_rank lets the framework pick the server(s) automatically.
  remote_options = glt.distributed.RemoteDistSamplingWorkerOptions(
    server_rank=server_rank,
    num_workers=sampling_nprocs,
    worker_devices=[torch.device('cuda', i % device_num)
                    for i in range(sampling_nprocs)],
    worker_concurrency=2,
    master_addr='localhost',
    master_port=sampling_master_port,
    rpc_timeout=10,
    num_rpc_threads=2,
    prefetch_size=2,
    worker_key='unittest'
  )
  loader = glt.distributed.DistNeighborLoader(
    data=None,
    num_neighbors=[2, 2],
    input_nodes=input_nodes,
    batch_size=5,
    shuffle=True,
    drop_last=False,
    with_edge=True,
    edge_dir=edge_dir,
    collect_features=True,
    to_device=torch.device('cuda', client_rank % device_num),
    worker_options=remote_options
  )
  print(f'[Client {client_rank}] Running tests ...')
  for epoch in range(0, 2):
    num_batches = 0
    for batch in loader:
      num_batches += 1
      check_fn(batch, edge_dir)
      time.sleep(0.1)
    glt.distributed.barrier()
    print(f'[Client {client_rank}] epoch {epoch} finished with {num_batches} batches.')
  print(f'[Client {client_rank}] Shutdowning ...')
  glt.distributed.shutdown_client()
  print(f'[Client {client_rank}] Exited ...')
class DistNeighborLoaderTestCase(unittest.TestCase):
input_nodes0_path = 'input_nodes0.pt'
input_nodes1_path = 'input_nodes1.pt'
  def setUp(self):
    """Build per-rank datasets and seed-node splits for all loader tests.

    Homogeneous datasets come in two partitioning flavors (hash and range);
    heterogeneous datasets come in two sampling directions ('in'/'out').
    Seed nodes are also persisted to disk so server-mode tests can load
    them by path (cleaned up in tearDown).
    """
    self.dataset0 = _prepare_dataset(rank=0)
    self.dataset1 = _prepare_dataset(rank=1)
    self.range_partition_dataset0 = _prepare_dataset(rank=0, is_range_partition=True)
    self.range_partition_dataset1 = _prepare_dataset(rank=1, is_range_partition=True)
    # all for train
    self.dataset0.random_node_split(0, 0)
    self.dataset1.random_node_split(0, 0)
    # Rank 0 seeds the first half of the node id space, rank 1 the second.
    self.input_nodes0 = torch.arange(vnum_per_partition)
    self.input_nodes1 = torch.arange(vnum_per_partition) + vnum_per_partition
    torch.save(self.input_nodes0, self.input_nodes0_path)
    torch.save(self.input_nodes1, self.input_nodes1_path)
    self.in_hetero_dataset0 = _prepare_hetero_dataset(rank=0, edge_dir='in')
    self.in_hetero_dataset1 = _prepare_hetero_dataset(rank=1, edge_dir='in')
    self.out_hetero_dataset0 = _prepare_hetero_dataset(rank=0, edge_dir='out')
    self.out_hetero_dataset1 = _prepare_hetero_dataset(rank=1, edge_dir='out')
    # Out-sampling seeds from 'user' nodes; in-sampling seeds from 'item'.
    self.out_hetero_input_nodes0 = (user_ntype, self.input_nodes0)
    self.out_hetero_input_nodes1 = (user_ntype, self.input_nodes1)
    self.in_hetero_input_nodes0 = (item_ntype, self.input_nodes0)
    self.in_hetero_input_nodes1 = (item_ntype, self.input_nodes1)
    # Fresh ports per test to avoid collisions between test cases.
    self.master_port = glt.utils.get_free_port()
    self.sampling_master_port = glt.utils.get_free_port()
def tearDown(self):
for file_path in [self.input_nodes0_path, self.input_nodes1_path]:
if os.path.exists(file_path):
os.remove(file_path)
def _get_homo_datasets(self, is_range_partition):
return (self.range_partition_dataset0, self.range_partition_dataset1) if is_range_partition else (self.dataset0, self.dataset1)
@parameterized.expand([
(True),
(False),
])
def test_homo_collocated(self, is_range_partition):
print("\n--- DistNeighborLoader Test (homogeneous, collocated) ---")
dataset0, dataset1 = self._get_homo_datasets(is_range_partition)
mp_context = torch.multiprocessing.get_context('spawn')
w0 = mp_context.Process(
target=run_test_as_worker,
args=(2, 0, self.master_port, self.sampling_master_port,
dataset0, self.input_nodes0, _check_sample_result, True)
)
w1 = mp_context.Process(
target=run_test_as_worker,
args=(2, 1, self.master_port, self.sampling_master_port,
dataset1, self.input_nodes1, _check_sample_result, True)
)
w0.start()
w1.start()
w0.join()
w1.join()
@parameterized.expand([
(True),
(False),
])
def test_homo_mp(self, is_range_partition):
print("\n--- DistNeighborLoader Test (homogeneous, multiprocessing) ---")
mp_context = torch.multiprocessing.get_context('spawn')
dataset0, dataset1 = self._get_homo_datasets(is_range_partition)
w0 = mp_context.Process(
target=run_test_as_worker,
args=(2, 0, self.master_port, self.sampling_master_port,
dataset0, self.input_nodes0, _check_sample_result, False)
)
w1 = mp_context.Process(
target=run_test_as_worker,
args=(2, 1, self.master_port, self.sampling_master_port,
dataset1, self.input_nodes1, _check_sample_result, False)
)
w0.start()
w1.start()
w0.join()
w1.join()
def test_hetero_out_sample_collocated(self):
  """Out-edge sampling on the heterogeneous dataset, collocated sampler."""
  print("\n--- DistNeighborLoader Test (heterogeneous, collocated) ---")
  datasets = (self.out_hetero_dataset0, self.out_hetero_dataset1)
  seeds = (self.out_hetero_input_nodes0, self.out_hetero_input_nodes1)
  ctx = torch.multiprocessing.get_context('spawn')
  workers = [
    ctx.Process(
      target=run_test_as_worker,
      args=(2, rank, self.master_port, self.sampling_master_port,
            datasets[rank], seeds[rank], _check_hetero_sample_result, True))
    for rank in range(2)
  ]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()
def test_hetero_out_sample_mp(self):
  """Out-edge sampling on the heterogeneous dataset, multiprocess sampler."""
  print("\n--- DistNeighborLoader Test (heterogeneous, multiprocessing) ---")
  datasets = (self.out_hetero_dataset0, self.out_hetero_dataset1)
  seeds = (self.out_hetero_input_nodes0, self.out_hetero_input_nodes1)
  ctx = torch.multiprocessing.get_context('spawn')
  workers = [
    ctx.Process(
      target=run_test_as_worker,
      args=(2, rank, self.master_port, self.sampling_master_port,
            datasets[rank], seeds[rank], _check_hetero_sample_result, False))
    for rank in range(2)
  ]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()
def test_hetero_in_sample_collocated(self):
  """In-edge sampling on the heterogeneous dataset, collocated sampler."""
  print("\n--- DistNeighborLoader Test (in-sample, heterogeneous, collocated) ---")
  datasets = (self.in_hetero_dataset0, self.in_hetero_dataset1)
  seeds = (self.in_hetero_input_nodes0, self.in_hetero_input_nodes1)
  ctx = torch.multiprocessing.get_context('spawn')
  workers = [
    ctx.Process(
      target=run_test_as_worker,
      args=(2, rank, self.master_port, self.sampling_master_port,
            datasets[rank], seeds[rank], _check_hetero_sample_result,
            True, 'in'))
    for rank in range(2)
  ]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()
def test_hetero_in_sample_mp(self):
  """In-edge sampling on the heterogeneous dataset, multiprocess sampler."""
  print("\n--- DistNeighborLoader Test (in-sample, heterogeneous, multiprocessing) ---")
  datasets = (self.in_hetero_dataset0, self.in_hetero_dataset1)
  seeds = (self.in_hetero_input_nodes0, self.in_hetero_input_nodes1)
  ctx = torch.multiprocessing.get_context('spawn')
  workers = [
    ctx.Process(
      target=run_test_as_worker,
      args=(2, rank, self.master_port, self.sampling_master_port,
            datasets[rank], seeds[rank], _check_hetero_sample_result,
            False, 'in'))
    for rank in range(2)
  ]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()
@parameterized.expand([
  ([[0],[1]], 2, 2, "file_path"),
  ([[0, 1]], 1, 2, "file_path"),
  ([[0, 1], [0, 1]], 2, 2, "file_path"),
  ([[0],[1]], 2, 2, "split"),
  ([[0, 1]], 1, 2, "split"),
  ([[0, 1], [0, 1]], 2, 2, "split"),
])
def test_remote_mode(self, servers_for_clients, num_clients, num_servers, input_nodes_type):
  """Server-client (remote) sampling.

  Args:
    servers_for_clients: per-client list of the server ranks it connects to.
    num_clients / num_servers: process counts to spawn.
    input_nodes_type: 'split' passes a Split enum as seeds, 'file_path'
      passes the per-server seed files written in setUp.
  """
  print("\n--- DistNeighborLoader Test (server-client mode, remote) ---")
  print(f"--- num_clients: {num_clients} num_servers: {num_servers} ---")
  print(f"--- input_nodes_type: {input_nodes_type} ---")
  self.dataset_list = [self.dataset0, self.dataset1]
  self.input_nodes_path_list = [self.input_nodes0_path, self.input_nodes1_path]
  mp_context = torch.multiprocessing.get_context('spawn')
  server_procs = [
    mp_context.Process(
      target=run_test_as_server,
      args=(num_servers, num_clients, server_rank, self.master_port,
            self.dataset_list[server_rank]))
    for server_rank in range(num_servers)
  ]
  client_procs = []
  for client_rank in range(num_clients):
    server_rank_list = servers_for_clients[client_rank]
    if input_nodes_type == "split":
      input_nodes = glt.typing.Split.train
    elif input_nodes_type == "file_path":
      input_nodes = [self.input_nodes_path_list[server_rank]
                     for server_rank in server_rank_list]
    else:
      # Fail fast with a clear message instead of an UnboundLocalError
      # on `input_nodes` below.
      raise ValueError(f"unknown input_nodes_type: {input_nodes_type!r}")
    client_procs.append(mp_context.Process(
      target=run_test_as_client,
      args=(num_servers, num_clients, client_rank, server_rank_list,
            self.master_port, self.sampling_master_port, input_nodes,
            _check_sample_result)))
  # Start all servers first, then all clients; join in the same order.
  for proc in server_procs + client_procs:
    proc.start()
  for proc in server_procs + client_procs:
    proc.join()
@parameterized.expand([
  ([[0],[1]], 2, 2),
  ([[0, 1]], 1, 2),
  ([[0, 1], [0, 1]], 2, 2),
])
def test_remote_mode_dynamic_world_size(self, servers_for_clients, num_clients, num_servers):
  """Server-client sampling with `is_dynamic=True` (dynamic world size)."""
  print("\n--- DistNeighborLoader Test (server-client mode, remote, dynamic world size) ---")
  print(f"--- num_clients: {num_clients} num_servers: {num_servers} ---")
  self.dataset_list = [self.dataset0, self.dataset1]
  self.input_nodes_list = [self.input_nodes0_path, self.input_nodes1_path]
  ctx = torch.multiprocessing.get_context('spawn')
  # `num_clients` is passed as 0: servers ignore it when `is_dynamic`=True.
  server_procs = [
    ctx.Process(
      target=run_test_as_server,
      args=(num_servers, 0, server_rank, self.master_port,
            self.dataset_list[server_rank], True))
    for server_rank in range(num_servers)
  ]
  client_procs = []
  for client_rank in range(num_clients):
    ranks = servers_for_clients[client_rank]
    client_procs.append(ctx.Process(
      target=run_test_as_client,
      args=(num_servers, num_clients, client_rank, ranks, self.master_port,
            self.sampling_master_port,
            [self.input_nodes_list[r] for r in ranks],
            _check_sample_result, 'out', True)))
  for proc in server_procs:
    proc.start()
  for proc in client_procs:
    proc.start()
  for proc in server_procs:
    proc.join()
  for proc in client_procs:
    proc.join()
@parameterized.expand([
  (2, 2),
  (1, 2)
])
def test_remote_mode_auto_assign_server(self, num_clients, num_servers):
  """Server-client sampling where clients get their server rank assigned."""
  print("\n--- DistNeighborLoader Test (server-client mode, remote, dynamic world size) ---")
  print(f"--- num_clients: {num_clients} num_servers: {num_servers} ---")
  self.dataset_list = [self.dataset0, self.dataset1]
  self.input_nodes_list = [self.input_nodes0_path, self.input_nodes1_path]
  ctx = torch.multiprocessing.get_context('spawn')
  # `num_clients` is passed as 0: servers ignore it when `is_dynamic`=True.
  server_procs = [
    ctx.Process(
      target=run_test_as_server,
      args=(num_servers, 0, server_rank, self.master_port,
            self.dataset_list[server_rank], True))
    for server_rank in range(num_servers)
  ]
  seed_files = [self.input_nodes_list[r] for r in range(num_servers)]
  # `server_rank`=None lets the client library pick a server automatically.
  client_procs = [
    ctx.Process(
      target=run_test_as_client,
      args=(num_servers, num_clients, client_rank, None, self.master_port,
            self.sampling_master_port, seed_files,
            _check_sample_result, 'out', True))
    for client_rank in range(num_clients)
  ]
  for proc in server_procs:
    proc.start()
  for proc in client_procs:
    proc.start()
  for proc in server_procs:
    proc.join()
  for proc in client_procs:
    proc.join()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_shm_channel.py | test/python/test_shm_channel.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
import torch.multiprocessing as mp
from graphlearn_torch.channel import ShmChannel, QueueTimeoutError
def run_sender(_, channel):
  """Producer: push 10 dict messages mixing a CPU and a CUDA tensor."""
  for step in range(1, 11):
    payload = {
      'from_cpu': torch.ones([step, step], dtype=torch.float32),
      'from_cuda': torch.arange(step, dtype=torch.int32,
                                device=torch.device('cuda')),
    }
    channel.send(payload)
    print("[sender] message {} sent".format(step))
def run_receiver(_, channel):
  """Consumer: pin the channel, validate 10 messages, then expect a timeout."""
  channel.pin_memory()
  print("[receiver] memory pinned!")
  tc = unittest.TestCase()
  for step in range(1, 11):
    msg = channel.recv()
    print("[receiver] message {} received".format(step))
    tc.assertEqual(len(msg), 2)
    # Both tensors arrive on the CPU, including the one sent from CUDA.
    tc.assertIn('from_cpu', msg)
    tc.assertEqual(msg['from_cpu'].device, torch.device('cpu'))
    tc.assertTrue(torch.equal(msg['from_cpu'],
                              torch.ones([step, step], dtype=torch.float32)))
    tc.assertIn('from_cuda', msg)
    tc.assertEqual(msg['from_cuda'].device, torch.device('cpu'))
    tc.assertTrue(torch.equal(msg['from_cuda'],
                              torch.arange(step, dtype=torch.int32)))
  # No 11th message exists, so a bounded recv must time out.
  try:
    channel.recv(10)
  except QueueTimeoutError as err:
    print('Expected Error', err)
  tc.assertTrue(channel.empty())
class SampleQueueCase(unittest.TestCase):
  def test_send_and_receive(self):
    """Spawn one sender and one receiver sharing a 1MB shared-memory channel."""
    channel = ShmChannel(capacity=5, shm_size=1024*1024)
    contexts = [
      mp.spawn(run_sender, args=(channel,), nprocs=1, join=False),
      mp.spawn(run_receiver, args=(channel,), nprocs=1, join=False),
    ]
    for ctx in contexts:
      ctx.join()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
  unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_graph.py | test/python/test_graph.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
from graphlearn_torch.data import Topology, Graph
class GraphTestCase(unittest.TestCase):
  """Tests for ``Topology`` layout conversions and ``Graph`` construction."""

  def setUp(self):
    """
    Test graph (entry = edge id; rows are sources, columns destinations):
      col:    0  1  2  3  4  5
      row 0:  0  1
      row 1:     2     3
      row 2:        4  5  6
      row 3:                 7
    The same 8 edges are expressed below in both CSC and CSR form.
    """
    # CSC: indptr over destination columns, indices = source rows.
    self.indptr_csc = torch.tensor([0, 1, 3, 4, 6, 7, 8], dtype=torch.int64)
    self.indices_csc = torch.tensor([0, 0, 1, 2, 1, 2, 2, 3], dtype=torch.int64)
    self.edge_ids_csc = torch.tensor([0, 1, 2, 4, 3, 5, 6, 7], dtype=torch.int64)
    self.edge_weights_csc = torch.tensor([.1, .2, .3, .5, .4, .6, .7, .8], dtype=torch.float)
    self.csc_topo = Topology(
      edge_index=(self.indices_csc, self.indptr_csc),
      edge_ids=self.edge_ids_csc,
      edge_weights=self.edge_weights_csc,
      input_layout='CSC',
      layout='CSC'
    )
    # CSR: indptr over source rows, indices = destination columns.
    self.indptr_csr = torch.tensor([0, 2, 4, 7, 8], dtype=torch.int64)
    self.indices_csr = torch.tensor([0, 1, 1, 3, 2, 3, 4, 5], dtype=torch.int64)
    self.edge_ids_csr = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.int64)
    self.edge_weights_csr = torch.tensor([.1, .2, .3, .4, .5, .6, .7, .8], dtype=torch.float)
    self.csr_topo = Topology(
      edge_index=(self.indptr_csr, self.indices_csr),
      edge_ids=self.edge_ids_csr,
      edge_weights=self.edge_weights_csr,
      input_layout='CSR',
      layout='CSR'
    )

  def _assert_topo_equal(self, topo, indptr, indices, edge_ids, edge_weights):
    """Assert that ``topo`` stores exactly the given compressed arrays."""
    self.assertTrue(torch.equal(indptr, topo.indptr))
    self.assertTrue(torch.equal(indices, topo.indices))
    self.assertTrue(torch.equal(edge_ids, topo.edge_ids))
    # Weights are floats: compare with tolerance.
    self.assertTrue(torch.allclose(edge_weights, topo.edge_weights))

  def test_csr_topo_with_coo(self):
    """COO -> CSR conversion, plus the CSR -> COO round trip."""
    row = torch.tensor([0, 0, 1, 1, 2, 2, 2, 3], dtype=torch.int64)
    col = torch.tensor([0, 1, 1, 3, 2, 3, 4, 5], dtype=torch.int64)
    edge_ids = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.int64)
    edge_weights = torch.tensor([.1, .2, .3, .4, .5, .6, .7, .8], dtype=torch.float)
    csr_topo_from_coo = Topology(
      edge_index=(row, col), edge_ids=edge_ids, edge_weights=edge_weights,
      input_layout='COO'
    )
    self._assert_topo_equal(csr_topo_from_coo, self.indptr_csr, self.indices_csr,
                            self.edge_ids_csr, self.edge_weights_csr)
    # Round trip: converting the CSR topology back to COO must reproduce
    # the original arrays.
    row_rt, col_rt, edge_ids_rt, edge_weights_rt = self.csr_topo.to_coo()
    self.assertTrue(torch.equal(row, row_rt))
    self.assertTrue(torch.equal(col, col_rt))
    self.assertTrue(torch.equal(edge_ids, edge_ids_rt))
    self.assertTrue(torch.allclose(edge_weights, edge_weights_rt))

  def test_csr_topo_with_csc(self):
    """CSC -> CSR conversion, plus the CSR -> CSC round trip."""
    row = torch.tensor([0, 0, 1, 2, 1, 2, 2, 3], dtype=torch.int64)
    colptr = torch.tensor([0, 1, 3, 4, 6, 7, 8], dtype=torch.int64)
    edge_ids = torch.tensor([0, 1, 2, 4, 3, 5, 6, 7], dtype=torch.int64)
    edge_weights = torch.tensor([.1, .2, .3, .5, .4, .6, .7, .8], dtype=torch.float)
    csr_topo_from_csc = Topology(
      edge_index=(row, colptr), edge_ids=edge_ids, edge_weights=edge_weights,
      input_layout='CSC'
    )
    self._assert_topo_equal(csr_topo_from_csc, self.indptr_csr, self.indices_csr,
                            self.edge_ids_csr, self.edge_weights_csr)
    # Round trip: converting the CSR topology to CSC must reproduce the inputs.
    row_rt, colptr_rt, edge_ids_rt, edge_weights_rt = self.csr_topo.to_csc()
    self.assertTrue(torch.equal(row, row_rt))
    self.assertTrue(torch.equal(colptr, colptr_rt))
    self.assertTrue(torch.equal(edge_ids, edge_ids_rt))
    self.assertTrue(torch.allclose(edge_weights, edge_weights_rt))

  def test_cpu_graph_init(self):
    """Graph on host memory reports the expected edge/row counts."""
    g = Graph(self.csr_topo, mode='CPU')
    self.assertEqual(g.edge_count, self.indices_csr.size(0))
    self.assertEqual(g.row_count, self.indptr_csr.size(0) - 1)

  def test_cuda_graph_init(self):
    """Graph on device 0 reports the expected edge/row counts."""
    g = Graph(self.csr_topo, 'CUDA', 0)
    self.assertEqual(g.edge_count, self.indices_csr.size(0))
    self.assertEqual(g.row_count, self.indptr_csr.size(0) - 1)

  def test_pin_graph_init(self):
    """Graph in pinned (zero-copy) memory reports the expected counts."""
    g = Graph(self.csr_topo, 'ZERO_COPY', 0)
    self.assertEqual(g.edge_count, self.indices_csr.size(0))
    self.assertEqual(g.row_count, self.indptr_csr.size(0) - 1)

  def test_topo_with_layout(self):
    """All input_layout -> layout combinations produce the expected arrays."""
    # 'COO' -> 'CSC'
    row = torch.tensor([0, 0, 1, 1, 2, 2, 2, 3], dtype=torch.int64)
    col = torch.tensor([0, 1, 1, 3, 2, 3, 4, 5], dtype=torch.int64)
    edge_ids = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.int64)
    edge_weights = torch.tensor([.1, .2, .3, .4, .5, .6, .7, .8], dtype=torch.float)
    csc_topo = Topology(
      edge_index=(row, col),
      edge_ids=edge_ids,
      edge_weights=edge_weights,
      input_layout='COO',
      layout='CSC'
    )
    self._assert_topo_equal(csc_topo, self.indptr_csc, self.indices_csc,
                            self.edge_ids_csc, self.edge_weights_csc)
    # 'COO' -> 'CSR'
    csr_topo = Topology(
      edge_index=(row, col),
      edge_ids=edge_ids,
      edge_weights=edge_weights,
      input_layout='COO',
      layout='CSR'
    )
    self._assert_topo_equal(csr_topo, self.indptr_csr, self.indices_csr,
                            self.edge_ids_csr, self.edge_weights_csr)
    # 'CSC' -> 'CSR'
    csr_topo = Topology(
      edge_index=(self.indices_csc, self.indptr_csc),
      edge_ids=self.edge_ids_csc,
      edge_weights=self.edge_weights_csc,
      input_layout='CSC',
      layout='CSR'
    )
    self._assert_topo_equal(csr_topo, self.indptr_csr, self.indices_csr,
                            self.edge_ids_csr, self.edge_weights_csr)
    # 'CSR' -> 'CSC'
    csc_topo = Topology(
      edge_index=(self.indptr_csr, self.indices_csr),
      edge_ids=self.edge_ids_csr,
      edge_weights=self.edge_weights_csr,
      input_layout='CSR',
      layout='CSC'
    )
    self._assert_topo_equal(csc_topo, self.indptr_csc, self.indices_csc,
                            self.edge_ids_csc, self.edge_weights_csc)
    # 'CSR' -> 'CSR' (identity)
    csr_topo = Topology(
      edge_index=(self.indptr_csr, self.indices_csr),
      edge_ids=self.edge_ids_csr,
      edge_weights=self.edge_weights_csr,
      input_layout='CSR',
      layout='CSR'
    )
    self._assert_topo_equal(csr_topo, self.indptr_csr, self.indices_csr,
                            self.edge_ids_csr, self.edge_weights_csr)
    # 'CSC' -> 'CSC' (identity)
    csc_topo = Topology(
      edge_index=(self.indices_csc, self.indptr_csc),
      edge_ids=self.edge_ids_csc,
      edge_weights=self.edge_weights_csc,
      input_layout='CSC',
      layout='CSC'
    )
    self._assert_topo_equal(csc_topo, self.indptr_csc, self.indices_csc,
                            self.edge_ids_csc, self.edge_weights_csc)
if __name__ == "__main__":
unittest.main() | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/dist_test_utils.py | test/python/dist_test_utils.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import graphlearn_torch as glt
from typing import Literal
# Options for synthetic dataset generation.
vnum_per_partition = 20
num_partition = 2
vnum_total = vnum_per_partition * num_partition # 40
degree = 2  # edges emitted per node
enum_per_partition = vnum_per_partition * degree # 40
enum_total = enum_per_partition * num_partition # 80
# Node / edge type names used by the heterogeneous dataset.
user_ntype = 'user'
item_ntype = 'item'
u2i_etype = ('user', 'u2i', 'item')
i2i_etype = ('item', 'i2i', 'item')
rev_u2i_etype = ('item', 'rev_u2i', 'user')
# Fixed sampling options shared by the distributed tests.
sampling_nprocs = 2
device_num = 2
def _prepare_dataset(rank: int,
                     weighted: bool = False,
                     is_range_partition: bool = False):
  """
  Prepare a synthetic graph dataset with 40 nodes and 80 edges for unit tests.

  Args:
    rank: Partition rank (0 or 1) of the returned dataset.
    weighted: If True, use a CPU graph whose edge weights are
      ``edge_id % 2``; otherwise use an unweighted ZERO_COPY graph.
    is_range_partition: Select the range partition strategy instead of the
      hash (parity) strategy; see below.

  Graph topology:
  - rows: [0, 0, 1, 1, 2, 2, ... 37, 37, 38, 38, 39, 39]
  - cols: [1, 2, 2, 3, 3, 4, ... 38, 39, 39, 0, 0, 1]
  - eids: [0, 1, 2, 3, 4, 5, ... 74, 75, 76, 77, 78, 79]

  Node features:
  [[0., 0., ..., 0., 0.],
  [1., 1., ..., 1., 1.],
  ...
  [39., 39., ..., 39., 39.]]

  Edge features:
  [[0., 0., ..., 0., 0.],
  [1., 1., ..., 1., 1.],
  ...
  [79., 79., ..., 79., 79.]]

  Two partition strategies are available:
  1. Range partition:
    - Nodes with IDs [0, 19] and edges with IDs [0, 39] are on partition 0
    - Nodes with IDs [20, 39] and edges with IDs [40, 79] are on partition 1
  2. Hash partition:
    - Even-numbered nodes and edges are on partition 0
    - Odd-numbered nodes and edges are on partition 1

  The graph topology and features are identical under both partition strategies.
  """
  if is_range_partition:
    # Contiguous ID ranges per partition; `start/end/step` enumerate this
    # rank's local node IDs.
    node_ranges = [(0, vnum_per_partition), (vnum_per_partition, vnum_total)]
    edge_ranges = [(0, enum_total // 2), (enum_total // 2, enum_total)]
    node_pb = glt.partition.RangePartitionBook(
      node_ranges, rank)
    edge_pb = glt.partition.RangePartitionBook(
      edge_ranges, rank)
    start, end, step = rank * vnum_per_partition, (rank + 1) * vnum_per_partition, 1
  else:
    # Hash (parity) partition books: id -> owning partition.
    node_pb = torch.tensor(
      [v % 2 for v in range(0, vnum_total)],
      dtype=torch.long
    )
    edge_pb = torch.tensor(
      [((e // degree) % 2) for e in range(0, enum_total)],
      dtype=torch.long
    )
    start, end, step = rank, vnum_total, 2
  # graph: each local node v emits `degree` edges to (v+1)%N, (v+2)%N, ...
  # with globally unique edge ids v*degree + i.
  nodes, rows, cols, eids = [], [], [], []
  for v in range(start, end, step):
    nodes.append(v)
    rows.extend([v for _ in range(degree)])
    cols.extend([((v + i + 1) % vnum_total) for i in range(degree)])
    eids.extend([(v * degree + i) for i in range(degree)])
  edge_index = torch.tensor([rows, cols], dtype=torch.int64)
  edge_ids = torch.tensor(eids, dtype=torch.int64)
  edge_weights = (edge_ids % 2).to(torch.float)
  csr_topo = glt.data.Topology(edge_index=edge_index, edge_ids=edge_ids)
  graph = glt.data.Graph(csr_topo, 'ZERO_COPY', device=0)
  weighted_csr_topo = glt.data.Topology(
    edge_index=edge_index, edge_ids=edge_ids, edge_weights=edge_weights)
  weighted_graph = glt.data.Graph(weighted_csr_topo, 'CPU')
  # feature: node feature row i repeats the node id; edge feature row i
  # repeats the edge id (see docstring).
  device_group_list = [glt.data.DeviceGroup(0, [0]),
                       glt.data.DeviceGroup(1, [1])]
  split_ratio = 0.2
  nfeat = torch.tensor(nodes, dtype=torch.float32).unsqueeze(1).repeat(1, 512)
  nfeat_id2idx = node_pb.id2index if is_range_partition else glt.utils.id2idx(nodes)
  node_feature = glt.data.Feature(nfeat, nfeat_id2idx, split_ratio,
                                  device_group_list, device=0)
  efeat = torch.tensor(eids, dtype=torch.float32).unsqueeze(1).repeat(1, 10)
  efeat_id2idx = edge_pb.id2index if is_range_partition else glt.utils.id2idx(eids)
  edge_feature = glt.data.Feature(efeat, efeat_id2idx, split_ratio,
                                  device_group_list, device=0)
  # whole node label (label == node id)
  node_label = torch.arange(vnum_total)
  # dist dataset
  ds = glt.distributed.DistDataset(
    2, rank,
    weighted_graph if weighted else graph,
    node_feature, edge_feature, node_label,
    node_pb, edge_pb
  )
  if is_range_partition:
    ds.id_filter = node_pb.id_filter
  return ds
def _build_hetero_graph_pair(edge_index, edge_ids, edge_weights, edge_dir):
  """Build (unweighted ZERO_COPY graph, weighted CPU graph) for one edge type.

  ``edge_dir='out'`` stores the topology as CSR, ``'in'`` as CSC.
  """
  if edge_dir == 'out':
    layout = 'CSR'
  elif edge_dir == 'in':
    layout = 'CSC'
  else:
    # Fail fast with a clear message; previously an invalid value surfaced
    # later as an UnboundLocalError.
    raise ValueError(f"edge_dir must be 'in' or 'out', got {edge_dir!r}")
  topo = glt.data.Topology(
    edge_index=edge_index, edge_ids=edge_ids, layout=layout)
  weighted_topo = glt.data.Topology(
    edge_index=edge_index, edge_ids=edge_ids,
    edge_weights=edge_weights, layout=layout)
  graph = glt.data.Graph(topo, 'ZERO_COPY', device=0)
  weighted_graph = glt.data.Graph(weighted_topo, 'CPU')
  return graph, weighted_graph


def _prepare_hetero_dataset(
  rank: int,
  edge_dir: Literal['in', 'out'] = 'out',
  weighted: bool = False
):
  """Prepare a synthetic heterogeneous 'user'/'item' dataset for unit tests.

  Nodes and edges are hash-partitioned by parity: even ids live on rank 0,
  odd ids on rank 1. Every local node ``v`` emits ``degree`` edges with ids
  ``v * degree + i``: 'u2i' edges go to ``(v + i + 1) % vnum_total`` and
  'i2i' edges to ``(v + i + 2) % vnum_total``.

  Args:
    rank: Partition rank (0 or 1).
    edge_dir: Sampling direction; 'out' stores CSR, 'in' stores CSC.
    weighted: If True back the dataset with CPU graphs whose edge weights
      are ``edge_id % 2``; otherwise use unweighted ZERO_COPY graphs.
  """
  # Partition books: id -> owning partition (parity hash).
  node_pb = torch.tensor(
    [v % 2 for v in range(0, vnum_total)],
    dtype=torch.long
  )
  edge_pb = torch.tensor(
    [((e // degree) % 2) for e in range(0, enum_total)],
    dtype=torch.long
  )
  node_pb_dict = {
    user_ntype: node_pb,
    item_ntype: node_pb
  }
  edge_pb_dict = {
    u2i_etype: edge_pb,
    i2i_etype: edge_pb
  }

  def _local_edges(col_offset):
    # Enumerate this rank's nodes and their `degree` edges; destination
    # columns are shifted by `col_offset` (1 for u2i, 2 for i2i).
    nodes, rows, cols, eids = [], [], [], []
    for v in range(rank, vnum_total, 2):
      nodes.append(v)
      rows.extend([v] * degree)
      cols.extend([(v + i + col_offset) % vnum_total for i in range(degree)])
      eids.extend([v * degree + i for i in range(degree)])
    return nodes, rows, cols, eids

  # 'u2i' topology.
  user_nodes, u2i_rows, u2i_cols, u2i_eids = _local_edges(1)
  u2i_edge_index = torch.tensor([u2i_rows, u2i_cols], dtype=torch.int64)
  u2i_edge_ids = torch.tensor(u2i_eids, dtype=torch.int64)
  u2i_edge_weights = (u2i_edge_ids % 2).to(torch.float)
  u2i_graph, weighted_u2i_graph = _build_hetero_graph_pair(
    u2i_edge_index, u2i_edge_ids, u2i_edge_weights, edge_dir)

  # 'i2i' topology.
  item_nodes, i2i_rows, i2i_cols, i2i_eids = _local_edges(2)
  i2i_edge_index = torch.tensor([i2i_rows, i2i_cols], dtype=torch.int64)
  i2i_edge_ids = torch.tensor(i2i_eids, dtype=torch.int64)
  i2i_edge_weights = (i2i_edge_ids % 2).to(torch.float)
  i2i_graph, weighted_i2i_graph = _build_hetero_graph_pair(
    i2i_edge_index, i2i_edge_ids, i2i_edge_weights, edge_dir)

  graph_dict = {
    u2i_etype: u2i_graph,
    i2i_etype: i2i_graph
  }
  weighted_graph_dict = {
    u2i_etype: weighted_u2i_graph,
    i2i_etype: weighted_i2i_graph
  }

  # Features: constant per (type, rank) so tests can tell which partition a
  # fetched feature row came from. Dims: user=512, item=256, u2i=10, i2i=5.
  device_group_list = [glt.data.DeviceGroup(0, [0]),
                       glt.data.DeviceGroup(1, [1])]
  split_ratio = 0.2
  user_nfeat = rank + torch.zeros(len(user_nodes), 512, dtype=torch.float32)
  user_feature = glt.data.Feature(user_nfeat, glt.utils.id2idx(user_nodes),
                                  split_ratio, device_group_list, device=0)
  item_nfeat = rank * 2 + torch.zeros(len(item_nodes), 256, dtype=torch.float32)
  item_feature = glt.data.Feature(item_nfeat, glt.utils.id2idx(item_nodes),
                                  split_ratio, device_group_list, device=0)
  node_feature_dict = {
    user_ntype: user_feature,
    item_ntype: item_feature
  }
  u2i_efeat = rank + torch.ones(len(u2i_eids), 10, dtype=torch.float32)
  u2i_feature = glt.data.Feature(u2i_efeat, glt.utils.id2idx(u2i_eids),
                                 split_ratio, device_group_list, device=0)
  i2i_efeat = rank * 2 + torch.ones(len(i2i_eids), 5, dtype=torch.float32)
  i2i_feature = glt.data.Feature(i2i_efeat, glt.utils.id2idx(i2i_eids),
                                 split_ratio, device_group_list, device=0)
  edge_feature_dict = {
    u2i_etype: u2i_feature,
    i2i_etype: i2i_feature
  }

  # Whole-graph node labels (label == node id).
  node_label_dict = {
    user_ntype: torch.arange(vnum_total),
    item_ntype: torch.arange(vnum_total)
  }

  graphs = weighted_graph_dict if weighted else graph_dict
  return glt.distributed.DistDataset(
    2, rank,
    graphs, node_feature_dict, edge_feature_dict, node_label_dict,
    node_pb_dict, edge_pb_dict, edge_dir=edge_dir
  )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_link_loader.py | test/python/test_link_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
from torch_geometric.data import Data, HeteroData
from graphlearn_torch.data import Dataset, DeviceGroup
from graphlearn_torch.sampler import NegativeSampling
from graphlearn_torch.loader import LinkNeighborLoader
def get_edge_index(num_src_nodes, num_dst_nodes, num_edges):
  """Return a random COO ``edge_index`` of shape ``[2, num_edges]``.

  Source ids are drawn uniformly from ``[0, num_src_nodes)`` and destination
  ids from ``[0, num_dst_nodes)``.
  """
  src = torch.randint(num_src_nodes, (num_edges,), dtype=torch.long)
  dst = torch.randint(num_dst_nodes, (num_edges,), dtype=torch.long)
  return torch.stack((src, dst), dim=0)
def unique_edge_pairs(edge_index):
  """Return the set of distinct ``(src, dst)`` pairs in a ``[2, E]`` edge_index."""
  return {tuple(pair) for pair in edge_index.t().tolist()}
class LinkLoaderTestCase(unittest.TestCase):
def setUp(self) -> None:
  # Negative sampling strategies shared by the tests below:
  # 'binary' labels sampled edges 1 (positive) / 0 (negative);
  # 'triplet' draws `amount` negative destinations per positive edge.
  self.bin_neg_sampling = NegativeSampling('binary')
  self.tri_neg_sampling = NegativeSampling('triplet', amount=3)
def test_homo_link_neighbor_loader(self):
  """Binary-negative link loading on a homogeneous graph (100 nodes, 500 edges)."""
  edge_label_index = get_edge_index(100, 50, 500)
  data = Data()
  data.edge_index = edge_label_index
  data.x = torch.arange(100)
  data.edge_attr = torch.arange(500)
  dataset = Dataset()
  dataset.init_graph(
    edge_index=data.edge_index,
    graph_mode='ZERO_COPY',
    directed=False
  )
  dataset.init_node_features(
    node_feature_data=data.x,
    split_ratio=0.2,
    device_group_list=[DeviceGroup(0, [0])],
  )
  dataset.init_edge_features(
    edge_feature_data=data.edge_attr,
    device_group_list=[DeviceGroup(0, [0])],
    device=0)
  loader = LinkNeighborLoader(
    dataset,
    num_neighbors=[3] * 2,
    batch_size=20,
    edge_label_index=edge_label_index,
    neg_sampling=self.bin_neg_sampling,
    shuffle=True,
    with_edge=True
  )
  assert str(loader) == 'LinkNeighborLoader()'
  assert len(loader._seed_loader) == 500 / 20
  for batch in loader:
    assert isinstance(batch, Data)
    # One global id per sampled node/edge.
    assert batch.node.size() == (batch.num_nodes, )
    assert batch.edge.size() == (batch.num_edges, )
    # Node features are the global ids, so they bound-check the sample.
    assert batch.x.size(0) <= 100
    assert batch.x.min() >= 0 and batch.x.max() < 100
    # edge_index is relabeled to local node indices.
    assert batch.edge_index.min() >= 0
    assert batch.edge_index.max() < batch.num_nodes
    assert batch.edge_attr.min() >= 0
    assert batch.edge_attr.max() < 500
    # 20 positive seed edges + 20 binary negatives per batch.
    assert batch.edge_label_index.size(1) == 40
    assert torch.all(batch.edge_label[:20] == 1)
    assert torch.all(batch.edge_label[20:] == 0)
def test_hetero_link_neighbor_loader(self):
  """Binary and triplet link loading on a 'paper'/'author' heterogeneous graph."""
  hetero_data, hetero_dataset = HeteroData(), Dataset()
  hetero_data['paper'].x = torch.arange(100, dtype=torch.float32)
  hetero_data['author'].x = torch.arange(100, 300, dtype=torch.float32)
  hetero_data['paper', 'to', 'paper'].edge_index = get_edge_index(100, 100, 500)
  hetero_data['paper', 'to', 'paper'].edge_attr = torch.arange(500, dtype=torch.float32)
  hetero_data['paper', 'to', 'author'].edge_index = get_edge_index(100, 200, 1000)
  hetero_data['paper', 'to', 'author'].edge_attr = torch.arange(500, 1500, dtype=torch.float32)
  hetero_data['author', 'to', 'paper'].edge_index = get_edge_index(200, 100, 1000)
  hetero_data['author', 'to', 'paper'].edge_attr = torch.arange(1500, 2500, dtype=torch.float32)
  # Repack the PyG HeteroData into the dict form the glt Dataset expects.
  edge_dict, node_feature_dict, edge_feature_dict = {}, {}, {}
  for etype in hetero_data.edge_types:
    edge_dict[etype] = hetero_data[etype]['edge_index']
    edge_feature_dict[etype] = hetero_data[etype]['edge_attr']
  for ntype in hetero_data.node_types:
    node_feature_dict[ntype] = hetero_data[ntype].x.clone(memory_format=torch.contiguous_format)
  hetero_dataset.init_graph(
    edge_index=edge_dict,
    graph_mode='CUDA',
    device=0)
  hetero_dataset.init_node_features(
    node_feature_data=node_feature_dict,
    device_group_list=[DeviceGroup(0, [0])],
    device=0)
  hetero_dataset.init_edge_features(
    edge_feature_data=edge_feature_dict,
    device_group_list=[DeviceGroup(0, [0])],
    device=0)
  # Loader seeded on hetero ('paper','to','author') edges with binary negatives.
  bin_loader = LinkNeighborLoader(
    hetero_dataset,
    num_neighbors=[3] * 3,
    edge_label_index=('paper', 'to', 'author'),
    batch_size=20,
    neg_sampling=self.bin_neg_sampling,
    with_edge=True,
    shuffle=True,
  )
  # Loader seeded on homogeneous-looking ('paper','to','paper') edges.
  homo_seeds_loader = LinkNeighborLoader(
    hetero_dataset,
    num_neighbors=[3] * 3,
    edge_label_index=('paper', 'to', 'paper'),
    batch_size=20,
    neg_sampling=self.bin_neg_sampling,
    with_edge=True,
    shuffle=True,
  )
  assert str(bin_loader) == 'LinkNeighborLoader()'
  assert len(bin_loader._seed_loader) == 1000 / 20
  for batch in bin_loader:
    assert isinstance(batch, HeteroData)
    # Seed edges come back under the reversed ('author','rev_to','paper') type.
    batch_a_rev_p = batch['author', 'rev_to', 'paper']
    assert batch_a_rev_p.edge_label_index.size(1) == 40
    assert torch.all(batch['author', 'rev_to', 'paper'].edge_label[:20] == 1)
    assert torch.all(batch['author', 'rev_to', 'paper'].edge_label[20:] == 0)
    assert batch_a_rev_p.edge.size(0) == batch_a_rev_p.edge_attr.size(0)
  for batch in homo_seeds_loader:
    assert isinstance(batch, HeteroData)
    assert batch['paper', 'to', 'paper'].edge_label_index.size(1) == 40
    assert torch.all(batch['paper', 'to', 'paper'].edge_label[:20] == 1)
    assert torch.all(batch['paper', 'to', 'paper'].edge_label[20:] == 0)
  # Triplet negatives: 3 negative destinations per positive seed edge.
  tri_loader = LinkNeighborLoader(
    hetero_dataset,
    num_neighbors=[3] * 3,
    edge_label_index=('paper', 'to', 'author'),
    batch_size=20,
    neg_sampling=self.tri_neg_sampling,
    with_edge=True,
    shuffle=True,
  )
  for batch in tri_loader:
    assert isinstance(batch, HeteroData)
    assert batch['paper'].src_index.size(0) == 20
    assert batch['author'].dst_pos_index.size(0) == 20
    assert batch['author'].dst_neg_index.size(0) == 20
    assert batch['author'].dst_neg_index.size(1) == 3
def test_hetero_link_neighbor_loader_with_insampling(self):
    """Link-neighbor sampling over a hetero graph with in-edge direction.

    Builds a three-node-type graph (paper, author, institute), initializes a
    CUDA-backed ``Dataset`` with ``edge_dir='in'``, and checks that
    ``LinkNeighborLoader`` with binary negative sampling:

    * yields ``HeteroData`` batches,
    * emits ``batch_size`` positive seed edges followed by ``batch_size``
      sampled negatives (edge labels 1 then 0),
    * attaches edge features aligned one-to-one with the sampled edge ids,
    * only includes node types reachable by in-edge sampling from the seeds
      (asserted per loader below).
    """
    hetero_data, hetero_dataset = HeteroData(), Dataset(edge_dir='in')
    # Node features: disjoint 1-D float ranges so values identify node type.
    hetero_data['paper'].x = torch.arange(100, dtype=torch.float32)
    hetero_data['author'].x = torch.arange(100, 300, dtype=torch.float32)
    hetero_data['institute'].x = torch.arange(300, 350, dtype=torch.float32)
    # Edges: 1000 paper->author and 100 author->institute edges, each with a
    # distinct scalar edge feature drawn from a disjoint range.
    hetero_data['paper', 'to', 'author'].edge_index = get_edge_index(100, 200, 1000)
    hetero_data['paper', 'to', 'author'].edge_attr = torch.arange(500, 1500, dtype=torch.float32)
    hetero_data['author', 'to', 'institute'].edge_index = get_edge_index(200, 50, 100)
    hetero_data['author', 'to', 'institute'].edge_attr = torch.arange(1500, 1600, dtype=torch.float32)
    # Re-pack the HeteroData contents into the plain dicts the Dataset
    # initializers expect.
    edge_dict, node_feature_dict, edge_feature_dict = {}, {}, {}
    for etype in hetero_data.edge_types:
        edge_dict[etype] = hetero_data[etype]['edge_index']
        edge_feature_dict[etype] = hetero_data[etype]['edge_attr']
    for ntype in hetero_data.node_types:
        node_feature_dict[ntype] = hetero_data[ntype].x.clone(
            memory_format=torch.contiguous_format)
    hetero_dataset.init_graph(
        edge_index=edge_dict,
        graph_mode='CUDA',
        device=0)
    hetero_dataset.init_node_features(
        node_feature_data=node_feature_dict,
        device_group_list=[DeviceGroup(0, [0])],
        device=0)
    hetero_dataset.init_edge_features(
        edge_feature_data=edge_feature_dict,
        device_group_list=[DeviceGroup(0, [0])],
        device=0)
    loader1 = LinkNeighborLoader(
        hetero_dataset,
        num_neighbors=[3],
        edge_label_index=('paper', 'to', 'author'),
        batch_size=20,
        neg_sampling=self.bin_neg_sampling,
        with_edge=True,
        shuffle=True,
    )
    assert str(loader1) == 'LinkNeighborLoader()'
    for batch in loader1:
        # 1-hop in-sampling from paper->author seeds touches only these types.
        self.assertTrue(set(batch.node_types) == set(['paper', 'author']))
        assert isinstance(batch, HeteroData)
        batch_p2a = batch['paper', 'to', 'author']
        # 20 positive seeds + 20 negatives per batch; positives first.
        assert batch_p2a.edge_label_index.size(1) == 40
        assert torch.all(batch_p2a.edge_label[:20] == 1)
        assert torch.all(batch_p2a.edge_label[20:] == 0)
        # Edge features must line up with the sampled edge ids.
        assert batch_p2a.edge.size(0) == batch_p2a.edge_attr.size(0)
    loader2 = LinkNeighborLoader(
        hetero_dataset,
        num_neighbors=[3] * 2,
        edge_label_index=('author', 'to', 'institute'),
        batch_size=20,
        neg_sampling=self.bin_neg_sampling,
        with_edge=True,
        shuffle=True,
    )
    for batch in loader2:
        # 2-hop in-sampling from author->institute seeds also reaches 'paper'.
        self.assertTrue(
            set(batch.node_types) == set(['author', 'institute', 'paper']))
        assert isinstance(batch, HeteroData)
        # Renamed from the misleading `batch_p2a`: this is author->institute.
        batch_a2i = batch['author', 'to', 'institute']
        assert batch_a2i.edge_label_index.size(1) == 40
        assert torch.all(batch_a2i.edge_label[:20] == 1)
        assert torch.all(batch_a2i.edge_label[20:] == 0)
        assert batch_a2i.edge.size(0) == batch_a2i.edge_attr.size(0)
# Allow the test module to be run directly (e.g. `python this_file.py`).
if __name__ == "__main__":
    unittest.main()
# NOTE(review): removed trailing non-code artifact ("Subsets and Splits /
# No community queries yet / ...") — page-scrape residue appended after the
# end of the module; it is not valid Python and was never part of the tests.