code stringlengths 17 6.64M |
|---|
class SpatialModulation(nn.Module):
'Spatial Semantic Modulation.\n\n This module is used to align spatial semantics of features in the\n multi-depth pyramid. For each but the top-level feature, a stack\n of convolutions with level-specific stride are applied to it, matching\n its spatial shape and re... |
class AuxHead(nn.Module):
"Auxiliary Head.\n\n This auxiliary head is appended to receive stronger supervision,\n leading to enhanced semantics.\n\n Args:\n in_channels (int): Channel number of input features.\n out_channels (int): Channel number of output features.\n loss_weight (fl... |
class TemporalModulation(nn.Module):
'Temporal Rate Modulation.\n\n The module is used to equip TPN with a similar flexibility for temporal\n tempo modulation as in the input-level frame pyramid.\n\n Args:\n in_channels (int): Channel number of input features.\n out_channels (int): Channel ... |
@NECKS.register_module()
class TPN(nn.Module):
"TPN neck.\n\n This module is proposed in `Temporal Pyramid Network for Action Recognition\n <https://arxiv.org/pdf/2004.03548.pdf>`_\n\n Args:\n in_channels (tuple[int]): Channel numbers of input features tuple.\n out_channels (int): Channel n... |
@RECOGNIZERS.register_module()
class AudioRecognizer(BaseRecognizer):
'Audio recognizer model framework.'
def forward(self, audios, label=None, return_loss=True):
'Define the computation performed at every call.'
if return_loss:
if (label is None):
raise ValueError... |
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
'Base class for recognizers.\n\n All recognizers should subclass it.\n All subclass should overwrite:\n\n - Methods:``forward_train``, supporting to forward when training.\n - Methods:``forward_test``, supporting to forward when testing.\n\n Args:... |
@RECOGNIZERS.register_module()
class Recognizer2D(BaseRecognizer):
'2D recognizer model framework.'
def forward_train(self, imgs, labels, **kwargs):
'Defines the computation performed at every call when training.'
assert self.with_cls_head
batches = imgs.shape[0]
imgs = imgs.r... |
@RECOGNIZERS.register_module()
class Recognizer3D(BaseRecognizer):
'3D recognizer model framework.'
def forward_train(self, imgs, labels, **kwargs):
'Defines the computation performed at every call when training.'
assert self.with_cls_head
imgs = imgs.reshape((((- 1),) + imgs.shape[2:... |
class Teacher(nn.Module):
    """Thin wrapper chaining a backbone and a classification head.

    Args:
        backbone (nn.Module): Feature extractor applied first.
        cls_head (nn.Module): Head applied to the extracted features.
    """

    def __init__(self, backbone, cls_head):
        super().__init__()
        self.backbone = backbone
        self.cls_head = cls_head

    def forward(self, x):
        """Run the backbone, then the head, and return the head's output."""
        feats = self.backbone(x)
        return self.cls_head(feats)
|
def load_teacher_model(path='checkpoints/k400/dualformer_base_patch244_window877.pth', last_embed_dim=1024, output_dim=400, info=False):
state_dict = torch.load(path, map_location='cpu')
backbone = DualFormer(pretrained=path, pretrained2d=True, video_size=(32, 224, 224), patch_size=(2, 4, 4), in_chans=3, num_... |
@RECOGNIZERS.register_module()
class Recognizer3D_TL(BaseRecognizer):
'3D recognizer model framework.'
def __init__(self, backbone, cls_head=None, neck=None, train_cfg=None, test_cfg=None):
super(Recognizer3D_TL, self).__init__(backbone, cls_head, neck, train_cfg, test_cfg)
self.teacher = loa... |
def collect_env():
    """Collect basic environment info and append the MMAction2 version.

    Returns:
        dict: Environment info with an added ``'MMAction2'`` entry of the
        form ``'<version>+<7-digit git hash>'``.
    """
    env_info = collect_basic_env()
    env_info['MMAction2'] = f'{mmaction.__version__}+{get_git_hash(digits=7)}'
    return env_info
|
def import_module_error_func(module_name):
'When a function is imported incorrectly due to a missing module, raise\n an import error when the function is called.'
def decorate(func):
def new_func(*args, **kwargs):
raise ImportError(f'Please install {module_name} to use {func.__name__}... |
def import_module_error_class(module_name):
'When a class is imported incorrectly due to a missing module, raise an\n import error when the class is instantiated.'
def decorate(cls):
def import_error_init(*args, **kwargs):
raise ImportError(f'Please install {module_name} to use {cls._... |
def get_root_logger(log_file=None, log_level=logging.INFO):
'Use ``get_logger`` method in mmcv to get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If ``log_file`` is specified, a FileHandler\n will also be added. The name ... |
def get_random_string(length=15):
    """Return a random string of letters and digits.

    Args:
        length (int): Length of the random string. Default: 15.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
|
def get_thread_id():
    """Get the OS-level (native) id of the current thread.

    Prefers :func:`threading.get_native_id` (Python 3.8+), which is portable
    across platforms and architectures. Falls back to the raw ``gettid``
    syscall; note syscall number 186 is only valid on x86-64 Linux, which is
    the single case the original hard-coded call supported.

    Returns:
        int: Native thread id of the calling thread.
    """
    import threading
    try:
        return threading.get_native_id()
    except AttributeError:
        # Pre-3.8 fallback: 186 is __NR_gettid on x86-64 Linux only.
        return ctypes.CDLL('libc.so.6').syscall(186)
|
def get_shm_dir():
    """Return the shared-memory directory used for temporary files.

    Returns:
        str: Always ``'/dev/shm'`` (the Linux tmpfs mount).
    """
    return '/dev/shm'
|
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
'Optimizer hook for distributed training.'
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=(- 1), use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_... |
def is_parallel_module(module):
'Check if a module is a parallel module.\n\n The following 3 modules (and their subclasses) are regarded as parallel\n modules: DataParallel, DistributedDataParallel,\n MMDistributedDataParallel (the deprecated version).\n\n Args:\n module (nn.Module): The module... |
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
'Recompute and update the batch norm stats to make them more precise.\n\n During\n training both BN stats and the weight are changing after every iteration,\n so the running average can not precisely reflect the actual ... |
class PreciseBNHook(Hook):
'Precise BN hook.\n\n Attributes:\n dataloader (DataLoader): A PyTorch dataloader.\n num_iters (int): Number of iterations to update the bn stats.\n Default: 200.\n interval (int): Perform precise bn interval (by epochs). Default: 1.\n '
def __... |
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif (x.find('rc') != (- 1)):
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_inf... |
def save_checkpoint(model, filename, optimizer=None, meta=None, amp=False):
'Save checkpoint to file.\n\n The checkpoint will have 3 fields: ``meta``, ``state_dict`` and\n ``optimizer``. By default ``meta`` will contain version and time info.\n\n Args:\n model (Module): Module whose params are to ... |
class EpochBasedRunnerAmp(mmcv.runner.EpochBasedRunner):
def __init__(self, model, batch_processor=None, optimizer=None, work_dir=None, logger=None, meta=None, max_iters=None, max_epochs=None, amp=False):
super().__init__(model, batch_processor, optimizer, work_dir, logger, meta, max_iters, max_epochs)
... |
def readme():
    """Read ``README.md`` from the current directory and return its text."""
    with open('README.md', encoding='utf-8') as fh:
        return fh.read()
|
def get_version():
    """Execute the project's version file and return its ``__version__``."""
    with open(version_file, 'r') as f:
        # exec() without an explicit namespace drops the names defined by
        # the version file into this function's locals() (CPython detail),
        # which is why the lookup below works. Do not restructure casually.
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
|
def parse_requirements(fname='requirements.txt', with_version=True):
'Parse the package dependencies listed in a requirements file but strips\n specific versioning information.\n\n Args:\n fname (str): path to requirements file\n with_version (bool, default=False): if True include version spec... |
def cal_train_time(log_dicts, args):
for (i, log_dict) in enumerate(log_dicts):
print(f"{('-' * 5)}Analyze train time of {args.json_logs[i]}{('-' * 5)}")
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['tim... |
def plot_curve(log_dicts, args):
if (args.backend is not None):
plt.switch_backend(args.backend)
sns.set_style(args.style)
legend = args.legend
if (legend is None):
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'... |
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser('plot_curve', help='parser for plotting curves')
parser_plt.add_argument('json_logs', type=str, nargs='+', help='path of train log in json format')
parser_plt.add_argument('--keys', type=str, nargs='+', default=['top1_acc'], help='the met... |
def add_time_parser(subparsers):
parser_time = subparsers.add_parser('cal_train_time', help='parser for computing the average time per training iteration')
parser_time.add_argument('json_logs', type=str, nargs='+', help='path of train log in json format')
parser_time.add_argument('--include-outliers', act... |
def parse_args():
    """Build the CLI for json-log analysis and parse the arguments."""
    parser = argparse.ArgumentParser(description='Analyze Json Log')
    subparsers = parser.add_subparsers(dest='task', help='task parser')
    # Register the two supported sub-commands.
    add_plot_parser(subparsers)
    add_time_parser(subparsers)
    return parser.parse_args()
|
def load_json_logs(json_logs):
log_dicts = [dict() for _ in json_logs]
for (json_log, log_dict) in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for line in log_file:
log = json.loads(line.strip())
if ('epoch' not in log):
... |
def main():
    """Dispatch the selected analysis task over the given json logs.

    Raises:
        ValueError: If any input path does not end with ``.json``, or the
            task name is not one of the registered sub-commands.
    """
    args = parse_args()
    json_logs = args.json_logs
    for json_log in json_logs:
        # Raise (not assert) so validation survives `python -O`.
        if not json_log.endswith('.json'):
            raise ValueError(f'{json_log} is not a json log file')
    log_dicts = load_json_logs(json_logs)
    # Explicit dispatch instead of eval() on CLI input: only the two
    # registered sub-commands are callable.
    tasks = {'plot_curve': plot_curve, 'cal_train_time': cal_train_time}
    if args.task not in tasks:
        raise ValueError(f'unknown task {args.task!r}')
    tasks[args.task](log_dicts, args)
|
def main():
parser = argparse.ArgumentParser(description='Benchmark dataloading')
parser.add_argument('config', help='train config file path')
args = parser.parse_args()
cfg = Config.fromfile(args.config)
logger = get_root_logger()
logger.info(f'MMAction2 Version: {__version__}')
logger.in... |
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 benchmark a recognizer')
parser.add_argument('config', help='test config file path')
parser.add_argument('--log-interval', default=10, help='interval of logging')
parser.add_argument('--fuse-conv-bn', action='store_true', help='... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.backbone.pretrained = None
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
data_loade... |
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 check datasets')
parser.add_argument('config', help='test config file path')
parser.add_argument('--options', nargs='+', action=DictAction, default={}, help='custom options for evaluation, the key-value pair in xxx=yyy format will b... |
@PIPELINES.register_module()
class RandomSampleFrames():
def __call__(self, results):
'Select frames to verify.\n\n Select the first, last and three random frames, Required key is\n "total_frames", added or modified key is "frame_inds".\n Args:\n results (dict): The result... |
def _do_check_videos(lock, dataset, output_file, idx):
try:
dataset[idx]
except:
lock.acquire()
with open(output_file, 'a') as f:
f.write((dataset.video_infos[idx]['filename'] + '\n'))
lock.release()
|
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the results saved in pkl/yaml/json format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('results', help='Results in pkl/yaml/json format')
parser.add_argument('--eval', type=str, nargs=... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert (args.eval is not None)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.results)
kwa... |
def parse_args():
    """Parse CLI arguments: a config path and an optional input shape."""
    parser = argparse.ArgumentParser(description='Train a recognizer')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[340, 256],
        help='input image size')
    return parser.parse_args()
|
def main():
args = parse_args()
if (len(args.shape) == 1):
input_shape = (1, 3, args.shape[0], args.shape[0])
elif (len(args.shape) == 2):
input_shape = ((1, 3) + tuple(args.shape))
elif (len(args.shape) == 4):
input_shape = tuple(args.shape)
elif (len(args.shape) == 5):
... |
def parse_args():
    """Parse CLI arguments for printing a resolved config file."""
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--options', nargs='+', action=DictAction, help='arguments in dict')
    return parser.parse_args()
|
def main():
    """Load the config, apply ``--options`` overrides, and print it."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    print(f'Config:\n{cfg.pretty_text}')
|
def parse_args():
parser = argparse.ArgumentParser(description='Fusing multiple scores')
parser.add_argument('--scores', nargs='+', help='list of scores', default=['demo/fuse/rgb.pkl', 'demo/fuse/flow.pkl'])
parser.add_argument('--coefficients', nargs='+', type=float, help='coefficients of each score file... |
def main():
args = parse_args()
assert (len(args.scores) == len(args.coefficients))
score_list = args.scores
score_list = [load(f) for f in score_list]
if args.apply_softmax:
def apply_softmax(scores):
return [softmax(score) for score in scores]
score_list = [apply_sof... |
def cuhk17_top1():
'Assign label for each proposal with the cuhk17 result, which is the #2\n entry in http://activity-net.org/challenges/2017/evaluation.html.'
if (not osp.exists('cuhk_anet17_pred.json')):
os.system('wget https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json')
... |
def parse_args():
parser = argparse.ArgumentParser(description='Report detection mAP forActivityNet proposal file')
parser.add_argument('--proposal', type=str, help='proposal file')
parser.add_argument('--gt', type=str, default='data/ActivityNet/anet_anno_val.json', help='groundtruth file')
parser.add... |
def main():
global args, cls_funcs
args = parse_args()
func = cls_funcs[args.cls]
func()
anet_detection = ActivityNetLocalization(args.gt, args.det_output, tiou_thresholds=np.linspace(0.5, 0.95, 10), verbose=True)
(mAP, average_mAP) = anet_detection.evaluate()
print(f'''[RESULTS] Performan... |
def parse_args():
    """Parse CLI arguments for the checkpoint publishing script."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
|
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
if ('optimizer' in checkpoint):
del checkpoint['optimizer']
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
... |
def main():
    """Entry point: strip and publish the checkpoint named on the CLI."""
    cli = parse_args()
    process_checkpoint(cli.in_file, cli.out_file)
|
def load_video_infos(ann_file):
'Load the video annotations.\n\n Args:\n ann_file (str): A json file path of the annotation file.\n\n Returns:\n list[dict]: A list containing annotations for videos.\n '
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_... |
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir, pgm_proposals_thread, **kwargs):
'Generate proposals using multi-process.\n\n Args:\n ann_file (str): A json file path of the annotation file for\n all videos to be processed.\n tem_results_dir (str): Directory to rea... |
def generate_features(ann_file, tem_results_dir, pgm_proposals_dir, pgm_features_dir, pgm_features_thread, **kwargs):
'Generate proposals features using multi-process.\n\n Args:\n ann_file (str): A json file path of the annotation file for\n all videos to be processed.\n tem_results_di... |
def parse_args():
    """Parse CLI arguments for the proposal generation module."""
    parser = argparse.ArgumentParser(description='Proposal generation module')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        '--mode',
        choices=['train', 'test'],
        default='test',
        help='train or test')
    return parser.parse_args()
|
def main():
print('Begin Proposal Generation Module')
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
tem_results_dir = cfg.tem_results_dir
pgm_proposals_dir = cfg.pgm_proposals_dir
pgm_features_dir = cfg.pgm_features_dir
if (args.mode == 'test'):
generate_proposals(cfg... |
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 clip-level feature extraction')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--video-list', help='video file list')
parser.add_arg... |
def turn_off_pretrained(cfg):
    """Recursively clear every ``pretrained`` field in a config tree.

    Args:
        cfg: A dict-like config supporting ``in``, ``.values()`` and
            attribute assignment (e.g. an mmcv ``Config``/``ConfigDict``).
    """
    if 'pretrained' in cfg:
        cfg.pretrained = None
    # Descend into any nested dict-like sub-config (backbone, neck, ...).
    children = [value for value in cfg.values() if isinstance(value, dict)]
    for child in children:
        turn_off_pretrained(child)
|
def text2tensor(text, size=256):
    """Encode ``text`` as a fixed-size, zero-padded uint8 tensor.

    Args:
        text (str): Text to encode; must be strictly shorter than ``size``
            and consist of code points that fit in uint8.
        size (int): Length of the output tensor. Default: 256.

    Returns:
        torch.Tensor: 1-D uint8 tensor of length ``size``.
    """
    codes = [ord(ch) for ch in text]
    assert len(codes) < size
    padded = codes + [0] * (size - len(codes))
    return torch.from_numpy(np.array(padded, dtype=np.uint8))
|
def tensor2text(tensor):
    """Decode a zero-padded code-point tensor back into a string.

    Zero entries (padding) are skipped; every other element is interpreted
    as a character code point.
    """
    return ''.join(chr(code) for code in tensor if code != 0)
|
def inference_pytorch(args, cfg, distributed, data_loader):
'Get predictions by pytorch models.'
turn_off_pretrained(cfg.model)
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
if (len(cfg.module_hooks) > 0):
register_module_hooks(model, cfg.module_hooks)
fp16_c... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
if (cfg.model['test_cfg'] is None):
cfg.model['test_cfg'] = dict(feature_extraction=True)
else:
cfg.model['test_cfg']['feature_extraction'] = True
output_config = cfg.get('o... |
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', default=None, help='output result file in pkl/yaml/json... |
def turn_off_pretrained(cfg):
    """Recursively set ``pretrained`` to ``None`` throughout a config tree.

    Args:
        cfg: A dict-like config supporting ``in``, ``.values()`` and
            attribute assignment (e.g. an mmcv ``Config``/``ConfigDict``).
    """
    if 'pretrained' in cfg:
        cfg.pretrained = None
    for value in cfg.values():
        # Recurse into nested dict-like sub-configs only.
        if isinstance(value, dict):
            turn_off_pretrained(value)
|
def inference_pytorch(args, cfg, distributed, data_loader):
'Get predictions by pytorch models.'
if (args.average_clips is not None):
if ((cfg.model.get('test_cfg') is None) and (cfg.get('test_cfg') is None)):
cfg.model.setdefault('test_cfg', dict(average_clips=args.average_clips))
... |
def inference_tensorrt(ckpt_path, distributed, data_loader, batch_size):
'Get predictions by TensorRT engine.\n\n For now, multi-gpu mode and dynamic tensor shape are not supported.\n '
assert (not distributed), 'TensorRT engine inference only supports single gpu mode.'
import tensorrt as trt
fr... |
def inference_onnx(ckpt_path, distributed, data_loader, batch_size):
'Get predictions by ONNX.\n\n For now, multi-gpu mode and dynamic tensor shape are not supported.\n '
assert (not distributed), 'ONNX inference only supports single gpu mode.'
import onnx
import onnxruntime as rt
onnx_model... |
def main():
args = parse_args()
if (args.tensorrt and args.onnx):
raise ValueError('Cannot set onnx mode and tensorrt mode at the same time.')
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
output_config = cfg.get('output_config', {})
if args.out:
outp... |
def parse_args():
parser = argparse.ArgumentParser(description='Train a recognizer')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if (args.work_dir is not None):
cfg.work_dir = args.work_dir
elif (cfg.get('work_dir', None) is N... |
def graph_collate(batch):
    """Collate ``(feature, label, neighbor)`` samples into a mini-batch.

    Features and labels are stacked along a new batch dimension; the
    variable-sized neighbor tensors are kept as a plain list.

    Returns:
        list: ``[features, labels, neighbors]``.
    """
    feats = torch.stack(tuple(sample[0] for sample in batch), dim=0)
    labels = torch.stack(tuple(sample[1] for sample in batch), dim=0)
    neighbors = [sample[2] for sample in batch]
    return [feats, labels, neighbors]
|
def continuum(root='/data/', name='reddit', data_type='train', task_type=0, k_hop=1, download=True, thres_nodes=50):
name = name.lower()
if (name in ['reddit', 'flickr']):
return ContinuumLS(root=root, name=name, data_type=data_type, task_type=task_type, download=download, k_hop=k_hop, thres_nodes=thr... |
class Continuum(VisionDataset):
def __init__(self, root='~/.dgl', name='cora', data_type='train', k_hop=1, download=True, task_type=0, thres_nodes=50):
super(Continuum, self).__init__(root)
self.name = name
self.thres_nodes = thres_nodes
self.k_hop = k_hop
self.download()
... |
class ContinuumLS(VisionDataset):
def __init__(self, root='/data/', name='reddit', data_type='train', task_type=0, download=None, k_hop=1, thres_nodes=50):
super(ContinuumLS, self).__init__(root)
self.name = name
self.k_hop = k_hop
self.thres_nodes = thres_nodes
(adj_full,... |
class ContinuumOGB(VisionDataset):
def __init__(self, root='~/.dgl', name='"ogbn-arxiv"', data_type='train', download=True, task_type=0, thres_nodes=50, k_hop=1):
super(ContinuumOGB, self).__init__(root)
self.name = name
self.k_hop = k_hop
self.thres_nodes = thres_nodes
se... |
class APPNP(nn.Module):
'\n APPNP: ICLR 2019\n Predict then Propagate: Graph Neural Networks Meet Personalized Pagerank\n https://arxiv.org/pdf/1810.05997.pdf\n '
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0], alpha=0.1):
super().__init__()
(self.feat_len, s... |
class APP(nn.Module):
'\n APPNP: ICLR 2019\n Predict then Propagate: Graph Neural Networks Meet Personalized Pagerank\n https://arxiv.org/pdf/1810.05997.pdf\n \n A modified version for the graph lifelong learning\n '
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0], alph... |
class GraphApp(nn.Module):
    """Propagation step used by the APP/APPNP-style models in this file.

    Blends sum-aggregated node features with the initial hidden state ``h``
    using teleport probability ``alpha``.
    """
    def __init__(self, alpha):
        super().__init__()
        # alpha: teleport (restart) probability weighting the initial h.
        self.alpha = alpha
    def forward(self, x, neighbor_agg, h, neighbor):
        # Sum-aggregate neighbor features into x (see sum_aggregation).
        (x, neighbor_agg) = sum_aggregation(x, neighbor_agg)
        x = (((1 - self.alpha) * x) + (self.alpha * h))
        # NOTE(review): the freshly computed neighbor_agg is discarded and
        # the untouched `neighbor` argument is returned instead; the sibling
        # GraphAppnp updates the neighbor list. Confirm this asymmetry is
        # intentional.
        return (x, neighbor)
|
class GraphAppnp(nn.Module):
def __init__(self, alpha):
super().__init__()
self.alpha = alpha
def forward(self, x, neighbor_agg, h, neighbor):
(x, neighbor_agg) = sum_aggregation(x, neighbor_agg)
x = (((1 - self.alpha) * x) + (self.alpha * h))
neighbor_agg = [(((1 - s... |
class GAT(nn.Module):
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0]):
'\n GAT: Graph Attention Network, ICLR, 2018\n https://arxiv.org/pdf/1710.10903.pdf\n '
super().__init__()
self.feat1 = GraphAttn(in_channels=feat_len, out_channels=hidden[0])... |
class GraphAttn(nn.Module):
def __init__(self, in_channels, out_channels, alpha=0.2):
super().__init__()
(self.in_channels, self.out_channels) = (in_channels, out_channels)
self.tran = nn.Linear(in_channels, out_channels)
self.att1 = nn.Linear(out_channels, 1, bias=False)
... |
class GCN(nn.Module):
'\n A variant of\n GCN: Graph Convolutional Network, ICLR 2017\n https://arxiv.org/pdf/1609.02907.pdf\n '
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0]):
super().__init__()
(self.feat_len, self.hidden) = (feat_len, num_class)
se... |
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias)
def forward(self, x, neighbor):
x = self.linear(x)
neighbor = [self.linear(n) for n in neighbor]
(x, neig... |
def sum_aggregation(x, neighbor):
    """Sum-aggregate per-node neighbor features.

    Args:
        x (torch.Tensor): Node features; the first dimension is the batch.
        neighbor (list[torch.Tensor]): One tensor of neighbor features per
            node in the batch.

    Returns:
        tuple: ``(aggregated, shifted)`` where ``aggregated[i]`` is
        ``neighbor[i].sum(0)`` and ``shifted[i]`` is ``neighbor[i]`` with
        the node's own feature ``x[i]`` added to every row.
    """
    n = x.shape[0]
    aggregated = torch.stack([neighbor[i].sum(0) for i in range(n)])
    shifted = [x[i].unsqueeze(0) + neighbor[i] for i in range(n)]
    return (aggregated, shifted)
|
class KTransCAT(nn.Module):
'\n Using a logit like ResNet and DenseNet to encode the neighbor in different level\n '
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0, 0], k=1):
super(KTransCAT, self).__init__()
self.k = k
c = [1, 4, hidden[1]]
f = [fea... |
class AttnKTransCAT(nn.Module):
'\n Using a logit like ResNet and DenseNet to encode the neighbor in different level\n '
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0, 0], k=1):
super(AttnKTransCAT, self).__init__()
self.k = k
c = [1, 4, hidden[1]]
... |
class MLP(nn.Module):
'\n A variant of\n GCN: Graph Convolutional Network, ICLR 2017\n https://arxiv.org/pdf/1609.02907.pdf\n '
def __init__(self, feat_len, num_class, hidden=[64, 32], dropout=[0]):
super().__init__()
(self.feat_len, self.hidden) = (feat_len, num_class)
se... |
class EWCLoss(nn.Module):
def __init__(self, model):
super().__init__()
self.fisher = [0 for p in model.parameters() if p.requires_grad]
self.criterion = nn.CrossEntropyLoss()
self.update(model)
def update(self, model):
self.model = copy.deepcopy(model)
self.w... |
class FeatBrd1d(nn.Module):
'\n Feature Broadcasting Layer for multi-channel 1D features.\n Input size should be (n_batch, in_channels, n_features)\n Output size is (n_batch, out_channels, n_features)\n Args:\n in_channels (int): number of feature input channels\n out_channels (int): num... |
class Mlp(nn.Module):
def __init__(self, in_channels, in_features, out_channels, out_features):
super(Mlp, self).__init__()
(self.out_channels, self.out_features) = (out_channels, out_features)
self.conv = nn.Conv1d(in_channels, (out_channels * out_features), kernel_size=in_features, bias... |
class FeatTrans1d(nn.Module):
'\n Feature Transforming Layer for multi-channel 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n Args:\n in_channels (int): number of feature input channels\n out_channels (int)... |
class FeatTransKhop(nn.Module):
'\n Feature Transforming Layer for K-hop multi-channel 1D features.\n Taking K as channels and encode the k-hop into the channel of the nodex\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n Args:\n ... |
class FeatTransKCat(nn.Module):
'\n Feature Transforming Layer for K-hop caten 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n This will encode the k-hop into the channel of the nodex\n Args:\n in_channels (int)... |
class AttnFeatTrans1d(nn.Module):
'\n Feature Transforming Layer for multi-channel 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n Args:\n in_channels (int): number of feature input channels\n out_channels (... |
class AttnFeatTrans1dSoft(nn.Module):
'\n Feature Transforming Layer for multi-channel 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n Args:\n in_channels (int): number of feature input channels\n out_channe... |
class AttnFeatTransKCat(nn.Module):
'\n Feature Transforming Layer for K-hop caten 1D features.\n Input size should be (n_batch, in_channels, in_features)\n Output size is (n_batch, out_channels, out_features)\n This will encode the k-hop into the channel of the nodex\n Args:\n in_channels (... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.