repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/utils/llama_convert/fs_to_hf.py | from transformers.models.llama import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
from fengshen.models.megatron import mpu
from fengshen.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from fengshen.models.llama.configuration_llama import LlamaConfig as FengshenConfig
import argparse
import torch
from tqdm import tqdm
def convert_config(fs_config: FengshenConfig):
    """Build the HuggingFace ``LlamaConfig`` equivalent of a fengshen config.

    Sizes (vocab, hidden, intermediate, layers, heads) and the RMSNorm
    epsilon are copied from *fs_config*; the remaining fields are fixed
    llama-1 style defaults (silu activation, 2048 context, llama special
    token ids, untied embeddings).

    Args:
        fs_config: fengshen ``LlamaConfig`` to translate.

    Returns:
        A ``transformers`` ``LlamaConfig`` describing the same architecture.
    """
    hf_config = LlamaConfig(
        vocab_size=fs_config.vocab_size,
        hidden_size=fs_config.hidden_size,
        intermediate_size=fs_config.intermediate_size,
        num_hidden_layers=fs_config.num_hidden_layers,
        num_attention_heads=fs_config.num_attention_heads,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=fs_config.rms_norm_epsilon,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        # Fix: propagate the checkpoint dtype, matching the convert_config in
        # merge_lt_mp_to_hf.py; previously the dtype was silently dropped.
        torch_dtype=fs_config.torch_dtype,
    )
    return hf_config
def merge_data(module):
    """``nn.Module.apply`` hook: invoke a sub-module's ``merge`` method if it has one.

    Modules without a ``merge`` attribute are left untouched.
    """
    if not hasattr(module, "merge"):
        return
    module.merge()
if __name__ == "__main__":
    # Command-line entry point: load a fengshen llama checkpoint and re-emit
    # it as a HuggingFace transformers checkpoint (weights + tokenizer).
    parser = argparse.ArgumentParser(
        description="Convert fengshen llama to hugginface format.")
    parser.add_argument(
        "--input_path",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--output_path",
        help="Location to write fengshen mode",
    )
    args = parser.parse_args()
    # World size 1 / rank 0: load the fengshen model without tensor parallelism.
    mpu.set_model_parallel_world_size(1)
    mpu.set_model_parallel_rank(0)
    fs_model = FengshenLlama.from_pretrained(args.input_path)
    # Call each sub-module's merge() hook if present (see merge_data);
    # presumably folds parallel/fused weights back into plain tensors — TODO confirm.
    fs_model.apply(merge_data)
    tokenizer = LlamaTokenizer.from_pretrained(args.input_path)
    fs_config = fs_model.config
    hf_config = convert_config(fs_config)
    hf_model = LlamaForCausalLM(hf_config)
    # embed_in
    hf_model.model.embed_tokens.load_state_dict(
        {"weight": fs_model.llama.embed_in.word_embeddings.weight}
    )
    # embed_out
    hf_model.lm_head.load_state_dict({"weight": fs_model.embed_out.final_linear.weight})
    # final_norm
    hf_model.model.norm.load_state_dict({"weight": fs_model.llama.final_layer_norm.scale})
    num_heads = hf_config.num_attention_heads
    hidden_size = hf_config.hidden_size
    dims_per_head = hidden_size // num_heads
    # layer
    for layer_i in tqdm(range(fs_config.num_hidden_layers)):
        hf_layer = hf_model.model.layers[layer_i]
        fs_layer = fs_model.llama.layers[layer_i]
        state_dict = {}
        # fengshen stores the fused QKV projection interleaved per head:
        # (num_heads, 3, dims_per_head, hidden); chunking dim 1 recovers q/k/v.
        sharded_qkv = fs_layer.attention.query_key_value.weight.view(num_heads, 3, dims_per_head, hidden_size)
        q, k, v = sharded_qkv.chunk(3, dim=1)
        state_dict["self_attn.q_proj.weight"] = q.reshape(num_heads * dims_per_head, hidden_size)
        state_dict["self_attn.k_proj.weight"] = k.reshape(num_heads * dims_per_head, hidden_size)
        state_dict["self_attn.v_proj.weight"] = v.reshape(num_heads * dims_per_head, hidden_size)
        state_dict["self_attn.o_proj.weight"] = fs_layer.attention.dense.weight
        # Just take one
        state_dict["self_attn.rotary_emb.inv_freq"] = fs_layer.attention.rotary_emb.inv_freq
        ## average layernorm stats over mp ranks
        state_dict["input_layernorm.weight"] = fs_layer.input_layernorm.scale
        state_dict["post_attention_layernorm.weight"] = fs_layer.post_attention_layernorm.scale
        ## mlp params (fengshen w1/w3/w2 == HF gate/up/down projections)
        state_dict["mlp.gate_proj.weight"] = fs_layer.mlp.w1.weight
        state_dict["mlp.up_proj.weight"] = fs_layer.mlp.w3.weight
        state_dict["mlp.down_proj.weight"] = fs_layer.mlp.w2.weight
        ## load state_dict into layer
        hf_layer.load_state_dict(state_dict)
    hf_model.save_pretrained(args.output_path)
    tokenizer.save_pretrained(args.output_path)
| 3,811 | 36.009709 | 110 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/utils/llama_convert/convert_fs_llama_tp.py | import argparse
import os
import json
import torch
from fengshen.models.llama.configuration_llama import LlamaConfig
# State-dict key layout of the fengshen-style llama checkpoint being split.
__HF_NORM_PREFIX__ = "llama.final_layer_norm"
__HF_EMBED_IN_KEY__ = "llama.embed_in.word_embeddings.weight"
__HF_EMBED_OUT_KEY__ = "embed_out.final_linear.weight"
__HF_LAYER_PREFIX__ = "llama.layers"
# Name of the HuggingFace sharded-checkpoint index file.
__WEIGHT_MAP_FILE__ = "pytorch_model.bin.index.json"
def make_output_dir(path, parallel_size):
    """Create the output directory layout for model-parallel shards::

        root_dir
        |--- part_0
        |___ part_1

    Args:
        path: root output directory.
        parallel_size: number of ``part_<i>`` sub-directories to create.

    Fix: the previous ``try: os.mkdir(...) except: pass`` swallowed *every*
    error (missing parents, permissions, a file sitting at the path) and
    could silently leave the tree half-created. ``os.makedirs`` with
    ``exist_ok=True`` keeps creation idempotent while surfacing real errors.
    """
    os.makedirs(path, exist_ok=True)
    for i in range(parallel_size):
        os.makedirs(os.path.join(path, f"part_{i}"), exist_ok=True)
def save_splits(input_dir, output_dir, helper, config):
    """Write each model-parallel rank's shard into its ``part_<rank>`` directory.

    For every rank this emits a copy of the original weight-map index file,
    the model config, and one ``.bin`` file per original shard file holding
    only the tensors that rank owns.
    """
    index_path = os.path.join(input_dir, __WEIGHT_MAP_FILE__)
    with open(index_path, 'r') as fp:
        weight_map = json.load(fp)
    for rank, rank_sd in enumerate(helper.sequential_cache):
        part_dir = os.path.join(output_dir, f"part_{rank}")
        # Re-emit the index so each part is loadable as a sharded HF checkpoint.
        with open(os.path.join(part_dir, __WEIGHT_MAP_FILE__), 'w') as f:
            json.dump(weight_map, f)
        config.save_pretrained(part_dir)
        for file_name, keys in helper.revert_weight_map.items():
            shard_sd = {key: rank_sd[key] for key in keys if key in rank_sd}
            torch.save(shard_sd, os.path.join(part_dir, file_name))
def get_loaders(root_dir, weight_map):
    """Open every checkpoint shard referenced by an HF ``weight_map`` index.

    Args:
        root_dir: directory containing the shard files.
        weight_map: parsed ``pytorch_model.bin.index.json`` content, with a
            ``'weight_map'`` dict of ``tensor_key -> shard_file_name``.

    Returns:
        ``(weight_map_with_loader, revert_weight_map, loaders)`` where
        ``weight_map_with_loader`` maps each tensor key to the loaded state
        dict of the shard containing it, ``revert_weight_map`` maps each
        shard file name to the list of tensor keys stored in it, and
        ``loaders`` is a view over the loaded shard state dicts.
    """
    loaders_map = {}
    weight_map_with_loader = {}
    revert_weight_map = {}
    for k, v in weight_map['weight_map'].items():
        if v in revert_weight_map:
            revert_weight_map[v].append(k)
        else:
            revert_weight_map[v] = [k]
        # Fix: load each shard file only once. The old code called torch.load
        # for every tensor key, re-reading the same (potentially multi-GB)
        # file once per tensor it contains.
        if v not in loaders_map:
            loaders_map[v] = torch.load(os.path.join(root_dir, v), map_location='cpu')
        weight_map_with_loader[k] = loaders_map[v]
    return weight_map_with_loader, revert_weight_map, loaders_map.values()
class Helper:
    """Bookkeeping for splitting one checkpoint into model-parallel shards.

    ``sequential_cache[rank]`` accumulates the state-dict entries belonging
    to each output rank; ``shard`` slices a tensor along a given dimension
    so that ``sharded[rank]`` is that rank's partition.
    """

    def __init__(self, args):
        self.num_output_shards = args.model_parallel_size
        self.sequential_cache = [{} for _ in range(args.model_parallel_size)]
        self.init_weight_map(args)

    def init_weight_map(self, args):
        # Parse the HF shard index and open every referenced shard file.
        index_path = os.path.join(args.input_dir, __WEIGHT_MAP_FILE__)
        with open(index_path, 'r') as fp:
            weight_map = json.load(fp)
        self.weight_map, self.revert_weight_map, self.loaders = get_loaders(
            args.input_dir, weight_map)

    def del_loaded(self, key: str):
        # Drop the tensor from its source state dict once consumed,
        # releasing memory as the conversion progresses.
        if key in self.weight_map:
            del self.weight_map[key][key]

    def shard(self, x, dim):
        """Split *x* into ``num_output_shards`` equal parts along *dim*."""
        shape = list(x.shape)
        assert shape[dim] % self.num_output_shards == 0
        per_shard = shape[dim] // self.num_output_shards
        reshaped = x.view(
            *(shape[:dim] + [self.num_output_shards, per_shard] + shape[dim + 1:]))
        return torch.movedim(reshaped, 0, dim)

    def add_sequential_shard(self, dictionary):
        # Each rank receives its own slice of an already-sharded tensor.
        for key, tensor in dictionary.items():
            for rank in range(self.num_output_shards):
                self.sequential_cache[rank][key] = tensor[rank].clone()

    def add_sequential_duplicates(self, dictionary):
        # Every rank receives a full copy of the tensor.
        for key, tensor in dictionary.items():
            for rank in range(self.num_output_shards):
                self.sequential_cache[rank][key] = tensor.clone()

    def add_sequential(self, dictionary, rank):
        # Store tensors for one specific rank only.
        for key, tensor in dictionary.items():
            self.sequential_cache[rank][key] = tensor.clone()
if __name__ == '__main__':
    # Command-line entry point: split a (non-parallel) fengshen-style llama
    # checkpoint into model_parallel_size shards, one per part_<rank> dir.
    parser = argparse.ArgumentParser(
        description="covert hf model to hf model with mp"
    )
    parser.add_argument(
        "--input_dir",
        type=str,
        help="Path to hf model dir",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        help="Path to hf model dir",
    )
    parser.add_argument(
        "--model_parallel_size",
        type=int,
        default=1,
        help="Path to hf model dir",
    )
    args = parser.parse_args()
    make_output_dir(args.output_dir, args.model_parallel_size)
    helper = Helper(args)
    config = LlamaConfig.from_pretrained(args.input_dir)
    num_output_shards = args.model_parallel_size
    num_heads_per_output_shard = config.num_attention_heads // num_output_shards
    dims_per_head = config.hidden_size // config.num_attention_heads
    for k, v in helper.weight_map.items():
        # embed in and out: split the vocab dimension across ranks
        if k in [__HF_EMBED_IN_KEY__, __HF_EMBED_OUT_KEY__]:
            helper.add_sequential_shard({k: helper.shard(v[k], dim=0)})
        elif k.startswith(__HF_NORM_PREFIX__):
            helper.add_sequential_duplicates({k: v[k]})
        elif k.startswith(__HF_LAYER_PREFIX__):
            # QKV weight and bias: reshape so each rank gets its own heads
            if k.find("query_key_value") != -1:
                output_shape = [num_output_shards, num_heads_per_output_shard *
                                3 * dims_per_head] + list(v[k].shape[1:])
                sharded = v[k].view(output_shape)
                for out_rank in range(num_output_shards):
                    helper.add_sequential({k: sharded[out_rank]}, out_rank)
            # rotary emb: identical on every rank
            elif k.find("rotary_emb.inv_freq") != -1:
                helper.add_sequential_duplicates({k: v[k]})
            # layer_norm: identical on every rank
            elif k.find("layernorm") != -1:
                helper.add_sequential_duplicates({k: v[k]})
            # linear
            elif k.find("dense") != -1 or k.find("mlp") != -1:
                # split along the input dimension (dim 1) for row-parallel layers
                if k.find("w2") != -1 or k.find("attention") != -1:
                    if k.find('weight') != -1:
                        shard = helper.shard(v[k], dim=1)
                        helper.add_sequential_shard({k: shard})
                    # bias is not split
                    else:
                        helper.add_sequential_duplicates({k: v[k]})
                # split along the output dimension (dim 0) for column-parallel layers
                else:
                    shard = helper.shard(v[k], dim=0)
                    helper.add_sequential_shard({k: shard})
            else:
                print(f"WARNING: unexcept key {k}")
        else:
            print(f"WARNING: unexcept key {k}")
        # Free the source tensor once its shards are cached.
        helper.del_loaded(k)
    save_splits(args.input_dir, args.output_dir, helper, config)
| 6,643 | 34.72043 | 92 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/utils/llama_convert/hf_to_fs.py | from transformers.models.llama import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
from fengshen.models.megatron import mpu
from fengshen.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from fengshen.models.llama.configuration_llama import LlamaConfig as FengshenConfig
import argparse
import torch
from tqdm import tqdm
def convert_config(hf_config: LlamaConfig):
    """Translate a HuggingFace ``LlamaConfig`` into a fengshen ``LlamaConfig``.

    Architecture sizes, activation, initializer range, norm epsilon and dtype
    are copied from *hf_config*; everything else is a fixed set of
    fengshen/megatron-specific knobs (rotary embeddings, rmsnorm, flash
    attention, llama-style MLP, no biases, no LoRA).
    """
    fs_config = FengshenConfig(
        vocab_size=hf_config.vocab_size,
        hidden_size=hf_config.hidden_size,
        num_hidden_layers=hf_config.num_hidden_layers,
        num_attention_heads=hf_config.num_attention_heads,
        intermediate_size=hf_config.intermediate_size,
        hidden_act=hf_config.hidden_act,
        rotary_pct=1,
        rotary_emb_base=10000,
        max_position_embeddings=hf_config.max_position_embeddings,
        initializer_range=hf_config.initializer_range,
        rms_norm_epsilon=hf_config.rms_norm_eps,
        torch_dtype=hf_config.torch_dtype,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=False,
    )
    # Fengshen-specific settings that are not constructor arguments.
    extra_settings = {
        "llama_mlp_multiple_of": 256,
        "init_method": "small_init",
        "hidden_dropout": 0,
        "output_layer_init_method": "wang_init",
        "pos_emb": "rotary",
        "norm": "rmsnorm",
        "gpt_j_residual": False,
        "gpt_j_tied": False,
        "apply_query_key_layer_scaling": False,
        "attention_softmax_in_fp32": False,
        "scaled_masked_softmax_fusion": True,
        "scaled_upper_triang_masked_softmax_fusion": False,
        "bias_gelu_fusion": False,
        "attention_dropout": 0,
        "output_layer_parallelism": "column",
        "eod_mask_loss": False,
        "bias_dropout_fusion": False,
        "attention_config": [[["flash"], "all"]],
        "mlp_type": "llama",
        "use_bias_in_attn_linear": False,
        "lora": False,
    }
    for attr_name, attr_value in extra_settings.items():
        setattr(fs_config, attr_name, attr_value)
    assert fs_config.intermediate_size % fs_config.llama_mlp_multiple_of == 0, \
        f"{fs_config.intermediate_size} % {fs_config.llama_mlp_multiple_of}"
    return fs_config
def find_closest_multiple(current_num, n):
    """Round *current_num* up to the nearest multiple of *n*.

    Returns *current_num* unchanged when it is already a multiple of *n*.
    """
    remainder = current_num % n
    if remainder == 0:
        return current_num
    return current_num + (n - remainder)
if __name__ == "__main__":
    # Command-line entry point: load a HuggingFace llama checkpoint and
    # re-emit it as a fengshen checkpoint (weights + tokenizer).
    parser = argparse.ArgumentParser(
        description="Convert raw LLaMA checkpoints to fengshen format.")
    parser.add_argument(
        "--input_path",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--output_path",
        help="Location to write fengshen mode",
    )
    parser.add_argument(
        "--multiplier",
        default = 1,
        help="Make embedding_size an integer multiple of multiplier",
    )
    args = parser.parse_args()
    hf_model = LlamaForCausalLM.from_pretrained(args.input_path)
    tokenizer = LlamaTokenizer.from_pretrained(args.input_path, use_fast=False)
    hf_config = hf_model.config
    fs_config = convert_config(hf_config)
    # used for FengshenLlama initialized (no tensor parallelism, CPU init)
    mpu.set_model_parallel_world_size(1)
    mpu.set_model_parallel_rank(0)
    mpu.set_init_params_in_cuda(False)
    fs_model = FengshenLlama(fs_config)
    # embed_in
    fs_model.llama.embed_in.load_state_dict(
        {"word_embeddings.weight": hf_model.model.embed_tokens.weight}
    )
    # embed_out
    fs_model.embed_out.load_state_dict(
        {"final_linear.weight": hf_model.lm_head.weight}
    )
    # Pad the vocab so embedding_size is a multiple of --multiplier.
    fs_model.resize_token_embeddings(find_closest_multiple(fs_model.config.vocab_size, int(args.multiplier)))
    # final_norm
    fs_model.llama.final_layer_norm.load_state_dict(
        {"scale": hf_model.model.norm.weight}
    )
    num_heads = hf_config.num_attention_heads
    hidden_size = hf_config.hidden_size
    dims_per_head = hidden_size // num_heads

    # NOTE(review): permute_rotary is defined but never called below —
    # confirm whether the q/k permutation step was intentionally skipped.
    def permute_rotary(w):
        assert w.shape == (num_heads, dims_per_head, hidden_size)
        return w.view(num_heads, dims_per_head // 2, 2, hidden_size) \
            .transpose(1, 2) \
            .reshape(num_heads, dims_per_head, hidden_size)
    # layer
    for layer_i in tqdm(range(fs_config.num_hidden_layers)):
        fs_layer = fs_model.llama.layers[layer_i]
        hf_layer = hf_model.model.layers[layer_i]
        # Linear
        attn_wo = hf_layer.self_attn.o_proj.weight
        mlp_w1 = hf_layer.mlp.gate_proj.weight
        mlp_w2 = hf_layer.mlp.down_proj.weight
        mlp_w3 = hf_layer.mlp.up_proj.weight
        # Attention: fuse q/k/v into fengshen's interleaved
        # (num_heads, 3, dims_per_head, hidden) QKV layout.
        w_q = hf_layer.self_attn.q_proj.weight.view(num_heads, dims_per_head, hidden_size)
        w_k = hf_layer.self_attn.k_proj.weight.view(num_heads, dims_per_head, hidden_size)
        w_v = hf_layer.self_attn.v_proj.weight.view(num_heads, dims_per_head, hidden_size)
        sharded_qkv = torch.stack([w_q, w_k, w_v], dim=1)
        sharded_qkv = sharded_qkv.view(num_heads*dims_per_head*3, hidden_size)
        # Duplicated
        input_layernorm = hf_layer.input_layernorm.weight
        post_attention_layernorm = hf_layer.post_attention_layernorm.weight
        rotary_inv = hf_layer.self_attn.rotary_emb.inv_freq
        fs_layer.load_state_dict({
            "attention.query_key_value.weight": sharded_qkv,
            # Sharded layers
            "attention.dense.weight": attn_wo.clone(),
            "mlp.w1.weight": mlp_w1.clone(),
            "mlp.w2.weight": mlp_w2.clone(),
            "mlp.w3.weight": mlp_w3.clone(),
            # Duplicated layers
            "input_layernorm.scale": input_layernorm,
            "post_attention_layernorm.scale": post_attention_layernorm,
            "attention.rotary_emb.inv_freq": rotary_inv,
        })
    fs_model.save_pretrained(args.output_path)
    tokenizer.save_pretrained(args.output_path)
| 5,921 | 38.218543 | 109 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/utils/llama_convert/merge_lt_mp_to_hf.py | import argparse
import os
import json
import torch
from fengshen_inner.models.llama.configuration_llama import LlamaConfig as FengshenConfig
from fengshen_inner.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
from fengshen_inner.models.megatron import mpu
from glob import glob
import copy
from tqdm import tqdm
# State-dict key layout of the fengshen llama checkpoint being merged.
__FS_FINAL_NORM_KEY__ = "llama.final_layer_norm.scale"
__FS_EMBED_IN_KEY__ = "llama.embed_in.word_embeddings.weight"
__FS_EMBED_OUT_KEY__ = "embed_out.final_linear.weight"
__FS_LAYER_PREFIX__ = "llama.layers"
def convert_config(fs_config: FengshenConfig):
    """Build the HuggingFace ``LlamaConfig`` matching a fengshen config.

    Sizes, norm epsilon and dtype come from *fs_config*; the remaining
    fields are fixed llama-1 style defaults.
    """
    copied_kwargs = dict(
        vocab_size=fs_config.vocab_size,
        hidden_size=fs_config.hidden_size,
        intermediate_size=fs_config.intermediate_size,
        num_hidden_layers=fs_config.num_hidden_layers,
        num_attention_heads=fs_config.num_attention_heads,
        rms_norm_eps=fs_config.rms_norm_epsilon,
        torch_dtype=fs_config.torch_dtype,
    )
    return LlamaConfig(
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **copied_kwargs,
    )
def merge_data(module):
    """Trigger the optional ``merge`` hook on *module* (used via ``Module.apply``)."""
    has_merge_hook = hasattr(module, "merge")
    if has_merge_hook:
        module.merge()
def get_loaders(root_path, mp_size, fs_config):
    """Load every tensor-parallel rank's deepspeed state dict as a fengshen model.

    For each rank, reads ``mp_rank_<NN>_model_states.pt``, strips the wrapper
    prefix from every key (everything before ``llama``/``embed_out``), loads
    the result into a ``FengshenLlama``, runs its ``merge`` hooks, and keeps a
    deep copy of the resulting state dict.

    Args:
        root_path: directory holding the ``mp_rank_*_model_states.pt`` files.
        mp_size: number of tensor-parallel ranks to load.
        fs_config: fengshen config used to instantiate the scratch model.

    Returns:
        List of state dicts, one per rank, in rank order.
    """
    fs_model = FengshenLlama(fs_config)
    loaders = []
    for mp in range(mp_size):
        file = os.path.join(root_path, f"mp_rank_{mp:02}_model_states.pt")
        print(f"loading {file}")
        sd = torch.load(file, map_location='cpu')
        new_sd = {}
        for k, v in sd["module"].items():
            try:
                anchor = k.index('llama')
            # Fix: only catch the "substring not found" ValueError instead of
            # a bare except, which also swallowed KeyboardInterrupt etc.
            except ValueError:
                if 'embed_out' in k:
                    anchor = k.index('embed_out')
                else:
                    anchor = 0
            # Fix: slice off the leading wrapper prefix directly. The old
            # k.replace(prefix, "") removed *every* occurrence of the prefix
            # string, not just the leading one.
            new_sd[k[anchor:]] = v
        fs_model.load_state_dict(new_sd)
        fs_model.apply(merge_data)
        loaders.append(copy.deepcopy(fs_model.state_dict()))
    return loaders
if __name__ == '__main__':
    # Command-line entry point: merge the tensor-parallel shards of a
    # lightning/deepspeed checkpoint back into a single HuggingFace model.
    parser = argparse.ArgumentParser(
        description="covert hf model to gxy hf model with mp"
    )
    # pretrained config in fengshen format
    parser.add_argument(
        "--pretrained_model_path",
        type=str,
        help="Path to hf pretrained model dir",
    )
    # tensor/model parallel size of the checkpoint
    parser.add_argument(
        "--model_parallel_size",
        type=int,
        default=1,
        help="Path to hf model dir",
    )
    # lightning checkpoint directory
    parser.add_argument(
        "--ckpt_path",
        type=str,
        help="Path to lightning checkpoint dir",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        help="Path to hf model dir",
    )
    args = parser.parse_args()
    mpu.set_model_parallel_world_size(args.model_parallel_size)
    #mpu.set_init_params_in_cuda(False)
    mpu.set_model_parallel_rank(0)
    fs_config = FengshenConfig.from_pretrained(args.pretrained_model_path)
    # One state dict per tensor-parallel rank, in rank order.
    loaded_tp_ranks = get_loaders(args.ckpt_path, args.model_parallel_size, fs_config)
    config = convert_config(fs_config)
    tokenizer = LlamaTokenizer.from_pretrained(args.pretrained_model_path)
    num_output_shards = 1
    num_heads_per_output_shard = config.num_attention_heads
    dims_per_head = config.hidden_size // config.num_attention_heads
    hf_model = LlamaForCausalLM(config)
    num_heads = config.num_attention_heads
    hidden_size = config.hidden_size
    dims_per_head = hidden_size // num_heads
    mp_partitions = args.model_parallel_size
    # EMBED_IN: vocab dimension was sharded across ranks -> concat on dim 0
    hf_model.model.embed_tokens.load_state_dict(
        {"weight": torch.cat([t[__FS_EMBED_IN_KEY__] for t in loaded_tp_ranks], dim=0)})
    # EMBED_OUT
    hf_model.lm_head.load_state_dict(
        {"weight": torch.cat([t[__FS_EMBED_OUT_KEY__] for t in loaded_tp_ranks], dim=0)})
    # FINAL_LAYER_NORM: duplicated on every rank -> average over ranks
    hf_model.model.norm.load_state_dict(
        {"weight": (sum([t[__FS_FINAL_NORM_KEY__] for t in loaded_tp_ranks])) / mp_partitions})
    # layer
    for layer_i in tqdm(range(config.num_hidden_layers)):
        hf_layer = hf_model.model.layers[layer_i]
        state_dict = {}
        # QKV heads were split across ranks -> concat on dim 0, then undo the
        # interleaved (num_heads, 3, dims_per_head, hidden) fused layout.
        sharded_qkv = torch.cat(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.attention.query_key_value.weight"] for t in loaded_tp_ranks], dim=0)
        sharded_qkv = sharded_qkv.view(num_heads, 3, dims_per_head, hidden_size)
        q, k, v = sharded_qkv.chunk(3, dim=1)
        state_dict["self_attn.q_proj.weight"] = q.reshape(num_heads * dims_per_head, hidden_size)
        state_dict["self_attn.k_proj.weight"] = k.reshape(num_heads * dims_per_head, hidden_size)
        state_dict["self_attn.v_proj.weight"] = v.reshape(num_heads * dims_per_head, hidden_size)
        # Row-parallel output projection -> concat on the input dim (dim 1)
        state_dict["self_attn.o_proj.weight"] = torch.cat(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.attention.dense.weight"] for t in loaded_tp_ranks], dim=1)
        # Identical on every rank; take rank 0's copy.
        state_dict["self_attn.rotary_emb.inv_freq"] = \
            loaded_tp_ranks[0][f"{__FS_LAYER_PREFIX__}.{layer_i}.attention.rotary_emb.inv_freq"]
        # average layernorm stats over mp ranks
        state_dict["input_layernorm.weight"] = (sum(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.input_layernorm.scale"] for t in loaded_tp_ranks])) / mp_partitions
        state_dict["post_attention_layernorm.weight"] = (sum(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.post_attention_layernorm.scale"] for t in loaded_tp_ranks])) / mp_partitions
        # mlp params (fengshen w1/w3/w2 == HF gate/up/down projections)
        state_dict["mlp.gate_proj.weight"] = torch.cat(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.mlp.w1.weight"] for t in loaded_tp_ranks], dim=0)
        state_dict["mlp.up_proj.weight"] = torch.cat(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.mlp.w3.weight"] for t in loaded_tp_ranks], dim=0)
        state_dict["mlp.down_proj.weight"] = torch.cat(
            [t[f"{__FS_LAYER_PREFIX__}.{layer_i}.mlp.w2.weight"] for t in loaded_tp_ranks], dim=1)
        # load state_dict into layer
        hf_layer.load_state_dict(state_dict)
    hf_model.save_pretrained(args.output_path)
    tokenizer.save_pretrained(args.output_path)
| 6,335 | 37.4 | 125 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/universal_datamodule/universal_datamodule.py | from pytorch_lightning import LightningDataModule
from typing import Optional
from torch.utils.data import DataLoader, DistributedSampler
from fengshen.models.megatron import mpu
def get_consume_samples(data_model: LightningDataModule) -> int:
    """Return the number of training samples consumed so far.

    Prefers an explicit ``consumed_samples`` counter on the lightning module;
    otherwise estimates it from the trainer's global step, per-device batch
    size, world size and gradient-accumulation factor.
    """
    trainer = data_model.trainer
    if hasattr(trainer.lightning_module, 'consumed_samples'):
        consumed_samples = trainer.lightning_module.consumed_samples
        print('get consumed samples from model: {}'.format(consumed_samples))
        return consumed_samples
    optimizer_steps = max(0, trainer.global_step - 1)
    consumed_samples = optimizer_steps * data_model.hparams.train_batchsize \
        * trainer.world_size * trainer.accumulate_grad_batches
    print('calculate consumed samples: {}'.format(consumed_samples))
    return consumed_samples
class UniversalDataModule(LightningDataModule):
    """Generic LightningDataModule serving train/val/test/predict loaders.

    Datasets come from one of three sources (in priority order): a dataset
    dict passed to ``__init__``, a fengshen ``fs_datasets`` name
    (``--datasets_name``), or raw local files loaded with HF ``datasets``.
    """

    @ staticmethod
    def add_data_specific_args(parent_args):
        parser = parent_args.add_argument_group('Universal DataModule')
        parser.add_argument('--num_workers', default=8, type=int)
        parser.add_argument('--dataloader_workers', default=2, type=int)
        parser.add_argument('--train_batchsize', default=16, type=int)
        parser.add_argument('--val_batchsize', default=16, type=int)
        parser.add_argument('--test_batchsize', default=16, type=int)
        parser.add_argument('--datasets_name', type=str, default=None)
        parser.add_argument('--train_datasets_field', type=str, default='train')
        parser.add_argument('--val_datasets_field', type=str, default='validation')
        parser.add_argument('--test_datasets_field', type=str, default='test')
        parser.add_argument('--train_file', type=str, default=None)
        parser.add_argument('--val_file', type=str, default=None)
        parser.add_argument('--test_file', type=str, default=None)
        parser.add_argument('--raw_file_type', type=str, default='json')
        parser.add_argument('--sampler_type', type=str,
                            choices=['single',
                                     'random'],
                            default='random')
        # NOTE(review): this help string looks copy-pasted from an image
        # pipeline; --use_mpu actually toggles megatron data-parallel
        # rank/size lookups in get_custom_sampler. Runtime string left as-is.
        parser.add_argument('--use_mpu', action="store_true", default=False,
                            help="Whether to center crop images before resizing to resolution"
                            )
        return parent_args

    def __init__(
        self,
        tokenizer,
        collate_fn,
        args,
        datasets=None,
        **kwargs,
    ):
        super().__init__()
        # If no datasets object is passed in, self.datasets can still be
        # replaced from outside the object with whatever the model needs.
        if datasets is not None:
            self.datasets = datasets
        elif args.datasets_name is not None:
            from fengshen.data.fs_datasets import load_dataset
            print('---------begin to load datasets {}'.format(args.datasets_name))
            self.datasets = load_dataset(
                args.datasets_name, num_proc=args.num_workers)
            print('---------ending load datasets {}'.format(args.datasets_name))
        else:
            print('---------begin to load datasets from local file')
            from datasets import load_dataset
            self.datasets = load_dataset(args.raw_file_type,
                                         data_files={
                                             args.train_datasets_field: args.train_file,
                                             args.val_datasets_field: args.val_file,
                                             args.test_datasets_field: args.test_file})
            print('---------end to load datasets from local file')
        self.tokenizer = tokenizer
        self.collate_fn = collate_fn
        self.save_hyperparameters(args)

    def get_custom_sampler(self, ds):
        """Build a resumable pretraining batch sampler for *ds*.

        Returns a PretrainingRandomSampler or PretrainingSampler depending on
        ``--sampler_type``; consumed samples are recovered via
        ``get_consume_samples`` so training can resume mid-epoch.
        """
        from .universal_sampler import PretrainingRandomSampler
        from .universal_sampler import PretrainingSampler
        world_size = self.trainer.world_size
        consumed_samples = get_consume_samples(self)
        # use the user default sampler
        data_parallel_rank = mpu.get_data_parallel_rank() if self.hparams.use_mpu else self.trainer.global_rank
        data_parallel_size = mpu.get_data_parallel_world_size() if self.hparams.use_mpu else world_size
        if self.hparams.sampler_type == 'random':
            return PretrainingRandomSampler(
                total_samples=len(ds),
                # consumed_samples cal by global steps
                consumed_samples=consumed_samples,
                micro_batch_size=self.hparams.train_batchsize,
                data_parallel_rank=data_parallel_rank,
                data_parallel_size=data_parallel_size,
                epoch=self.trainer.current_epoch,
            )
        elif self.hparams.sampler_type == 'single':
            return PretrainingSampler(
                total_samples=len(ds),
                # consumed_samples cal by global steps
                consumed_samples=consumed_samples,
                micro_batch_size=self.hparams.train_batchsize,
                data_parallel_rank=data_parallel_rank,
                data_parallel_size=data_parallel_size,
            )
        else:
            raise Exception('Unknown sampler type: {}'.format(self.hparams.sampler_type))

    def setup(self, stage: Optional[str] = None) -> None:
        # Datasets are loaded eagerly in __init__, so setup is a no-op.
        return

    def train_dataloader(self):
        ds = self.datasets[self.hparams.train_datasets_field]

        collate_fn = self.collate_fn
        if hasattr(ds, 'collate_fn'):
            collate_fn = ds.collate_fn

        # NOTE(review): assumes the Trainer's replace_sampler_ddp flag was
        # merged into hparams — confirm against the launcher script.
        if self.hparams.replace_sampler_ddp is False:
            return DataLoader(
                ds,
                batch_sampler=self.get_custom_sampler(ds),
                num_workers=self.hparams.dataloader_workers,
                collate_fn=collate_fn,
                pin_memory=True,
            )
        return DataLoader(
            ds,
            batch_size=self.hparams.train_batchsize,
            num_workers=self.hparams.dataloader_workers,
            collate_fn=collate_fn,
            pin_memory=True,
        )

    def val_dataloader(self):
        ds = self.datasets[self.hparams.val_datasets_field]

        collate_fn = self.collate_fn
        if hasattr(ds, 'collate_fn'):
            collate_fn = ds.collate_fn

        return DataLoader(
            ds,
            batch_size=self.hparams.val_batchsize,
            shuffle=False,
            num_workers=self.hparams.dataloader_workers,
            collate_fn=collate_fn,
            sampler=DistributedSampler(
                ds, shuffle=False),
            pin_memory=True,
        )

        # return DataLoader(
        # ds, shuffle=False, batch_size=self.hparams.val_batchsize, pin_memory=False, collate_fn=collate_fn,
        # )

    def test_dataloader(self):
        ds = self.datasets[self.hparams.test_datasets_field]

        # NOTE(review): falls back to 'collater' here, while train/val look
        # for 'collate_fn' — confirm this asymmetry is intentional.
        collate_fn = self.collate_fn
        if collate_fn is None and hasattr(ds, 'collater'):
            collate_fn = ds.collater

        return DataLoader(
            ds,
            batch_size=self.hparams.test_batchsize,
            shuffle=False,
            num_workers=self.hparams.dataloader_workers,
            collate_fn=collate_fn,
            sampler=DistributedSampler(
                ds, shuffle=False),
            pin_memory=True,
        )

    def predict_dataloader(self):
        # Mirrors test_dataloader: same split, same collate fallback.
        ds = self.datasets[self.hparams.test_datasets_field]

        collate_fn = self.collate_fn
        if collate_fn is None and hasattr(ds, 'collater'):
            collate_fn = ds.collater

        return DataLoader(
            ds,
            batch_size=self.hparams.test_batchsize,
            shuffle=False,
            num_workers=self.hparams.dataloader_workers,
            collate_fn=collate_fn,
            sampler=DistributedSampler(
                ds, shuffle=False),
            pin_memory=True,
        )
| 7,829 | 40.210526 | 112 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/universal_datamodule/universal_sampler.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloaders."""
import torch
class PretrainingSampler:
    """Sequential batch sampler for data-parallel pretraining.

    Walks sample indices in order starting at ``consumed_samples``, groups
    them into global batches of ``micro_batch_size * data_parallel_size``
    indices, and yields only this rank's ``micro_batch_size``-sized slice of
    each global batch. A trailing partial batch is dropped unless
    ``drop_last`` is False.
    """

    def __init__(self, total_samples, consumed_samples, micro_batch_size,
                 data_parallel_rank, data_parallel_size, drop_last=True):
        # Keep a copy of input params for later use.
        self.total_samples = total_samples
        self.consumed_samples = consumed_samples
        self.micro_batch_size = micro_batch_size
        self.data_parallel_rank = data_parallel_rank
        self.micro_batch_times_data_parallel_size = \
            self.micro_batch_size * data_parallel_size
        self.drop_last = drop_last

        # Sanity checks.
        assert self.total_samples > 0, \
            'no sample to consume: {}'.format(self.total_samples)
        assert self.consumed_samples < self.total_samples, \
            'no samples left to consume: {}, {}'.format(self.consumed_samples,
                                                        self.total_samples)
        assert self.micro_batch_size > 0
        assert data_parallel_size > 0
        assert self.data_parallel_rank < data_parallel_size, \
            'data_parallel_rank should be smaller than data size: {}, ' \
            '{}'.format(self.data_parallel_rank, data_parallel_size)

    def __len__(self):
        # Number of complete global batches remaining in the dataset.
        return self.total_samples // self.micro_batch_times_data_parallel_size

    def get_start_end_idx(self):
        # This rank's slice within a global batch.
        begin = self.data_parallel_rank * self.micro_batch_size
        return begin, begin + self.micro_batch_size

    def __iter__(self):
        stride = self.micro_batch_times_data_parallel_size
        start_idx, end_idx = self.get_start_end_idx()
        for chunk_start in range(self.consumed_samples, self.total_samples, stride):
            chunk_end = min(chunk_start + stride, self.total_samples)
            global_batch = list(range(chunk_start, chunk_end))
            # Only the final chunk can be partial; drop it unless requested.
            if len(global_batch) == stride or not self.drop_last:
                yield global_batch[start_idx:end_idx]
class PretrainingRandomSampler:
    """Shuffled batch sampler for data-parallel pretraining.

    Each rank draws from its own contiguous bucket of indices; the bucket is
    permuted with a torch generator seeded by ``epoch``, so shuffling is
    deterministic per epoch and identical across restarts.
    """

    def __init__(self, total_samples, consumed_samples, micro_batch_size,
                 data_parallel_rank, data_parallel_size, epoch):
        # Keep a copy of input params for later use.
        self.total_samples = total_samples
        self.consumed_samples = consumed_samples
        self.micro_batch_size = micro_batch_size
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.micro_batch_times_data_parallel_size = \
            self.micro_batch_size * data_parallel_size
        # Samples that do not fill a complete global batch.
        self.last_batch_size = \
            self.total_samples % self.micro_batch_times_data_parallel_size
        self.epoch = epoch

        # Sanity checks.
        assert self.total_samples > 0, \
            'no sample to consume: {}'.format(self.total_samples)
        assert self.micro_batch_size > 0
        assert data_parallel_size > 0
        assert self.data_parallel_rank < data_parallel_size, \
            'data_parallel_rank should be smaller than data size: {}, ' \
            '{}'.format(self.data_parallel_rank, data_parallel_size)

    def __len__(self):
        return self.total_samples // self.micro_batch_times_data_parallel_size

    def __iter__(self):
        active_total_samples = self.total_samples - self.last_batch_size
        # Where we are within the current epoch (supports mid-epoch resume).
        current_epoch_samples = self.consumed_samples % active_total_samples
        assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0

        # data sharding and random sampling: each rank owns a contiguous
        # bucket of bucket_size indices starting at rank * bucket_size.
        bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) \
            * self.micro_batch_size
        bucket_offset = current_epoch_samples // self.data_parallel_size
        start_idx = self.data_parallel_rank * bucket_size

        # Seeding by epoch makes the permutation reproducible per epoch.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        random_idx = torch.randperm(bucket_size, generator=g).tolist()
        idx_range = [start_idx + x for x in random_idx[bucket_offset:]]

        batch = []
        # Last batch if not complete will be dropped.
        for idx in idx_range:
            batch.append(idx)
            if len(batch) == self.micro_batch_size:
                # NOTE: consumed_samples is advanced in place while iterating,
                # so a subsequent iteration resumes where this one stopped.
                self.consumed_samples += self.micro_batch_times_data_parallel_size
                yield batch
                batch = []

    def set_epoch(self, epoch):
        # Called once per epoch so the next __iter__ reshuffles.
        self.epoch = epoch
| 5,181 | 40.126984 | 89 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/mmap_dataloader/mmap_index_dataset.py | import numpy as np
import torch
from typing import List
from torch.utils.data import Dataset
class MMapIndexDataset(Dataset):
    """Memory-mapped dataset backed by paired ``.npy`` index / ``.bin`` data files.

    For every tensor name ``t`` and data path prefix ``p`` two files are
    expected on disk:

    * ``p_t.npy`` -- an ``(N, 2)`` array of ``[start, end)`` offsets
    * ``p_t.bin`` -- a flat C-``long`` buffer of the concatenated samples

    Samples from all data paths are exposed as one concatenated dataset.
    """

    # datapaths: path prefixes of all memory-mapped files
    # input_tensor_name: input tensor names, e.g. ['input_ids']; each name is
    # stored in its own pair of files per data path.
    def __init__(self, datapaths: List[str], input_tensor_name: List[str]):
        dict_idx_fp = {}
        dict_bin_fp = {}
        idx_len = []
        total_len = 0  # renamed from `len`: don't shadow the builtin
        for tensor_name in input_tensor_name:
            idx_fp = []
            bin_fp = []
            total_len = 0
            per_file_len = []
            for data_path in datapaths:
                idx_fp.append(np.load(
                    data_path + '_' + tensor_name + '.npy', mmap_mode='r'))
                bin_fp.append(np.memmap(
                    data_path + '_' + tensor_name + '.bin',
                    dtype='long',
                    mode='r'))
                total_len += idx_fp[-1].shape[0]
                per_file_len.append(idx_fp[-1].shape[0])
            # Fix: keep exactly one length entry per data path. The old code
            # appended to idx_len for every (tensor_name, data_path) pair, so
            # with more than one tensor name __getitem__ walked duplicated
            # lengths and resolved indices to the wrong file.
            idx_len = per_file_len
            dict_idx_fp[tensor_name] = idx_fp
            dict_bin_fp[tensor_name] = bin_fp
        # All tensor names are assumed to have the same number of samples.
        self._len = total_len
        self._input_tensor_name = input_tensor_name
        self._dict_idx_fp = dict_idx_fp
        self._dict_bin_fp = dict_bin_fp
        self._idx_len = idx_len

    def __len__(self):
        return self._len

    def __getitem__(self, idx):
        sample = {}
        # Locate which underlying file `idx` falls into and its local offset.
        file_i = 0
        for file_i in range(len(self._idx_len)):
            if idx >= self._idx_len[file_i]:
                idx -= self._idx_len[file_i]
            else:
                break
        for tensor_name in self._input_tensor_name:
            start = self._dict_idx_fp[tensor_name][file_i][idx, 0]
            end = self._dict_idx_fp[tensor_name][file_i][idx, 1]
            sample[tensor_name] = torch.tensor(
                self._dict_bin_fp[tensor_name][file_i][start:end],
                dtype=torch.long)
        return sample
| 1,815 | 32.62963 | 81 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/mmap_dataloader/mmap_datamodule.py | from typing import Optional
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from fengshen.data.mmap_index_dataset import MMapIndexDataset
class MMapDataModule(LightningDataModule):
    """LightningDataModule serving MMapIndexDataset train/valid/test splits.

    The datasets are memory-mapped, so dataloader workers share pages instead
    of copying data; ``collate_fn`` is supplied by the caller.
    """

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register dataloader arguments on *parent_args* and return it."""
        parser = parent_args.add_argument_group('MMAP DataModule')
        parser.add_argument('--num_workers', default=8, type=int)
        parser.add_argument('--train_batchsize', default=32, type=int)
        parser.add_argument('--eval_batchsize', default=32, type=int)
        parser.add_argument('--test_batchsize', default=32, type=int)
        parser.add_argument('--train_datas', default=[
            './train_datas'
        ], type=str, nargs='+')
        parser.add_argument('--valid_datas', default=[
            './valid_datas'
        ], type=str, nargs='+')
        parser.add_argument('--test_datas', default=[
            './test_datas'],
            type=str, nargs='+')
        parser.add_argument('--input_tensor_name', default=['input_ids'], type=str, nargs='+')
        return parent_args

    def __init__(
        self,
        collate_fn,
        args,
        **kwargs,
    ):
        super().__init__()
        self.collate_fn = collate_fn
        self.train_dataset = MMapIndexDataset(args.train_datas, args.input_tensor_name)
        self.valid_dataset = MMapIndexDataset(args.valid_datas, args.input_tensor_name)
        self.test_dataset = MMapIndexDataset(args.test_datas, args.input_tensor_name)
        self.save_hyperparameters(args)

    def setup(self, stage: Optional[str] = None) -> None:
        return super().setup(stage)

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.hparams.train_batchsize,
            shuffle=True,
            num_workers=self.hparams.num_workers,
            collate_fn=self.collate_fn,
        )

    def val_dataloader(self):
        # FIX: evaluation data must not be shuffled (was shuffle=True) --
        # shuffling validation batches makes eval order non-reproducible and
        # PyTorch Lightning explicitly warns against it.
        return DataLoader(
            self.valid_dataset,
            batch_size=self.hparams.eval_batchsize,
            shuffle=False,
            num_workers=self.hparams.num_workers,
            collate_fn=self.collate_fn,
        )

    def test_dataloader(self):
        # FIX: same as val_dataloader -- no shuffling at test time.
        return DataLoader(
            self.test_dataset,
            batch_size=self.hparams.test_batchsize,
            shuffle=False,
            num_workers=self.hparams.num_workers,
            collate_fn=self.collate_fn,
        )
| 2,461 | 34.681159 | 94 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/dreambooth_datasets/dreambooth_datasets.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : dreambooth_datasets.py
@Time : 2022/11/10 00:20
@Author : Gan Ruyi
@Version : 1.0
@Contact : ganruyi@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from pathlib import Path
def add_data_args(parent_args):
    """Register the Taiyi stable-diffusion / DreamBooth data arguments.

    Adds instance/class data directories, prompts, prior-preservation options
    and image preprocessing options to *parent_args*, then returns it.
    """
    group = parent_args.add_argument_group('taiyi stable diffusion data args')
    # (flags, keyword arguments) for each option, in declaration order.
    option_specs = [
        (["--instance_data_dir"], dict(
            type=str, default=None, required=True,
            help="A folder containing the training data of instance images.")),
        (["--class_data_dir"], dict(
            type=str, default=None, required=False,
            help="A folder containing the training data of class images.")),
        (["--instance_prompt"], dict(
            type=str, default=None,
            help="The prompt with identifier specifying the instance")),
        (["--class_prompt"], dict(
            type=str, default=None,
            help="The prompt to specify images in the same class as provided instance images.")),
        (["--with_prior_preservation"], dict(
            default=False, action="store_true",
            help="Flag to add prior preservation loss.")),
        (["--prior_loss_weight"], dict(
            type=float, default=1.0,
            help="The weight of prior preservation loss.")),
        (["--num_class_images"], dict(
            type=int, default=100,
            help=(
                "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
                " sampled with class_prompt."
            ))),
        (["--resolution"], dict(
            type=int, default=512,
            help=(
                "The resolution for input images, all the images in the train/validation dataset will be resized to this"
                " resolution"
            ))),
        (["--center_crop"], dict(
            action="store_true", default=False,
            help="Whether to center crop images before resizing to resolution")),
        (["--sample_batch_size"], dict(
            type=int, default=4,
            help="Batch size (per device) for sampling images.")),
    ]
    for flags, kwargs in option_specs:
        group.add_argument(*flags, **kwargs)
    return parent_args
class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for
    fine-tuning the model. It pre-processes the images and tokenizes prompts.

    When ``class_data_dir`` is given (prior preservation), each sample also
    carries a class image/prompt pair; the dataset length then becomes the
    max of the two image counts and indices wrap around modulo each count.
    """

    def __init__(
        self,
        instance_data_dir,
        instance_prompt,
        tokenizer,
        class_data_dir=None,
        class_prompt=None,
        size=512,
        center_crop=False,
    ):
        self.size = size
        self.center_crop = center_crop
        self.tokenizer = tokenizer

        self.instance_data_dir = Path(instance_data_dir)
        if not self.instance_data_dir.exists():
            # FIX: grammar of error message ("doesn't exists" -> "doesn't exist").
            raise ValueError("Instance images root doesn't exist.")

        # FIX: removed leftover debug print of the full image path list.
        self.instance_images_path = list(Path(instance_data_dir).iterdir())
        self.num_instance_images = len(self.instance_images_path)
        self.instance_prompt = instance_prompt
        self._length = self.num_instance_images

        if class_data_dir is not None:
            self.class_data_dir = Path(class_data_dir)
            self.class_data_dir.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_dir.iterdir())
            self.num_class_images = len(self.class_images_path)
            # With prior preservation, iterate over whichever set is larger.
            self._length = max(self.num_class_images, self.num_instance_images)
            self.class_prompt = class_prompt
        else:
            self.class_data_dir = None

        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __len__(self):
        return self._length

    def __getitem__(self, index):
        example = {}
        # Wrap around so any index up to _length maps to a valid image.
        instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
        if not instance_image.mode == "RGB":
            instance_image = instance_image.convert("RGB")
        example["instance_images"] = self.image_transforms(instance_image)
        example["instance_prompt_ids"] = self.tokenizer(
            self.instance_prompt,
            padding="do_not_pad",
            truncation=True,
            # NOTE(review): hard-coded 64 instead of tokenizer.model_max_length
            # (see commented original) -- confirm this matches the text encoder.
            max_length=64,
        ).input_ids

        if self.class_data_dir:
            class_image = Image.open(self.class_images_path[index % self.num_class_images])
            if not class_image.mode == "RGB":
                class_image = class_image.convert("RGB")
            example["class_images"] = self.image_transforms(class_image)
            example["class_prompt_ids"] = self.tokenizer(
                self.class_prompt,
                padding="do_not_pad",
                truncation=True,
                max_length=64,
            ).input_ids

        return example
class PromptDataset(Dataset):
    """Repeats one prompt *num_samples* times so class images can be generated
    in parallel across multiple GPUs."""

    def __init__(self, prompt, num_samples):
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        # Every sample carries the same prompt plus its own index.
        return {"prompt": self.prompt, "index": index}
| 6,386 | 33.711957 | 118 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/t5_dataloader/t5_datasets.py | # coding=utf8
import json
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, MT5Config, MT5Tokenizer, BatchEncoding
import torch
import pytorch_lightning as pl
import numpy as np
from itertools import chain
import sys
sys.path.append('../../')
def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
    """Back-compute raw token length and target length for T5 span corruption.

    Port of ``random_spans_helper`` from
    https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466

    Each noise span in the inputs is replaced by one sentinel token, and each
    non-noise span in the targets by one sentinel token; both sequences get an
    EOS appended. Given the desired tokenized input length, this finds the raw
    text length whose encoded inputs are exactly ``inputs_length`` tokens, so
    that training with random_spans_noise_mask needs no padding.

    Args:
        inputs_length: desired length of the tokenized inputs sequence (int).
        noise_density: approximate fraction of tokens that get masked (float).
        mean_noise_span_length: average masked-span length (float).

    Returns:
        (tokens_length, targets_length): raw text length in tokens, and the
        length of the encoded targets sequence (EOS included in both counts).
    """

    def _io_lengths(raw_length):
        # Number of corrupted tokens / spans for a raw sequence of this size.
        noise_tokens = int(round(raw_length * noise_density))
        clean_tokens = raw_length - noise_tokens
        noise_spans = int(round(noise_tokens / mean_noise_span_length))
        # Inputs keep all clean tokens + one sentinel per span + EOS;
        # targets hold all noise tokens + one sentinel per span + EOS.
        return clean_tokens + noise_spans + 1, noise_tokens + noise_spans + 1

    # Grow the raw length while the resulting encoder input still fits.
    tokens_length = inputs_length
    while _io_lengths(tokens_length + 1)[0] <= inputs_length:
        tokens_length += 1

    encoded_inputs_length, targets_length = _io_lengths(tokens_length)

    # Minor hack to make targets length equal to inputs length when the
    # density is 0.5, which is more likely to be a nice round number.
    if noise_density == 0.5 and targets_length > encoded_inputs_length:
        tokens_length -= 1
        targets_length -= 1
    return tokens_length, targets_length
class UnsuperviseT5Dataset(Dataset):
    '''
    Dataset used for T5 unsupervised pretraining.
    load_data_type = 0: load raw data from data path and save tokenized data, call function load_data
    load_data_type = 1: load tokenized data from path, call function load_tokenized_data
    load_data_type = 2: load tokenized data from memory data, call function load_tokenized_memory_data
    '''

    def __init__(self, data_path, args, load_data_type=0, data=None):
        super().__init__()

        # Pick the tokenizer: an MT5 sentencepiece tokenizer (optionally the
        # shrunk zh/en vocab at args.new_vocab_path) or a BERT wordpiece one.
        if args.tokenizer_type == 't5_tokenizer':
            if args.new_vocab_path is not None:
                self.tokenizer = MT5Tokenizer.from_pretrained(args.new_vocab_path)
            else:
                self.tokenizer = MT5Tokenizer.from_pretrained(args.pretrained_model_path)
        else:
            self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path)
        self.noise_density = 0.15
        self.mean_noise_span_length = 3
        self.text_column_name = args.text_column_name
        self.dataset_num_workers = args.dataset_num_workers
        self.max_seq_length = args.max_seq_length
        self.remove_columns = args.remove_columns
        # whether to load already-tokenized data (see class docstring)
        self.load_data_type = load_data_type

        if self.load_data_type == 0:
            # T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
            # To ensure that the input length is `max_seq_length`, we need to increase the maximum length
            # according to `mlm_probability` and `mean_noise_span_length`.
            # We can also define the label length accordingly.
            self.expanded_inputs_length, self.targets_length = compute_input_and_target_lengths(
                inputs_length=self.max_seq_length,
                noise_density=self.noise_density,
                mean_noise_span_length=self.mean_noise_span_length,
            )
            print('self.expanded_inputs_length, self.targets_length:{},{}'.format(
                self.expanded_inputs_length, self.targets_length))
            self.data = self.load_data(data_path)
        elif self.load_data_type == 1:
            self.data = self.load_tokenized_data(data_path)
        else:
            assert data is not None
            self.data = self.load_tokenized_memory_data(data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

    def load_data(self, data_path):
        """Tokenize a raw fengshen dataset and group it into fixed-size chunks
        of ``expanded_inputs_length`` tokens (see group_texts)."""
        # TODO: large data process
        from data.fs_datasets import load_dataset
        samples = load_dataset(
            # samples = datasets.load_from_disk(data_path)['train']
            data_path, num_proc=self.dataset_num_workers)['train']
        # print(samples)
        # First map tokenizes; the second (identity) map only drops columns.
        tokenized_datasets = samples.map(
            self.tokenize_function,
            batched=True,
            num_proc=self.dataset_num_workers,
            # load_from_cache_file=not data_args.overwrite_cache,
        ).map(
            batched=True,
            num_proc=self.dataset_num_workers,
            remove_columns=self.remove_columns)
        # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
        # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
        # might be slower to preprocess.
        #
        # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
        # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
        tokenized_datasets = tokenized_datasets.map(
            self.group_texts,
            batched=True,
            num_proc=self.dataset_num_workers,
            # load_from_cache_file=not data_args.overwrite_cache,
        )
        return tokenized_datasets
    '''
    The function load tokenized data saved from load_data function.
    '''

    def load_tokenized_data(self, data_path):
        from data.fs_datasets import load_dataset
        samples = load_dataset(data_path)['train']
        return samples

    def load_tokenized_memory_data(self, data):
        # Data is already tokenized and held in memory; use it verbatim.
        return data

    # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
    # Since we make sure that all sequences are of the same length, no attention_mask is needed.
    def tokenize_function(self, examples):
        # add_special_tokens=False here so no EOS token gets inserted in the
        # middle of a concatenated text.
        return self.tokenizer(examples[self.text_column_name],
                              add_special_tokens=False,
                              return_attention_mask=False)

    # Main data processing function that will concatenate all texts from our dataset
    # and generate chunks of expanded_inputs_length.
    def group_texts(self, examples):
        # Concatenate all texts.
        concatenated_examples = {
            k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        if total_length >= self.expanded_inputs_length:
            total_length = (
                total_length // self.expanded_inputs_length) * self.expanded_inputs_length
        # Split by chunks of max_len.
        result = {
            k: [t[i: i + self.expanded_inputs_length]
                for i in range(0, total_length, self.expanded_inputs_length)]
            for k, t in concatenated_examples.items()
        }
        return result
class UnsuperviseT5DataModel(pl.LightningDataModule):
    """LightningDataModule for T5 span-corruption (unsupervised) pretraining.

    Batches are built on the fly in ``collate_fn``: random noise spans are
    selected, masked tokens are collapsed into sentinel ids for the encoder
    inputs, and the complements become the decoder labels (the standard T5
    denoising objective).
    """

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register data arguments on *parent_args* and return it."""
        parser = parent_args.add_argument_group('UnsuperviseT5DataModel')
        parser.add_argument('--dataset_num_workers', default=8, type=int)
        parser.add_argument('--dataloader_num_workers', default=4, type=int)
        parser.add_argument(
            '--train_data_path', default='wudao_180g_mt5_tokenized', type=str)
        parser.add_argument('--train_batchsize', default=2, type=int)
        parser.add_argument('--valid_batchsize', default=2, type=int)
        parser.add_argument('--train_split_size', default=None, type=float)
        parser.add_argument('--tokenizer_type', default='t5_tokenizer', choices=['t5_tokenizer', 'bert_tokenizer'])
        parser.add_argument('--text_column_name', default='text')
        parser.add_argument('--remove_columns', nargs='+', default=[])
        return parent_args

    def __init__(self, args):
        super().__init__()
        self.save_hyperparameters(args)
        if args.train_split_size is not None:
            from data.fs_datasets import load_dataset
            data_splits = load_dataset(args.train_data_path, num_proc=args.dataset_num_workers)
            train_split = data_splits['train']
            test_split = data_splits['test']
            print('train:', train_split, '\ntest_data:', test_split)
            self.train_dataset = UnsuperviseT5Dataset('', args, load_data_type=2, data=train_split)
            self.test_dataset = UnsuperviseT5Dataset('', args, load_data_type=2, data=test_split)
        else:
            # FIX: was `self.train_data = ...`, but train_dataloader() reads
            # self.train_dataset, so this branch raised AttributeError.
            self.train_dataset = UnsuperviseT5Dataset(args.train_data_path, args, load_data_type=1)
            # Keep the old attribute name as an alias for backward compatibility.
            self.train_data = self.train_dataset

        self.config = MT5Config.from_pretrained(args.pretrained_model_path)
        self.noise_density = 0.15
        self.mean_noise_span_length = 3
        self.pad_token_id = self.config.pad_token_id
        self.decoder_start_token_id = self.config.decoder_start_token_id
        self.eos_token_id = self.config.eos_token_id
        self.vocab_size = self.config.vocab_size
        self.max_seq_length = args.max_seq_length
        # The legacy spm model already contains the extra_ids, while
        # T5Tokenizer would add another 100 extra_ids on top of the spm
        # vocabulary, hence extra_ids=0 here.
        if args.tokenizer_type == 't5_tokenizer' and args.new_vocab_path is not None:
            self.tokenizer = MT5Tokenizer.from_pretrained(args.new_vocab_path, extra_ids=0)
            # When starting from a fresh mt5 checkpoint, vocab_size has to be
            # updated to the size after extracting the zh/en sub-vocabulary.
            self.vocab_size = len(self.tokenizer)

        # T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
        # To ensure that the input length is `max_seq_length`, we need to increase the maximum length
        # according to `mlm_probability` and `mean_noise_span_length`. We can also define the label length accordingly.
        self.expanded_inputs_length, self.targets_length = compute_input_and_target_lengths(
            inputs_length=self.max_seq_length,
            noise_density=self.noise_density,
            mean_noise_span_length=self.mean_noise_span_length,
        )

    def train_dataloader(self):
        """Training loader; the custom sampler keeps consuming from the right
        sample when a run is resumed."""
        from fengshen.data.universal_datamodule.universal_sampler import PretrainingSampler
        from fengshen.data.universal_datamodule.universal_datamodule import get_consume_samples
        consumed_samples = get_consume_samples(self)
        batch_sampler = PretrainingSampler(
            total_samples=len(self.train_dataset),
            consumed_samples=consumed_samples,
            micro_batch_size=self.hparams.train_batchsize,
            data_parallel_rank=self.trainer.global_rank,
            data_parallel_size=self.trainer.world_size,
        )
        return DataLoader(
            self.train_dataset,
            batch_sampler=batch_sampler,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers,
            collate_fn=self.collate_fn,
        )

    def val_dataloader(self):
        sampler = torch.utils.data.distributed.DistributedSampler(
            self.test_dataset, shuffle=False)
        return DataLoader(
            self.test_dataset,
            sampler=sampler,
            shuffle=False,
            batch_size=self.hparams.valid_batchsize,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers,
            collate_fn=self.collate_fn,
        )

    def predict_dataloader(self):
        sampler = torch.utils.data.distributed.DistributedSampler(
            self.test_dataset, shuffle=False)
        return DataLoader(
            # FIX: was `self.test_data`, which is never assigned anywhere in
            # this class (AttributeError); the dataset lives in test_dataset.
            self.test_dataset,
            sampler=sampler,
            shuffle=False,
            batch_size=self.hparams.valid_batchsize,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers,
            collate_fn=self.collate_fn,
        )

    def collate_fn(self, examples):
        """Build one denoising batch: pick noise spans, collapse them into
        sentinel ids for the inputs, and emit labels/decoder_input_ids."""
        # convert list to dict and tensorize input
        batch = BatchEncoding(
            {k: np.array([examples[i][k] for i in range(len(examples))])
             for k, v in examples[0].items()}
        )
        input_ids = np.array(batch['input_ids'])
        batch_size, expanded_input_length = input_ids.shape
        mask_indices = np.asarray([self.random_spans_noise_mask(
            expanded_input_length) for i in range(batch_size)])
        labels_mask = ~mask_indices
        input_ids_sentinel = self.create_sentinel_ids(
            mask_indices.astype(np.int8))
        labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8))
        batch["input_ids"] = self.filter_input_ids(
            input_ids, input_ids_sentinel)
        batch["labels"] = self.filter_input_ids(input_ids, labels_sentinel)
        if batch["input_ids"].shape[-1] != self.max_seq_length:
            # FIX: the message previously reported self.targets_length even
            # though the check compares against self.max_seq_length.
            raise ValueError(
                f"`input_ids` are incorrectly preprocessed. `input_ids` length is \
                {batch['input_ids'].shape[-1]}, but should be {self.max_seq_length}."
            )
        if batch["labels"].shape[-1] != self.targets_length:
            raise ValueError(
                f"`labels` are incorrectly preprocessed. `labels` length is \
                {batch['labels'].shape[-1]}, but should be {self.targets_length}."
            )
        batch["decoder_input_ids"] = self.shift_tokens_right(
            batch["labels"], self.pad_token_id, self.decoder_start_token_id
        )
        for k, v in batch.items():
            batch[k] = torch.tensor(v)
        # print(k, batch[k], self.tokenizer.batch_decode(batch[k]), '\n', flush=True)
        return batch

    def create_sentinel_ids(self, mask_indices):
        """
        Sentinel ids creation given the indices that should be masked.
        The start indices of each mask are replaced by the sentinel ids in increasing
        order. Consecutive mask indices to be deleted are replaced with `-1`.
        """
        start_indices = mask_indices - \
            np.roll(mask_indices, 1, axis=-1) * mask_indices
        start_indices[:, 0] = mask_indices[:, 0]
        sentinel_ids = np.where(start_indices != 0, np.cumsum(
            start_indices, axis=-1), start_indices)
        # Sentinels are taken from the top of the vocabulary, in decreasing order.
        sentinel_ids = np.where(
            sentinel_ids != 0, (self.vocab_size - sentinel_ids), 0)
        sentinel_ids -= mask_indices - start_indices
        return sentinel_ids

    def filter_input_ids(self, input_ids, sentinel_ids):
        """
        Puts sentinel mask on `input_ids` and fuse consecutive mask tokens into a single mask token by deleting.
        This will reduce the sequence length from `expanded_inputs_length` to `input_length`.
        """
        batch_size = input_ids.shape[0]
        input_ids_full = np.where(sentinel_ids != 0, sentinel_ids, input_ids)
        # input_ids tokens and sentinel tokens are >= 0, tokens < 0 are
        # masked tokens coming after sentinel tokens and should be removed
        input_ids = input_ids_full[input_ids_full >=
                                   0].reshape((batch_size, -1))
        input_ids = np.concatenate(
            [input_ids, np.full((batch_size, 1), self.eos_token_id, dtype=np.int32)], axis=-1
        )
        return input_ids

    # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
    def shift_tokens_right(self, input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
        """
        Shift input ids one token to the right.
        """
        shifted_input_ids = np.zeros_like(input_ids)
        shifted_input_ids[:, 1:] = input_ids[:, :-1]
        shifted_input_ids[:, 0] = decoder_start_token_id
        shifted_input_ids = np.where(
            shifted_input_ids == -100, pad_token_id, shifted_input_ids)
        return shifted_input_ids

    def random_spans_noise_mask(self, length):
        """This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/
        blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2682>`__ .
        Noise mask consisting of random spans of noise tokens.
        The number of noise tokens and the number of noise spans and non-noise spans
        are determined deterministically as follows:
        num_noise_tokens = round(length * noise_density)
        num_nonnoise_spans = num_noise_spans = round(num_noise_tokens / mean_noise_span_length)
        Spans alternate between non-noise and noise, beginning with non-noise.
        Subject to the above restrictions, all masks are equally likely.
        Args:
            length: an int32 scalar (length of the incoming token sequence)
        Returns:
            a boolean tensor with shape [length]
        """
        orig_length = length
        num_noise_tokens = int(np.round(length * self.noise_density))
        # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
        num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
        num_noise_spans = int(
            np.round(num_noise_tokens / self.mean_noise_span_length))
        # avoid degeneracy by ensuring positive number of noise spans
        num_noise_spans = max(num_noise_spans, 1)
        num_nonnoise_tokens = length - num_noise_tokens

        # pick the lengths of the noise spans and the non-noise spans
        def _random_segmentation(num_items, num_segments):
            """Partition a sequence of items randomly into non-empty segments.
            Args:
                num_items: an integer scalar > 0
                num_segments: an integer scalar in [1, num_items]
            Returns:
                a Tensor with shape [num_segments] containing positive integers that add
                up to num_items
            """
            mask_indices = np.arange(num_items - 1) < (num_segments - 1)
            np.random.shuffle(mask_indices)
            first_in_segment = np.pad(mask_indices, [[1, 0]])
            segment_id = np.cumsum(first_in_segment)
            # count length of sub segments assuming that list is sorted
            _, segment_length = np.unique(segment_id, return_counts=True)
            return segment_length

        noise_span_lengths = _random_segmentation(
            num_noise_tokens, num_noise_spans)
        nonnoise_span_lengths = _random_segmentation(
            num_nonnoise_tokens, num_noise_spans)
        interleaved_span_lengths = np.reshape(
            np.stack([nonnoise_span_lengths, noise_span_lengths],
                     axis=1), [num_noise_spans * 2]
        )
        span_starts = np.cumsum(interleaved_span_lengths)[:-1]
        span_start_indicator = np.zeros((length,), dtype=np.int8)
        span_start_indicator[span_starts] = True
        span_num = np.cumsum(span_start_indicator)
        is_noise = np.equal(span_num % 2, 1)
        return is_noise[:orig_length]
class TaskT5Dataset(Dataset):
    """Multiple-choice QA dataset for T5 fine-tuning.

    Reads JSON-lines samples with keys `question`, `choice`, `answer`,
    `texta`, `textb`; encodes question+choices+passage as the input and the
    answer string as the target.
    """

    def __init__(self, data_path, args):
        super().__init__()
        self.max_length = args.max_seq_length
        if args.tokenizer_type == 't5_tokenizer':
            self.tokenizer = MT5Tokenizer.from_pretrained(args.pretrained_model_path)
        else:
            self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path)
        self.data = self.load_data(data_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Tokenization happens lazily, per access.
        return self.encode(self.data[index])

    def load_data(self, data_path):
        """Read a JSON-lines file into a list of dict samples."""
        samples = []
        with open(data_path, 'r', encoding='utf8') as f:
            lines = f.readlines()
            for line in tqdm(lines):
                samples.append(json.loads(line))
        return samples

    def encode(self, item):
        # Concatenate question, the comma-joined choices, and the passage(s).
        if item["textb"] != "":
            text = item['question'] + ','.join(item['choice'])+'。' + f"""{item["texta"]}""" + f"""{item["textb"]}"""
        else:
            text = f"""{item["question"]}""" + ",".join(item["choice"]) + "。" + f"""{item["texta"]}"""
        label = item['answer']
        encode_dict = self.tokenizer.encode_plus(text, max_length=self.max_length, padding='max_length',
                                                 truncation=True, return_tensors='pt')
        decode_dict = self.tokenizer.encode_plus(label, max_length=16, padding='max_length',
                                                 truncation=True)
        answer_token = []
        max_label_len = 0
        choice_encode = []  # used to determine the model's max generation length
        for a in item['choice']:
            answer_encode = self.tokenizer.encode(a)
            choice_encode.append(answer_encode)
            if len(answer_encode) > max_label_len:
                max_label_len = len(answer_encode)
            # Collect the union of token ids appearing in any choice; these
            # are returned as force_words_ids to constrain generation.
            for an in answer_encode:
                if an not in answer_token:
                    answer_token.append(an)
        # bad_words_ids = [[i] for i in range(self.tokenizer.vocab_size) if i not in answer_token]  # do not generate these tokens
        # while len(bad_words_ids)<self.tokenizer.vocab_size:
        #     bad_words_ids.append(bad_words_ids[0])
        # bad_words_ids = [[423],[67],[878]]
        encode_sent = encode_dict['input_ids'].squeeze()
        attention_mask = encode_dict['attention_mask'].squeeze()
        target = decode_dict['input_ids']
        # NOTE(review): `labels` below masks pad positions with -100 but is
        # never returned -- the dict returns the unmasked `target` instead, so
        # pad tokens are NOT ignored by the loss. Also, `target` is a Python
        # list here, so `target == pad_token_id` is a plain bool, not an
        # elementwise mask. Looks like dead/broken code -- confirm intent
        # before relying on the loss masking.
        labels = torch.tensor(target)
        labels[target == self.tokenizer.pad_token_id] = -100
        return {
            "input_ids": torch.tensor(encode_sent).long(),
            "attention_mask": torch.tensor(attention_mask).float(),
            "labels": torch.tensor(target).long(),
            "force_words_ids": answer_token,
        }
class TaskT5DataModel(pl.LightningDataModule):
    """LightningDataModule for supervised T5 fine-tuning on TaskT5Dataset splits."""

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register dataset/dataloader arguments and return the parent parser."""
        group = parent_args.add_argument_group('TaskT5DataModel')
        group.add_argument('--dataset_num_workers', default=8, type=int)
        group.add_argument('--dataloader_num_workers', default=4, type=int)
        group.add_argument(
            '--train_data_path', default='wudao_180g_mt5_tokenized', type=str)
        group.add_argument(
            '--valid_data_path', default='wudao_180g_mt5_tokenized', type=str)
        group.add_argument('--train_batchsize', default=2, type=int)
        group.add_argument('--valid_batchsize', default=2, type=int)
        group.add_argument('--train_split_size', default=None, type=float)
        group.add_argument('--tokenizer_type', default='t5_tokenizer',
                           choices=['t5_tokenizer', 'bert_tokenizer'])
        group.add_argument('--text_column_name', default='text')
        group.add_argument('--remove_columns', nargs='+', default=[])
        return parent_args

    def __init__(self, args):
        super().__init__()
        self.save_hyperparameters(args)
        self.train_dataset = TaskT5Dataset(args.train_data_path, args)
        self.valid_dataset = TaskT5Dataset(args.valid_data_path, args)

    def train_dataloader(self):
        """Training loader; the custom sampler lets resumed runs pick up at
        the correct sample offset."""
        from fengshen.data.universal_datamodule.universal_sampler import PretrainingSampler
        from fengshen.data.universal_datamodule.universal_datamodule import get_consume_samples
        train_sampler = PretrainingSampler(
            total_samples=len(self.train_dataset),
            consumed_samples=get_consume_samples(self),
            micro_batch_size=self.hparams.train_batchsize,
            data_parallel_rank=self.trainer.global_rank,
            data_parallel_size=self.trainer.world_size,
        )
        return DataLoader(
            self.train_dataset,
            batch_sampler=train_sampler,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers
        )

    def val_dataloader(self):
        """Validation loader: distributed, deterministic order."""
        dist_sampler = torch.utils.data.distributed.DistributedSampler(
            self.valid_dataset, shuffle=False)
        return DataLoader(
            self.valid_dataset,
            sampler=dist_sampler,
            shuffle=False,
            batch_size=self.hparams.valid_batchsize,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers
        )
| 25,946 | 45.087034 | 127 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/t5_dataloader/t5_gen_datasets.py | # -*- encoding: utf-8 -*-
'''
@File : t5_gen_datasets.py
@Time : 2022/10/24 19:29
@Author : He Junqing
@Version : 1.0
@Contact : hejunqing@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from logging import exception
from transformers import (
BertTokenizer,
MT5Config,
MT5Tokenizer,
MT5ForConditionalGeneration,
)
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import pytorch_lightning as pl
import numpy as np
import sys
sys.path.append("../../")
# Marker tokens added to the tokenizer vocabulary to delimit dialogue context
# ([CTSTART]/[CTEND]) and external knowledge ([KNSTART]/[KNEND]) spans, plus a
# turn separator ([SEP]); consumed via tokenizer.add_special_tokens(...).
special_token_dict = {
    "additional_special_tokens": [
        "[CTSTART]",
        "[CTEND]",
        "[SEP]",
        "[KNSTART]",
        "[KNEND]",
    ]
}
class DialogDataset(Dataset):
    def __init__(self, data_path, args, data, load_data_type=1) -> None:
        """Build the dialog dataset.

        data: the split name to pull from a packed dataset (stored as
            self.data_split); when load_data_type == 2 the raw samples are
            expected in `data_path` instead (testing mode).
        load_data_type: 0 = raw json file, 1 = packed fengshen dataset,
            anything else = in-memory samples for testing.
        """
        super().__init__()

        if args.tokenizer_type == "t5_tokenizer":
            self.tokenizer = MT5Tokenizer.from_pretrained(
                args.pretrained_model_path)
            # 32596 identifies a checkpoint that has not yet been extended
            # with the dialog marker tokens; extend it once and persist both
            # the resized model and tokenizer to args.new_vocab_path.
            if len(self.tokenizer) == 32596:
                self.tokenizer.add_special_tokens(special_token_dict)
                print(
                    "add special tokens to tokenizer,vocab size:",
                    len(self.tokenizer)
                )
                self.model = MT5ForConditionalGeneration.from_pretrained(
                    args.pretrained_model_path
                )
                self.model.resize_token_embeddings(len(self.tokenizer))
                self.model.save_pretrained(args.new_vocab_path)
                self.tokenizer.save_pretrained(
                    args.new_vocab_path)
        else:
            self.tokenizer = BertTokenizer.from_pretrained(
                args.pretrained_model_path)

        self.load_data_type = load_data_type
        self.data_split = data
        self.num_workers = args.preprocessing_num_workers
        self.max_seq_length = args.max_seq_length
        self.max_knowledge_length = args.max_knowledge_length
        self.max_target_length = args.max_target_length

        # tokenizer config
        self.config = MT5Config.from_pretrained(args.pretrained_model_path)
        self.decoder_start_token_id = self.config.decoder_start_token_id
        self.eos_token_id = self.config.eos_token_id
        self.vocab_size = self.config.vocab_size
        # print(self.tokenizer.decode([2]))

        # load from raw data or hf dataset
        if self.load_data_type == 0:
            self.data = self.load_data(data_path)
        elif self.load_data_type == 1:
            self.data = self.load_packed_data(data_path)
        else:  # for testing
            self.data = data_path
def load_packed_data(self, data_path):
from fengshen.data.fs_datasets import load_dataset
samples = load_dataset(data_path,
num_proc=self.num_workers)[self.data_split]
tokenized_samples = samples.map(
self.regular_tokenize, batched=False,
num_proc=self.num_workers
)
return tokenized_samples
def load_data(self, data_path):
"""
load data from raw data
return untokoenized data
"""
from datasets import load_dataset
ds = load_dataset("json", data_files=data_path)['train']
samples = ds.map(self.regular_tokenize, batched=False, num_proc=self.num_workers
)
return samples
    def __getitem__(self, index):
        # `self.data` is either a tokenized HF dataset or, in testing mode,
        # whatever was passed in as data_path.
        return self.data[index]
    def __len__(self):
        # Number of samples in the loaded split.
        return len(self.data)
def regular_tokenize(self, sample):
# print(len(sample['context']))
context_ids = self.tokenizer(
sample["context"],
add_special_tokens=True,
return_attention_mask=False,
return_token_type_ids=True,
)
context_types = self.get_token_type(
sample["context"], context_ids["token_type_ids"]
)
# print('context',sample['context'])
# print('context_ids',context_ids['input_ids'])
knowledge_ids = self.tokenizer.encode(
sample["knowledge"], add_special_tokens=False
)
# print('knowledge_ids',knowledge_ids)
if isinstance(knowledge_ids, int):
knowledge_ids = [knowledge_ids]
target_ids = self.tokenizer.encode(
sample["target"],
add_special_tokens=False,
max_length=self.max_target_length - 1,
truncation=True,
)
# print('target',sample['target'])
# print('target_ids',target_ids)
# print('decode target',self.tokenizer.decode(target_ids))
# truncate
knowledge_ids = (
[self.tokenizer.convert_tokens_to_ids("[KNSTART]")]
+ knowledge_ids[: self.max_knowledge_length - 2]
+ [self.tokenizer.convert_tokens_to_ids("[KNEND]")]
)
l_kn = len(knowledge_ids)
knowledge_types = [2] * l_kn
flatten_context = []
for line in context_ids["input_ids"]:
flatten_context.extend(line)
l_ct = min(len(flatten_context), self.max_seq_length - l_kn - 2)
context_ids = (
[self.tokenizer.convert_tokens_to_ids("[CTSTART]")]
+ flatten_context[-l_ct:]
+ [self.tokenizer.convert_tokens_to_ids("[CTEND]")]
)
context_types = context_types[-l_ct:] + [0]
context_types.insert(0, context_types[0])
assert len(context_ids) == len(
context_types
), "len of context ids and token types unmatch, context:{},ids:{} types:{},len {}:{}".format(
sample["context"],
context_ids,
context_types,
len(context_ids),
len(context_types),
)
try:
target_ids = target_ids + [self.eos_token_id]
except exception:
print(sample["target"], target_ids, self.eos_token_id)
tokenized = {}
tokenized["input_ids"] = np.array(context_ids + knowledge_ids, dtype=np.int32)
tokenized["token_types"] = np.array(
context_types + knowledge_types, dtype=np.int32
)
tokenized["attention_mask"] = np.ones(
len(context_types + knowledge_types), dtype=np.int8
)
tokenized["labels"] = np.array(target_ids, dtype=np.int32)
return tokenized
def get_token_type(self, context, tokentypes=None):
# token_type fail in tokenizer, all zero
context_token_types = []
for i, line in enumerate(context):
if tokentypes:
if i % 2 == 0:
token_type = [0] * len(tokentypes[i])
else:
token_type = [1] * len(tokentypes[i])
else:
if i % 2 == 0:
token_type = [0] * (1 + len(line))
else:
token_type = [1] * (1 + len(line))
context_token_types.extend(token_type)
return context_token_types
class DialogDataModel(pl.LightningDataModule):
    """LightningDataModule for knowledge-grounded dialog pretraining."""

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register dataset/dataloader CLI options on `parent_args`."""
        parser = parent_args.add_argument_group("SuperviseT5DataModel")
        parser.add_argument("--dataset_num_workers", default=8, type=int)
        parser.add_argument("--dataloader_num_workers", default=4, type=int)
        parser.add_argument("--train_data_path", default="dialog_4g_test", type=str)
        parser.add_argument(
            "--valid_data_path", default="wudao_180g_mt5_tokenized", type=str
        )
        parser.add_argument("--train_batchsize", default=2, type=int)
        parser.add_argument("--valid_batchsize", default=2, type=int)
        parser.add_argument("--max_seq_length", default=512, type=int)
        parser.add_argument("--max_knowledge_length", default=128, type=int)
        parser.add_argument("--max_target_length", default=128, type=int)
        return parent_args

    def __init__(self, args):
        super().__init__()
        self.save_hyperparameters(args)
        self.load_data(args)
        self.epochs = args.max_epochs

    def load_data(self, args):
        """Build train/test datasets and cache special-token ids from the config."""
        if args.train_split_size is not None:
            from fengshen.data.fs_datasets import load_dataset
            data_splits = load_dataset(
                args.train_data_path, num_proc=args.dataset_num_workers
            )
            train_split = data_splits['train']
            test_split = data_splits['test']
            print('train:', train_split, '\ntest_data:', test_split)
            self.train_dataset = DialogDataset(
                args.train_data_path, args, load_data_type=1, data="train"
            )
            self.test_dataset = DialogDataset(
                args.train_data_path, args, load_data_type=1, data="test"
            )
        else:
            # BUGFIX: this branch previously assigned `self.train_data`, but
            # train_dataloader() reads `self.train_dataset`, which raised
            # AttributeError whenever train_split_size was None.
            self.train_dataset = DialogDataset(
                args.train_data_path, args, load_data_type=1
            )
        self.config = MT5Config.from_pretrained(args.pretrained_model_path)
        self.pad_token_id = self.config.pad_token_id
        self.decoder_start_token_id = self.config.decoder_start_token_id
        print("bos id:", self.decoder_start_token_id)

    def collate_fn(self, samples):
        """Pad tokenized samples into a batch; labels are padded with -100."""
        batch = {
            k: [
                torch.tensor(samples[i][k], dtype=torch.int64)
                for i in range(len(samples))
            ]
            for k in ["input_ids", "token_types", "attention_mask", "labels"]
        }
        for k, v in batch.items():
            if k != "labels":
                batch[k] = pad_sequence(
                    v, batch_first=True, padding_value=self.pad_token_id
                )
            else:
                batch[k] = pad_sequence(v, batch_first=True, padding_value=-100)
        batch["decoder_input_ids"] = torch.tensor(
            self.shift_tokens_right(
                batch["labels"], self.pad_token_id, self.decoder_start_token_id
            ),
            dtype=torch.long,
        )
        return batch

    def shift_tokens_right(
        self, input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
    ) -> np.ndarray:
        """
        Shift input ids one token to the right (teacher forcing inputs).
        """
        shifted_input_ids = np.zeros_like(input_ids)
        shifted_input_ids[:, 1:] = input_ids[:, :-1]
        shifted_input_ids[:, 0] = decoder_start_token_id
        # The -100 ignore index must not leak into the decoder inputs.
        shifted_input_ids = np.where(
            shifted_input_ids == -100, pad_token_id, shifted_input_ids
        )
        return shifted_input_ids

    def train_dataloader(self):
        from fengshen.data.universal_datamodule.universal_sampler import (
            PretrainingRandomSampler,
        )
        from fengshen.data.universal_datamodule.universal_datamodule import (
            get_consume_samples,
        )
        # Custom sampler so that resumed training continues from the right sample.
        consumed_samples = get_consume_samples(self)
        batch_sampler = PretrainingRandomSampler(
            epoch=self.epochs,
            total_samples=len(self.train_dataset),
            consumed_samples=consumed_samples,
            micro_batch_size=self.hparams.train_batchsize,
            data_parallel_rank=self.trainer.global_rank,  # gpu idx
            data_parallel_size=self.trainer.world_size,  # gpu num
        )
        return DataLoader(
            self.train_dataset,
            batch_sampler=batch_sampler,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers,
            collate_fn=self.collate_fn,
        )

    def val_dataloader(self):
        sampler = torch.utils.data.distributed.DistributedSampler(
            self.test_dataset, shuffle=False
        )
        return DataLoader(
            self.test_dataset,
            sampler=sampler,
            shuffle=False,
            batch_size=self.hparams.valid_batchsize,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers,
            collate_fn=self.collate_fn,
        )

    def predict_dataloader(self):
        sampler = torch.utils.data.distributed.DistributedSampler(
            self.test_dataset, shuffle=False
        )
        return DataLoader(
            self.test_dataset,
            sampler=sampler,
            shuffle=False,
            batch_size=self.hparams.valid_batchsize,
            pin_memory=True,
            num_workers=self.hparams.dataloader_num_workers,
            collate_fn=self.collate_fn,
        )
if __name__ == "__main__":
    # Manual smoke test for DialogDataModel.
    import argparse
    total_parser = argparse.ArgumentParser("DATASET parser")
    total_parser.add_argument(
        "--tokenizer_type",
        default="t5_tokenizer",
        choices=["bert_tokenizer", "t5_tokenizer"],
    )
    # BUGFIX: the default was the string "10"; use a real int to match type=int.
    total_parser.add_argument("--preprocessing_num_workers", default=10, type=int)
    total_parser.add_argument(
        "--new_vocab_path",
        default="/cognitive_comp/hejunqing/projects/Dialog_pretrain/randeng_t5_newvocab_784M",
        type=str,
    )
    # BUGFIX: 0.995 is a fraction, so this option must parse as float, not int
    # (with type=int, passing e.g. "0.5" on the command line raised ValueError).
    total_parser.add_argument("--train_split_size", default=0.995, type=float)
    total_parser.add_argument(
        "--pretrained_model_path",
        default="/cognitive_comp/hejunqing/projects/Dialog_pretrain/randeng_t5_newvocab_784M",
    )
    total_parser = DialogDataModel.add_data_specific_args(total_parser)
    args = total_parser.parse_args()
    dl = DialogDataModel(args)
    for i in range(5):
        for batch in dl.train_dataloader():
            print(batch)
            print(batch["input_ids"])
            print(batch["token_types"])
            print(batch["decoder_input_ids"])
            print(batch["labels"])
    print("test finish")
| 13,701 | 33.954082 | 101 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/taiyi_stable_diffusion_datasets/taiyi_datasets.py | from torch.utils.data import Dataset, ConcatDataset
import os
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
def add_data_args(parent_args):
    """Register Taiyi stable-diffusion data options and return the parser."""
    group = parent_args.add_argument_group('taiyi stable diffusion data args')
    # Multiple dataset roots may be given; each one is loaded separately.
    group.add_argument(
        "--datasets_path", type=str, default=None, required=True, nargs='+',
        help="A folder containing the training data of instance images.",
    )
    group.add_argument(
        "--datasets_type", type=str, default=None, required=True, choices=['txt', 'csv', 'fs_datasets'], nargs='+',
        help="dataset type, txt or csv, same len as datasets_path",
    )
    group.add_argument(
        "--resolution", type=int, default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    group.add_argument(
        "--center_crop", action="store_true", default=False,
        help="Whether to center crop images before resizing to resolution"
    )
    group.add_argument("--thres", type=float, default=0.2)
    return parent_args
class TXTDataset(Dataset):
    """Txt-file dataset reader, mainly for the Zero23m dataset.

    Only image paths are stored at construction time; each caption is read
    lazily in __getitem__ to keep initialization cheap.
    """

    def __init__(self, foloder_name, thres=0.2):
        super().__init__()
        # The score.csv filtering is not open-sourced yet:
        #   score_data = pd.read_csv(os.path.join(foloder_name, 'score.csv'))
        #   img_path2score = {score_data['image_path'][i]: score_data['score'][i]
        #                     for i in range(len(score_data))}
        self.image_paths = [
            os.path.join(foloder_name, name)
            for name in os.listdir(foloder_name)
            if name.endswith('.jpg')
        ]

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = str(self.image_paths[idx])
        # The caption file shares the image's basename.
        with open(img_path.replace('.jpg', '.txt'), 'r') as f:
            caption = f.read()
        return {'img_path': img_path, 'caption': caption}
# NOTE: kept as the plain single-file reader; parallelism is applied by the
# caller, which cut load time from ~30min to ~3min.
class CSVDataset(Dataset):
    """CSV-backed image/caption dataset (one CSV file per instance)."""

    def __init__(self, input_filename, image_root, img_key, caption_key, thres=0.2):
        super().__init__()
        print(f'Loading csv data from {input_filename}.')
        self.images = []
        self.captions = []
        if input_filename.endswith('.csv'):
            frame = pd.read_csv(input_filename, index_col=0, on_bad_lines='skip')
            print(f'file {input_filename} datalen {len(frame)}')
            # Image paths may need tweaking depending on the dataset layout.
            self.images.extend(frame[img_key].tolist())
            self.captions.extend(frame[caption_key].tolist())
        self.image_root = image_root

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_path = os.path.join(self.image_root, str(self.images[idx]))
        return {'img_path': img_path, 'caption': self.captions[idx]}
def if_final_dir(path: str) -> bool:
    """Return True when `path` directly contains at least one regular file.

    A "final" directory is a leaf of the dataset tree that holds the actual
    image/caption files rather than further sub-folders.
    """
    return any(entry.is_file() for entry in os.scandir(path))
def process_pool_read_txt_dataset(args, input_root=None, thres=0.2):
    """Recursively walk `input_root`, build one TXTDataset per leaf folder in
    a process pool, and concatenate them all."""
    pool = ProcessPoolExecutor(max_workers=20)
    futures = []

    # Depth-first traversal: leaf folders (those holding files) get a dataset.
    def walk(path: str):
        children = [entry.path for entry in os.scandir(path) if entry.is_dir()]
        for child in children:
            if if_final_dir(child):
                futures.append(pool.submit(TXTDataset, child, thres))
            else:
                walk(child)

    walk(input_root)
    # shutdown() waits for all submitted constructors to finish.
    pool.shutdown()
    return ConcatDataset([future.result() for future in futures])
def process_pool_read_csv_dataset(args, input_root, thres=0.20):
    """Build one CSVDataset per csv under `<input_root>/release` in parallel
    and concatenate them; images live under `<input_root>/images`."""
    csv_names = os.listdir(os.path.join(input_root, 'release'))
    image_root = os.path.join(input_root, 'images')
    pool = ProcessPoolExecutor(max_workers=150)
    futures = [
        pool.submit(
            CSVDataset,
            os.path.join(input_root, 'release', name),
            image_root,
            img_key="name",
            caption_key="caption",
            thres=thres,
        )
        for name in csv_names
    ]
    # shutdown() waits for all submitted constructors to finish.
    pool.shutdown()
    return ConcatDataset([future.result() for future in futures])
def load_data(args, global_rank=0):
    """Load every configured dataset (txt/csv/fs_datasets) and return them
    concatenated under the 'train' key."""
    assert len(args.datasets_path) == len(args.datasets_type), \
        "datasets_path num not equal to datasets_type"
    collected = []
    for path, type in zip(args.datasets_path, args.datasets_type):
        if type == 'txt':
            ds = process_pool_read_txt_dataset(
                args, input_root=path, thres=args.thres)
        elif type == 'csv':
            ds = process_pool_read_csv_dataset(
                args, input_root=path, thres=args.thres)
        elif type == 'fs_datasets':
            from fengshen.data.fs_datasets import load_dataset
            ds = load_dataset(path, num_proc=args.num_workers,
                              thres=args.thres, global_rank=global_rank)['train']
        else:
            raise ValueError('unsupport dataset type: %s' % type)
        print(f'load datasset {type} {path} len {len(ds)}')
        collected.append(ds)
    return {'train': ConcatDataset(collected)}
| 6,417 | 35.885057 | 117 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/task_dataloader/medicalQADataset.py | # coding=utf8
import os
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import AutoTokenizer
class GPT2QADataset(Dataset):
    """Dataset for the yuyuan medical QA task.

    Only suitable for small datasets -- every line is parsed eagerly into
    memory. For large datasets use a memory-mapped dataset instead.
    """

    def __init__(self, data_path, name, args):
        super().__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(
            args.pretrained_model_path)
        if self.tokenizer.pad_token is None:
            self.tokenizer.add_special_tokens({'pad_token': '<|endoftext|>'})
        # Dataset size in GiB; decides streaming vs. whole-file read below.
        self.data_size = os.path.getsize(data_path)/1024/1024/1024
        self.data_type_name = name
        self.data = self.load_data(data_path)
        self.max_seq_length = args.max_seq_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.encode(self.data[index])

    def load_data(self, data_path):
        """Read the file line by line with a progress bar.

        Files <= 5 GiB are read at once (so tqdm knows the total); larger
        files are streamed.
        """
        if self.data_size <= 5:
            with open(data_path, "rt", encoding='utf8') as f:
                lines = f.readlines()
                total_num = len(lines)
                data_gen = lines
        else:
            data_gen = open(data_path, "rt", encoding='utf8')
            total_num = None
        data = []
        with tqdm(total=total_num, desc=f'{self.data_type_name}处理进度', mininterval=0.3) as bar:
            for idx, line in enumerate(data_gen):
                data.append(self.data_parse(line))
                bar.update()
        if self.data_size > 5:
            data_gen.close()
        return data

    def data_parse(self, line):
        """Parse one data line (a Python dict literal) into a dict.

        SECURITY FIX: uses ast.literal_eval instead of eval() -- the data
        file is external input, and eval() would execute arbitrary code
        embedded in it. literal_eval accepts exactly the literal dicts
        this format contains.
        """
        import ast
        return ast.literal_eval(line.strip())

    def encode(self, item):
        """Turn one QA pair into model inputs; pad tokens are masked to -100
        in the labels so they are ignored by the loss."""
        inputs_dict = self.tokenizer.encode_plus(item['Question']+item['answer'],
                                                 max_length=self.max_seq_length, padding='max_length',
                                                 truncation=True, return_tensors='pt')
        target = inputs_dict['input_ids']
        labels = target.clone().detach()
        labels[target == self.tokenizer.pad_token_id] = -100
        return {
            "input_ids": inputs_dict['input_ids'].squeeze(),
            "attention_mask": inputs_dict['attention_mask'].squeeze(),
            "labels": labels.squeeze(),
            "question": item['Question'],
            "answer": item['answer']
        }
class GPT2QADataModel(pl.LightningDataModule):
    """LightningDataModule wiring GPT2QADataset splits into dataloaders."""

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register data CLI options on the parent parser."""
        parser = parent_args.add_argument_group('GPT2QADataModel')
        parser.add_argument('--data_dir', type=str, required=True)
        parser.add_argument('--num_workers', default=2, type=int)
        parser.add_argument('--train_data', default='train.txt', type=str)
        parser.add_argument('--valid_data', default='valid.txt', type=str)
        parser.add_argument('--test_data', default='test.txt', type=str)
        parser.add_argument('--train_batchsize', type=int, required=True)
        parser.add_argument('--valid_batchsize', type=int, required=True)
        parser.add_argument('--max_seq_length', default=1024, type=int)
        return parent_args

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.train_batchsize = args.train_batchsize
        self.valid_batchsize = args.valid_batchsize
        if not args.do_eval_only:
            self.train_data = GPT2QADataset(os.path.join(
                args.data_dir, args.train_data), '训练集', args)
            self.valid_data = GPT2QADataset(os.path.join(
                args.data_dir, args.valid_data), '验证集', args)
            self.test_data = GPT2QADataset(os.path.join(
                args.data_dir, args.test_data), '测试集', args)

    def _loader(self, dataset, *, shuffle, batch_size):
        # Shared DataLoader construction for every split.
        return DataLoader(
            dataset,
            shuffle=shuffle,
            batch_size=batch_size,
            pin_memory=False,
            num_workers=self.args.num_workers,
        )

    def train_dataloader(self):
        return self._loader(self.train_data, shuffle=True,
                            batch_size=self.train_batchsize)

    def val_dataloader(self):
        return self._loader(self.valid_data, shuffle=False,
                            batch_size=self.valid_batchsize)

    def predict_dataloader(self):
        return self._loader(self.test_data, shuffle=False,
                            batch_size=self.valid_batchsize)
if __name__ == '__main__':
    # Quick manual smoke test against a local checkpoint and data file.
    import argparse

    modelfile = '/cognitive_comp/wuziwei/pretrained_model_hf/medical_v2'
    datafile = '/cognitive_comp/wuziwei/task-data/medical_qa/medical_qa_train.txt'
    parser = argparse.ArgumentParser(description='hf test', allow_abbrev=False)
    group = parser.add_argument_group(title='test args')
    group.add_argument('--pretrained-model-path', type=str, default=modelfile,
                       help='Number of transformer layers.')
    group.add_argument('--max-seq-length', type=int, default=1024)
    args = parser.parse_args()

    testml = GPT2QADataset(datafile, 'medical_qa', args=args)
    print(testml[10])
| 5,285 | 37.304348 | 102 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/task_dataloader/task_datasets.py | # coding=utf8
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
import json
import torch
import pytorch_lightning as pl
import os
class AbstractCollator:
    """Collator for the summarization task.

    Encodes prompt+text as encoder inputs and the summary as labels; label
    positions after the first EOS are set to -100 so the loss ignores them.
    """

    def __init__(self, tokenizer, max_enc_length, max_dec_length, prompt):
        self.tokenizer = tokenizer
        self.max_enc_length = max_enc_length
        self.max_dec_length = max_dec_length
        self.prompt = prompt

    def __call__(self, samples):
        source_inputs = []
        labels = []
        attn_mask = []
        for sample in samples:
            enc = self.tokenizer.encode_plus(
                self.prompt + sample['text'],
                max_length=self.max_enc_length,
                padding='max_length',
                truncation=True,
                return_tensors='pt')
            dec = self.tokenizer.encode_plus(
                sample['summary'],
                max_length=self.max_dec_length,
                padding='max_length',
                truncation=True,
                return_tensors='pt')
            source_inputs.append(enc['input_ids'].squeeze())
            labels.append(dec['input_ids'].squeeze())
            attn_mask.append(enc['attention_mask'].squeeze())
        source_inputs = torch.stack(source_inputs)
        labels = torch.stack(labels)
        attn_mask = torch.stack(attn_mask)
        # Mask out everything after the first EOS in each label row.
        end_token_index = torch.where(labels == self.tokenizer.eos_token_id)[1]
        for row, end_idx in enumerate(end_token_index):
            labels[row][end_idx + 1:] = -100
        return {
            "input_ids": source_inputs,
            "attention_mask": attn_mask,
            "labels": labels,
            "text": [sample['text'] for sample in samples],
            "summary": [sample['summary'] for sample in samples]
        }
class LCSTSDataset(Dataset):
    '''
    Dataset for the LCSTS summarization task (JSON-lines text/summary pairs).
    '''

    def __init__(self, data_path, args):
        super().__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(
            args.pretrained_model_path, use_fast=False)
        self.data = self.load_data(data_path)
        self.prompt = args.prompt
        self.max_enc_length = args.max_enc_length
        self.max_dec_length = args.max_dec_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.encode(self.data[index])

    def load_data(self, data_path):
        """Read JSON-lines into [{'text': ..., 'summary': ...}, ...]."""
        with open(data_path, "r", encoding='utf8') as f:
            lines = f.readlines()
        samples = []
        for line in tqdm(lines):
            obj = json.loads(line)
            source = obj['text']
            target = obj['summary']
            samples.append({
                "text": source,
                "summary": target
            })
        return samples

    def cal_data(self, data_path):
        """Tokenize the corpus once to report length statistics (debug helper)."""
        with open(data_path, "r", encoding='utf8') as f:
            lines = f.readlines()
        samples = []
        enc_sizes = []
        dec_sizes = []
        for line in tqdm(lines):
            obj = json.loads(line.strip())
            source = obj['text']
            target = obj['summary']
            enc_input_ids = self.tokenizer.encode(source)
            target = self.tokenizer.encode(target)
            enc_sizes.append(len(enc_input_ids))
            dec_sizes.append(len(target)-1)
            samples.append({
                "enc_input_ids": enc_input_ids,
                "dec_input_ids": target[:-1],
                "label_ids": target[1:]
            })
        max_enc_len = max(enc_sizes)
        max_dec_len = max(dec_sizes)
        import numpy as np
        # mean of len(enc_input_ids): 74.68041911345998
        # mean of len(dec_input_ids): 14.02265483791283
        # max of len(enc_input_ids): 132
        # max of len(dec_input_ids): 31
        print('mean of len(enc_input_ids):', np.mean(enc_sizes),
              'mean of len(dec_input_ids):', np.mean(dec_sizes),
              'max of len(enc_input_ids):', max_enc_len,
              'max of len(dec_input_ids):', max_dec_len)
        return samples

    def encode(self, item):
        """Tokenize one sample; pad positions in the labels are set to -100."""
        encode_dict = self.tokenizer.encode_plus(
            self.prompt + item['text'],
            max_length=self.max_enc_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt')
        decode_dict = self.tokenizer.encode_plus(
            item['summary'],
            max_length=self.max_dec_length,
            padding='max_length',
            truncation=True)
        labels = torch.tensor(decode_dict['input_ids'])
        # BUGFIX: the original compared the *python list* to pad_token_id
        # (`target == pad_token_id` is always False), so padding was never
        # masked out of the loss. Compare on the tensor instead.
        labels[labels == self.tokenizer.pad_token_id] = -100
        return {
            "input_ids": encode_dict['input_ids'].squeeze(),
            "attention_mask": encode_dict['attention_mask'].squeeze(),
            "labels": labels.squeeze(),
            "text": item['text'],
            "summary": item['summary']
        }
class LCSTSDataModel(pl.LightningDataModule):
    """LightningDataModule that feeds LCSTSDataset splits to dataloaders."""

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register LCSTS data options on the parent parser."""
        parser = parent_args.add_argument_group('LCSTSDataModel')
        parser.add_argument(
            '--data_dir', default='/cognitive_comp/ganruyi/data_datasets_LCSTS_LCSTS/', type=str)
        parser.add_argument('--num_workers', default=8, type=int)
        parser.add_argument('--train_data', default='train.jsonl', type=str)
        parser.add_argument('--valid_data', default='valid.jsonl', type=str)
        parser.add_argument('--test_data', default='test_public.jsonl', type=str)
        parser.add_argument('--train_batchsize', default=128, type=int)
        parser.add_argument('--valid_batchsize', default=128, type=int)
        parser.add_argument('--max_enc_length', default=128, type=int)
        parser.add_argument('--max_dec_length', default=30, type=int)
        parser.add_argument('--prompt', default='summarize:', type=str)
        return parent_args

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.train_batchsize = args.train_batchsize
        self.valid_batchsize = args.valid_batchsize
        if not args.do_eval_only:
            self.train_data = LCSTSDataset(os.path.join(
                args.data_dir, args.train_data), args)
            self.valid_data = LCSTSDataset(os.path.join(
                args.data_dir, args.valid_data), args)
            self.test_data = LCSTSDataset(os.path.join(
                args.data_dir, args.test_data), args)

    def _loader(self, dataset, *, shuffle, batch_size):
        # Shared DataLoader construction for every split.
        return DataLoader(dataset,
                          shuffle=shuffle,
                          batch_size=batch_size,
                          pin_memory=False,
                          num_workers=self.args.num_workers)

    def train_dataloader(self):
        return self._loader(self.train_data, shuffle=True,
                            batch_size=self.train_batchsize)

    def val_dataloader(self):
        return self._loader(self.valid_data, shuffle=False,
                            batch_size=self.valid_batchsize)

    def predict_dataloader(self):
        return self._loader(self.test_data, shuffle=False,
                            batch_size=self.valid_batchsize)
| 7,832 | 36.84058 | 114 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/hubert/hubert_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def add_data_specific_args(parent_args):
    """Register Hubert dataset CLI options on `parent_args`.

    BUGFIX: the boolean options previously used `type=bool`, which makes
    argparse treat any non-empty string -- including "False" -- as True.
    They now use a real string->bool converter.
    """
    import argparse

    def _str2bool(value):
        # Accept the usual textual spellings of a boolean.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')

    parser = parent_args.add_argument_group('Hubert Dataset')
    parser.add_argument('--data', type=str)
    parser.add_argument('--sample_rate', type=float, default=16000)
    parser.add_argument('--label_dir', type=str)
    parser.add_argument('--labels', type=str, nargs='+')
    parser.add_argument('--label_rate', type=float)
    parser.add_argument('--max_keep_size', type=int, default=None)
    parser.add_argument('--min_sample_size', type=int)
    parser.add_argument('--max_sample_size', type=int)
    parser.add_argument('--pad_audio', type=_str2bool)
    parser.add_argument('--normalize', type=_str2bool)
    parser.add_argument('--random_crop', type=_str2bool)
    parser.add_argument('--single_target', type=_str2bool, default=False)
    return parent_args
def load_audio(manifest_path, max_keep, min_keep):
    """Read a tab-separated audio manifest (first line = audio root dir).

    Returns (root, kept_names, kept_indices, total_entries, kept_sizes),
    dropping entries shorter than `min_keep` or longer than `max_keep`
    samples.
    """
    n_long, n_short = 0, 0
    names, inds, sizes = [], [], []
    # BUGFIX: `tot` was only assigned inside the loop (`tot = ind + 1`),
    # raising UnboundLocalError for an empty manifest.
    tot = 0
    with open(manifest_path) as f:
        root = f.readline().strip()
        for ind, line in enumerate(f):
            items = line.strip().split("\t")
            assert len(items) == 2, line
            sz = int(items[1])
            if min_keep is not None and sz < min_keep:
                n_short += 1
            elif max_keep is not None and sz > max_keep:
                n_long += 1
            else:
                names.append(items[0])
                inds.append(ind)
                sizes.append(sz)
            tot = ind + 1
    logger.info(
        (
            f"max_keep={max_keep}, min_keep={min_keep}, "
            f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
            # BUGFIX: max()/min() raised ValueError when every entry was filtered out.
            f"longest-loaded={max(sizes, default=0)}, shortest-loaded={min(sizes, default=0)}"
        )
    )
    return root, names, inds, tot, sizes
def load_label(label_path, inds, tot):
    """Read all label lines and keep only those at positions `inds`.

    `tot` is the expected number of lines and is validated against the file.
    """
    with open(label_path) as f:
        all_labels = [line.rstrip() for line in f]
    assert (
        len(all_labels) == tot
    ), f"number of labels does not match ({len(all_labels)} != {tot})"
    return [all_labels[i] for i in inds]
def load_label_offset(label_path, inds, tot):
    """Compute (start, end) byte offsets of each selected label line.

    Offsets are measured in encoded utf-8 bytes so callers can seek()
    directly into the file instead of holding all labels in memory.
    """
    with open(label_path) as f:
        byte_lengths = [len(line.encode("utf-8")) for line in f]
    assert (
        len(byte_lengths) == tot
    ), f"number of labels does not match ({len(byte_lengths)} != {tot})"
    bounds = list(itertools.accumulate([0] + byte_lengths))
    return [(bounds[i], bounds[i + 1]) for i in inds]
def verify_label_lengths(
    audio_sizes,
    audio_rate,
    label_path,
    label_rate,
    inds,
    tot,
    tol=0.1,  # tolerance in seconds
):
    """Warn when an audio clip and its frame-level label differ in duration.

    Sequence labels (label_rate < 0) have no fixed frame rate and are skipped.
    """
    if label_rate < 0:
        logger.info(f"{label_path} is sequence label. skipped")
        return

    with open(label_path) as f:
        lengths = [len(line.rstrip().split()) for line in f]
    assert len(lengths) == tot
    lengths = [lengths[i] for i in inds]

    mismatched = 0
    for pos, ind in enumerate(inds):
        dur_from_audio = audio_sizes[pos] / audio_rate
        dur_from_label = lengths[pos] / label_rate
        if abs(dur_from_audio - dur_from_label) <= tol:
            continue
        logger.warning(
            (
                f"audio and label duration differ too much "
                f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
                f"in line {ind+1} of {label_path}. Check if `label_rate` "
                f"is correctly set (currently {label_rate}). "
                f"num. of samples = {audio_sizes[pos]}; "
                f"label length = {lengths[pos]}"
            )
        )
        mismatched += 1
    if mismatched > 0:
        logger.warning(
            f"total {mismatched} (audio, label) pairs with mismatched lengths"
        )
class HubertDataset(FairseqDataset):
    """Raw-audio dataset for HuBERT pretraining.

    Pairs waveforms listed in a tab-separated manifest with one or more label
    streams, which may be frame-level (label_rate > 0) or sequence-level
    (label_rate == -1).
    """

    def __init__(
        self,
        manifest_path: str,
        sample_rate: float,
        label_paths: List[str],
        label_rates: Union[List[float], float],  # -1 for sequence labels
        pad_list: List[str],
        eos_list: List[str],
        label_processors: Optional[List[Any]] = None,
        max_keep_sample_size: Optional[int] = None,
        min_keep_sample_size: Optional[int] = None,
        max_sample_size: Optional[int] = None,
        shuffle: bool = True,
        pad_audio: bool = False,
        normalize: bool = False,
        store_labels: bool = True,
        random_crop: bool = False,
        single_target: bool = False,
    ):
        self.audio_root, self.audio_names, inds, tot, self.sizes = load_audio(
            manifest_path, max_keep_sample_size, min_keep_sample_size
        )
        self.sample_rate = sample_rate
        self.shuffle = shuffle
        self.random_crop = random_crop

        self.num_labels = len(label_paths)
        self.pad_list = pad_list
        self.eos_list = eos_list
        self.label_processors = label_processors
        self.single_target = single_target
        # A scalar label_rate applies to every label stream.
        self.label_rates = (
            [label_rates for _ in range(len(label_paths))]
            if isinstance(label_rates, float)
            else label_rates
        )
        # Either keep all labels in memory, or remember byte offsets and seek
        # into the label files lazily in get_label().
        self.store_labels = store_labels
        if store_labels:
            self.label_list = [load_label(p, inds, tot) for p in label_paths]
        else:
            self.label_paths = label_paths
            self.label_offsets_list = [
                load_label_offset(p, inds, tot) for p in label_paths
            ]
        assert label_processors is None or len(label_processors) == self.num_labels
        for label_path, label_rate in zip(label_paths, self.label_rates):
            verify_label_lengths(
                self.sizes, sample_rate, label_path, label_rate, inds, tot
            )

        self.max_sample_size = (
            max_sample_size if max_sample_size is not None else sys.maxsize
        )
        self.pad_audio = pad_audio
        self.normalize = normalize
        logger.info(
            f"pad_audio={pad_audio}, random_crop={random_crop}, "
            f"normalize={normalize}, max_sample_size={self.max_sample_size}"
        )

    def get_audio(self, index):
        # Load the waveform lazily from disk; soundfile is imported locally so
        # the module can be imported without it installed.
        import soundfile as sf

        wav_path = os.path.join(self.audio_root, self.audio_names[index])
        wav, cur_sample_rate = sf.read(wav_path)
        wav = torch.from_numpy(wav).float()
        wav = self.postprocess(wav, cur_sample_rate)
        return wav

    def get_label(self, index, label_idx):
        # Either read from the in-memory list or seek into the label file
        # using the precomputed byte offsets.
        if self.store_labels:
            label = self.label_list[label_idx][index]
        else:
            with open(self.label_paths[label_idx]) as f:
                offset_s, offset_e = self.label_offsets_list[label_idx][index]
                f.seek(offset_s)
                label = f.read(offset_e - offset_s)

        if self.label_processors is not None:
            label = self.label_processors[label_idx](label)
        return label

    def get_labels(self, index):
        # One label per configured label stream.
        return [self.get_label(index, i) for i in range(self.num_labels)]

    def __getitem__(self, index):
        wav = self.get_audio(index)
        labels = self.get_labels(index)
        return {"id": index, "source": wav, "label_list": labels}

    def __len__(self):
        return len(self.sizes)

    def crop_to_max_size(self, wav, target_size):
        # Crop `wav` down to `target_size` samples (randomly positioned when
        # random_crop is enabled); returns (crop, start_offset).
        size = len(wav)
        diff = size - target_size
        if diff <= 0:
            return wav, 0

        start, end = 0, target_size
        if self.random_crop:
            start = np.random.randint(0, diff + 1)
            end = size - diff + start
        return wav[start:end], start

    def collater(self, samples):
        # target = max(sizes) -> random_crop not used
        # target = max_sample_size -> random_crop used for long
        samples = [s for s in samples if s["source"] is not None]
        if len(samples) == 0:
            return {}

        audios = [s["source"] for s in samples]
        audio_sizes = [len(s) for s in audios]
        # With pad_audio, pad everything up to the longest clip (capped);
        # otherwise crop everything down to the shortest clip (capped).
        if self.pad_audio:
            audio_size = min(max(audio_sizes), self.max_sample_size)
        else:
            audio_size = min(min(audio_sizes), self.max_sample_size)
        collated_audios, padding_mask, audio_starts = self.collater_audio(
            audios, audio_size
        )

        targets_by_label = [
            [s["label_list"][i] for s in samples] for i in range(self.num_labels)
        ]
        targets_list, lengths_list, ntokens_list = self.collater_label(
            targets_by_label, audio_size, audio_starts
        )

        net_input = {"source": collated_audios, "padding_mask": padding_mask}
        batch = {
            "id": torch.LongTensor([s["id"] for s in samples]),
            "net_input": net_input,
        }

        if self.single_target:
            batch["target_lengths"] = lengths_list[0]
            batch["ntokens"] = ntokens_list[0]
            batch["target"] = targets_list[0]
        else:
            batch["target_lengths_list"] = lengths_list
            batch["ntokens_list"] = ntokens_list
            batch["target_list"] = targets_list
        return batch

    def collater_audio(self, audios, audio_size):
        # Pack variable-length waveforms into a (batch, audio_size) tensor.
        collated_audios = audios[0].new_zeros(len(audios), audio_size)
        padding_mask = (
            torch.BoolTensor(collated_audios.shape).fill_(False)
            # if self.pad_audio else None
        )
        audio_starts = [0 for _ in audios]
        for i, audio in enumerate(audios):
            diff = len(audio) - audio_size
            if diff == 0:
                collated_audios[i] = audio
            elif diff < 0:
                # Shorter than the batch size: right-pad with zeros and mark
                # the padded tail in the mask.
                assert self.pad_audio
                collated_audios[i] = torch.cat([audio, audio.new_full((-diff,), 0.0)])
                padding_mask[i, diff:] = True
            else:
                # Longer than the batch size: crop (randomly if enabled).
                collated_audios[i], audio_starts[i] = self.crop_to_max_size(
                    audio, audio_size
                )
        return collated_audios, padding_mask, audio_starts

    def collater_frm_label(self, targets, audio_size, audio_starts, label_rate, pad):
        # Align frame-level labels with the (possibly cropped) audio segments.
        assert label_rate > 0
        s2f = label_rate / self.sample_rate  # samples -> frames conversion
        frm_starts = [int(round(s * s2f)) for s in audio_starts]
        frm_size = int(round(audio_size * s2f))
        if not self.pad_audio:
            rem_size = [len(t) - s for t, s in zip(targets, frm_starts)]
            frm_size = min(frm_size, *rem_size)
        targets = [t[s: s + frm_size] for t, s in zip(targets, frm_starts)]
        logger.debug(f"audio_starts={audio_starts}")
        logger.debug(f"frame_starts={frm_starts}")
        logger.debug(f"frame_size={frm_size}")

        lengths = torch.LongTensor([len(t) for t in targets])
        ntokens = lengths.sum().item()
        targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
        return targets, lengths, ntokens

    def collater_seq_label(self, targets, pad):
        # Sequence labels need no audio alignment; just pad to a batch.
        lengths = torch.LongTensor([len(t) for t in targets])
        ntokens = lengths.sum().item()
        targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
        return targets, lengths, ntokens

    def collater_label(self, targets_by_label, audio_size, audio_starts):
        # Collate each label stream; label_rate == -1.0 marks sequence labels.
        targets_list, lengths_list, ntokens_list = [], [], []
        itr = zip(targets_by_label, self.label_rates, self.pad_list)
        for targets, label_rate, pad in itr:
            if label_rate == -1.0:
                targets, lengths, ntokens = self.collater_seq_label(targets, pad)
            else:
                targets, lengths, ntokens = self.collater_frm_label(
                    targets, audio_size, audio_starts, label_rate, pad
                )
            targets_list.append(targets)
            lengths_list.append(lengths)
            ntokens_list.append(ntokens)
        return targets_list, lengths_list, ntokens_list

    def num_tokens(self, index):
        return self.size(index)

    def size(self, index):
        # Effective sample size after the optional crop cap.
        if self.pad_audio:
            return self.sizes[index]
        return min(self.sizes[index], self.max_sample_size)

    def ordered_indices(self):
        # Random (or natural) order, tie-broken by descending size so batches
        # contain similarly sized clips.
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]

        order.append(self.sizes)
        return np.lexsort(order)[::-1]

    def postprocess(self, wav, cur_sample_rate):
        # Down-mix stereo to mono, enforce the configured sample rate, and
        # optionally layer-normalize the waveform.
        if wav.dim() == 2:
            wav = wav.mean(-1)
        assert wav.dim() == 1, wav.dim()

        if cur_sample_rate != self.sample_rate:
            raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")

        if self.normalize:
            with torch.no_grad():
                wav = F.layer_norm(wav, wav.shape)
        return wav
| 13,124 | 35.256906 | 86 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/clip_dataloader/flickr.py | from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
from transformers import BertTokenizer
import pytorch_lightning as pl
from PIL import Image
import os
class flickr30k_CNA(Dataset):
    """Flickr30k-CNA image/caption dataset.

    Reads a tab-separated annotation file where the first column is
    `<image_key>#<n>` and the second column is the Chinese caption, and
    yields (transformed image, tokenized caption, image key) triples.
    """
    def __init__(self, img_root_path,
                 annot_path,
                 transform=None):
        self.root = img_root_path
        self.images, self.captions, self.labels = [], [], []
        with open(annot_path, 'r') as annot_file:
            for raw_line in annot_file:
                fields = raw_line.strip().split('\t')
                key = fields[0].split('#')[0]
                self.images.append(key + '.jpg')
                self.captions.append(fields[1])
                self.labels.append(key)
        self.transforms = transform
        self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
        # NOTE: context length used by the large model
        self.context_length = 77
    def __len__(self):
        return len(self.images)
    def __getitem__(self, idx):
        img_path = str(self.images[idx])
        image = self.transforms(Image.open(os.path.join(self.root, img_path)))
        text = self.tokenizer(str(self.captions[idx]), max_length=self.context_length,
                              padding='max_length', truncation=True, return_tensors='pt')['input_ids'][0]
        label = self.labels[idx]
        return image, text, label
def _convert_to_rgb(image):
    """Force a PIL image into RGB mode (no-op for images already in RGB)."""
    rgb_image = image.convert('RGB')
    return rgb_image
def image_transform(
        image_size: int,
        is_train: bool,
        mean=(0.48145466, 0.4578275, 0.40821073),
        std=(0.26862954, 0.26130258, 0.27577711)
):
    """Build the CLIP-style torchvision preprocessing pipeline.

    Training uses a random resized crop for augmentation; evaluation uses a
    deterministic resize + center crop. Both end with RGB conversion,
    tensor conversion and normalization.
    """
    normalize = Normalize(mean=mean, std=std)
    if is_train:
        steps = [
            RandomResizedCrop(image_size, scale=(0.9, 1.0),
                              interpolation=InterpolationMode.BICUBIC),
        ]
    else:
        steps = [
            Resize(image_size, interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
        ]
    steps += [_convert_to_rgb, ToTensor(), normalize]
    return Compose(steps)
class FlickrDataModule(pl.LightningDataModule):
    """Lightning data module wiring the Flickr30k-CNA splits to dataloaders."""
    def __init__(self, args):
        # NOTE(review): super().__init__() is never called; the two private
        # Lightning attributes below are assigned by hand instead — confirm
        # this matches the pinned pytorch-lightning version.
        self.batch_size = args.batch_size
        self.train_filename = args.train_filename  # NOTE: annotation file
        self.train_root = args.train_root  # NOTE: image directory
        self.val_filename = args.val_filename
        self.val_root = args.val_root
        self.test_filename = args.test_filename
        self.test_root = args.test_root
        self.pretrain_model = args.pretrain_model
        self.image_size = 224
        self.prepare_data_per_node = True
        self._log_hyperparams = False
        self.num_workers = args.num_workers
    def setup(self, stage=None):
        # One dataset per split; only training gets random augmentation.
        self.train_dataset = flickr30k_CNA(self.train_root, self.train_filename,
                                           transform=image_transform(224, True))
        self.val_dataset = flickr30k_CNA(self.val_root, self.val_filename,
                                         transform=image_transform(224, False))
        self.test_dataset = flickr30k_CNA(self.test_root, self.test_filename,
                                          transform=image_transform(224, False))
    def _split_loader(self, dataset):
        # Shared DataLoader settings for every split (no shuffling).
        return DataLoader(dataset, batch_size=self.batch_size, num_workers=self.num_workers)
    def train_dataloader(self):
        return self._split_loader(self.train_dataset)
    def val_dataloader(self):
        return self._split_loader(self.val_dataset)
    def test_dataloader(self):
        return self._split_loader(self.test_dataset)
| 3,812 | 34.971698 | 112 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/megatron_dataloader/bart_dataset.py | """BART Style dataset. Modified from fairseq."""
import numpy as np
import torch
import math
import re
from fengshen.data.megatron_dataloader.dataset_utils import (
get_samples_mapping
)
class BartDataset(torch.utils.data.Dataset):
    """BART-style denoising dataset (modified from fairseq's DenoisingDataset).

    Each item takes a span of consecutive sentences from an indexed dataset,
    corrupts it with sentence permutation and whole-word masking, and returns
    a dict of input_ids / labels / attention_mask for seq2seq denoising.
    """
    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed, tokenizer, zh_tokenizer):
        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length
        # Dataset.
        self.indexed_dataset = indexed_dataset
        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping(self.indexed_dataset,
                                                   data_prefix,
                                                   num_epochs,
                                                   max_num_samples,
                                                   self.max_seq_length - 3,  # account for added tokens
                                                   short_seq_prob,
                                                   self.seed,
                                                   self.name,
                                                   False)
        # Vocab stuff.
        self.vocab_size = tokenizer.vocab_size
        inv_vocab = {v: k for k, v in tokenizer.vocab.items()}
        self.vocab_id_list = list(inv_vocab.keys())
        self.vocab_id_to_token_dict = inv_vocab
        self.cls_id = tokenizer.cls_token_id
        self.sep_id = tokenizer.sep_token_id
        self.mask_id = tokenizer.mask_token_id
        self.pad_id = tokenizer.pad_token_id
        self.tokenizer = tokenizer
        # Sentence-boundary punctuation; a [SEP] is inserted after each of
        # these so sentence permutation has boundaries to work with.
        seg_tokens = ['。', ';', ';', '!', '!', '?', '?']
        seg_token_ids = []
        for t in seg_tokens:
            if t in tokenizer.vocab:
                seg_token_ids.append(tokenizer.vocab[t])
            else:
                print('seg_token "{}" not in vocab'.format(t))
        self.seg_token_ids = set(seg_token_ids)
        self.zh_tokenizer = zh_tokenizer
        # Denoising ratios
        self.permute_sentence_ratio = 1.0
        self.mask_ratio = masked_lm_prob  # 0.15
        self.random_ratio = 0.1
        self.insert_ratio = 0.0
        self.rotate_ratio = 0.0
        self.mask_whole_word = 1
        self.item_transform_func = None
        self.mask_span_distribution = None
        # NOTE(review): the Poisson span-length distribution below is dead
        # code (guarded by `if False`); unit-length spans are used instead.
        if False:
            _lambda = 3  # Poisson lambda
            lambda_to_the_k = 1
            e_to_the_minus_lambda = math.exp(-_lambda)
            k_factorial = 1
            ps = []
            for k in range(0, 128):
                ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
                lambda_to_the_k *= _lambda
                k_factorial *= k + 1
                if ps[-1] < 0.0000001:
                    break
            ps = torch.FloatTensor(ps)
            self.mask_span_distribution = torch.distributions.Categorical(ps)
    def __len__(self):
        return self.samples_mapping.shape[0]
    def __getitem__(self, idx):
        """Fetch the idx-th sentence span and build a corrupted sample."""
        start_idx, end_idx, seq_length = self.samples_mapping[idx]
        sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
        np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32))
        return self.build_training_sample(sample, self.max_seq_length, np_rng)
    def build_training_sample(self, sample, max_seq_length, np_rng):
        """Build training sample.
        Arguments:
            sample: A list of sentences in which each sentence is a list token ids.
            max_seq_length: Desired sequence length.
            np_rng: Random number generator. Note that this rng state should be
                  numpy and not python since python randint is inclusive for
                  the upper bound whereas the numpy one is exclusive.
        """
        # permute sentences
        full_stops = []
        tokens = [self.cls_id]
        for sent in sample:
            for t in sent:
                token = self.vocab_id_to_token_dict[t]
                if len(re.findall('##[\u4E00-\u9FA5]', token)) > 0:
                    # Compatible with Erlangshen's "##"-prefix scheme for
                    # whole word masking: strip the prefix back to the raw id.
                    t = self.tokenizer.convert_tokens_to_ids(token[2:])
                tokens.append(t)
                if t in self.seg_token_ids:
                    # Sentence boundary: insert a [SEP].
                    tokens.append(self.sep_id)
        if tokens[-1] != self.sep_id:
            tokens.append(self.sep_id)
        if len(tokens) > max_seq_length:
            tokens = tokens[:max_seq_length]
            tokens[-1] = self.sep_id
        tokens = torch.LongTensor(tokens)
        full_stops = (tokens == self.sep_id).long()
        assert (max_seq_length - tokens.shape[0]) >= 0, (tokens.size(), tokens[-1], max_seq_length)
        source, target = tokens, tokens[1:].clone()
        use_decoder = 1
        # if torch.rand(1).item() < 0.5:
        #     use_decoder = 0
        if self.permute_sentence_ratio > 0.0 and use_decoder == 1:
            source = self.permute_sentences(source, full_stops, self.permute_sentence_ratio)
        if self.mask_ratio > 0.0:
            # With a decoder, mask twice as aggressively but keep span length 1.
            replace_length = 1 if use_decoder else -1
            mask_ratio = self.mask_ratio * 2 if use_decoder else self.mask_ratio
            source = self.add_whole_word_mask(source, mask_ratio, replace_length)
        if self.insert_ratio > 0.0:
            raise NotImplementedError
            source = self.add_insertion_noise(source, self.insert_ratio)
        if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:
            raise NotImplementedError
            source = self.add_rolling_noise(source)
        # there can additional changes to make:
        if self.item_transform_func is not None:
            source, target = self.item_transform_func(source, target)
        assert (source >= 0).all()
        # assert (source[1:-1] >= 1).all()
        assert (source <= self.vocab_size).all()
        assert source[0] == self.cls_id
        assert source[-1] == self.sep_id
        # tokenizer = get_tokenizer()
        # print(' '.join(tokenizer.tokenizer.convert_ids_to_tokens(source)))
        # print(tokenizer.detokenize(target))
        # print(tokenizer.detokenize(source))
        # print()
        prev_output_tokens = torch.zeros_like(target)
        prev_output_tokens[0] = self.sep_id  # match the preprocessing in fairseq
        prev_output_tokens[1:] = target[:-1]
        # src_padding_length = max_seq_length - source.shape[0]
        # tgt_padding_length = max_seq_length - target.shape[0]
        # assert src_padding_length >= 0, (source.size(), source[-1], max_seq_length)
        # assert tgt_padding_length >= 0, (target.size(), target[-1], max_seq_length)
        # Right-pad source with pad_id and labels with -100 (ignored by the loss).
        source_ = torch.full((max_seq_length,), self.pad_id, dtype=torch.long)
        source_[:source.shape[0]] = source
        target_ = torch.full((max_seq_length,), -100, dtype=torch.long)
        # decoder not need bos in the front
        target_[:target.shape[0]] = target
        prev_output_tokens_ = torch.full((max_seq_length,), self.pad_id, dtype=torch.long)
        prev_output_tokens_[:prev_output_tokens.shape[0]] = prev_output_tokens
        return {
            "input_ids": source_,
            "labels": target_,
            # "decoder_input_ids": prev_output_tokens_,
            "attention_mask": (source_ != self.pad_id).long()
        }
    def permute_sentences(self, source, full_stops, p=1.0):
        """Randomly reorder the sentences of `source` (fraction `p` of them)."""
        # Tokens that are full stops, where the previous token is not
        # NOTE(review): `full_stops` is a long tensor here, so `~` is bitwise
        # NOT (0 -> -1, 1 -> -2) and the product is non-zero at EVERY full
        # stop, regardless of the previous token — confirm against the
        # fairseq original, which applies this expression to a boolean mask.
        sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2
        result = source.clone()
        num_sentences = sentence_ends.size(0)
        num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
        substitutions = torch.randperm(num_sentences)[:num_to_permute]
        ordering = torch.arange(0, num_sentences)
        ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
        # Ignore <bos> at start
        index = 1
        for i in ordering:
            sentence = source[(sentence_ends[i - 1] if i > 0 else 1): sentence_ends[i]]
            result[index: index + sentence.size(0)] = sentence
            index += sentence.size(0)
        return result
    def word_starts_en(self, source):
        """English variant of the word-start mask.

        NOTE(review): __init__ sets self.mask_whole_word to the int 1, for
        which `.gather` would fail — this method appears to be unused; verify
        before relying on it.
        """
        if self.mask_whole_word is not None:
            is_word_start = self.mask_whole_word.gather(0, source)
        else:
            is_word_start = torch.ones(source.size())
        is_word_start[0] = 0
        is_word_start[-1] = 0
        return is_word_start
    def word_starts(self, source):
        """Compute word-start positions using the Chinese word tokenizer.

        Returns (is_word_start, word_starts) where `is_word_start` marks the
        first wordpiece of every word and `word_starts` lists the indices of
        Chinese word starts (column vector).
        """
        if self.mask_whole_word is None:
            is_word_start = torch.ones(source.size())
            is_word_start[0] = 0
            is_word_start[-1] = 0
            return is_word_start
        raw_tokens = [self.vocab_id_to_token_dict[i] for i in source.tolist()]
        # Re-segment the interior text into words (keep [CLS]/[SEP] intact).
        words = [raw_tokens[0]] + \
            self.zh_tokenizer(''.join(raw_tokens[1:-1]), HMM=True) + [raw_tokens[-1]]
        def _is_chinese_char(c):
            """Checks whether CP is the codepoint of a CJK character."""
            # This defines a "chinese character" as anything in the CJK Unicode block:
            #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
            #
            # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
            # despite its name. The modern Korean Hangul alphabet is a different block,
            # as is Japanese Hiragana and Katakana. Those alphabets are used to write
            # space-separated words, so they are not treated specially and handled
            # like the all of the other languages.
            if len(c) > 1:
                return all([_is_chinese_char(c_i) for c_i in c])
            cp = ord(c)
            if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                    (cp >= 0x3400 and cp <= 0x4DBF) or  #
                    (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                    (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                    (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                    (cp >= 0x2B820 and cp <= 0x2CEAF) or
                    (cp >= 0xF900 and cp <= 0xFAFF) or  #
                    (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
                return True
            return False
        def align_linear(atokens, btokens):
            # Map each token of `atokens` to the indices of the `btokens`
            # (words) its characters fall into, by linear character offset.
            a2c = []
            c2b = []
            a2b = []
            length = 0
            for tok in atokens:
                a2c.append([length + i for i in range(len(tok))])
                length += len(tok)
            for i, tok in enumerate(btokens):
                c2b.extend([i for _ in range(len(tok))])
            for i, amap in enumerate(a2c):
                bmap = [c2b[ci] for ci in amap]
                a2b.append(list(set(bmap)))
            return a2b
        raw_to_word_align = align_linear(raw_tokens, words)
        is_word_start = torch.zeros(source.size())
        word_starts = []
        skip_cur_word = True
        for i in range(1, len(raw_to_word_align)):
            if raw_to_word_align[i-1] == raw_to_word_align[i]:
                # not a word start, as they align to the same word
                if not skip_cur_word and not _is_chinese_char(raw_tokens[i]):
                    word_starts.pop(-1)
                    skip_cur_word = True
                continue
            else:
                is_word_start[i] = 1
                if _is_chinese_char(raw_tokens[i]):
                    word_starts.append(i)
                    skip_cur_word = False
        is_word_start[0] = 0
        is_word_start[-1] = 0
        word_starts = torch.tensor(word_starts).long().view(-1, 1)
        return is_word_start, word_starts
    def add_whole_word_mask(self, source, p, replace_length=1):
        """Mask whole words in `source` at rate `p`.

        replace_length: 0 deletes masked tokens, 1 replaces each with a single
        [MASK], -1 keeps one [MASK] per original token. A fraction
        self.random_ratio of masked positions gets a random token instead.
        """
        is_word_start, word_starts = self.word_starts(source)
        # Mask both whole words and (10% as many) single characters.
        num_to_mask_word = int(math.ceil(word_starts.size(0) * p))
        num_to_mask_char = int(math.ceil(word_starts.size(0) * p * 0.1))
        num_to_mask = num_to_mask_word + num_to_mask_char
        if num_to_mask > word_starts.size(0):
            word_starts = is_word_start.nonzero(as_tuple=False)
        num_inserts = 0
        if num_to_mask == 0:
            return source
        if self.mask_span_distribution is not None:
            lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
            # Make sure we have enough to mask
            cum_length = torch.cumsum(lengths, 0)
            while cum_length[-1] < num_to_mask:
                lengths = torch.cat(
                    [
                        lengths,
                        self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),
                    ],
                    dim=0,
                )
                cum_length = torch.cumsum(lengths, 0)
            # Trim to masking budget
            i = 0
            while cum_length[i] < num_to_mask:
                i += 1
            lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
            num_to_mask = i + 1
            lengths = lengths[:num_to_mask]
            # Handle 0-length mask (inserts) separately
            lengths = lengths[lengths > 0]
            num_inserts = num_to_mask - lengths.size(0)
            num_to_mask -= num_inserts
            if num_to_mask == 0:
                return self.add_insertion_noise(source, num_inserts / source.size(0))
            assert (lengths > 0).all()
        else:
            lengths = torch.ones((num_to_mask,)).long()
        assert is_word_start[-1] == 0
        indices = word_starts[
            torch.randperm(word_starts.size(0))[:num_to_mask]
        ].squeeze(1)
        mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
        source_length = source.size(0)
        assert source_length - 1 not in indices
        to_keep = torch.ones(source_length, dtype=torch.bool)
        is_word_start[
            -1
        ] = 255  # acts as a long length, so spans don't go over the end of doc
        if replace_length == 0:
            to_keep[indices] = 0
        else:
            # keep index, but replace it with [MASK]
            # print(source.size(), word_starts.size(), indices.size(), mask_random.size())
            source[indices] = self.mask_id
            source[indices[mask_random]] = torch.randint(
                1, self.vocab_size, size=(mask_random.sum(),)
            )
            # sorted_indices = torch.sort(indices)[0]
            # continue_mask_pos = ((sorted_indices + 1)[:-1] == sorted_indices[1:])
            # continue_mask_indices = sorted_indices[1:][continue_mask_pos]
            # to_keep[continue_mask_indices] = 0
        # for char indices, we already masked, the following loop handles word mask
        indices = indices[:num_to_mask_word]
        mask_random = mask_random[:num_to_mask_word]
        if self.mask_span_distribution is not None:
            assert len(lengths.size()) == 1
            assert lengths.size() == indices.size()
            lengths -= 1
            while indices.size(0) > 0:
                assert lengths.size() == indices.size()
                lengths -= is_word_start[indices + 1].long()
                uncompleted = lengths >= 0
                indices = indices[uncompleted] + 1
                mask_random = mask_random[uncompleted]
                lengths = lengths[uncompleted]
                if replace_length != -1:
                    # delete token
                    to_keep[indices] = 0
                else:
                    # keep index, but replace it with [MASK]
                    source[indices] = self.mask_id
                    source[indices[mask_random]] = torch.randint(
                        1, self.vocab_size, size=(mask_random.sum(),)
                    )
        else:
            # A bit faster when all lengths are 1
            while indices.size(0) > 0:
                # Advance each span until the next word start is reached.
                uncompleted = is_word_start[indices + 1] == 0
                indices = indices[uncompleted] + 1
                mask_random = mask_random[uncompleted]
                if replace_length != -1:
                    # delete token
                    to_keep[indices] = 0
                else:
                    # keep index, but replace it with [MASK]
                    source[indices] = self.mask_id
                    source[indices[mask_random]] = torch.randint(
                        1, self.vocab_size, size=(mask_random.sum(),)
                    )
                assert source_length - 1 not in indices
        source = source[to_keep]
        if num_inserts > 0:
            source = self.add_insertion_noise(source, num_inserts / source.size(0))
        return source
    def add_permuted_noise(self, tokens, p):
        """Randomly permute a fraction `p` of the interior tokens."""
        num_words = len(tokens)
        num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
        substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
        tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
        return tokens
    def add_rolling_noise(self, tokens):
        """Rotate the interior tokens by a random offset (BOS/EOS kept fixed)."""
        offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
        tokens = torch.cat(
            (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
            dim=0,
        )
        return tokens
    def add_insertion_noise(self, tokens, p):
        """Insert ceil(len * p) noise tokens ([MASK] or random ids) at random
        positions; self.random_ratio of the insertions are random ids."""
        if p == 0.0:
            return tokens
        num_tokens = len(tokens)
        n = int(math.ceil(num_tokens * p))
        noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
        noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
        noise_mask[noise_indices] = 1
        result = torch.LongTensor(n + len(tokens)).fill_(-1)
        num_random = int(math.ceil(n * self.random_ratio))
        result[noise_indices[num_random:]] = self.mask_id
        result[noise_indices[:num_random]] = torch.randint(
            low=1, high=self.vocab_size, size=(num_random,)
        )
        result[~noise_mask] = tokens
        assert (result >= 0).all()
        return result
| 18,396 | 40.434685 | 103 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/megatron_dataloader/dataset_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, and NVIDIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Most of the code here has been copied from:
# https://github.com/google-research/albert/blob/master/create_pretraining_data.py
# with some modifications.
import math
import time
import collections
import numpy as np
import re
from fengshen.data.megatron_dataloader.utils import (
print_rank_0
)
from fengshen.data.megatron_dataloader.blendable_dataset import BlendableDataset
from fengshen.data.megatron_dataloader.indexed_dataset import make_dataset as make_indexed_dataset
# Identifiers for the supported pretraining dataset flavors.
DSET_TYPE_BERT = 'standard_bert'
DSET_TYPE_ICT = 'ict'
DSET_TYPE_T5 = 't5'
DSET_TYPE_BERT_CN_WWM = 'bert_cn_wwm'  # BERT with Chinese whole-word masking
DSET_TYPE_BART = 'bart'
DSET_TYPE_COCOLM = 'coco_lm'
# All valid values for the `dataset_type` argument accepted below.
DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_ICT,
              DSET_TYPE_T5, DSET_TYPE_BERT_CN_WWM,
              DSET_TYPE_BART, DSET_TYPE_COCOLM]
def get_datasets_weights_and_num_samples(data_prefix,
                                         train_valid_test_num_samples):
    """Parse an interleaved weight/prefix list into normalized weights and
    per-dataset sample counts.

    Args:
        data_prefix: flat sequence in the format
            ``weight-1, data-prefix-1, weight-2, data-prefix-2, ...``
            (length must be even; weights are parseable as floats).
        train_valid_test_num_samples: [train, valid, test] sample counts
            requested for the blended dataset.

    Returns:
        (prefixes, weights, datasets_train_valid_test_num_samples) where
        `weights` sums to 1 and each per-dataset count is the requested
        count scaled by its weight plus a 0.5% safety margin.
    """
    # The data prefix should be in the format of:
    #   weight-1, data-prefix-1, weight-2, data-prefix-2, ...
    assert len(data_prefix) % 2 == 0
    weights = [float(w) for w in data_prefix[0::2]]
    prefixes = [p.strip() for p in data_prefix[1::2]]
    # Normalize weights so they sum to one.
    weight_sum = sum(weights)
    assert weight_sum > 0.0
    weights = [weight / weight_sum for weight in weights]
    # Add 0.5% (the 1.005 factor) so in case the blending dataset does
    # not uniformly distribute the number of samples, we still have
    # samples left to feed to the network.
    datasets_train_valid_test_num_samples = [
        [int(math.ceil(val * weight * 1.005))
         for val in train_valid_test_num_samples]
        for weight in weights
    ]
    return prefixes, weights, datasets_train_valid_test_num_samples
def compile_helper():
    """Compile helper function at runtime. Make sure this
    is invoked on a single process."""
    import os
    import subprocess
    # Run `make` in this module's directory to build the C++ helpers.
    path = os.path.abspath(os.path.dirname(__file__))
    ret = subprocess.run(['make', '-C', path])
    if ret.returncode != 0:
        # Abort the whole process: the dataset code cannot run without them.
        print("Making C++ dataset helpers module failed, exiting.")
        import sys
        sys.exit(1)
def get_a_and_b_segments(sample, np_rng):
"""Divide sample into a and b segments."""
# Number of sentences in the sample.
n_sentences = len(sample)
# Make sure we always have two sentences.
assert n_sentences > 1, 'make sure each sample has at least two sentences.'
# First part:
# `a_end` is how many sentences go into the `A`.
a_end = 1
if n_sentences >= 3:
# Note that randin in numpy is exclusive.
a_end = np_rng.randint(1, n_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
# Second part:
tokens_b = []
for j in range(a_end, n_sentences):
tokens_b.extend(sample[j])
# Random next:
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
    """Trim ``tokens_a``/``tokens_b`` in place until their combined length
    fits ``max_num_tokens``; returns True iff anything was removed.

    Each round the currently longer segment loses one token, taken from
    its front or back with equal probability (the BERT recipe).
    """
    assert len_a > 0
    if len_a + len_b <= max_num_tokens:
        return False
    while len_a + len_b > max_num_tokens:
        # Shrink whichever segment is currently longer.
        if len_a > len_b:
            len_a -= 1
            victim = tokens_a
        else:
            len_b -= 1
            victim = tokens_b
        # Drop from the front or the back with equal probability.
        if np_rng.random() < 0.5:
            del victim[0]
        else:
            victim.pop()
    return True
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
    """Merge segments A and B, add [CLS] and [SEP] and build tokentypes.

    Layout: ``[CLS] A... [SEP]`` with token type 0, followed by
    ``B... [SEP]`` with token type 1 when B is non-empty.

    Args:
        tokens_a: token ids of segment A.
        tokens_b: token ids of segment B (may be empty).
        cls_id: id of the [CLS] token.
        sep_id: id of the [SEP] token.

    Returns:
        (tokens, tokentypes) as plain lists of equal length.
    """
    # Segment A, bracketed by [CLS] ... [SEP], all with token type 0.
    tokens = [cls_id] + list(tokens_a) + [sep_id]
    tokentypes = [0] * len(tokens)
    # Segment B plus its trailing [SEP] carries token type 1; an empty B
    # gets neither tokens nor a second separator.
    if tokens_b:
        tokens += list(tokens_b) + [sep_id]
        tokentypes += [1] * (len(tokens_b) + 1)
    return tokens, tokentypes
# Record of one masked prediction: `index` is the masked position (or a list
# of positions for a span) and `label` the original token id(s) to predict.
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
                                          ["index", "label"])
def is_start_piece(piece):
    """Check if the current word piece is the starting piece (BERT)."""
    # When a word has been split into WordPieces, the first token has no
    # marker and every subsequent token is prefixed with "##", so a piece
    # starts a word exactly when it lacks that prefix.
    is_continuation = piece.startswith("##")
    return not is_continuation
def create_masked_lm_predictions(tokens,
                                 vocab_id_list, vocab_id_to_token_dict,
                                 masked_lm_prob,
                                 cls_id, sep_id, mask_id,
                                 max_predictions_per_seq,
                                 np_rng,
                                 tokenizer,
                                 max_ngrams=3,
                                 do_whole_word_mask=True,
                                 favor_longer_ngram=False,
                                 do_permutation=False,
                                 geometric_dist=False,
                                 masking_style="bert",
                                 zh_tokenizer=None):
    """Creates the predictions for the masked LM objective.
    Note: Tokens here are vocab ids and not text tokens.

    Returns (output_tokens, masked_lm_positions, masked_lm_labels,
    token_boundary, masked_spans).
    NOTE(review): the early return for masked_lm_prob == 0 yields a 4-tuple
    (no masked_spans) while the normal path yields a 5-tuple — confirm all
    callers handle both shapes.
    """
    cand_indexes = []
    # Note(mingdachen): We create a list for recording if the piece is
    # the starting piece of current token, where 1 means true, so that
    # on-the-fly whole word masking is possible.
    token_boundary = [0] * len(tokens)
    # If no Chinese word tokenizer is given, rely on the "##" prefix
    # convention alone to group wordpieces into words.
    if zh_tokenizer is None:
        for (i, token) in enumerate(tokens):
            if token == cls_id or token == sep_id:
                token_boundary[i] = 1
                continue
            # Whole Word Masking means that if we mask all of the wordpieces
            # corresponding to an original word.
            #
            # Note that Whole Word Masking does *not* change the training code
            # at all -- we still predict each WordPiece independently, softmaxed
            # over the entire vocabulary.
            if (do_whole_word_mask and len(cand_indexes) >= 1 and
                    not is_start_piece(vocab_id_to_token_dict[token])):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])
                if is_start_piece(vocab_id_to_token_dict[token]):
                    token_boundary[i] = 1
    else:
        # A Chinese tokenizer was supplied: segment the text into words
        # first, then decide the wordpiece grouping from that segmentation.
        # Recover the raw text with [CLS]/[SEP] stripped.
        raw_tokens = []
        for t in tokens:
            if t != cls_id and t != sep_id:
                raw_tokens.append(t)
        raw_tokens = [vocab_id_to_token_dict[i] for i in raw_tokens]
        # Segment, then record for each leading character the length of the
        # longest word starting with it.
        word_list = set(zh_tokenizer(''.join(raw_tokens), HMM=True))
        word_length_dict = {}
        for w in word_list:
            if len(w) < 1:
                continue
            if w[0] not in word_length_dict:
                word_length_dict[w[0]] = len(w)
            elif word_length_dict[w[0]] < len(w):
                word_length_dict[w[0]] = len(w)
        i = 0
        # Scan the token stream, matching against the segmented word list.
        while i < len(tokens):
            token_id = tokens[i]
            token = vocab_id_to_token_dict[token_id]
            if len(token) == 0 or token_id == cls_id or token_id == sep_id:
                token_boundary[i] = 1
                i += 1
                continue
            word_max_length = 1
            if token[0] in word_length_dict:
                word_max_length = word_length_dict[token[0]]
            j = 0
            word = ''
            word_end = i+1
            # Backward compatibility with the old "##" convention: if the
            # following pieces start with "##", fuse them into one word.
            old_style = False
            while word_end < len(tokens) and vocab_id_to_token_dict[tokens[word_end]].startswith('##'):
                old_style = True
                word_end += 1
            if not old_style:
                while j < word_max_length and i+j < len(tokens):
                    cur_token = tokens[i+j]
                    word += vocab_id_to_token_dict[cur_token]
                    j += 1
                    if word in word_list:
                        word_end = i+j
            cand_indexes.append([p for p in range(i, word_end)])
            token_boundary[i] = 1
            i = word_end
    output_tokens = list(tokens)
    # add by ganruyi
    if masking_style == 'bert-cn-wwm':
        # if non chinese is False, that means it is chinese
        # then try to remove "##" which is added previously
        new_token_ids = []
        for token_id in output_tokens:
            token = tokenizer.convert_ids_to_tokens([token_id])[0]
            if len(re.findall('##[\u4E00-\u9FA5]', token)) > 0:
                token = token[2:]
            new_token_id = tokenizer.convert_tokens_to_ids([token])[
                0]
            new_token_ids.append(new_token_id)
        output_tokens = new_token_ids
    masked_lm_positions = []
    masked_lm_labels = []
    if masked_lm_prob == 0:
        return (output_tokens, masked_lm_positions,
                masked_lm_labels, token_boundary)
    num_to_predict = min(max_predictions_per_seq,
                         max(1, int(round(len(tokens) * masked_lm_prob))))
    ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
    if not geometric_dist:
        # Note(mingdachen):
        # By default, we set the probilities to favor shorter ngram sequences.
        pvals = 1. / np.arange(1, max_ngrams + 1)
        pvals /= pvals.sum(keepdims=True)
        if favor_longer_ngram:
            pvals = pvals[::-1]
    # For each word position, collect the candidate n-grams (lists of word
    # index groups) that start there.
    ngram_indexes = []
    for idx in range(len(cand_indexes)):
        ngram_index = []
        for n in ngrams:
            ngram_index.append(cand_indexes[idx:idx + n])
        ngram_indexes.append(ngram_index)
    np_rng.shuffle(ngram_indexes)
    (masked_lms, masked_spans) = ([], [])
    covered_indexes = set()
    for cand_index_set in ngram_indexes:
        if len(masked_lms) >= num_to_predict:
            break
        if not cand_index_set:
            continue
        # Note(mingdachen):
        # Skip current piece if they are covered in lm masking or previous ngrams.
        for index_set in cand_index_set[0]:
            for index in index_set:
                if index in covered_indexes:
                    continue
        if not geometric_dist:
            n = np_rng.choice(ngrams[:len(cand_index_set)],
                              p=pvals[:len(cand_index_set)] /
                              pvals[:len(cand_index_set)].sum(keepdims=True))
        else:
            # Sampling "n" from the geometric distribution and clipping it to
            # the max_ngrams. Using p=0.2 default from the SpanBERT paper
            # https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
            n = min(np_rng.geometric(0.2), max_ngrams)
        index_set = sum(cand_index_set[n - 1], [])
        n -= 1
        # Note(mingdachen):
        # Repeatedly looking for a candidate that does not exceed the
        # maximum number of predictions by trying shorter ngrams.
        while len(masked_lms) + len(index_set) > num_to_predict:
            if n == 0:
                break
            index_set = sum(cand_index_set[n - 1], [])
            n -= 1
        # If adding a whole-word mask would exceed the maximum number of
        # predictions, then just skip this candidate.
        if len(masked_lms) + len(index_set) > num_to_predict:
            continue
        is_any_index_covered = False
        for index in index_set:
            if index in covered_indexes:
                is_any_index_covered = True
                break
        if is_any_index_covered:
            continue
        for index in index_set:
            covered_indexes.add(index)
            masked_token = None
            if masking_style == "bert":
                # 80% of the time, replace with [MASK]
                if np_rng.random() < 0.8:
                    masked_token = mask_id
                else:
                    # 10% of the time, keep original
                    if np_rng.random() < 0.5:
                        masked_token = tokens[index]
                    # 10% of the time, replace with random word
                    else:
                        masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
            elif masking_style == 'bert-cn-wwm':
                # 80% of the time, replace with [MASK]
                if np_rng.random() < 0.8:
                    masked_token = mask_id
                else:
                    # 10% of the time, keep original
                    if np_rng.random() < 0.5:
                        # For Chinese whole-word masking, strip the "##"
                        # prefix from the kept token.
                        token_id = tokens[index]
                        token = tokenizer.convert_ids_to_tokens([token_id])[
                            0]
                        if len(re.findall('##[\u4E00-\u9FA5]', token)) > 0:
                            token = token[2:]
                        new_token_id = tokenizer.convert_tokens_to_ids([token])[
                            0]
                        masked_token = new_token_id
                    # 10% of the time, replace with random word
                    else:
                        masked_token = vocab_id_list[np_rng.randint(
                            0, len(vocab_id_list))]
            elif masking_style == "t5":
                masked_token = mask_id
            else:
                raise ValueError("invalid value of masking style")
            output_tokens[index] = masked_token
            masked_lms.append(MaskedLmInstance(
                index=index, label=tokens[index]))
        masked_spans.append(MaskedLmInstance(
            index=index_set,
            label=[tokens[index] for index in index_set]))
    assert len(masked_lms) <= num_to_predict
    np_rng.shuffle(ngram_indexes)
    select_indexes = set()
    if do_permutation:
        # Additionally select (and later permute) a second batch of n-grams,
        # disjoint from the masked ones.
        for cand_index_set in ngram_indexes:
            if len(select_indexes) >= num_to_predict:
                break
            if not cand_index_set:
                continue
            # Note(mingdachen):
            # Skip current piece if they are covered in lm masking or previous ngrams.
            for index_set in cand_index_set[0]:
                for index in index_set:
                    if index in covered_indexes or index in select_indexes:
                        continue
            n = np.random.choice(ngrams[:len(cand_index_set)],
                                 p=pvals[:len(cand_index_set)] /
                                 pvals[:len(cand_index_set)].sum(keepdims=True))
            index_set = sum(cand_index_set[n - 1], [])
            n -= 1
            while len(select_indexes) + len(index_set) > num_to_predict:
                if n == 0:
                    break
                index_set = sum(cand_index_set[n - 1], [])
                n -= 1
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(select_indexes) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes or index in select_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                select_indexes.add(index)
        assert len(select_indexes) <= num_to_predict
        select_indexes = sorted(select_indexes)
        permute_indexes = list(select_indexes)
        np_rng.shuffle(permute_indexes)
        orig_token = list(output_tokens)
        for src_i, tgt_i in zip(select_indexes, permute_indexes):
            output_tokens[src_i] = orig_token[tgt_i]
            masked_lms.append(MaskedLmInstance(
                index=src_i, label=orig_token[src_i]))
    masked_lms = sorted(masked_lms, key=lambda x: x.index)
    # Sort the spans by the index of the first span
    masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
    for p in masked_lms:
        masked_lm_positions.append(p.index)
        masked_lm_labels.append(p.label)
    return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans)
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
                             masked_labels, pad_id, max_seq_length):
    """Right-pad the sequences to ``max_seq_length`` and convert to numpy.

    Returns a 5-tuple of int64 arrays:
    ``(tokens, tokentypes, labels, padding_mask, loss_mask)``.
    Positions that were not masked get a label of -1 and a loss mask of 0.
    """
    # Sanity: the sequence must fit, and the parallel lists must agree.
    seq_len = len(tokens)
    pad_len = max_seq_length - seq_len
    assert pad_len >= 0
    assert len(tokentypes) == seq_len
    assert len(masked_positions) == len(masked_labels)

    # Tokens and token types share the same pad filler.
    pad_block = [pad_id] * pad_len
    tokens_np = np.array(tokens + pad_block, dtype=np.int64)
    tokentypes_np = np.array(tokentypes + pad_block, dtype=np.int64)

    # Attention mask: 1 over real tokens, 0 over padding.
    padding_mask_np = np.array([1] * seq_len + [0] * pad_len,
                               dtype=np.int64)

    # Labels default to -1 (ignored); only masked positions contribute loss.
    labels = [-1] * max_seq_length
    loss_mask = [0] * max_seq_length
    for pos, lab in zip(masked_positions, masked_labels):
        assert pos < seq_len
        labels[pos] = lab
        loss_mask[pos] = 1
    labels_np = np.array(labels, dtype=np.int64)
    loss_mask_np = np.array(loss_mask, dtype=np.int64)

    return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                    train_valid_test_num_samples,
                                    max_seq_length,
                                    masked_lm_prob, short_seq_prob, seed,
                                    tokenizer,
                                    skip_warmup, binary_head=False,
                                    max_seq_length_dec=None,
                                    dataset_type='standard_bert',
                                    zh_tokenizer=None,
                                    span=None):
    """Build train/valid/test datasets, blending several prefixes if given.

    With a single ``data_prefix`` this delegates directly to
    ``_build_train_valid_test_datasets``.  With several prefixes it builds
    one dataset per prefix and wraps each non-empty split in a
    ``BlendableDataset`` using the weights encoded in ``data_prefix``.
    Returns a ``(train, valid, test)`` tuple; entries may be ``None``.
    """

    if len(data_prefix) == 1:
        return _build_train_valid_test_datasets(data_prefix[0],
                                                data_impl, splits_string,
                                                train_valid_test_num_samples,
                                                max_seq_length, masked_lm_prob,
                                                short_seq_prob, seed,
                                                skip_warmup,
                                                binary_head,
                                                max_seq_length_dec,
                                                tokenizer,
                                                dataset_type=dataset_type,
                                                zh_tokenizer=zh_tokenizer,
                                                span=span)
    # Blending dataset.
    # Parse the values.
    output = get_datasets_weights_and_num_samples(data_prefix,
                                                  train_valid_test_num_samples)
    prefixes, weights, datasets_train_valid_test_num_samples = output

    # Build individual datasets.
    train_datasets = []
    valid_datasets = []
    test_datasets = []
    for i in range(len(prefixes)):
        train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
            prefixes[i], data_impl, splits_string,
            datasets_train_valid_test_num_samples[i],
            max_seq_length, masked_lm_prob, short_seq_prob,
            seed, skip_warmup, binary_head, max_seq_length_dec,
            tokenizer, dataset_type=dataset_type, zh_tokenizer=zh_tokenizer,
            # Bug fix: ``span`` was previously dropped in this multi-prefix
            # path (the single-prefix path forwards it), so COCO-LM span
            # masking silently fell back to None for blended datasets.
            span=span)
        if train_ds:
            train_datasets.append(train_ds)
        if valid_ds:
            valid_datasets.append(valid_ds)
        if test_ds:
            test_datasets.append(test_ds)

    # Blend: each split is combined with the same dataset weights.
    blending_train_dataset = None
    if train_datasets:
        blending_train_dataset = BlendableDataset(train_datasets, weights)
    blending_valid_dataset = None
    if valid_datasets:
        blending_valid_dataset = BlendableDataset(valid_datasets, weights)
    blending_test_dataset = None
    if test_datasets:
        blending_test_dataset = BlendableDataset(test_datasets, weights)

    return (blending_train_dataset, blending_valid_dataset,
            blending_test_dataset)
def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                     train_valid_test_num_samples,
                                     max_seq_length,
                                     masked_lm_prob, short_seq_prob, seed,
                                     skip_warmup, binary_head,
                                     max_seq_length_dec,
                                     tokenizer,
                                     dataset_type='standard_bert',
                                     zh_tokenizer=None,
                                     span=None):
    """Build the train/valid/test datasets for a single data prefix.

    The document index of the underlying indexed dataset is partitioned
    according to ``splits_string``, and one dataset object per split is
    created; the concrete class (BERT / BART / COCO-LM) is selected via
    ``dataset_type``.  Returns a ``(train, valid, test)`` tuple in which an
    entry is ``None`` when its split is empty.
    """

    if dataset_type not in DSET_TYPES:
        raise ValueError("Invalid dataset_type: ", dataset_type)

    # Indexed dataset.
    indexed_dataset = get_indexed_dataset_(data_prefix,
                                           data_impl,
                                           skip_warmup)

    # Get start and end indices of train/valid/train into doc-idx
    # Note that doc-idx is designed to be num-docs + 1 so we can
    # easily iterate over it.
    total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
    splits = get_train_valid_test_split_(splits_string, total_num_of_documents)

    # Print stats about the splits.
    print_rank_0(' > dataset split:')

    def print_split_stats(name, index):
        # Log both the document-level and sentence-level extent of a split.
        print_rank_0('    {}:'.format(name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(splits[index], splits[index + 1],
                                        splits[index + 1] - splits[index]))
        start_index = indexed_dataset.doc_idx[splits[index]]
        end_index = indexed_dataset.doc_idx[splits[index + 1]]
        print_rank_0('     sentence indices in [{}, {}) total of {} '
                     'sentences'.format(start_index, end_index,
                                        end_index - start_index))
    print_split_stats('train', 0)
    print_split_stats('validation', 1)
    print_split_stats('test', 2)

    def build_dataset(index, name):
        # Imported lazily to avoid circular imports with the dataset modules.
        from fengshen.data.megatron_dataloader.bert_dataset import BertDataset
        from fengshen.data.megatron_dataloader.bart_dataset import BartDataset
        from fengshen.data.megatron_dataloader.cocolm_dataset import COCOLMDataset
        dataset = None
        if splits[index + 1] > splits[index]:
            # Get the pointer to the original doc-idx so we can set it later.
            doc_idx_ptr = indexed_dataset.get_doc_idx()
            # Slice the doc-idx
            start_index = splits[index]
            # Add +1 so we can index into the dataset to get the upper bound.
            end_index = splits[index + 1] + 1
            # New doc_idx view.
            indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
            # Build the dataset accordingly.
            kwargs = dict(
                name=name,
                data_prefix=data_prefix,
                num_epochs=None,
                max_num_samples=train_valid_test_num_samples[index],
                max_seq_length=max_seq_length,
                seed=seed,
            )
            if dataset_type == DSET_TYPE_BERT or dataset_type == DSET_TYPE_BERT_CN_WWM:
                dataset = BertDataset(
                    indexed_dataset=indexed_dataset,
                    masked_lm_prob=masked_lm_prob,
                    short_seq_prob=short_seq_prob,
                    binary_head=binary_head,
                    # Extra argument distinguishing plain BERT masking from
                    # Chinese whole-word masking (bert-cn-wwm).
                    tokenizer=tokenizer,
                    masking_style='bert' if dataset_type == DSET_TYPE_BERT else 'bert-cn-wwm',
                    **kwargs
                )
            elif dataset_type == DSET_TYPE_BART:
                dataset = BartDataset(
                    indexed_dataset=indexed_dataset,
                    masked_lm_prob=masked_lm_prob,
                    short_seq_prob=short_seq_prob,
                    tokenizer=tokenizer,
                    zh_tokenizer=zh_tokenizer,
                    **kwargs
                )
            elif dataset_type == DSET_TYPE_COCOLM:
                dataset = COCOLMDataset(
                    indexed_dataset=indexed_dataset,
                    masked_lm_prob=masked_lm_prob,
                    short_seq_prob=short_seq_prob,
                    tokenizer=tokenizer,
                    masking_style='bert',
                    span=span,
                    **kwargs
                )
            else:
                raise NotImplementedError(
                    "Dataset type not fully implemented.")
            # Set the original pointer so dataset remains the main dataset.
            indexed_dataset.set_doc_idx(doc_idx_ptr)
            # Checks.
            assert indexed_dataset.doc_idx[0] == 0
            assert indexed_dataset.doc_idx.shape[0] == \
                (total_num_of_documents + 1)
        return dataset

    train_dataset = build_dataset(0, 'train')
    valid_dataset = build_dataset(1, 'valid')
    test_dataset = build_dataset(2, 'test')

    return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
    """Load the indexed dataset for ``data_prefix`` and log basic stats."""
    print_rank_0(' > building dataset index ...')
    t0 = time.time()
    indexed_dataset = make_indexed_dataset(data_prefix,
                                           data_impl,
                                           skip_warmup)
    # The number of sentences must equal the last document offset.
    assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(time.time() - t0))

    print_rank_0(' > indexed dataset stats:')
    print_rank_0('    number of documents: {}'.format(
        indexed_dataset.doc_idx.shape[0] - 1))
    print_rank_0('    number of sentences: {}'.format(
        indexed_dataset.sizes.shape[0]))

    return indexed_dataset
def get_train_valid_test_split_(splits_string, size):
    """ Get dataset splits from comma or '/' separated string list."""
    # Accept "a,b,c", "a/b/c", or a single number; missing entries become 0.
    if ',' in splits_string:
        parts = splits_string.split(',')
    elif '/' in splits_string:
        parts = splits_string.split('/')
    else:
        parts = [splits_string]
    splits = [float(p) for p in parts]
    splits += [0.] * (3 - len(splits))
    splits = splits[:3]

    # Normalize to fractions of the total.
    total = sum(splits)
    assert total > 0.0
    fractions = [s / total for s in splits]

    # Cumulative boundaries, rounded per split.
    splits_index = [0]
    for frac in fractions:
        splits_index.append(splits_index[-1] +
                            int(round(frac * float(size))))

    # Rounding may overshoot/undershoot: shift every boundary after the
    # first so the final one lands exactly on ``size``.
    diff = splits_index[-1] - size
    for i in range(1, len(splits_index)):
        splits_index[i] -= diff
    assert len(splits_index) == 4
    assert splits_index[-1] == size
    return splits_index
def get_samples_mapping(indexed_dataset,
                        data_prefix,
                        num_epochs,
                        max_num_samples,
                        max_seq_length,
                        short_seq_prob,
                        seed,
                        name,
                        binary_head):
    """Get a list that maps a sample index to a starting
    sentence index, end sentence index, and length"""

    # At least one of num_epochs / max_num_samples must be provided; the
    # missing one is replaced by an effectively unbounded sentinel.
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping: encodes every parameter that affects
    # the mapping so different configurations never share a cache file.
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'

    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    # ganruyi comment
    # counts = torch.cuda.LongTensor([1])
    # torch.distributed.all_reduce(
    #     counts, group=mpu.get_data_parallel_group())
    # torch.distributed.all_reduce(
    #     counts, group=mpu.get_pipeline_model_parallel_group())
    # assert counts[0].item() == (
    #     torch.distributed.get_world_size() //
    #     torch.distributed.get_world_size(
    #         group=mpu.get_tensor_model_parallel_group()))

    # Load indexed dataset.  The mapping file is assumed to already exist
    # (precomputed offline); mmap_mode='r' avoids loading it into memory.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(
        indexmap_filename, allow_pickle=True, mmap_mode='r')
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

    return samples_mapping
| 30,965 | 38.247148 | 103 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/megatron_dataloader/utils.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def print_rank_0(message):
    """If distributed is initialized, print only on rank 0."""
    # Outside a distributed run there is only one process: always print.
    if not torch.distributed.is_initialized():
        print(message, flush=True)
    elif torch.distributed.get_rank() == 0:
        print(message, flush=True)
| 903 | 35.16 | 74 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/megatron_dataloader/bert_dataset.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Style dataset."""
import numpy as np
import torch
from fengshen.data.megatron_dataloader.dataset_utils import (
get_samples_mapping,
get_a_and_b_segments,
create_masked_lm_predictions,
create_tokens_and_tokentypes,
)
class BertDataset(torch.utils.data.Dataset):
    """Map-style dataset producing masked-LM (and optional NSP) samples.

    Each item is built on the fly from a span of sentences of an underlying
    indexed dataset, using a precomputed samples mapping.
    """

    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed, binary_head, tokenizer, masking_style):

        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length
        self.short_seq_prob = short_seq_prob
        self.binary_head = binary_head
        self.masking_style = masking_style

        # Dataset.
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping(self.indexed_dataset,
                                                   data_prefix,
                                                   num_epochs,
                                                   max_num_samples,
                                                   # account for added tokens
                                                   self.max_seq_length - 3,
                                                   short_seq_prob,
                                                   self.seed,
                                                   self.name,
                                                   self.binary_head)

        # Inverse vocabulary (id -> token) used for random-word replacement.
        inv_vocab = {v: k for k, v in tokenizer.vocab.items()}
        self.vocab_id_list = list(inv_vocab.keys())
        self.vocab_id_to_token_dict = inv_vocab
        # Special-token ids used when constructing training samples.
        self.cls_id = tokenizer.cls_token_id
        self.sep_id = tokenizer.sep_token_id
        self.mask_id = tokenizer.mask_token_id
        self.pad_id = tokenizer.pad_token_id
        self.tokenizer = tokenizer

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):
        start_idx, end_idx, seq_length = self.samples_mapping[idx]
        sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
        np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32))
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length,  # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, np_rng,
                                     self.binary_head,
                                     tokenizer=self.tokenizer,
                                     masking_style=self.masking_style)
def build_training_sample(sample,
                          target_seq_length, max_seq_length,
                          vocab_id_list, vocab_id_to_token_dict,
                          cls_id, sep_id, mask_id, pad_id,
                          masked_lm_prob, np_rng, binary_head,
                          tokenizer,
                          masking_style='bert'):
    """Build training sample.

    Arguments:
        sample: A list of sentences in which each sentence is a list token ids.
        target_seq_length: Desired sequence length.
        max_seq_length: Maximum length of the sequence. All values are padded to
            this length.
        vocab_id_list: List of vocabulary ids. Used to pick a random id.
        vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
        cls_id: Start of example id.
        sep_id: Separator id.
        mask_id: Mask token id.
        pad_id: Padding token id.
        masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
            numpy and not python since python randint is inclusive for
            the upper bound whereas the numpy one is exclusive.
    """

    if binary_head:
        # We assume that we have at least two sentences in the sample
        assert len(sample) > 1
    assert target_seq_length <= max_seq_length

    # Divide sample into two segments (A and B).
    if binary_head:
        tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample,
                                                                  np_rng)
    else:
        # Single-segment case: concatenate all sentences into segment A.
        tokens_a = []
        for j in range(len(sample)):
            tokens_a.extend(sample[j])
        tokens_b = []
        is_next_random = False

    # Leave room for the special tokens added below.
    if len(tokens_a) >= max_seq_length-3:
        tokens_a = tokens_a[:max_seq_length-3]

    # Truncate to `target_sequence_length`.
    max_num_tokens = target_seq_length
    ''''
    truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a),
                                  len(tokens_b), max_num_tokens, np_rng)
    '''

    # Build tokens and toketypes.
    tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b,
                                                      cls_id, sep_id)

    # Masking.
    # NOTE(review): this is a float, not an int; downstream code is assumed
    # to tolerate a fractional cap — confirm against create_masked_lm_predictions.
    max_predictions_per_seq = masked_lm_prob * max_num_tokens
    (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
        tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob,
        cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng,
        tokenizer=tokenizer,
        masking_style=masking_style)

    # Padding.
    tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \
        = pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
                                   masked_labels, pad_id, max_seq_length)

    train_sample = {
        'input_ids': tokens_np,
        'token_type_ids': tokentypes_np,
        'labels': labels_np,
        'next_sentence_label': int(is_next_random),
        'attention_mask': padding_mask_np}
    return train_sample
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
                             masked_labels, pad_id, max_seq_length):
    """Pad sequences and convert them to numpy.

    Returns int64 arrays ``(tokens, tokentypes, labels, padding_mask,
    loss_mask)``; non-masked positions get a label of -100 (the value
    ignored by the loss) and a loss mask of 0.
    """
    # Sanity checks: the sequence fits and the parallel lists line up.
    num_tokens = len(tokens)
    assert num_tokens <= max_seq_length
    assert len(tokentypes) == num_tokens
    assert len(masked_positions) == len(masked_labels)

    pad_width = max_seq_length - num_tokens

    # Tokens and token types padded with pad_id.
    tokens_np = np.array(tokens + [pad_id] * pad_width, dtype=np.int64)
    tokentypes_np = np.array(tokentypes + [pad_id] * pad_width,
                             dtype=np.int64)

    # Attention mask: ones over real tokens, zeros over padding.
    padding_mask_np = np.array([1] * num_tokens + [0] * pad_width,
                               dtype=np.int64)

    # Labels and loss mask: only masked slots contribute to the loss.
    labels_np = np.full(max_seq_length, -100, dtype=np.int64)
    loss_mask_np = np.zeros(max_seq_length, dtype=np.int64)
    for pos, lab in zip(masked_positions, masked_labels):
        assert pos < num_tokens
        labels_np[pos] = lab
        loss_mask_np[pos] = 1

    return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
| 8,121 | 40.228426 | 94 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/megatron_dataloader/blendable_dataset.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Blendable dataset."""
import time
import numpy as np
import torch
from fengshen.data.megatron_dataloader.utils import print_rank_0
class BlendableDataset(torch.utils.data.Dataset):
    """Weighted blend of several datasets that behaves as a single dataset.

    A per-sample routing table (which dataset, which sample within it) is
    precomputed by the compiled ``helpers`` extension according to the
    normalized weights.
    """

    def __init__(self, datasets, weights):

        self.datasets = datasets
        num_datasets = len(datasets)
        assert num_datasets == len(weights)

        # Total size is the plain sum of the component dataset sizes.
        self.size = 0
        for dataset in self.datasets:
            self.size += len(dataset)

        # Normalize weights.
        weights = np.array(weights, dtype=np.float64)
        sum_weights = np.sum(weights)
        assert sum_weights > 0.0
        weights /= sum_weights

        # Build indices.  dataset_index is uint8, hence the < 255 limit.
        start_time = time.time()
        assert num_datasets < 255
        self.dataset_index = np.zeros(self.size, dtype=np.uint8)
        self.dataset_sample_index = np.zeros(self.size, dtype=np.int64)

        # The C++ helper fills both index arrays in place.
        from fengshen.data.megatron_dataloader import helpers
        helpers.build_blending_indices(self.dataset_index,
                                       self.dataset_sample_index,
                                       weights, num_datasets, self.size,
                                       torch.distributed.get_rank() == 0)

        print_rank_0('> elapsed time for building blendable dataset indices: '
                     '{:.2f} (sec)'.format(time.time() - start_time))

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # Route the global index to the chosen dataset and local sample.
        dataset_idx = self.dataset_index[idx]
        sample_idx = self.dataset_sample_index[idx]
        return self.datasets[dataset_idx][sample_idx]
| 2,208 | 32.984615 | 78 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/megatron_dataloader/indexed_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate
import numpy as np
import torch
from fengshen.data.megatron_dataloader.utils import print_rank_0
def __best_fitting_dtype(vocab_size=None):
    # uint16 suffices for vocabularies below the 65500-id cutoff;
    # otherwise (or when the size is unknown) fall back to int32.
    if vocab_size is None or vocab_size >= 65500:
        return np.int32
    return np.uint16
def get_available_dataset_impl():
    """Names of the supported dataset implementations."""
    impls = ['lazy', 'cached', 'mmap']
    return impls
def infer_dataset_impl(path):
    """Guess the dataset implementation by inspecting the index-file magic."""
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print("Path should be a basename that both .idx and "
              ".bin can be appended to get full filenames.")
        return None
    with open(index_file_path(path), 'rb') as f:
        magic = f.read(8)
    # The first 8 bytes identify the on-disk format.
    if magic == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder: mmap-backed or the legacy format."""
    if impl != 'mmap':
        return IndexedDatasetBuilder(out_file)
    # mmap builders pick the smallest dtype that fits the vocabulary.
    return MMapIndexedDatasetBuilder(
        out_file, dtype=__best_fitting_dtype(vocab_size))
def make_dataset(path, impl, skip_warmup=False):
    """Instantiate the dataset at ``path`` using implementation ``impl``."""
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print("Path should be a basename that both .idx "
              "and .bin can be appended to get full filenames.")
        return None
    if impl == 'infer':
        impl = infer_dataset_impl(path)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path, skip_warmup)
    print(f"Unknown dataset implementation: {impl}")
    return None
def dataset_exists(path, impl):
    """True when both the .idx and .bin files for ``path`` exist."""
    dataset_cls = MMapIndexedDataset if impl == 'mmap' else IndexedDataset
    return dataset_cls.exists(path)
def read_longs(f, n):
    """Read ``n`` int64 values from the binary stream ``f``."""
    out = np.empty(n, dtype=np.int64)
    # readinto fills the array buffer directly, without an extra copy.
    f.readinto(out)
    return out
def write_longs(f, a):
    """Write the values of ``a`` to the binary stream ``f`` as int64."""
    data = np.array(a, dtype=np.int64)
    f.write(data)
# Codes stored in the index-file header to identify the on-disk dtype.
# NOTE: code 6 historically mapped to ``np.float``, which was an alias of
# the builtin ``float``; the alias was removed in NumPy 1.24, so the
# builtin is used directly — the mapping is unchanged and old index files
# stay readable.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: float,
    7: np.double,
    8: np.uint16
}
def code(dtype):
    """Return the index-file code for ``dtype``; ValueError if unsupported."""
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Path of the index (.idx) file for a dataset prefix."""
    return '{}.idx'.format(prefix_path)
def data_file_path(prefix_path):
    """Path of the data (.bin) file for a dataset prefix."""
    return '{}.bin'.format(prefix_path)
def create_doc_idx(sizes):
    """Document boundaries: an empty sentence (size 0) ends a document."""
    doc_idx = [0]
    doc_idx.extend(i + 1 for i, s in enumerate(sizes) if s == 0)
    return doc_idx
class IndexedDataset(torch.utils.data.Dataset):
    """Loader for IndexedDataset"""
    # Magic bytes identifying the legacy (TNT) index format.
    _HDR_MAGIC = b'TNTIDX\x00\x00'

    def __init__(self, path):
        super().__init__()
        self.path = path
        self.data_file = None
        self.read_index(path)

    def read_index(self, path):
        # Parse the .idx header and load the offset/size tables into memory.
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                'Index file doesn\'t match expected format. '
                'Make sure that --dataset-impl is configured properly.'
            )
            version = f.read(8)
            assert struct.unpack('<Q', version) == (1,)
            code, self.element_size = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            self._len, self.s = struct.unpack('<QQ', f.read(16))
            # NOTE(review): unpack returns a 1-tuple here; read_longs passes
            # it to np.empty which accepts it as a shape — confirm intended.
            self.doc_count = struct.unpack('<Q', f.read(8))
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)
            self.doc_idx = read_longs(f, self.doc_count)

    def read_data(self, path):
        # Unbuffered so seek/readinto hit the file directly.
        self.data_file = open(data_file_path(path), 'rb', buffering=0)

    def check_index(self, i):
        if i < 0 or i >= self._len:
            raise IndexError('index out of range')

    def __del__(self):
        if self.data_file:
            self.data_file.close()

    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        # The data file is opened lazily on first access.
        if not self.data_file:
            self.read_data(self.path)
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[
                self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            return a
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError(
                    "Slices into indexed_dataset must be contiguous")
            sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
            size = sum(sizes)
            # One contiguous read, then split back into per-sentence arrays.
            a = np.empty(size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[start] * self.element_size)
            self.data_file.readinto(a)
            offsets = list(accumulate(sizes))
            sents = np.split(a, offsets[:-1])
            return sents

    def __len__(self):
        return self._len

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    @staticmethod
    def exists(path):
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(
                data_file_path(path))
        )

    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant serving items from a prefetched RAM cache."""

    def __init__(self, path):
        super().__init__(path)
        self.cache = None
        self.cache_index = {}

    @property
    def supports_prefetch(self):
        return True

    def prefetch(self, indices):
        # Load the requested items into one contiguous in-memory buffer;
        # cache_index maps item index -> offset within that buffer.
        if all(i in self.cache_index for i in indices):
            return
        if not self.data_file:
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += self.data_offsets[i + 1] - self.data_offsets[i]
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            # Slice is a view into the cache; readinto fills it in place.
            a = self.cache[ptx: ptx + size]
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None

    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[
                self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            # KeyError here means the index was never prefetched.
            ptx = self.cache_index[i]
            np.copyto(a, self.cache[ptx: ptx + a.size])
            return a
        elif isinstance(idx, slice):
            # Hack just to make this work, can optimize later if necessary
            sents = []
            for i in range(*idx.indices(len(self))):
                sents.append(self[i])
            return sents
class IndexedDatasetBuilder(object):
    """Writer for the legacy (cached/lazy) indexed dataset format.

    Appends tensors to a .bin data file while tracking element offsets,
    per-dimension sizes and document boundaries; ``finalize`` writes the
    companion .idx index file.
    """

    # Bytes per element for each supported dtype.
    # Bug fix: ``np.float`` (an alias of the builtin ``float``) was removed
    # in NumPy 1.24, making this class body raise AttributeError at import
    # time; the builtin is used directly, which preserves the exact key.
    # NOTE(review): the historical size of 4 for ``float`` is kept as-is
    # for on-disk format compatibility, even though float64 is 8 bytes.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        float: 4,
        np.double: 8
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]   # cumulative element offsets, one per item
        self.dim_offsets = [0]    # cumulative offsets into self.sizes
        self.sizes = []           # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]
        self.doc_idx = [0]        # size-table index at each document boundary

    def add_item(self, tensor):
        """Append one tensor to the data file and record its offsets."""
        bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(
            self.data_offsets[-1] + bytes / self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def end_document(self):
        """Mark a document boundary after the last added item."""
        self.doc_idx.append(len(self.sizes))

    def merge_file_(self, another_file):
        """Append another dataset's index tables and raw data to this one."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype

        # Shift the incoming offsets by what has been written so far.
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)

        # Stream the raw data file across in 1 KiB chunks.
        with open(data_file_path(another_file), 'rb') as f:
            while True:
                data = f.read(1024)
                if data:
                    self.out_file.write(data)
                else:
                    break

    def finalize(self, index_file):
        """Close the data file and write the .idx header plus tables."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(
            self.data_offsets) - 1, len(self.sizes)))
        index.write(struct.pack('<Q', len(self.doc_idx)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        write_longs(index, self.doc_idx)
        index.close()
def _warmup_mmap_file(path):
    # Stream the whole file once in 100 MiB chunks so the OS page cache
    # is warm before the memory map is used.
    chunk_size = 100 * 1024 * 1024
    with open(path, 'rb') as stream:
        while stream.read(chunk_size):
            pass
class MMapIndexedDataset(torch.utils.data.Dataset):
    """Memory-mapped indexed dataset: items are read as zero-copy views
    (np.frombuffer over a memoryview of the mmap) straight from disk."""

    class Index(object):
        # Magic bytes identifying the mmap index format.
        _HDR_MAGIC = b'MMIDIDX\x00\x00'

        @classmethod
        def writer(cls, path, dtype):
            # Returns a context manager that writes the .idx file for the
            # given sizes/doc_idx tables on ``write``.
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')

                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))

                    return self

                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each sequence within the data file.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []

                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size

                    return pointers

                def write(self, sizes, doc_idx):
                    pointers = self._get_pointers(sizes)

                    self._file.write(struct.pack('<Q', len(sizes)))
                    self._file.write(struct.pack('<Q', len(doc_idx)))

                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes

                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers

                    doc_idx = np.array(doc_idx, dtype=np.int64)
                    self._file.write(doc_idx.tobytes(order='C'))

                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()

            return _Writer()

        def __init__(self, path, skip_warmup=False):
            # Read the fixed-size header, then mmap the rest of the file and
            # expose sizes / pointers / doc_idx as zero-copy array views.
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    'Index file doesn\'t match expected format. '
                    'Make sure that --dataset-impl is configured properly.'
                )
                version = struct.unpack('<Q', stream.read(8))
                assert (1,) == version

                dtype_code, = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize

                self._len = struct.unpack('<Q', stream.read(8))[0]
                self._doc_count = struct.unpack('<Q', stream.read(8))[0]
                offset = stream.tell()

            if not skip_warmup:
                print_rank_0("    warming up index mmap file...")
                _warmup_mmap_file(path)

            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            print_rank_0("    reading sizes...")
            self._sizes = np.frombuffer(
                self._bin_buffer,
                dtype=np.int32,
                count=self._len,
                offset=offset)
            print_rank_0("    reading pointers...")
            self._pointers = np.frombuffer(self._bin_buffer,
                                           dtype=np.int64, count=self._len,
                                           offset=offset + self._sizes.nbytes)
            print_rank_0("    reading document index...")
            self._doc_idx = np.frombuffer(
                self._bin_buffer,
                dtype=np.int64, count=self._doc_count,
                offset=offset + self._sizes.nbytes + self._pointers.nbytes)

        def __del__(self):
            # NOTE(review): raises AttributeError if __init__ failed before
            # the mmap was created — confirm acceptable during teardown.
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap

        @property
        def dtype(self):
            return self._dtype

        @property
        def sizes(self):
            return self._sizes

        @property
        def doc_idx(self):
            return self._doc_idx

        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # Returns (byte pointer, element count) for sequence i.
            return self._pointers[i], self._sizes[i]

        def __len__(self):
            return self._len

    def __init__(self, path, skip_warmup=False):
        super().__init__()

        self._path = None
        self._index = None
        self._bin_buffer = None

        self._do_init(path, skip_warmup)

    def __getstate__(self):
        # Pickle only the path; the mmap is rebuilt on unpickling.
        return self._path

    def __setstate__(self, state):
        # NOTE(review): _do_init also takes skip_warmup, so unpickling will
        # raise TypeError as written — confirm whether this path is used.
        self._do_init(state)

    def _do_init(self, path, skip_warmup):
        self._path = path
        self._index = self.Index(index_file_path(self._path), skip_warmup)

        if not skip_warmup:
            print_rank_0("    warming up data mmap file...")
            _warmup_mmap_file(data_file_path(self._path))
        print_rank_0("    creating numpy buffer of mmap...")
        self._bin_buffer_mmap = np.memmap(
            data_file_path(self._path), mode='r', order='C')
        print_rank_0("    creating memory view of numpy buffer...")
        self._bin_buffer = memoryview(self._bin_buffer_mmap)

    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index

    def __len__(self):
        return len(self._index)

    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        if isinstance(idx, int):
            ptr, size = self._index[idx]
            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
                                     count=size, offset=ptr)
            return np_array
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError(
                    "Slices into indexed_dataset must be contiguous")
            # One contiguous view over the span, split per sequence.
            ptr = self._index._pointers[start]
            sizes = self._index._sizes[idx]
            offsets = list(accumulate(sizes))
            total_size = sum(sizes)
            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
                                     count=total_size, offset=ptr)
            sents = np.split(np_array, offsets[:-1])
            return sents

    def get(self, idx, offset=0, length=None):
        """ Retrieves a single item from the dataset with the option to only
        return a portion of the item.

        get(idx) is the same as [idx] but get() does not support slicing.
        """
        ptr, size = self._index[idx]
        if length is None:
            length = size - offset
        # Advance the byte pointer by the requested element offset.
        ptr += offset * np.dtype(self._index.dtype).itemsize
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
                                 count=length, offset=ptr)
        return np_array

    @property
    def sizes(self):
        return self._index.sizes

    @property
    def doc_idx(self):
        return self._index.doc_idx

    def get_doc_idx(self):
        return self._index._doc_idx

    def set_doc_idx(self, doc_idx_):
        # Used by the split builders to temporarily narrow the doc view.
        self._index._doc_idx = doc_idx_

    @property
    def supports_prefetch(self):
        return False

    @staticmethod
    def exists(path):
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(
                data_file_path(path))
        )
class MMapIndexedDatasetBuilder(object):
    """Streams items into a binary data file and writes a matching index.

    Usage: call add_item() per tensor, end_document() at document
    boundaries, optionally merge_file_() to append another dataset, then
    finalize() to close the data file and emit the index file.
    """
    def __init__(self, out_file, dtype=np.int64):
        # Large (5 MB) write buffer: items arrive one small tensor at a time.
        self._data_file = open(out_file, 'wb', buffering=5000000)
        self._dtype = dtype
        # Element count of every added item, in order.
        self._sizes = []
        # Item offsets where each document starts (first doc starts at 0).
        self._doc_idx = [0]
    def add_item(self, tensor):
        """Append one tensor's raw bytes and record its element count."""
        np_array = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(np_array.tobytes(order='C'))
        self._sizes.append(np_array.size)
    def end_document(self):
        """Mark a document boundary at the current item count."""
        self._doc_idx.append(len(self._sizes))
    def merge_file_(self, another_file):
        """Append another indexed dataset's sizes and raw data to this one.

        NOTE(review): the other file's document boundaries are not merged,
        only its sizes and bytes — confirm callers expect this.
        """
        # Concatenate index
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        for size in index.sizes:
            self._sizes.append(size)
        # Concatenate data
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self._data_file)
    def finalize(self, index_file):
        """Close the data file and write the index (sizes + doc offsets)."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes, self._doc_idx)
| 18,859 | 31.1843 | 80 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/sequence_tagging_dataloader/sequence_tagging_collator.py | from dataclasses import dataclass
from torch.utils.data._utils.collate import default_collate
import copy
import torch
import numpy as np
@dataclass
class CollatorForLinear:
    """Batch collator for token classification with a linear decoding head.

    Turns token lists plus BIO label strings into fixed-length tensors laid
    out as [CLS] tokens [SEP] with zero padding up to args.max_seq_length.
    `args`, `tokenizer` and `label2id` must be assigned before use.
    """
    args = None
    tokenizer = None
    label2id = None

    def __call__(self, samples):
        cls_token = "[CLS]"
        sep_token = "[SEP]"
        pad_token = 0
        special_tokens_count = 2
        segment_id = 0
        max_len = self.args.max_seq_length
        batch = []
        for sample in samples:
            tokens = copy.deepcopy(sample['text_a'])
            label_ids = [self.label2id[lab] for lab in sample['labels']]
            # Truncate so that [CLS] and [SEP] still fit.
            limit = max_len - special_tokens_count
            if len(tokens) > limit:
                tokens = tokens[:limit]
                label_ids = label_ids[:limit]
            tokens = [cls_token] + tokens + [sep_token]
            # The special tokens are labeled "O".
            label_ids = [self.label2id["O"]] + label_ids + [self.label2id["O"]]
            segment_ids = [segment_id] * len(tokens)
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            # True (unpadded) length, including the two special tokens.
            input_len = len(label_ids)
            pad_n = max_len - len(input_ids)
            input_ids = input_ids + [pad_token] * pad_n
            input_mask = input_mask + [0] * pad_n
            segment_ids = segment_ids + [segment_id] * pad_n
            label_ids = label_ids + [pad_token] * pad_n
            for seq in (input_ids, input_mask, segment_ids, label_ids):
                assert len(seq) == max_len
            batch.append({
                'input_ids': torch.tensor(input_ids),
                'attention_mask': torch.tensor(input_mask),
                'input_len': torch.tensor(input_len),
                'token_type_ids': torch.tensor(segment_ids),
                'labels': torch.tensor(label_ids),
            })
        return default_collate(batch)
@dataclass
class CollatorForCrf:
    """Batch collator for token classification with a CRF decoding head.

    Same tensorization as the linear collator: [CLS] tokens [SEP] layout,
    zero-padded to args.max_seq_length, with "O" labels on special tokens.
    `args`, `tokenizer` and `label2id` must be assigned before use.
    """
    args = None
    tokenizer = None
    label2id = None

    def __call__(self, samples):
        cls_token = "[CLS]"
        sep_token = "[SEP]"
        pad_token = 0
        special_tokens_count = 2
        segment_id = 0
        max_len = self.args.max_seq_length
        batch = []
        for sample in samples:
            tokens = copy.deepcopy(sample['text_a'])
            label_ids = [self.label2id[lab] for lab in sample['labels']]
            # Truncate so that [CLS] and [SEP] still fit.
            limit = max_len - special_tokens_count
            if len(tokens) > limit:
                tokens = tokens[:limit]
                label_ids = label_ids[:limit]
            tokens = [cls_token] + tokens + [sep_token]
            label_ids = [self.label2id["O"]] + label_ids + [self.label2id["O"]]
            segment_ids = [segment_id] * len(tokens)
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            # True (unpadded) length, including the two special tokens.
            input_len = len(label_ids)
            pad_n = max_len - len(input_ids)
            input_ids = input_ids + [pad_token] * pad_n
            input_mask = input_mask + [0] * pad_n
            segment_ids = segment_ids + [segment_id] * pad_n
            label_ids = label_ids + [pad_token] * pad_n
            for seq in (input_ids, input_mask, segment_ids, label_ids):
                assert len(seq) == max_len
            batch.append({
                'input_ids': torch.tensor(input_ids),
                'attention_mask': torch.tensor(input_mask),
                'input_len': torch.tensor(input_len),
                'token_type_ids': torch.tensor(segment_ids),
                'labels': torch.tensor(label_ids),
            })
        return default_collate(batch)
@dataclass
class CollatorForSpan:
    """Batch collator for span-based NER decoding.

    Produces per-token start/end label vectors plus a fixed-size tensor of
    gold [label, start, end] triples (padded with -1 rows up to 100 per
    sample). `args`, `tokenizer` and `label2id` must be assigned before use.
    """
    args = None
    tokenizer = None
    label2id = None

    def __call__(self, samples):
        cls_token = "[CLS]"
        sep_token = "[SEP]"
        pad_token = 0
        special_tokens_count = 2
        max_entities_count = 100
        segment_id = 0
        max_len = self.args.max_seq_length
        batch = []
        for sample in samples:
            entities = copy.deepcopy(sample['subject'])
            tokens = copy.deepcopy(sample['text_a'])
            start_ids = [0] * len(tokens)
            end_ids = [0] * len(tokens)
            subject_ids = []
            for entity in entities:
                label, start, end = entity[0], entity[1], entity[2]
                start_ids[start] = self.label2id[label]
                end_ids[end] = self.label2id[label]
                subject_ids.append([self.label2id[label], start, end])
            # Pad the gold-entity list so every sample collates to 100 rows.
            subject_ids += [[-1, -1, -1]] * (max_entities_count - len(subject_ids))
            limit = max_len - special_tokens_count
            if len(tokens) > limit:
                tokens = tokens[:limit]
                start_ids = start_ids[:limit]
                end_ids = end_ids[:limit]
            tokens = [cls_token] + tokens + [sep_token]
            start_ids = [0] + start_ids + [0]
            end_ids = [0] + end_ids + [0]
            segment_ids = [segment_id] * len(tokens)
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            # Unpadded length, including [CLS]/[SEP].
            input_len = len(input_ids)
            pad_n = max_len - len(input_ids)
            input_ids += [pad_token] * pad_n
            input_mask += [0] * pad_n
            segment_ids += [segment_id] * pad_n
            start_ids += [0] * pad_n
            end_ids += [0] * pad_n
            for seq in (input_ids, input_mask, segment_ids, start_ids, end_ids):
                assert len(seq) == max_len
            batch.append({
                'input_ids': torch.tensor(np.array(input_ids)),
                'attention_mask': torch.tensor(np.array(input_mask)),
                'token_type_ids': torch.tensor(np.array(segment_ids)),
                'start_positions': torch.tensor(np.array(start_ids)),
                'end_positions': torch.tensor(np.array(end_ids)),
                "subjects": torch.tensor(np.array(subject_ids)),
                'input_len': torch.tensor(np.array(input_len)),
            })
        return default_collate(batch)
@dataclass
class CollatorForBiaffine:
    """Batch collator for biaffine span-classification NER.

    Builds a (max_seq_length x max_seq_length) span-label matrix where cell
    (i, j) holds the label id of the span starting at token i and ending at
    token j ("O" elsewhere), plus an upper-triangular span mask limited to
    spans of width <= 10.

    Bug fix: attention_mask was initialized to 0 for real tokens
    (`input_mask = [0] * len(input_ids)`), so the encoder masked out the
    entire input; real tokens now get mask value 1, matching the other
    collators in this module.
    """
    args = None
    tokenizer = None
    label2id = None

    def __call__(self, samples):
        features = []
        cls_token = "[CLS]"
        sep_token = "[SEP]"
        pad_token = 0
        special_tokens_count = 2
        segment_id = 0
        for (ex_index, example) in enumerate(samples):
            subjects = copy.deepcopy(example['subject'])
            tokens = copy.deepcopy(example['text_a'])
            span_labels = np.zeros((self.args.max_seq_length, self.args.max_seq_length))
            span_labels[:] = self.label2id["O"]
            for subject in subjects:
                label = subject[0]
                start = subject[1]
                end = subject[2]
                # +1 accounts for the [CLS] token prepended below.
                if start < self.args.max_seq_length - special_tokens_count and end < self.args.max_seq_length - special_tokens_count:
                    span_labels[start + 1, end + 1] = self.label2id[label]
            if len(tokens) > self.args.max_seq_length - special_tokens_count:
                tokens = tokens[: (self.args.max_seq_length - special_tokens_count)]
            tokens += [sep_token]
            # [SEP] row/column carries no entity label.
            span_labels[len(tokens), :] = self.label2id["O"]
            span_labels[:, len(tokens)] = self.label2id["O"]
            segment_ids = [segment_id] * len(tokens)
            tokens = [cls_token] + tokens
            # [CLS] row/column carries no entity label.
            span_labels[0, :] = self.label2id["O"]
            span_labels[:, 0] = self.label2id["O"]
            segment_ids = [segment_id] + segment_ids
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            # BUG FIX: was [0] * len(input_ids) — real tokens must be unmasked.
            input_mask = [1] * len(input_ids)
            span_mask = np.ones(span_labels.shape)
            input_len = len(input_ids)
            padding_length = self.args.max_seq_length - len(input_ids)
            input_ids += [pad_token] * padding_length
            input_mask += [0] * padding_length
            segment_ids += [segment_id] * padding_length
            # Zero out label/mask cells that fall in the padded region.
            span_labels[input_len:, :] = 0
            span_labels[:, input_len:] = 0
            span_mask[input_len:, :] = 0
            span_mask[:, input_len:] = 0
            # Keep only spans with start <= end and width <= 10 tokens.
            span_mask = np.triu(span_mask, 0)
            span_mask = np.tril(span_mask, 10)
            assert len(input_ids) == self.args.max_seq_length
            assert len(input_mask) == self.args.max_seq_length
            assert len(segment_ids) == self.args.max_seq_length
            assert len(span_labels) == self.args.max_seq_length
            assert len(span_labels[0]) == self.args.max_seq_length
            features.append({
                'input_ids': torch.tensor(np.array(input_ids)),
                'attention_mask': torch.tensor(np.array(input_mask)),
                'token_type_ids': torch.tensor(np.array(segment_ids)),
                'span_labels': torch.tensor(np.array(span_labels)),
                'span_mask': torch.tensor(np.array(span_mask)),
                'input_len': torch.tensor(np.array(input_len)),
            })
        return default_collate(features)
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/sequence_tagging_dataloader/sequence_tagging_datasets.py | from torch.utils.data import Dataset
from fengshen.metric.utils_ner import get_entities
import os
def get_datasets(args):
    """Build the train/validation/test TaskDatasets from args.data_dir.

    NOTE(review): both "validation" and "test" load the "dev" split — there
    is no separate test file here; confirm this is intentional.
    """
    processor = DataProcessor(args.data_dir, args.decode_type)
    return {
        "train": TaskDataset(processor=processor, mode="train"),
        "validation": TaskDataset(processor=processor, mode="dev"),
        "test": TaskDataset(processor=processor, mode="dev"),
    }
# def get_labels(decode_type):
# with open("/cognitive_comp/lujunyu/data_zh/NER_Aligned/weibo/labels.txt") as f:
# label_list = ["[PAD]", "[START]", "[END]"]
# if decode_type=="crf" or decode_type=="linear":
# for line in f.readlines():
# label_list.append(line.strip())
# elif decode_type=="biaffine" or decode_type=="span":
# for line in f.readlines():
# tag = line.strip().split("-")
# if len(tag) == 1 and tag[0] not in label_list:
# label_list.append(tag[0])
# elif tag[1] not in label_list:
# label_list.append(tag[1])
# label2id={label:id for id,label in enumerate(label_list)}
# id2label={id:label for id,label in enumerate(label_list)}
# return label2id, id2label
class DataProcessor(object):
    """Reads BMES-tagged sequence-labelling data from a directory.

    Input files contain whitespace-separated "token TAG" lines; blank lines
    separate sentences. Label vocabularies are derived from labels.txt
    according to the decode head: crf/linear keep full tags, biaffine/span
    keep only the entity types.
    """
    def __init__(self, data_dir, decode_type) -> None:
        super().__init__()
        self.data_dir = data_dir
        self.decode_type = decode_type
    def get_examples(self, mode):
        """Load `<mode>.all.bmes` from data_dir and convert to example dicts."""
        return self._create_examples(self._read_text(os.path.join(self.data_dir, mode + ".all.bmes")), mode)
    @staticmethod
    def get_labels(args):
        """Build (label2id, id2label) from labels.txt for the decode head.

        [PAD]/[START]/[END] are always ids 0/1/2; for biaffine/span only the
        entity type (tag after the first '-') is kept, de-duplicated.
        """
        with open(os.path.join(args.data_dir, "labels.txt")) as f:
            label_list = ["[PAD]", "[START]", "[END]"]
            if args.decode_type == "crf" or args.decode_type == "linear":
                for line in f.readlines():
                    label_list.append(line.strip())
            elif args.decode_type == "biaffine" or args.decode_type == "span":
                for line in f.readlines():
                    tag = line.strip().split("-")
                    if len(tag) == 1 and tag[0] not in label_list:
                        label_list.append(tag[0])
                    elif tag[1] not in label_list:
                        label_list.append(tag[1])
            label2id = {label: i for i, label in enumerate(label_list)}
            id2label = {i: label for i, label in enumerate(label_list)}
        return label2id, id2label
    def _create_examples(self, lines, set_type):
        """Convert raw {words, labels} dicts into guid'd example dicts.

        BMES 'M-' (middle) tags are rewritten to BIO 'I-' before entity
        extraction with get_entities.
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = line['words']
            labels = []
            for x in line['labels']:
                if 'M-' in x:
                    labels.append(x.replace('M-', 'I-'))
                else:
                    labels.append(x)
            subject = get_entities(labels, id2label=None, markup='bioes')
            examples.append({'guid': guid, 'text_a': text_a, 'labels': labels, 'subject': subject})
        return examples
    @classmethod
    def _read_text(cls, input_file):
        """Parse a CoNLL-style file into [{"words": [...], "labels": [...]}].

        Fix: this classmethod previously named its first parameter `self`,
        which was misleading; it is the class and is now named `cls`.
        Blank lines and -DOCSTART- markers flush the current sentence;
        tokens without a tag default to "O".
        """
        lines = []
        with open(input_file, 'r') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        lines.append({"words": words, "labels": labels})
                        words = []
                        labels = []
                else:
                    splits = line.split()
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[-1].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                lines.append({"words": words, "labels": labels})
        return lines
class TaskDataset(Dataset):
    """Thin map-style Dataset over examples produced by a DataProcessor.

    All examples for the given mode are loaded eagerly at construction.
    (Also removes extraction junk that was fused onto the final line.)
    """
    def __init__(self, processor, mode='train'):
        super().__init__()
        self.data = self.load_data(processor, mode)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]

    def load_data(self, processor, mode):
        """Fetch the raw example list for `mode` from the processor."""
        return processor.get_examples(mode)
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/data/bert_dataloader/load.py | import os
import re
from pathlib import Path
import glob
from tqdm import tqdm
from contextlib import ExitStack
import datasets
import multiprocessing
from typing import cast, TextIO
from itertools import chain
import json
from concurrent.futures import ProcessPoolExecutor
from random import shuffle
from pytorch_lightning import LightningDataModule
from typing import Optional
from torch.utils.data import DataLoader
# _SPLIT_DATA_PATH = '/data1/datas/wudao_180g_split/test'
# Source directory of the pre-split WuDao corpus shards (hard-coded cluster path).
_SPLIT_DATA_PATH = '/data1/datas/wudao_180g_split'
# Destination for the HF-datasets caches produced by BertDataGenerate.
_CACHE_SPLIT_DATA_PATH = '/data1/datas/wudao_180g_FSData'
# feats = datasets.Features({"text": datasets.Value('string')})
class BertDataGenerate(object):
    """Shards a directory of JSON corpus files into HF-datasets caches.

    Each input file is split into train/test/validation according to the
    `train_test_validation` ratio string and saved via save_to_disk(), one
    shard per source file, using a pool of `num_proc` worker processes.
    """
    def __init__(self,
                 data_files=_SPLIT_DATA_PATH,
                 save_path=_CACHE_SPLIT_DATA_PATH,
                 train_test_validation='950,49,1',
                 num_proc=1,
                 cache=True):
        self.data_files = Path(data_files)
        if save_path:
            self.save_path = Path(save_path)
        else:
            # Default save dir: sibling of the input dir named <input>_FSDataset.
            # NOTE(review): file_check returns str while the branch above keeps
            # a Path — downstream Path(...) calls accept both, but confirm.
            self.save_path = self.file_check(
                Path(self.data_files.parent, self.data_files.name+'_FSDataset'),
                'save')
        self.num_proc = num_proc
        self.cache = cache
        self.split_idx = self.split_train_test_validation_index(train_test_validation)
        if cache:
            self.cache_path = self.file_check(
                Path(self.save_path.parent, 'FSDataCache', self.data_files.name), 'cache')
        else:
            self.cache_path = None
    @staticmethod
    def file_check(path, path_type):
        """Ensure `path` exists (creating it if needed) and return it as str."""
        print(path)
        if not path.exists():
            path.mkdir(parents=True)
        print(f"Since no {path_type} directory is specified, the program will automatically create it in {path} directory.")
        return str(path)
    @staticmethod
    def split_train_test_validation_index(train_test_validation):
        """Turn a 'train,test,validation' ratio string into split fractions.

        train_rate = train / total; test_rate = test / (test + validation),
        i.e. the fraction used in the second train_test_split call.
        """
        split_idx_ = [int(i) for i in train_test_validation.split(',')]
        idx_dict = {
            'train_rate': split_idx_[0]/sum(split_idx_),
            'test_rate': split_idx_[1]/sum(split_idx_[1:])
        }
        return idx_dict
    def process(self, index, path):
        """Load one JSON shard, split it three ways and save under save_path."""
        print('saving dataset shard {}'.format(index))
        ds = (datasets.load_dataset('json', data_files=str(path),
                                    cache_dir=self.cache_path,
                                    features=None))
        # ds = ds.map(self.cut_sent,input_columns='text')
        # print(d)
        # print('!!!',ds)
        # Two-stage split: carve off train first, then split the remainder
        # into test and validation.
        ds = ds['train'].train_test_split(train_size=self.split_idx['train_rate'])
        ds_ = ds['test'].train_test_split(train_size=self.split_idx['test_rate'])
        ds = datasets.DatasetDict({
            'train': ds['train'],
            'test': ds_['train'],
            'validation': ds_['test']
        })
        # print('!!!!',ds)
        ds.save_to_disk(Path(self.save_path, path.name))
        return 'saving dataset shard {} done'.format(index)
    def generate_cache_arrow(self) -> None:
        '''
        Generate HF-compatible cache files to speed up subsequent loading.
        '''
        data_dict_paths = self.data_files.rglob('*')
        p = ProcessPoolExecutor(max_workers=self.num_proc)
        res = list()
        for index, path in enumerate(data_dict_paths):
            res.append(p.submit(self.process, index, path))
        p.shutdown(wait=True)
        for future in res:
            print(future.result(), flush=True)
def load_dataset(num_proc=4, **kargs):
    """Load every cached shard under _CACHE_SPLIT_DATA_PATH and merge splits.

    Shards are loaded in parallel with a process pool, then the per-shard
    train/test/validation splits are concatenated into one DatasetDict.

    Args:
        num_proc: number of worker processes used for loading.
        **kargs: forwarded to datasets.load_from_disk.
    """
    cache_dict_paths = Path(_CACHE_SPLIT_DATA_PATH).glob('*')
    ds = []
    res = []
    p = ProcessPoolExecutor(max_workers=num_proc)
    for path in cache_dict_paths:
        res.append(p.submit(datasets.load_from_disk,
                            str(path), **kargs))
    p.shutdown(wait=True)
    for future in res:
        ds.append(future.result())
        # print(future.result())
    # Regroup per-shard DatasetDicts into per-split lists before concatenation.
    train = []
    test = []
    validation = []
    for ds_ in ds:
        train.append(ds_['train'])
        test.append(ds_['test'])
        validation.append(ds_['validation'])
    # ds = datasets.concatenate_datasets(ds)
    # print(ds)
    return datasets.DatasetDict({
        'train': datasets.concatenate_datasets(train),
        'test': datasets.concatenate_datasets(test),
        'validation': datasets.concatenate_datasets(validation)
    })
class BertDataModule(LightningDataModule):
    """LightningDataModule serving the pre-cached WuDao HF datasets.

    Train/val/test DataLoaders are built once in setup(); split names and
    batch sizes come from the CLI arguments registered below.
    """

    @staticmethod
    def add_data_specific_args(parent_args):
        """Register dataloader-related CLI arguments on `parent_args`."""
        parser = parent_args.add_argument_group('Universal DataModule')
        parser.add_argument('--num_workers', default=8, type=int)
        parser.add_argument('--train_batchsize', default=32, type=int)
        parser.add_argument('--val_batchsize', default=32, type=int)
        parser.add_argument('--test_batchsize', default=32, type=int)
        parser.add_argument('--datasets_name', type=str)
        parser.add_argument('--train_datasets_field', type=str, default='train')
        parser.add_argument('--val_datasets_field', type=str, default='validation')
        parser.add_argument('--test_datasets_field', type=str, default='test')
        return parent_args

    def __init__(self, tokenizer, collate_fn, args, **kwargs):
        super().__init__()
        # Eagerly load all cached shards (parallelized by num_workers).
        self.datasets = load_dataset(num_proc=args.num_workers)
        self.tokenizer = tokenizer
        self.collate_fn = collate_fn
        self.save_hyperparameters(args)

    def _make_loader(self, field, batch_size, shuffle):
        """Create one DataLoader over the dataset split named `field`."""
        return DataLoader(
            self.datasets[field],
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=self.hparams.num_workers,
            collate_fn=self.collate_fn,
        )

    def setup(self, stage: Optional[str] = None) -> None:
        self.train = self._make_loader(
            self.hparams.train_datasets_field, self.hparams.train_batchsize, True)
        self.val = self._make_loader(
            self.hparams.val_datasets_field, self.hparams.val_batchsize, False)
        self.test = self._make_loader(
            self.hparams.test_datasets_field, self.hparams.test_batchsize, False)
        return

    def train_dataloader(self):
        return self.train

    def val_dataloader(self):
        return self.val

    def test_dataloader(self):
        return self.test
if __name__ == '__main__':
    # pre = PreProcessing(_SPLIT_DATA_PATH)
    # pre.processing()
    # Shard the split corpus into HF dataset caches with 16 worker processes.
    dataset = BertDataGenerate(_SPLIT_DATA_PATH, num_proc=16)
    dataset.generate_cache_arrow()
| 6,756 | 32.616915 | 124 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/pipelines/sequence_tagging.py | import torch
import torch.nn.functional as F
from torch.utils.data._utils.collate import default_collate
from dataclasses import dataclass
from typing import Dict, List, Union
from fengshen.models.tagging_models.bert_for_tagging import BertLinear,BertCrf,BertSpan,BertBiaffine
from fengshen.data.sequence_tagging_dataloader.sequence_tagging_collator import CollatorForLinear, CollatorForCrf, CollatorForSpan, CollatorForBiaffine
from fengshen.data.sequence_tagging_dataloader.sequence_tagging_datasets import DataProcessor, get_datasets
from fengshen.metric.metric import EntityScore
from fengshen.metric.utils_ner import get_entities, bert_extract_item
from transformers import (
BertConfig,
AutoTokenizer, BertTokenizer
)
from transformers.models.auto.tokenization_auto import get_tokenizer_config
from transformers.pipelines.base import PipelineException, GenericTensor
from transformers import TokenClassificationPipeline as HuggingfacePipe
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.models.model_utils import add_module_args
from fengshen.models.model_utils import configure_optimizers
from fengshen.models.model_utils import get_total_steps
# Maps "<encoder>-<decode head>" names to tagging model classes.
_model_dict={
    'bert-linear': BertLinear,
    'bert-crf': BertCrf,
    'bert-span': BertSpan,
    'bert-biaffine': BertBiaffine
}
# Maps decode-head names to their batch collators.
_collator_dict={
    'linear': CollatorForLinear,
    'crf': CollatorForCrf,
    'span': CollatorForSpan,
    'biaffine': CollatorForBiaffine
}
class _taskModel(pl.LightningModule):
    """LightningModule for sequence tagging with pluggable decode heads.

    The head-specific validation routine (linear/crf/span/biaffine) is
    selected by name at construction time; entity-level precision/recall/F1
    is accumulated in an EntityScore across each validation epoch.
    """

    @staticmethod
    def add_model_specific_args(parent_args):
        parser = parent_args.add_argument_group('sequence tagging task model')
        parser.add_argument('--data_dir', default=None, type=str)
        parser.add_argument('--model_type', default='bert', type=str)
        parser.add_argument("--decode_type", default="linear", choices=["linear", "crf", "biaffine", "span"], type=str)
        parser.add_argument('--loss_type', default='ce', type=str)
        return parent_args

    def __init__(self, args, model, label2id, validate_fn):
        """
        Args:
            args: argparse namespace, saved as hyperparameters.
            model: the tagging model to train.
            label2id: label-name -> id mapping.
            validate_fn: name of one of the validation_* methods below.
        """
        super().__init__()
        self.label2id = label2id
        self.id2label = {v: k for k, v in self.label2id.items()}
        self.model = model
        self.validate_fn = getattr(self, validate_fn)
        self.entity_score = EntityScore()
        self.save_hyperparameters(args)

    def setup(self, stage) -> None:
        if stage == 'fit':
            self.total_steps = get_total_steps(self.trainer, self.hparams)
            print('Total steps: {}' .format(self.total_steps))

    def training_step(self, batch, batch_idx):
        outputs = self.model(**batch)
        loss = outputs.loss
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_idx):
        # Dispatch to the decode-head-specific routine chosen in __init__.
        self.validate_fn(batch, batch_idx)

    def validation_linear(self, batch, batch_idx):
        """Greedy argmax decoding; compares tag sequences token by token."""
        outputs = self.model(**batch)
        loss = outputs.loss
        logits = outputs.logits
        preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
        preds = preds.detach().cpu().numpy()
        labels = batch['labels'].detach().cpu().numpy()
        for i, label in enumerate(labels):
            y_true = []
            y_pred = []
            for j, m in enumerate(label):
                if j == 0:
                    # skip [CLS]
                    continue
                elif j == (torch.sum(batch['attention_mask'][i]).item() - 1):
                    # reached [SEP]: score the accumulated tag sequences
                    true_subject = get_entities(y_true, self.id2label)
                    pred_subject = get_entities(y_pred, self.id2label)
                    self.entity_score.update(true_subject=true_subject, pred_subject=pred_subject)
                    break
                else:
                    y_true.append(self.id2label[labels[i][j]])
                    y_pred.append(self.id2label[preds[i][j]])
        self.log('val_loss', loss)

    def validation_crf(self, batch, batch_idx):
        """Viterbi decoding via the model's CRF layer, scored per entity."""
        outputs = self.model(**batch)
        loss = outputs.loss
        logits = outputs.logits
        preds = self.model.crf.decode(logits, batch['attention_mask'])
        preds = preds.detach().squeeze(0).cpu().numpy().tolist()
        labels = batch['labels'].detach().cpu().numpy()
        for i, label in enumerate(labels):
            y_true = []
            y_pred = []
            for j, m in enumerate(label):
                if j == 0:
                    # skip [CLS]
                    continue
                elif j == (torch.sum(batch['attention_mask'][i]).item() - 1):
                    true_subject = get_entities(y_true, self.id2label)
                    pred_subject = get_entities(y_pred, self.id2label)
                    self.entity_score.update(true_subject=true_subject, pred_subject=pred_subject)
                    break
                else:
                    y_true.append(self.id2label[labels[i][j]])
                    y_pred.append(self.id2label[preds[i][j]])
        self.log('val_loss', loss)

    def validation_span(self, batch, batch_idx):
        """Extracts predicted spans from start/end logits and scores them
        against the gold (label, start, end) triples (padding rows are -1)."""
        outputs = self.model(**batch)
        loss = outputs.loss
        start_logits = outputs.start_logits
        end_logits = outputs.end_logits
        labels = batch['subjects']
        for i, T in enumerate(labels):
            active_start_logits = start_logits[i][:batch['input_len'][i]]
            active_end_logits = end_logits[i][:batch['input_len'][i]]
            R = bert_extract_item(active_start_logits, active_end_logits)
            # Drop the [-1, -1, -1] padding rows before scoring.
            T = T[~torch.all(T == -1, dim=-1)].cpu().numpy()
            T = list(map(lambda x: (self.id2label[x[0]], x[1], x[2]), T))
            R = list(map(lambda x: (self.id2label[x[0]], x[1], x[2]), R))
            self.entity_score.update(true_subject=T, pred_subject=R)
        self.log('val_loss', loss)

    def validation_biaffine(self, batch, batch_idx):
        """Scores biaffine span-matrix predictions against gold span labels.

        Bug fix: torch.argmax was previously applied to a numpy array
        (``logits.cpu().numpy()``), which raises TypeError; the argmax is
        now taken on the tensor before moving the result to numpy.
        """
        outputs = self.model(**batch)
        loss = outputs.loss
        logits = outputs.span_logits
        preds = torch.argmax(logits, dim=-1).cpu().numpy()
        labels = batch['span_labels'].cpu().numpy()
        for i, label in enumerate(labels):
            # Exclude [CLS]/[SEP]; the +1 shifts skip the [CLS] row/column.
            input_len = (batch['input_len'][i]) - 2
            active_label = labels[i, 1:input_len + 1, 1:input_len + 1]
            active_pred = preds[i, 1:input_len + 1, 1:input_len + 1]
            temp_1 = []
            temp_2 = []
            for j in range(input_len):
                for k in range(input_len):
                    if self.id2label[active_label[j, k]] != "O":
                        temp_1.append([self.id2label[active_label[j, k]], j, k])
                    if self.id2label[active_pred[j, k]] != "O":
                        temp_2.append([self.id2label[active_pred[j, k]], j, k])
            self.entity_score.update(pred_subject=temp_2, true_subject=temp_1)
        self.log('val_loss', loss)

    def validation_epoch_end(self, outputs):
        # compute metric for all process
        score_dict, _ = self.entity_score.result()
        if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
            print('score_dict:\n', score_dict)
        # reset the metric after once validation
        self.entity_score.reset()
        for k, v in score_dict.items():
            self.log('val_{}'.format(k), v)

    def configure_optimizers(self):
        return configure_optimizers(self)
class SequenceTaggingPipeline(HuggingfacePipe):
    """Training + inference pipeline for BERT-based sequence tagging.

    Wraps a tagging model (linear/crf/span/biaffine head), its collator and
    tokenizer behind the HuggingFace pipeline interface, and adds a train()
    entry point driven by pytorch-lightning.

    NOTE(review): _prediction_dict maps 'biaffine' to 'postprocess_biaffine',
    but no such method is defined on this class — constructing the pipeline
    with decode_type='biaffine' would raise AttributeError; confirm.
    """
    @staticmethod
    def add_pipeline_specific_args(parent_args):
        """Register pipeline, model, data, checkpoint and trainer CLI args."""
        parser = parent_args.add_argument_group('SequenceTaggingPipeline')
        parser.add_argument("--max_seq_length", default=512, type=int)
        parser = _taskModel.add_model_specific_args(parent_args)
        parser = UniversalDataModule.add_data_specific_args(parent_args)
        parser = UniversalCheckpoint.add_argparse_args(parent_args)
        parser = pl.Trainer.add_argparse_args(parent_args)
        parser = add_module_args(parent_args)
        return parent_args
    def __init__(self,
                 model_path: str = None,
                 args=None,
                 **kwargs):
        """Build model, tokenizer and collator for args.decode_type.

        Args:
            model_path: pretrained checkpoint directory for config/model/tokenizer.
            args: argparse namespace with model_type, decode_type, loss_type, etc.
        """
        # Method-name lookup tables keyed by decode head.
        _validation_dict={
            'linear': 'validation_linear',
            'crf': 'validation_crf',
            'span': 'validation_span',
            'biaffine': 'validation_biaffine',
        }
        _prediction_dict={
            'linear': 'postprocess_linear',
            'crf': 'postprocess_crf',
            'span': 'postprocess_span',
            'biaffine': 'postprocess_biaffine',
        }
        self.args = args
        self.model_name=args.model_type+"-"+args.decode_type
        self.label2id,self.id2label=DataProcessor.get_labels(args)
        self.config=BertConfig.from_pretrained(model_path)
        self.model = _model_dict[self.model_name].from_pretrained(model_path, config=self.config, num_labels=len(self.label2id), loss_type=args.loss_type)
        self.tokenizer=BertTokenizer.from_pretrained(model_path)
        self.validate_fn = _validation_dict[args.decode_type]
        self.predict_fn = getattr(self,_prediction_dict[args.decode_type])
        # Collator fields are injected rather than passed to the constructor.
        self.collator = _collator_dict[args.decode_type]()
        self.collator.args=self.args
        self.collator.tokenizer=self.tokenizer
        self.collator.label2id=self.label2id
        # -1 = CPU in the HuggingFace pipeline device convention.
        device=-1
        super().__init__(model=self.model,
                         tokenizer=self.tokenizer,
                         framework='pt',
                         device=device,
                         **kwargs)
    def check_model_type(self, supported_models: Union[List[str], dict]):
        # Deliberately disabled: fengshen model classes are not in the
        # HuggingFace supported-models registry.
        pass
    def train(self):
        """Fine-tune the tagging model with pytorch-lightning on args.data_dir."""
        datasets=get_datasets(self.args)
        checkpoint_callback = UniversalCheckpoint(self.args).callbacks
        lr_monitor = LearningRateMonitor(logging_interval='step')
        trainer = pl.Trainer.from_argparse_args(self.args,
                                                callbacks=[checkpoint_callback, lr_monitor]
                                                )
        data_model = UniversalDataModule(
            datasets=datasets,
            args=self.args,
            collate_fn=self.collator,
            tokenizer=self.tokenizer)
        model = _taskModel(self.args,self.model,self.label2id,self.validate_fn)
        trainer.fit(model,data_model)
    def _forward(self, model_inputs):
        # Keep the inputs: postprocessing needs input_ids/input_len.
        outputs = self.model(**model_inputs)
        return (model_inputs,outputs)
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        """Wrap a raw string as a single dummy-labeled sample and collate it."""
        samples=[]
        labels,subject=["O" for _ in range(len(inputs))],[]
        samples.append({"text_a": list(inputs), "labels": labels, "subject":subject})
        return self.collator(samples)
    def postprocess(self, model_outputs):
        # Dispatch to the decode-head-specific routine chosen in __init__.
        return self.predict_fn(model_outputs)
    def postprocess_linear(self, model_outputs):
        """Argmax decoding -> [label, start, end, text] entity lists."""
        model_inputs,outputs=model_outputs
        preds = torch.argmax(F.log_softmax(outputs.logits, dim=2), dim=2)
        preds = preds.detach().cpu().numpy()
        # Strip [CLS]/[SEP] from both tokens and predictions.
        text = self.tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0])[:model_inputs['input_len'][0]][1:-1]
        pred = preds[0][:model_inputs['input_len'][0]][1:-1]
        label_entities = get_entities(pred, self.id2label)
        for label_list in label_entities:
            label_list.append("".join(text[label_list[1]:label_list[2]+1]))
        return label_entities
    def postprocess_crf(self, model_outputs):
        """CRF Viterbi decoding -> [label, start, end, text] entity lists."""
        model_inputs,outputs=model_outputs
        preds = self.model.crf.decode(outputs.logits, model_inputs['attention_mask']).squeeze(0).cpu().numpy().tolist()
        text = self.tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0])[:model_inputs['input_len'][0]][1:-1]
        pred = preds[0][:model_inputs['input_len'][0]][1:-1]
        label_entities = get_entities(pred, self.id2label)
        for label_list in label_entities:
            label_list.append("".join(text[label_list[1]:label_list[2]+1]))
        return label_entities
    def postprocess_span(self, model_outputs):
        """Start/end span extraction -> [label, start, end, text] entity lists."""
        model_inputs,outputs=model_outputs
        start_logits, end_logits = outputs.start_logits[0], outputs.end_logits[0]
        text = self.tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0])[:model_inputs['input_len'][0]][1:-1]
        R = bert_extract_item(start_logits[:model_inputs['input_len'][0]], end_logits[:model_inputs['input_len'][0]])
        label_entities = [[self.id2label[x[0]],x[1],x[2],"".join(text[x[1]:x[2]+1])] for x in R]
        return label_entities
# Re-export under the generic name expected by the pipeline loader.
Pipeline = SequenceTaggingPipeline
| 12,608 | 39.156051 | 154 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/pipelines/text_classification.py | import torch
from torch.utils.data._utils.collate import default_collate
from dataclasses import dataclass
from typing import Dict, List
from .base import (
_CONFIG_MODEL_TYPE,
_CONFIG_TOKENIZER_TYPE)
from fengshen.models.roformer import RoFormerForSequenceClassification
from fengshen.models.longformer import LongformerForSequenceClassification
from fengshen.models.zen1 import ZenForSequenceClassification
from transformers import (
BertConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
)
from transformers.models.auto.tokenization_auto import get_tokenizer_config
from transformers.pipelines.base import PipelineException, GenericTensor
from transformers import TextClassificationPipeline as HuggingfacePipe
import pytorch_lightning as pl
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.models.model_utils import add_module_args
import torchmetrics
# Maps fengshen/huggingface model-type tags to sequence-classification classes.
_model_dict = {
    'fengshen-roformer': RoFormerForSequenceClassification,
    # 'fengshen-megatron_t5': T5EncoderModel, TODO: implement T5EncoderForSequenceClassification
    'fengshen-longformer': LongformerForSequenceClassification,
    'fengshen-zen1': ZenForSequenceClassification,
    'huggingface-auto': AutoModelForSequenceClassification,
}
# Reserved for tokenizer-class overrides; currently every model uses AutoTokenizer.
_tokenizer_dict = {}
# Hook name probed on models that pre-transform their classification inputs.
_ATTR_PREPARE_INPUT = '_prepare_inputs_for_sequence_classification'
class _taskModel(pl.LightningModule):
    """LightningModule wrapping a sequence-classification model.

    Logs training loss plus validation loss/accuracy; the wrapped model is
    expected to return (loss, logits, ...) when called with labels.
    """
    @staticmethod
    def add_model_specific_args(parent_args):
        _ = parent_args.add_argument_group('text classification task model')
        return parent_args
    def __init__(self, args, model):
        super().__init__()
        self.model = model
        self.acc_metrics = torchmetrics.Accuracy()
        self.save_hyperparameters(args)
    def setup(self, stage) -> None:
        """Compute total optimization steps before fitting (for LR schedules)."""
        if stage == 'fit':
            # NOTE(review): reaches into Lightning-internal attributes; may
            # break across pytorch-lightning versions — confirm pinned version.
            train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
            # Calculate total steps
            if self.trainer.max_epochs > 0:
                world_size = self.trainer.world_size
                tb_size = self.hparams.train_batchsize * max(1, world_size)
                ab_size = self.trainer.accumulate_grad_batches
                self.total_steps = (len(train_loader.dataset) *
                                    self.trainer.max_epochs // tb_size) // ab_size
            else:
                self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
            print('Total steps: {}' .format(self.total_steps))
    def training_step(self, batch, batch_idx):
        outputs = self.model(**batch)
        loss, _ = outputs[0], outputs[1]
        self.log('train_loss', loss)
        return loss
    def comput_metrix(self, logits, labels):
        """Flatten logits/labels and accumulate batch accuracy."""
        y_pred = torch.argmax(logits, dim=-1)
        y_pred = y_pred.view(size=(-1,))
        y_true = labels.view(size=(-1,)).long()
        acc = self.acc_metrics(y_pred.long(), y_true.long())
        return acc
    def validation_step(self, batch, batch_idx):
        outputs = self.model(**batch)
        loss, logits = outputs[0], outputs[1]
        acc = self.comput_metrix(logits, batch['labels'])
        self.log('val_loss', loss)
        self.log('val_acc', acc)
    def predict_step(self, batch, batch_idx):
        output = self.model(**batch)
        return output.logits
    def configure_optimizers(self):
        # Imported lazily — presumably to avoid a circular import at module
        # load time; confirm before moving to the top of the file.
        from fengshen.models.model_utils import configure_optimizers
        return configure_optimizers(self)
@dataclass
class _Collator:
tokenizer = None
texta_name = 'sentence'
textb_name = 'sentence2'
label_name = 'label'
max_length = 512
model_type = 'huggingface-auto'
def __call__(self, samples):
sample_list = []
for item in samples:
if self.textb_name in item and item[self.textb_name] != '':
if self.model_type != 'fengshen-roformer':
encode_dict = self.tokenizer.encode_plus(
[item[self.texta_name], item[self.textb_name]],
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
[item[self.texta_name]+'[SEP]'+item[self.textb_name]],
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
item[self.texta_name],
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
sample = {}
for k, v in encode_dict.items():
sample[k] = torch.tensor(v)
if self.label_name in item:
sample['labels'] = torch.tensor(item[self.label_name]).long()
sample_list.append(sample)
return default_collate(sample_list)
class TextClassificationPipeline(HuggingfacePipe):
    """Sequence-classification pipeline wrapping either a plain huggingface
    model or a fengshen model type registered in `_model_dict`.

    Inference goes through the inherited huggingface Pipeline machinery;
    `train` fine-tunes the model with pytorch-lightning.
    """

    @staticmethod
    def add_pipeline_specific_args(parent_args):
        """Register pipeline CLI flags plus the task model, data module,
        checkpoint and pl.Trainer argument groups. Returns `parent_args`."""
        parser = parent_args.add_argument_group('SequenceClassificationPipeline')
        parser.add_argument('--texta_name', default='sentence', type=str)
        parser.add_argument('--textb_name', default='sentence2', type=str)
        parser.add_argument('--label_name', default='label', type=str)
        parser.add_argument('--max_length', default=512, type=int)
        parser.add_argument('--device', default=-1, type=int)
        parser = _taskModel.add_model_specific_args(parent_args)
        parser = UniversalDataModule.add_data_specific_args(parent_args)
        parser = UniversalCheckpoint.add_argparse_args(parent_args)
        parser = pl.Trainer.add_argparse_args(parent_args)
        parser = add_module_args(parent_args)
        return parent_args

    def __init__(self,
                 model: str = None,
                 args=None,
                 **kwargs):
        """`model` is a local path or hub id; `args` comes from the parser
        built by `add_pipeline_specific_args` (optional for pure inference)."""
        self.args = args
        self.model_name = model
        self.model_type = 'huggingface-auto'
        # BertConfig is used for compatibility only: we just read
        # fengshen_model_type from it, so any Config class would do.
        config = BertConfig.from_pretrained(model)
        if hasattr(config, _CONFIG_MODEL_TYPE):
            self.model_type = config.fengshen_model_type
        if self.model_type not in _model_dict:
            raise PipelineException(self.model_name, ' not in model type dict')
        # Load the model and adopt its config.
        self.model = _model_dict[self.model_type].from_pretrained(model)
        self.config = self.model.config
        # Load the tokenizer: prefer a registered fengshen tokenizer class
        # when the tokenizer config declares one, else fall back to Auto.
        tokenizer_config = get_tokenizer_config(model, **kwargs)
        self.tokenizer = None
        if hasattr(tokenizer_config, _CONFIG_TOKENIZER_TYPE):
            # BUGFIX: look the attribute up by the *value* of the constant.
            # The original accessed `tokenizer_config._CONFIG_TOKENIZER_TYPE`,
            # i.e. an attribute literally named "_CONFIG_TOKENIZER_TYPE",
            # which never exists, so registered tokenizers were never used.
            tokenizer_type = getattr(tokenizer_config, _CONFIG_TOKENIZER_TYPE)
            if tokenizer_type in _tokenizer_dict:
                self.tokenizer = _tokenizer_dict[tokenizer_type].from_pretrained(
                    model)
        if self.tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(model)
        # Set up the data collator.
        c = _Collator()
        c.tokenizer = self.tokenizer
        c.model_type = self.model_type
        if args is not None:
            c.texta_name = self.args.texta_name
            c.textb_name = self.args.textb_name
            c.label_name = self.args.label_name
            c.max_length = self.args.max_length
        self.collator = c
        device = -1 if args is None else args.device
        print(device)
        print(kwargs)
        super().__init__(model=self.model,
                         tokenizer=self.tokenizer,
                         framework='pt',
                         device=device,
                         **kwargs)

    def train(self,
              datasets: Dict):
        """
        Fine-tune the wrapped model with pytorch-lightning.

        Args:
            datasets is a dict like
            {
                test: Dataset()
                validation: Dataset()
                train: Dataset()
            }
        """
        checkpoint_callback = UniversalCheckpoint(self.args)
        trainer = pl.Trainer.from_argparse_args(self.args,
                                                callbacks=[checkpoint_callback]
                                                )
        data_model = UniversalDataModule(
            datasets=datasets,
            tokenizer=self.tokenizer,
            collate_fn=self.collator,
            args=self.args)
        model = _taskModel(self.args, self.model)
        trainer.fit(model, data_model)
        return

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        """Turn raw text (str or iterable of str) into collated model inputs."""
        # If the model provides its own preparation hook, delegate to it.
        if hasattr(self.model, _ATTR_PREPARE_INPUT):
            return getattr(self.model, _ATTR_PREPARE_INPUT)(inputs, self.tokenizer, **tokenizer_kwargs)
        samples = []
        if isinstance(inputs, str):
            samples.append({self.collator.texta_name: inputs})
        else:
            # __call__ has already validated the input type, so a plain else
            # is safe here.
            for i in inputs:
                # BUGFIX: build a {field_name: text} dict; the original
                # appended a set literal `{self.collator.texta_name}`, which
                # dropped the text and broke tokenization for batched inputs.
                samples.append({self.collator.texta_name: i})
        return self.collator(samples)
Pipeline = TextClassificationPipeline
| 9,274 | 38.468085 | 106 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/pipelines/tcbert.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning import trainer, loggers
from transformers import AutoConfig
from transformers.pipelines.base import Pipeline
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from fengshen.models.tcbert.modeling_tcbert import (
TCBertDataModel,
TCBertLitModel,
TCBertPredict,
)
class TCBertPipelines(Pipeline):
    """Training / inference wrapper around the TCBert prompt-based classifier.

    NOTE(review): huggingface Pipeline.__init__ is never invoked; the class
    builds its own trainer/model/tokenizer and only reuses the Pipeline
    interface (_forward/_sanitize_parameters) — confirm this is intentional.
    """
    @staticmethod
    def piplines_args(parent_args):
        """Register pipeline CLI flags plus the data module, checkpoint,
        lightning-model and pl.Trainer argument groups."""
        total_parser = parent_args.add_argument_group("piplines args")
        total_parser.add_argument(
            '--pretrained_model_path', default='', type=str)
        total_parser.add_argument('--load_checkpoints_path',
                                  default='', type=str)
        total_parser.add_argument('--train', action='store_true')
        total_parser.add_argument('--language',
                                  default='chinese', type=str)
        total_parser = TCBertDataModel.add_data_specific_args(total_parser)
        total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
        total_parser = TCBertLitModel.add_model_specific_args(total_parser)
        total_parser = pl.Trainer.add_argparse_args(parent_args)
        return parent_args
    def __init__(self, args, model_path, nlabels):
        """Build trainer, tokenizer and model; resume from a checkpoint when
        args.load_checkpoints_path is non-empty."""
        self.args = args
        self.checkpoint_callback = UniversalCheckpoint(args)
        self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
        self.trainer = pl.Trainer.from_argparse_args(args,
                                                     logger=self.logger,
                                                     callbacks=[self.checkpoint_callback])
        self.config = AutoConfig.from_pretrained(model_path)
        self.tokenizer = BertTokenizer.from_pretrained(
            model_path)
        if args.load_checkpoints_path != '':
            self.model = TCBertLitModel.load_from_checkpoint(
                args.load_checkpoints_path, args=args, model_path=model_path, nlabels=nlabels)
            print('load model from: ', args.load_checkpoints_path)
        else:
            self.model = TCBertLitModel(
                args, model_path=model_path, nlabels=nlabels)
    def train(self, train_data, dev_data, prompt, prompt_label):
        """Fit on train_data, validating on dev_data; `prompt`/`prompt_label`
        configure the prompt template used by the data module."""
        data_model = TCBertDataModel(
            train_data, dev_data, self.tokenizer, self.args, prompt, prompt_label)
        self.model.num_data = len(train_data)
        self.trainer.fit(self.model, data_model)
    def predict(self, test_data, prompt, prompt_label, cuda=True):
        """Batched inference over test_data in chunks of args.batchsize."""
        result = []
        start = 0
        if cuda:
            self.model = self.model.cuda()
        # inner huggingface model only — the lightning wrapper has no eval()
        self.model.model.eval()
        predict_model = TCBertPredict(self.model, self.tokenizer, self.args, prompt, prompt_label)
        while start < len(test_data):
            batch_data = test_data[start:start+self.args.batchsize]
            start += self.args.batchsize
            batch_result = predict_model.predict(batch_data)
            result.extend(batch_result)
        # result = self.postprocess(result)
        return result
    def preprocess(self, data):
        """Identity hook (required by the Pipeline interface)."""
        return data
    def postprocess(self, data):
        """Identity hook (required by the Pipeline interface)."""
        return data
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess)
        parameter dicts; copied from the huggingface text-classification
        pipeline."""
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
| 5,390 | 38.350365 | 116 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/pipelines/multiplechoice.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning import trainer, loggers
from transformers import AlbertTokenizer
from transformers import AutoConfig
from transformers.pipelines.base import Pipeline
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from fengshen.models.unimc.modeling_unimc import (
UniMCDataModel,
UniMCLitModel,
UniMCPredict,
)
class UniMCPipelines(Pipeline):
    """Unified multiple-choice (UniMC) pipeline: training, batched prediction
    and rewrites converting semantic-matching / NLI samples into the
    multiple-choice format.

    NOTE(review): huggingface Pipeline.__init__ is never invoked; only the
    _forward/_sanitize_parameters interface is reused — confirm intentional.
    """
    @staticmethod
    def pipelines_args(parent_args):
        """Register pipeline CLI flags plus data/checkpoint/model/Trainer groups."""
        total_parser = parent_args.add_argument_group("piplines args")
        total_parser.add_argument(
            '--pretrained_model_path', default='', type=str)
        total_parser.add_argument('--load_checkpoints_path',
                                  default='', type=str)
        total_parser.add_argument('--train', action='store_true')
        total_parser.add_argument('--language',
                                  default='chinese', type=str)
        total_parser = UniMCDataModel.add_data_specific_args(total_parser)
        total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
        total_parser = UniMCLitModel.add_model_specific_args(total_parser)
        total_parser = pl.Trainer.add_argparse_args(parent_args)
        return parent_args
    def __init__(self, args, model_path):
        """Build trainer/tokenizer/model and resolve the yes/no marker token ids."""
        self.args = args
        self.checkpoint_callback = UniversalCheckpoint(args)
        self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
        self.trainer = pl.Trainer.from_argparse_args(args,
                                                     logger=self.logger,
                                                     callbacks=[self.checkpoint_callback])
        self.config = AutoConfig.from_pretrained(model_path)
        if self.config.model_type == 'albert':
            self.tokenizer = AlbertTokenizer.from_pretrained(
                model_path)
        else:
            self.tokenizer = BertTokenizer.from_pretrained(
                model_path)
        # index [1] picks the first content token after the BOS/[CLS] token;
        # assumes the marker encodes to a single word-piece — TODO confirm,
        # especially for the English vocab where 'yes'/'no' may split.
        if args.language == 'chinese':
            self.yes_token = self.tokenizer.encode('是')[1]
            self.no_token = self.tokenizer.encode('非')[1]
        else:
            self.yes_token = self.tokenizer.encode('yes')[1]
            self.no_token = self.tokenizer.encode('no')[1]
        if args.load_checkpoints_path != '':
            self.model = UniMCLitModel.load_from_checkpoint(
                args.load_checkpoints_path, args=args, yes_token=self.yes_token, model_path=model_path)
            print('load model from: ', args.load_checkpoints_path)
        else:
            self.model = UniMCLitModel(
                args, yes_token=self.yes_token, model_path=model_path)
    def train(self, train_data, dev_data, process=True):
        """Fit on train_data/dev_data; `process` applies the task rewrites first."""
        if process:
            train_data = self.preprocess(train_data)
            dev_data = self.preprocess(dev_data)
        data_model = UniMCDataModel(
            train_data, dev_data, self.yes_token, self.no_token, self.tokenizer, self.args)
        self.model.num_data = len(train_data)
        self.trainer.fit(self.model, data_model)
    def predict(self, test_data, cuda=True, process=True):
        """Batched prediction; pre/postprocess convert between task formats."""
        if process:
            test_data = self.preprocess(test_data)
        result = []
        start = 0
        if cuda:
            self.model = self.model.cuda()
        # inner model only — the lightning wrapper has no eval()
        self.model.model.eval()
        predict_model = UniMCPredict(
            self.yes_token, self.no_token, self.model, self.tokenizer, self.args)
        while start < len(test_data):
            batch_data = test_data[start:start+self.args.batchsize]
            start += self.args.batchsize
            batch_result = predict_model.predict(batch_data)
            result.extend(batch_result)
        if process:
            result = self.postprocess(result)
        return result
    def preprocess(self, data):
        """Rewrite semantic-matching ('语义匹配') and NLI ('自然语言推理') samples
        in place: textb becomes the candidate choice strings, then is cleared."""
        for i, line in enumerate(data):
            if 'task_type' in line.keys() and line['task_type'] == '语义匹配':
                data[i]['choice'] = ['不能理解为:'+data[i]
                                     ['textb'], '可以理解为:'+data[i]['textb']]
                # data[i]['question']='怎么理解这段话?'
                data[i]['textb'] = ''
            if 'task_type' in line.keys() and line['task_type'] == '自然语言推理':
                data[i]['choice'] = ['不能推断出:'+data[i]['textb'],
                                     '很难推断出:'+data[i]['textb'], '可以推断出:'+data[i]['textb']]
                # data[i]['question']='根据这段话'
                data[i]['textb'] = ''
        return data
    def postprocess(self, data):
        """Map multiple-choice answers back to the original task labels:
        similar/not-similar for matching, contradiction/neutral/entailment
        for NLI; restores textb from the choice prefix and rewrites scores."""
        for i, line in enumerate(data):
            if 'task_type' in line.keys() and line['task_type'] == '语义匹配':
                data[i]['textb'] = data[i]['choice'][0].replace('不能理解为:', '')
                data[i]['choice'] = ['不相似', '相似']
                ns = {}
                for k, v in data[i]['score'].items():
                    if '不能' in k:
                        k = '不相似'
                    if '可以' in k:
                        k = '相似'
                    ns[k] = v
                data[i]['score'] = ns
                data[i]['answer'] = data[i]['choice'][data[i]['label']]
            if 'task_type' in line.keys() and line['task_type'] == '自然语言推理':
                data[i]['textb'] = data[i]['choice'][0].replace('不能推断出:', '')
                data[i]['choice'] = ['矛盾', '自然', '蕴含']
                ns = {}
                for k, v in data[i]['score'].items():
                    if '不能' in k:
                        k = '矛盾'
                    if '很难' in k:
                        k = '自然'
                    if '可以' in k:
                        k = '蕴含'
                    ns[k] = v
                data[i]['score'] = ns
                data[i]['answer'] = data[i]['choice'][data[i]['label']]
        return data
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess)
        parameter dicts; copied from the huggingface text-classification
        pipeline."""
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
| 7,967 | 39.653061 | 116 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/pipelines/information_extraction.py | from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning import trainer, loggers
from transformers import AlbertTokenizer
from transformers import AutoConfig,AutoTokenizer
from transformers.pipelines.base import Pipeline
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from fengshen.models.uniex.modeling_uniex import (
UniEXDataModel,
TaskModelCheckpoint,
UniEXLitModel,
FastExtractModel,
ExtractModel
)
class UniEXPipelines:
    """Training / batched-inference wrapper around the UniEX extraction model."""
    @staticmethod
    def pipelines_args(parent_args):
        """Register pipeline CLI flags plus data/checkpoint/model/Trainer groups."""
        total_parser = parent_args.add_argument_group("piplines args")
        total_parser.add_argument(
            '--pretrained_model_path', default='', type=str)
        total_parser.add_argument('--output_path',
                                  default='./predict.json', type=str)
        total_parser.add_argument('--load_checkpoints_path',
                                  default='', type=str)
        total_parser.add_argument('--max_extract_entity_number',
                                  default=1, type=float)
        total_parser.add_argument('--train', action='store_true')
        total_parser.add_argument('--fast_ex_mode', action='store_true')
        total_parser.add_argument('--threshold_index',
                                  default=0.5, type=float)
        total_parser.add_argument('--threshold_entity',
                                  default=0.5, type=float)
        total_parser.add_argument('--threshold_event',
                                  default=0.5, type=float)
        total_parser.add_argument('--threshold_relation',
                                  default=0.5, type=float)
        total_parser = UniEXDataModel.add_data_specific_args(total_parser)
        total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
        total_parser = UniEXLitModel.add_model_specific_args(total_parser)
        total_parser = pl.Trainer.add_argparse_args(parent_args)
        return parent_args
    def __init__(self, args):
        """Build the model (optionally from a checkpoint), trainer, tokenizer
        and the extraction helper."""
        self.args = args
        if args.load_checkpoints_path != '':
            self.model = UniEXLitModel.load_from_checkpoint(
                args.load_checkpoints_path, args=args)
            print('导入模型成功:', args.load_checkpoints_path)
        else:
            self.model = UniEXLitModel(args)
        self.checkpoint_callback = TaskModelCheckpoint(args).callbacks
        self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
        self.trainer = pl.Trainer.from_argparse_args(args,
                                                     logger=self.logger,
                                                     callbacks=[self.checkpoint_callback])
        # reserve ten [unusedN] slots as extra special tokens for the extractor
        special_tokens = ['[unused%d]' % (i + 1) for i in range(10)]
        self.tokenizer = AutoTokenizer.from_pretrained(
            args.pretrained_model_path, is_split_into_words=True, add_prefix_space=True, additional_special_tokens=special_tokens)
        extractor_cls = FastExtractModel if args.fast_ex_mode else ExtractModel
        self.em = extractor_cls(self.tokenizer, args)
    def fit(self, train_data, dev_data, test_data=[]):
        """Train on train_data; dev/test sets are attached to the model for
        its evaluation hooks."""
        data_model = UniEXDataModel(
            train_data, dev_data, self.tokenizer, self.args)
        self.model.num_data = len(train_data)
        self.model.dev_data = dev_data
        self.model.test_data = test_data
        self.trainer.fit(self.model, data_model)
    def predict(self, test_data, cuda=True):
        """Run extraction over test_data in batches of args.batchsize."""
        if cuda:
            self.model = self.model.cuda()
        self.model.eval()
        results = []
        step = self.args.batchsize
        for begin in range(0, len(test_data), step):
            results.extend(self.em.extract(
                test_data[begin:begin + step], self.model.model))
        return results
| 4,151 | 36.071429 | 127 | py |
TFusion | TFusion-master/rank-reid/transfer/simple_rank_transfer.py | import os
import utils.cuda_util
import numpy as np
from keras import Input
from keras import backend as K
from keras.applications.resnet50 import preprocess_input, ResNet50
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.engine import Model
from keras.layers import Flatten, Lambda, Dense, Conv2D
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.utils import plot_model
from numpy.random import randint
from pretrain.pair_train import eucl_dist
from utils.file_helper import safe_remove
def reid_img_prepare(LIST, TRAIN):
    """Load every image listed in LIST (first whitespace field, relative to
    TRAIN) as a preprocessed ResNet50 input; returns (N, 224, 224, 3) array.

    Lines without 'jp' (i.e. not jpg/jpeg names) are skipped.
    """
    images = []
    with open(LIST, 'r') as f:
        for line in f:
            if 'jp' not in line:
                continue
            img_name = line.strip().split()[0]
            loaded = image.load_img(os.path.join(TRAIN, img_name), target_size=[224, 224])
            batch = np.expand_dims(image.img_to_array(loaded), axis=0)
            images.append(preprocess_input(batch)[0])
    return np.array(images)
def gen_neg_right_img_ids(left_similar_persons, left_similar_matrix, batch_size):
    """Sample one pseudo-negative per row: a ranked neighbour drawn uniformly
    from positions 25..49 of each row's similarity-sorted list.

    Returns (ids, scores) as numpy arrays of length batch_size.
    """
    cols = randint(25, 50, size=batch_size)
    ids = [left_similar_persons[row][col] for row, col in enumerate(cols)]
    scores = [left_similar_matrix[row][col] for row, col in enumerate(cols)]
    return np.array(ids), np.array(scores)
def gen_pos_right_img_ids(left_similar_persons, left_similar_matrix, batch_size):
    """Sample one pseudo-positive per row: a ranked neighbour drawn uniformly
    from the top 25 positions of each row's similarity-sorted list.

    Returns (ids, scores) as numpy arrays of length batch_size.
    """
    cols = randint(0, 25, size=batch_size)
    ids = [left_similar_persons[row][col] for row, col in enumerate(cols)]
    scores = [left_similar_matrix[row][col] for row, col in enumerate(cols)]
    return np.array(ids), np.array(scores)
def gen_right_img_infos(cur_epoch, similar_matrix, similar_persons, left_img_ids, img_cnt, batch_size):
    """Pick one ranked neighbour per anchor, alternating by epoch parity:
    even epochs draw pseudo-positives (top-25), odd epochs pseudo-negatives
    (ranks 25-50). `img_cnt` is unused but kept for interface compatibility.

    Returns (ids as int array, scores).
    """
    ranked_persons = similar_persons[left_img_ids]
    ranked_scores = similar_matrix[left_img_ids]
    sampler = gen_pos_right_img_ids if cur_epoch % 2 == 0 else gen_neg_right_img_ids
    right_img_ids, right_img_scores = sampler(ranked_persons, ranked_scores, batch_size)
    return right_img_ids.astype(int), right_img_scores
def triplet_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=False):
    """Endless generator of ranking triplets (anchor, right1, right2).

    Each step samples `batch_size` anchors, then draws two ranked neighbours
    per anchor — one pseudo-positive (top-25) and one pseudo-negative
    (ranks 25-50), alternated via the internal epoch counter. The target is
    the difference of the two neighbours' similarity scores; every other step
    the two right branches are swapped (with negated target) so both score
    heads see positives and negatives.
    NOTE(review): Python 2 code — `cur_epoch/2` is integer division; since
    cur_epoch is always even at the check the alternation is unchanged under
    Python 3's true division.
    """
    cur_epoch = 0
    img_cnt = len(similar_persons)
    while True:
        # anchors drawn uniformly at random from the whole training set
        left_img_ids = randint(img_cnt, size=batch_size)
        right_img_ids1, right_img_scores1 = gen_right_img_infos(cur_epoch,
                                                                similar_matrix, similar_persons,
                                                                left_img_ids,
                                                                img_cnt, batch_size)
        cur_epoch += 1
        # second draw uses flipped parity -> the other pos/neg pool
        right_img_ids2, right_img_scores2 = gen_right_img_infos(cur_epoch,
                                                                similar_matrix, similar_persons,
                                                                left_img_ids,
                                                                img_cnt, batch_size)
        left_images = train_images[left_img_ids]
        right_images1 = train_images[right_img_ids1]
        right_images2 = train_images[right_img_ids2]
        sub_scores = np.subtract(right_img_scores1, right_img_scores2)  # * 10
        cur_epoch += 1
        # print cur_epoch
        if (cur_epoch/2) % 2 == 0:
            # print sub_scores
            # yield [left_images, right_images1, right_images2], [sub_scores, right_img_scores1, right_img_scores2]
            yield [left_images, right_images1, right_images2], [sub_scores]
        else:
            # print -sub_scores
            # yield [left_images, right_images2, right_images1], [-sub_scores, right_img_scores2, right_img_scores1]
            yield [left_images, right_images2, right_images1], [-sub_scores]
def sub(inputs):
    """Difference of a two-element pair (x, y) -> x - y; used as a Keras Lambda."""
    first, second = inputs
    return first - second
def cross_entropy_loss(real_score, predict_score):
    """Binary cross-entropy between sigmoid(real_score) (soft target) and
    sigmoid(predict_score), built from Keras backend ops."""
    predicted = 1 / (1 + K.exp(-predict_score))
    target = 1 / (1 + K.exp(-real_score))
    return -(target * K.log(predicted) + (1 - target) * K.log(1 - predicted))
def rank_transfer_model(pair_model_path):
    """Build the three-branch ranking model from a pretrained pair model.

    The shared trunk (pair model's layer 2, rebuilt here as 'resnet50') feeds
    three image inputs; two sigmoid score heads are initialised from the pair
    model's 'bin_out' head and the model output is their difference.
    NOTE(review): Python 2 code — print statement and integer division in the
    layer-freeze slice (lower two thirds of the trunk are frozen).
    """
    pair_model = load_model(pair_model_path)
    base_model = pair_model.layers[2]
    base_model = Model(inputs=base_model.get_input_at(0), outputs=[base_model.get_output_at(0)], name='resnet50')
    # base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
    # base_model = Model(inputs=[base_model.input], outputs=[base_model.output], name='resnet50')
    for layer in base_model.layers[: len(base_model.layers)/3*2]:
        layer.trainable = False
    print 'to layer: %d' % (len(base_model.layers)/3*2)
    img0 = Input(shape=(224, 224, 3), name='img_0')
    img1 = Input(shape=(224, 224, 3), name='img_1')
    img2 = Input(shape=(224, 224, 3), name='img_2')
    feature0 = Flatten()(base_model(img0))
    feature1 = Flatten()(base_model(img1))
    feature2 = Flatten()(base_model(img2))
    # squared distances between anchor and each right branch
    dis1 = Lambda(eucl_dist, name='square1')([feature0, feature1])
    dis2 = Lambda(eucl_dist, name='square2')([feature0, feature2])
    score1 = Dense(1, activation='sigmoid', name='score1')(dis1)
    score2 = Dense(1, activation='sigmoid', name='score2')(dis2)
    sub_score = Lambda(sub, name='sub_score')([score1, score2])
    model = Model(inputs=[img0, img1, img2], outputs=[sub_score])
    # model = Model(inputs=[img0, img1, img2], outputs=[sub_score])
    # both score heads start from the pair model's binary output weights
    model.get_layer('score1').set_weights(pair_model.get_layer('bin_out').get_weights())
    model.get_layer('score2').set_weights(pair_model.get_layer('bin_out').get_weights())
    plot_model(model, to_file='rank_model.png')
    print(model.summary())
    return model
def rank_transfer(train_generator, val_generator, source_model_path, target_model_path, batch_size=48):
    """Compile and train the triplet ranking model, then save to target_model_path.

    Loss is the sigmoid cross-entropy on the predicted score difference.
    Step counts are hard-coded per target: Market-sized targets vs small
    (GRID-style) targets.
    NOTE(review): Python 2 code — `cnt / batch_size + 1` becomes a float
    under Python 3.
    """
    model = rank_transfer_model(source_model_path)
    plot_model(model, 'rank_model.png')
    model.compile(
        optimizer=SGD(lr=0.001, momentum=0.9),  # 'adam',
        # optimizer='adam',
        loss={
            'sub_score': cross_entropy_loss,
            #'score1': 'binary_crossentropy',
            #'score2': 'binary_crossentropy'
            # 'sub_score': 'mse'
        },
        loss_weights={
            'sub_score': 1,
            # 'score1': 0.5,
            # 'score2': 0.5
        },
        # metrics=['accuracy']
    )
    early_stopping = EarlyStopping(monitor='val_loss', patience=3)
    auto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, verbose=0, mode='auto', epsilon=0.0001,
                                cooldown=0, min_lr=0)
    if 'market-' in target_model_path:
        train_data_cnt = 16500
        val_data_cnt = 1800
    else:
        train_data_cnt = 1600
        val_data_cnt = 180
    model.fit_generator(train_generator,
                        steps_per_epoch=train_data_cnt / batch_size + 1,
                        epochs=5,
                        validation_data=val_generator,
                        validation_steps=val_data_cnt / batch_size + 1,
                        callbacks=[
                            early_stopping,
                            auto_lr
                        ]
                        )
    # delete any previous weights file before saving the new one
    safe_remove(target_model_path)
    # model.save('simple_rank_transfer.h5')
    model.save(target_model_path)
def rank_transfer_2market():
    """Transfer the pair-pretrained model to Market using precomputed rank lists.

    NOTE(review): Python 2 code — the 9/10 train/val split relies on integer
    division (a float index raises under Python 3). The `- 1` shifts what
    appear to be 1-based pids in cross_filter_pid.log — confirm against the
    rank-list producer.
    """
    DATASET = '../dataset/Market'
    LIST = os.path.join(DATASET, 'pretrain.list')
    TRAIN = os.path.join(DATASET, 'bounding_box_train')
    train_images = reid_img_prepare(LIST, TRAIN)
    batch_size = 16
    # similar_persons = np.genfromtxt('../pretrain/train_renew_pid.log', delimiter=' ')
    # similar_matrix = np.genfromtxt('../pretrain/train_renew_ac.log', delimiter=' ')
    similar_persons = np.genfromtxt('../pretrain/cross_filter_pid.log', delimiter=' ') - 1
    similar_matrix = np.genfromtxt('../pretrain/cross_filter_score.log', delimiter=' ')
    rank_transfer(
        triplet_generator_by_rank_list(train_images[: len(train_images)*9/10], batch_size, similar_persons, similar_matrix, train=True),
        triplet_generator_by_rank_list(train_images[len(train_images)*9/10:], batch_size, similar_persons, similar_matrix, train=False),
        '../pretrain/pair_pretrain.h5',
        'market2grid.h5',
        batch_size=batch_size
    )
def rank_transfer_2dataset(source_pair_model_path, target_train_list, target_model_path, target_train_path,
                           rank_pid_path, rank_score_path):
    """Fine-tune the source pair model on a target dataset using the
    precomputed rank lists (pid + score files) and save the result."""
    images = reid_img_prepare(target_train_list, target_train_path)
    chunk = 16
    ranked_pids = np.genfromtxt(rank_pid_path, delimiter=' ')
    ranked_scores = np.genfromtxt(rank_score_path, delimiter=' ')
    rank_transfer(
        triplet_generator_by_rank_list(images, chunk, ranked_pids, ranked_scores, train=True),
        triplet_generator_by_rank_list(images, chunk, ranked_pids, ranked_scores, train=False),
        source_pair_model_path,
        target_model_path,
        batch_size=chunk
    )
def two_stage_rank_transfer_2dataset(source_pair_model_path, target_train_list, target_model_path, target_train_path,
                                     rank_pid_path, rank_score_path):
    """Transfer the source pair model to a target dataset.

    NOTE: currently identical to rank_transfer_2dataset — the second stage
    has not been implemented.
    """
    images = reid_img_prepare(target_train_list, target_train_path)
    chunk = 16
    ranked_pids = np.genfromtxt(rank_pid_path, delimiter=' ')
    ranked_scores = np.genfromtxt(rank_score_path, delimiter=' ')
    rank_transfer(
        triplet_generator_by_rank_list(images, chunk, ranked_pids, ranked_scores, train=True),
        triplet_generator_by_rank_list(images, chunk, ranked_pids, ranked_scores, train=False),
        source_pair_model_path,
        target_model_path,
        batch_size=chunk
    )
if __name__ == '__main__':
    # Smoke test (Python 2): load the CUHK pair model, inspect its shared
    # trunk, then run a CUHK -> Market rank transfer with hard-coded paths.
    pair_model = load_model('../pretrain/cuhk_pair_pretrain.h5')
    base_model = pair_model.layers[2]
    base_model = Model(inputs=base_model.get_input_at(0), outputs=[base_model.get_output_at(0)], name='resnet50')
    print isinstance(base_model.layers[-20], Conv2D)
    rank_transfer_2dataset('../pretrain/cuhk_pair_pretrain.h5', '../dataset/market_train.list',
                           'rank_transfer_test.h5',
                           '/home/cwh/coding/Market-1501/train',
                           '/home/cwh/coding/TrackViz/data/cuhk_market-train/cross_filter_pid.log',
                           '/home/cwh/coding/TrackViz/data/cuhk_market-train/cross_filter_score.log')
| 11,654 | 43.484733 | 136 | py |
TFusion | TFusion-master/rank-reid/baseline/evaluate.py | from __future__ import division, print_function, absolute_import
import os
import numpy as np
import tensorflow as tf
from keras.applications.resnet50 import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
from utils.file_helper import write, safe_remove
def extract_info(dir_path):
    """Parse (person_id, camera_id) from every image filename in dir_path.

    Market/Duke-style names ('0001_c1s1_...' / '0001_c1_f...') carry the
    camera after a 'c' marker; GRID names ('0001_1_...') carry it directly.
    '.txt' files are skipped; order follows sorted filenames.
    """
    infos = []
    for name in sorted(os.listdir(dir_path)):
        if '.txt' in name:
            continue
        fields = name.split('_')
        if 's' in name or 'f' in name:
            # market && duke: second field is like 'c1s1' -> camera digit at [1]
            infos.append((int(fields[0]), int(fields[1][1])))
        elif 's' not in name:
            # grid: second field is the camera id itself
            infos.append((int(fields[0]), int(fields[1])))
    return infos
def extract_feature(dir_path, net):
    """Run `net` over every image in dir_path.

    Returns (features, infos): a list of squeezed prediction vectors and a
    parallel list of (person_id, camera_id) parsed from the filenames
    (same naming rules as extract_info). '.txt' files are skipped.
    """
    features = []
    infos = []
    for name in sorted(os.listdir(dir_path)):
        if '.txt' in name:
            continue
        fields = name.split('_')
        if 'f' in name or 's' in name:
            # market / duke naming
            person, camera = int(fields[0]), int(fields[1][1])
        elif 's' not in name:
            # grid naming
            person, camera = int(fields[0]), int(fields[1])
        else:
            continue
        loaded = image.load_img(os.path.join(dir_path, name), target_size=(224, 224))
        batch = preprocess_input(np.expand_dims(image.img_to_array(loaded), axis=0))
        features.append(np.squeeze(net.predict(batch)))
        infos.append((person, camera))
    return features, infos
def similarity_matrix(query_f, test_f):
    """Cosine similarity between every query and gallery feature via a TF1 graph.

    Returns a (num_query, num_gallery) numpy array; higher means more similar.
    """
    # Tensorflow graph
    # use GPU to calculate the similarity matrix
    query_t = tf.placeholder(tf.float32, (None, None))
    test_t = tf.placeholder(tf.float32, (None, None))
    # L2-normalize rows so the dot product below equals cosine similarity
    query_t_norm = tf.nn.l2_normalize(query_t, dim=1)
    test_t_norm = tf.nn.l2_normalize(test_t, dim=1)
    tensor = tf.matmul(query_t_norm, test_t_norm, transpose_a=False, transpose_b=True)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # register the session with Keras so later model calls reuse it
    set_session(sess)
    result = sess.run(tensor, {query_t: query_f, test_t: test_f})
    print(result.shape)
    # descend
    return result
def sort_similarity(query_f, test_f):
    """Return (similarity matrix, per-row gallery indices in descending
    similarity order)."""
    sim = similarity_matrix(query_f, test_f)
    order = np.argsort(-sim, axis=1)
    return sim, order
def map_rank_quick_eval(query_info, test_info, result_argsort):
    """Compute (rank-1 accuracy, mAP) from a precomputed ranking.

    query_info / test_info: lists of (person_id, camera_id).
    result_argsort: per query, gallery indices sorted by descending similarity.
    Same-camera matches and distractors (person_id == -1) are ignored ("junk"),
    following the Market-1501 protocol. Per-query rank-1 hits are also written
    to 'rank_1.log'.
    Assumes every query has at least one cross-camera match — TODO confirm.
    """
    query_num = len(query_info)
    match = []   # per query: rank positions of cross-camera true matches
    junk = []    # per query: rank positions to skip over
    for q_idx, (q_person, q_cam) in enumerate(query_info):
        good, ignored = [], []
        for rank_pos in range(len(test_info)):
            t_person, t_cam = test_info[int(result_argsort[q_idx][rank_pos])]
            if t_person == q_person and t_cam != q_cam:
                good.append(rank_pos)
            elif t_person == q_person or t_person == -1:
                ignored.append(rank_pos)
        match.append(good)
        junk.append(ignored)
    rank_1 = 0.0
    mAP = 0.0
    rank1_list = list()
    for idx in range(query_num):
        if idx % 100 == 0:
            print('evaluate img %d' % idx)
        YES, IGNORE = match[idx], junk[idx]
        # count junk positions ranked strictly above the first true match
        leading_junk = 0
        for ig in IGNORE:
            if ig < YES[0]:
                leading_junk += 1
            else:
                break
        hit_at_1 = leading_junk >= YES[0]
        if hit_at_1:
            rank_1 += 1
        rank1_list.append(1 if hit_at_1 else 0)
        recall, precision, ap = 0.0, 1.0, 0.0
        for i, k in enumerate(YES):
            skipped = 0
            for ig in IGNORE:
                if ig < k:
                    skipped += 1
                else:
                    break
            effective_rank = k + 1 - skipped
            hits = i + 1
            new_recall = hits / len(YES)
            new_precision = hits / effective_rank
            # trapezoidal area under the precision-recall curve
            ap += (new_recall - recall) * ((precision + new_precision) / 2)
            recall, precision = new_recall, new_precision
        mAP += ap
    rank1_acc = rank_1 / query_num
    mAP = mAP / query_num
    print('Rank 1:\t%f' % rank1_acc)
    print('mAP:\t%f' % mAP)
    np.savetxt('rank_1.log', np.array(rank1_list), fmt='%d')
    return rank1_acc, mAP
def train_predict(net, train_path, pid_path, score_path):
    """Rank every training image against all others and dump the rank lists.

    Scores/ids are written without column 0 because the most similar image is
    always the query itself.
    """
    feature_net = Model(inputs=[net.input], outputs=[net.get_layer('avg_pool').output])
    train_f, _ = extract_feature(train_path, feature_net)
    result, result_argsort = sort_similarity(train_f, train_f)
    for row in range(len(result)):
        result[row] = result[row][result_argsort[row]]
    result = np.array(result)
    # ignore top1 because it's the origin image
    np.savetxt(score_path, result[:, 1:], fmt='%.4f')
    np.savetxt(pid_path, result_argsort[:, 1:], fmt='%d')
    return result
def test_predict(net, probe_path, gallery_path, pid_path, score_path):
    """Rank the gallery for every probe image and write ids/scores to disk.

    Uses *net*'s avg_pool layer as the feature extractor; any stale output
    files are removed before writing.
    """
    feature_net = Model(inputs=[net.input], outputs=[net.get_layer('avg_pool').output])
    test_f, test_info = extract_feature(gallery_path, feature_net)
    query_f, query_info = extract_feature(probe_path, feature_net)
    result, result_argsort = sort_similarity(query_f, test_f)
    for row in range(len(result)):
        result[row] = result[row][result_argsort[row]]
    result = np.array(result)
    safe_remove(pid_path)
    safe_remove(score_path)
    np.savetxt(pid_path, result_argsort, fmt='%d')
    np.savetxt(score_path, result, fmt='%.4f')
def market_result_eval(predict_path, log_path='market_result_eval.log', TEST='Market-1501/test',
                       QUERY='Market-1501/probe'):
    """Evaluate a saved Market-1501 ranking and append rank1/mAP to *log_path*.

    predict_path: whitespace-separated file of per-query gallery indices.
    TEST / QUERY: directories whose filenames encode (person, camera) info.
    """
    res = np.genfromtxt(predict_path, delimiter=' ')
    print('predict info get, extract gallery info start')
    test_info = extract_info(TEST)
    print('extract probe info start')
    query_info = extract_info(QUERY)
    print('start evaluate map and rank acc')
    rank1, mAP = map_rank_quick_eval(query_info, test_info, res)
    # append results so multiple runs accumulate in one log
    write(log_path, predict_path + '\n')
    write(log_path, '%f\t%f\n' % (rank1, mAP))
def grid_result_eval(predict_path, log_path='grid_eval.log'):
    """Compute GRID rank-1/5/10/20/50 accuracy from a saved ranking file.

    predict_path: per-probe gallery indices, one probe per line.
    Appends the first three accuracies to *log_path* and prints all five.
    """
    pids4probes = np.genfromtxt(predict_path, delimiter=' ')
    # probe_shoot[k] counts hits within rank 1/5/10/20/50 respectively
    probe_shoot = [0, 0, 0, 0, 0]
    for i, pids in enumerate(pids4probes):
        for j, pid in enumerate(pids):
            # NOTE(review): probe i's true gallery match appears to live at
            # gallery index i + 775 in this GRID layout -- confirm against
            # the dataset split used to produce predict_path.
            if pid - i == 775:
                if j == 0:
                    for k in range(5):
                        probe_shoot[k] += 1
                elif j < 5:
                    for k in range(1, 5):
                        probe_shoot[k] += 1
                elif j < 10:
                    for k in range(2, 5):
                        probe_shoot[k] += 1
                elif j < 20:
                    for k in range(3, 5):
                        probe_shoot[k] += 1
                elif j < 50:
                    for k in range(4, 5):
                        probe_shoot[k] += 1
                break
    probe_acc = [shoot / len(pids4probes) for shoot in probe_shoot]
    write(log_path, predict_path + '\n')
    write(log_path, '%.2f\t%.2f\t%.2f\n' % (probe_acc[0], probe_acc[1], probe_acc[2]))
    print(predict_path)
    print(probe_acc)
if __name__ == '__main__':
    # Evaluate a previously saved cross-filter ranking with the default
    # Market-1501 paths.
    market_result_eval('cross_filter_pid.log')
| 7,555 | 31.568966 | 96 | py |
TFusion | TFusion-master/rank-reid/baseline/train.py | from __future__ import division, print_function, absolute_import
import os
from random import shuffle
import numpy as np
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.initializers import RandomNormal
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Input
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
def load_mix_data(LIST, TRAIN):
    """Load images and one-hot labels from a mixed-dataset image list.

    A new class index is started whenever either the person-id prefix or the
    file extension changes, so identical ids coming from different source
    datasets stay distinct.

    Args:
        LIST: text file with one image filename per line ('<id>_...').
        TRAIN: directory containing the image files.

    Returns:
        (images, labels): shuffled array of preprocessed 224x224 images and
        the matching one-hot label matrix.
    """
    images, labels = [], []
    with open(LIST, 'r') as f:
        last_label = -1
        label_cnt = -1
        last_type = ''
        for line in f:
            line = line.strip()
            img = line
            lbl = line.split('_')[0]
            cur_type = line.split('.')[-1]
            # new (id, extension) combination => new consecutive class index
            if last_label != lbl or last_type != cur_type:
                label_cnt += 1
                last_label = lbl
                last_type = cur_type
            img = image.load_img(os.path.join(TRAIN, img), target_size=[224, 224])
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)
            images.append(img[0])
            labels.append(label_cnt)
    # BUG FIX: random.shuffle() mutates its argument in place, which raises
    # TypeError on a bare range object under Python 3 -- materialize a list.
    shuffle_idxes = list(range(len(labels)))
    shuffle(shuffle_idxes)
    shuffle_imgs = [images[idx] for idx in shuffle_idxes]
    shuffle_labels = [labels[idx] for idx in shuffle_idxes]
    images = np.array(shuffle_imgs)
    labels = to_categorical(shuffle_labels)
    return images, labels
def load_data(LIST, TRAIN):
    """Load images and one-hot labels for a single-dataset image list.

    Consecutive lines sharing the same '<id>_' filename prefix are assigned
    the same consecutive class index.

    Args:
        LIST: text file with one image filename per line.
        TRAIN: directory containing the image files.

    Returns:
        (images, labels): shuffled array of preprocessed 224x224 images and
        the matching one-hot label matrix.
    """
    images, labels = [], []
    with open(LIST, 'r') as f:
        last_label = -1
        label_cnt = -1
        for line in f:
            line = line.strip()
            img = line
            lbl = line.split('_')[0]
            # new person id => new consecutive class index
            if last_label != lbl:
                label_cnt += 1
                last_label = lbl
            img = image.load_img(os.path.join(TRAIN, img), target_size=[224, 224])
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)
            images.append(img[0])
            labels.append(label_cnt)
    # BUG FIX: random.shuffle() mutates its argument in place, which raises
    # TypeError on a bare range object under Python 3 -- materialize a list.
    shuffle_idxes = list(range(len(labels)))
    shuffle(shuffle_idxes)
    shuffle_imgs = [images[idx] for idx in shuffle_idxes]
    shuffle_labels = [labels[idx] for idx in shuffle_idxes]
    images = np.array(shuffle_imgs)
    labels = to_categorical(shuffle_labels)
    return images, labels
def softmax_model_pretrain(train_list, train_dir, class_count, target_model_path):
    """Fine-tune an ImageNet ResNet50 as a softmax id-classifier and save it.

    Args:
        train_list: text file listing training image filenames.
        train_dir: directory holding the images.
        class_count: number of identity classes (softmax width).
        target_model_path: where to save the trained Keras model (.h5).
    """
    images, labels = load_data(train_list, train_dir)
    # grow GPU memory on demand instead of grabbing it all up front
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)
    # load pre-trained resnet50
    base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
    x = base_model.output
    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(class_count, activation='softmax', name='fc8', kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(x)
    net = Model(inputs=[base_model.input], outputs=[x])
    for layer in net.layers:
        layer.trainable = True
    # pretrain
    batch_size = 16
    train_datagen = ImageDataGenerator(
        shear_range=0.2,
        width_shift_range=0.2,  # 0.
        height_shift_range=0.2)
    net.compile(optimizer=SGD(lr=0.001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
    net.fit_generator(
        train_datagen.flow(images, labels, batch_size=batch_size),
        steps_per_epoch=len(images) / batch_size + 1, epochs=40,
    )
    net.save(target_model_path)
def softmax_pretrain_on_dataset(source, project_path='/home/cwh/coding/rank-reid', dataset_parent='/home/cwh/coding'):
    """Map a dataset name to its list file/dir/class count and pretrain on it.

    The trained model is saved to '../pretrain/<source>_softmax_pretrain.h5'.
    NOTE: an unrecognized *source* falls through with 'unknown' paths and
    class_count=-1, which will fail later inside softmax_model_pretrain.
    """
    if source == 'market':
        train_list = project_path + '/dataset/market_train.list'
        train_dir = dataset_parent + '/Market-1501/train'
        class_count = 751
    elif source == 'grid':
        train_list = project_path + '/dataset/grid_train.list'
        train_dir = dataset_parent + '/grid_label'
        class_count = 250
    elif source == 'cuhk':
        train_list = project_path + '/dataset/cuhk_train.list'
        train_dir = dataset_parent + '/cuhk01'
        class_count = 971
    elif source == 'viper':
        train_list = project_path + '/dataset/viper_train.list'
        train_dir = dataset_parent + '/viper'
        class_count = 630
    elif source == 'duke':
        train_list = project_path + '/dataset/duke_train.list'
        train_dir = dataset_parent + '/DukeMTMC-reID/train'
        class_count = 702
    elif 'grid-cv' in source:
        # cross-validation fold index is encoded in the name, e.g. grid-cv-3
        cv_idx = int(source.split('-')[-1])
        train_list = project_path + '/dataset/grid-cv/%d.list' % cv_idx
        train_dir = dataset_parent + '/underground_reid/cross%d/train' % cv_idx
        class_count = 125
    elif 'mix' in source:
        train_list = project_path + '/dataset/mix.list'
        train_dir = dataset_parent + '/cuhk_grid_viper_mix'
        class_count = 250 + 971 + 630
    else:
        train_list = 'unknown'
        train_dir = 'unknown'
        class_count = -1
    softmax_model_pretrain(train_list, train_dir, class_count, '../pretrain/' + source + '_softmax_pretrain.h5')
if __name__ == '__main__':
    # Pretrain a softmax classifier on each listed source dataset.
    # sources = ['market', 'grid', 'cuhk', 'viper']
    sources = ['market']
    for source in sources:
        softmax_pretrain_on_dataset(source)
| 5,745 | 33.407186 | 120 | py |
TFusion | TFusion-master/rank-reid/pretrain/pair_train.py | import os
import numpy as np
from keras import Input
from keras import backend as K
from keras.applications.resnet50 import preprocess_input
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.engine import Model
from keras.layers import Lambda, Dense, Dropout, Flatten
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.utils import plot_model, to_categorical
from numpy.random import randint, shuffle, choice
from baseline.train import softmax_pretrain_on_dataset
def mix_data_prepare(data_list_path, train_dir_path):
    """Group preprocessed images by class for a mixed-dataset list.

    A new class is started whenever the person id OR the file extension
    changes, keeping identical ids from different source datasets distinct.

    Returns:
        dict mapping str(class_index) -> list of preprocessed image arrays.
    """
    class_img_labels = dict()
    class_cnt = -1
    last_label = -2
    last_type = ''
    with open(data_list_path, 'r') as f:
        for line in f:
            line = line.strip()
            img = line
            lbl = int(line.split('_')[0])
            img_type = line.split('.')[-1]
            if lbl != last_label or img_type != last_type:
                class_cnt = class_cnt + 1
                cur_list = list()
                class_img_labels[str(class_cnt)] = cur_list
                last_label = lbl
                last_type = img_type
            img = image.load_img(os.path.join(train_dir_path, img), target_size=[224, 224])
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)
            class_img_labels[str(class_cnt)].append(img[0])
    return class_img_labels
def reid_data_prepare(data_list_path, train_dir_path):
    """Group preprocessed training images by identity class.

    Delegates to mix_data_prepare() when the list file belongs to the mixed
    dataset; otherwise consecutive lines sharing a '<id>_' prefix form one
    class.

    Returns:
        dict mapping str(class_index) -> list of preprocessed image arrays.
    """
    if 'mix' in data_list_path:
        return mix_data_prepare(data_list_path, train_dir_path)
    class_img_labels = dict()
    class_cnt = -1
    last_label = -2
    with open(data_list_path, 'r') as f:
        for line in f:
            line = line.strip()
            img = line
            lbl = int(line.split('_')[0])
            if lbl != last_label:
                class_cnt = class_cnt + 1
                cur_list = list()
                class_img_labels[str(class_cnt)] = cur_list
                last_label = lbl
            img = image.load_img(os.path.join(train_dir_path, img), target_size=[224, 224])
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)
            class_img_labels[str(class_cnt)].append(img[0])
    return class_img_labels
def pair_generator(class_img_labels, batch_size, train=False):
    """Endless generator of image pairs for the siamese pair model.

    Every pos_prop-th batch pairs each image with one from the same class
    (positives); other batches shuffle the right-hand labels to create
    mostly-negative pairs.  With train=False only the last 10% of each
    class's images is sampled (validation slice).

    Yields:
        [left_images, right_images], [left_onehot, right_onehot, same_flag]
    """
    cur_epoch = 0
    pos_prop = 5
    while True:
        left_label = randint(len(class_img_labels), size=batch_size)
        if cur_epoch % pos_prop == 0:
            right_label = left_label
        else:
            right_label = np.copy(left_label)
            shuffle(right_label)
        # select by label
        left_images = list()
        right_images = list()
        if train:
            slice_start = 0
        else:
            # val
            slice_start = 0.9
        for i in range(batch_size):
            len_left_label_i = len(class_img_labels[str(left_label[i])])
            left_images.append(class_img_labels[str(left_label[i])][int(slice_start * len_left_label_i):][
                choice(len_left_label_i - int(len_left_label_i * slice_start))])
            len_right_label_i = len(class_img_labels[str(right_label[i])])
            right_images.append(class_img_labels[str(right_label[i])][int(slice_start * len_right_label_i):][
                choice(len_right_label_i - int(len_right_label_i * slice_start))])
        left_images = np.array(left_images)
        right_images = np.array(right_images)
        # 1 where the pair shares a class, 0 otherwise
        binary_label = (left_label == right_label).astype(int)
        left_label = to_categorical(left_label, num_classes=len(class_img_labels))
        right_label = to_categorical(right_label, num_classes=len(class_img_labels))
        cur_epoch += 1
        yield [left_images, right_images], [left_label, right_label, binary_label]
def eucl_dist(inputs):
    """Element-wise squared difference between the two input tensors."""
    left, right = inputs
    return K.square(left - right)
def dis_sigmoid(dis):
    """Map a distance to a (0, 2] similarity score: 2 * sigmoid(-dis)."""
    return K.expand_dims(2 / (1 + K.exp(dis)))
def pair_model(source_model_path, num_classes):
    """Build the siamese pair network on top of a pretrained softmax model.

    Two shared-weight ResNet50 branches feed (a) a per-branch softmax
    classifier (weights copied from the pretrained 'fc8' layer) and (b) a
    binary same/different head driven by the squared feature difference.

    Returns:
        the compiled-ready Keras Model with outputs
        [ctg_out_1, ctg_out_2, bin_out].
    """
    softmax_model = load_model(source_model_path)
    # base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
    base_model = Model(inputs=softmax_model.input, outputs=[softmax_model.get_layer('avg_pool').output], name='resnet50')
    img1 = Input(shape=(224, 224, 3), name='img_1')
    img2 = Input(shape=(224, 224, 3), name='img_2')
    feature1 = Flatten()(base_model(img1))
    feature2 = Flatten()(base_model(img2))
    dis = Lambda(eucl_dist, name='square')([feature1, feature2])
    # judge = Lambda(dis_sigmoid, name='bin_out')(dis)
    judge = Dense(1, activation='sigmoid', name='bin_out')(Dropout(0.9)(dis))
    category_predict1 = Dense(num_classes, activation='softmax', name='ctg_out_1')(
        Dropout(0.9)(feature1)
    )
    category_predict2 = Dense(num_classes, activation='softmax', name='ctg_out_2')(
        Dropout(0.9)(feature2)
    )
    model = Model(inputs=[img1, img2], outputs=[category_predict1, category_predict2, judge])
    # warm-start both classification heads from the pretrained softmax layer
    model.get_layer('ctg_out_1').set_weights(softmax_model.get_layer('fc8').get_weights())
    model.get_layer('ctg_out_2').set_weights(softmax_model.get_layer('fc8').get_weights())
    plot_model(model, to_file='model_combined.png')
    # for layer in base_model.layers[:-10]:
    #     layer.trainable = False
    for layer in base_model.layers:
        layer.trainable = True
    return model
def common_lr(epoch):
    """Step learning-rate schedule: 0.01 for the first 20 epochs, then 0.001."""
    return 0.01 if epoch < 20 else 0.001
def pair_tune(source_model_path, train_generator, val_generator, tune_dataset, batch_size=48, num_classes=751):
    """Fine-tune the siamese pair model and save '<tune_dataset>_pair_pretrain.h5'.

    Args:
        source_model_path: pretrained softmax model to initialize from.
        train_generator / val_generator: pair_generator() instances.
        tune_dataset: name used for the saved model file.
        batch_size: used only to size steps_per_epoch / validation_steps.
        num_classes: width of the two softmax heads.
    """
    model = pair_model(source_model_path, num_classes)
    # binary same/different loss is weighted as heavily as both softmax heads
    model.compile(optimizer=SGD(lr=0.001, momentum=0.9),
                  loss={'ctg_out_1': 'categorical_crossentropy',
                        'ctg_out_2': 'categorical_crossentropy',
                        'bin_out': 'binary_crossentropy'},
                  loss_weights={
                      'ctg_out_1': 0.5,
                      'ctg_out_2': 0.5,
                      'bin_out': 1.
                  },
                  metrics=['accuracy'])
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    auto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=0, mode='auto', epsilon=0.0001,
                                cooldown=0, min_lr=0)
    # save_model = ModelCheckpoint('resnet50-{epoch:02d}-{val_ctg_out_1_acc:.2f}.h5', period=2)
    model.fit_generator(train_generator,
                        steps_per_epoch=16500 / batch_size + 1,
                        epochs=20,
                        validation_data=val_generator,
                        validation_steps=1800 / batch_size + 1,
                        callbacks=[auto_lr, early_stopping])
    model.save(tune_dataset + '_pair_pretrain.h5')
def pair_pretrain_on_dataset(source, project_path='/home/cwh/coding/rank-reid', dataset_parent='/home/cwh/coding'):
    """Map a dataset name to its paths/class count and fine-tune the pair model.

    Expects '<source>_softmax_pretrain.h5' in the working directory.  An
    unrecognized *source* falls through with 'unknown' paths and
    class_count=-1, which will fail later during data loading.
    """
    if source == 'market':
        train_list = project_path + '/dataset/market_train.list'
        train_dir = dataset_parent + '/Market-1501/train'
        class_count = 751
    elif source == 'markets1':
        train_list = project_path + '/dataset/markets1_train.list'
        train_dir = dataset_parent + '/markets1'
        class_count = 751
    elif source == 'grid':
        train_list = project_path + '/dataset/grid_train.list'
        train_dir = dataset_parent + '/grid_label'
        class_count = 250
    elif source == 'cuhk':
        train_list = project_path + '/dataset/cuhk_train.list'
        train_dir = dataset_parent + '/cuhk01'
        class_count = 971
    elif source == 'viper':
        train_list = project_path + '/dataset/viper_train.list'
        train_dir = dataset_parent + '/viper'
        class_count = 630
    elif source == 'duke':
        train_list = project_path + '/dataset/duke_train.list'
        train_dir = dataset_parent + '/DukeMTMC-reID/train'
        class_count = 702
    elif 'grid-cv' in source:
        # cross-validation fold index is encoded in the name, e.g. grid-cv-3
        cv_idx = int(source.split('-')[-1])
        train_list = project_path + '/dataset/grid-cv/%d.list' % cv_idx
        train_dir = dataset_parent + '/grid_train_probe_gallery/cross%d/train' % cv_idx
        class_count = 125
    elif 'mix' in source:
        train_list = project_path + '/dataset/mix.list'
        train_dir = dataset_parent + '/cuhk_grid_viper_mix'
        class_count = 250 + 971 + 630
    else:
        train_list = 'unknown'
        train_dir = 'unknown'
        class_count = -1
    class_img_labels = reid_data_prepare(train_list, train_dir)
    batch_size = 16
    pair_tune(
        source + '_softmax_pretrain.h5',
        pair_generator(class_img_labels, batch_size=batch_size, train=True),
        pair_generator(class_img_labels, batch_size=batch_size, train=False),
        source,
        batch_size=batch_size, num_classes=class_count
    )
if __name__ == '__main__':
    # Pretrain (softmax, then pair) on each source dataset, then on every
    # GRID cross-validation fold.
    # NOTE: a dead `sources = ['cuhk_grid_viper_mix']` assignment that was
    # immediately overwritten, plus a commented-out duplicate of the loops
    # below, were removed.
    sources = ['cuhk', 'viper', 'market', 'duke']
    for source in sources:
        softmax_pretrain_on_dataset(source,
                                    project_path='/home/cwh/coding/rank-reid',
                                    dataset_parent='/home/cwh/coding/')
        pair_pretrain_on_dataset(source)
    sources = ['grid-cv-%d' % i for i in range(10)]
    for source in sources:
        softmax_pretrain_on_dataset(source,
                                    project_path='/home/cwh/coding/rank-reid',
                                    dataset_parent='/home/cwh/coding')
        pair_pretrain_on_dataset(source,
                                 project_path='/home/cwh/coding/rank-reid',
                                 dataset_parent='/home/cwh/coding')
| 10,663 | 40.173745 | 121 | py |
TFusion | TFusion-master/rank-reid/pretrain/eval.py | # coding=utf-8
import os
from keras import backend as K
from keras.engine import Model
from keras.models import load_model
from keras.preprocessing import image
from baseline.evaluate import train_predict, test_predict, grid_result_eval, market_result_eval
from transfer.simple_rank_transfer import cross_entropy_loss
#
def train_pair_predict(pair_model_path, target_train_path, pid_path, score_path):
    """Load a trained pair model, keep only its ResNet50 trunk, and rank the
    training set against itself via train_predict()."""
    full_model = load_model(pair_model_path)
    backbone = Model(inputs=[full_model.get_layer('resnet50').get_input_at(0)],
                     outputs=[full_model.get_layer('resnet50').get_output_at(0)])
    train_predict(backbone, target_train_path, pid_path, score_path)
def test_pair_predict(pair_model_path, target_probe_path, target_gallery_path, pid_path, score_path):
    """Load a trained pair model, extract its ResNet50 trunk, and rank the
    gallery for every probe image via test_predict()."""
    full_model = load_model(pair_model_path)
    backbone = Model(inputs=[full_model.get_layer('resnet50').get_input_at(0)],
                     outputs=[full_model.get_layer('resnet50').get_output_at(0)])
    test_predict(backbone, target_probe_path, target_gallery_path, pid_path, score_path)
def extract_imgs(dir_path):
    """Load every image in *dir_path* (sorted by name) as a 224x224 array.

    Filenames containing '.txt' are skipped.  Person/camera ids are parsed
    from the filename (GRID: '<person>_<camera>_...'; Market:
    '<person>_s?c<camera>...') purely for their validating side effect --
    a malformed name raises ValueError, as before.

    NOTE: the original had an unreachable `else: continue` branch (its
    `if 's' not in name` / `elif 's' in name` conditions are complementary);
    it has been removed with no behavior change.

    Returns:
        list of float image arrays, one per image.
    """
    imgs = []
    for image_name in sorted(os.listdir(dir_path)):
        if '.txt' in image_name:
            continue
        arr = image_name.split('_')
        person = int(arr[0])
        if 's' not in image_name:
            # grid naming: <person>_<camera>_...
            camera = int(arr[1])
        else:
            # market naming: <person>_s?c<camera>...
            camera = int(arr[1][1])
        image_path = os.path.join(dir_path, image_name)
        img = image.load_img(image_path, target_size=(224, 224))
        x = image.img_to_array(img)
        imgs.append(x)
    return imgs
def tf_eucl_dist(inputs):
    """Element-wise squared difference of the two tensors in *inputs*."""
    a, b = inputs
    return K.square(a - b)
def avg_eucl_dist(inputs):
    """Per-sample mean squared difference of the two tensors in *inputs*."""
    a, b = inputs
    return K.mean(K.square(a - b), axis=1)
def train_rank_predict(rank_model_path, target_train_path, pid_path, score_path):
    """Load a rank-transfer model (with its custom loss), keep only the
    ResNet50 trunk, and rank the training set against itself."""
    rank_model = load_model(rank_model_path, custom_objects={'cross_entropy_loss': cross_entropy_loss})
    backbone = Model(inputs=[rank_model.get_layer('resnet50').get_input_at(0)],
                     outputs=[rank_model.get_layer('resnet50').get_output_at(0)])
    train_predict(backbone, target_train_path, pid_path, score_path)
def test_rank_predict(rank_model_path, target_probe_path, target_gallery_path, pid_path, score_path):
    """Load a rank-transfer model (with its custom loss), keep only the
    ResNet50 trunk, and rank the gallery for every probe image."""
    rank_model = load_model(rank_model_path, custom_objects={'cross_entropy_loss': cross_entropy_loss})
    backbone = Model(inputs=[rank_model.get_layer('resnet50').get_input_at(0)],
                     outputs=[rank_model.get_layer('resnet50').get_output_at(0)])
    test_predict(backbone, target_probe_path, target_gallery_path, pid_path, score_path)
def grid_eval(source, transform_dir):
    """Run the pair model over all 10 GRID cross-validation folds and score each.

    Expects '<source>_pair_pretrain.h5' in the working directory and
    'cross<i>/probe' + 'cross<i>/test' subdirectories under *transform_dir*.
    """
    target = 'grid'
    for i in range(10):
        test_pair_predict(source + '_pair_pretrain.h5',
                          transform_dir + 'cross%d' % i + '/probe', transform_dir + 'cross%d' % i + '/test',
                          source + '_' + target + '_pid.log', source + '_' + target + '_score.log')
        grid_result_eval(source + '_' + target + '_pid.log', 'gan.log')
def market_eval(source, transform_dir):
    """Rank Market-1501 probes against the gallery with the trained pair model.

    Writes '<source>_market_pid.log' and '<source>_market_score.log'.
    """
    target = 'market'
    test_pair_predict(source + '_pair_pretrain.h5',
                      transform_dir + '/probe', transform_dir + '/test',
                      source + '_' + target + '_pid.log', source + '_' + target + '_score.log')
if __name__ == '__main__':
    # Evaluate a pre-computed GRID cross-validation (fold 0) ranking file.
    # market_eval('market', '/home/cwh/coding/Market-1501')
    # market_result_eval('market_market_pid.log',
    #                    TEST='/home/cwh/coding/Market-1501/test',
    #                    QUERY='/home/cwh/coding/Market-1501/probe')
    # grid_eval('market', '/home/cwh/coding/grid_train_probe_gallery/cross0')
    grid_result_eval('/home/cwh/coding/TrackViz/data/market_grid-cv0-test/cross_filter_pid.log')
| 4,102 | 38.07619 | 108 | py |
TFusion | TFusion-master/rank-reid/utils/cuda_util.py | import os
from keras.backend import set_session
# Pin device enumeration to PCI bus order so that "0" below always refers to
# the same physical GPU, and expose only that GPU to this process.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
# Cap TensorFlow at 60% of GPU memory and install the session as Keras'
# default, so every module importing this file shares the configuration.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.6
set_session(tf.Session(config=config))
hyperopt | hyperopt-master/docs/autogen.py | # This file has been taken from Keras' `docs` module found here:
# https://github.com/keras-team/keras/blob/master/docs/autogen.py
#
import re
import inspect
import os
import shutil
EXCLUDE = {}
PAGES = [
# {
# 'page': 'target.md',
# 'classes': [
# ],
# 'functions': [
# ],
# },
# {
# 'page': 'other_target.md',
# 'all_module_functions': [],
# },
]
ROOT = "http://hyperopt.github.io/hyperopt"
def get_function_signature(function, method=True):
    """Render ``module.name(arg, ..., kw=val, ...)`` for *function*.

    Honors a Keras-style ``_original_function`` attribute left behind by
    decorators.  With ``method=True`` the first positional argument
    (``self``) is dropped.
    """
    wrapped = getattr(function, "_original_function", None)
    # BUG FIX: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() exposes the same .args/.defaults fields.
    if wrapped is None:
        signature = inspect.getfullargspec(function)
    else:
        signature = inspect.getfullargspec(wrapped)
    defaults = signature.defaults
    if method:
        args = signature.args[1:]
    else:
        args = signature.args
    if defaults:
        kwargs = zip(args[-len(defaults):], defaults)
        args = args[: -len(defaults)]
    else:
        kwargs = []
    parts = [str(arg) for arg in args]
    for key, value in kwargs:
        if isinstance(value, str):
            value = f"'{value}'"
        parts.append(f"{key}={value}")
    # BUG FIX: the original joined the "module.name(" prefix into the
    # argument list, producing e.g. "mod.f(, a, b)"; build it separately.
    prefix = f"{clean_module_name(function.__module__)}.{function.__name__}"
    return prefix + "(" + ", ".join(parts) + ")"
def get_class_signature(cls):
    """Render a constructor signature string for *cls*.

    Falls back to '<module>.<Class>()' when the class has no usable
    ``__init__`` (e.g. it inherits object's C-level constructor).
    """
    try:
        class_signature = get_function_signature(cls.__init__)
        class_signature = class_signature.replace("__init__", cls.__name__)
    except (TypeError, AttributeError):
        # in case the class inherits from object and does not
        # define __init__
        class_signature = "{clean_module_name}.{cls_name}()".format(
            clean_module_name=clean_module_name(cls.__module__), cls_name=cls.__name__
        )
    return class_signature
def clean_module_name(name):
    """Validate that *name* belongs to the hyperopt package and return it.

    BUG FIX: the original asserted ``name[:8] == "hyperopt."`` -- an
    8-character slice ("hyperopt") can never equal the 9-character string
    "hyperopt.", so the assert failed for every module name.
    """
    assert name.startswith("hyperopt."), "Invalid module name: %s" % name
    return name
def class_to_docs_link(cls):
    """Return the docs-site URL for *cls*: ROOT + '/module/path#classname'.

    BUG FIX: the original sliced ``module_name[6:]``, a leftover from the
    Keras template where the prefix was the 6-char "keras."; for
    "hyperopt.xxx" that left a stray "pt" in the URL.  Strip the 8-char
    "hyperopt" instead, keeping the leading dot so replace() yields a path
    starting with '/'.
    """
    module_name = clean_module_name(cls.__module__)
    module_name = module_name[8:]
    link = ROOT + module_name.replace(".", "/") + "#" + cls.__name__.lower()
    return link
def class_to_source_link(cls):
    """Markdown '[[source]](github-url)' link pointing at *cls*'s definition."""
    module_name = clean_module_name(cls.__module__)
    path = module_name.replace(".", "/") + ".py"
    line = inspect.getsourcelines(cls)[-1]
    url = f"https://github.com/hyperopt/hyperopt/blob/master/{path}#L{line}"
    return f"[[source]]({url})"
def code_snippet(snippet):
    """Wrap *snippet* in a fenced markdown python code block."""
    return "```python\n" + snippet + "\n```\n"
def count_leading_spaces(s):
    """Return the index of the first non-whitespace character of *s*.

    Returns 0 for an empty or all-whitespace string.
    """
    first_non_ws = re.search(r"\S", s)
    return first_non_ws.start() if first_non_ws else 0
def process_list_block(docstring, starting_point, leading_spaces, marker):
    """Extract one indented list block from *docstring* and markdown-format it.

    The block runs from *starting_point* to the next blank line.  It is
    replaced in the docstring by *marker* so the formatted version can be
    reinjected later.

    Returns:
        (docstring_with_marker, formatted_block)
    """
    ending_point = docstring.find("\n\n", starting_point)
    block = docstring[
        starting_point : (None if ending_point == -1 else ending_point - 1)
    ]
    # Place marker for later reinjection.
    docstring = docstring.replace(block, marker)
    lines = block.split("\n")
    # Remove the computed number of leading white spaces from each line.
    lines = [re.sub("^" + " " * leading_spaces, "", line) for line in lines]
    # Usually lines have at least 4 additional leading spaces.
    # These have to be removed, but first the list roots have to be detected.
    top_level_regex = r"^    ([^\s\\\(]+):(.*)"
    top_level_replacement = r"- __\1__:\2"
    lines = [re.sub(top_level_regex, top_level_replacement, line) for line in lines]
    # All the other lines get simply the 4 leading space (if present) removed
    lines = [re.sub(r"^    ", "", line) for line in lines]
    # Fix text lines after lists
    indent = 0
    text_block = False
    for i in range(len(lines)):
        line = lines[i]
        spaces = re.search(r"\S", line)
        if spaces:
            # If it is a list element
            if line[spaces.start()] == "-":
                indent = spaces.start() + 1
                if text_block:
                    text_block = False
                    lines[i] = "\n" + line
            elif spaces.start() < indent:
                text_block = True
                indent = spaces.start()
                lines[i] = "\n" + line
        else:
            text_block = False
            indent = 0
    block = "\n".join(lines)
    return docstring, block
def process_docstring(docstring):
    """Convert a docstring to markdown for the docs site.

    Code fences and '# Section' list blocks are cut out, markers are left in
    their place, the remaining text is de-indented, and then the processed
    pieces are reinjected.
    """
    # First, extract code blocks and process them.
    code_blocks = []
    if "```" in docstring:
        tmp = docstring[:]
        while "```" in tmp:
            tmp = tmp[tmp.find("```") :]
            index = tmp[3:].find("```") + 6
            snippet = tmp[:index]
            # Place marker in docstring for later reinjection.
            docstring = docstring.replace(snippet, "$CODE_BLOCK_%d" % len(code_blocks))
            snippet_lines = snippet.split("\n")
            # Remove leading spaces.
            num_leading_spaces = snippet_lines[-1].find("`")
            snippet_lines = [snippet_lines[0]] + [
                line[num_leading_spaces:] for line in snippet_lines[1:]
            ]
            # Most code snippets have 3 or 4 more leading spaces
            # on inner lines, but not all. Remove them.
            inner_lines = snippet_lines[1:-1]
            leading_spaces = None
            for line in inner_lines:
                if not line or line[0] == "\n":
                    continue
                spaces = count_leading_spaces(line)
                if leading_spaces is None:
                    leading_spaces = spaces
                if spaces < leading_spaces:
                    leading_spaces = spaces
            if leading_spaces:
                snippet_lines = (
                    [snippet_lines[0]]
                    + [line[leading_spaces:] for line in snippet_lines[1:-1]]
                    + [snippet_lines[-1]]
                )
            snippet = "\n".join(snippet_lines)
            code_blocks.append(snippet)
            tmp = tmp[index:]
    # Format docstring lists.
    section_regex = r"\n( +)# (.*)\n"
    section_idx = re.search(section_regex, docstring)
    shift = 0
    sections = {}
    while section_idx and section_idx.group(2):
        anchor = section_idx.group(2)
        leading_spaces = len(section_idx.group(1))
        shift += section_idx.end()
        marker = "$" + anchor.replace(" ", "_") + "$"
        docstring, content = process_list_block(
            docstring, shift, leading_spaces, marker
        )
        sections[marker] = content
        section_idx = re.search(section_regex, docstring[shift:])
    # Format docstring section titles.
    docstring = re.sub(r"\n(\s+)# (.*)\n", r"\n\1__\2__\n\n", docstring)
    # Strip all remaining leading spaces.
    lines = docstring.split("\n")
    docstring = "\n".join([line.lstrip(" ") for line in lines])
    # Reinject list blocks.
    for marker, content in sections.items():
        docstring = docstring.replace(marker, content)
    # Reinject code blocks.
    for i, code_block in enumerate(code_blocks):
        docstring = docstring.replace("$CODE_BLOCK_%d" % i, code_block)
    return docstring
# Rebuild the docs 'sources' tree from scratch: wipe any previous output,
# then copy every markdown template over as the starting point for the
# autogenerated content below.
print("Cleaning up existing sources directory.")
if os.path.exists("sources"):
    shutil.rmtree("sources")
print("Populating sources directory with templates.")
for subdir, dirs, fnames in os.walk("templates"):
    for fname in fnames:
        new_subdir = subdir.replace("templates", "sources")
        if not os.path.exists(new_subdir):
            os.makedirs(new_subdir)
        if fname[-3:] == ".md":
            fpath = os.path.join(subdir, fname)
            new_fpath = fpath.replace("templates", "sources")
            shutil.copy(fpath, new_fpath)
def read_file(path):
    """Return the full contents of *path* decoded as UTF-8.

    The docs sources are UTF-8 markdown; decoding explicitly avoids
    platform-dependent default encodings (e.g. cp1252 on Windows) choking
    on non-ASCII characters.
    """
    with open(path, encoding="utf-8") as f:
        return f.read()
def collect_class_methods(cls, methods):
    """Resolve *methods* to callables of *cls*.

    If *methods* is a list/tuple, string entries are looked up on the class
    and other entries pass through unchanged.  Otherwise every public
    routine of the class (name not starting with '_' and not in EXCLUDE)
    is collected.
    """
    if isinstance(methods, (list, tuple)):
        return [getattr(cls, m) if isinstance(m, str) else m for m in methods]
    collected = []
    for _name, routine in inspect.getmembers(cls, predicate=inspect.isroutine):
        if routine.__name__.startswith("_") or routine.__name__ in EXCLUDE:
            continue
        collected.append(routine)
    return collected
def render_function(function, method=True):
    """Render one function/method as a markdown section.

    Produces a '### name' heading, the signature as a code fence, and the
    processed docstring (if any).  For methods the module prefix is stripped
    from the signature.
    """
    subblocks = []
    signature = get_function_signature(function, method=method)
    if method:
        signature = signature.replace(clean_module_name(function.__module__) + ".", "")
    subblocks.append("### " + function.__name__ + "\n")
    subblocks.append(code_snippet(signature))
    docstring = function.__doc__
    if docstring:
        subblocks.append(process_docstring(docstring))
    return "\n\n".join(subblocks)
def read_page_data(page_data, type):
    """Collect the classes/functions/methods configured for one docs page.

    Explicit entries under page_data[type] come first, followed by every
    public member of each module listed under page_data['all_module_<type>'].
    """
    assert type in ["classes", "functions", "methods"]
    data = page_data.get(type, [])
    for module in page_data.get(f"all_module_{type}", []):
        module_data = []
        for name in dir(module):
            if name[0] == "_" or name in EXCLUDE:
                continue
            module_member = getattr(module, name)
            # NOTE(review): relies on 'and' binding tighter than 'or' --
            # reads as (isclass and type=='classes') or
            # (isfunction and type=='functions').
            if (
                inspect.isclass(module_member)
                and type == "classes"
                or inspect.isfunction(module_member)
                and type == "functions"
            ):
                instance = module_member
                # keep only members whose defining module is under *module*
                if module.__name__ in instance.__module__:
                    if instance not in module_data:
                        module_data.append(instance)
        # sort by object id for a deterministic (if arbitrary) ordering
        module_data.sort(key=lambda x: id(x))
        data += module_data
    return data
if __name__ == "__main__":
    # Build the landing page: splice the README (from its first '##' on)
    # into the index template.
    readme = read_file("../README.md")
    index = read_file("templates/index.md")
    index = index.replace("{{autogenerated}}", readme[readme.find("##") :])
    with open("sources/index.md", "w") as f:
        f.write(index)
    print("Generating Hyperopt docs")
    # Render every configured page: classes first, then methods, functions.
    for page_data in PAGES:
        classes = read_page_data(page_data, "classes")
        blocks = []
        for element in classes:
            # an entry may be a bare class or a (class, methods) pair
            if not isinstance(element, (list, tuple)):
                element = (element, [])
            cls = element[0]
            subblocks = []
            signature = get_class_signature(cls)
            subblocks.append(
                '<span style="float:right;">' + class_to_source_link(cls) + "</span>"
            )
            if element[1]:
                subblocks.append("## " + cls.__name__ + " class\n")
            else:
                subblocks.append("### " + cls.__name__ + "\n")
            subblocks.append(code_snippet(signature))
            docstring = cls.__doc__
            if docstring:
                subblocks.append(process_docstring(docstring))
            methods = collect_class_methods(cls, element[1])
            if methods:
                subblocks.append("\n---")
                subblocks.append("## " + cls.__name__ + " methods\n")
                subblocks.append(
                    "\n---\n".join(
                        [render_function(method, method=True) for method in methods]
                    )
                )
            blocks.append("\n".join(subblocks))
        methods = read_page_data(page_data, "methods")
        for method in methods:
            blocks.append(render_function(method, method=True))
        functions = read_page_data(page_data, "functions")
        for function in functions:
            blocks.append(render_function(function, method=False))
        if not blocks:
            raise RuntimeError("Found no content for page " + page_data["page"])
        mkdown = "\n----\n\n".join(blocks)
        # save module page.
        # Either insert content into existing page,
        # or create page otherwise
        page_name = page_data["page"]
        path = os.path.join("sources", page_name)
        if os.path.exists(path):
            template = read_file(path)
            assert "{{autogenerated}}" in template, (
                "Template found for " + path + " but missing {{autogenerated}}" " tag."
            )
            mkdown = template.replace("{{autogenerated}}", mkdown)
            print("...inserting autogenerated content into template:", path)
        else:
            print("...creating new page with autogenerated content:", path)
            subdir = os.path.dirname(path)
            if not os.path.exists(subdir):
                os.makedirs(subdir)
        with open(path, "w") as f:
            f.write(mkdown)
| 12,407 | 32.994521 | 87 | py |
EZ-VSL | EZ-VSL-main/test.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import utils
import numpy as np
import argparse
from model import EZVSL
from datasets import get_test_dataset, inverse_normalize
import cv2
def get_arguments():
    """Parse and return the command-line arguments for EZ-VSL evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, default='./checkpoints', help='path to save trained model weights')
    parser.add_argument('--experiment_name', type=str, default='ezvsl_vggss', help='experiment name (experiment folder set to "args.model_dir/args.experiment_name)"')
    parser.add_argument('--save_visualizations', action='store_true', help='Set to store all VSL visualizations (saved in viz directory within experiment folder)')
    # Dataset
    parser.add_argument('--testset', default='flickr', type=str, help='testset (flickr or vggss)')
    parser.add_argument('--test_data_path', default='', type=str, help='Root directory path of data')
    parser.add_argument('--test_gt_path', default='', type=str)
    parser.add_argument('--batch_size', default=1, type=int, help='Batch Size')
    # Model
    parser.add_argument('--tau', default=0.03, type=float, help='tau')
    parser.add_argument('--out_dim', default=512, type=int)
    parser.add_argument('--alpha', default=0.4, type=float, help='alpha')
    # Distributed params
    parser.add_argument('--workers', type=int, default=8)
    parser.add_argument('--gpu', type=int, default=None)
    parser.add_argument('--world_size', type=int, default=1)
    parser.add_argument('--rank', type=int, default=0)
    parser.add_argument('--node', type=str, default='localhost')
    parser.add_argument('--port', type=int, default=12345)
    parser.add_argument('--dist_url', type=str, default='tcp://localhost:12345')
    parser.add_argument('--multiprocessing_distributed', action='store_true')
    return parser.parse_args()
def main(args):
    """Build the audio-visual and object-saliency models, load the checkpoint,
    and run evaluation over the test set."""
    # hard-codes GPU 0 for this process
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Model dir
    model_dir = os.path.join(args.model_dir, args.experiment_name)
    viz_dir = os.path.join(model_dir, 'viz')
    os.makedirs(viz_dir, exist_ok=True)
    # Models
    audio_visual_model = EZVSL(args.tau, args.out_dim)
    from torchvision.models import resnet18
    # Re-purpose an ImageNet ResNet18 as the object-saliency branch: replace
    # avgpool/fc so the forward pass yields a 7x7 saliency map per image.
    # NOTE(review): NormReducer and Unsqueeze are defined elsewhere in this
    # project (not in this chunk).
    object_saliency_model = resnet18(pretrained=True)
    object_saliency_model.avgpool = nn.Identity()
    object_saliency_model.fc = nn.Sequential(
        nn.Unflatten(1, (512, 7, 7)),
        NormReducer(dim=1),
        Unsqueeze(1)
    )
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.multiprocessing_distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            audio_visual_model.cuda(args.gpu)
            object_saliency_model.cuda(args.gpu)
            audio_visual_model = torch.nn.parallel.DistributedDataParallel(audio_visual_model, device_ids=[args.gpu])
            object_saliency_model = torch.nn.parallel.DistributedDataParallel(object_saliency_model, device_ids=[args.gpu])
    # Load weights (strip any 'module.' DataParallel prefix)
    ckp_fn = os.path.join(model_dir, 'best.pth')
    if os.path.exists(ckp_fn):
        ckp = torch.load(ckp_fn, map_location='cpu')
        audio_visual_model.load_state_dict({k.replace('module.', ''): ckp['model'][k] for k in ckp['model']})
        print(f'loaded from {os.path.join(model_dir, "best.pth")}')
    else:
        print(f"Checkpoint not found: {ckp_fn}")
    # Dataloader
    testdataset = get_test_dataset(args)
    testdataloader = DataLoader(testdataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    print("Loaded dataloader.")
    validate(testdataloader, audio_visual_model, object_saliency_model, viz_dir, args)
@torch.no_grad()
def validate(testdataloader, audio_visual_model, object_saliency_model, viz_dir, args):
    """Evaluate localization maps and optionally save visualizations.

    For every test sample this computes three 224x224 maps: the
    audio-visual map (S_AVL), the object-saliency map (S_OBJ), and their
    convex combination weighted by ``args.alpha``. Each map is scored by
    a separate ``utils.Evaluator`` (AP50 / mean cIoU / AUC).
    """
    audio_visual_model.train(False)
    object_saliency_model.train(False)

    evaluator_av = utils.Evaluator()
    evaluator_obj = utils.Evaluator()
    evaluator_av_obj = utils.Evaluator()
    for step, (image, spec, bboxes, name) in enumerate(testdataloader):
        if args.gpu is not None:
            spec = spec.cuda(args.gpu, non_blocking=True)
            image = image.cuda(args.gpu, non_blocking=True)

        # Compute S_AVL
        heatmap_av = audio_visual_model(image.float(), spec.float())[1].unsqueeze(1)
        heatmap_av = F.interpolate(heatmap_av, size=(224, 224), mode='bilinear', align_corners=True)
        heatmap_av = heatmap_av.data.cpu().numpy()

        # Compute S_OBJ
        img_feat = object_saliency_model(image)
        heatmap_obj = F.interpolate(img_feat, size=(224, 224), mode='bilinear', align_corners=True)
        heatmap_obj = heatmap_obj.data.cpu().numpy()

        # Compute eval metrics and save visualizations
        for i in range(spec.shape[0]):
            pred_av = utils.normalize_img(heatmap_av[i, 0])
            pred_obj = utils.normalize_img(heatmap_obj[i, 0])
            pred_av_obj = utils.normalize_img(pred_av * args.alpha + pred_obj * (1 - args.alpha))

            # BUG FIX: select the ground-truth map of sample i. The original
            # used the whole batch tensor, which broadcasts against the
            # (224, 224) prediction and silently corrupts the cIoU whenever
            # batch_size > 1.
            gt_map = bboxes['gt_map'][i].data.cpu().numpy()

            # Threshold at the median activation (keep top 50% of pixels).
            thr_av = np.sort(pred_av.flatten())[int(pred_av.shape[0] * pred_av.shape[1] * 0.5)]
            evaluator_av.cal_CIOU(pred_av, gt_map, thr_av)

            thr_obj = np.sort(pred_obj.flatten())[int(pred_obj.shape[0] * pred_obj.shape[1] * 0.5)]
            evaluator_obj.cal_CIOU(pred_obj, gt_map, thr_obj)

            thr_av_obj = np.sort(pred_av_obj.flatten())[int(pred_av_obj.shape[0] * pred_av_obj.shape[1] * 0.5)]
            evaluator_av_obj.cal_CIOU(pred_av_obj, gt_map, thr_av_obj)

            if args.save_visualizations:
                denorm_image = inverse_normalize(image).squeeze(0).permute(1, 2, 0).cpu().numpy()[:, :, ::-1]
                denorm_image = (denorm_image*255).astype(np.uint8)
                cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_image.jpg'), denorm_image)
                # visualize bboxes on raw images
                gt_boxes_img = utils.visualize(denorm_image, bboxes['bboxes'])
                cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_gt_boxes.jpg'), gt_boxes_img)
                # visualize heatmaps overlaid on the de-normalized frame
                heatmap_img = np.uint8(pred_av*255)
                heatmap_img = cv2.applyColorMap(heatmap_img[:, :, np.newaxis], cv2.COLORMAP_JET)
                fin = cv2.addWeighted(heatmap_img, 0.8, np.uint8(denorm_image), 0.2, 0)
                cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_pred_av.jpg'), fin)

                heatmap_img = np.uint8(pred_obj*255)
                heatmap_img = cv2.applyColorMap(heatmap_img[:, :, np.newaxis], cv2.COLORMAP_JET)
                fin = cv2.addWeighted(heatmap_img, 0.8, np.uint8(denorm_image), 0.2, 0)
                cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_pred_obj.jpg'), fin)

                heatmap_img = np.uint8(pred_av_obj*255)
                heatmap_img = cv2.applyColorMap(heatmap_img[:, :, np.newaxis], cv2.COLORMAP_JET)
                fin = cv2.addWeighted(heatmap_img, 0.8, np.uint8(denorm_image), 0.2, 0)
                cv2.imwrite(os.path.join(viz_dir, f'{name[i]}_pred_av_obj.jpg'), fin)

        print(f'{step+1}/{len(testdataloader)}: map_av={evaluator_av.finalize_AP50():.2f} map_obj={evaluator_obj.finalize_AP50():.2f} map_av_obj={evaluator_av_obj.finalize_AP50():.2f}')

    def compute_stats(evaluator):
        # Renamed parameter: the original shadowed the builtin `eval`.
        mAP = evaluator.finalize_AP50()
        ciou = evaluator.finalize_cIoU()
        auc = evaluator.finalize_AUC()
        return mAP, ciou, auc

    print('AV: AP50(cIoU)={}, Avg-cIoU={}, AUC={}'.format(*compute_stats(evaluator_av)))
    print('Obj: AP50(cIoU)={}, Avg-cIoU={}, AUC={}'.format(*compute_stats(evaluator_obj)))
    print('AV_Obj: AP50(cIoU)={}, Avg-cIoU={}, AUC={}'.format(*compute_stats(evaluator_av_obj)))

    utils.save_iou(evaluator_av.ciou, 'av', viz_dir)
    utils.save_iou(evaluator_obj.ciou, 'obj', viz_dir)
    utils.save_iou(evaluator_av_obj.ciou, 'av_obj', viz_dir)
class NormReducer(nn.Module):
    """Collapse one tensor dimension to the mean of absolute values."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim  # dimension reduced in forward()

    def forward(self, x):
        """Return mean(|x|) along the configured dimension."""
        return torch.mean(torch.abs(x), dim=self.dim)
class Unsqueeze(nn.Module):
    """Module wrapper around ``Tensor.unsqueeze`` for use in Sequential."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim  # position where the new singleton axis is inserted

    def forward(self, x):
        """Insert a size-1 dimension at ``self.dim``."""
        return torch.unsqueeze(x, self.dim)
# Script entry point: parse CLI arguments and run evaluation.
if __name__ == "__main__":
    main(get_arguments())
| 8,390 | 42.252577 | 185 | py |
EZ-VSL | EZ-VSL-main/audio_io.py | import av
# import torchaudio
import numpy as np
from fractions import Fraction
# def load_audio_torchaudio(fn):
# data, sr = torchaudio.load(fn)
# return data, sr
def open_audio_av(path):
    """Open a media file with PyAV, forcing single-threaded decoding.

    Disabling codec threading on every video and audio stream avoids
    thread contention when many dataloader workers decode concurrently.
    """
    container = av.open(path)
    for stream in (*container.streams.video, *container.streams.audio):
        stream.codec_context.thread_type = av.codec.context.ThreadType.NONE
        stream.codec_context.thread_count = 1
    return container
def load_audio_av(path=None, container=None, rate=None, start_time=None, duration=None, layout="mono"):
    """Decode a clip of audio with PyAV and return (waveform, rate).

    Either `path` or an already-open `container` must be given. The clip is
    resampled to signed 16-bit planar at `rate` (defaults to the stream's
    native rate) and normalized to [-1, 1]. Returns an ndarray of shape
    (channels, samples).
    """
    if container is None:
        container = av.open(path)
    audio_stream = container.streams.audio[0]

    # Parse metadata: stream start, duration and native rate (in seconds
    # via the stream time base).
    _ss = audio_stream.start_time * audio_stream.time_base if audio_stream.start_time is not None else 0.
    _dur = audio_stream.duration * audio_stream.time_base
    _ff = _ss + _dur
    _rate = audio_stream.rate

    # Fill in defaults and clamp the requested window to the stream extent.
    if rate is None:
        rate = _rate
    if start_time is None:
        start_time = _ss
    if duration is None:
        duration = _ff - start_time
    duration = min(duration, _ff - start_time)
    end_time = start_time + duration

    resampler = av.audio.resampler.AudioResampler(format="s16p", layout=layout, rate=rate)
    # NOTE(review): in PyAV >= 9, resample() returns a list of frames rather
    # than a single frame — confirm the installed PyAV version.

    # Read data: seek near the start, then decode frames, skipping those
    # entirely before the window and stopping past its end.
    chunks = []
    container.seek(int(start_time * av.time_base))
    for frame in container.decode(audio=0):
        chunk_start_time = frame.pts * frame.time_base
        chunk_end_time = chunk_start_time + Fraction(frame.samples, frame.rate)
        if chunk_end_time < start_time:   # Skip until start time
            continue
        if chunk_start_time > end_time:       # Exit if clip has been extracted
            break

        try:
            # Clearing pts lets the resampler assign timestamps itself.
            frame.pts = None
            if resampler is not None:
                chunks.append((chunk_start_time, resampler.resample(frame).to_ndarray()))
            else:
                chunks.append((chunk_start_time, frame.to_ndarray()))
        except AttributeError:
            break

    # Trim for frame accuracy: the first decoded chunk may begin before
    # start_time, so offset by the difference (in samples).
    audio = np.concatenate([af[1] for af in chunks], 1)
    ss = int((start_time - chunks[0][0]) * rate)
    t = int(duration * rate)
    if ss < 0:
        # Seek overshot the start: left-pad with silence instead.
        audio = np.pad(audio, ((0, 0), (-ss, 0)), 'constant', constant_values=0)
        ss = 0
    audio = audio[:, ss: ss+t]

    # Normalize to [-1, 1] using the integer dtype's full range.
    audio = audio / np.iinfo(audio.dtype).max

    return audio, rate
def audio_info_av(inpt, audio=None, format=None):
    """Probe metadata of one audio stream in a media file or container.

    Args:
        inpt: filename to open, or an already-open PyAV container.
        audio: index of the audio stream to inspect (defaults to the first).
        format: optional container format hint passed to ``av.open``.

    Returns:
        dict with channels / fps / start_time / duration / chunks /
        chunk_size, or None if the file cannot be opened.
    """
    container = inpt
    if isinstance(inpt, str):
        try:
            container = av.open(inpt, format=format)
        except av.AVError:
            # BUG FIX: the error path used to return (None, None) while the
            # success path returns a single dict; return None consistently
            # so callers can test `meta is None`.
            return None

    # BUG FIX: `audio` defaulted to None, and `streams.audio[None]` raises
    # TypeError; fall back to the first audio stream.
    if audio is None:
        audio = 0
    audio_stream = container.streams.audio[audio]

    # Durations are expressed in stream time-base units; convert to seconds.
    time_base = audio_stream.time_base
    duration = audio_stream.duration * time_base
    start_time = audio_stream.start_time * time_base
    channels = audio_stream.channels
    fps = audio_stream.rate
    chunk_size = audio_stream.frame_size
    chunks = audio_stream.frames
    meta = {'channels': channels,
            'fps': fps,
            'start_time': start_time,
            'duration': duration,
            'chunks': chunks,
            'chunk_size': chunk_size}
    return meta
| 3,295 | 31 | 105 | py |
EZ-VSL | EZ-VSL-main/utils.py | import os
import json
from torch.optim import *
import numpy as np
from sklearn import metrics
class Evaluator(object):
    """Accumulates per-sample cIoU scores and derives summary metrics.

    Call `cal_CIOU` once per test sample; then read AP50 / mean cIoU /
    AUC from the corresponding `finalize_*` method.
    """

    def __init__(self):
        super(Evaluator, self).__init__()
        self.ciou = []  # one cIoU value per evaluated sample

    def cal_CIOU(self, infer, gtmap, thres=0.01):
        """Binarize `infer` at `thres` and score it against `gtmap`.

        Returns (ciou, intersection, union) and records the cIoU.
        """
        infer_map = np.zeros((224, 224))
        infer_map[infer >= thres] = 1
        inter = np.sum(infer_map * gtmap)
        union = np.sum(gtmap) + np.sum(infer_map * (gtmap == 0))
        ciou = inter / union
        self.ciou.append(ciou)
        return ciou, inter, union

    def finalize_AUC(self):
        """Area under the success curve over cIoU thresholds 0..1 step 0.05."""
        thr = [0.05 * i for i in range(21)]
        cious = [np.sum(np.array(self.ciou) >= t) / len(self.ciou) for t in thr]
        return metrics.auc(thr, cious)

    def finalize_AP50(self):
        """Fraction of samples with cIoU >= 0.5."""
        return np.mean(np.array(self.ciou) >= 0.5)

    def finalize_cIoU(self):
        """Mean cIoU over all recorded samples."""
        return np.mean(np.array(self.ciou))

    def clear(self):
        """Drop all recorded scores."""
        self.ciou = []
def normalize_img(value, vmax=None, vmin=None):
    """Min-max normalize an array to [0, 1].

    Bounds default to the array's own min/max; a zero span leaves the
    input unchanged to avoid division by zero.
    """
    if vmin is None:
        vmin = value.min()
    if vmax is None:
        vmax = value.max()
    span = vmax - vmin
    if span != 0:
        value = (value - vmin) / span
    return value
def visualize(raw_image, boxes):
    """Draw red ground-truth rectangles on a copy of `raw_image`.

    `boxes` is an iterable of (xmin, ymin, xmax, ymax). The image is kept
    as uint8; channel order is flipped before and after drawing.
    """
    import cv2
    boxes_img = np.uint8(raw_image.copy())[:, :, ::-1]
    for box in boxes:
        xmin,ymin,xmax,ymax = int(box[0]),int(box[1]),int(box[2]),int(box[3])
        # NOTE(review): drawing on a `[:, :, ::-1]` view hands OpenCV a
        # non-contiguous array — confirm the rectangles actually land in
        # `boxes_img` on the installed OpenCV version.
        cv2.rectangle(boxes_img[:, :, ::-1], (xmin, ymin), (xmax, ymax), (0,0,255), 1)
    return boxes_img[:, :, ::-1]
def build_optimizer_and_scheduler_adam(model, args):
    """Create an Adam optimizer over trainable params; no LR scheduler."""
    trainable_params = (p for p in model.parameters() if p.requires_grad)
    optimizer = Adam(trainable_params, lr=args.init_lr)
    return optimizer, None
def build_optimizer_and_scheduler_sgd(model, args):
    """Create a plain SGD optimizer over all parameters; no LR scheduler."""
    optimizer = SGD(model.parameters(), lr=args.init_lr)
    scheduler = None
    return optimizer, scheduler
def save_json(data, filename, save_pretty=False, sort_keys=False):
    """Serialize `data` as JSON to `filename` (UTF-8).

    With `save_pretty`, writes a 4-space-indented document, optionally
    with sorted keys; otherwise writes compact JSON.
    """
    with open(filename, mode='w', encoding='utf-8') as fp:
        if save_pretty:
            text = json.dumps(data, indent=4, sort_keys=sort_keys)
            fp.write(text)
        else:
            json.dump(data, fp)
def save_iou(iou_list, suffix, output_dir):
    """Write per-sample IoU values, ascending, to ``iou_test_<suffix>.txt``.

    Each line is ``<original_index>,<iou>`` so the worst-scoring samples
    can be traced back to their dataset position.
    """
    # Sort values and keep the permutation so indices stay aligned.
    sorted_iou = np.sort(iou_list).tolist()
    sorted_iou_indices = np.argsort(iou_list).tolist()
    out_path = os.path.join(output_dir, "iou_test_{}.txt".format(suffix))
    # 'with' guarantees the handle is closed even if a write fails
    # (the original opened/closed manually and could leak on error).
    with open(out_path, "w") as file_iou:
        for indice, value in zip(sorted_iou_indices, sorted_iou):
            line = str(indice) + ',' + str(value) + '\n'
            file_iou.write(line)
| 2,784 | 29.604396 | 90 | py |
EZ-VSL | EZ-VSL-main/model.py | import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models import resnet18
class EZVSL(nn.Module):
    """EZ-VSL audio-visual localization model.

    Two ResNet18 towers embed frames and spectrograms into a shared
    `dim`-dimensional space; training uses a max-MIL contrastive loss
    with temperature `tau`.
    """

    def __init__(self, tau, dim):
        super(EZVSL, self).__init__()
        self.tau = tau  # softmax temperature for the contrastive logits

        # Vision model: pretrained ResNet18 with pooling/classifier removed,
        # projected per-location to `dim` channels (7x7 feature map).
        self.imgnet = resnet18(pretrained=True)
        self.imgnet.avgpool = nn.Identity()
        self.imgnet.fc = nn.Identity()
        self.img_proj = nn.Conv2d(512, dim, kernel_size=(1, 1))

        # Audio model: ResNet18 from scratch with a 1-channel stem for
        # spectrograms and global max-pooling to one vector per clip.
        self.audnet = resnet18()
        self.audnet.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.audnet.avgpool = nn.AdaptiveMaxPool2d((1, 1))
        self.audnet.fc = nn.Identity()
        self.aud_proj = nn.Linear(512, dim)

        # Initialize weights (except pretrained visual model)
        for net in [self.audnet, self.img_proj, self.aud_proj]:
            for m in net.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(
                        m.weight, mode='fan_out', nonlinearity='relu')
                elif isinstance(m, nn.Linear):
                    nn.init.trunc_normal_(
                        m.weight, mean=0.0, std=0.01)
                elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                    nn.init.normal_(m.weight, mean=1, std=0.02)
                    nn.init.constant_(m.bias, 0)

    def max_xmil_loss(self, img, aud):
        """Symmetric max-MIL InfoNCE loss over all image/audio pairs.

        `Slogits[n, m, h, w]` is the similarity of audio m to location
        (h, w) of image n; each pair is scored by its best location.
        """
        B = img.shape[0]
        Slogits = torch.einsum('nchw,mc->nmhw', img, aud) / self.tau
        logits = Slogits.flatten(-2, -1).max(dim=-1)[0]
        labels = torch.arange(B).long().to(img.device)
        # Cross-entropy in both directions: image->audio and audio->image.
        loss = F.cross_entropy(logits, labels) + F.cross_entropy(logits.permute(1, 0), labels)
        return loss, Slogits

    def forward(self, image, audio):
        """Return (loss, Savl) where Savl is the per-sample AVL map."""
        # Image: restore the 512x7x7 map flattened by the Identity head.
        img = self.imgnet(image).unflatten(1, (512, 7, 7))
        img = self.img_proj(img)
        img = nn.functional.normalize(img, dim=1)

        # Audio
        aud = self.audnet(audio)
        aud = self.aud_proj(aud)
        aud = nn.functional.normalize(aud, dim=1)

        # Compute loss
        loss, logits = self.max_xmil_loss(img, aud)

        # Compute avl maps: the diagonal holds each sample's own
        # audio-to-image similarity map.
        with torch.no_grad():
            B = img.shape[0]
            Savl = logits[torch.arange(B), torch.arange(B)]

        return loss, Savl
EZ-VSL | EZ-VSL-main/datasets.py | import os
import csv
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from scipy import signal
import random
import json
import xml.etree.ElementTree as ET
from audio_io import load_audio_av, open_audio_av
def load_image(path):
    """Load the image at `path` and force 3-channel RGB."""
    return Image.open(path).convert('RGB')
def load_spectrogram(path, dur=3.):
    """Load `dur` seconds from the middle of an audio file as a log-spectrogram.

    Returns an ndarray of log power; short clips are tiled to reach the
    requested duration.
    """
    # Load audio, centering the window inside the clip.
    audio_ctr = open_audio_av(path)
    audio_dur = audio_ctr.streams.audio[0].duration * audio_ctr.streams.audio[0].time_base
    audio_ss = max(float(audio_dur)/2 - dur/2, 0)
    audio, samplerate = load_audio_av(container=audio_ctr, start_time=audio_ss, duration=dur)

    # To Mono: clip to [-1, 1] then average channels.
    audio = np.clip(audio, -1., 1.).mean(0)

    # Repeat if audio is too short, then trim to exactly dur seconds.
    if audio.shape[0] < samplerate * dur:
        n = int(samplerate * dur / audio.shape[0]) + 1
        audio = np.tile(audio, n)
    audio = audio[:int(samplerate * dur)]

    # Log power spectrogram; 1e-7 guards against log(0).
    frequencies, times, spectrogram = signal.spectrogram(audio, samplerate, nperseg=512, noverlap=274)
    spectrogram = np.log(spectrogram + 1e-7)
    return spectrogram
def load_all_bboxes(annotation_dir, format='flickr'):
    """Load ground-truth boxes for every annotated test sample.

    Returns a dict mapping file id (name without extension) to a list of
    [xmin, ymin, xmax, ymax] boxes rescaled to the 224x224 evaluation
    resolution.
    """
    gt_bboxes = {}
    if format == 'flickr':
        anno_files = os.listdir(annotation_dir)
        for filename in anno_files:
            file = filename.split('.')[0]
            # BUG FIX: the annotation path contained a literal placeholder
            # string instead of the per-sample XML filename, so parsing
            # always failed; parse the actual file that was listed.
            gt = ET.parse(f"{annotation_dir}/{filename}").getroot()
            bboxes = []
            for child in gt:
                for childs in child:
                    bbox = []
                    if childs.tag == 'bbox':
                        for index, ch in enumerate(childs):
                            if index == 0:
                                continue  # first entry is not a coordinate
                            # Rescale from the 256-px annotation frame to 224.
                            bbox.append(int(224 * int(ch.text)/256))
                    bboxes.append(bbox)
            gt_bboxes[file] = bboxes

    elif format == 'vggss':
        # VGG-SS ships normalized [0, 1] boxes in a single JSON file.
        with open('metadata/vggss.json') as json_file:
            annotations = json.load(json_file)
        for annotation in annotations:
            bboxes = [(np.clip(np.array(bbox), 0, 1) * 224).astype(int) for bbox in annotation['bbox']]
            gt_bboxes[annotation['file']] = bboxes

    return gt_bboxes
def bbox2gtmap(bboxes, format='flickr'):
    """Rasterize bounding boxes into a 224x224 ground-truth map.

    Flickr maps encode two-annotator consensus (sum halved, capped at 1);
    VGG-SS maps are simply binarized.
    """
    gt_map = np.zeros([224, 224])
    for box in bboxes:
        xmin, ymin, xmax, ymax = box
        mask = np.zeros([224, 224])
        mask[ymin:ymax, xmin:xmax] = 1
        gt_map = gt_map + mask

    if format == 'flickr':
        # Annotation consensus: two annotators, cap at 1.
        gt_map = gt_map / 2
        gt_map[gt_map > 1] = 1
    elif format == 'vggss':
        # Single annotation: binarize.
        gt_map[gt_map > 0] = 1

    return gt_map
class AudioVisualDataset(Dataset):
    """Paired frame/spectrogram dataset, optionally with ground-truth boxes.

    Items are (frame, spectrogram, bboxes, file_id); `bboxes` is an empty
    dict when no annotations were supplied (training).
    """

    def __init__(self, image_files, audio_files, image_path, audio_path, audio_dur=3., image_transform=None, audio_transform=None, all_bboxes=None, bbox_format='flickr'):
        super().__init__()
        self.audio_path = audio_path
        self.image_path = image_path
        self.audio_dur = audio_dur

        # Parallel lists: audio_files[i] pairs with image_files[i].
        self.audio_files = audio_files
        self.image_files = image_files
        self.all_bboxes = all_bboxes
        self.bbox_format = bbox_format

        self.image_transform = image_transform
        self.audio_transform = audio_transform

    def getitem(self, idx):
        """Load and transform sample `idx`; may raise on corrupt media."""
        file = self.image_files[idx]
        file_id = file.split('.')[0]

        # Image
        img_fn = os.path.join(self.image_path, self.image_files[idx])
        frame = self.image_transform(load_image(img_fn))

        # Audio
        audio_fn = os.path.join(self.audio_path, self.audio_files[idx])
        spectrogram = self.audio_transform(load_spectrogram(audio_fn))

        bboxes = {}
        if self.all_bboxes is not None:
            bboxes['bboxes'] = self.all_bboxes[file_id]
            bboxes['gt_map'] = bbox2gtmap(self.all_bboxes[file_id], self.bbox_format)

        return frame, spectrogram, bboxes, file_id

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        # Best-effort loading: a corrupt sample is replaced by a random
        # other sample instead of aborting the whole epoch.
        try:
            return self.getitem(idx)
        except Exception:
            return self.getitem(random.sample(range(len(self)), 1)[0])
def get_train_dataset(args):
    """Build the training AudioVisualDataset from args.train_data_path.

    Keeps only ids present as both .wav and .jpg, optionally restricted
    to a subset list under metadata/<trainset>.txt, and applies the
    standard augmentation/normalization transforms.
    """
    audio_path = f"{args.train_data_path}/audio/"
    image_path = f"{args.train_data_path}/frames/"

    # List directory and keep ids that have both modalities.
    audio_files = {fn.split('.wav')[0] for fn in os.listdir(audio_path) if fn.endswith('.wav')}
    image_files = {fn.split('.jpg')[0] for fn in os.listdir(image_path) if fn.endswith('.jpg')}
    avail_files = audio_files.intersection(image_files)
    print(f"{len(avail_files)} available files")

    # Subsample if specified: named subsets live in metadata/<name>.txt.
    if args.trainset.lower() in {'vggss', 'flickr'}:
        pass # use full dataset
    else:
        subset = set(open(f"metadata/{args.trainset}.txt").read().splitlines())
        avail_files = avail_files.intersection(subset)
        print(f"{len(avail_files)} valid subset files")
    avail_files = sorted(list(avail_files))
    audio_files = sorted([dt+'.wav' for dt in avail_files])
    image_files = sorted([dt+'.jpg' for dt in avail_files])

    # Transforms: random crop/flip augmentation + ImageNet normalization;
    # spectrograms are normalized with a fixed std of 12.
    image_transform = transforms.Compose([
        transforms.Resize(int(224 * 1.1), Image.BICUBIC),
        transforms.RandomCrop((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    audio_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.0], std=[12.0])])

    return AudioVisualDataset(
        image_files=image_files,
        audio_files=audio_files,
        image_path=image_path,
        audio_path=audio_path,
        audio_dur=3.,
        image_transform=image_transform,
        audio_transform=audio_transform
    )
def get_test_dataset(args):
    """Build the annotated test AudioVisualDataset for args.testset.

    Resolves the test CSV and bounding-box format for the chosen benchmark,
    intersects the listed ids with files actually on disk, and attaches
    ground-truth boxes loaded from args.test_gt_path.
    """
    audio_path = args.test_data_path + 'audio/'
    image_path = args.test_data_path + 'frames/'

    # Each benchmark ships its own id list.
    if args.testset == 'flickr':
        testcsv = 'metadata/flickr_test.csv'
    elif args.testset == 'vggss':
        testcsv = 'metadata/vggss_test.csv'
    elif args.testset == 'vggss_heard':
        testcsv = 'metadata/vggss_heard_test.csv'
    elif args.testset == 'vggss_unheard':
        testcsv = 'metadata/vggss_unheard_test.csv'
    else:
        raise NotImplementedError
    # The heard/unheard splits reuse the VGG-SS annotation format.
    bbox_format = {'flickr': 'flickr',
                   'vggss': 'vggss',
                   'vggss_heard': 'vggss',
                   'vggss_unheard': 'vggss'}[args.testset]

    #  Retrieve list of audio and video files
    testset = set([item[0] for item in csv.reader(open(testcsv))])

    # Intersect with available files
    audio_files = {fn.split('.wav')[0] for fn in os.listdir(audio_path)}
    image_files = {fn.split('.jpg')[0] for fn in os.listdir(image_path)}
    avail_files = audio_files.intersection(image_files)
    testset = testset.intersection(avail_files)

    testset = sorted(list(testset))
    image_files = [dt+'.jpg' for dt in testset]
    audio_files = [dt+'.wav' for dt in testset]

    # Bounding boxes
    all_bboxes = load_all_bboxes(args.test_gt_path, format=bbox_format)

    # Transforms: deterministic resize (no augmentation) for evaluation.
    image_transform = transforms.Compose([
        transforms.Resize((224, 224), Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    audio_transform = transforms.Compose([transforms.ToTensor(),
                                          transforms.Normalize(mean=[0.0], std=[12.0])])

    return AudioVisualDataset(
        image_files=image_files,
        audio_files=audio_files,
        image_path=image_path,
        audio_path=audio_path,
        audio_dur=3.,
        image_transform=image_transform,
        audio_transform=audio_transform,
        all_bboxes=all_bboxes,
        bbox_format=bbox_format
    )
def inverse_normalize(tensor):
    """Undo ImageNet mean/std normalization on an image tensor."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    # Normalize with (-mean/std, 1/std) inverts (x - mean) / std.
    inv_mean = [-m / s for m, s in zip(mean, std)]
    inv_std = [1.0 / s for s in std]
    return transforms.Normalize(inv_mean, inv_std)(tensor)
| 8,126 | 33.004184 | 170 | py |
EZ-VSL | EZ-VSL-main/train.py | import os
import argparse
import builtins
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch import multiprocessing as mp
import torch.distributed as dist
import utils
from model import EZVSL
from datasets import get_train_dataset, get_test_dataset
def get_arguments():
    """Parse command-line arguments for EZ-VSL training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, default='./checkpoints', help='path to save trained model weights')
    parser.add_argument('--experiment_name', type=str, default='ezvsl_vggss', help='experiment name (used for checkpointing and logging)')

    # Data params
    parser.add_argument('--trainset', default='vggss', type=str, help='trainset (flickr or vggss)')
    parser.add_argument('--testset', default='vggss', type=str, help='testset,(flickr or vggss)')
    parser.add_argument('--train_data_path', default='', type=str, help='Root directory path of train data')
    parser.add_argument('--test_data_path', default='', type=str, help='Root directory path of test data')
    parser.add_argument('--test_gt_path', default='', type=str)

    # ez-vsl hyper-params
    parser.add_argument('--out_dim', default=512, type=int)
    parser.add_argument('--tau', default=0.03, type=float, help='tau')

    # training/evaluation parameters
    parser.add_argument("--epochs", type=int, default=20, help="number of epochs")
    parser.add_argument('--batch_size', default=128, type=int, help='Batch Size')
    parser.add_argument("--init_lr", type=float, default=0.0001, help="initial learning rate")
    parser.add_argument("--seed", type=int, default=12345, help="random seed")

    # Distributed params
    parser.add_argument('--workers', type=int, default=8)
    parser.add_argument('--gpu', type=int, default=None)
    parser.add_argument('--world_size', type=int, default=1)
    parser.add_argument('--rank', type=int, default=0)
    parser.add_argument('--node', type=str, default='localhost')
    parser.add_argument('--port', type=int, default=12345)
    parser.add_argument('--dist_url', type=str, default='tcp://localhost:12345')
    parser.add_argument('--multiprocessing_distributed', action='store_true')

    return parser.parse_args()
def main(args):
    """Launch training: one process per GPU if distributed, else inline."""
    mp.set_start_method('spawn')
    args.dist_url = f'tcp://{args.node}:{args.port}'
    print('Using url {}'.format(args.dist_url))

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Spawn one worker per local GPU; each gets its device index.
        args.world_size = ngpus_per_node
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point.

    Sets up the (optional) distributed environment, builds model/optimizer,
    resumes from 'latest.pth' when present, and alternates train/validate
    epochs, checkpointing the best model by cIoU.
    """
    args.gpu = gpu

    # suppress printing if not first GPU on each node
    if args.multiprocessing_distributed and (args.gpu != 0 or args.rank != 0):
        def print_pass(*args, **kwargs):
            pass
        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # Setup distributed environment
    if args.multiprocessing_distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend='nccl', init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
        torch.distributed.barrier()

    # Create model dir and persist the run configuration.
    model_dir = os.path.join(args.model_dir, args.experiment_name)
    os.makedirs(model_dir, exist_ok=True)
    utils.save_json(vars(args), os.path.join(model_dir, 'configs.json'), sort_keys=True, save_pretty=True)

    # Create model
    model = EZVSL(args.tau, args.out_dim)
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.multiprocessing_distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / args.world_size)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)
    print(model)

    # Optimizer
    optimizer, scheduler = utils.build_optimizer_and_scheduler_adam(model, args)

    # Resume if possible: 'latest.pth' carries model/optimizer/epoch state.
    start_epoch, best_cIoU, best_Auc = 0, 0., 0.
    if os.path.exists(os.path.join(model_dir, 'latest.pth')):
        ckp = torch.load(os.path.join(model_dir, 'latest.pth'), map_location='cpu')
        start_epoch, best_cIoU, best_Auc = ckp['epoch'], ckp['best_cIoU'], ckp['best_Auc']
        model.load_state_dict(ckp['model'])
        optimizer.load_state_dict(ckp['optimizer'])
        print(f'loaded from {os.path.join(model_dir, "latest.pth")}')

    # Dataloaders
    traindataset = get_train_dataset(args)
    train_sampler = None
    if args.multiprocessing_distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(traindataset)
    train_loader = torch.utils.data.DataLoader(
        traindataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=False, sampler=train_sampler, drop_last=True,
        persistent_workers=args.workers > 0)

    testdataset = get_test_dataset(args)
    test_loader = torch.utils.data.DataLoader(
        testdataset, batch_size=1, shuffle=False,
        num_workers=args.workers, pin_memory=False, drop_last=False,
        persistent_workers=args.workers > 0)
    print("Loaded dataloader.")

    # =============================================================== #
    # Training loop: evaluate once before training as a baseline.
    cIoU, auc = validate(test_loader, model, args)
    print(f'cIoU (epoch {start_epoch}): {cIoU}')
    print(f'AUC (epoch {start_epoch}): {auc}')
    print(f'best_cIoU: {best_cIoU}')
    print(f'best_Auc: {best_Auc}')

    for epoch in range(start_epoch, args.epochs):
        if args.multiprocessing_distributed:
            train_loader.sampler.set_epoch(epoch)

        # Train
        train(train_loader, model, optimizer, epoch, args)

        # Evaluate
        cIoU, auc = validate(test_loader, model, args)
        print(f'cIoU (epoch {epoch+1}): {cIoU}')
        print(f'AUC (epoch {epoch+1}): {auc}')
        print(f'best_cIoU: {best_cIoU}')
        print(f'best_Auc: {best_Auc}')

        # Checkpoint: rank 0 always saves 'latest.pth'; 'best.pth' tracks
        # the highest cIoU seen so far.
        if args.rank == 0:
            ckp = {'model': model.state_dict(),
                   'optimizer': optimizer.state_dict(),
                   'epoch': epoch+1,
                   'best_cIoU': best_cIoU,
                   'best_Auc': best_Auc}
            torch.save(ckp, os.path.join(model_dir, 'latest.pth'))
            print(f"Model saved to {model_dir}")
        if cIoU >= best_cIoU:
            best_cIoU, best_Auc = cIoU, auc
            if args.rank == 0:
                torch.save(ckp, os.path.join(model_dir, 'best.pth'))
def train(train_loader, model, optimizer, epoch, args):
    """Run one training epoch, logging loss and timing every 10 steps."""
    model.train()
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    loss_mtr = AverageMeter('Loss', ':.3f')

    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, loss_mtr],
        prefix="Epoch: [{}]".format(epoch),
    )

    end = time.time()
    for i, (image, spec, _, _) in enumerate(train_loader):
        # Time spent waiting on the dataloader.
        data_time.update(time.time() - end)
        if args.gpu is not None:
            spec = spec.cuda(args.gpu, non_blocking=True)
            image = image.cuda(args.gpu, non_blocking=True)

        # The model returns (loss, avl_map); only the loss is needed here.
        loss, _ = model(image.float(), spec.float())
        loss_mtr.update(loss.item(), image.shape[0])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0 or i == len(train_loader) - 1:
            progress.display(i)
        del loss
def validate(test_loader, model, args):
    """Evaluate localization quality; returns (AP50, AUC)."""
    model.train(False)
    evaluator = utils.Evaluator()
    for step, (image, spec, bboxes, _) in enumerate(test_loader):
        if torch.cuda.is_available():
            spec = spec.cuda(args.gpu, non_blocking=True)
            image = image.cuda(args.gpu, non_blocking=True)

        # Upsample the per-sample AVL map to evaluation resolution.
        avl_map = model(image.float(), spec.float())[1].unsqueeze(1)
        avl_map = F.interpolate(avl_map, size=(224, 224), mode='bicubic', align_corners=False)
        avl_map = avl_map.data.cpu().numpy()

        for i in range(spec.shape[0]):
            pred = utils.normalize_img(avl_map[i, 0])
            # NOTE(review): gt_map is the whole batch tensor, not sample i;
            # this is only correct because test_loader uses batch_size=1 —
            # confirm before raising the batch size.
            gt_map = bboxes['gt_map'].data.cpu().numpy()
            # Median-activation threshold (keep the top half of pixels).
            thr = np.sort(pred.flatten())[int(pred.shape[0] * pred.shape[1] / 2)]
            evaluator.cal_CIOU(pred, gt_map, thr)

    cIoU = evaluator.finalize_AP50()
    AUC = evaluator.finalize_AUC()
    return cIoU, AUC
class AverageMeter(object):
    """Tracks the latest value and a running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name  # label used in __str__
        self.fmt = fmt    # format spec applied to val and avg
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats '[  i/N]' progress lines from a set of AverageMeters."""

    def __init__(self, num_batches, meters, prefix="", fp=None):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
        self.fp = fp  # optional file handle mirroring stdout

    def display(self, batch):
        """Print (and optionally log) one progress line for `batch`."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        for meter in self.meters:
            parts.append(str(meter))
        msg = '\t'.join(parts)
        print(msg, flush=True)
        if self.fp is not None:
            self.fp.write(msg + '\n')

    def _get_batch_fmtstr(self, num_batches):
        """Build a '[{:Nd}/total]' template padded to total's width."""
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
# Script entry point: parse CLI arguments and launch training.
if __name__ == "__main__":
    main(get_arguments())
| 10,736 | 36.152249 | 138 | py |
CLNet | CLNet-main/main.py | import torch
import torch.nn as nn
from utils.parser import args
from utils import logger, Trainer, Tester
from utils import init_device, init_model, FakeLR, WarmUpCosineAnnealingLR
from dataset import Cost2100DataLoader
def main():
    """Train (or evaluate) CLNet on the COST2100 dataset.

    Builds loaders, model, loss, optimizer and LR scheduler from the CLI
    args, runs the training loop, then runs a final test and saves the
    encoder/decoder weights if the NMSE beats the threshold.
    """
    logger.info('=> PyTorch Version: {}'.format(torch.__version__))

    # Environment initialization
    device, pin_memory = init_device(args.seed, args.cpu, args.gpu, args.cpu_affinity)

    # Create the data loader
    train_loader, val_loader, test_loader = Cost2100DataLoader(
        root=args.data_dir,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=pin_memory,
        scenario=args.scenario)()

    # Define model
    model = init_model(args)
    model.to(device)

    # Define loss function
    criterion = nn.MSELoss().to(device)

    # Inference mode: evaluate the checkpoint and exit.
    if args.evaluate:
        Tester(model, device, criterion)(test_loader)
        return

    # Define optimizer and scheduler
    lr_init = 1e-3 if args.scheduler == 'const' else 2e-3
    optimizer = torch.optim.Adam(model.parameters(), lr_init)
    if args.scheduler == 'const':
        scheduler = FakeLR(optimizer=optimizer)
    else:
        scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
                                            T_max=args.epochs * len(train_loader),
                                            T_warmup=30 * len(train_loader),
                                            eta_min=5e-5)

    # Define the training pipeline
    trainer = Trainer(model=model,
                      device=device,
                      optimizer=optimizer,
                      criterion=criterion,
                      scheduler=scheduler,
                      resume=args.resume,
                      chk_name=str(args.scenario)+'_'+str(args.cr))

    # Start training
    trainer.loop(args.epochs, train_loader, val_loader, test_loader)

    # NOTE(review): NMSE is compared against 0, so weights are only saved
    # when the final NMSE is negative (in dB) — confirm this threshold is
    # intentional.
    best = 0
    # Final testing
    loss, rho, nmse = Tester(model, device, criterion)(test_loader)
    print(f"\n=! Final test loss: {loss:.3e}"
          f"\n         test rho: {rho:.3e}"
          f"\n        test NMSE: {nmse:.3e}\n")

    if nmse < best:
        # Save encoder and decoder separately. The AttributeError fallback
        # handles models wrapped in (Distributed)DataParallel, where the
        # submodules live under `.module`.
        # save encoder
        modelSave1 = './Modelsave/32encoder.pth.tar'
        try:
            torch.save({'state_dict': model.encoder.state_dict(), }, modelSave1)
        except AttributeError:  # BUG FIX: was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.
            torch.save({'state_dict': model.module.encoder.state_dict(), }, modelSave1)
        # save decoder
        modelSave2 = './Modelsave/32decoder.pth.tar'
        try:
            torch.save({'state_dict': model.decoder.state_dict(), }, modelSave2)
        except AttributeError:
            torch.save({'state_dict': model.module.decoder.state_dict(), }, modelSave2)
        print('Model saved!')
        best = nmse
# Script entry point.
if __name__ == "__main__":
    main()
| 2,892 | 31.505618 | 91 | py |
CLNet | CLNet-main/dataset/cost2100.py | import os
import numpy as np
import scipy.io as sio
import torch
from torch.utils.data import DataLoader, TensorDataset
__all__ = ['Cost2100DataLoader', 'PreFetcher']
class PreFetcher:
    r""" Data pre-fetcher to accelerate the data loading.

    Wraps a DataLoader and copies the next batch to the GPU on a side
    CUDA stream while the current batch is being consumed. Requires CUDA.
    """

    def __init__(self, loader):
        self.ori_loader = loader
        self.len = len(loader)
        self.stream = torch.cuda.Stream()  # side stream for async H2D copies
        self.next_input = None

    def preload(self):
        """Fetch the next batch and start its GPU transfer asynchronously."""
        try:
            self.next_input = next(self.loader)
        except StopIteration:
            self.next_input = None
            return

        with torch.cuda.stream(self.stream):
            for idx, tensor in enumerate(self.next_input):
                self.next_input[idx] = tensor.cuda(non_blocking=True)

    def __len__(self):
        return self.len

    def __iter__(self):
        self.loader = iter(self.ori_loader)
        self.preload()
        return self

    def __next__(self):
        # Wait until the prefetch stream finished copying this batch.
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        if input is None:
            raise StopIteration
        # Keep the tensors alive for the consuming stream before reuse.
        for tensor in input:
            tensor.record_stream(torch.cuda.current_stream())
        self.preload()
        return input
class Cost2100DataLoader(object):
    r""" PyTorch DataLoader for COST2100 dataset.

    Loads the train/val/test .mat files for the given scenario ('in' or
    'out') into memory and, when called, returns the three DataLoaders
    (wrapped in PreFetcher when pin_memory/GPU is used).
    """

    def __init__(self, root, batch_size, num_workers, pin_memory, scenario):
        assert os.path.isdir(root)
        assert scenario in {"in", "out"}
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory

        dir_train = os.path.join(root, f"DATA_Htrain{scenario}.mat")
        dir_val = os.path.join(root, f"DATA_Hval{scenario}.mat")
        dir_test = os.path.join(root, f"DATA_Htest{scenario}.mat")
        dir_raw = os.path.join(root, f"DATA_HtestF{scenario}_all.mat")
        # Samples are reshaped to (N, 2, 32, 32): real/imag x Nt x Nc.
        channel, nt, nc, nc_expand = 2, 32, 32, 125

        # Training data loading
        data_train = sio.loadmat(dir_train)['HT']
        data_train = torch.tensor(data_train, dtype=torch.float32).view(
            data_train.shape[0], channel, nt, nc)
        self.train_dataset = TensorDataset(data_train)

        # Validation data loading
        data_val = sio.loadmat(dir_val)['HT']
        data_val = torch.tensor(data_val, dtype=torch.float32).view(
            data_val.shape[0], channel, nt, nc)
        self.val_dataset = TensorDataset(data_val)

        # Test data loading, including the sparse data and the raw data.
        # The raw complex channel is split into stacked real/imag parts.
        data_test = sio.loadmat(dir_test)['HT']
        data_test = torch.tensor(data_test, dtype=torch.float32).view(
            data_test.shape[0], channel, nt, nc)
        raw_test = sio.loadmat(dir_raw)['HF_all']
        real = torch.tensor(np.real(raw_test), dtype=torch.float32)
        imag = torch.tensor(np.imag(raw_test), dtype=torch.float32)
        raw_test = torch.cat((real.view(raw_test.shape[0], nt, nc_expand, 1),
                              imag.view(raw_test.shape[0], nt, nc_expand, 1)), dim=3)
        self.test_dataset = TensorDataset(data_test, raw_test)

    def __call__(self):
        """Return (train_loader, val_loader, test_loader)."""
        train_loader = DataLoader(self.train_dataset,
                                  batch_size=self.batch_size,
                                  num_workers=self.num_workers,
                                  pin_memory=self.pin_memory,
                                  shuffle=True)
        val_loader = DataLoader(self.val_dataset,
                                batch_size=self.batch_size,
                                num_workers=self.num_workers,
                                pin_memory=self.pin_memory,
                                shuffle=False)
        test_loader = DataLoader(self.test_dataset,
                                 batch_size=self.batch_size,
                                 num_workers=self.num_workers,
                                 pin_memory=self.pin_memory,
                                 shuffle=False)

        # Accelerate CUDA data loading with pre-fetcher if GPU is used.
        if self.pin_memory is True:
            train_loader = PreFetcher(train_loader)
            val_loader = PreFetcher(val_loader)
            test_loader = PreFetcher(test_loader)

        return train_loader, val_loader, test_loader
| 4,282 | 35.922414 | 85 | py |
CLNet | CLNet-main/models/clnet.py | r""" The proposed CLNet
"""
import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
from utils import logger
__all__ = ["clnet"]
class ConvBN(nn.Sequential):
    """Bias-free Conv2d immediately followed by BatchNorm2d.

    `kernel_size` may be an int or a [kh, kw] pair; "same"-style padding is
    derived from it so the spatial size is preserved at stride 1.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, groups=1):
        if isinstance(kernel_size, int):
            padding = (kernel_size - 1) // 2
        else:
            padding = [(k - 1) // 2 for k in kernel_size]
        modules = OrderedDict([
            ('conv', nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                               padding=padding, groups=groups, bias=False)),
            ('bn', nn.BatchNorm2d(out_planes)),
        ])
        super(ConvBN, self).__init__(modules)
class CRBlock(nn.Module):
    """Residual refinement block with two parallel convolutional paths.

    Path 1 stacks 3x3, 1x3 and 3x1 ConvBNs; path 2 stacks 1x5 and 5x1.
    Their outputs are concatenated, merged by a 1x1 ConvBN, and added back
    onto the input (identity) before a final LeakyReLU.
    """

    def __init__(self):
        super(CRBlock, self).__init__()
        slope = 0.3
        self.path1 = nn.Sequential(OrderedDict([
            ('conv3x3', ConvBN(2, 7, 3)),
            ('relu1', nn.LeakyReLU(negative_slope=slope, inplace=True)),
            ('conv1x3', ConvBN(7, 7, [1, 3])),
            ('relu2', nn.LeakyReLU(negative_slope=slope, inplace=True)),
            ('conv3x1', ConvBN(7, 7, [3, 1])),
        ]))
        self.path2 = nn.Sequential(OrderedDict([
            ('conv1x5', ConvBN(2, 7, [1, 5])),
            ('relu', nn.LeakyReLU(negative_slope=slope, inplace=True)),
            ('conv5x1', ConvBN(7, 7, [5, 1])),
        ]))
        self.conv1x1 = ConvBN(7 * 2, 2, 1)
        self.identity = nn.Identity()
        self.relu = nn.LeakyReLU(negative_slope=slope, inplace=True)

    def forward(self, x):
        residual = self.identity(x)
        merged = torch.cat((self.path1(x), self.path2(x)), dim=1)
        merged = self.conv1x1(self.relu(merged))
        return self.relu(merged + residual)
class hsigmoid(nn.Module):
    """Hard sigmoid activation: relu6(x + 3) / 6, i.e. a piecewise-linear
    approximation of the sigmoid saturating at 0 (x <= -3) and 1 (x >= 3)."""

    def forward(self, x):
        shifted = F.relu6(x + 3, inplace=True)
        return shifted / 6
class BasicConv(nn.Module):
    """Conv2d with optional BatchNorm2d and optional ReLU.

    The `bn` / `relu` flags toggle the normalisation and activation stages;
    the convolution itself is bias-free by default (`bias=False`).
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        if bn:
            self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
        else:
            self.bn = None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        # Apply whichever optional stages were configured, in order.
        for stage in (self.bn, self.relu):
            if stage is not None:
                out = stage(out)
        return out
class ChannelPool(nn.Module):
    """Compress the channel dimension to two maps: per-pixel max and mean."""

    def forward(self, x):
        max_map = torch.max(x, 1)[0].unsqueeze(1)
        mean_map = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_map, mean_map), dim=1)
class SpatialGate(nn.Module):
    """Spatial attention gate: each pixel is re-weighted by a sigmoid mask
    computed from the channel-pooled (max/mean) feature maps."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 3
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)

    def forward(self, x):
        pooled = self.compress(x)
        mask = torch.sigmoid(self.spatial(pooled))  # broadcast over channels
        return x * mask
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools each channel, passes the vector through a
    bottleneck MLP (reduction factor `reduction`) with a sigmoid, and
    re-scales the input channels by the resulting weights.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        scale = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * scale.expand_as(x)
class Encoder(nn.Module):
    """CLNet encoder.

    Two convolutional branches — one with spatial attention (SpatialGate),
    one with channel attention (SELayer) — are concatenated and fused, then
    compressed to a length-(2048/reduction) codeword by a 1x1 Conv1d that
    replaces the usual fully-connected layer.
    """

    def __init__(self, reduction=4):
        super(Encoder, self).__init__()
        total_size, in_channel, w, h = 2048, 2, 32, 32
        self.encoder1 = nn.Sequential(OrderedDict([
            ("conv3x3_bn", ConvBN(in_channel, 2, 3)),
            ("relu1", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
            ("conv1x9_bn", ConvBN(2, 2, [1, 9])),
            ("relu2", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
            ("conv9x1_bn", ConvBN(2, 2, [9, 1])),
        ]))
        self.encoder2 = ConvBN(in_channel, 32, 1)
        self.encoder_conv = nn.Sequential(OrderedDict([
            ("relu1", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
            ("conv1x1_bn", ConvBN(34, 2, 1)),
            ("relu2", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
        ]))
        self.sa = SpatialGate()  # spatial attention on branch 1
        self.se = SELayer(32)    # channel attention on branch 2
        self.replace_efc = nn.Conv1d(total_size, total_size // reduction, 1)

    def forward(self, x):
        n = x.detach().size(0)
        branch1 = self.sa(self.encoder1(x))
        branch2 = self.se(self.encoder2(x))
        fused = self.encoder_conv(torch.cat((branch1, branch2), dim=1))
        code = fused.view(n, -1).unsqueeze(2)  # [n, 2048, 1]
        return self.replace_efc(code)          # [n, 2048 // reduction, 1]
class Decoder(nn.Module):
    """CLNet decoder.

    Expands the codeword back to [n, 2, 32, 32] with a transposed 1x1
    Conv1d, refines it with a 5x5 ConvBN and two CRBlocks, and maps the
    result into [0, 1] with a hard sigmoid.
    """

    def __init__(self, reduction=4):
        super(Decoder, self).__init__()
        total_size, in_channel, w, h = 2048, 2, 32, 32
        self.replace_dfc = nn.ConvTranspose1d(total_size // reduction, total_size, 1)
        stages = OrderedDict([
            ("conv5x5_bn", ConvBN(2, 2, 5)),
            ("relu", nn.LeakyReLU(negative_slope=0.3, inplace=True)),
            ("CRBlock1", CRBlock()),
            ("CRBlock2", CRBlock())
        ])
        self.decoder_feature = nn.Sequential(stages)
        self.sigmoid = nn.Sigmoid()
        self.hsig = hsigmoid()
        # Xavier init for conv/linear weights; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.xavier_uniform_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        c, h, w = 2, 32, 32
        expanded = self.replace_dfc(x)       # [n, 2048, 1]
        feature = expanded.view(-1, c, h, w)
        feature = self.decoder_feature(feature)
        return self.hsig(feature)
class CLNet(nn.Module):
    """Full CLNet autoencoder for CSI feedback compression."""

    def __init__(self, reduction=4):
        super(CLNet, self).__init__()
        total_size, in_channel, w, h = 2048, 2, 32, 32
        logger.info(f'reduction={reduction}')
        self.encoder = Encoder(reduction)
        self.decoder = Decoder(reduction)
        # Re-apply Xavier / constant initialisation over the whole model.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.xavier_uniform_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        code = self.encoder(x)
        return self.decoder(code)
def clnet(reduction=4):
    r""" Create a proposed CLNet.

    :param reduction: the reciprocal of compression ratio
    :return: an instance of CLNet
    """
    return CLNet(reduction=reduction)
| 7,266 | 32.957944 | 154 | py |
CLNet | CLNet-main/.ipynb_checkpoints/main-checkpoint.py | import torch
import torch.nn as nn
from utils.parser import args
from utils import logger, Trainer, Tester
from utils import init_device, init_model, FakeLR, WarmUpCosineAnnealingLR
from dataset import Cost2100DataLoader
def main():
    """End-to-end entry point.

    Sets up the device, data loaders and CLNet model from the parsed CLI
    `args`, then either evaluates a pretrained model (``--evaluate``) or
    trains it and saves the best encoder/decoder weights.
    """
    logger.info('=> PyTorch Version: {}'.format(torch.__version__))
    # Environment initialization
    device, pin_memory = init_device(args.seed, args.cpu, args.gpu, args.cpu_affinity)
    # Create the data loader
    train_loader, val_loader, test_loader = Cost2100DataLoader(
        root=args.data_dir,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=pin_memory,
        scenario=args.scenario)()
    # Define model
    model = init_model(args)
    model.to(device)
    # Define loss function
    criterion = nn.MSELoss().to(device)
    # Inference mode: evaluate once and exit.
    if args.evaluate:
        Tester(model, device, criterion)(test_loader)
        return
    # Define optimizer and scheduler
    lr_init = 1e-3 if args.scheduler == 'const' else 2e-3
    optimizer = torch.optim.Adam(model.parameters(), lr_init)
    if args.scheduler == 'const':
        scheduler = FakeLR(optimizer=optimizer)
    else:
        scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
                                            T_max=args.epochs * len(train_loader),
                                            T_warmup=30 * len(train_loader),
                                            eta_min=5e-5)
    # Define the training pipeline
    trainer = Trainer(model=model,
                      device=device,
                      optimizer=optimizer,
                      criterion=criterion,
                      scheduler=scheduler,
                      resume=args.resume,
                      chk_name=str(args.scenario)+'_'+str(args.cr))
    # Start training
    trainer.loop(args.epochs, train_loader, val_loader, test_loader)
    # NOTE(review): `best` starts at 0, so the weights below are only saved
    # when the final NMSE (in dB) is negative — confirm this is intended.
    best = 0
    # Final testing
    loss, rho, nmse = Tester(model, device, criterion)(test_loader)
    print(f"\n=! Final test loss: {loss:.3e}"
          f"\n test rho: {rho:.3e}"
          f"\n test NMSE: {nmse:.3e}\n")
    if nmse < best:
        # Save encoder and decoder separately; fall back to `model.module`
        # when the model is wrapped (e.g. by DataParallel).
        modelSave1 = './Modelsave/32encoder.pth.tar'
        try:
            torch.save({'state_dict': model.encoder.state_dict(), }, modelSave1)
        except AttributeError:  # narrowed from a bare `except:`
            torch.save({'state_dict': model.module.encoder.state_dict(), }, modelSave1)
        # save decoder
        modelSave2 = './Modelsave/32decoder.pth.tar'
        try:
            torch.save({'state_dict': model.decoder.state_dict(), }, modelSave2)
        except AttributeError:  # narrowed from a bare `except:`
            torch.save({'state_dict': model.module.decoder.state_dict(), }, modelSave2)
        print('Model saved!')
        best = nmse
# Script entry point.
if __name__ == "__main__":
    main()
| 2,892 | 31.505618 | 91 | py |
CLNet | CLNet-main/utils/statics.py | import torch
from packaging import version
__all__ = ['AverageMeter', 'evaluator']
class AverageMeter(object):
    r"""Computes and stores the average and current value

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262

    Attributes:
        name: label used when printing the meter.
        val: most recent value passed to :meth:`update`.
        sum: weighted running sum of observed values.
        count: total weight (number of samples) accumulated.
        avg: running weighted average, ``sum / count``.
    """

    def __init__(self, name):
        self.name = name
        # reset() establishes val/avg/sum/count; the original duplicated
        # all four assignments here as well, which was redundant.
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __repr__(self):
        return f"==> For {self.name}: sum={self.sum}; avg={self.avg}"
def evaluator(sparse_pred, sparse_gt, raw_gt):
    r""" Evaluation of decoding implemented in PyTorch Tensor
    Computes normalized mean square error (NMSE) and rho.

    Args:
        sparse_pred: recovered angular-delay channel, (n, 2, 32, 32),
            values in [0, 1] (de-centred to [-0.5, 0.5] below).
        sparse_gt: ground-truth angular-delay channel, same shape/range.
        raw_gt: raw spatial-frequency ground truth, (n, 32, 125, 2)
            (real/imag last) — cf. Cost2100DataLoader.
    Returns:
        (rho, nmse): mean correlation coefficient and NMSE in dB.
    """
    with torch.no_grad():
        # Basic params
        nt = 32
        nc = 32
        nc_expand = 257  # zero-padded FFT length (delay -> frequency axis)
        # De-centralize
        sparse_gt = sparse_gt - 0.5
        sparse_pred = sparse_pred - 0.5
        # Calculate the NMSE: per-sample error-to-signal energy ratio,
        # averaged over the batch and expressed in dB.
        power_gt = sparse_gt[:, 0, :, :] ** 2 + sparse_gt[:, 1, :, :] ** 2
        difference = sparse_gt - sparse_pred
        mse = difference[:, 0, :, :] ** 2 + difference[:, 1, :, :] ** 2
        nmse = 10 * torch.log10((mse.sum(dim=[1, 2]) / power_gt.sum(dim=[1, 2])).mean())
        # Calculate the Rho
        n = sparse_pred.size(0)
        sparse_pred = sparse_pred.permute(0, 2, 3, 1)  # Move the real/imaginary dim to the last
        # Zero-pad the delay axis from nc to nc_expand before the FFT.
        zeros = sparse_pred.new_zeros((n, nt, nc_expand - nc, 2))
        # When pytorch version is above 1.7.0, complex number representation is changed from [a, b] to [a, b.j]
        if version.parse(torch.__version__) > version.parse("1.7.0"):
            sparse_pred = torch.view_as_complex(torch.cat((sparse_pred, zeros), dim=2))
            raw_pred = torch.view_as_real(torch.fft.fft(sparse_pred))[:, :, :125, :]
        else:
            sparse_pred = torch.cat((sparse_pred, zeros), dim=2)
            raw_pred = torch.fft(sparse_pred, signal_ndim=1)[:, :, :125, :]
        # Per-subcarrier complex correlation |<pred, gt>| / (||pred|| ||gt||),
        # with the inner product and norms taken over the nt antenna axis.
        norm_pred = raw_pred[..., 0] ** 2 + raw_pred[..., 1] ** 2
        norm_pred = torch.sqrt(norm_pred.sum(dim=1))
        norm_gt = raw_gt[..., 0] ** 2 + raw_gt[..., 1] ** 2
        norm_gt = torch.sqrt(norm_gt.sum(dim=1))
        real_cross = raw_pred[..., 0] * raw_gt[..., 0] + raw_pred[..., 1] * raw_gt[..., 1]
        real_cross = real_cross.sum(dim=1)
        imag_cross = raw_pred[..., 0] * raw_gt[..., 1] - raw_pred[..., 1] * raw_gt[..., 0]
        imag_cross = imag_cross.sum(dim=1)
        norm_cross = torch.sqrt(real_cross ** 2 + imag_cross ** 2)
        rho = (norm_cross / (norm_pred * norm_gt)).mean()
        return rho, nmse
| 2,882 | 34.158537 | 111 | py |
CLNet | CLNet-main/utils/scheduler.py | import math
from torch.optim.lr_scheduler import _LRScheduler
__all__ = ['WarmUpCosineAnnealingLR', 'FakeLR']
class WarmUpCosineAnnealingLR(_LRScheduler):
    """Linear warm-up followed by cosine annealing.

    For the first `T_warmup` steps the LR grows linearly from 0 to the base
    LR; afterwards it follows a cosine decay from the base LR down to
    `eta_min` at step `T_max`.
    """

    def __init__(self, optimizer, T_max, T_warmup, eta_min=0, last_epoch=-1):
        self.T_max = T_max
        self.T_warmup = T_warmup
        self.eta_min = eta_min
        super(WarmUpCosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        step = self.last_epoch
        if step < self.T_warmup:
            # Warm-up phase: linear ramp from 0 to base LR.
            return [base_lr * step / self.T_warmup for base_lr in self.base_lrs]
        # Annealing phase: cosine factor goes from 1 (at T_warmup) to 0 (at T_max).
        k = 1 + math.cos(math.pi * (step - self.T_warmup) / (self.T_max - self.T_warmup))
        return [self.eta_min + (base_lr - self.eta_min) * k / 2
                for base_lr in self.base_lrs]
class FakeLR(_LRScheduler):
    """No-op scheduler: the learning rate stays at its initial value."""

    def __init__(self, optimizer):
        super(FakeLR, self).__init__(optimizer)

    def get_lr(self):
        # Always report the base learning rates unchanged.
        return self.base_lrs
| 955 | 33.142857 | 104 | py |
CLNet | CLNet-main/utils/init.py | import os
import random
import thop
import torch
from models import clnet
from utils import logger, line_seg
__all__ = ["init_device", "init_model"]
def init_device(seed=None, cpu=None, gpu=None, affinity=None):
    """Prepare the execution environment.

    Optionally pins the process to a CPU affinity mask, seeds the RNGs for
    reproducibility, restricts CUDA to one GPU id, and returns the torch
    device together with the pin_memory flag for the data loaders.
    """
    # set the CPU affinity
    if affinity is not None:
        os.system(f'taskset -p {affinity} {os.getpid()}')

    # Seed the Python and torch RNGs for deterministic runs.
    if seed is not None:
        random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    # Restrict CUDA to the chosen GPU id.
    if gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

    use_cuda = not cpu and torch.cuda.is_available()
    if use_cuda:
        device = torch.device('cuda')
        torch.backends.cudnn.benchmark = True
        if seed is not None:
            torch.cuda.manual_seed(seed)
        pin_memory = True
        logger.info("Running on GPU%d" % (gpu if gpu else 0))
    else:
        pin_memory = False
        device = torch.device('cpu')
        logger.info("Running on CPU")
    return device, pin_memory
def init_model(args):
    """Build CLNet from `args`, optionally load a pretrained checkpoint, and
    log the model's FLOPs, parameter count and structure.

    Args:
        args: parsed CLI namespace; uses `cr` (reciprocal of the compression
            ratio) and `pretrained` (checkpoint path or None).
    Returns:
        The (possibly pretrained) CLNet model, still on CPU.
    """
    # Model loading
    model = clnet(reduction=args.cr)
    if args.pretrained is not None:
        assert os.path.isfile(args.pretrained)
        state_dict = torch.load(args.pretrained,
                                map_location=torch.device('cpu'))['state_dict']
        model.load_state_dict(state_dict,strict=False)
        # NOTE(review): the call above already uses strict=False, so the old
        # "if errors, try this" hint duplicated it and was removed.
        logger.info("pretrained model loaded from {}".format(args.pretrained))
    # Model flops and params counting (thop profiles a dummy 1x2x32x32 input)
    image = torch.randn([1, 2, 32, 32])
    flops, params = thop.profile(model, inputs=(image,), verbose=False)
    flops, params = thop.clever_format([flops, params], "%.3f")
    # Model info logging
    logger.info(f'=> Model Name: CLNet [pretrained: {args.pretrained}]')
    logger.info(f'=> Model Config: compression ratio=1/{args.cr}')
    logger.info(f'=> Model Flops: {flops}')
    logger.info(f'=> Model Params Num: {params}\n')
    logger.info(f'{line_seg}\n{model}\n{line_seg}\n')
    return model
| 2,102 | 29.926471 | 79 | py |
CLNet | CLNet-main/utils/solver.py | import time
import os
import torch
from collections import namedtuple
from utils import logger
from utils.statics import AverageMeter, evaluator
__all__ = ['Trainer', 'Tester']
field = ('nmse', 'rho', 'epoch')
Result = namedtuple('Result', field, defaults=(None,) * len(field))
class Trainer:
    r""" The training pipeline for encoder-decoder architecture

    Runs training epoch by epoch, validates every `val_freq` epochs and
    tests every `test_freq` epochs, tracking (and checkpointing) the best
    rho and the best NMSE observed so far.
    """
    def __init__(self, model, device, optimizer, criterion, scheduler, chk_name, resume=None,
                 save_path='./Experiments/chk', print_freq=20, val_freq=10, test_freq=10):
        # Basic arguments
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.scheduler = scheduler
        self.device = device
        # Verbose arguments
        self.resume_file = resume      # checkpoint file to resume from, or None
        self.save_path = save_path     # directory where checkpoints are written
        self.print_freq = print_freq   # batches between progress log lines
        self.val_freq = val_freq       # epochs between validation passes
        self.test_freq = test_freq     # epochs between test passes
        # Pipeline arguments
        self.cur_epoch = 1
        self.all_epoch = None
        self.train_loss = None
        self.val_loss = None
        self.test_loss = None
        self.best_rho = Result()       # best (highest) rho seen so far
        self.best_nmse = Result()      # best (lowest) NMSE seen so far
        self.tester = Tester(model, device, criterion, print_freq)
        self.test_loader = None
        self.chk_name = chk_name       # suffix used in checkpoint file names
    def loop(self, epochs, train_loader, val_loader, test_loader):
        r""" The main loop function which runs training and validation iteratively.
        Args:
            epochs (int): The total epoch for training
            train_loader (DataLoader): Data loader for training data.
            val_loader (DataLoader): Data loader for validation data.
            test_loader (DataLoader): Data loader for test data.
        """
        self.all_epoch = epochs
        self._resume()
        for ep in range(self.cur_epoch, epochs + 1):
            self.cur_epoch = ep
            # conduct training, validation and test
            self.train_loss = self.train(train_loader)
            if ep % self.val_freq == 0:
                self.val_loss = self.val(val_loader)
            if ep % self.test_freq == 0:
                self.test_loss, rho, nmse = self.test(test_loader)
            else:
                # No metrics on non-test epochs; postprocessing skips best-saving.
                rho, nmse = None, None
            # conduct saving, visualization and log printing
            self._loop_postprocessing(rho, nmse)
    def train(self, train_loader):
        r""" train the model on the given data loader for one epoch.
        Args:
            train_loader (DataLoader): the training data loader
        """
        self.model.train()
        with torch.enable_grad():
            return self._iteration(train_loader)
    def val(self, val_loader):
        r""" exam the model with validation set.
        Args:
            val_loader: (DataLoader): the validation data loader
        """
        self.model.eval()
        with torch.no_grad():
            return self._iteration(val_loader)
    def test(self, test_loader):
        r""" Truly test the model on the test dataset for one epoch.
        Args:
            test_loader (DataLoader): the test data loader
        """
        self.model.eval()
        with torch.no_grad():
            return self.tester(test_loader, verbose=False)
    def _iteration(self, data_loader):
        # Shared train/val pass; gradient updates happen only when
        # self.model.training is True (i.e. called from train()).
        iter_loss = AverageMeter('Iter loss')
        iter_time = AverageMeter('Iter time')
        time_tmp = time.time()
        for batch_idx, (sparse_gt, ) in enumerate(data_loader):
            sparse_gt = sparse_gt.to(self.device)
            sparse_pred = self.model(sparse_gt)
            loss = self.criterion(sparse_pred, sparse_gt)
            # Scheduler update, backward pass and optimization
            if self.model.training:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()  # per-batch LR schedule
            # Log and visdom update
            # NOTE(review): update() receives the loss tensor rather than
            # loss.item(), so the meter accumulates tensors — confirm intended.
            iter_loss.update(loss)
            iter_time.update(time.time() - time_tmp)
            time_tmp = time.time()
            # plot progress
            if (batch_idx + 1) % self.print_freq == 0:
                logger.info(f'Epoch: [{self.cur_epoch}/{self.all_epoch}]'
                            f'[{batch_idx + 1}/{len(data_loader)}] '
                            f'lr: {self.scheduler.get_lr()[0]:.2e} | '
                            f'MSE loss: {iter_loss.avg:.3e} | '
                            f'time: {iter_time.avg:.3f}')
        mode = 'Train' if self.model.training else 'Val'
        logger.info(f'=> {mode} Loss: {iter_loss.avg:.3e}\n')
        return iter_loss.avg
    def _save(self, state, name):
        # Persist a checkpoint dict; silently skipped when save_path is unset.
        if self.save_path is None:
            logger.warning('No path to save checkpoints.')
            return
        os.makedirs(self.save_path, exist_ok=True)
        torch.save(state, os.path.join(self.save_path, name))
    def _resume(self):
        r""" protected function which resume from checkpoint at the beginning of training.
        """
        if self.resume_file is None:
            return None
        assert os.path.isfile(self.resume_file)
        logger.info(f'=> loading checkpoint {self.resume_file}')
        checkpoint = torch.load(self.resume_file)
        self.cur_epoch = checkpoint['epoch']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.scheduler.load_state_dict(checkpoint['scheduler'])
        self.best_rho = checkpoint['best_rho']
        self.best_nmse = checkpoint['best_nmse']
        self.cur_epoch += 1 # start from the next epoch
        logger.info(f'=> successfully loaded checkpoint {self.resume_file} '
                    f'from epoch {checkpoint["epoch"]}.\n')
    def _loop_postprocessing(self, rho, nmse):
        r""" private function which makes loop() function neater.
        """
        # save state generate
        state = {
            'epoch': self.cur_epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'best_rho': self.best_rho,
            'best_nmse': self.best_nmse
        }
        # save model with best rho and nmse
        # (rho is None on epochs where no test pass was run.)
        if rho is not None:
            # Higher rho is better; lower NMSE is better.
            if self.best_rho.rho is None or self.best_rho.rho < rho:
                self.best_rho = Result(rho=rho, nmse=nmse, epoch=self.cur_epoch)
                state['best_rho'] = self.best_rho
                self._save(state, name=f"best_rho_{self.chk_name}.pth")
            if self.best_nmse.nmse is None or self.best_nmse.nmse > nmse:
                self.best_nmse = Result(rho=rho, nmse=nmse, epoch=self.cur_epoch)
                state['best_nmse'] = self.best_nmse
                self._save(state, name=f"best_nmse_{self.chk_name}.pth")
        self._save(state, name=f'last_{self.chk_name}.pth')
        # print current best results
        if self.best_rho.rho is not None:
            print(f'\n=! Best rho: {self.best_rho.rho:.3e} ('
                  f'Corresponding nmse={self.best_rho.nmse:.3e}; '
                  f'epoch={self.best_rho.epoch})'
                  f'\n Best NMSE: {self.best_nmse.nmse:.3e} ('
                  f'Corresponding rho={self.best_nmse.rho:.3e}; '
                  f'epoch={self.best_nmse.epoch})\n')
class Tester:
    r""" The testing interface for classification

    (Despite the original wording, this evaluates CSI reconstruction:
    it reports MSE loss, rho and NMSE averaged over a test loader.)
    """
    def __init__(self, model, device, criterion, print_freq=20):
        self.model = model
        self.device = device
        self.criterion = criterion
        self.print_freq = print_freq   # batches between progress log lines
    def __call__(self, test_data, verbose=True):
        r""" Runs the testing procedure.
        Args:
            test_data (DataLoader): Data loader for validation data.
        Returns:
            Tuple (avg loss, avg rho, avg NMSE) over the whole loader.
        """
        self.model.eval()
        with torch.no_grad():
            loss, rho, nmse = self._iteration(test_data)
        if verbose:
            print(f'\n=> Test result: \nloss: {loss:.3e}'
                  f' rho: {rho:.3e} NMSE: {nmse:.3e}\n')
        return loss, rho, nmse
    def _iteration(self, data_loader):
        r""" protected function which test the model on given data loader for one epoch.
        """
        iter_rho = AverageMeter('Iter rho')
        iter_nmse = AverageMeter('Iter nmse')
        iter_loss = AverageMeter('Iter loss')
        iter_time = AverageMeter('Iter time')
        time_tmp = time.time()
        for batch_idx, (sparse_gt, raw_gt) in enumerate(data_loader):
            sparse_gt = sparse_gt.to(self.device)
            sparse_pred = self.model(sparse_gt)
            loss = self.criterion(sparse_pred, sparse_gt)
            # rho/NMSE computed against the raw spatial-frequency ground truth.
            rho, nmse = evaluator(sparse_pred, sparse_gt, raw_gt)
            # Log and visdom update
            iter_loss.update(loss)
            iter_rho.update(rho)
            iter_nmse.update(nmse)
            iter_time.update(time.time() - time_tmp)
            time_tmp = time.time()
            # plot progress
            if (batch_idx + 1) % self.print_freq == 0:
                logger.info(f'[{batch_idx + 1}/{len(data_loader)}] '
                            f'loss: {iter_loss.avg:.3e} | rho: {iter_rho.avg:.3e} | '
                            f'NMSE: {iter_nmse.avg:.3e} | time: {iter_time.avg:.3f}')
        logger.info(f'=> Test rho:{iter_rho.avg:.3e} NMSE: {iter_nmse.avg:.3e}\n')
        return iter_loss.avg, iter_rho.avg, iter_nmse.avg
| 9,472 | 34.215613 | 93 | py |
CLNet | CLNet-main/utils/.ipynb_checkpoints/solver-checkpoint.py | import time
import os
import torch
from collections import namedtuple
from utils import logger
from utils.statics import AverageMeter, evaluator
__all__ = ['Trainer', 'Tester']
field = ('nmse', 'rho', 'epoch')
Result = namedtuple('Result', field, defaults=(None,) * len(field))
class Trainer:
    r""" The training pipeline for encoder-decoder architecture

    Runs training epoch by epoch, validates every `val_freq` epochs and
    tests every `test_freq` epochs, tracking (and checkpointing) the best
    rho and the best NMSE observed so far.
    """
    def __init__(self, model, device, optimizer, criterion, scheduler, chk_name, resume=None,
                 save_path='./Experiments/chk', print_freq=20, val_freq=10, test_freq=10):
        # Basic arguments
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.scheduler = scheduler
        self.device = device
        # Verbose arguments
        self.resume_file = resume      # checkpoint file to resume from, or None
        self.save_path = save_path     # directory where checkpoints are written
        self.print_freq = print_freq   # batches between progress log lines
        self.val_freq = val_freq       # epochs between validation passes
        self.test_freq = test_freq     # epochs between test passes
        # Pipeline arguments
        self.cur_epoch = 1
        self.all_epoch = None
        self.train_loss = None
        self.val_loss = None
        self.test_loss = None
        self.best_rho = Result()       # best (highest) rho seen so far
        self.best_nmse = Result()      # best (lowest) NMSE seen so far
        self.tester = Tester(model, device, criterion, print_freq)
        self.test_loader = None
        self.chk_name = chk_name       # suffix used in checkpoint file names
    def loop(self, epochs, train_loader, val_loader, test_loader):
        r""" The main loop function which runs training and validation iteratively.
        Args:
            epochs (int): The total epoch for training
            train_loader (DataLoader): Data loader for training data.
            val_loader (DataLoader): Data loader for validation data.
            test_loader (DataLoader): Data loader for test data.
        """
        self.all_epoch = epochs
        self._resume()
        for ep in range(self.cur_epoch, epochs + 1):
            self.cur_epoch = ep
            # conduct training, validation and test
            self.train_loss = self.train(train_loader)
            if ep % self.val_freq == 0:
                self.val_loss = self.val(val_loader)
            if ep % self.test_freq == 0:
                self.test_loss, rho, nmse = self.test(test_loader)
            else:
                # No metrics on non-test epochs; postprocessing skips best-saving.
                rho, nmse = None, None
            # conduct saving, visualization and log printing
            self._loop_postprocessing(rho, nmse)
    def train(self, train_loader):
        r""" train the model on the given data loader for one epoch.
        Args:
            train_loader (DataLoader): the training data loader
        """
        self.model.train()
        with torch.enable_grad():
            return self._iteration(train_loader)
    def val(self, val_loader):
        r""" exam the model with validation set.
        Args:
            val_loader: (DataLoader): the validation data loader
        """
        self.model.eval()
        with torch.no_grad():
            return self._iteration(val_loader)
    def test(self, test_loader):
        r""" Truly test the model on the test dataset for one epoch.
        Args:
            test_loader (DataLoader): the test data loader
        """
        self.model.eval()
        with torch.no_grad():
            return self.tester(test_loader, verbose=False)
    def _iteration(self, data_loader):
        # Shared train/val pass; gradient updates happen only when
        # self.model.training is True (i.e. called from train()).
        iter_loss = AverageMeter('Iter loss')
        iter_time = AverageMeter('Iter time')
        time_tmp = time.time()
        for batch_idx, (sparse_gt, ) in enumerate(data_loader):
            sparse_gt = sparse_gt.to(self.device)
            sparse_pred = self.model(sparse_gt)
            loss = self.criterion(sparse_pred, sparse_gt)
            # Scheduler update, backward pass and optimization
            if self.model.training:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()  # per-batch LR schedule
            # Log and visdom update
            # NOTE(review): update() receives the loss tensor rather than
            # loss.item(), so the meter accumulates tensors — confirm intended.
            iter_loss.update(loss)
            iter_time.update(time.time() - time_tmp)
            time_tmp = time.time()
            # plot progress
            if (batch_idx + 1) % self.print_freq == 0:
                logger.info(f'Epoch: [{self.cur_epoch}/{self.all_epoch}]'
                            f'[{batch_idx + 1}/{len(data_loader)}] '
                            f'lr: {self.scheduler.get_lr()[0]:.2e} | '
                            f'MSE loss: {iter_loss.avg:.3e} | '
                            f'time: {iter_time.avg:.3f}')
        mode = 'Train' if self.model.training else 'Val'
        logger.info(f'=> {mode} Loss: {iter_loss.avg:.3e}\n')
        return iter_loss.avg
    def _save(self, state, name):
        # Persist a checkpoint dict; silently skipped when save_path is unset.
        if self.save_path is None:
            logger.warning('No path to save checkpoints.')
            return
        os.makedirs(self.save_path, exist_ok=True)
        torch.save(state, os.path.join(self.save_path, name))
    def _resume(self):
        r""" protected function which resume from checkpoint at the beginning of training.
        """
        if self.resume_file is None:
            return None
        assert os.path.isfile(self.resume_file)
        logger.info(f'=> loading checkpoint {self.resume_file}')
        checkpoint = torch.load(self.resume_file)
        self.cur_epoch = checkpoint['epoch']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.scheduler.load_state_dict(checkpoint['scheduler'])
        self.best_rho = checkpoint['best_rho']
        self.best_nmse = checkpoint['best_nmse']
        self.cur_epoch += 1 # start from the next epoch
        logger.info(f'=> successfully loaded checkpoint {self.resume_file} '
                    f'from epoch {checkpoint["epoch"]}.\n')
    def _loop_postprocessing(self, rho, nmse):
        r""" private function which makes loop() function neater.
        """
        # save state generate
        state = {
            'epoch': self.cur_epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'best_rho': self.best_rho,
            'best_nmse': self.best_nmse
        }
        # save model with best rho and nmse
        # (rho is None on epochs where no test pass was run.)
        if rho is not None:
            # Higher rho is better; lower NMSE is better.
            if self.best_rho.rho is None or self.best_rho.rho < rho:
                self.best_rho = Result(rho=rho, nmse=nmse, epoch=self.cur_epoch)
                state['best_rho'] = self.best_rho
                self._save(state, name=f"best_rho_{self.chk_name}.pth")
            if self.best_nmse.nmse is None or self.best_nmse.nmse > nmse:
                self.best_nmse = Result(rho=rho, nmse=nmse, epoch=self.cur_epoch)
                state['best_nmse'] = self.best_nmse
                self._save(state, name=f"best_nmse_{self.chk_name}.pth")
        self._save(state, name=f'last_{self.chk_name}.pth')
        # print current best results
        if self.best_rho.rho is not None:
            print(f'\n=! Best rho: {self.best_rho.rho:.3e} ('
                  f'Corresponding nmse={self.best_rho.nmse:.3e}; '
                  f'epoch={self.best_rho.epoch})'
                  f'\n Best NMSE: {self.best_nmse.nmse:.3e} ('
                  f'Corresponding rho={self.best_nmse.rho:.3e}; '
                  f'epoch={self.best_nmse.epoch})\n')
class Tester:
    r""" The testing interface for classification

    (Despite the original wording, this evaluates CSI reconstruction:
    it reports MSE loss, rho and NMSE averaged over a test loader.)
    """
    def __init__(self, model, device, criterion, print_freq=20):
        self.model = model
        self.device = device
        self.criterion = criterion
        self.print_freq = print_freq   # batches between progress log lines
    def __call__(self, test_data, verbose=True):
        r""" Runs the testing procedure.
        Args:
            test_data (DataLoader): Data loader for validation data.
        Returns:
            Tuple (avg loss, avg rho, avg NMSE) over the whole loader.
        """
        self.model.eval()
        with torch.no_grad():
            loss, rho, nmse = self._iteration(test_data)
        if verbose:
            print(f'\n=> Test result: \nloss: {loss:.3e}'
                  f' rho: {rho:.3e} NMSE: {nmse:.3e}\n')
        return loss, rho, nmse
    def _iteration(self, data_loader):
        r""" protected function which test the model on given data loader for one epoch.
        """
        iter_rho = AverageMeter('Iter rho')
        iter_nmse = AverageMeter('Iter nmse')
        iter_loss = AverageMeter('Iter loss')
        iter_time = AverageMeter('Iter time')
        time_tmp = time.time()
        for batch_idx, (sparse_gt, raw_gt) in enumerate(data_loader):
            sparse_gt = sparse_gt.to(self.device)
            sparse_pred = self.model(sparse_gt)
            loss = self.criterion(sparse_pred, sparse_gt)
            # rho/NMSE computed against the raw spatial-frequency ground truth.
            rho, nmse = evaluator(sparse_pred, sparse_gt, raw_gt)
            # Log and visdom update
            iter_loss.update(loss)
            iter_rho.update(rho)
            iter_nmse.update(nmse)
            iter_time.update(time.time() - time_tmp)
            time_tmp = time.time()
            # plot progress
            if (batch_idx + 1) % self.print_freq == 0:
                logger.info(f'[{batch_idx + 1}/{len(data_loader)}] '
                            f'loss: {iter_loss.avg:.3e} | rho: {iter_rho.avg:.3e} | '
                            f'NMSE: {iter_nmse.avg:.3e} | time: {iter_time.avg:.3f}')
        logger.info(f'=> Test rho:{iter_rho.avg:.3e} NMSE: {iter_nmse.avg:.3e}\n')
        return iter_loss.avg, iter_rho.avg, iter_nmse.avg
| 9,472 | 34.215613 | 93 | py |
CLNet | CLNet-main/utils/.ipynb_checkpoints/init-checkpoint.py | import os
import random
import thop
import torch
from models import clnet
from utils import logger, line_seg
__all__ = ["init_device", "init_model"]
def init_device(seed=None, cpu=None, gpu=None, affinity=None):
    """Prepare the execution environment.

    Optionally pins the process to a CPU affinity mask, seeds the RNGs for
    reproducibility, restricts CUDA to one GPU id, and returns the torch
    device together with the pin_memory flag for the data loaders.
    """
    # set the CPU affinity
    if affinity is not None:
        os.system(f'taskset -p {affinity} {os.getpid()}')

    # Seed the Python and torch RNGs for deterministic runs.
    if seed is not None:
        random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    # Restrict CUDA to the chosen GPU id.
    if gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

    use_cuda = not cpu and torch.cuda.is_available()
    if use_cuda:
        device = torch.device('cuda')
        torch.backends.cudnn.benchmark = True
        if seed is not None:
            torch.cuda.manual_seed(seed)
        pin_memory = True
        logger.info("Running on GPU%d" % (gpu if gpu else 0))
    else:
        pin_memory = False
        device = torch.device('cpu')
        logger.info("Running on CPU")
    return device, pin_memory
def init_model(args):
    """Build CLNet from `args`, optionally load a pretrained checkpoint, and
    log the model's FLOPs, parameter count and structure.

    Args:
        args: parsed CLI namespace; uses `cr` (reciprocal of the compression
            ratio) and `pretrained` (checkpoint path or None).
    Returns:
        The (possibly pretrained) CLNet model, still on CPU.
    """
    # Model loading
    model = clnet(reduction=args.cr)
    if args.pretrained is not None:
        assert os.path.isfile(args.pretrained)
        state_dict = torch.load(args.pretrained,
                                map_location=torch.device('cpu'))['state_dict']
        model.load_state_dict(state_dict,strict=False)
        # NOTE(review): the call above already uses strict=False, so the old
        # "if errors, try this" hint duplicated it and was removed.
        logger.info("pretrained model loaded from {}".format(args.pretrained))
    # Model flops and params counting (thop profiles a dummy 1x2x32x32 input)
    image = torch.randn([1, 2, 32, 32])
    flops, params = thop.profile(model, inputs=(image,), verbose=False)
    flops, params = thop.clever_format([flops, params], "%.3f")
    # Model info logging
    logger.info(f'=> Model Name: CLNet [pretrained: {args.pretrained}]')
    logger.info(f'=> Model Config: compression ratio=1/{args.cr}')
    logger.info(f'=> Model Flops: {flops}')
    logger.info(f'=> Model Params Num: {params}\n')
    logger.info(f'{line_seg}\n{model}\n{line_seg}\n')
    return model
| 2,102 | 29.926471 | 79 | py |
modir | modir-master/drivers/run_warmup.py | import sys
sys.path += ["../"]
import pandas as pd
from transformers import glue_compute_metrics as compute_metrics, glue_output_modes as output_modes, glue_processors as processors
from transformers import (
AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
RobertaModel,
)
import transformers
from utils.eval_mrr import passage_dist_eval
from model.models import MSMarcoConfigDict
from model.domain_classifier import DomainClassifier, DummyModule
from utils.lamb import Lamb
from utils.modir_utils import compute_total_grad_L2_norm, intrain_dev_eval, intrain_save_checkpoint
import os
from os import listdir
from os.path import isfile, join
import argparse
import glob
import json
import logging
import random
import numpy as np
import torch
from tqdm import tqdm, trange
import torch.distributed as dist
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from torch import nn
from utils.util import getattr_recursive, set_seed, is_first_worker
from utils.modir_utils import (
compute_total_grad_L2_norm, intrain_dev_eval, intrain_save_checkpoint,
build_dl_iter_from_file, get_next,
build_input_from_batch, get_module
)
try:
from apex import amp
except ImportError:
print("apex not imported")
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def train(args, model, dc_model, tokenizer, train_file, tgd_file, file_process_fn):
    """ Train the model """
    # Adversarial domain-adaptation training loop. Per step:
    #   1. forward the ranking encoder on a source-domain (srd) triple batch and
    #      embed a target-domain (tgd) batch;
    #   2. train the domain classifier dc_model on DETACHED embeddings (L_adv_D);
    #   3. train the encoder against a frozen copy of the classifier
    #      (ranking loss + dyn_lamb * L_adv_M).
    # Returns (global_step, tr_loss / global_step); tr_loss is never accumulated
    # (see its definition below), so the second element is always 0.
    tb_writer = None
    if is_first_worker():
        tb_writer = SummaryWriter(log_dir=args.log_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # Effective batch size across accumulation steps and all distributed workers.
    real_batch_size = args.train_batch_size * args.gradient_accumulation_steps * \
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    # Create a static copy of dc_model
    # (refreshed from dc_model each step; the encoder back-props through this
    # frozen copy in phase 3 so classifier weights are not updated there).
    static_dc_model = DomainClassifier(args)
    static_dc_model.to(args.device)
    if args.max_steps > 0:
        t_total = args.max_steps
    else:
        t_total = args.expected_train_size // real_batch_size * args.num_train_epochs
    # layerwise optimization for lamb
    optimizer_grouped_parameters = []
    layer_optim_params = set()
    for layer_name in ["roberta.embeddings", "score_out", "downsample1", "downsample2", "downsample3", "embeddingHead"]:
        layer = getattr_recursive(model, layer_name)
        if layer is not None:
            optimizer_grouped_parameters.append({"params": layer.parameters()})
            for p in layer.parameters():
                layer_optim_params.add(p)
    if getattr_recursive(model, "roberta.encoder.layer") is not None:
        for layer in model.roberta.encoder.layer:
            optimizer_grouped_parameters.append({"params": layer.parameters()})
            for p in layer.parameters():
                layer_optim_params.add(p)
    # Catch-all group for parameters not covered by any named layer above.
    optimizer_grouped_parameters.append(
        {"params": [p for p in model.parameters() if p not in layer_optim_params]})
    optimizer_constructors = {
        "lamb": lambda param, lr: Lamb(
            param, lr=lr, eps=args.adam_epsilon
        ),
        "adamw": lambda param, lr: AdamW(
            param, lr=lr, eps=args.adam_epsilon
        ),
        "sgd": lambda param, lr: SGD(
            param, lr=lr,
        )
    }
    optimizer = optimizer_constructors[args.optimizer.lower()](
        optimizer_grouped_parameters, args.learning_rate)
    dc_optimizer = optimizer_constructors[args.dc_optimizer.lower()](
        dc_model.parameters(), args.dc_learning_rate)
    if args.scheduler.lower() == "linear":
        print('Total steps', t_total)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
        dc_scheduler = get_linear_schedule_with_warmup(
            dc_optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    elif args.scheduler.lower() == "cosine":
        scheduler = CosineAnnealingLR(optimizer, t_total, 1e-8)
        dc_scheduler = CosineAnnealingLR(dc_optimizer, t_total, 1e-8)
    elif args.scheduler.lower() == "step":
        # reduce learning rate by a half every 50k steps
        scheduler = StepLR(optimizer, step_size=50000, gamma=0.5)
        dc_scheduler = StepLR(dc_optimizer, step_size=50000, gamma=0.5)
    else:
        raise Exception(
            "Scheduler {0} not recognized!".format(args.scheduler))
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ) and args.load_optimizer_scheduler:
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(
            os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(
            os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        if 'apex' not in sys.modules:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        # amp.initialize accepts lists and returns lists in the same order.
        [model, dc_model, static_dc_model], [optimizer, dc_optimizer] = amp.initialize(
            [model, dc_model, static_dc_model],
            [optimizer, dc_optimizer],
            opt_level=args.fp16_opt_level
        )
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
        dc_model = torch.nn.DataParallel(dc_model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
        dc_model = torch.nn.parallel.DistributedDataParallel(
            dc_model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=False,
        )
        static_dc_model = torch.nn.parallel.DistributedDataParallel(
            static_dc_model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d",
                args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    dyn_lamb = args.lamb  # dynamic lamb, the lamb that's actually used
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to gobal_step of last saved checkpoint from model path
        try:
            global_step = int(
                args.model_name_or_path.split("-")[-1].split("/")[0])
            epochs_trained = global_step // (args.expected_train_size //
                                             args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (
                args.expected_train_size // args.gradient_accumulation_steps)
            logger.info(
                " Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(
                " Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch",
                        steps_trained_in_current_epoch)
        except:
            # Checkpoint path carries no step suffix: start from the weights only.
            logger.info(" Start training from a pretrained model")
    tr_loss = 0  # useless but just keep it
    # Scalars accumulated between logging events; averaged/reset at log time.
    optim_monitors = [
        'loss_adv_D', 'loss_adv_M', 'loss_ranking',
        'dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P',
        'dc_pre_softmax_logits_0', 'dc_pre_softmax_logits_1',
        'dc_post_softmax_prob_0', 'dc_post_softmax_prob_1',
        'embedding_norm',
    ]
    optim_cumulator = {k: 0.0 for k in optim_monitors}
    model_parts = ['roberta', 'projection']
    model_parts_params = {
        'roberta': [p for n, p in model.named_parameters() if 'embeddingHead' not in n],
        'projection': [p for n, p in model.named_parameters() if 'embeddingHead' in n],
        # 'domain_classifier': dc_model.parameters(),
    }
    grad_norm_cumulator = {k: 0.0 for k in model_parts}
    grad_norm_cumulator.update({k+'-clipped': 0.0 for k in model_parts})
    grad_norm_cumulator.update({
        'domain_classifier': 0.0, 'domain_classifier-clipped': 0.0
    })
    model.zero_grad()
    model.train()
    dc_model.zero_grad()
    dc_model.train()
    tqdm_disable = True
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch",
        disable=tqdm_disable or args.local_rank not in [-1, 0],
    )  # each iter is 1 epoch
    set_seed(args)  # Added here for reproductibility
    # Rolling FIFO of detached embeddings, used when dc_rep_method == 'async'.
    accumulated_srd_embs = []
    accumulated_tgd_embs = []
    prev_dry_dc_state_dict = None
    for m_epoch in train_iterator:
        if is_first_worker():
            tb_writer.add_scalar(
                'epoch', m_epoch, global_step
            )
        # get srd and tgd batches
        epoch_dataloader, _ = build_dl_iter_from_file(args, train_file, file_process_fn)
        _, tgd_epoch_iter = build_dl_iter_from_file(args, tgd_file, file_process_fn)
        for step, batch in tqdm(
            enumerate(epoch_dataloader), desc="Iteration",
            disable=tqdm_disable or args.local_rank not in [-1, 0]
        ):
            model.train()  # ?
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            # get srd batch and inputs
            # NOTE(review): global_step is incremented here AND again after
            # optimizer.step() further below, i.e. twice per accumulation
            # boundary — confirm this double increment is intentional.
            if step % args.gradient_accumulation_steps == 0:
                global_step += 1
            batch = tuple(t.to(args.device).long() for t in batch)
            batch_size = batch[0].shape[0]
            inputs = build_input_from_batch(args, batch, mode='full')
            # get tgd batch and inputs
            tgd_batch, tgd_epoch_iter = get_next(
                tgd_epoch_iter, args, tgd_file, file_process_fn, batch_size)
            tgd_batch = tuple(t.to(args.device).long() for t in tgd_batch)
            tgd_query_inputs = build_input_from_batch(args, tgd_batch, mode='query')
            # Alternate target-domain passages between positives and negatives.
            if step % 2 == 0:
                tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='pos_doc')
            else:
                tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='neg_doc')
            ##### 1. forward of the encoder model #####
            # no_sync() suppresses DDP gradient all-reduce on non-boundary steps.
            if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                outputs = model(**inputs, output_dc_emb=True)
            else:
                with model.no_sync():
                    outputs = model(**inputs, output_dc_emb=True)
            ranking_loss = outputs[0]  # ranking loss
            # outputs[1] holds (query_emb, pos_doc_emb, neg_doc_emb);
            # pick pos/neg doc embedding on alternating steps to mirror tgd.
            if step % 2 == 0:
                srd_embs = [outputs[1][0], outputs[1][1]]
            else:
                srd_embs = [outputs[1][0], outputs[1][2]]
            if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
                tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
            else:
                with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                    tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
                    tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
            tgd_embs = [tgd_query_emb, tgd_doc_emb]
            # torch.tensor(x) copies and detaches from the autograd graph
            # (emits a UserWarning; x.detach().clone() is the quiet equivalent).
            detached_srd_embs = [torch.tensor(x) for x in srd_embs]
            detached_tgd_embs = [torch.tensor(x) for x in tgd_embs]
            if args.dc_rep_method == 'async':
                # Keep at most dc_rep_steps past batches of detached embeddings.
                if len(accumulated_srd_embs) == args.dc_rep_steps:
                    accumulated_srd_embs.pop(0)
                    accumulated_tgd_embs.pop(0)
                accumulated_srd_embs.append(detached_srd_embs)
                accumulated_tgd_embs.append(detached_tgd_embs)
            # /4 averages over the four embedding tensors (srd/tgd x query/doc).
            for emb in srd_embs+tgd_embs:
                optim_cumulator['embedding_norm'] += emb.norm(dim=1).mean() / 4
            if args.n_gpu > 1:
                ranking_loss = ranking_loss.mean()
            if args.gradient_accumulation_steps > 1:
                ranking_loss = ranking_loss / args.gradient_accumulation_steps
            optim_cumulator['loss_ranking'] += ranking_loss.item()
            # 2. feed detached embeddings to the dc_model and BP L_adv_D
            # Iteration 0 only records diagnostics; real D updates start at 1.
            for dc_rep_step in range(1+args.dc_rep_steps):
                if args.dc_rep_method == 'repeat':
                    srd_dc_input_embs = detached_srd_embs
                    tgd_dc_input_embs = detached_tgd_embs
                elif args.dc_rep_method == 'async':
                    which_step = min(dc_rep_step, len(accumulated_srd_embs)-1)
                    srd_dc_input_embs = accumulated_srd_embs[which_step]
                    tgd_dc_input_embs = accumulated_tgd_embs[which_step]
                if dc_rep_step == 0:
                    # Diagnostics pass uses the raw (uncatted) tensors directly.
                    batched_srd_dc_input_embs = srd_dc_input_embs
                    batched_tgd_dc_input_embs = tgd_dc_input_embs
                elif dc_rep_step % args.dc_rep_step_per_batch != 0:
                    # Micro-batch not full yet: stash embeddings and postpone.
                    batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
                    batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
                    batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
                    batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
                    continue
                else:
                    # Micro-batch full: append the last slice and concatenate.
                    batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
                    batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
                    batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
                    batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
                    batched_srd_dc_input_embs[0] = torch.cat(batched_srd_dc_input_embs[0])
                    batched_srd_dc_input_embs[1] = torch.cat(batched_srd_dc_input_embs[1])
                    batched_tgd_dc_input_embs[0] = torch.cat(batched_tgd_dc_input_embs[0])
                    batched_tgd_dc_input_embs[1] = torch.cat(batched_tgd_dc_input_embs[1])
                # 2.1 feed detached embeddings to the dc_model
                L_adv_D = 0.0
                label_size = batch_size * (1 if dc_rep_step == 0 else args.dc_rep_step_per_batch)
                srd_labels = torch.tensor([0] * label_size, device=args.device)
                tgd_labels = torch.tensor([1] * label_size, device=args.device)
                for i_emb, emb in enumerate(batched_srd_dc_input_embs):
                    labels = srd_labels
                    if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                        dc_srd_outputs = dc_model(emb, labels=labels)
                    else:
                        with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                            dc_srd_outputs = dc_model(emb, labels=labels)
                    L_adv_D += dc_srd_outputs[1] * args.dc_rep_step_per_batch  # scale up because of the average in cross_entropy
                    if dc_rep_step == 0:
                        # i_emb 0 is the query embedding, 1 is the passage embedding.
                        suffix = 'Q' if i_emb == 0 else 'P'
                        optim_cumulator[f'dc_total_{suffix}'] += dc_srd_outputs[2][0]
                        optim_cumulator[f'dc_correct_{suffix}'] += dc_srd_outputs[2][1]
                        optim_cumulator['dc_pre_softmax_logits_0'] += dc_srd_outputs[0][:, 0].mean() / 4
                        optim_cumulator['dc_pre_softmax_logits_1'] += dc_srd_outputs[0][:, 1].mean() / 4
                        probs = torch.softmax(dc_srd_outputs[0], dim=1)
                        optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
                        optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
                for i_emb, emb in enumerate(batched_tgd_dc_input_embs):
                    labels = tgd_labels
                    if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                        dc_tgd_outputs = dc_model(emb, labels=labels)
                    else:
                        with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                            dc_tgd_outputs = dc_model(emb, labels=labels)
                    L_adv_D += dc_tgd_outputs[1] * args.dc_rep_step_per_batch  # scale up because of the average in cross_entropy
                    if dc_rep_step == 0:
                        suffix = 'Q' if i_emb == 0 else 'P'
                        optim_cumulator[f'dc_total_{suffix}'] += dc_tgd_outputs[2][0]
                        optim_cumulator[f'dc_correct_{suffix}'] += dc_tgd_outputs[2][1]
                        optim_cumulator['dc_pre_softmax_logits_0'] += dc_tgd_outputs[0][:, 0].mean() / 4
                        optim_cumulator['dc_pre_softmax_logits_1'] += dc_tgd_outputs[0][:, 1].mean() / 4
                        probs = torch.softmax(dc_tgd_outputs[0], dim=1)
                        optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
                        optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
                if dc_rep_step % args.dc_rep_step_per_batch == 0:
                    # Start collecting the next micro-batch.
                    batched_srd_dc_input_embs = [[], []]
                    batched_tgd_dc_input_embs = [[], []]
                if dc_rep_step == 0:
                    continue  # this dc_rep_step is only for logging things for optim_cumulator
                if args.n_gpu > 1:
                    L_adv_D = L_adv_D.mean()  # mean() to average on multi-gpu parallel training
                if args.gradient_accumulation_steps > 1:
                    L_adv_D = L_adv_D / args.gradient_accumulation_steps
                optim_cumulator['loss_adv_D'] += L_adv_D.item() / args.dc_rep_steps
                # 2.2 BP of L_adv_D; dc_optimizer update
                if args.fp16:
                    with amp.scale_loss(L_adv_D, dc_optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                        L_adv_D.backward()
                    else:
                        with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                            L_adv_D.backward()
                if step % args.gradient_accumulation_steps == 0:
                    grad_norm_cumulator['domain_classifier'] += compute_total_grad_L2_norm(
                        dc_model.parameters()
                    ) / args.dc_rep_steps
                    if not args.no_gn_clip:
                        if args.fp16:
                            torch.nn.utils.clip_grad_norm_(
                                amp.master_params(dc_optimizer), args.max_grad_norm)
                        else:
                            torch.nn.utils.clip_grad_norm_(
                                dc_model.parameters(), args.max_grad_norm)
                    grad_norm_cumulator['domain_classifier-clipped'] += compute_total_grad_L2_norm(
                        dc_model.parameters()
                    ) / args.dc_rep_steps
                    dc_optimizer.step()
                    dc_model.zero_grad()
            if step % args.gradient_accumulation_steps == 0:
                dc_scheduler.step()  # this is outside of the dc_rep_step loop
            # 3.1 copy the dc_model, feed (undetached) embeddings to it
            get_module(static_dc_model).load_state_dict(get_module(dc_model).state_dict())
            L_adv_M = 0.0
            if args.dc_loss_choice == 'minimax':
                srd_labels = torch.tensor([0] * batch_size, device=args.device)
                tgd_labels = torch.tensor([1] * batch_size, device=args.device)
            elif args.dc_loss_choice == 'gan':
                # GAN-style: push target embeddings toward the source label.
                tgd_labels = torch.tensor([0] * batch_size, device=args.device)
            elif args.dc_loss_choice == 'confusion':
                srd_labels = 'uniform'
                tgd_labels = 'uniform'
            else:
                raise NotImplementedError()
            if args.dc_loss_choice != 'gan':
                for emb in srd_embs:
                    if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                        dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
                    else:
                        with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                            dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
                    L_adv_M += dc_srd_outputs[1]
            for emb in tgd_embs:
                if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                    dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
                else:
                    with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                        dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
                L_adv_M += dc_tgd_outputs[1]
            if args.dc_loss_choice == 'minimax':
                # Encoder maximizes classifier loss under the minimax objective.
                L_adv_M = -L_adv_M
            L_adv_M *= dyn_lamb
            if args.n_gpu > 1:
                L_adv_M = L_adv_M.mean()
            if args.gradient_accumulation_steps > 1:
                L_adv_M = L_adv_M / args.gradient_accumulation_steps
            optim_cumulator['loss_adv_M'] += L_adv_M.item()
            # 3.2 BP of ranking loss and L_adv_M; optimizer update
            loss = ranking_loss + L_adv_M
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                    loss.backward()
                else:
                    with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                        loss.backward()
            if step % args.gradient_accumulation_steps == 0:
                for model_part, params in model_parts_params.items():
                    grad_norm_cumulator[model_part] += compute_total_grad_L2_norm(params)
                if not args.no_gn_clip:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                for model_part, params in model_parts_params.items():
                    grad_norm_cumulator[model_part+'-clipped'] += compute_total_grad_L2_norm(params)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
            # end of the main part of training
            if step % args.gradient_accumulation_steps == 0:
                if args.lamb_reduce_to_half_steps > 0:
                    if is_first_worker():
                        tb_writer.add_scalar("lambda", dyn_lamb, global_step)
                    # Exponential decay: halves every lamb_reduce_to_half_steps.
                    dyn_lamb = args.lamb * 2**(-global_step / args.lamb_reduce_to_half_steps)
                if (args.logging_steps > 0 and global_step % args.logging_steps == 0):
                    logs = {}
                    logs["linear_layer_L2norm"] = get_module(dc_model).layers[0].weight.norm().item()
                    logs["linear_layer_mean"] = get_module(dc_model).layers[0].weight.mean().item()
                    logs["learning_rate"] = scheduler.get_last_lr()[0]
                    logs["learning_rate_dc"] = dc_optimizer.param_groups[0]['lr']
                    # 1e-10 guards against division by zero before any counts.
                    logs["dc_acc_Q"] = optim_cumulator['dc_correct_Q'] / (1e-10 + optim_cumulator['dc_total_Q'])
                    logs["dc_acc_P"] = optim_cumulator['dc_correct_P'] / (1e-10 + optim_cumulator['dc_total_P'])
                    for k in optim_monitors:
                        if k not in ['dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P']:
                            logs[k] = float(optim_cumulator[k] / args.logging_steps / args.gradient_accumulation_steps)
                    optim_cumulator = {k: 0.0 for k in optim_monitors}  # reset
                    if is_first_worker():
                        for key, value in logs.items():
                            tb_writer.add_scalar(key, value, global_step)
                        logs.update({k: v/args.logging_steps for k, v in grad_norm_cumulator.items()})
                        logger.info(json.dumps({**logs, **{"step": global_step}}))
                        for key, value in grad_norm_cumulator.items():
                            tb_writer.add_scalar(
                                'grad_norm-'+key,
                                value / args.logging_steps,
                                global_step)
                            grad_norm_cumulator[key] = 0.0  # reset
                if args.eval_steps > 0 and global_step % args.eval_steps == 0:
                    prev_dry_dc_state_dict = intrain_dev_eval(
                        args, global_step, model, tb_writer, prev_dry_dc_state_dict)
                    intrain_save_checkpoint(
                        args, global_step, model, tokenizer, optimizer, scheduler)
            if args.max_steps > 0 and global_step > args.max_steps:
                train_iterator.close()
                break
    if args.local_rank == -1 or torch.distributed.get_rank() == 0:
        tb_writer.close()
    return global_step, tr_loss / global_step
def load_stuff(model_type, args):
    """Resolve task settings on `args` and load config, tokenizer and model.

    Returns the tuple (config, tokenizer, model, config_obj) where config_obj
    is the MSMarcoConfigDict entry for `model_type`.
    """
    # Task bookkeeping: binary relevance classification.
    args.task_name = args.task_name.lower()
    args.output_mode = "classification"
    args.num_labels = len(["0", "1"])
    # Record the distributed world size on args for later use.
    if args.local_rank == -1:
        args.world_size = 1
    else:
        args.world_size = torch.distributed.get_world_size()
        print('world_size', args.world_size)
    # Only rank 0 downloads model/vocab; other ranks wait here first.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    config_obj = MSMarcoConfigDict[model_type]
    model_args = type('', (), {})()
    model_args.use_mean = config_obj.use_mean
    name_or_path = args.model_name_or_path
    cache = args.cache_dir if args.cache_dir else None
    config = config_obj.config_class.from_pretrained(
        args.config_name if args.config_name else name_or_path,
        num_labels=args.num_labels,
        finetuning_task=args.task_name,
        cache_dir=cache,
    )
    config.output_hidden_states = True
    tokenizer = config_obj.tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=cache,
    )
    model = config_obj.model_class.from_pretrained(
        name_or_path,
        from_tf=bool(".ckpt" in name_or_path),
        config=config,
        cache_dir=cache,
        model_argobj=model_args,
    )
    # Rank 0 is done downloading; release the waiting ranks.
    if args.local_rank == 0:
        torch.distributed.barrier()
    model.to(args.device)
    return config, tokenizer, model, config_obj
def get_arguments():
    """Build the CLI parser for adversarial dense-retrieval training, parse
    sys.argv, and normalize the in-train-dev dataset lists so that
    'tinymsmarco' is first and the target-domain dataset is second.

    Returns:
        argparse.Namespace with `intraindev_data_name`/`intraindev_data_dir`
        converted from comma-separated strings to reordered lists.
    """
    parser = argparse.ArgumentParser()
    # required arguments
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--tgd_data_name",
        default=None,
        type=str,
        required=False,
        help="The target domain dataset name; if there are multiple, separate with commas.",
    )
    parser.add_argument(
        "--tgd_data_dir",
        default=None,
        type=str,
        required=False,
        help="The target domain input data dir; if there are multiple, separate with commas.",
    )
    parser.add_argument(
        "--intraindev_data_dir",
        default=None,
        type=str,
        required=False,
        help="The dev set data dir; if there are multiple, separate with commas.",
    )
    parser.add_argument(
        "--intraindev_data_name",
        default=None,
        type=str,
        required=False,
        help="The dev set dataset name; if there are multiple, separate with commas.",
    )
    parser.add_argument(
        "--train_model_type",
        default=None,
        type=str,
        required=True,
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--saved_embedding_dir",
        default="",
        type=str,
        help="The directory where intraindev embeddings are dumped",
    )
    # Sequence-length limits for passages and queries.
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    # Run-mode flags.
    parser.add_argument(
        "--do_train",
        action="store_true",
        help="Whether to run training.",
    )
    parser.add_argument(
        "--do_eval",
        action="store_true",
        help="Whether to run eval on the dev set.",
    )
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Rul evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--log_dir",
        default=None,
        type=str,
        help="Tensorboard log dir",
    )
    parser.add_argument(
        "--eval_type",
        default="full",
        type=str,
        help="MSMarco eval type - dev full or small",
    )
    # Optimizer/scheduler selection for the ranker and the domain classifier.
    parser.add_argument(
        "--optimizer",
        default="lamb",
        type=str,
        help="Optimizer - lamb or adamW or SGD",
    )
    parser.add_argument(
        "--dc_optimizer",
        default="lamb",
        type=str,
        help="Optimizer - lamb or adamW or SGD",
    )
    parser.add_argument(
        "--scheduler",
        default="linear",
        type=str,
        help="Scheduler - linear, cosine, or step",
    )
    # Adversarial domain-classifier options.
    parser.add_argument(
        "--dc_loss_choice",
        default="minimax",
        type=str,
        help="Adversarial loss choice (ADDA paper, Table 1, 4th column).")
    parser.add_argument(
        "--dc_layers",
        default=1,
        type=int,
        help="How many layers to use for the domain classifier",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for the ranker model.",
    )
    parser.add_argument(
        "--dc_learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for the domain classifier.",
    )
    parser.add_argument(
        "--lamb",
        default=0.0,
        type=float,
        help="HP for GAN loss.",
    )
    parser.add_argument(
        "--lamb_reduce_to_half_steps",
        default=0,
        type=int,
        help="Reduce dyn_lamb exponentially, and it will be reduced to a half after X steps.",
    )
    parser.add_argument(
        "--dc_rep_steps",
        default=1,
        type=int,
        help="Update dc_model over a single batch for X steps.",
    )
    parser.add_argument(
        "--dc_rep_method",
        default="repeat",
        type=str,
        help="Use what data for dc repetitive training. "
        "repeat: use the same batch repetitively; "
        "async: use embeddings recorded from previous batches."
    )
    parser.add_argument(
        "--dc_rep_step_per_batch",
        default=1,
        type=int,
        help="For dc_rep, how many steps of embeddings to put in one batch",
    )
    parser.add_argument(
        "--no_gn_clip",
        action="store_true",
        help="Whether to disable grad norm clipping",
    )
    # Generic optimization hyper-parameters.
    parser.add_argument(
        "--weight_decay",
        default=0.0,
        type=float,
        help="Weight decay if we apply some.",
    )
    parser.add_argument(
        "--dropout_rate",
        default=0.1,
        type=float,
        help="Dropout probability",
    )
    parser.add_argument(
        "--adam_epsilon",
        default=1e-8,
        type=float,
        help="Epsilon for Adam optimizer.",
    )
    parser.add_argument(
        "--max_grad_norm",
        default=1.0,
        type=float,
        help="Max gradient norm.",
    )
    parser.add_argument(
        "--num_train_epochs",
        default=3.0,
        type=float,
        help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--warmup_steps",
        default=0,
        type=int,
        help="Linear warmup over warmup_steps.",
    )
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=500,
        help="Log every X updates steps.",
    )
    parser.add_argument(
        "--eval_steps",
        type=int,
        default=500,
        help="Evaluate and save checkpoint every X global steps.",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--no_cuda",
        action="store_true",
        help="Avoid using CUDA when available",
    )
    parser.add_argument(
        "--overwrite_output_dir",
        action="store_true",
        help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="random seed for initialization",
    )
    # Mixed-precision (apex) options.
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--expected_train_size",
        default=100000,
        type=int,
        help="Expected train dataset size",
    )
    parser.add_argument(
        "--load_optimizer_scheduler",
        default=False,
        action="store_true",
        help="load scheduler from checkpoint or not",
    )
    # Distributed / remote-debugging plumbing.
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank",
    )
    parser.add_argument(
        "--server_ip",
        type=str,
        default="",
        help="For distant debugging.",
    )
    parser.add_argument(
        "--server_port",
        type=str,
        default="",
        help="For distant debugging.",
    )
    args = parser.parse_args()
    # sort intraindev datasets, so that tinymsmarco is the first and the target domain dataset is the second
    # NOTE(review): this assumes --intraindev_data_name / --intraindev_data_dir
    # are provided and that --tgd_data_name appears in the list; otherwise the
    # split/.index calls below raise — confirm callers always pass them.
    args.intraindev_data_name = args.intraindev_data_name.split(',')
    args.intraindev_data_dir = args.intraindev_data_dir.split(',')
    assert args.intraindev_data_name[0] == 'tinymsmarco'
    assert len(args.intraindev_data_name) >= 2
    tgd_position = args.intraindev_data_name.index(args.tgd_data_name)
    args.intraindev_data_name[1], args.intraindev_data_name[tgd_position] = args.intraindev_data_name[tgd_position], args.intraindev_data_name[1]
    args.intraindev_data_dir[1], args.intraindev_data_dir[tgd_position] = args.intraindev_data_dir[tgd_position], args.intraindev_data_dir[1]
    return args
def set_env(args):
    """Validate the output directory, optionally attach a remote debugger,
    configure device/distributed state and logging, and seed the RNGs."""
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)
            and args.do_train and not args.overwrite_output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Optional remote-debugging hook (ptvsd / VS Code attach).
    if args.server_ip and args.server_port:
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(
            address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Device / distributed setup.
    if args.local_rank != -1 and not args.no_cuda:
        # One process per GPU; NCCL backend synchronizes the processes.
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    else:
        cuda_ok = torch.cuda.is_available() and not args.no_cuda
        args.device = torch.device("cuda" if cuda_ok else "cpu")
        args.n_gpu = torch.cuda.device_count()
    # Only ranks -1/0 log at INFO; other workers are quieter.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        args.device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    set_seed(args)
def save_checkpoint(args, model, tokenizer):
    """Persist model, tokenizer and args from the first worker, then sync ranks.

    Artifacts are written with save_pretrained() so they can be reloaded via
    from_pretrained(); training args go to output_dir/training_args.bin.
    """
    if args.do_train and is_first_worker():
        os.makedirs(args.output_dir, exist_ok=True)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Strip a DataParallel/DDP wrapper if present before saving.
        unwrapped = getattr(model, "module", model)
        unwrapped.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Keep the training arguments next to the weights for reproducibility.
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # All ranks wait until the first worker has finished writing.
    if args.local_rank != -1:
        dist.barrier()
def evaluation(args, model, tokenizer):
    """Run distributed passage-ranking evaluation (MRR) when --do_eval is set.

    The metrics are printed by the first worker; the returned dict is
    currently always empty (kept for caller compatibility).
    Fix: removed the unused locals `global_step` and `prefix` (dead code).
    """
    results = {}
    if args.do_eval:
        # Evaluate the supplied checkpoint (fall back to the output dir).
        model_dir = args.model_name_or_path if args.model_name_or_path else args.output_dir
        checkpoints = [model_dir]
        for checkpoint in checkpoints:
            model.eval()
            reranking_mrr, full_ranking_mrr = passage_dist_eval(
                args, model, tokenizer)
            if is_first_worker():
                print(
                    "Reranking/Full ranking mrr: {0}/{1}".format(str(reranking_mrr), str(full_ranking_mrr)))
            # Keep distributed workers in lockstep between checkpoints.
            if args.local_rank != -1:
                dist.barrier()
    return results
def main():
    """Train (optionally), checkpoint, then evaluate the ranker + domain classifier."""
    args = get_arguments()
    set_env(args)
    config, tokenizer, model, configObj = load_stuff(
        args.train_model_type, args)
    # Domain classifier (source vs. target = 2 classes) over encoder outputs.
    dc_model = DomainClassifier(
        args,
        input_size=config.hidden_size,
        n_class=2
    )
    dc_model.to(args.device)
    # Training
    if args.do_train:
        logger.info("Training/evaluation parameters %s", args)

        def file_process_fn(line, i):
            # Tokenize one raw tsv line into model inputs.
            return configObj.process_fn(line, i, tokenizer, args)

        train_fname = args.data_dir + "/triples.train.small.tsv"
        # Context managers guarantee both files are closed even if train() raises
        # (the original leaked the handles on an exception).
        with open(train_fname, encoding="utf-8-sig") as train_file, \
                open(os.path.join(args.tgd_data_dir, "triples.simple.tsv")) as tgd_file:
            global_step, tr_loss = train(
                args, model, dc_model, tokenizer, train_file, tgd_file, file_process_fn)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    save_checkpoint(args, model, tokenizer)
    results = evaluation(args, model, tokenizer)
    return results
if __name__ == "__main__":
main()
| 44,416 | 36.045038 | 145 | py |
modir | modir-master/drivers/run_ann_data_gen.py | import sys
sys.path += ['../']
import torch
import os
from collections import defaultdict
import faiss
from utils.util import (
barrier_array_merge,
convert_to_string_id,
is_first_worker,
StreamingDataset,
EmbeddingCache,
get_checkpoint_no,
get_latest_ann_data
)
import csv
import copy
import transformers
from transformers import (
AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
RobertaModel,
)
from data.msmarco_data import GetProcessingFn
from model.models import MSMarcoConfigDict, ALL_MODELS
from torch import nn
import torch.distributed as dist
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
import numpy as np
from os.path import isfile, join
import argparse
import json
import logging
import random
import time
import pytrec_eval
torch.multiprocessing.set_sharing_strategy('file_system')
logger = logging.getLogger(__name__)
# ANN - active learning ------------------------------------------------------
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
def get_latest_checkpoint(args):
    """Return (checkpoint_dir, step) for the newest complete checkpoint.

    Falls back to (args.init_model_dir, 0) when the training dir does not
    exist or holds no usable checkpoint.
    """
    if not os.path.exists(args.training_dir):
        return args.init_model_dir, 0
    subdirectories = list(next(os.walk(args.training_dir))[1])

    def valid_checkpoint(checkpoint):
        # A checkpoint is complete once its scheduler state has been written.
        scheduler_path = os.path.join(args.training_dir, checkpoint, "scheduler.pt")
        return os.path.exists(scheduler_path)

    checkpoint_nums = [
        get_checkpoint_no(name)
        for name in subdirectories
        if valid_checkpoint(name)
    ]
    if args.fix_refresh_rate > 0:
        # Only refresh at fixed global-step multiples.
        checkpoint_nums = [
            n for n in checkpoint_nums if n % args.fix_refresh_rate == 0]
    if checkpoint_nums:
        latest = max(checkpoint_nums)
        return os.path.join(args.training_dir, "checkpoint-" + str(latest)) + "/", latest
    return args.init_model_dir, 0
def load_positive_ids(data_path, dev_set=False):
    """Load qid -> positive docid(s) from a TREC-style qrel tsv.

    Train split: qid -> single positive docid (relevance must be "1").
    Dev split:   qid -> {docid: graded relevance}, negatives clipped to 0.
    """
    logger.info(f"Loading query_2_pos_docid from {data_path}")
    qrel_name = "dev-qrel.tsv" if dev_set else "train-qrel.tsv"
    query_positive_id = {}
    with open(os.path.join(data_path, qrel_name), 'r', encoding='utf8') as f:
        for topicid, docid, rel in csv.reader(f, delimiter="\t"):
            topicid = int(topicid)
            docid = int(docid)
            if dev_set:
                query_positive_id.setdefault(topicid, {})[docid] = max(0, int(rel))
            else:
                assert rel == "1"
                query_positive_id[topicid] = docid
    return query_positive_id
def load_model(args, checkpoint_path):
    """Load config, tokenizer and model for args.model_type from checkpoint_path.

    Mutates args (model_type lower-cased, model_name_or_path set), moves the
    model to args.device and, in distributed runs, wraps it in
    DistributedDataParallel pinned to the local rank.
    """
    # Binary relevance labels; num_labels is what the HF config actually needs.
    label_list = ["0", "1"]
    num_labels = len(label_list)
    args.model_type = args.model_type.lower()
    configObj = MSMarcoConfigDict[args.model_type]
    args.model_name_or_path = checkpoint_path
    config = configObj.config_class.from_pretrained(
        args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task="MSMarco",
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = configObj.tokenizer_class.from_pretrained(
        args.model_name_or_path,
        do_lower_case=True,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = configObj.model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # Move to device BEFORE DDP wrapping so DDP broadcasts on-device weights.
    model.to(args.device)
    logger.info("Inference parameters %s", args)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[
                args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    return config, tokenizer, model
def InferenceEmbeddingFromStreamDataLoader(
        args,
        model,
        train_dataloader,
        is_query_inference=True,
        prefix=""):
    """Encode every example from train_dataloader with the query or body encoder.

    Returns (embedding, embedding2id): an [N, dim] numpy array and the matching
    array of example ids. For 3-D (multi-chunk, long-sequence) encoder outputs
    each chunk is appended as its own row, so ids may repeat.
    """
    # expect dataset from ReconstructTrainingSet
    results = {}  # unused; kept for parity with the original code
    eval_batch_size = args.per_gpu_eval_batch_size
    # Inference!
    logger.info("***** Running ANN Embedding Inference *****")
    logger.info(" Batch size = %d", eval_batch_size)
    embedding = []
    embedding2id = []
    if args.local_rank != -1:
        dist.barrier()
    model.eval()
    for idx, batch in enumerate(tqdm(train_dataloader,
                                     desc="Inferencing",
                                     disable=args.local_rank not in [-1,0],
                                     position=0,
                                     leave=True)):
        # batch[3] holds the example ids; batch[0]/[1] are token ids / mask.
        idxs = batch[3].detach().numpy()  # [#B]
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0].long(),
                "attention_mask": batch[1].long()}
            if is_query_inference:
                if args.world_size == 1:
                    embs = model.query_emb(**inputs)
                else:
                    # DDP-wrapped model: the encoder lives under .module.
                    embs = model.module.query_emb(**inputs)
            else:
                if args.world_size == 1:
                    embs = model.body_emb(**inputs)
                else:
                    embs = model.module.body_emb(**inputs)
        embs = embs.detach().cpu().numpy()
        # check for multi chunk output for long sequence
        if len(embs.shape) == 3:
            for chunk_no in range(embs.shape[1]):
                embedding2id.append(idxs)
                embedding.append(embs[:, chunk_no, :])
        else:
            embedding2id.append(idxs)
            embedding.append(embs)
    embedding = np.concatenate(embedding, axis=0)
    embedding2id = np.concatenate(embedding2id, axis=0)
    return embedding, embedding2id
# streaming inference
def StreamInferenceDoc(args, model, fn, prefix, f, output_path, is_query_inference=True):
    """Embed a stream f (preprocessed by fn) and merge shards across ranks.

    Each rank embeds its slice of the stream, then barrier_array_merge
    concatenates the per-rank pieces; with only_load_in_master=True the full
    arrays are presumably returned on the master rank only — confirm in
    utils.util before relying on non-master return values.
    """
    inference_batch_size = args.per_gpu_eval_batch_size  # * max(1, args.n_gpu)
    inference_dataset = StreamingDataset(f, fn)
    inference_dataloader = DataLoader(
        inference_dataset,
        batch_size=inference_batch_size)
    if args.local_rank != -1:
        dist.barrier()  # directory created
    _embedding, _embedding2id = InferenceEmbeddingFromStreamDataLoader(
        args, model, inference_dataloader, is_query_inference=is_query_inference, prefix=prefix)
    logger.info("merging embeddings")
    # preserve to memory
    full_embedding = barrier_array_merge(
        args,
        _embedding,
        prefix=prefix + "_emb_p_",
        output_path=output_path,
        load_cache=False,
        only_load_in_master=True)
    full_embedding2id = barrier_array_merge(
        args,
        _embedding2id,
        prefix=prefix + "_embid_p_",
        output_path=output_path,
        load_cache=False,
        only_load_in_master=True)
    return full_embedding, full_embedding2id
def generate_new_ann(
        args,
        output_num,
        checkpoint_path,
        srd_query_positive_id,
        srd_dev_query_positive_id,
        tgd_query_positive_id,
        latest_step_num):
    """Embed corpora with the given checkpoint, evaluate, and emit ANN training data.

    Steps: (1) embed source-domain dev queries and passages; (2) if
    args.inference, stop after embedding; (3) embed source train queries;
    (4) on the first worker, brute-force search + evaluate dev retrieval and
    dump metrics; (5) if a target domain is configured, embed its passages
    and train queries; (6) on the first worker, write new hard-negative
    training files for target (if any) and source domains.
    """
    config, tokenizer, model = load_model(args, checkpoint_path)
    logger.info("***** inference of srd dev query *****")
    srd_dev_query_collection_path = os.path.join(args.srd_data_dir, "dev-query")
    srd_dev_query_cache = EmbeddingCache(srd_dev_query_collection_path)
    with srd_dev_query_cache as emb:
        srd_dev_query_embedding, srd_dev_query_embedding2id = StreamInferenceDoc(
            args,
            model,
            GetProcessingFn(args, query=True),
            "dev_query_" + str(latest_step_num) + "_",
            emb,
            output_path=args.output_dir,
            is_query_inference=True
        )
    logger.info("***** inference of srd passages *****")
    srd_passage_collection_path = os.path.join(args.srd_data_dir, "passages")
    srd_passage_cache = EmbeddingCache(srd_passage_collection_path)
    with srd_passage_cache as emb:
        srd_passage_embedding, srd_passage_embedding2id = StreamInferenceDoc(
            args,
            model,
            GetProcessingFn(args, query=False),
            "passage_" + str(latest_step_num) + "_",
            emb,
            output_path=args.output_dir,
            is_query_inference=False
        )
    if args.inference:
        # Embedding-only mode: skip evaluation and training-set construction.
        return
    logger.info("***** inference of srd train query *****")
    srd_query_collection_path = os.path.join(args.srd_data_dir, "train-query")
    srd_query_cache = EmbeddingCache(srd_query_collection_path)
    with srd_query_cache as emb:
        srd_query_embedding, srd_query_embedding2id = StreamInferenceDoc(
            args,
            model,
            GetProcessingFn(args, query=True),
            "query_" + str(latest_step_num) + "_",
            emb,
            output_path=args.output_dir,
            is_query_inference=True
        )
    if is_first_worker():
        # ANN search for dev passages and dev queries
        srd_dim = srd_passage_embedding.shape[1]
        print('srd passage embedding shape: ' + str(srd_passage_embedding.shape))
        faiss.omp_set_num_threads(16)
        srd_cpu_index = faiss.IndexFlatIP(srd_dim)
        srd_cpu_index.add(srd_passage_embedding)
        logger.info("***** Done Dev ANN Index *****")
        _, srd_I = srd_cpu_index.search(srd_dev_query_embedding, 100)  # I: [number of queries, topk]
        result_dict, num_queries_srd = EvalDevQuery(
            args, srd_dev_query_embedding2id, srd_passage_embedding2id,
            srd_dev_query_positive_id, srd_I)
        # Prefix metric names with the source-domain corpus name for the dump.
        result_dict_with_srd_name = {}
        for k, v in result_dict.items():
            result_dict_with_srd_name['msmarco-'+k] = v
        dump_eval_result(result_dict_with_srd_name, args.output_dir, output_num, checkpoint_path)
    if args.tgd_data_dir is not None:
        logger.info("***** inference of tgd passages *****")
        tgd_passage_collection_path = os.path.join(args.tgd_data_dir, "passages")
        tgd_passage_cache = EmbeddingCache(tgd_passage_collection_path)
        with tgd_passage_cache as emb:
            tgd_passage_embedding, tgd_passage_embedding2id = StreamInferenceDoc(
                args,
                model,
                GetProcessingFn(args, query=False),
                "passage_" + str(latest_step_num) + "_",
                emb,
                output_path=args.tgd_output_dir,
                is_query_inference=False
            )
        logger.info("***** inference of tgd query *****")
        tgd_query_collection_path = os.path.join(args.tgd_data_dir, "train-query")
        tgd_query_cache = EmbeddingCache(tgd_query_collection_path)
        with tgd_query_cache as emb:
            tgd_query_embedding, tgd_query_embedding2id = StreamInferenceDoc(
                args,
                model,
                GetProcessingFn(args, query=True, tgd=True),
                "query_" + str(latest_step_num) + "_",
                emb,
                output_path=args.tgd_output_dir,
                is_query_inference=True
            )
    if is_first_worker():
        if args.tgd_data_dir is not None:
            # tgd_* embeddings exist only under the same tgd_data_dir guard above.
            construct_new_train_set(
                args,
                tgd_passage_embedding, tgd_passage_embedding2id,
                tgd_query_embedding, tgd_query_embedding2id,
                tgd_query_positive_id,
                output_num,
                checkpoint_path,
                output_path=args.tgd_output_dir
            )
        # the ranking training set: (query, pos_doc, [nearest_neg_doc]*n)
        construct_new_train_set(
            args,
            srd_passage_embedding, srd_passage_embedding2id,
            srd_query_embedding, srd_query_embedding2id,
            srd_query_positive_id,
            output_num,
            checkpoint_path,
            output_path=args.output_dir
        )
    # return result_dict['ndcg@20'], num_queries_dev
def dump_eval_result(result_dict, output_path, output_num, checkpoint_path):
    """Merge result_dict into the JSON metrics file for ANN generation round output_num.

    Existing metrics in ``ann_ndcg_<output_num>`` are preserved (then updated),
    and the originating checkpoint path is recorded under the 'checkpoint' key.
    """
    # Plain concatenation: the original used an f-string with no placeholders.
    ndcg_output_path = os.path.join(
        output_path, "ann_ndcg_" + str(output_num))
    if os.path.exists(ndcg_output_path):
        with open(ndcg_output_path, encoding="utf-8") as fin:
            json_dict = json.load(fin)
    else:
        json_dict = {}
    json_dict.update(result_dict)
    json_dict['checkpoint'] = checkpoint_path
    with open(ndcg_output_path, 'w', encoding="utf-8") as f:
        json.dump(json_dict, f)
def construct_new_nngan_train_set(
    args,
    srd_query_embedding, srd_query_embedding2id,
    srd_passage_embedding, srd_passage_embedding2id,
    tgd_passage_embedding, tgd_passage_embedding2id,
    output_num,
    checkpoint_path,
    output_path,
    max_size,
):
    """Write nearest-neighbour GAN training files pairing same-domain and cross-domain docs."""
    # the domain adaptation training set:
    # THIS ONE NOT USED NOW: (srd_query, [nearest_srd_doc]*n, [nearest_tgd_doc]*n)
    # (srd_doc, [nearest_srd_doc]*n, [nearest_tgd_doc]*n)
    # (tgd_doc, [nearest_tgd_doc]*n, [nearest_srd_doc]*n)
    dim = srd_query_embedding.shape[1]
    faiss.omp_set_num_threads(16)
    srd_passage_index = faiss.IndexFlatIP(dim)
    srd_passage_index.add(srd_passage_embedding)
    tgd_passage_index = faiss.IndexFlatIP(dim)
    tgd_passage_index.add(tgd_passage_embedding)
    logger.info("***** Done srd & tgd passage index *****")
    chunk_factor = args.ann_chunk_factor
    # NOTE(review): the modulo is taken BEFORE the <= 0 clamp below, so
    # ann_chunk_factor == 0 would raise ZeroDivisionError here — confirm
    # whether 0 is ever a legal value.
    effective_idx = output_num % chunk_factor
    if chunk_factor <= 0:
        chunk_factor = 1
    # Source-domain docs as anchors: positives from srd, negatives from tgd.
    search_and_build_dataset(
        args,
        chunk_factor=chunk_factor, effective_idx=effective_idx,
        pos_index=srd_passage_index, pos_index2id=srd_passage_embedding2id,
        neg_index=tgd_passage_index, neg_index2id=tgd_passage_embedding2id,
        query_embedding=srd_passage_embedding, query_embedding2id=srd_passage_embedding2id,
        output_fname = os.path.join(output_path, f"sd_sd_td_{output_num}"),
        max_size=max_size
    )
    # Target-domain docs as anchors: no chunking, no size cap.
    search_and_build_dataset(
        args,
        chunk_factor=1, effective_idx=0,
        pos_index=tgd_passage_index, pos_index2id=tgd_passage_embedding2id,
        neg_index=srd_passage_index, neg_index2id=srd_passage_embedding2id,
        query_embedding=tgd_passage_embedding, query_embedding2id=tgd_passage_embedding2id,
        output_fname = os.path.join(output_path, f"td_td_sd_{output_num}"),
        max_size=int(1e10)
    )
def search_and_build_dataset(
    args,
    chunk_factor, effective_idx,
    pos_index, pos_index2id,
    neg_index, neg_index2id,
    query_embedding, query_embedding2id,
    output_fname,
    max_size,
):
    """Search both indexes for each anchor and write "qid\\tpos_ids\\tneg_ids" lines.

    Anchors are the effective_idx-th of chunk_factor equal slices of
    query_embedding (capped at max_size rows). Positives are sampled with
    replacement from the top nn_topk_training same-index neighbours; negatives
    are the top nn_posneg_sample cross-index neighbours.
    """
    num_queries = len(query_embedding)
    queries_per_chunk = num_queries // chunk_factor
    q_start_idx = queries_per_chunk * effective_idx
    if effective_idx == chunk_factor - 1:
        # Last chunk absorbs the remainder.
        q_end_idx = num_queries
    else:
        q_end_idx = q_start_idx + queries_per_chunk
    q_end_idx = min(q_end_idx, q_start_idx+max_size)
    query_embedding = query_embedding[q_start_idx:q_end_idx]
    query_embedding2id = query_embedding2id[q_start_idx:q_end_idx]
    effective_q_id = set(query_embedding2id.flatten())
    _, pos_I = pos_index.search(query_embedding, args.nn_topk_training)
    _, neg_I = neg_index.search(query_embedding, args.nn_posneg_sample)
    with open(output_fname, 'w') as fout:
        for query_idx in range(pos_I.shape[0]):
            if query_idx % 5000 == 0:
                logger.info(f"query_idx = {query_idx}")
            query_id = query_embedding2id[query_idx]
            if query_id not in effective_q_id:
                continue
            # Sampling WITH replacement over the pos neighbour pool; the anchor
            # itself is currently eligible (the [1:] exclusion is commented out).
            selected_pos_ann_idx = random.choices(
                pos_I[query_idx], #[1:], # excluding itself
                k=args.nn_posneg_sample
            )
            selected_neg_ann_idx = neg_I[query_idx]
            print("{}\t{}\t{}".format(
                query_id,
                ','.join([str(pos_index2id[pid]) for pid in selected_pos_ann_idx]),
                ','.join([str(neg_index2id[pid]) for pid in selected_neg_ann_idx]),
            ), file=fout)
def construct_new_train_set(
    args,
    passage_embedding, passage_embedding2id,
    query_embedding, query_embedding2id,
    training_query_positive_id,
    output_num,
    checkpoint_path,
    output_path,
):
    """Write the next ANN ranking training file (tab-separated qid, pos_pid, neg pids).

    Brute-force searches a flat inner-product index over all passages for the
    current chunk of training queries, selects hard negatives via
    GenerateNegativePassaageID, and writes ann_training_data_<output_num>.
    """
    # ANN search for (train) passages and queries, output the new training set to files
    dim = passage_embedding.shape[1]
    print('passage embedding shape: ' + str(passage_embedding.shape))
    faiss.omp_set_num_threads(16)
    cpu_index = faiss.IndexFlatIP(dim)
    cpu_index.add(passage_embedding)
    logger.info("***** Done ANN Index *****")
    # Construct new training set ==================================
    chunk_factor = args.ann_chunk_factor
    # NOTE(review): modulo before the <= 0 clamp; ann_chunk_factor == 0 would
    # raise ZeroDivisionError here.
    effective_idx = output_num % chunk_factor
    if chunk_factor <= 0:
        chunk_factor = 1
    num_queries = len(query_embedding)
    queries_per_chunk = num_queries // chunk_factor
    q_start_idx = queries_per_chunk * effective_idx
    q_end_idx = num_queries if (
        effective_idx == (
            chunk_factor -
            1)) else (
        q_start_idx +
        queries_per_chunk)
    query_embedding = query_embedding[q_start_idx:q_end_idx]
    query_embedding2id = query_embedding2id[q_start_idx:q_end_idx]
    logger.info(
        "Chunked {} query from {}".format(
            len(query_embedding),
            num_queries))
    # I: [number of queries, topk]
    _, I = cpu_index.search(query_embedding, args.topk_training)
    effective_q_id = set(query_embedding2id.flatten())
    query_negative_passage = GenerateNegativePassaageID(
        args,
        query_embedding2id,
        passage_embedding2id,
        training_query_positive_id,
        I,
        effective_q_id)
    logger.info("***** Construct ANN Triplet *****")
    train_data_output_path = os.path.join(
        output_path, f"ann_training_data_" + str(output_num))
    with open(train_data_output_path, 'w') as f:
        # Shuffle so the training file is not ordered by query id.
        query_range = list(range(I.shape[0]))
        random.shuffle(query_range)
        for query_idx in query_range:
            query_id = query_embedding2id[query_idx]
            if query_id not in effective_q_id or query_id not in training_query_positive_id:
                continue
            pos_pid = training_query_positive_id[query_id]
            f.write(
                "{}\t{}\t{}\n".format(
                    query_id, pos_pid, ','.join(
                        str(neg_pid) for neg_pid in query_negative_passage[query_id])))
def GenerateNegativePassaageID(
        args,
        query_embedding2id,
        passage_embedding2id,
        training_query_positive_id,
        I_nearest_neighbor,
        effective_q_id):
    """Select args.negative_sample ANN negatives per query from the top-k neighbours.

    When args.ann_measure_topk_mrr is set, negatives are the nearest
    neighbours themselves (hardest) and MRR@10 of the positive is reported;
    otherwise negatives are drawn in random order from the full top-k pool.
    Returns {query_id: [neg_pid, ...]}.
    """
    query_negative_passage = {}
    SelectTopK = args.ann_measure_topk_mrr
    mrr = 0  # only meaningful if SelectTopK is True
    num_queries = 0
    for query_idx in range(I_nearest_neighbor.shape[0]):
        query_id = query_embedding2id[query_idx]
        if query_id not in effective_q_id:
            continue
        num_queries += 1
        pos_pid = training_query_positive_id[query_id]
        top_ann_pid = I_nearest_neighbor[query_idx, :].copy()
        if SelectTopK:
            # +1 leaves room in case the positive itself is among the top hits.
            selected_ann_idx = top_ann_pid[:args.negative_sample + 1]
        else:
            # Random order over the whole top-k pool.
            negative_sample_I_idx = list(range(I_nearest_neighbor.shape[1]))
            random.shuffle(negative_sample_I_idx)
            selected_ann_idx = top_ann_pid[negative_sample_I_idx]
        query_negative_passage[query_id] = []
        neg_cnt = 0
        rank = 0
        for idx in selected_ann_idx:
            neg_pid = passage_embedding2id[idx]
            rank += 1
            if neg_pid == pos_pid:
                # Found the positive: credit MRR@10 and never use it as a negative.
                if rank <= 10:
                    mrr += 1 / rank
                continue
            if neg_pid in query_negative_passage[query_id]:
                continue
            if neg_cnt >= args.negative_sample:
                break
            query_negative_passage[query_id].append(neg_pid)
            neg_cnt += 1
    # Guard num_queries == 0: the original divided unconditionally and raised
    # ZeroDivisionError when no query was effective.
    if SelectTopK and num_queries > 0:
        print("Rank:" + str(args.rank) +
              " --- ANN MRR:" + str(mrr / num_queries))
    return query_negative_passage
def EvalDevQuery(
        args,
        query_embedding2id,
        passage_embedding2id,
        dev_query_positive_id,
        I_nearest_neighbor):
    """Score dev retrieval with pytrec_eval; returns (metric dict, #queries).

    Builds per-query rankings from the top-50 unique neighbours (score = -rank)
    and averages nDCG/P at cuts 5/10/20, MRR, MAP@10 and recall@100/@1000.
    """
    # [qid][docid] = docscore, here we use -rank as score, so the higher the rank (1 > 2), the higher the score (-1 > -2)
    prediction = {}
    for query_idx in range(I_nearest_neighbor.shape[0]):
        query_id = query_embedding2id[query_idx]
        prediction[query_id] = {}
        top_ann_pid = I_nearest_neighbor[query_idx, :].copy()
        selected_ann_idx = top_ann_pid[:50]
        rank = 0
        seen_pid = set()
        for idx in selected_ann_idx:
            pred_pid = passage_embedding2id[idx]
            if pred_pid not in seen_pid:
                # this check handles multiple vector per document
                rank += 1
                prediction[query_id][pred_pid] = -rank
                seen_pid.add(pred_pid)
    # use out of the box evaluation script
    evaluator = pytrec_eval.RelevanceEvaluator(
        convert_to_string_id(dev_query_positive_id),
        {'map_cut', 'ndcg_cut', 'recip_rank','recall', 'P'}
    )
    eval_query_cnt = 0
    result = evaluator.evaluate(convert_to_string_id(prediction))
    ndcg = defaultdict(int)
    precision = defaultdict(int)
    Map = 0
    mrr = 0
    recall_1k = 0
    recall_100 = 0
    cuts = [5, 10, 20]
    for k in result.keys():
        eval_query_cnt += 1
        for cut in cuts:
            ndcg[cut] += result[k][f"ndcg_cut_{cut}"]
            precision[cut] += result[k][f"P_{cut}"]
        # NOTE(review): map_cut_10 is MAP truncated at depth 10, not full MAP —
        # confirm this is the intended 'map' metric.
        Map += result[k]["map_cut_10"]
        mrr += result[k]["recip_rank"]
        # recall_1000 can only differ from recall_100 if rankings exceed 100;
        # only 50 are submitted above — both presumably equal here. Verify.
        recall_1k += result[k]["recall_1000"]
        recall_100 += result[k]["recall_100"]
    result_dict = {}
    for cut in cuts:
        result_dict[f'ndcg@{cut}'] = ndcg[cut] / eval_query_cnt
        result_dict[f'p@{cut}'] = precision[cut] / eval_query_cnt
    result_dict['map'] = Map / eval_query_cnt
    result_dict['mrr'] = mrr / eval_query_cnt
    result_dict['recall_1k'] = recall_1k / eval_query_cnt
    result_dict['recall_100'] = recall_100 / eval_query_cnt
    print("Rank:" + str(args.rank) \
        + " --- ANN NDCG@20:" + str(result_dict['ndcg@20']) \
        + " --- ANN MRR:" + str(result_dict['mrr']) \
        + " --- ANN P@20:" + str(result_dict['p@20'])
    )
    return result_dict, eval_query_cnt
def get_arguments():
    """Parse command-line arguments for the ANN data-generation driver.

    Fixes copy-pasted help strings from the original (--per_gpu_eval_batch_size,
    --ann_measure_topk_mrr, --only_keep_latest_embedding_file all carried help
    text belonging to unrelated options).
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--srd_data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir for source domain train set. "
        "Should contain the .tsv files (or other data files) for the task. "
        "For now it's msmarco.",
    )
    parser.add_argument(
        "--tgd_data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir for target domain train set. For now it's cqgtc.",
    )
    parser.add_argument(
        "--training_dir",
        default=None,
        type=str,
        required=True,
        help="Training dir, will look for latest checkpoint dir in here",
    )
    parser.add_argument(
        "--init_model_dir",
        default=None,
        type=str,
        required=True,
        help="Initial model dir, will use this if no checkpoint is found in training_dir",
    )
    parser.add_argument(
        "--last_checkpoint_dir",
        default="",
        type=str,
        help="Last checkpoint used, this is for rerunning this script when some ann data is already generated",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(
            MSMarcoConfigDict.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the training data will be written",
    )
    parser.add_argument(
        "--tgd_output_dir",
        default=None,
        type=str,
        required=False,
        help="The output directory where the tgd data will be written",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        required=True,
        help="The directory where cached data will be written",
    )
    parser.add_argument(
        "--end_output_num",
        default=-1,
        type=int,
        help="Stop after this number of data versions has been generated, default run forever",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence (document) length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum total input query length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--max_query_length_tgd",
        default=None,
        type=int,
        help="The maximum total input query length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded. Default is args.max_query_length. "
        "This argument is used only when target domain is arguana, where max_query_len=512 is needed.",
    )
    parser.add_argument(
        "--max_doc_character",
        default=10000,
        type=int,
        help="used before tokenizer to save tokenizer latency",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=128,
        type=int,
        help="Batch size per GPU used for embedding inference",
    )
    parser.add_argument(
        "--ann_chunk_factor",
        default=5,  # for 500k queryes, divided into 100k chunks for each epoch
        type=int,
        help="devide training queries into chunks",
    )
    parser.add_argument(
        "--topk_training",
        default=500,
        type=int,
        help="top k from which negative samples are collected",
    )
    parser.add_argument(
        "--negative_sample",
        default=5,
        type=int,
        help="at each resample, how many negative samples per query do I use",
    )
    parser.add_argument(
        "--nn_topk_training",
        default=50,
        type=int,
        help="top k from which negative samples are collected (for nn discriminator)",
    )
    parser.add_argument(
        "--nn_posneg_sample",
        default=5,
        type=int,
        help="at each resample, how many positive/negative samples per anchor do I use (for nn discriminator)",
    )
    parser.add_argument(
        "--ann_measure_topk_mrr",
        default=False,
        action="store_true",
        help="also measure MRR over the top-k ANN neighbours of training queries",
    )
    parser.add_argument(
        "--only_keep_latest_embedding_file",
        default=False,
        action="store_true",
        help="reuse step 0 in embedding file names so only the latest embeddings are kept on disk",
    )
    parser.add_argument(
        "--fix_refresh_rate",
        type=int,
        default=0,
        help="Fix the ANN index refresh rate to X global steps. If X is 0 then we don't fix it.",
    )
    parser.add_argument(
        "--no_cuda",
        action="store_true",
        help="Avoid using CUDA when available",
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank",
    )
    parser.add_argument(
        "--server_ip",
        type=str,
        default="",
        help="For distant debugging.",
    )
    parser.add_argument(
        "--server_port",
        type=str,
        default="",
        help="For distant debugging.",
    )
    parser.add_argument(
        "--inference",
        default=False,
        action="store_true",
        help="only do inference if specify",
    )
    args = parser.parse_args()
    return args
def set_env(args):
    """Configure device, distributed state (world_size / rank) and logging on args."""
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        # set_device must precede init_process_group so NCCL binds the right GPU.
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # store args
    if args.local_rank != -1:
        args.world_size = torch.distributed.get_world_size()
        args.rank = dist.get_rank()
    else:
        args.world_size = 1
        args.rank = 0
    # Setup logging: only ranks -1/0 log at INFO, other ranks at WARN.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
    )
def ann_data_gen(args):
    """Main loop: wait for new checkpoints and emit a fresh ANN data set for each.

    Resumes from the latest existing ann data number, polls training_dir once
    a minute, and stops after end_output_num rounds (or runs forever at -1,
    or after one pass in --inference mode).
    """
    last_checkpoint = args.last_checkpoint_dir
    ann_no, _, _ = get_latest_ann_data(args.output_dir)  # train only, since we only care about ann_no
    output_num = ann_no + 1
    logger.info("starting output number %d", output_num)
    if is_first_worker():
        # Only rank 0 creates the output/cache directories.
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        if args.tgd_output_dir is not None:
            if not os.path.exists(args.tgd_output_dir):
                os.makedirs(args.tgd_output_dir)
        if not os.path.exists(args.cache_dir):
            os.makedirs(args.cache_dir)
    srd_positive_id = load_positive_ids(args.srd_data_dir)
    srd_dev_positive_id = load_positive_ids(args.srd_data_dir, dev_set=True)
    tgd_positive_id = None
    if args.tgd_data_dir is not None:
        tgd_positive_id = load_positive_ids(args.tgd_data_dir)
    while args.end_output_num == -1 or output_num <= args.end_output_num:
        next_checkpoint, latest_step_num = get_latest_checkpoint(args)
        if args.only_keep_latest_embedding_file:
            # Reuse step 0 in embedding filenames so old files get overwritten.
            latest_step_num = 0
        if next_checkpoint == last_checkpoint:
            # No new checkpoint yet; poll again in a minute.
            time.sleep(60)
        else:
            logger.info("start generate ann data number %d", output_num)
            logger.info("next checkpoint at " + next_checkpoint)
            generate_new_ann(  # for both train and tgd
                args,
                output_num,
                next_checkpoint,
                srd_positive_id,
                srd_dev_positive_id,
                tgd_positive_id,
                latest_step_num)
            if args.inference:
                break
            logger.info("finished generating ann data number %d", output_num)
            output_num += 1
            last_checkpoint = next_checkpoint
        if args.local_rank != -1:
            dist.barrier()
def main():
    """Driver entry point: parse CLI args, set up the environment, run the loop."""
    cli_args = get_arguments()
    set_env(cli_args)
    ann_data_gen(cli_args)


if __name__ == "__main__":
    main()
| 31,788 | 31.2077 | 121 | py |
modir | modir-master/drivers/run_ann.py | import sys
sys.path += ['../']
import os
import time
import torch
from data.msmarco_data import GetTrainingDataProcessingFn, GetTripletTrainingDataProcessingFn
from utils.util import (
getattr_recursive,
set_seed,
StreamingDataset,
EmbeddingCache,
get_checkpoint_no,
get_latest_ann_data,
is_first_worker
)
import pandas as pd
from transformers import glue_processors as processors
from transformers import (
AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup
)
import transformers
from utils.lamb import Lamb
from utils.modir_utils import (
compute_total_grad_L2_norm, intrain_dev_eval, intrain_save_checkpoint,
build_dl_iter_from_file, get_next,
build_input_from_batch, get_module
)
from data.msmarco_data import GetProcessingFn
from model.models import MSMarcoConfigDict, ALL_MODELS
from model.domain_classifier import DomainClassifier, DummyModule, dry_dc_evaluation
from torch import nn
import torch.distributed as dist
from tqdm import tqdm, trange
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
import numpy as np
from os.path import isfile, join
import argparse
import glob
import json
import logging
import random
import faiss
try:
from apex import amp
except ImportError:
print("apex not imported")
torch.multiprocessing.set_sharing_strategy('file_system')
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# logging.disable(20) # supressing logger.info
faiss.omp_set_num_threads(16)
faiss_dim = 768 # it's unlikely that this will need to be changed
def GetTripletTrainingDataProcessingFnWithSeparatePassageCache(
    args, query_cache, passage_cache, another_passage_cache
):
    """Return a line-processing fn yielding (anchor, positive, negative) token triples.

    Each input line is "qid<TAB>pos_pid1,pos_pid2,...<TAB>neg_pid1,neg_pid2,...";
    anchors are read from query_cache, positives from passage_cache and
    negatives from another_passage_cache (e.g. the other domain's passages).
    """
    def fn(line, i):
        line_arr = line.split('\t')
        qid = int(line_arr[0])
        # Pair the k-th positive with the k-th negative (zip truncates to the shorter list).
        for pos_pid, neg_pid in zip(
            [int(pos_pid) for pos_pid in line_arr[1].split(',')],
            [int(neg_pid) for neg_pid in line_arr[2].split(',')]
        ):
            # NOTE(review): the anchor is processed with query=False even though
            # it comes from query_cache — presumably because the "queries" here
            # are themselves passages (doc-doc-doc triples); confirm with callers.
            query_data = GetProcessingFn(
                args, query=False)(
                query_cache[qid], qid)[0]
            pos_data = GetProcessingFn(
                args, query=False)(
                passage_cache[pos_pid], pos_pid)[0]
            neg_data = GetProcessingFn(
                args, query=False)(
                another_passage_cache[neg_pid], neg_pid)[0]
            # Flattened (ids, mask, segment) for anchor, positive, negative.
            yield (
                query_data[0], query_data[1], query_data[2],
                pos_data[0], pos_data[1], pos_data[2],
                neg_data[0], neg_data[1], neg_data[2]
            )
    return fn
def build_train_dataset_from_ann(
    args,
    query_cache, passage_cache,
    tb_writer,
    global_step,
    last_ann_no,
    ann_dir,
):
    """Poll ann_dir for a new ANN training file and build a DataLoader from it.

    Returns (updated, (train_dataloader, last_ann_no)). When no newer file
    exists, returns (False, (None, None)); if the file vanished mid-read
    (FileNotFoundError), returns (False, (None, <new ann_no>)) so the round
    is skipped but not retried.
    """
    # check if new ann training data is availabe
    ann_no, ann_path, ndcg_json = get_latest_ann_data(ann_dir)
    if ann_path is not None and ann_no != last_ann_no:
        try:
            logger.info("Training on new ANN data at %s", ann_path)
            with open(ann_path, 'r') as f:
                ann_training_data = f.readlines()
            # Truncate so every rank receives the same number of lines.
            aligned_size = (len(ann_training_data) //
                            args.world_size) * args.world_size
            ann_training_data = ann_training_data[:aligned_size]
            logger.info("Total ann queries: %d", len(ann_training_data))
            if args.triplet:
                train_dataset = StreamingDataset(
                    ann_training_data,
                    GetTripletTrainingDataProcessingFn(
                        args, query_cache, passage_cache, tgd=not(ann_dir==args.ann_dir))
                )
            else:
                train_dataset = StreamingDataset(
                    ann_training_data,
                    GetTrainingDataProcessingFn(
                        args, query_cache, passage_cache)
                )
            train_dataloader = DataLoader(
                train_dataset, batch_size=args.train_batch_size)
            if args.local_rank != -1:
                dist.barrier()
            update = True
        except FileNotFoundError:
            update = False
            train_dataloader = None
        if is_first_worker() and ann_dir==args.ann_dir:
            # add ndcg at checkpoint step used instead of current step
            # ndcg_json will not be None since this is args.ann_dir
            metric_step = ndcg_json['checkpoint'].strip('/').split('/')[-1].split('-')[-1]
            try:
                metric_step = int(metric_step)
            except ValueError:
                # Checkpoint path without a trailing step number (e.g. initial model).
                metric_step = 0
            for key in ndcg_json:
                if key != 'checkpoint':
                    tb_writer.add_scalar(
                        key, ndcg_json[key], metric_step
                    )
        last_ann_no = ann_no
        return update, (train_dataloader, last_ann_no)
    return False, (None, None)
def show(model):
    """Debug helper: return the first scalar value of the model's first parameter.

    Repeatedly indexes into the first parameter tensor until a 0-d tensor is
    reached, then returns its Python scalar. Returns None when the model has
    no parameters.
    """
    for p in model.parameters():
        entry = p
        while True:
            try:
                entry = entry[0]
            except (IndexError, TypeError):
                # Fully unwrapped (indexing a 0-d tensor fails): read the scalar.
                # The original used a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.
                return entry.item()
    return None
def train(args, model, dc_model, tokenizer,
          caches, tgd_file_name, file_process_fn,
          ):
    """Adversarial domain-adaptation training loop (ANCE + domain classifier).

    Each batch runs three phases:
      1. forward the dual encoder on a source-domain (srd) triplet batch and a
         target-domain (tgd) query/doc batch;
      2. update the domain classifier ``dc_model`` on *detached* embeddings
         (loss_adv_D), optionally repeated over cached batches;
      3. update the encoder with ranking loss plus the adversarial term
         (loss_adv_M), computed through ``static_dc_model`` — a frozen copy of
         the classifier — so phase-3 gradients never touch the classifier.

    Args:
        args: parsed CLI namespace (see get_arguments).
        model: the dual-encoder retrieval model.
        dc_model: trainable domain classifier.
        tokenizer: only used for checkpoint saving.
        caches: (query_cache, passage_cache) EmbeddingCache pair.
        tgd_file_name: path to the target-domain triples file (streamed).
        file_process_fn: line -> features function for the tgd file.

    Returns:
        The final ``global_step``.

    NOTE(review): ``scheduler`` / ``dc_scheduler`` are created only when
    ``args.single_warmup`` is set; the non-single_warmup path would hit a
    NameError at the first optimizer update — confirm it is never exercised.
    """
    logger.info("Training/evaluation parameters %s", args)
    tb_writer = None
    if is_first_worker():
        tb_writer = SummaryWriter(log_dir=args.log_dir)
    query_cache, passage_cache = caches
    tgd_file = open(tgd_file_name)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # Effective optimizer-step batch size; kept for reference, not used below.
    real_batch_size = args.train_batch_size * args.gradient_accumulation_steps * \
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    # Create a static copy of dc_model; it receives dc_model's weights every
    # step (phase 3.1) so the adversarial loss cannot update the classifier.
    static_dc_model = DomainClassifier(args)
    static_dc_model.to(args.device)
    # optimizer for ANCE: one param group per known layer, remainder in a
    # final catch-all group.
    optimizer_grouped_parameters = []
    layer_optim_params = set()
    for layer_name in [
            "roberta.embeddings",
            "score_out",
            "downsample1",
            "downsample2",
            "downsample3"]:
        layer = getattr_recursive(model, layer_name)
        if layer is not None:
            optimizer_grouped_parameters.append({"params": layer.parameters()})
            for p in layer.parameters():
                layer_optim_params.add(p)
    if getattr_recursive(model, "roberta.encoder.layer") is not None:
        for layer in model.roberta.encoder.layer:
            optimizer_grouped_parameters.append({"params": layer.parameters()})
            for p in layer.parameters():
                layer_optim_params.add(p)
    optimizer_grouped_parameters.append(
        {"params": [p for p in model.parameters() if p not in layer_optim_params]})
    if args.optimizer.lower() == "lamb":
        optimizer_constructor = lambda param, lr, decay: Lamb(
            param, lr=lr, eps=args.adam_epsilon, weight_decay=decay
        )
    elif args.optimizer.lower() == "adamw":
        optimizer_constructor = lambda param, lr, decay: AdamW(
            param, lr=lr, eps=args.adam_epsilon, weight_decay=decay
        )
    else:
        raise NotImplementedError(
            f"Optimizer {args.optimizer} not recognized! Can only be lamb or adamW")
    optimizer = optimizer_constructor(optimizer_grouped_parameters, args.learning_rate, args.weight_decay)
    dc_optimizer = optimizer_constructor(dc_model.parameters(), args.dc_learning_rate, args.dc_weightDecay)
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(
        os.path.join(
            args.model_name_or_path,
            "optimizer.pt")) and args.load_optimizer_scheduler:
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(
            torch.load(
                os.path.join(
                    args.model_name_or_path,
                    "optimizer.pt")))
    logger.info("Start fp16 and distributed model init")
    if args.fp16:
        if 'apex' not in sys.modules:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        # apex amp supports initializing several models/optimizers at once.
        [model, dc_model, static_dc_model], [optimizer, dc_optimizer] = amp.initialize(
            [model, dc_model, static_dc_model],
            [optimizer, dc_optimizer],
            opt_level=args.fp16_opt_level
        )
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
        dc_model = torch.nn.DataParallel(dc_model)
        static_dc_model = torch.nn.DataParallel(static_dc_model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
        dc_model = torch.nn.parallel.DistributedDataParallel(
            dc_model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
        static_dc_model = torch.nn.parallel.DistributedDataParallel(
            static_dc_model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train
    logger.info("***** Running training *****")
    logger.info("  Max steps = %d", args.max_steps)
    logger.info(
        "  Instantaneous batch size per GPU = %d",
        args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(
        "  Gradient Accumulation steps = %d",
        args.gradient_accumulation_steps)
    global_step = 0
    # dyn_lamb: adversarial-loss weight, optionally decayed over training.
    dyn_lamb = args.lamb
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to gobal_step of last saved checkpoint from model
        # path
        if "-" in args.model_name_or_path:
            try:
                global_step = int(
                    args.model_name_or_path.split("-")[-1].split("/")[0])
            except:
                global_step=0
        else:
            global_step = 0
        logger.info(
            "  Continuing training from checkpoint, will skip to saved global_step")
        logger.info("  Continuing training from global step %d", global_step)
    # Scalars accumulated between logging flushes.
    optim_monitors = [
        'loss_adv_D', 'loss_adv_M', 'loss_ranking',
        'dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P',
        'dc_pre_softmax_logits_0', 'dc_pre_softmax_logits_1',
        'dc_post_softmax_prob_0', 'dc_post_softmax_prob_1',
        'embedding_norm',
    ]
    optim_cumulator = {k: 0.0 for k in optim_monitors}
    # Parameter partition used only for grad-norm reporting.
    model_parts = ['roberta', 'projection']
    model_parts_params = {
        'roberta': [p for n, p in model.named_parameters() if 'embeddingHead' not in n],
        'projection': [p for n, p in model.named_parameters() if 'embeddingHead' in n],
        # 'domain_classifier': dc_model.parameters(),
    }
    grad_norm_cumulator = {k: 0.0 for k in model_parts}
    grad_norm_cumulator.update({k+'-clipped': 0.0 for k in model_parts})
    grad_norm_cumulator.update({
        'domain_classifier': 0.0, 'domain_classifier-clipped': 0.0
    })
    model.zero_grad()
    model.train()
    dc_model.zero_grad()
    dc_model.train()
    set_seed(args)  # Added here for reproductibility
    last_ann_no = -1
    train_dataloader = None
    train_dataloader_iter = None
    epoch_num = -1
    step = 0
    # Rolling buffers of detached embeddings for dc_rep_method == 'async'.
    accumulated_srd_embs = []
    accumulated_tgd_embs = []
    prev_dry_dc_state_dict = None
    # actual_refresh_rate = None
    # prev_refresh_gstep = None
    # half_eval_done = False
    if args.single_warmup:
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=args.warmup_steps,
            num_training_steps=args.max_steps)
        dc_scheduler = get_linear_schedule_with_warmup(
            dc_optimizer,
            num_warmup_steps=args.warmup_steps,
            num_training_steps=args.max_steps)
    srd_update_ann, tgd_update_ann = False, False
    while global_step < args.max_steps:
        # Dataset refresh + tgd stream rebuild happen only at optimizer-step
        # boundaries (step is a micro-step counter; global_step counts updates).
        if step % args.gradient_accumulation_steps == 0:
            if global_step % args.logging_steps == 0:
                if not srd_update_ann:
                    srd_update_ann, (newdataloader, newlan) = build_train_dataset_from_ann(
                        args,
                        query_cache, passage_cache,
                        tb_writer,
                        global_step,
                        last_ann_no,
                        ann_dir=args.ann_dir
                    )
                # only update if both domains' new ann are ready
                if srd_update_ann:
                    train_dataloader, last_ann_no = newdataloader, newlan
                    train_dataloader_iter = iter(train_dataloader)
                    epoch_num += 1
                    if is_first_worker():
                        tb_writer.add_scalar(
                            'epoch', epoch_num, global_step
                        )
                    if global_step > 0:
                        # Evaluate + checkpoint at every ANN refresh (skip step 0).
                        prev_dry_dc_state_dict = intrain_dev_eval(
                            args, global_step, model, tb_writer, prev_dry_dc_state_dict,
                            all_datasets=True)
                        intrain_save_checkpoint(
                            args, global_step, model, tokenizer, optimizer, scheduler)
                    srd_update_ann, tgd_update_ann = False, False
            # Rebuild the tgd iterator from the open file's current position.
            _, tgd_epoch_iter = build_dl_iter_from_file(args, tgd_file, file_process_fn)
        step += 1
        # get srd batch and inputs
        try:
            batch = next(train_dataloader_iter)
        except StopIteration:
            logger.info("Finished iterating current dataset, begin reiterate")
            train_dataloader_iter = iter(train_dataloader)
            batch = next(train_dataloader_iter)
        batch = tuple(t.to(args.device) for t in batch)
        batch_size = batch[0].shape[0]
        inputs = build_input_from_batch(args, batch, mode='full', triplet=True)
        # get tgd batch and inputs; alternate positive/negative docs by step parity
        tgd_batch, tgd_epoch_iter = get_next(
            tgd_epoch_iter, args, tgd_file, file_process_fn, batch_size)
        tgd_batch = tuple(t.to(args.device).long() for t in tgd_batch)
        tgd_query_inputs = build_input_from_batch(args, tgd_batch, mode='query')
        if step % 2 == 0:
            tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='pos_doc')
        else:
            tgd_doc_inputs = build_input_from_batch(args, tgd_batch, mode='neg_doc')
        ##### 1. forward of the encoder model #####
        # no_sync() skips DDP all-reduce on non-boundary micro-steps.
        if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
            outputs = model(**inputs, output_dc_emb=True)
        else:
            with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                outputs = model(**inputs, output_dc_emb=True)
        ranking_loss = outputs[0]
        # srd_embs: [query_emb, pos_doc_emb] on even steps, [query_emb, neg_doc_emb] on odd.
        if step % 2 == 0:
            srd_embs = [outputs[1][0], outputs[1][1]]
        else:
            srd_embs = [outputs[1][0], outputs[1][2]]
        if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
            tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
            tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
        else:
            with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                tgd_query_emb = get_module(model).query_emb(**tgd_query_inputs)
                tgd_doc_emb = get_module(model).body_emb(**tgd_doc_inputs)
        tgd_embs = [tgd_query_emb, tgd_doc_emb]
        # torch.tensor(x) copies and detaches, so phase-2 classifier updates
        # cannot backpropagate into the encoder.
        detached_srd_embs = [torch.tensor(x) for x in srd_embs]
        detached_tgd_embs = [torch.tensor(x) for x in tgd_embs]
        if args.dc_rep_method == 'async':
            # Bounded FIFO of the most recent dc_rep_steps embedding batches.
            if len(accumulated_srd_embs) == args.dc_rep_steps:
                accumulated_srd_embs.pop(0)
                accumulated_tgd_embs.pop(0)
            accumulated_srd_embs.append(detached_srd_embs)
            accumulated_tgd_embs.append(detached_tgd_embs)
        for emb in srd_embs+tgd_embs:
            # / 4: average over the four embedding groups (srd/tgd x query/doc)
            optim_cumulator['embedding_norm'] += emb.norm(dim=1).mean() / 4
        if args.n_gpu > 1:
            ranking_loss = ranking_loss.mean()
        if args.gradient_accumulation_steps > 1:
            ranking_loss = ranking_loss / args.gradient_accumulation_steps
        optim_cumulator['loss_ranking'] += ranking_loss.item()
        # 2. feed detached embeddings to the dc_model and BP L_adv_D
        # dc_rep_step == 0 is a stats-only pass (see `continue` below).
        for dc_rep_step in range(1+args.dc_rep_steps):
            if args.dc_rep_method == 'repeat':
                srd_dc_input_embs = detached_srd_embs
                tgd_dc_input_embs = detached_tgd_embs
            elif args.dc_rep_method == 'async':
                which_step = min(dc_rep_step, len(accumulated_srd_embs)-1)
                srd_dc_input_embs = accumulated_srd_embs[which_step]
                tgd_dc_input_embs = accumulated_tgd_embs[which_step]
            # Group dc_rep_step_per_batch steps' embeddings into one classifier batch.
            if dc_rep_step == 0:
                batched_srd_dc_input_embs = srd_dc_input_embs
                batched_tgd_dc_input_embs = tgd_dc_input_embs
            elif dc_rep_step % args.dc_rep_step_per_batch != 0:
                batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
                batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
                batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
                batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
                continue
            else:
                batched_srd_dc_input_embs[0].append(srd_dc_input_embs[0])
                batched_srd_dc_input_embs[1].append(srd_dc_input_embs[1])
                batched_tgd_dc_input_embs[0].append(tgd_dc_input_embs[0])
                batched_tgd_dc_input_embs[1].append(tgd_dc_input_embs[1])
                batched_srd_dc_input_embs[0] = torch.cat(batched_srd_dc_input_embs[0])
                batched_srd_dc_input_embs[1] = torch.cat(batched_srd_dc_input_embs[1])
                batched_tgd_dc_input_embs[0] = torch.cat(batched_tgd_dc_input_embs[0])
                batched_tgd_dc_input_embs[1] = torch.cat(batched_tgd_dc_input_embs[1])
            # 2.1 feed detached embeddings to the dc_model
            L_adv_D = 0.0
            label_size = batch_size * (1 if dc_rep_step==0 else args.dc_rep_step_per_batch)
            srd_labels = torch.tensor([0] * label_size, device=args.device)
            tgd_labels = torch.tensor([1] * label_size, device=args.device)
            for i_emb, emb in enumerate(batched_srd_dc_input_embs):
                labels = srd_labels
                if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                    dc_srd_outputs = dc_model(emb, labels=labels)
                else:
                    with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                        dc_srd_outputs = dc_model(emb, labels=labels)
                L_adv_D += dc_srd_outputs[1] * args.dc_rep_step_per_batch  # scale up because of the average in cross_entropy
                if dc_rep_step == 0:
                    suffix = 'Q' if i_emb==0 else 'P'
                    optim_cumulator[f'dc_total_{suffix}'] += dc_srd_outputs[2][0]
                    optim_cumulator[f'dc_correct_{suffix}'] += dc_srd_outputs[2][1]
                    optim_cumulator['dc_pre_softmax_logits_0'] += dc_srd_outputs[0][:, 0].mean() / 4
                    optim_cumulator['dc_pre_softmax_logits_1'] += dc_srd_outputs[0][:, 1].mean() / 4
                    probs = torch.softmax(dc_srd_outputs[0], dim=1)
                    optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
                    optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
            for i_emb, emb in enumerate(batched_tgd_dc_input_embs):
                labels = tgd_labels
                if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                    dc_tgd_outputs = dc_model(emb, labels=labels)
                else:
                    with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                        dc_tgd_outputs = dc_model(emb, labels=labels)
                L_adv_D += dc_tgd_outputs[1] * args.dc_rep_step_per_batch  # scale up because of the average in cross_entropy
                if dc_rep_step == 0:
                    suffix = 'Q' if i_emb==0 else 'P'
                    optim_cumulator[f'dc_total_{suffix}'] += dc_tgd_outputs[2][0]
                    optim_cumulator[f'dc_correct_{suffix}'] += dc_tgd_outputs[2][1]
                    optim_cumulator['dc_pre_softmax_logits_0'] += dc_tgd_outputs[0][:, 0].mean() / 4
                    optim_cumulator['dc_pre_softmax_logits_1'] += dc_tgd_outputs[0][:, 1].mean() / 4
                    probs = torch.softmax(dc_tgd_outputs[0], dim=1)
                    optim_cumulator['dc_post_softmax_prob_0'] += probs[:, 0].mean() / 4
                    optim_cumulator['dc_post_softmax_prob_1'] += probs[:, 1].mean() / 4
            if dc_rep_step % args.dc_rep_step_per_batch == 0:
                batched_srd_dc_input_embs = [[], []]
                batched_tgd_dc_input_embs = [[], []]
            if dc_rep_step == 0:
                continue  # this dc_rep_step is only for logging things for optim_cumulator
            if args.n_gpu > 1:
                L_adv_D = L_adv_D.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                L_adv_D = L_adv_D / args.gradient_accumulation_steps
            optim_cumulator['loss_adv_D'] += L_adv_D.item() / args.dc_rep_steps
            # 2.2 BP of L_adv_D; dc_optimizer update
            if args.fp16:
                with amp.scale_loss(L_adv_D, dc_optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                    L_adv_D.backward()
                else:
                    with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                        L_adv_D.backward()
            if step % args.gradient_accumulation_steps == 0:
                # Record pre-clip norm, clip, record post-clip norm, then update.
                grad_norm_cumulator['domain_classifier'] += compute_total_grad_L2_norm(
                    dc_model.parameters()
                ) / args.dc_rep_steps
                if not args.no_gn_clip:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(dc_optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            dc_model.parameters(), args.max_grad_norm)
                grad_norm_cumulator['domain_classifier-clipped'] += compute_total_grad_L2_norm(
                    dc_model.parameters()
                ) / args.dc_rep_steps
                dc_optimizer.step()
                dc_model.zero_grad()
        if step % args.gradient_accumulation_steps == 0:
            dc_scheduler.step()  # this is outside of the dc_rep_step loop
        # 3.1 copy the dc_model, feed (undetached) embeddings to it
        get_module(static_dc_model).load_state_dict(get_module(dc_model).state_dict())
        L_adv_M = 0.0
        # Label scheme per adversarial objective (ADDA paper, Table 1).
        if args.dc_loss_choice == 'minimax':
            srd_labels = torch.tensor([0] * batch_size, device=args.device)
            tgd_labels = torch.tensor([1] * batch_size, device=args.device)
        elif args.dc_loss_choice == 'gan':
            # GAN-style: only tgd embeddings are pushed toward the srd label.
            tgd_labels = torch.tensor([0] * batch_size, device=args.device)
        elif args.dc_loss_choice == 'confusion':
            srd_labels = 'uniform'
            tgd_labels = 'uniform'
        else:
            raise NotImplementedError()
        if args.dc_loss_choice != 'gan':
            for emb in srd_embs:
                if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                    dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
                else:
                    with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                        dc_srd_outputs = static_dc_model(emb, labels=srd_labels)
                L_adv_M += dc_srd_outputs[1]
        for emb in tgd_embs:
            if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
            else:
                with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                    dc_tgd_outputs = static_dc_model(emb, labels=tgd_labels)
            L_adv_M += dc_tgd_outputs[1]
        if args.dc_loss_choice == 'minimax':
            # Encoder maximizes classifier loss in the minimax formulation.
            L_adv_M = -L_adv_M
        L_adv_M *= dyn_lamb
        if args.n_gpu > 1:
            L_adv_M = L_adv_M.mean()
        if args.gradient_accumulation_steps > 1:
            L_adv_M = L_adv_M / args.gradient_accumulation_steps
        optim_cumulator['loss_adv_M'] += L_adv_M.item()
        # 3.2 BP of ranking loss and L_adv_M; optimizer update
        loss = ranking_loss + L_adv_M
        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            if step % args.gradient_accumulation_steps == 0 or args.world_size == 1:
                loss.backward()
            else:
                with model.no_sync(), dc_model.no_sync(), static_dc_model.no_sync():
                    loss.backward()
        if step % args.gradient_accumulation_steps == 0:
            for model_part, params in model_parts_params.items():
                grad_norm_cumulator[model_part] += compute_total_grad_L2_norm(params)
            if not args.no_gn_clip:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(
                        amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(
                        model.parameters(), args.max_grad_norm)
            for model_part, params in model_parts_params.items():
                grad_norm_cumulator[model_part+'-clipped'] += compute_total_grad_L2_norm(params)
            optimizer.step()
            scheduler.step()
            model.zero_grad()
            global_step += 1
        # end of the main part of training
        if step % args.gradient_accumulation_steps == 0:
            if args.lamb_reduce_to_half_steps > 0:
                if is_first_worker():
                    tb_writer.add_scalar("lambda", dyn_lamb, global_step)
                # Exponential decay: halves every lamb_reduce_to_half_steps updates.
                dyn_lamb = args.lamb * 2**(-global_step / args.lamb_reduce_to_half_steps)
            if (args.logging_steps > 0 and global_step % args.logging_steps == 0):
                logs = {}
                logs["linear_layer_L2norm"] = get_module(dc_model).layers[0].weight.norm().item()
                logs["linear_layer_mean"] = get_module(dc_model).layers[0].weight.mean().item()
                logs["learning_rate"] = scheduler.get_last_lr()[0]
                logs["learning_rate_dc"] = dc_optimizer.param_groups[0]['lr']
                logs["dc_acc_Q"] = optim_cumulator['dc_correct_Q'] / (1e-10 + optim_cumulator['dc_total_Q'])
                logs["dc_acc_P"] = optim_cumulator['dc_correct_P'] / (1e-10 + optim_cumulator['dc_total_P'])
                for k in optim_monitors:
                    if k not in ['dc_total_Q', 'dc_correct_Q', 'dc_total_P', 'dc_correct_P']:
                        logs[k] = float(optim_cumulator[k] / args.logging_steps / args.gradient_accumulation_steps)
                optim_cumulator = {k: 0.0 for k in optim_monitors}  # reset
                if is_first_worker():
                    for key, value in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    logs.update({k: v/args.logging_steps for k, v in grad_norm_cumulator.items()})
                    logger.info(json.dumps({**logs, **{"step": global_step}}))
                    # NOTE(review): grad_norm_cumulator is only reset on the
                    # first worker; other workers keep accumulating (their
                    # values are never read) — confirm this is intentional.
                    for key, value in grad_norm_cumulator.items():
                        tb_writer.add_scalar(
                            'grad_norm-'+key,
                            value / args.logging_steps,
                            global_step)
                        grad_norm_cumulator[key] = 0.0  # reset
            if args.eval_steps > 0 and global_step % args.eval_steps == 0:
                prev_dry_dc_state_dict = intrain_dev_eval(
                    args, global_step, model, tb_writer, prev_dry_dc_state_dict)
                intrain_save_checkpoint(
                    args, global_step, model, tokenizer, optimizer, scheduler)
    if args.local_rank == -1 or torch.distributed.get_rank() == 0:
        tb_writer.close()
    tgd_file.close()
    return global_step
def get_arguments():
    """Parse command-line arguments for adversarial ANCE training.

    Also post-processes the in-train-dev dataset lists so that
    ``tinymsmarco`` is first and the target-domain dataset (if present
    among them) is second, and sets ``args.mix_tgd`` accordingly.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the cached passage and query files",
    )
    parser.add_argument(
        "--tgd_raw_data_dir",
        default=None,
        type=str,
        help="The input raw data dir for target domain.",
    )
    parser.add_argument(
        "--tgd_data_name",
        default=None,
        type=str,
        required=False,
        help="The target domain dataset name.",
    )
    parser.add_argument(
        "--intraindev_data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir for in-train-dev set.",
    )
    parser.add_argument(
        "--intraindev_data_name",
        default=None,
        type=str,
        required=False,
        help="The in-train-dev dataset name.",
    )
    parser.add_argument(
        "--ann_dir",
        default=None,
        type=str,
        required=True,
        help="The ann training data dir. Should contain the output of ann data generation job",
    )
    parser.add_argument(
        "--tgd_ann_dir",
        default=None,
        type=str,
        required=False,
        help="The ann training data dir for tgd. Should contain the output of ann data generation job",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(
            MSMarcoConfigDict.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " +
        ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(
            processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--saved_embedding_dir",
        default="",
        type=str,
        help="The directory where intraindev embeddings are dumped",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence (document) length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum total input query length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--max_query_length_tgd",
        default=None,
        type=int,
        help="The maximum total input query length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded. Default is args.max_query_length. "
        "This argument is used only when target domain is arguana, where max_query_len=512 is needed.",
    )
    parser.add_argument(
        "--triplet",
        default=False,
        action="store_true",
        # FIX: help text previously said "Whether to run training." (copied
        # from another flag); this flag actually selects triplet-format data.
        help="Whether to use triplet (query, positive passage, negative passage) training data.",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--log_dir",
        default=None,
        type=str,
        help="Tensorboard log dir",
    )
    parser.add_argument(
        "--optimizer",
        default="lamb",
        type=str,
        help="Optimizer - lamb or adamW",
    )
    parser.add_argument(
        "--dc_method",
        default="classification",
        type=str,
        help="What to do for domain confusion. "
        "classification: classify the source of a vector representation. "
        "knn: a representation's k-nearest neighbors should have members from both domain "
        "(its implementation is removed; check c1fae2c).")
    parser.add_argument(
        "--dc_loss_choice",
        default="minimax",
        type=str,
        help="Adversarial loss choice (ADDA paper, Table 1, 4th column).")
    parser.add_argument(
        "--dc_layers",
        default=1,
        type=int,
        help="How many layers to use for the domain classifier",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for Adam.",
    )
    parser.add_argument(
        "--dc_learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for dc_model.",
    )
    parser.add_argument(
        "--weight_decay",
        default=0.0,
        type=float,
        help="Weight decay if we apply some.",
    )
    parser.add_argument(
        "--dropout_rate",
        default=0.1,
        type=float,
        help="Dropout probability",
    )
    parser.add_argument(
        "--adam_epsilon",
        default=1e-8,
        type=float,
        help="Epsilon for Adam optimizer.",
    )
    parser.add_argument(
        "--lamb",
        default=0.1,
        type=float,
        help="Coefficient for domain classification loss.",
    )
    parser.add_argument(
        "--lamb_reduce_to_half_steps",
        default=0,
        type=int,
        help="Reduce dyn_lamb exponentially, and it will be reduced to a half after X steps.",
    )
    parser.add_argument(
        "--dc_rep_steps",
        default=1,
        type=int,
        help="Update dc_model over a single batch for X steps.",
    )
    parser.add_argument(
        "--dc_rep_method",
        default="repeat",
        type=str,
        help="Use what data for dc repetitive training. "
        "repeat: use the same batch repetitively; "
        "async: use embeddings recorded from previous batches."
    )
    parser.add_argument(
        "--dc_rep_step_per_batch",
        default=1,
        type=int,
        help="For dc_rep, how many steps of embeddings to put in one batch",
    )
    parser.add_argument(
        "--dc_weightDecay",
        default=0.0,
        type=float,
        help="Weight decay if we apply some for domain classifier.",
    )
    parser.add_argument(
        "--no_gn_clip",
        action="store_true",
        help="Whether to disable grad norm clipping",
    )
    parser.add_argument(
        "--max_grad_norm",
        default=1.0,
        type=float,
        help="Max gradient norm.",
    )
    parser.add_argument(
        "--max_steps",
        default=1000000,
        type=int,
        help="If > 0: set total number of training steps to perform",
    )
    parser.add_argument(
        "--warmup_steps",
        default=0,
        type=int,
        help="Linear warmup over warmup_steps.",
    )
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=500,
        help="Log every X updates steps.",
    )
    parser.add_argument(
        "--eval_steps",
        type=int,
        default=500,
        help="Evaluate the model every X updates steps.",
    )
    parser.add_argument(
        "--no_cuda",
        action="store_true",
        help="Avoid using CUDA when available",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="random seed for initialization",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )

    # ----------------- ANN HyperParam ------------------
    parser.add_argument(
        "--load_optimizer_scheduler",
        default=False,
        action="store_true",
        help="load scheduler from checkpoint or not",
    )
    parser.add_argument(
        "--single_warmup",
        default=False,
        action="store_true",
        help="use single or re-warmup",
    )
    # ----------------- End of Doc Ranking HyperParam ------------------
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank",
    )
    parser.add_argument(
        "--server_ip",
        type=str,
        default="",
        help="For distant debugging.",
    )
    parser.add_argument(
        "--server_port",
        type=str,
        default="",
        help="For distant debugging.",
    )
    args = parser.parse_args()

    # sort intraindev datasets, so that tinymsmarco is the first and the target
    # domain dataset is the second
    args.intraindev_data_name = args.intraindev_data_name.split(',')
    args.intraindev_data_dir = args.intraindev_data_dir.split(',')
    assert args.intraindev_data_name[0] == 'tinymsmarco'
    assert len(args.intraindev_data_name) >= 2
    try:
        # Swap the target-domain dataset into position 1 (name and dir in sync).
        tgd_position = args.intraindev_data_name.index(args.tgd_data_name)
        args.intraindev_data_name[1], args.intraindev_data_name[tgd_position] = args.intraindev_data_name[tgd_position], args.intraindev_data_name[1]
        args.intraindev_data_dir[1], args.intraindev_data_dir[tgd_position] = args.intraindev_data_dir[tgd_position], args.intraindev_data_dir[1]
        args.mix_tgd = False
    except ValueError:
        # tgd_data_name not among the intraindev sets: treat tgd as "mixed in".
        args.mix_tgd = True
    return args
def set_env(args):
    """Set up debugging, device placement, logging, and RNG seeding.

    Mutates ``args`` in place, adding ``args.device`` and ``args.n_gpu``.
    """
    # Optional remote-debugger hookup; see
    # https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
    if args.server_ip and args.server_port:
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(
            address=(args.server_ip, args.server_port),
            redirect_output=True)
        ptvsd.wait_for_attach()

    # Device / distributed backend selection.
    if args.local_rank != -1 and not args.no_cuda:
        # One process per GPU; NCCL synchronizes the group.
        torch.cuda.set_device(args.local_rank)
        chosen_device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    else:
        # Single process: grab every visible GPU, or fall back to CPU.
        use_cuda = torch.cuda.is_available() and not args.no_cuda
        chosen_device = torch.device("cuda" if use_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    args.device = chosen_device

    # Logging: only ranks -1/0 log at INFO, other workers at WARN.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        chosen_device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Seed all RNGs for reproducibility.
    set_seed(args)
def load_model(args):
    """Load config, tokenizer, and model for the selected model type.

    Also records ``args.world_size`` (and ``args.rank`` when distributed) and
    moves the model to ``args.device``.

    Returns:
        (tokenizer, model, file_process_fn) where ``file_process_fn(line, i)``
        converts one raw input line into model features.
    """
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    args.output_mode = "classification"
    label_list = ["0", "1"]
    num_labels = len(label_list)
    # store args
    if args.local_rank != -1:
        args.world_size = torch.distributed.get_world_size()
        args.rank = dist.get_rank()
    else:
        args.world_size = 1
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will
        # download model & vocab
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    configObj = MSMarcoConfigDict[args.model_type]
    config = configObj.config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    config.output_hidden_states = True
    # Override the pretrained config's dropout with the CLI value.
    change_dropout_rate(config, args.dropout_rate)
    tokenizer = configObj.tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = configObj.model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will
        # download model & vocab
        torch.distributed.barrier()
    model.to(args.device)

    def file_process_fn(line, i):
        # Closure binding this run's tokenizer/args for streaming datasets.
        return configObj.process_fn(line, i, tokenizer, args)

    return tokenizer, model, file_process_fn
def change_dropout_rate(config, val):
    """Set both attention and hidden dropout probabilities on ``config`` to ``val``."""
    for attr_name in ("attention_probs_dropout_prob", "hidden_dropout_prob"):
        setattr(config, attr_name, val)
def save_checkpoint(args, model, tokenizer):
    """Persist model, tokenizer, and CLI args to ``args.output_dir``.

    Only the first worker writes; in distributed mode every worker then
    waits on a barrier so nobody races ahead of the save.
    """
    if is_first_worker():
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Unwrap DataParallel/DDP so the bare weights are saved and can be
        # reloaded later with `from_pretrained()`.
        unwrapped_model = getattr(model, "module", model)
        unwrapped_model.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Keep the training configuration next to the weights for reproducibility.
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    if args.local_rank != -1:
        dist.barrier()
def main(profiling=False):
    """End-to-end driver: parse args, build models/caches, train, save.

    Args:
        profiling: when True, caps training at 200 steps so a profiler run
            finishes quickly.
    """
    args = get_arguments()
    if profiling:
        args.max_steps = 200
    set_env(args)
    tokenizer, model, file_process_fn = load_model(args)
    # Trainable domain classifier for the adversarial objective.
    dc_model = DomainClassifier(args)
    dc_model.to(args.device)
    # Memory-mapped caches of pre-tokenized source-domain queries/passages.
    query_collection_path = os.path.join(args.data_dir, "train-query")
    query_cache = EmbeddingCache(query_collection_path)
    passage_collection_path = os.path.join(args.data_dir, "passages")
    passage_cache = EmbeddingCache(passage_collection_path)
    # Target-domain triples are streamed from this raw tsv file.
    tgd_file_name = os.path.join(args.tgd_raw_data_dir, "triples.simple.tsv")
    with query_cache, passage_cache:
        global_step = train(
            args, model, dc_model, tokenizer,
            (query_cache, passage_cache),
            tgd_file_name, file_process_fn
        )
    logger.info(" global_step = %s", global_step)
    save_checkpoint(args, model, tokenizer)
if __name__ == "__main__":
    # Flip to True to run a short (200-step) training pass under cProfile,
    # sorted by cumulative time.
    profiling = False
    if profiling:
        import cProfile
        from pstats import SortKey
        cProfile.run("main(profiling=True)", sort=SortKey.CUMULATIVE)
    else:
        main()
| 46,511 | 35.027885 | 149 | py |
modir | modir-master/utils/eval_mrr.py | import sys
sys.path += ["../"]
from utils.msmarco_eval import quality_checks_qids, compute_metrics, load_reference
import torch.distributed as dist
import gzip
import faiss
import numpy as np
from data.process_fn import dual_process_fn
from tqdm import tqdm
import torch
import os
from utils.util import concat_key, is_first_worker, all_gather, StreamingDataset
from torch.utils.data import DataLoader
def embedding_inference(args, path, model, fn, bz, num_workers=2, is_query=True):
    """Encode every record in *path* with *model* and return (embeddings, ids).

    Args:
        args: run config; must provide ``device`` and ``local_rank``.
        path: utf-8 text file with one record per line, parsed by *fn*.
        model: bi-encoder exposing ``query_emb`` / ``body_emb``.
        fn: line-processing function handed to StreamingDataset.
        bz: per-step batch size.
        num_workers: accepted for interface compatibility but not used for
            loading — the streaming file handle cannot be shared across
            DataLoader workers, so a single worker is forced.
        is_query: selects ``query_emb`` when True, else ``body_emb``.

    Returns:
        (emb_arr, id_arr): numpy arrays of stacked embeddings and their ids.
    """
    model = model.module if hasattr(model, "module") else model
    emb_list, id_list = [], []
    model.eval()
    # `with` guarantees the file is closed even if inference raises
    # (the original left the handle open on any exception).
    with open(path, encoding="utf-8") as f:
        sds = StreamingDataset(f, fn)
        loader = DataLoader(sds, batch_size=bz, num_workers=1)
        for i, batch in tqdm(enumerate(loader), desc="Eval", disable=args.local_rank not in [-1, 0]):
            # .get() so a missing DEBUG env var does not raise KeyError.
            if os.environ.get('DEBUG') == 'True' and i == 20:
                break  # debug only: cap the number of eval batches
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0].long(
                ), "attention_mask": batch[1].long()}
                idx = batch[3].long()
                if is_query:
                    embs = model.query_emb(**inputs)
                else:
                    embs = model.body_emb(**inputs)
                if len(embs.shape) == 3:
                    B, C, E = embs.shape
                    # Multi-chunk passages: flatten to [b1c1, b1c2, ..., b2c1, ...]
                    # and repeat ids so embeddings stay aligned with their ids.
                    embs = embs.view(B*C, -1)
                    idx = idx.repeat_interleave(C)
                assert embs.shape[0] == idx.shape[0]
            emb_list.append(embs.detach().cpu().numpy())
            id_list.append(idx.detach().cpu().numpy())
    emb_arr = np.concatenate(emb_list, axis=0)
    id_arr = np.concatenate(id_list, axis=0)
    return emb_arr, id_arr
def parse_top_dev(input_path, qid_col, pid_col):
    """Read a TSV of (query, passage) rows and group passage ids by query id."""
    qid_to_pids = {}
    with open(input_path, encoding="utf-8") as handle:
        for row in handle:
            fields = row.strip().split("\t")
            query_id = int(fields[qid_col])
            passage_id = int(fields[pid_col])
            qid_to_pids.setdefault(query_id, []).append(passage_id)
    return qid_to_pids
def search_knn(xq, xb, k, distance_type=faiss.METRIC_L2):
    """Exact brute-force k-NN without building a faiss index.

    Args:
        xq: (nq, d) float32 query vectors.
        xb: (nb, d) float32 base vectors.
        k: number of neighbours to return per query.
        distance_type: faiss.METRIC_L2 or faiss.METRIC_INNER_PRODUCT.

    Returns:
        (D, I): (nq, k) distances/scores and base-row indices.

    Raises:
        ValueError: for an unsupported *distance_type* (the original silently
            returned the uninitialised ``np.empty`` arrays in that case).
    """
    nq, d = xq.shape
    nb, d2 = xb.shape
    assert d == d2
    I = np.empty((nq, k), dtype='int64')
    D = np.empty((nq, k), dtype='float32')
    if distance_type == faiss.METRIC_L2:
        # L2 keeps the k smallest distances -> max-heap.
        heaps = faiss.float_maxheap_array_t()
        heaps.k = k
        heaps.nh = nq
        heaps.val = faiss.swig_ptr(D)
        heaps.ids = faiss.swig_ptr(I)
        faiss.knn_L2sqr(
            faiss.swig_ptr(xq), faiss.swig_ptr(xb),
            d, nq, nb, heaps
        )
    elif distance_type == faiss.METRIC_INNER_PRODUCT:
        # Inner product keeps the k largest scores -> min-heap.
        heaps = faiss.float_minheap_array_t()
        heaps.k = k
        heaps.nh = nq
        heaps.val = faiss.swig_ptr(D)
        heaps.ids = faiss.swig_ptr(I)
        faiss.knn_inner_product(
            faiss.swig_ptr(xq), faiss.swig_ptr(xb),
            d, nq, nb, heaps
        )
    else:
        raise ValueError("Unsupported distance_type: {}".format(distance_type))
    return D, I
def get_topk_restricted(q_emb, psg_emb_arr, pid_dict, psg_ids, pid_subset, top_k):
    """Rank only the passages in *pid_subset* against one query embedding."""
    # Map candidate pids to row positions, dropping padding (-1) and pids
    # that were never embedded.
    candidate_rows = np.array([pid_dict[pid]
                               for pid in pid_subset if pid != -1 and pid in pid_dict])
    if len(candidate_rows) == 0:
        # No valid candidates: sentinel scores/ids with the expected shape.
        dummy_scores = np.ones((top_k,)) * -128
        dummy_ids = (np.ones((top_k,)) * -1).astype(int)
        return dummy_scores, dummy_ids
    candidate_embs = psg_emb_arr[candidate_rows]
    scores, local_idx = search_knn(q_emb, candidate_embs, top_k,
                                   distance_type=faiss.METRIC_INNER_PRODUCT)
    return scores.squeeze(), psg_ids[candidate_rows[local_idx]].squeeze()  # (top_k,)
def passage_dist_eval(args, model, tokenizer, use_valid=False, vld_path=None):
    """Evaluate passage retrieval on the MSMARCO dev split (or a custom
    validation set) and return (reranking_mrr, full_ranking_mrr).

    With use_valid=False the MSMARCO file layout under args.data_dir is
    assumed, including a top1000.dev.tsv candidate list used for reranking.
    With use_valid=True, *vld_path* must point to a directory with
    collection.tsv / queries.tsv / qrels.tsv; no reranking candidates exist
    so only full ranking is meaningful.
    """
    if not use_valid:
        base_path = args.data_dir
        passage_path = os.path.join(base_path, "collection.tsv")
        queries_path = os.path.join(base_path, "queries.dev.small.tsv")
        top1000_path = os.path.join(base_path, "top1000.dev.tsv")
        mrr_ref_path = os.path.join(base_path, "qrels.dev.small.tsv")
    else:
        assert vld_path is not None
        print('Use Valid Set', vld_path)
        base_path = vld_path
        passage_path = os.path.join(base_path, 'collection.tsv')
        queries_path = os.path.join(base_path, 'queries.tsv')
        mrr_ref_path = os.path.join(base_path, "qrels.tsv")

    # Same tokenization function is used for both queries and passages.
    def fn(line, i):
        return dual_process_fn(line, i, tokenizer, args)
    if not use_valid:
        top1k_qid_pid = parse_top_dev(top1000_path, qid_col=0, pid_col=1)
    else:
        # No candidate list -> combined_dist_eval skips the reranking MRR.
        top1k_qid_pid = None
    ref_dict = load_reference(mrr_ref_path)
    print('Start evaluating')
    reranking_mrr, full_ranking_mrr = combined_dist_eval(
        args, model, queries_path, passage_path, fn, fn, top1k_qid_pid, ref_dict)
    return reranking_mrr, full_ranking_mrr
def combined_dist_eval(args, model, queries_path, passage_path,
                       query_fn, psg_fn, topk_dev_qid_pid, ref_dict):
    """Embed queries and passages, then compute reranking MRR@10 (over the
    given candidate sets, when provided) and full-ranking MRR@10.

    Returns (reranking_mrr, full_ranking_mrr); reranking_mrr is 0.0 when
    topk_dev_qid_pid is None.
    """
    # get query/psg embeddings here
    eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    query_embs, query_ids = embedding_inference(
        args, queries_path, model, query_fn, eval_batch_size, 1, True)
    query_pkl = {"emb": query_embs, "id": query_ids}
    # Every worker needs the full query set, so gather across ranks.
    all_query_list = all_gather(query_pkl)
    query_embs = concat_key(all_query_list, "emb")
    query_ids = concat_key(all_query_list, "id")
    print(query_embs.shape, query_ids.shape)
    psg_embs, psg_ids = embedding_inference(
        args, passage_path, model, psg_fn, eval_batch_size, 2, False)
    print(psg_embs.shape)

    top_k = 100
    D, I = search_knn(query_embs, psg_embs, top_k,
                      distance_type=faiss.METRIC_INNER_PRODUCT)
    I = psg_ids[I]  # map local row numbers back to passage ids

    # compute reranking and full ranking mrr here
    if topk_dev_qid_pid is not None:
        # topk_dev_qid_pid is used for computing reranking mrr
        pid_dict = dict([(p, i) for i, p in enumerate(psg_ids)])
        arr_data = []
        d_data = []
        for i, qid in enumerate(query_ids):
            q_emb = query_embs[i:i+1]
            pid_subset = topk_dev_qid_pid[qid]
            # Rank only the provided candidates, keep top 10.
            ds, top_pids = get_topk_restricted(
                q_emb, psg_embs, pid_dict, psg_ids, pid_subset, 10)
            arr_data.append(top_pids)
            d_data.append(ds)
        _D = np.array(d_data)
        _I = np.array(arr_data)
        # reranking mrr
        reranking_mrr = compute_mrr(_D, _I, query_ids, ref_dict)
    else:
        reranking_mrr = 0.0
    D2 = D[:, :100]
    I2 = I[:, :100]
    # full mrr
    full_ranking_mrr = compute_mrr(D2, I2, query_ids, ref_dict)
    del psg_embs
    torch.cuda.empty_cache()
    if args.local_rank != -1:
        dist.barrier()
    return reranking_mrr, full_ranking_mrr
def compute_mrr(D, I, qids, ref_dict):
    """Gather per-worker KNN results and compute MRR@10 on the first worker.

    D/I are this worker's (num_queries, k) score and passage-id matrices;
    gathered matrices are merged along axis=1 and re-sorted so the global
    top-10 per query is scored.  Non-first workers return 0.0.
    """
    knn_pkl = {"D": D, "I": I}
    all_knn_list = all_gather(knn_pkl)
    mrr = 0.0
    if is_first_worker():
        D_merged = concat_key(all_knn_list, "D", axis=1)
        I_merged = concat_key(all_knn_list, "I", axis=1)
        print(D_merged.shape, I_merged.shape)
        # we pad with negative pids and distance -128 - if they make it to the top we have a problem
        idx = np.argsort(D_merged, axis=1)[:, ::-1][:, :10]
        sorted_I = np.take_along_axis(I_merged, idx, axis=1)
        candidate_dict = {}
        for i, qid in enumerate(qids):
            seen_pids = set()
            if qid not in candidate_dict:
                candidate_dict[qid] = [0]*1000
            j = 0
            # Fill ranked candidates, skipping padding (-1) and duplicates.
            for pid in sorted_I[i]:
                if pid >= 0 and pid not in seen_pids:
                    candidate_dict[qid][j] = pid
                    j += 1
                    seen_pids.add(pid)
        allowed, message = quality_checks_qids(ref_dict, candidate_dict)
        if message != '':
            print(message)
        mrr_metrics = compute_metrics(ref_dict, candidate_dict)
        mrr = mrr_metrics["MRR @10"]
        print(mrr)
    return mrr
| 7,984 | 34.807175 | 100 | py |
modir | modir-master/utils/dpr_utils.py | import collections
import sys
sys.path += ['../']
import glob
import logging
import os
from typing import List, Tuple, Dict
import faiss
import pickle
import numpy as np
import unicodedata
import torch
import torch.distributed as dist
from torch import nn
from torch.serialization import default_restore_location
import regex
from transformers import AdamW
from utils.lamb import Lamb
# Module-level (root) logger shared by all helpers in this file.
logger = logging.getLogger()

# Everything persisted in a DPR-style checkpoint file.
CheckpointState = collections.namedtuple("CheckpointState",
                                         ['model_dict', 'optimizer_dict', 'scheduler_dict', 'offset', 'epoch',
                                          'encoder_params'])
def get_encoder_checkpoint_params_names():
    """Names of the encoder arguments that are persisted inside a checkpoint."""
    return [
        'do_lower_case',
        'pretrained_model_cfg',
        'encoder_model_type',
        'pretrained_file',
        'projection_dim',
        'sequence_length',
    ]
def get_encoder_params_state(args):
    """
    Selects the param values to be saved in a checkpoint, so that a trained model file can be used for downstream
    tasks without the need to specify these parameter again
    :return: Dict of params to memorize in a checkpoint
    """
    return {name: getattr(args, name)
            for name in get_encoder_checkpoint_params_names()}
def set_encoder_params_from_state(state, args):
    """Copy saved encoder params from a checkpoint *state* onto *args*.

    Only truthy values present in the checkpoint are applied; a warning is
    logged when an existing args attribute is overridden.  Returns the
    (possibly modified) args, or None when *state* is empty.
    """
    if not state:
        return
    for name in get_encoder_checkpoint_params_names():
        if name not in state or not state[name]:
            continue
        value = state[name]
        if hasattr(args, name):
            logger.warning('Overriding args parameter value from checkpoint state. Param = %s, value = %s', name,
                           value)
        setattr(args, name, value)
    return args
def get_model_obj(model: nn.Module):
    """Unwrap a (Distributed)DataParallel-style wrapper, if present."""
    return getattr(model, 'module', model)
def get_model_file(args, file_prefix) -> str:
    """Pick the checkpoint to load: the explicit args.model_file if it exists
    on disk, otherwise the most recently created matching checkpoint in
    args.output_dir, otherwise None."""
    candidates = glob.glob(os.path.join(args.output_dir, file_prefix + '*')) if args.output_dir else []
    logger.info('Checkpoint files %s', candidates)
    if args.model_file and os.path.exists(args.model_file):
        return args.model_file
    if candidates:
        return max(candidates, key=os.path.getctime)
    return None
def load_states_from_checkpoint(model_file: str) -> CheckpointState:
    """Deserialize a checkpoint file into a CheckpointState namedtuple."""
    logger.info('Reading saved model from %s', model_file)
    # Always materialize tensors on CPU so loading works without a GPU.
    state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, 'cpu'))
    logger.info('model_state_dict keys %s', state_dict.keys())
    return CheckpointState(**state_dict)
def get_optimizer(args, model: nn.Module, weight_decay: float = 0.0, ) -> torch.optim.Optimizer:
    """Build an AdamW or Lamb optimizer with weight decay disabled for biases
    and LayerNorm weights, as is conventional for transformer fine-tuning.

    Raises a generic Exception for any args.optimizer other than
    "adamW"/"lamb".
    """
    no_decay_markers = ('bias', 'LayerNorm.weight')
    decay_params, plain_params = [], []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay_markers):
            plain_params.append(param)
        else:
            decay_params.append(param)
    grouped_parameters = [
        {'params': decay_params, 'weight_decay': weight_decay},
        {'params': plain_params, 'weight_decay': 0.0},
    ]
    if args.optimizer == "adamW":
        return AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    if args.optimizer == "lamb":
        return Lamb(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    raise Exception("optimizer {0} not recognized! Can only be lamb or adamW".format(args.optimizer))
def all_gather_list(data, group=None, max_size=16384):
    """Gathers arbitrary data from all nodes into a list.
    Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
    data. Note that *data* must be picklable.
    Args:
        data (Any): data from the local worker to be gathered on other workers
        group (optional): group of the collective
    """
    SIZE_STORAGE_BYTES = 4  # int32 to encode the payload size

    enc = pickle.dumps(data)
    enc_size = len(enc)

    if enc_size + SIZE_STORAGE_BYTES > max_size:
        raise ValueError(
            'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))

    rank = dist.get_rank()
    world_size = dist.get_world_size()
    buffer_size = max_size * world_size

    # Buffers are cached as attributes on the function object itself so they
    # are allocated once per process, not once per call.
    if not hasattr(all_gather_list, '_buffer') or \
            all_gather_list._buffer.numel() < buffer_size:
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()

    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer

    assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(
        256 ** SIZE_STORAGE_BYTES)

    # Per-rank slot layout: [4-byte big-endian size][pickled payload].
    size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')
    cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))
    cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))

    start = rank * max_size
    size = enc_size + SIZE_STORAGE_BYTES
    buffer[start: start + size].copy_(cpu_buffer[:size])

    if group is None:
        group = dist.group.WORLD
    # Each rank wrote only its own slot of the zeroed buffer, so a sum-reduce
    # effectively concatenates all slots into every rank's buffer.
    dist.all_reduce(buffer, group=group)

    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[i * max_size: (i + 1) * max_size]
            size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')
            if size > 0:
                result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception(
            'Unable to unpickle data from other workers. all_gather_list requires all '
            'workers to enter the function together, so this error usually indicates '
            'that the workers have fallen out of sync somehow. Workers can fall out of '
            'sync if one of them runs out of memory, or if there are other conditions '
            'in your training script that can cause one worker to finish an epoch '
            'while other workers are still iterating over their portions of the data.'
        )
class DenseHNSWFlatIndexer(object):
    """
    Efficient index for retrieval. Note: default settings are for hugh accuracy but also high RAM usage
    """

    def __init__(self, vector_sz: int, buffer_size: int = 50000, store_n: int = 512
                 , ef_search: int = 128, ef_construction: int = 200):
        self.buffer_size = buffer_size
        self.index_id_to_db_id = []  # maps faiss row number -> external db id
        self.index = None

        # IndexHNSWFlat supports L2 similarity only
        # so we have to apply DOT -> L2 similairy space conversion with the help of an extra dimension
        index = faiss.IndexHNSWFlat(vector_sz + 1, store_n)
        index.hnsw.efSearch = ef_search
        index.hnsw.efConstruction = ef_construction
        self.index = index
        self.phi = 0

    def index_data(self, data: List[Tuple[object, np.array]]):
        """Index (id, vector) pairs; intended to be called once with all data."""
        n = len(data)

        # max norm is required before putting all vectors in the index to convert inner product similarity to L2
        if self.phi > 0:
            raise RuntimeError('DPR HNSWF index needs to index all data at once,'
                               'results will be unpredictable otherwise.')
        phi = 0
        for i, item in enumerate(data):
            id, doc_vector = item
            norms = (doc_vector ** 2).sum()
            phi = max(phi, norms)
        logger.info('HNSWF DotProduct -> L2 space phi={}'.format(phi))
        # NOTE(review): this looks like it should be `self.phi = phi`; as
        # written the "index all data at once" guard above can never fire —
        # confirm against upstream DPR before changing.
        self.phi = 0

        # indexing in batches is beneficial for many faiss index types
        for i in range(0, n, self.buffer_size):
            db_ids = [t[0] for t in data[i:i + self.buffer_size]]
            vectors = [np.reshape(t[1], (1, -1)) for t in data[i:i + self.buffer_size]]
            # Augment each vector with sqrt(phi - |v|^2) so L2 distance on the
            # augmented vectors ranks like inner product on the originals.
            norms = [(doc_vector ** 2).sum() for doc_vector in vectors]
            aux_dims = [np.sqrt(phi - norm) for norm in norms]
            hnsw_vectors = [np.hstack((doc_vector, aux_dims[i].reshape(-1, 1))) for i, doc_vector in
                            enumerate(vectors)]
            hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)

            self._update_id_mapping(db_ids)
            self.index.add(hnsw_vectors)
            logger.info('data indexed %d', len(self.index_id_to_db_id))
        indexed_cnt = len(self.index_id_to_db_id)
        logger.info('Total data indexed %d', indexed_cnt)

    def search_knn(self, query_vectors: np.array, top_docs: int) -> List[Tuple[List[object], List[float]]]:
        """Search; queries get a zero auxiliary dimension before lookup."""
        aux_dim = np.zeros(len(query_vectors), dtype='float32')
        query_nhsw_vectors = np.hstack((query_vectors, aux_dim.reshape(-1, 1)))
        logger.info('query_hnsw_vectors %s', query_nhsw_vectors.shape)
        scores, indexes = self.index.search(query_nhsw_vectors, top_docs)
        # convert to external ids
        db_ids = [[self.index_id_to_db_id[i] for i in query_top_idxs] for query_top_idxs in indexes]
        result = [(db_ids[i], scores[i]) for i in range(len(db_ids))]
        return result

    def _update_id_mapping(self, db_ids: List):
        # Rows are appended to the faiss index in the same order, so simple
        # extension keeps the row -> db-id mapping consistent.
        self.index_id_to_db_id.extend(db_ids)
def check_answer(passages, answers, doc_ids, tokenizer):
    """Search through all the top docs to see if they have any of the answers."""
    return [has_answer(answers, passages[doc_id][0], tokenizer)
            for doc_id in doc_ids]
def has_answer(answers, text, tokenizer):
    """Check if a document contains an answer string.

    The document and each candidate answer are NFD-normalized, tokenized
    and lower-cased; we then look for the answer as a contiguous token
    subsequence of the document.
    """
    if text is None:
        logger.warning("no doc in db")
        return False

    doc_tokens = tokenizer.tokenize(_normalize(text)).words(uncased=True)
    for candidate in answers:
        answer_tokens = tokenizer.tokenize(_normalize(candidate)).words(uncased=True)
        span = len(answer_tokens)
        for start in range(0, len(doc_tokens) - span + 1):
            if doc_tokens[start: start + span] == answer_tokens:
                return True
    return False
class SimpleTokenizer:
    # A token is either a run of letters/digits/combining marks, or any
    # single non-whitespace, non-control character (punctuation etc.).
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
    NON_WS = r'[^\p{Z}\p{C}]'

    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        self._regexp = regex.compile(
            '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()

    def tokenize(self, text):
        """Split *text* into a Tokens object of (token, token+trailing-ws, span)."""
        data = []
        matches = [m for m in self._regexp.finditer(text)]
        for i in range(len(matches)):
            # Get text
            token = matches[i].group()

            # Get whitespace
            span = matches[i].span()
            start_ws = span[0]
            if i + 1 < len(matches):
                # Attach everything up to the next token's start (whitespace).
                end_ws = matches[i + 1].span()[0]
            else:
                end_ws = span[1]

            # Format data
            data.append((
                token,
                text[start_ws: end_ws],
                span,
            ))
        return Tokens(data, self.annotators)
def _normalize(text):
    # NFD canonical decomposition so accented characters compare consistently.
    return unicodedata.normalize('NFD', text)
class Tokens(object):
    """A list of tokenized text.

    Each element of *data* is a tuple; the class-level constants below name
    the tuple slots (only TEXT/TEXT_WS/SPAN are produced by SimpleTokenizer).
    """
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5

    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = opts or {}

    def __len__(self):
        """The number of tokens."""
        return len(self.data)

    def words(self, uncased=False):
        """Return the text of each token; lower-cased when *uncased* is True."""
        if uncased:
            extract = lambda token: token[self.TEXT].lower()
        else:
            extract = lambda token: token[self.TEXT]
        return [extract(token) for token in self.data]
| 12,483 | 35.934911 | 118 | py |
modir | modir-master/utils/modir_utils.py | import os
import sys
import csv
import numpy as np
import faiss
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
try:
from apex import amp
except ImportError:
print("apex not imported")
from utils.util import (
is_first_worker,
StreamingDataset,
EmbeddingCache,
)
from model.domain_classifier import DomainClassifier, dry_dc_evaluation
from drivers.run_ann_data_gen import StreamInferenceDoc, EvalDevQuery
from data.msmarco_data import GetProcessingFn
import logging
logger = logging.getLogger(__name__)
def compute_total_grad_L2_norm(param_list):
    """Return the global L2 norm (a Python float) of all gradients in
    *param_list*.

    Parameters without a gradient are skipped.  Returns 0.0 when no
    parameter has a gradient — the original crashed there with
    AttributeError, because the accumulator was still a plain float and
    ``float`` has no ``.item()``.
    """
    total_sq = 0.0
    has_grad = False
    for p in param_list:
        if p.grad is not None:
            has_grad = True
            total_sq += torch.norm(p.grad) ** 2
    if not has_grad:
        return 0.0
    return (total_sq ** 0.5).item()
def intrain_dev_eval(args, global_step, model, tb_writer, prev_dry_dc_state_dict,
                     all_datasets=False):
    """Mid-training dev evaluation on the first two in-train dev datasets.

    Runs retrieval evaluation in fp32 (amp casts disabled), dumps the
    query/passage embeddings of both datasets to args.saved_embedding_dir,
    trains a throwaway "dry" domain classifier on the two embedding sets,
    and logs its accuracies plus the retrieval metrics to tensorboard.
    Returns the updated dry-classifier state dict.

    NOTE(review): `all_datasets` is currently unused — only the first two
    entries of args.intraindev_data_name are evaluated.
    """
    model.eval()
    query_embs = []
    passage_embs = []
    intraindev_data_name = args.intraindev_data_name[:2]
    with amp.disable_casts():  # back to fp32
        for i_dev, data_name in enumerate(intraindev_data_name):
            data_path = args.intraindev_data_dir[i_dev]
            dev_evaluation_results = dev_evaluation(
                args, data_path, model, return_embs=True)
            if is_first_worker():
                # query/passage embs obtained by the first worker
                # are actually generated by all workers
                # see the implementation of StreamInferenceDoc()
                dev_result_dict, (query_emb, passage_emb, query_emb2id, passage_emb2id) = dev_evaluation_results
                if i_dev <= 1:
                    query_embs.append(query_emb)
                    passage_embs.append(passage_emb)
                    # Persist embeddings and their id maps for offline analysis.
                    np.save(
                        os.path.join(args.saved_embedding_dir, f"{data_name}_query-step{global_step}.npy"),
                        query_emb
                    )
                    np.save(
                        os.path.join(args.saved_embedding_dir, f"{data_name}_passage-step{global_step}.npy"),
                        passage_emb
                    )
                    np.save(
                        os.path.join(args.saved_embedding_dir, f"{data_name}_query2id-step{global_step}.npy"),
                        query_emb2id
                    )
                    np.save(
                        os.path.join(args.saved_embedding_dir, f"{data_name}_passage2id-step{global_step}.npy"),
                        passage_emb2id
                    )
                if i_dev == 1:
                    # Both embedding sets collected: probe domain separability
                    # with a freshly initialized ("dry") classifier.
                    dry_dc_model = DomainClassifier(args)
                    dry_dc_model.to(args.device)
                    dry_dc_acc, prev_dry_dc_acc, prev_dry_dc_state_dict = dry_dc_evaluation(
                        args, dry_dc_model, query_embs, passage_embs, prev_dry_dc_state_dict)
                    tb_writer.add_scalar("dc_acc_dry_Q", float(dry_dc_acc[0]), global_step)
                    tb_writer.add_scalar("dc_acc_dry_P", float(dry_dc_acc[1]), global_step)
                    if prev_dry_dc_acc[0] is not None:
                        tb_writer.add_scalar("dc_acc_prev_dry_Q", float(prev_dry_dc_acc[0]), global_step)
                        tb_writer.add_scalar("dc_acc_prev_dry_P", float(prev_dry_dc_acc[1]), global_step)
                    del dry_dc_model
                    torch.cuda.empty_cache()
                print(data_name, dev_result_dict)
                for k, v in dev_result_dict.items():
                    tb_writer.add_scalar(f"{data_name}-{k}", v, global_step)
            if args.local_rank != -1:
                dist.barrier()
    model.train()
    return prev_dry_dc_state_dict
def intrain_save_checkpoint(args, global_step,
                            model, tokenizer, optimizer, scheduler):
    """Save model/tokenizer/optimizer/scheduler under
    ``{output_dir}/checkpoint-{global_step}`` from the first worker only,
    then barrier so other ranks wait for the save to finish.
    """
    if is_first_worker():
        # identical with the one from original_drivers/run_ann
        output_dir = os.path.join(
            args.output_dir, "checkpoint-{}".format(global_step))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(output_dir)
        tokenizer.save_pretrained(output_dir)
        torch.save(args, os.path.join(output_dir, "training_args.bin"))
        logger.info("Saving model checkpoint to %s", output_dir)
        torch.save(
            optimizer.state_dict(),
            os.path.join(
                output_dir,
                "optimizer.pt"))
        torch.save(
            scheduler.state_dict(),
            os.path.join(
                output_dir,
                "scheduler.pt"))
        logger.info(
            "Saving optimizer and scheduler states to %s",
            output_dir)
    if args.local_rank != -1:
        dist.barrier()
def build_input_from_batch(args, batch, mode='full', triplet=False):
    """Map a collated batch to model keyword arguments.

    Batches coming from ANN triplet data (``triplet=True``) store the query /
    positive / negative segments at tensor slots (0,1), (3,4), (6,7); raw-data
    batches use (0,1), (2,3), (4,5).  ``mode`` selects one segment ('query',
    'pos_doc', 'neg_doc') or all three ('full'; any other mode also yields the
    full dict when triplet=True).  *args* is unused.
    """
    if triplet:
        pair_slots = {'query': (0, 1), 'pos_doc': (3, 4), 'neg_doc': (6, 7)}
        full_slots = (0, 1, 3, 4, 6, 7)
    else:
        pair_slots = {'query': (0, 1), 'pos_doc': (2, 3), 'neg_doc': (4, 5)}
        full_slots = (0, 1, 2, 3, 4, 5)

    if mode in pair_slots:
        ids_slot, mask_slot = pair_slots[mode]
        inputs = {
            "input_ids": batch[ids_slot].long(),
            "attention_mask": batch[mask_slot].long(),
        }
    elif triplet or mode == 'full':
        q, qm, a, am, b, bm = full_slots
        inputs = {
            "query_ids": batch[q].long(),
            "attention_mask_q": batch[qm].long(),
            "input_ids_a": batch[a].long(),
            "attention_mask_a": batch[am].long(),
            "input_ids_b": batch[b].long(),
            "attention_mask_b": batch[bm].long(),
        }
    # As in the original, a non-triplet batch with an unknown mode falls
    # through with `inputs` unbound and raises UnboundLocalError here.
    return inputs
def get_module(model):
    """Unwrap a (Distributed)DataParallel-style wrapper, if present."""
    return getattr(model, "module", model)
def build_dl_iter_from_file(args, file_obj, process_fn):
    """Rewind *file_obj* and build a fresh streaming DataLoader plus iterator."""
    file_obj.seek(0)
    dataloader = DataLoader(StreamingDataset(file_obj, process_fn),
                            batch_size=args.per_gpu_train_batch_size,
                            num_workers=0)
    return dataloader, iter(dataloader)
def get_next(iterator, args, file_obj, process_fn, batch_size):
    """Fetch the next full-size batch, restarting the stream on exhaustion.

    A short (trailing, partial) batch is treated like exhaustion, so callers
    always receive batches of exactly *batch_size* rows; the (possibly new)
    iterator is returned alongside the batch.
    """
    try:
        batch = next(iterator)
    except StopIteration:
        batch = None
    if batch is None or batch[0].shape[0] != batch_size:
        # Stream ran dry or produced a partial batch: rebuild and retry.
        _, iterator = build_dl_iter_from_file(args, file_obj, process_fn)
        batch = next(iterator)
    return batch, iterator
def dev_evaluation(args, data_path, model,
                   return_embs=False):
    """Evaluate retrieval on the dev split stored under *data_path*.

    Reads dev-qrel.tsv plus the cached pre-tokenized dev-query /
    dev-passages files, embeds both with *model*, builds a flat
    inner-product faiss index and scores top-100 retrieval.  Only the first
    worker computes and returns the metrics dict (optionally with the
    embeddings and their id maps when return_embs=True); other workers
    return None implicitly.

    NOTE(review): temporarily mutates args.max_seq_length /
    args.max_query_length and restores them afterwards; also sets args.rank.
    """
    logger.info("Loading dev query_2_pos_docid")
    dev_query_positive_id = {}
    query_positive_id_path = os.path.join(data_path, "dev-qrel.tsv")

    with open(query_positive_id_path, 'r', encoding='utf8') as f:
        tsvreader = csv.reader(f, delimiter="\t")
        for [topicid, docid, rel] in tsvreader:
            topicid = int(topicid)
            docid = int(docid)
            if topicid not in dev_query_positive_id:
                dev_query_positive_id[topicid] = {}
            # Negative relevance labels are clamped to 0.
            dev_query_positive_id[topicid][docid] = max(0, int(rel))

    old_max_seq_length = args.max_seq_length
    args.max_seq_length = 512  # otherwise it crashes
    args.rank = args.local_rank
    old_max_query_length = args.max_query_length
    if 'arguana' in data_path:
        # ArguAna queries are full documents, so allow the full length.
        args.max_query_length = 512

    dev_tmp_ann_data_dir = "../dev_tmp_ann_data"
    os.makedirs(dev_tmp_ann_data_dir, exist_ok=True)
    logger.info("***** inference of dev query *****")
    dev_query_collection_path = os.path.join(data_path, "dev-query")
    dev_query_cache = EmbeddingCache(dev_query_collection_path)
    with dev_query_cache as emb:
        dev_query_embedding, dev_query_embedding2id = StreamInferenceDoc(
            args,
            model,
            GetProcessingFn(args, query=True),
            "dev_query_0_",
            emb,
            output_path=dev_tmp_ann_data_dir,
            is_query_inference=True)

    logger.info("***** inference of dev passages *****")
    dev_passage_collection_path = os.path.join(data_path, "dev-passages")
    dev_passage_cache = EmbeddingCache(dev_passage_collection_path)
    with dev_passage_cache as emb:
        dev_passage_embedding, dev_passage_embedding2id = StreamInferenceDoc(
            args,
            model,
            GetProcessingFn(args, query=False),
            "dev_passage_0_",
            emb,
            output_path=dev_tmp_ann_data_dir,
            is_query_inference=False)
    # Restore the lengths mutated above.
    args.max_seq_length = old_max_seq_length
    args.max_query_length = old_max_query_length
    torch.cuda.empty_cache()

    if is_first_worker():
        # ANN search for dev passages and dev queries
        dev_dim = dev_passage_embedding.shape[1]
        print('dev passage embedding shape: ' + str(dev_passage_embedding.shape))
        faiss.omp_set_num_threads(16)
        dev_cpu_index = faiss.IndexFlatIP(dev_dim)
        dev_cpu_index.add(dev_passage_embedding)
        logger.info("***** Done Dev ANN Index *****")
        _, dev_I = dev_cpu_index.search(dev_query_embedding, 100)  # I: [number of queries, topk]
        result_dict, num_queries_dev = EvalDevQuery(
            args, dev_query_embedding2id, dev_passage_embedding2id,
            dev_query_positive_id, dev_I)
        if return_embs:
            return (result_dict,
                    (dev_query_embedding, dev_passage_embedding, dev_query_embedding2id, dev_passage_embedding2id))
        else:
            return result_dict
| 10,586 | 36.676157 | 115 | py |
modir | modir-master/utils/util.py | import sys
sys.path += ['../']
import pandas as pd
from sklearn.metrics import roc_curve, auc
import gzip
import copy
import torch
from torch import nn
import torch.distributed as dist
from tqdm import tqdm, trange
import os
from os import listdir
from os.path import isfile, join
import json
import logging
import random
import pytrec_eval
import pickle
import numpy as np
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from multiprocessing import Process
from torch.utils.data import DataLoader, Dataset, TensorDataset, IterableDataset
import re
from model.models import MSMarcoConfigDict, ALL_MODELS
from typing import List, Set, Dict, Tuple, Callable, Iterable, Any
logger = logging.getLogger(__name__)
class InputFeaturesPair(object):
    """
    A single set of features of data.
    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        label: Label corresponding to the input
    """

    def __init__(self, input_ids_a, attention_mask_a=None, token_type_ids_a=None,
                 input_ids_b=None, attention_mask_b=None, token_type_ids_b=None,
                 label=None):
        # Side "a" features.
        self.input_ids_a = input_ids_a
        self.attention_mask_a = attention_mask_a
        self.token_type_ids_a = token_type_ids_a
        # Side "b" features.
        self.input_ids_b = input_ids_b
        self.attention_mask_b = attention_mask_b
        self.token_type_ids_b = token_type_ids_b
        self.label = label

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def getattr_recursive(obj, name):
    """Follow a dotted attribute path; return None if any link is missing."""
    current = obj
    for attr in name.split("."):
        if not hasattr(current, attr):
            return None
        current = getattr(current, attr)
    return current
def barrier_array_merge(
        args,
        data_array,
        merge_axis=0,
        prefix="",
        output_path=None,
        load_cache=False,
        only_load_in_master=False):
    """Merge per-worker numpy arrays across ranks via pickle files on a
    shared filesystem.

    Each rank dumps its array to '{prefix}_data_obj_{rank}.pb' under
    *output_path*; after a barrier, every rank (or only rank 0 when
    only_load_in_master=True, with other ranks returning None) reads back
    all pieces and concatenates them along *merge_axis*.  With
    load_cache=True the dump step is skipped and existing files are read.
    In non-distributed mode the input array is returned unchanged.
    """
    # data array: [B, any dimension]
    # merge alone one axis
    assert output_path is not None

    if args.local_rank == -1:
        return data_array

    if not load_cache:
        rank = args.rank
        if is_first_worker():
            if not os.path.exists(output_path):
                os.makedirs(output_path)

        dist.barrier()  # directory created
        pickle_path = os.path.join(
            output_path,
            "{1}_data_obj_{0}.pb".format(
                str(rank),
                prefix))
        with open(pickle_path, 'wb') as handle:
            pickle.dump(data_array, handle, protocol=4)

        # make sure all processes wrote their data before first process
        # collects it
        dist.barrier()

    # Free the local array before loading everyone's pieces.
    data_array = None

    data_list = []

    # return empty data
    if only_load_in_master:
        if not is_first_worker():
            dist.barrier()
            return None

    for i in range(
            args.world_size):  # TODO: dynamically find the max instead of HardCode
        pickle_path = os.path.join(
            output_path,
            "{1}_data_obj_{0}.pb".format(
                str(i),
                prefix))
        try:
            with open(pickle_path, 'rb') as handle:
                b = pickle.load(handle)
                data_list.append(b)
        except BaseException:
            # Missing/partial shard files are skipped silently.
            continue

    data_array_agg = np.concatenate(data_list, axis=merge_axis)
    dist.barrier()
    return data_array_agg
def pad_input_ids(input_ids, max_length,
                  pad_on_left=False,
                  pad_token=0):
    """Pad (or truncate) a token-id list to exactly *max_length* entries."""
    if len(input_ids) >= max_length:
        return input_ids[:max_length]
    padding = [pad_token] * (max_length - len(input_ids))
    return padding + input_ids if pad_on_left else input_ids + padding
def pad_ids(input_ids, attention_mask, token_type_ids, max_length,
            pad_on_left=False,
            pad_token=0,
            pad_token_segment_id=0,
            mask_padding_with_zero=True):
    """Pad or truncate the three parallel sequences to *max_length*.

    Returns the (input_ids, attention_mask, token_type_ids) triple; the
    attention padding value is 0 when *mask_padding_with_zero*, else 1.
    """
    pad_count = max_length - len(input_ids)
    if pad_count <= 0:
        return (input_ids[:max_length],
                attention_mask[:max_length],
                token_type_ids[:max_length])
    id_pad = [pad_token] * pad_count
    seg_pad = [pad_token_segment_id] * pad_count
    mask_pad = [0 if mask_padding_with_zero else 1] * pad_count
    if pad_on_left:
        return (id_pad + input_ids,
                mask_pad + attention_mask,
                seg_pad + token_type_ids)
    return (input_ids + id_pad,
            attention_mask + mask_pad,
            token_type_ids + seg_pad)
# to reuse pytrec_eval, id must be string
def convert_to_string_id(result_dict):
    """Convert both the outer and inner keys of a nested dict to strings."""
    return {str(outer_key): {str(inner_key): score
                             for inner_key, score in inner.items()}
            for outer_key, inner in result_dict.items()}
def set_seed(args):
    """Seed python, numpy and torch RNGs (and all CUDA devices when GPUs
    are in use) from args.seed for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def is_first_worker():
    """True when not running distributed, or when this process is rank 0."""
    if not dist.is_available() or not dist.is_initialized():
        return True
    return dist.get_rank() == 0
def concat_key(all_list, key, axis=0):
    """Concatenate the *key* entry of every dict in *all_list* along *axis*."""
    arrays = [element[key] for element in all_list]
    return np.concatenate(arrays, axis=axis)
def get_checkpoint_no(checkpoint_path):
    """Extract the trailing number from a checkpoint path (0 if none)."""
    matches = re.findall(r'\d+', checkpoint_path)
    return int(matches[-1]) if matches else 0
def get_latest_ann_data(ann_data_path):
    """Locate the newest ANN training-data generation in *ann_data_path*.

    Returns:
        (data_no, data_path, ndcg_json): the highest generation number found,
        the path to its 'ann_training_data_{n}' file, and the parsed
        'ann_ndcg_{n}' json for that generation (None when that file is
        missing).  Returns (-1, None, None) when the directory does not
        exist or contains no training-data files.
    """
    # Plain literal (the original used an f-string with no placeholders);
    # reused below instead of repeating the string.
    ANN_PREFIX = "ann_training_data_"
    if not os.path.exists(ann_data_path):
        return -1, None, None
    files = list(next(os.walk(ann_data_path))[2])
    num_start_pos = len(ANN_PREFIX)
    data_no_list = [int(s[num_start_pos:])
                    for s in files if s[:num_start_pos] == ANN_PREFIX]
    if len(data_no_list) > 0:
        data_no = max(data_no_list)
        try:
            with open(os.path.join(ann_data_path, "ann_ndcg_" + str(data_no)), 'r') as f:
                ndcg_json = json.load(f)
        except FileNotFoundError:
            # ndcg file may lag behind the data file; treat as "not yet there".
            ndcg_json = None
        return data_no, os.path.join(
            ann_data_path, ANN_PREFIX + str(data_no)), ndcg_json
    return -1, None, None
def numbered_byte_file_generator(base_path, file_no, record_size):
    """Yield fixed-size binary records from '{base_path}_split0..N-1' in order."""
    for split_idx in range(file_no):
        with open('{}_split{}'.format(base_path, split_idx), 'rb') as split_file:
            while True:
                record = split_file.read(record_size)
                if not record:
                    break  # end of this split file
                yield record
class EmbeddingCache:
    """Random/sequential access to fixed-size records of pre-tokenized data.

    The binary file at *base_path* holds ``total_number`` records of
    ``record_size`` bytes each: a 4-byte big-endian length header followed
    by the payload, whose dtype/width is described in the sibling
    '{base_path}_meta' json file.  With ``seed >= 0`` iteration visits the
    records in a deterministic random permutation.
    """

    def __init__(self, base_path, seed=-1):
        self.base_path = base_path
        with open(base_path + '_meta', 'r') as f:
            meta = json.load(f)
        self.dtype = np.dtype(meta['type'])
        self.total_number = meta['total_number']
        # payload bytes + 4-byte length header
        self.record_size = int(
            meta['embedding_size']) * self.dtype.itemsize + 4
        if seed >= 0:
            self.ix_array = np.random.RandomState(
                seed).permutation(self.total_number)
        else:
            self.ix_array = np.arange(self.total_number)
        self.f = None

    def open(self):
        self.f = open(self.base_path, 'rb')

    def close(self):
        self.f.close()

    def read_single_record(self):
        """Read one record at the current file position."""
        record_bytes = self.f.read(self.record_size)
        passage_len = int.from_bytes(record_bytes[:4], 'big')
        passage = np.frombuffer(record_bytes[4:], dtype=self.dtype)
        return passage_len, passage

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __getitem__(self, key):
        # Bug fix: valid keys are [0, total_number).  The original used
        # `key > self.total_number`, so key == total_number slipped through
        # and silently produced an empty record read past EOF.
        if key < 0 or key >= self.total_number:
            raise IndexError(
                "Index {} is out of bound for cached embeddings of size {}".format(
                    key, self.total_number))
        self.f.seek(key * self.record_size)
        return self.read_single_record()

    def __iter__(self):
        self.f.seek(0)
        for i in range(self.total_number):
            new_ix = self.ix_array[i]
            yield self.__getitem__(new_ix)

    def __len__(self):
        return self.total_number
class StreamingDataset(IterableDataset):
    """Iterable dataset that maps `fn` over `elements`, sharding elements
    round-robin across ranks when torch.distributed is initialized and
    `distributed` is True."""

    def __init__(self, elements, fn, distributed=True):
        super().__init__()
        self.elements = elements
        self.fn = fn
        self.num_replicas = -1  # resolved lazily in __iter__
        self.distributed = distributed

    def __iter__(self):
        if dist.is_initialized():
            self.num_replicas = dist.get_world_size()
            self.rank = dist.get_rank()
        # else: single-process mode; num_replicas stays -1 so no sharding
        # (previously logged "Not running in distributed mode")
        for idx, element in enumerate(self.elements):
            sharded = self.distributed and self.num_replicas != -1
            if sharded and idx % self.num_replicas != self.rank:
                continue  # this element belongs to another rank
            for record in self.fn(element, idx):
                yield record
def tokenize_to_file(args, i, num_process, in_path, out_path, line_fn):
    """Worker body: tokenize every `num_process`-th line (offset `i`) of
    `in_path` with `line_fn` and append the packed bytes to
    '<out_path>_split{i}'. Intended to run in a separate process."""
    configObj = MSMarcoConfigDict[args.model_type]
    tokenizer = configObj.tokenizer_class.from_pretrained(
        args.model_name_or_path,
        do_lower_case=True,
        cache_dir=None,
    )
    # Transparently handle gzip-compressed input files.
    if in_path[-2:] == "gz":
        in_f = gzip.open(in_path, 'rt', encoding='utf8')
    else:
        in_f = open(in_path, 'r', encoding='utf-8')
    with in_f, open('{}_split{}'.format(out_path, i), 'wb') as out_f:
        for line_no, line in enumerate(in_f):
            if line_no % num_process != i:
                continue  # line assigned to a different worker
            out_f.write(line_fn(args, line, tokenizer))
def multi_file_process(args, num_process, in_path, out_path, line_fn):
    """Fan tokenize_to_file out over `num_process` worker processes and block
    until every worker has finished."""
    workers = []
    for worker_id in range(num_process):
        worker = Process(
            target=tokenize_to_file,
            args=(args, worker_id, num_process, in_path, out_path, line_fn))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    # Single process (or distributed never set up): nothing to exchange.
    if not dist.is_initialized() or dist.get_world_size() == 1:
        return [data]
    world_size = dist.get_world_size()
    # Serialize the payload into a byte tensor on the GPU.
    payload = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(payload)
    tensor = torch.ByteTensor(storage).to("cuda")
    # Exchange payload sizes so every rank learns the largest one.
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # torch all_gather requires identical shapes on every rank, so pad each
    # payload up to max_size before the collective call.
    tensor_list = [torch.ByteTensor(size=(max_size,)).to("cuda")
                   for _ in size_list]
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Strip the padding and unpickle each rank's payload.
    gathered = []
    for size, buf in zip(size_list, tensor_list):
        raw = buf.cpu().numpy().tobytes()[:size]
        gathered.append(pickle.loads(raw))
    return gathered
| 12,596 | 29.354217 | 126 | py |
modir | modir-master/utils/lamb.py | """Lamb optimizer."""
import collections
import math
import torch
from tensorboardX import SummaryWriter
from torch.optim import Optimizer
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
    """Log a histogram of trust ratio scalars in across layers."""
    stats = collections.defaultdict(list)
    for group in optimizer.param_groups:
        for param in group['params']:
            param_state = optimizer.state[param]
            # Only parameters the optimizer has already stepped carry these.
            for stat_name in ('weight_norm', 'adam_norm', 'trust_ratio'):
                if stat_name in param_state:
                    stats[stat_name].append(param_state[stat_name])
    for stat_name, values in stats.items():
        event_writer.add_histogram(f'lamb/{stat_name}', torch.tensor(values), token_count)
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.
    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.
    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
                 weight_decay=0, adam=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    # (typo "instad" fixed)
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Uses the keyword `alpha=` / `value=` overloads: the positional
                # `add_(scalar, tensor)` / `addcmul_(scalar, t1, t2)` forms are
                # deprecated and removed in modern PyTorch.
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # Paper v3 does not use debiasing.
                # Apply bias to lr to avoid broadcast.
                step_size = group['lr']  # * math.sqrt(bias_correction2) / bias_correction1
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])
                adam_norm = adam_step.pow(2).sum().sqrt()
                # trust_ratio scales the Adam step by |w| / |update|.
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1
                p.data.add_(adam_step, alpha=-step_size * trust_ratio)
        return loss
| 4,887 | 38.419355 | 109 | py |
modir | modir-master/data/process_fn.py | import torch
def pad_ids(input_ids, attention_mask, token_type_ids, max_length, pad_token, mask_padding_with_zero, pad_token_segment_id, pad_on_left=False):
    """Pad the three parallel id lists out to `max_length`.

    Padding goes on the right by default, on the left when `pad_on_left`.
    Mask padding is 0 when `mask_padding_with_zero`, else 1.
    """
    pad_count = max_length - len(input_ids)
    id_pad = [pad_token] * pad_count
    mask_pad = [0 if mask_padding_with_zero else 1] * pad_count
    segment_pad = [pad_token_segment_id] * pad_count
    if pad_on_left:
        input_ids = id_pad + input_ids
        attention_mask = mask_pad + attention_mask
        token_type_ids = segment_pad + token_type_ids
    else:
        # `+=` keeps the original in-place extension of the caller's lists.
        input_ids += id_pad
        attention_mask += mask_pad
        token_type_ids += segment_pad
    return input_ids, attention_mask, token_type_ids
def dual_process_fn(line, i, tokenizer, args):
    """Tokenize one tab-separated "<qid> <text>" line for the dual encoder.

    Returns [[input_ids, attention_mask, token_type_ids, qid]] with the id
    tensors padded to args.max_seq_length.
    """
    cells = line.split("\t")
    if len(cells) != 2:
        raise Exception(
            "Line doesn't have correct length: {0}. Expected 2.".format(str(len(cells))))
    # this is for training and validation
    # id, passage = line
    mask_padding_with_zero = True
    pad_token_segment_id = 0
    pad_on_left = False
    text = cells[1].strip()
    token_ids = tokenizer.encode(
        text, add_special_tokens=True, max_length=args.max_seq_length,)
    segment_ids = [0] * len(token_ids)
    mask = [1 if mask_padding_with_zero else 0] * len(token_ids)
    token_ids, mask, segment_ids = pad_ids(
        token_ids, mask, segment_ids, args.max_seq_length,
        tokenizer.pad_token_id, mask_padding_with_zero,
        pad_token_segment_id, pad_on_left)
    features = [torch.tensor(token_ids, dtype=torch.int),
                torch.tensor(mask, dtype=torch.bool),
                torch.tensor(segment_ids, dtype=torch.uint8),
                int(cells[0])]
    return [features]
def triple_process_fn(line, i, tokenizer, args):
    """Tokenize one tab-separated "query / positive / negative" training line.

    Each of the three texts yields (input_ids, attention_mask) tensors; the
    query is padded to args.max_query_length, passages to args.max_seq_length.
    """
    cells = line.split("\t")
    if len(cells) != 3:
        raise Exception(
            "Line doesn't have correct length: {0}. Expected 3.".format(str(len(cells))))
    # this is for training and validation
    # query, positive_passage, negative_passage = line
    mask_padding_with_zero = True
    pad_token_segment_id = 0
    pad_on_left = False
    features = []
    for col, text in enumerate(cells):
        # Column 0 is the query; columns 1-2 are passages.
        max_len = args.max_query_length if col == 0 else args.max_seq_length
        token_ids = tokenizer.encode(
            text.strip(), add_special_tokens=True, max_length=max_len,)
        segment_ids = [0] * len(token_ids)
        mask = [1 if mask_padding_with_zero else 0] * len(token_ids)
        token_ids, mask, segment_ids = pad_ids(
            token_ids, mask, segment_ids, max_len, tokenizer.pad_token_id,
            mask_padding_with_zero, pad_token_segment_id, pad_on_left)
        features += [torch.tensor(token_ids, dtype=torch.int),
                     torch.tensor(mask, dtype=torch.bool)]
    return [features]
def triple2dual_process_fn(line, i, tokenizer, args):
    """Expand one tab-separated "query / positive / negative" line into two
    dual-encoder examples: (query, positive, label 1) and
    (query, negative, label 0)."""
    cells = line.split("\t")
    if len(cells) != 3:
        raise Exception(
            "Line doesn't have correct length: {0}. Expected 3.".format(str(len(cells))))
    # this is for training and validation
    # query, positive_passage, negative_passage = line
    # return 2 entries per line, 1 pos + 1 neg
    mask_padding_with_zero = True
    pad_token_segment_id = 0
    pad_on_left = False
    pos_feats = []
    neg_feats = []
    # Column index renamed from `i` to avoid shadowing the function parameter.
    for col, text in enumerate(cells):
        token_ids = tokenizer.encode(
            text.strip(), add_special_tokens=True, max_length=args.max_seq_length,)
        segment_ids = [0] * len(token_ids)
        mask = [1 if mask_padding_with_zero else 0] * len(token_ids)
        token_ids, mask, segment_ids = pad_ids(
            token_ids, mask, segment_ids, args.max_seq_length,
            tokenizer.pad_token_id, mask_padding_with_zero,
            pad_token_segment_id, pad_on_left)
        if col == 0:
            # The query goes into both the positive and the negative example
            # (as distinct tensor objects, matching the original behavior).
            pos_feats += [torch.tensor(token_ids, dtype=torch.int),
                          torch.tensor(mask, dtype=torch.bool)]
            neg_feats += [torch.tensor(token_ids, dtype=torch.int),
                          torch.tensor(mask, dtype=torch.bool)]
        elif col == 1:
            pos_feats += [torch.tensor(token_ids, dtype=torch.int),
                          torch.tensor(mask, dtype=torch.bool), 1]
        else:
            neg_feats += [torch.tensor(token_ids, dtype=torch.int),
                          torch.tensor(mask, dtype=torch.bool), 0]
    return [pos_feats, neg_feats]
| 5,071 | 43.884956 | 167 | py |
modir | modir-master/data/msmarco_data.py | import sys
import os
import torch
sys.path += ['../']
import gzip
import pickle
from utils.util import pad_input_ids, multi_file_process, numbered_byte_file_generator, EmbeddingCache
import csv
from model.models import MSMarcoConfigDict, ALL_MODELS
from torch.utils.data import DataLoader, Dataset, TensorDataset, IterableDataset, get_worker_info
import numpy as np
from os import listdir
from os.path import isfile, join
import argparse
import json
def write_query_rel(args, pid2offset, query_file, positive_id_file, out_query_file, out_id_file,
                    splits=32):
    """Tokenize queries and rewrite the qrels file with remapped ids.

    Reads the raw query TSV and its qrels from args.data_dir, keeps only
    queries that actually appear in the qrels, writes the binary query file
    (plus its '_meta' JSON side-car) into args.out_data_dir, pickles the
    qid -> offset mapping, and emits a qrels file whose ids are the new
    query/passage offsets.

    Args:
        args: namespace with data_dir, out_data_dir, data_type, model fields.
        pid2offset: passage id -> offset mapping produced by preprocess().
        query_file / positive_id_file: input file names under args.data_dir.
        out_query_file / out_id_file: output file names under args.out_data_dir.
        splits: number of worker processes / intermediate split files.
    """
    print(
        "Writing query files " +
        str(out_query_file) +
        " and " +
        str(out_id_file))
    query_positive_id = set()
    query_positive_id_path = os.path.join(
        args.data_dir,
        positive_id_file,
    )
    # First pass over the qrels: collect the set of labeled query ids.
    print("Loading query_2_pos_docid")
    with gzip.open(query_positive_id_path, 'rt', encoding='utf8') if positive_id_file[-2:] == "gz" else open(query_positive_id_path, 'r', encoding='utf8') as f:
        # data_type 0 (doc ranking) qrels are space-separated; passage qrels use tabs.
        if args.data_type == 0:
            tsvreader = csv.reader(f, delimiter=" ")
        else:
            tsvreader = csv.reader(f, delimiter="\t")
        for [topicid, _, docid, rel] in tsvreader:
            query_positive_id.add(int(topicid))
    query_collection_path = os.path.join(
        args.data_dir,
        query_file,
    )
    out_query_path = os.path.join(
        args.out_data_dir,
        out_query_file,
    )
    qid2offset = {}
    # Tokenize the query file in parallel into `splits` intermediate files.
    print('start query file split processing')
    multi_file_process(
        args,
        splits,
        query_collection_path,
        out_query_path,
        QueryPreprocessingFn)
    # Merge the split files; each record is
    # 8B qid + 4B length + max_query_length int32 ids. The 8-byte qid prefix
    # is dropped on write and replaced by sequential offsets.
    print('start merging splits')
    idx = 0
    with open(out_query_path, 'wb') as f:
        for record in numbered_byte_file_generator(
                out_query_path, splits, 8 + 4 + args.max_query_length * 4):
            q_id = int.from_bytes(record[:8], 'big')
            if q_id not in query_positive_id:
                # exclude the query as it is not in label set
                continue
            f.write(record[8:])
            qid2offset[q_id] = idx
            idx += 1
            if idx < 3:
                print(str(idx) + " " + str(q_id))
    qid2offset_path = os.path.join(
        args.out_data_dir,
        "qid2offset.pickle",
    )
    with open(qid2offset_path, 'wb') as handle:
        pickle.dump(qid2offset, handle, protocol=4)
    print("done saving qid2offset")
    print("Total lines written: " + str(idx))
    # Side-car metadata consumed by EmbeddingCache.
    meta = {'type': 'int32', 'total_number': idx,
            'embedding_size': args.max_query_length}
    with open(out_query_path + "_meta", 'w') as f:
        json.dump(meta, f)
    # Sanity check: read the first record back through the cache.
    embedding_cache = EmbeddingCache(out_query_path)
    print("First line")
    with embedding_cache as emb:
        print(emb[0])
    out_id_path = os.path.join(
        args.out_data_dir,
        out_id_file,
    )
    # Second pass over the qrels: rewrite ids as (query offset, passage offset).
    print("Writing qrels")
    with gzip.open(query_positive_id_path, 'rt', encoding='utf8') if positive_id_file[-2:] == "gz" else open(query_positive_id_path, 'r', encoding='utf8') as f, \
            open(out_id_path, "w", encoding='utf-8') as out_id:
        if args.data_type == 0:
            tsvreader = csv.reader(f, delimiter=" ")
        else:
            tsvreader = csv.reader(f, delimiter="\t")
        out_line_count = 0
        for [topicid, _, docid, rel] in tsvreader:
            topicid = int(topicid)
            if args.data_type == 0:
                # Document ids carry a leading "D" in the doc-ranking dataset.
                docid = int(docid[1:])
            else:
                docid = int(docid)
            out_id.write(str(qid2offset[topicid]) +
                         "\t" +
                         str(pid2offset[docid]) +
                         "\t" +
                         rel +
                         "\n")
            out_line_count += 1
    print("Total lines written: " + str(out_line_count))
def preprocess(args):
    """Preprocess the MS MARCO corpus and query/qrels files.

    Tokenizes the passage collection in parallel into the binary 'passages'
    file under args.out_data_dir (skipping everything if that file already
    exists), pickles the pid -> offset mapping, then processes the train and
    dev query/qrels pairs via write_query_rel. data_type 0 selects the
    document-ranking inputs, anything else the passage-ranking inputs.
    """
    pid2offset = {}
    if args.data_type == 0:
        in_passage_path = os.path.join(
            args.data_dir,
            "msmarco-docs.tsv",
        )
    else:
        in_passage_path = os.path.join(
            args.data_dir,
            "collection.tsv",
        )
    out_passage_path = os.path.join(
        args.out_data_dir,
        "passages",
    )
    # Idempotence guard: the merged passages file marks a finished run.
    if os.path.exists(out_passage_path):
        print("preprocessed data already exist, exit preprocessing")
        return
    out_line_count = 0
    print('start passage file split processing')
    multi_file_process(
        args,
        32,
        in_passage_path,
        out_passage_path,
        PassagePreprocessingFn)
    # Merge the 32 split files; each record is
    # 8B pid + 4B length + max_seq_length int32 ids. The pid prefix is
    # dropped and replaced by the sequential offset recorded in pid2offset.
    print('start merging splits')
    with open(out_passage_path, 'wb') as f:
        for idx, record in enumerate(numbered_byte_file_generator(
                out_passage_path, 32, 8 + 4 + args.max_seq_length * 4)):
            p_id = int.from_bytes(record[:8], 'big')
            f.write(record[8:])
            pid2offset[p_id] = idx
            if idx < 3:
                print(str(idx) + " " + str(p_id))
            out_line_count += 1
    print("Total lines written: " + str(out_line_count))
    # Side-car metadata consumed by EmbeddingCache.
    meta = {
        'type': 'int32',
        'total_number': out_line_count,
        'embedding_size': args.max_seq_length}
    with open(out_passage_path + "_meta", 'w') as f:
        json.dump(meta, f)
    # Sanity check: read the first record back through the cache.
    embedding_cache = EmbeddingCache(out_passage_path)
    print("First line")
    with embedding_cache as emb:
        print(emb[0])
    pid2offset_path = os.path.join(
        args.out_data_dir,
        "pid2offset.pickle",
    )
    with open(pid2offset_path, 'wb') as handle:
        pickle.dump(pid2offset, handle, protocol=4)
    print("done saving pid2offset")
    if args.data_type == 0:
        write_query_rel(
            args,
            pid2offset,
            "msmarco-doctrain-queries.tsv",
            "msmarco-doctrain-qrels.tsv",
            "train-query",
            "train-qrel.tsv")
        write_query_rel(
            args,
            pid2offset,
            "msmarco-test2019-queries.tsv",
            "2019qrels-docs.txt",
            "dev-query",
            "dev-qrel.tsv")
    else:
        write_query_rel(
            args,
            pid2offset,
            "queries.train.tsv",
            "qrels.train.tsv",
            "train-query",
            "train-qrel.tsv")
        write_query_rel(
            args,
            pid2offset,
            "queries.dev.small.tsv",
            "qrels.dev.small.tsv",
            "dev-query",
            "dev-qrel.tsv")
def preprocess_treccovid(args):
    """Preprocess a BEIR-style dataset (e.g. TREC-COVID).

    NOTE(review): this largely duplicates preprocess() — it differs only in
    using `splits = 8` workers and, for data_type != 0, in reading a single
    queries.tsv/qrels.tsv pair and writing it out as BOTH the train and dev
    splits. Consider folding the two functions together.
    """
    splits = 8
    pid2offset = {}
    if args.data_type == 0:
        in_passage_path = os.path.join(
            args.data_dir,
            "msmarco-docs.tsv",
        )
    else:
        in_passage_path = os.path.join(
            args.data_dir,
            "collection.tsv",
        )
    out_passage_path = os.path.join(
        args.out_data_dir,
        "passages",
    )
    # Idempotence guard: the merged passages file marks a finished run.
    if os.path.exists(out_passage_path):
        print("preprocessed data already exist, exit preprocessing")
        return
    out_line_count = 0
    print('start passage file split processing')
    multi_file_process(
        args,
        splits,
        in_passage_path,
        out_passage_path,
        PassagePreprocessingFn)
    # Merge split files: 8B pid + 4B length + max_seq_length int32 ids each;
    # the pid prefix is replaced by the sequential offset in pid2offset.
    print('start merging splits')
    with open(out_passage_path, 'wb') as f:
        for idx, record in enumerate(numbered_byte_file_generator(
                out_passage_path, splits, 8 + 4 + args.max_seq_length * 4)):
            p_id = int.from_bytes(record[:8], 'big')
            f.write(record[8:])
            pid2offset[p_id] = idx
            if idx < 3:
                print(str(idx) + " " + str(p_id))
            out_line_count += 1
    print("Total lines written: " + str(out_line_count))
    # Side-car metadata consumed by EmbeddingCache.
    meta = {
        'type': 'int32',
        'total_number': out_line_count,
        'embedding_size': args.max_seq_length}
    with open(out_passage_path + "_meta", 'w') as f:
        json.dump(meta, f)
    # Sanity check: read the first record back through the cache.
    embedding_cache = EmbeddingCache(out_passage_path)
    print("First line")
    with embedding_cache as emb:
        print(emb[0])
    pid2offset_path = os.path.join(
        args.out_data_dir,
        "pid2offset.pickle",
    )
    with open(pid2offset_path, 'wb') as handle:
        pickle.dump(pid2offset, handle, protocol=4)
    print("done saving pid2offset")
    if args.data_type == 0:
        write_query_rel(
            args,
            pid2offset,
            "msmarco-doctrain-queries.tsv",
            "msmarco-doctrain-qrels.tsv",
            "train-query",
            "train-qrel.tsv")
        write_query_rel(
            args,
            pid2offset,
            "msmarco-test2019-queries.tsv",
            "2019qrels-docs.txt",
            "dev-query",
            "dev-qrel.tsv")
    else:
        write_query_rel(
            args,
            pid2offset,
            "queries.tsv",
            "qrels.tsv",
            "train-query",
            "train-qrel.tsv",
            splits=splits)
        # ^ using the same input as dev below
        # but producing train as output (who knows what's needed in the next step)
        write_query_rel(
            args,
            pid2offset,
            "queries.tsv",
            "qrels.tsv",
            "dev-query",
            "dev-qrel.tsv",
            splits=splits)
def PassagePreprocessingFn(args, line, tokenizer):
    """Tokenize one raw corpus line into the fixed-width binary record
    `p_id (8B big-endian) + passage_len (4B big-endian) + int32 token ids`,
    padded to args.max_seq_length."""
    if args.data_type == 0:
        # Document ranking input: "D<id>\turl\ttitle\tbody".
        fields = line.split('\t')
        p_id = int(fields[0][1:])  # remove "D"
        url = fields[1].rstrip()
        title = fields[2].rstrip()
        body = fields[3].rstrip()
        if not args.model_type == "seeddot_nll":
            full_text = url + "<sep>" + title + "<sep>" + body
        else:
            full_text = url + " " + tokenizer.sep_token + " " + title + " " + tokenizer.sep_token + " " + body
        # keep only first 10000 characters, should be sufficient for any
        # experiment that uses less than 500 - 1k tokens
        full_text = full_text[:args.max_doc_character]
    else:
        # Passage ranking input: "<id>\ttext".
        fields = line.strip().split('\t')
        p_id = int(fields[0])
        full_text = fields[1].rstrip()[:args.max_doc_character]
    passage = tokenizer.encode(
        full_text,
        add_special_tokens=True,
        max_length=args.max_seq_length,
    )
    passage_len = min(len(passage), args.max_seq_length)
    if not args.model_type == "seeddot_nll":
        input_id_b = pad_input_ids(passage, args.max_seq_length)
    else:
        input_id_b = pad_input_ids(passage, args.max_seq_length, pad_token=tokenizer.pad_token_id)
    return p_id.to_bytes(8, 'big') + passage_len.to_bytes(4, 'big') + np.array(input_id_b, np.int32).tobytes()
def QueryPreprocessingFn(args, line, tokenizer):
    """Tokenize one tab-separated "<qid> <query>" line into the binary record
    `q_id (8B big-endian) + query_len (4B big-endian) + int32 token ids`,
    padded to args.max_query_length."""
    fields = line.split('\t')
    q_id = int(fields[0])
    token_ids = tokenizer.encode(
        fields[1].rstrip(),
        add_special_tokens=True,
        max_length=args.max_query_length)
    query_len = min(len(token_ids), args.max_query_length)
    if not args.model_type == "seeddot_nll":
        padded_ids = pad_input_ids(token_ids, args.max_query_length)
    else:
        padded_ids = pad_input_ids(token_ids, args.max_query_length, pad_token=tokenizer.pad_token_id)
    return q_id.to_bytes(8, 'big') + query_len.to_bytes(4, 'big') + np.array(padded_ids, np.int32).tobytes()
def GetProcessingFn(args, query=False, tgd=False):
    """Build a closure fn((passage_len, passage), i) -> [TensorDataset row].

    Queries are padded to args.max_query_length (or args.max_query_length_tgd
    when `tgd` is set and that attribute is not None); passages to
    args.max_seq_length.
    """
    def fn(vals, i):
        passage_len, passage = vals
        if query:
            if tgd and args.max_query_length_tgd is not None:
                max_len = args.max_query_length_tgd
            else:
                max_len = args.max_query_length
        else:
            max_len = args.max_seq_length
        pad_len = max(0, max_len - passage_len)
        # Token type 0 for queries, 1 for passages; padding positions are 0.
        token_type_ids = ([0] if query else [1]) * passage_len + [0] * pad_len
        attention_mask = [1] * passage_len + [0] * pad_len
        ids_tensor = torch.tensor([passage], dtype=torch.int)
        mask_tensor = torch.tensor([attention_mask], dtype=torch.bool)
        type_tensor = torch.tensor([token_type_ids], dtype=torch.uint8)
        idx_tensor = torch.tensor([i], dtype=torch.long)
        dataset = TensorDataset(
            ids_tensor, mask_tensor, type_tensor, idx_tensor)
        return [row for row in dataset]
    return fn
def GetTrainingDataProcessingFn(args, query_cache, passage_cache):
    """Build a fn over "qid\\tpos_pid\\tneg_pid,neg_pid,..." lines that yields
    labeled (query, passage) pairs: for each negative id, one positive sample
    (label 1) and one negative sample (label 0)."""
    def fn(line, i):
        fields = line.split('\t')
        qid = int(fields[0])
        pos_pid = int(fields[1])
        neg_pid_list = [int(pid) for pid in fields[2].split(',')]
        query_data = GetProcessingFn(
            args, query=True)(query_cache[qid], qid)[0]
        pos_data = GetProcessingFn(
            args, query=False)(passage_cache[pos_pid], pos_pid)[0]
        pos_label = torch.tensor(1, dtype=torch.long)
        neg_label = torch.tensor(0, dtype=torch.long)
        for neg_pid in neg_pid_list:
            neg_data = GetProcessingFn(
                args, query=False)(passage_cache[neg_pid], neg_pid)[0]
            # Each negative contributes one positive and one negative example.
            yield (query_data[0], query_data[1], query_data[2],
                   pos_data[0], pos_data[1], pos_data[2], pos_label)
            yield (query_data[0], query_data[1], query_data[2],
                   neg_data[0], neg_data[1], neg_data[2], neg_label)
    return fn
def GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache, tgd=False):
    """Build a fn over "qid\\tpos_pid\\tneg_pid,neg_pid,..." lines that yields
    (query, positive passage, negative passage, qid) triplets, one per
    negative id."""
    def fn(line, i):
        fields = line.split('\t')
        qid = int(fields[0])
        pos_pid = int(fields[1])
        neg_pid_list = [int(pid) for pid in fields[2].split(',')]
        query_data = GetProcessingFn(
            args, query=True, tgd=tgd)(query_cache[qid], qid)[0]
        pos_data = GetProcessingFn(
            args, query=False)(passage_cache[pos_pid], pos_pid)[0]
        for neg_pid in neg_pid_list:
            neg_data = GetProcessingFn(
                args, query=False)(passage_cache[neg_pid], neg_pid)[0]
            yield (query_data[0], query_data[1], query_data[2],
                   pos_data[0], pos_data[1], pos_data[2],
                   neg_data[0], neg_data[1], neg_data[2], qid)
    return fn
def get_arguments():
    """Define and parse the command-line interface of the preprocessing script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--data_dir", default=None, type=str, required=True,
                            help="The input data dir")
    arg_parser.add_argument("--out_data_dir", default=None, type=str, required=True,
                            help="The output data dir")
    arg_parser.add_argument("--model_type", default=None, type=str, required=True,
                            help="Model type selected in the list: " +
                            ", ".join(MSMarcoConfigDict.keys()))
    arg_parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                            help="Path to pre-trained model or shortcut name selected in the list: " +
                            ", ".join(ALL_MODELS))
    arg_parser.add_argument("--max_seq_length", default=128, type=int,
                            help="The maximum total input sequence length after tokenization. Sequences longer "
                            "than this will be truncated, sequences shorter will be padded.")
    arg_parser.add_argument("--max_query_length", default=64, type=int,
                            help="The maximum total input sequence length after tokenization. Sequences longer "
                            "than this will be truncated, sequences shorter will be padded.")
    arg_parser.add_argument("--max_doc_character", default=10000, type=int,
                            help="used before tokenizer to save tokenizer latency")
    arg_parser.add_argument("--data_type", default=0, type=int,
                            help="0 for doc, 1 for passage")
    arg_parser.add_argument("--beir_dataset", action='store_true',
                            help="use the new preprocess function instead")
    return arg_parser.parse_args()
def main():
    """Entry point: create the output directory, then run the preprocessor
    selected by --beir_dataset."""
    args = get_arguments()
    if not os.path.exists(args.out_data_dir):
        os.makedirs(args.out_data_dir)
    if args.beir_dataset:
        preprocess_treccovid(args)
    else:
        preprocess(args)
# Run the preprocessing pipeline when this file is executed as a script.
if __name__ == '__main__':
    main()
| 16,967 | 29.085106 | 162 | py |
modir | modir-master/data/DPR_data.py | from os.path import join
import sys
sys.path += ['../']
import argparse
import json
import os
import random
import numpy as np
import torch
from torch.utils.data import Dataset, TensorDataset
from model.models import MSMarcoConfigDict, ALL_MODELS
import csv
from utils.util import multi_file_process, numbered_byte_file_generator, EmbeddingCache
import pickle
def normalize_question(question: str) -> str:
    """Strip a single trailing question mark, if present.

    Uses endswith() so the empty string is handled safely — the previous
    `question[-1]` indexing raised IndexError for "".
    """
    if question.endswith('?'):
        question = question[:-1]
    return question
def write_qas_query(args, qas_file, out_query_file):
    """Tokenize a qas-style test CSV into the binary query file.

    Reads the tab-separated file `qas_file` under args.answer_dir (column 0 is
    the question), assigns query ids sequentially in file order, writes each
    encoded query via QueryPreprocessingFn to args.out_data_dir/out_query_file,
    and records the count in a '_meta' JSON side-car for EmbeddingCache.
    """
    print("Writing qas query files " + str(out_query_file))
    print("print",args.answer_dir,qas_file)
    qas_path = os.path.join(
        args.answer_dir,
        qas_file,
    )
    out_query_path = os.path.join(
        args.out_data_dir,
        out_query_file ,
    )
    # Build the tokenizer configured for the selected model type.
    configObj = MSMarcoConfigDict[args.model_type]
    tokenizer = configObj.tokenizer_class.from_pretrained(
        args.model_name_or_path,
        do_lower_case=True,
        cache_dir=None,
    )
    qid = 0
    with open(qas_path, "r", encoding="utf-8") as f, open(out_query_path, "wb") as out_query:
        reader = csv.reader(f, delimiter='\t')
        for row in reader:
            question = normalize_question(row[0])
            out_query.write(QueryPreprocessingFn(args, qid, question, tokenizer))
            qid += 1
    # Side-car metadata consumed by EmbeddingCache.
    meta = {'type': 'int32', 'total_number': qid, 'embedding_size': args.max_seq_length}
    with open(out_query_path + "_meta", 'w') as f:
        json.dump(meta, f)
def write_query_rel(args, pid2offset, query_file, out_query_file, out_ann_file, out_train_file, passage_id_name="passage_id"):
    """Convert a DPR-style JSON query file into binary queries plus label files.

    Samples without positive or without hard-negative contexts are dropped.
    For each remaining sample, writes:
      - the tokenized query (sequential qid) to out_query_file,
      - "qid <tab> first-positive-offset <tab> answers" to out_ann_file,
      - "qid <tab> first-positive-offset <tab> neg-offsets" to out_train_file.
    Passage ids are remapped through `pid2offset`; `passage_id_name` selects
    the id key inside each context dict ("passage_id" for NQ, "psg_id" for
    TriviaQA).
    """
    print("Writing query files " + str(out_query_file) + " and " + str(out_ann_file))
    query_path = os.path.join(
        args.question_dir,
        query_file,
    )
    with open(query_path, 'r', encoding="utf-8") as f:
        data = json.load(f)
        print('Aggregated data size: {}'.format(len(data)))
    # Keep only samples with at least one positive and one hard negative.
    data = [r for r in data if len(r['positive_ctxs']) > 0]
    print('Total cleaned data size: {}'.format(len(data)))
    data = [r for r in data if len(r['hard_negative_ctxs']) > 0]
    print('Total cleaned data size: {}'.format(len(data)))
    out_query_path = os.path.join(
        args.out_data_dir,
        out_query_file ,
    )
    out_ann_file = os.path.join(
        args.out_data_dir,
        out_ann_file ,
    )
    out_training_path = os.path.join(
        args.out_data_dir,
        out_train_file ,
    )
    qid = 0
    # Build the tokenizer configured for the selected model type.
    configObj = MSMarcoConfigDict[args.model_type]
    tokenizer = configObj.tokenizer_class.from_pretrained(
        args.model_name_or_path,
        do_lower_case=True,
        cache_dir=None,
    )
    with open(out_query_path, "wb") as out_query, \
            open(out_ann_file, "w", encoding='utf-8') as out_ann, \
            open(out_training_path, "w", encoding='utf-8') as out_training:
        for sample in data:
            positive_ctxs = sample['positive_ctxs']
            neg_ctxs = sample['hard_negative_ctxs']
            question = normalize_question(sample['question'])
            # Only the first positive context is kept as the label.
            first_pos_pid = pid2offset[int(positive_ctxs[0][passage_id_name])]
            neg_pids = [str(pid2offset[int(neg_ctx[passage_id_name])]) for neg_ctx in neg_ctxs]
            out_ann.write("{}\t{}\t{}\n".format(qid, first_pos_pid, sample["answers"]))
            out_training.write("{}\t{}\t{}\n".format(qid, first_pos_pid, ','.join(neg_pids)))
            out_query.write(QueryPreprocessingFn(args, qid, question, tokenizer))
            qid += 1
    print("Total lines written: " + str(qid))
    # Side-car metadata consumed by EmbeddingCache.
    meta = {'type': 'int32', 'total_number': qid, 'embedding_size': args.max_seq_length}
    with open(out_query_path + "_meta", 'w') as f:
        json.dump(meta, f)
    # Sanity check: read the first record back through the cache.
    embedding_cache = EmbeddingCache(out_query_path)
    print("First line")
    with embedding_cache as emb:
        print(emb[0])
def write_mapping(args, id2offset, out_name):
    """Dump an id -> offset mapping as tab-separated lines under args.out_data_dir."""
    out_path = os.path.join(
        args.out_data_dir,
        out_name,
    )
    with open(out_path, 'w') as out_file:
        for key, offset in id2offset.items():
            out_file.write("{}\t{}\n".format(key, offset))
def load_mapping(data_dir, out_name):
    """Read a tab-separated mapping file back into (pid2offset, offset2pid) dicts."""
    mapping_path = os.path.join(
        data_dir,
        out_name,
    )
    pid2offset = {}
    offset2pid = {}
    with open(mapping_path, 'r') as mapping_file:
        for row in mapping_file.readlines():
            parts = row.split('\t')
            pid = int(parts[0])
            offset = int(parts[1])
            pid2offset[pid] = offset
            offset2pid[offset] = pid
    return pid2offset, offset2pid
def preprocess(args):
    """Preprocess the DPR Wikipedia corpus and NQ/TriviaQA query files.

    Tokenizes psgs_w100.tsv in parallel into the binary 'passages' file under
    args.out_data_dir (skipping the corpus step if it already exists), writes
    the pid -> offset mapping, then converts the train/dev query JSONs via
    write_query_rel and the qas test CSVs via write_qas_query.
    data_type: 0 = NQ only, 1 = TriviaQA only, otherwise merge both.
    """
    pid2offset = {}
    in_passage_path = os.path.join(
        args.wiki_dir,
        "psgs_w100.tsv" ,
    )
    out_passage_path = os.path.join(
        args.out_data_dir,
        "passages" ,
    )
    # Idempotence guard for the expensive corpus pass; query processing below
    # still runs either way.
    if os.path.exists(out_passage_path):
        print("preprocessed data already exist, exit preprocessing")
        return
    else:
        out_line_count = 0
        print('start passage file split processing')
        multi_file_process(args, 32, in_passage_path, out_passage_path, PassagePreprocessingFn)
        # Merge the 32 split files; each record is
        # 8B pid + 4B length + max_seq_length int32 ids. The pid prefix is
        # dropped and replaced by the sequential offset in pid2offset.
        print('start merging splits')
        with open(out_passage_path, 'wb') as f:
            for idx, record in enumerate(numbered_byte_file_generator(out_passage_path, 32, 8 + 4 + args.max_seq_length * 4)):
                p_id = int.from_bytes(record[:8], 'big')
                f.write(record[8:])
                pid2offset[p_id] = idx
                if idx < 3:
                    print(str(idx) + " " + str(p_id))
                out_line_count += 1
        print("Total lines written: " + str(out_line_count))
        # Side-car metadata consumed by EmbeddingCache.
        meta = {'type': 'int32', 'total_number': out_line_count, 'embedding_size': args.max_seq_length}
        with open(out_passage_path + "_meta", 'w') as f:
            json.dump(meta, f)
        write_mapping(args, pid2offset, "pid2offset")
    # Sanity check: read the record for passage id 1 back through the cache.
    embedding_cache = EmbeddingCache(out_passage_path)
    print("First line")
    with embedding_cache as emb:
        print(emb[pid2offset[1]])
    if args.data_type == 0:
        write_query_rel(args, pid2offset, "nq-train.json", "train-query", "train-ann", "train-data")
    elif args.data_type == 1:
        write_query_rel(args, pid2offset, "trivia-train.json", "train-query", "train-ann", "train-data", "psg_id")
    else:
        # use both training dataset and merge them
        write_query_rel(args, pid2offset, "nq-train.json", "train-query-nq", "train-ann-nq", "train-data-nq")
        write_query_rel(args, pid2offset, "trivia-train.json", "train-query-trivia", "train-ann-trivia", "train-data-trivia", "psg_id")
        # Concatenate the two binary query files (NQ first, then TriviaQA).
        with open(args.out_data_dir + "train-query-nq", "rb") as nq_query, \
                open(args.out_data_dir + "train-query-trivia", "rb") as trivia_query, \
                open(args.out_data_dir + "train-query", "wb") as out_query:
            out_query.write(nq_query.read())
            out_query.write(trivia_query.read())
        # Merge the two meta side-cars by summing the record counts.
        with open(args.out_data_dir + "train-query-nq_meta", "r", encoding='utf-8') as nq_query, \
                open(args.out_data_dir + "train-query-trivia_meta", "r", encoding='utf-8') as trivia_query, \
                open(args.out_data_dir + "train-query_meta", "w", encoding='utf-8') as out_query:
            a = json.load(nq_query)
            b = json.load(trivia_query)
            meta = {'type': 'int32', 'total_number': a['total_number'] + b['total_number'], 'embedding_size': args.max_seq_length}
            json.dump(meta, out_query)
        embedding_cache = EmbeddingCache(args.out_data_dir + "train-query")
        print("First line after merge")
        with embedding_cache as emb:
            # NOTE(review): 58812 looks like a hard-coded spot-check index
            # (presumably just past the NQ portion) — confirm against data.
            print(emb[58812])
        # Concatenate the two ann label files in the same order.
        with open(args.out_data_dir + "train-ann-nq", "r", encoding='utf-8') as nq_ann, \
                open(args.out_data_dir + "train-ann-trivia", "r", encoding='utf-8') as trivia_ann, \
                open(args.out_data_dir + "train-ann", "w", encoding='utf-8') as out_ann:
            out_ann.writelines(nq_ann.readlines())
            out_ann.writelines(trivia_ann.readlines())
    write_query_rel(args, pid2offset, "nq-dev.json", "dev-query", "dev-ann", "dev-data")
    write_query_rel(args, pid2offset, "trivia-dev.json", "dev-query-trivia", "dev-ann-trivia", "dev-data-trivia", "psg_id")
    write_qas_query(args, "nq-test.csv", "test-query")
    write_qas_query(args, "trivia-test.csv", "trivia-test-query")
def PassagePreprocessingFn(args, line, tokenizer):
    """Encode one "id <tab> text <tab> title" TSV line into the binary record
    `p_id (8B big-endian) + passage_len (4B big-endian) + int32 token ids`,
    padded/truncated to args.max_seq_length. The TSV header row yields b''."""
    fields = list(csv.reader([line], delimiter='\t'))[0]
    if fields[0] == 'id':
        return bytearray()  # skip the TSV header row
    p_id = int(fields[0])
    text = fields[1]
    title = fields[2]
    token_ids = tokenizer.encode(title, text_pair=text, add_special_tokens=True,
                                 max_length=args.max_seq_length,
                                 pad_to_max_length=False)
    seq_len = args.max_seq_length
    passage_len = len(token_ids)  # unpadded length, recorded before padding
    if len(token_ids) < seq_len:
        token_ids = token_ids + [tokenizer.pad_token_id] * (seq_len - len(token_ids))
    elif len(token_ids) > seq_len:
        token_ids = token_ids[0:seq_len]
        token_ids[-1] = tokenizer.sep_token_id  # keep a closing [SEP] after truncation
    if p_id < 5:
        arr = np.array(token_ids, np.int32)
        print("pid {}, passagelen {}, shape {}".format(p_id, passage_len, arr.shape))
    return p_id.to_bytes(8, 'big') + passage_len.to_bytes(4, 'big') + np.array(token_ids, np.int32).tobytes()
def QueryPreprocessingFn(args, qid, text, tokenizer):
    """Serialize one query into binary: 4-byte big-endian token count, then
    max_seq_length int32 token ids (padded with pad_token_id or truncated with
    a trailing [SEP])."""
    encoded = tokenizer.encode(text, add_special_tokens=True, max_length=args.max_seq_length,
                               pad_to_max_length=False)
    query_len = len(encoded)  # length before padding/truncation, stored in the record
    target_len = args.max_seq_length
    if query_len < target_len:
        encoded = encoded + [tokenizer.pad_token_id] * (target_len - query_len)
    elif query_len > target_len:
        encoded = encoded[:target_len]
        encoded[-1] = tokenizer.sep_token_id  # keep a [SEP] terminator after truncation
    if qid < 5:
        # Debug print for the first few queries.
        arr = np.array(encoded, np.int32)
        print("qid {}, passagelen {}, shape {}".format(qid, query_len, arr.shape))
    return query_len.to_bytes(4, 'big') + np.array(encoded, np.int32).tobytes()
def GetProcessingFn(args, query=False):
    """Return a closure that wraps one cached (length, token-array) pair into
    TensorDataset rows of (input_ids, attention_mask, token_type_ids, id).

    The `query` flag is kept for interface parity with callers; it does not
    change the processing here.
    """
    def fn(vals, i):
        passage_len, passage = vals
        pad_len = max(0, args.max_seq_length - passage_len)
        # All-zero segment ids over the full (padded) sequence.
        token_type_ids = [0] * (passage_len + pad_len)
        # Non-pad positions (token id != 0) are attended to.
        attention_mask = passage != 0
        rows = [(i, passage, attention_mask, token_type_ids)]
        id_tensor = torch.tensor([r[0] for r in rows], dtype=torch.long)
        input_tensor = torch.tensor([r[1] for r in rows], dtype=torch.int)
        mask_tensor = torch.tensor([r[2] for r in rows], dtype=torch.bool)
        type_tensor = torch.tensor([r[3] for r in rows], dtype=torch.uint8)
        dataset = TensorDataset(input_tensor, mask_tensor, type_tensor, id_tensor)
        return list(dataset)
    return fn
def GetTrainingDataProcessingFn(args, query_cache, passage_cache, shuffle=True):
    """Return a generator factory turning one "qid\\tpos_pid\\tneg_id,neg_id,..."
    line into two training examples: (query, positive) then (query, negative).

    One negative is sampled (the first after an optional shuffle) from the
    comma-separated negative-id list.
    """
    def fn(line, i):
        parts = line.split('\t')
        qid = int(parts[0])
        pos_pid = int(parts[1])
        neg_pids = [int(p) for p in parts[2].split(',')]
        query_data = GetProcessingFn(args, query=True)(query_cache[qid], qid)[0]
        pos_data = GetProcessingFn(args, query=False)(passage_cache[pos_pid], pos_pid)[0]
        if shuffle:
            random.shuffle(neg_pids)
        neg_data = GetProcessingFn(args, query=False)(passage_cache[neg_pids[0]], neg_pids[0])[0]
        yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2])
        yield (query_data[0], query_data[1], query_data[2], neg_data[0], neg_data[1], neg_data[2])
    return fn
def GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache, shuffle=True):
    """Return a generator factory turning one "qid\\tpos_pid\\tneg_id,neg_id,..."
    line into a single 9-tuple triplet example (query, positive, negative).

    One negative is sampled (the first after an optional shuffle) from the
    comma-separated negative-id list.
    """
    def fn(line, i):
        parts = line.split('\t')
        qid = int(parts[0])
        pos_pid = int(parts[1])
        neg_pids = [int(p) for p in parts[2].split(',')]
        query_data = GetProcessingFn(args, query=True)(query_cache[qid], qid)[0]
        pos_data = GetProcessingFn(args, query=False)(passage_cache[pos_pid], pos_pid)[0]
        if shuffle:
            random.shuffle(neg_pids)
        neg_data = GetProcessingFn(args, query=False)(passage_cache[neg_pids[0]], neg_pids[0])[0]
        yield (query_data[0], query_data[1], query_data[2],
               pos_data[0], pos_data[1], pos_data[2],
               neg_data[0], neg_data[1], neg_data[2])
    return fn
def main():
    """Parse command-line options and run the DPR data preprocessing pipeline."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--out_data_dir", type=str,
        default="/webdata-nfs/jialliu/dpr/ann/ann_multi_data_256/",
        help="The output data dir")
    add("--model_type", type=str, default="dpr",
        help="Model type selected in the list: " + ", ".join(MSMarcoConfigDict.keys()))
    add("--model_name_or_path", type=str, default="bert-base-uncased",
        help="Path to pre-trained model or shortcut name selected in the list: " +
             ", ".join(ALL_MODELS))
    add("--max_seq_length", type=int, default=256,
        help="The maximum total input sequence length after tokenization. Sequences longer "
             "than this will be truncated, sequences shorter will be padded.")
    add("--data_type", type=int, default=0,
        help="0 is nq, 1 is trivia, 2 is both")
    add("--question_dir", type=str,
        help="location of the raw QnA question data")
    add("--wiki_dir", type=str,
        help="location of the wiki corpus")
    add("--answer_dir", type=str,
        help="location of the QnA answers for evaluation")
    args = parser.parse_args()
    # Ensure the output directory exists before preprocessing writes into it.
    if not os.path.exists(args.out_data_dir):
        os.makedirs(args.out_data_dir)
    preprocess(args)
# Standard script entry-point guard: run preprocessing when executed directly.
if __name__ == '__main__':
    main()
| 14,512 | 34.923267 | 135 | py |
modir | modir-master/model/domain_classifier.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
class DomainClassifier(nn.Module):
    """Small MLP probe (1-3 layers, chosen by args.dc_layers) that predicts
    which domain an embedding vector came from. Logits are clamped to
    [-5, 5] before any loss is computed."""

    def __init__(self,
                 args,
                 input_size=768,
                 n_class=2):
        super(DomainClassifier, self).__init__()
        depth = args.dc_layers
        if depth == 1:
            stack = [
                nn.Linear(input_size, n_class, bias=False),
            ]
        elif depth == 2:
            stack = [
                nn.Linear(input_size, 200),
                nn.ReLU(),
                nn.Linear(200, n_class, bias=False),
            ]
        elif depth == 3:
            stack = [
                nn.Linear(input_size, 200),
                nn.ReLU(),
                nn.Linear(200, 200),
                nn.ReLU(),
                nn.Linear(200, n_class, bias=False),
            ]
        else:
            raise NotImplementedError()
        self.layers = nn.ModuleList(stack)

    def forward(self, inputs, labels=None):
        """Run the probe.

        Returns raw (clamped) logits when labels is None; with the string
        'uniform', a (logits, uniform_loss, None) tuple; with a label tensor,
        (logits, cross_entropy, (total, correct, accuracy)).

        NOTE(original): this forward no longer supports run_warmup_da.py,
        which relied on lamb and gradient reversal.
        """
        hidden = inputs
        for layer in self.layers:
            hidden = layer(hidden)
        logits = torch.clamp(hidden, min=-5.0, max=5.0)
        if labels is None:
            return logits
        if isinstance(labels, str):
            assert labels == 'uniform'
            return (
                logits,
                self.uniform_loss(logits),
                None,
            )
        return (
            logits,
            F.cross_entropy(logits, labels),
            self.get_acc(logits, labels)
        )

    @staticmethod
    def uniform_loss(logits):
        """Loss that is minimized when logits put equal mass on both classes:
        the mean of the cross-entropies against all-0 and all-1 targets."""
        batch_size = logits.shape[0]
        device = logits.device
        all_zeros = torch.tensor([0] * batch_size, device=device)
        all_ones = torch.tensor([1] * batch_size, device=device)
        return (F.cross_entropy(logits, all_zeros) +
                F.cross_entropy(logits, all_ones)) / 2

    @staticmethod
    def get_acc(logits, labels):
        """Return (total, correct, accuracy) under argmax prediction."""
        preds = torch.argmax(logits, dim=1)
        total = int(len(labels))
        correct = int(sum(labels == preds))
        return (total, correct, correct / total)
class DummyModule(nn.Module):
    """Placeholder module exposing a single trainable scalar parameter
    ('dummy'); its forward pass is a no-op returning None."""

    def __init__(self, *args, **kwargs):
        super(DummyModule, self).__init__()
        # Attribute assignment of an nn.Parameter registers it, same as
        # register_parameter would.
        self.dummy = nn.Parameter(torch.randn(1))

    def forward(self, inputs, *args, **kwargs):
        return None
def dry_test(model, device, test_dataset):
    """Evaluate a 2-class domain classifier on test_dataset.

    Returns a dict of accuracy strings formatted to 5 decimals:
    'total_acc' plus 'class {i} acc' for each of the two classes.
    """
    loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
    n_datasets = 2
    total = 0
    correct = 0
    per_class_total = [0 for _ in range(n_datasets)]
    per_class_correct = [0 for _ in range(n_datasets)]
    for batch in loader:
        inputs, labels = batch[0].to(device), batch[1].to(device)
        preds = torch.argmax(model(inputs), dim=1)
        total += len(labels)
        correct += sum(labels == preds)
        for cid in range(n_datasets):
            per_class_total[cid] += sum(labels == cid)
            per_class_correct[cid] += sum(torch.logical_and(labels == cid, preds == cid))
    results = {'total_acc': int(correct) / total}
    for cid in range(n_datasets):
        results[f'class {cid} acc'] = int(per_class_correct[cid]) / int(per_class_total[cid])
    return {k: f'{v:.5f}' for k, v in results.items()}
def dry_dc_evaluation(args, dc_model, query_embs, passage_embs,
                      prev_dc_state_dict):
    """Train a domain-classifier probe on frozen embeddings and measure domain
    separability.

    query_embs / passage_embs are lists of per-domain embedding arrays; index 0
    is treated as the source domain, the rest as target domains. Both a fresh
    probe (dc_model) and, when given, the previous probe state
    (prev_dc_state_dict) are evaluated on a held-out 30% split.

    Returns a 3-tuple:
      ([query_acc, passage_acc] for the freshly trained probe,
       [query_acc, passage_acc] for the previous probe (None, None if absent),
       the trained probe's state_dict).
    Accuracies are the formatted strings produced by dry_test.
    """
    # we take all queries from both domains
    # and discard passages from one of the domains
    # so that each domain has the same number of vectors (query+passage)
    single_domain_query_size = min([x.shape[0] for x in query_embs])
    single_domain_passage_size = min([x.shape[0] for x in passage_embs])
    srd_query = query_embs[0][:single_domain_query_size]
    srd_passage = passage_embs[0][:single_domain_passage_size]
    tgd_query = np.concatenate([x[:single_domain_query_size] for x in query_embs[1:]])
    tgd_passage = np.concatenate([x[:single_domain_passage_size] for x in passage_embs[1:]])
    # 70/30 train/test split, applied per domain so both sides stay balanced.
    train_ratio = 0.7
    srd_query_train_size = int(train_ratio * single_domain_query_size)
    srd_passage_train_size = int(train_ratio * single_domain_passage_size)
    tgd_query_train_size = int(train_ratio * single_domain_query_size) * (len(query_embs) - 1)
    tgd_passage_train_size = int(train_ratio * single_domain_passage_size) * (len(query_embs) - 1)
    # Label 0 = source domain, 1 = target domain(s).
    train_query_dataset = TensorDataset(
        torch.tensor(np.concatenate(
            [srd_query[:srd_query_train_size],
             tgd_query[:tgd_query_train_size]]
        )),
        torch.tensor(np.concatenate(
            [[0] * srd_query_train_size,
             [1] * tgd_query_train_size]
        ))
    )
    train_passage_dataset = TensorDataset(
        torch.tensor(np.concatenate(
            [srd_passage[:srd_passage_train_size],
             tgd_passage[:tgd_passage_train_size]]
        )),
        torch.tensor(np.concatenate(
            [[0] * srd_passage_train_size,
             [1] * tgd_passage_train_size]
        ))
    )
    srd_query_test_size = single_domain_query_size - srd_query_train_size
    srd_passage_test_size = single_domain_passage_size - srd_passage_train_size
    tgd_query_test_size = single_domain_query_size * (len(query_embs) - 1) - tgd_query_train_size
    tgd_passage_test_size = single_domain_passage_size * (len(query_embs) - 1) - tgd_passage_train_size
    test_query_dataset = TensorDataset(
        torch.tensor(np.concatenate(
            [srd_query[srd_query_train_size:],
             tgd_query[tgd_query_train_size:]]
        )),
        torch.tensor(np.concatenate(
            [[0] * srd_query_test_size,
             [1] * tgd_query_test_size]
        ))
    )
    test_passage_dataset = TensorDataset(
        torch.tensor(np.concatenate(
            [srd_passage[srd_passage_train_size:],
             tgd_passage[tgd_passage_train_size:]]
        )),
        torch.tensor(np.concatenate(
            [[0] * srd_passage_test_size,
             [1] * tgd_passage_test_size]
        ))
    )
    # Score the previous probe (if any) on the new test split for comparison.
    if prev_dc_state_dict is not None:
        prev_dc_model = DomainClassifier(args)
        prev_dc_model.to(args.device)
        prev_dc_model.load_state_dict(prev_dc_state_dict)
        prev_test_query_results = dry_test(prev_dc_model, args.device, test_query_dataset)
        prev_test_passage_results = dry_test(prev_dc_model, args.device, test_passage_dataset)
    else:
        prev_test_query_results = {'total_acc': None}
        prev_test_passage_results = {'total_acc': None}
    optimizer = torch.optim.Adam(dc_model.parameters(), lr=5e-4)
    # if args.fp16:
    #     try:
    #         from apex import amp
    #     except ImportError:
    #         raise ImportError(
    #             "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
    #     dc_model, optimizer = amp.initialize(
    #         dc_model, optimizer, opt_level=args.fp16_opt_level)
    step = 0
    # NOTE(review): each step below performs one query-batch and one
    # passage-batch update, so 500 steps means 500 updates of each kind —
    # the original comment claiming "50 query and 50 passage" does not match
    # the value; confirm the intended budget.
    total_step = 500
    train_query_dataloader = DataLoader(train_query_dataset, batch_size=48, shuffle=True)
    train_passage_dataloader = DataLoader(train_passage_dataset, batch_size=48, shuffle=True)
    query_iterator = iter(train_query_dataloader)
    passage_iterator = iter(train_passage_dataloader)
    while step < total_step:
        # Cycle both dataloaders indefinitely until the step budget is spent.
        try:
            query_batch = next(query_iterator)
        except StopIteration:
            query_iterator = iter(train_query_dataloader)
            query_batch = next(query_iterator)
        try:
            passage_batch = next(passage_iterator)
        except StopIteration:
            passage_iterator = iter(train_passage_dataloader)
            passage_batch = next(passage_iterator)
        step += 1
        # One gradient update on the query batch, then one on the passage batch.
        for batch in [query_batch, passage_batch]:
            inputs, labels = batch[0].to(args.device), batch[1].to(args.device)
            outputs = dc_model(inputs)
            optimizer.zero_grad()
            loss = F.cross_entropy(outputs, labels)
            # if args.fp16:
            #     with amp.scale_loss(loss, optimizer) as scaled_loss:
            #         scaled_loss.backward()
            # else:
            loss.backward()
            optimizer.step()
    test_query_results = dry_test(dc_model, args.device, test_query_dataset)
    test_passage_results = dry_test(dc_model, args.device, test_passage_dataset)
    return (
        [test_query_results['total_acc'], test_passage_results['total_acc']],
        [prev_test_query_results['total_acc'], prev_test_passage_results['total_acc']],
        dc_model.state_dict()
    )
| 8,906 | 37.227468 | 104 | py |
modir | modir-master/model/models.py | import sys
sys.path += ['../']
import torch
from torch import nn
from transformers import (
RobertaConfig,
RobertaModel,
RobertaForSequenceClassification,
RobertaTokenizer,
BertModel,
BertTokenizer,
BertConfig
)
import torch.nn.functional as F
from data.process_fn import triple_process_fn, triple2dual_process_fn
class EmbeddingMixin:
    """
    Mixin for common functions in most embedding models. Each model should define its own bert-like backbone and forward.
    We inherit from RobertaModel to use from_pretrained
    """

    def __init__(self, model_argobj):
        # With no arg object, default to using the first ([CLS]) token.
        self.use_mean = False if model_argobj is None else model_argobj.use_mean
        print("Using mean:", self.use_mean)

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=0.02)

    def masked_mean(self, t, mask):
        """Mean over the sequence axis, counting only positions where mask is set."""
        summed = torch.sum(t * mask.unsqueeze(-1).float(), axis=1)
        counts = mask.sum(axis=1, keepdim=True).float()
        return summed / counts

    def masked_mean_or_first(self, emb_all, mask):
        """Pool a bert output tuple: masked mean when use_mean, else first token."""
        # emb_all is a tuple from bert - sequence output, pooler
        assert isinstance(emb_all, tuple)
        if self.use_mean:
            return self.masked_mean(emb_all[0], mask)
        return emb_all[0][:, 0]

    def query_emb(self, input_ids, attention_mask):
        raise NotImplementedError("Please Implement this method")

    def body_emb(self, input_ids, attention_mask):
        raise NotImplementedError("Please Implement this method")
class NLL(EmbeddingMixin):
    """Negative log-likelihood head over (query, positive, negative) triples
    scored by dot product. With input_ids_b=None it acts as a plain encoder,
    returning query or body embeddings according to is_query."""

    def forward(
            self,
            query_ids,
            attention_mask_q,
            input_ids_a=None,
            attention_mask_a=None,
            input_ids_b=None,
            attention_mask_b=None,
            is_query=True,
            output_dc_emb=False):
        # Encoding-only mode: no negative passage supplied.
        if input_ids_b is None:
            if is_query:
                return self.query_emb(query_ids, attention_mask_q)
            return self.body_emb(query_ids, attention_mask_q)
        q_embs = self.query_emb(query_ids, attention_mask_q)
        a_embs = self.body_emb(input_ids_a, attention_mask_a)
        b_embs = self.body_emb(input_ids_b, attention_mask_b)
        pos_scores = (q_embs * a_embs).sum(-1).unsqueeze(1)
        neg_scores = (q_embs * b_embs).sum(-1).unsqueeze(1)
        logit_matrix = torch.cat([pos_scores, neg_scores], dim=1)  # [B, 2]
        lsm = F.log_softmax(logit_matrix, dim=1)
        # NLL of the positive passage (column 0).
        loss = -1.0 * lsm[:, 0]
        if output_dc_emb:
            # Also expose the embeddings for the domain classifier.
            return (loss.mean(), (q_embs, a_embs, b_embs))
        return (loss.mean(),)
class NLL_MultiChunk(EmbeddingMixin):
    # NLL loss for long passages split into fixed-size chunks: each document
    # is scored by its best-matching chunk (max over chunk scores). Subclasses
    # must define self.base_len (the chunk length in tokens).
    def forward(
            self,
            query_ids,
            attention_mask_q,
            input_ids_a=None,
            attention_mask_a=None,
            input_ids_b=None,
            attention_mask_b=None,
            is_query=True):
        # Encoding-only mode: no negative passage supplied.
        if input_ids_b is None and is_query:
            return self.query_emb(query_ids, attention_mask_q)
        elif input_ids_b is None:
            return self.body_emb(query_ids, attention_mask_q)
        q_embs = self.query_emb(query_ids, attention_mask_q)
        a_embs = self.body_emb(input_ids_a, attention_mask_a)  # [batch, chunks, dim]
        b_embs = self.body_emb(input_ids_b, attention_mask_b)
        [batchS, full_length] = input_ids_a.size()
        chunk_factor = full_length // self.base_len
        # special handle of attention mask -----
        # The first attention value of each chunk marks whether the chunk has
        # any real tokens; all-padding chunks get a -9999 bias so the max
        # below never selects them.
        attention_mask_body = attention_mask_a.reshape(
            batchS, chunk_factor, -1)[:, :, 0]  # [batchS, chunk_factor]
        inverted_bias = ((1 - attention_mask_body) * (-9999)).float()
        a12 = torch.matmul(
            q_embs.unsqueeze(1), a_embs.transpose(
                1, 2))  # [batch, 1, chunk_factor]
        # Document score = best chunk score.
        logits_a = (a12[:, 0, :] + inverted_bias).max(dim=-1, keepdim=False).values  # [batch]
        # -------------------------------------
        # special handle of attention mask -----
        attention_mask_body = attention_mask_b.reshape(
            batchS, chunk_factor, -1)[:, :, 0]  # [batchS, chunk_factor]
        inverted_bias = ((1 - attention_mask_body) * (-9999)).float()
        a12 = torch.matmul(
            q_embs.unsqueeze(1), b_embs.transpose(
                1, 2))  # [batch, 1, chunk_factor]
        logits_b = (a12[:, 0, :] + inverted_bias).max(dim=-1, keepdim=False).values  # [batch]
        # -------------------------------------
        logit_matrix = torch.cat(
            [logits_a.unsqueeze(1), logits_b.unsqueeze(1)], dim=1)  # [B, 2]
        lsm = F.log_softmax(logit_matrix, dim=1)
        # NLL of the positive passage (column 0).
        loss = -1.0 * lsm[:, 0]
        return (loss.mean(),)
class RobertaDot_NLL_LN(NLL, RobertaForSequenceClassification):
    """RoBERTa dot-product dual encoder with NLL loss.

    Pools the backbone output ([CLS] or masked mean), projects through a
    linear head and LayerNorm (both sized 768 — NOTE(review): the original
    docstring said "compress to 200d", which does not match the code), and
    shares one encoder for queries and passages.
    """

    def __init__(self, config, model_argobj=None):
        NLL.__init__(self, model_argobj)
        RobertaForSequenceClassification.__init__(self, config)
        self.embeddingHead = nn.Linear(config.hidden_size, 768)
        self.norm = nn.LayerNorm(768)
        self.apply(self._init_weights)

    def query_emb(self, input_ids, attention_mask):
        backbone_out = self.roberta(input_ids=input_ids,
                                    attention_mask=attention_mask)
        pooled = self.masked_mean_or_first(backbone_out, attention_mask)
        return self.norm(self.embeddingHead(pooled))

    def body_emb(self, input_ids, attention_mask):
        # Queries and passages share the same encoder.
        return self.query_emb(input_ids, attention_mask)
class RobertaDot_CLF_ANN_NLL_MultiChunk(NLL_MultiChunk, RobertaDot_NLL_LN):
    # Multi-chunk variant of RobertaDot_NLL_LN: long passages are split into
    # 512-token chunks and each chunk is embedded independently.
    def __init__(self, config):
        RobertaDot_NLL_LN.__init__(self, config)
        # Chunk length used by NLL_MultiChunk when reshaping passages.
        self.base_len = 512

    def body_emb(self, input_ids, attention_mask):
        """Embed each 512-token chunk of a passage.

        Returns a tensor of shape [batchS, chunk_factor, embeddingS].
        """
        [batchS, full_length] = input_ids.size()
        chunk_factor = full_length // self.base_len
        # Flatten [batch, chunks * base_len] -> [batch * chunks, base_len] so
        # all chunks go through RoBERTa as a single batch.
        input_seq = input_ids.reshape(
            batchS,
            chunk_factor,
            full_length // chunk_factor).reshape(
            batchS * chunk_factor,
            full_length // chunk_factor)
        attention_mask_seq = attention_mask.reshape(
            batchS,
            chunk_factor,
            full_length // chunk_factor).reshape(
            batchS * chunk_factor,
            full_length // chunk_factor)
        outputs_k = self.roberta(input_ids=input_seq,
                                 attention_mask=attention_mask_seq)
        compressed_output_k = self.embeddingHead(
            outputs_k[0])  # [batch, len, dim]
        # Keep only each chunk's [CLS] position, then LayerNorm.
        compressed_output_k = self.norm(compressed_output_k[:, 0, :])
        [batch_expand, embeddingS] = compressed_output_k.size()
        # Restore the per-document chunk axis.
        complex_emb_k = compressed_output_k.reshape(
            batchS, chunk_factor, embeddingS)
        return complex_emb_k  # size [batchS, chunk_factor, embeddingS]
class HFBertEncoder(BertModel):
    """BertModel wrapper that replaces the tanh pooler output with the raw
    [CLS] token embedding."""

    def __init__(self, config):
        BertModel.__init__(self, config)
        assert config.hidden_size > 0, 'Encoder hidden_size can\'t be zero'
        self.init_weights()

    @classmethod
    def init_encoder(cls, args, dropout: float = 0.1):
        """Load bert-base-uncased, overriding both attention and hidden
        dropout with `dropout` (unless it is 0, which keeps the defaults)."""
        cfg = BertConfig.from_pretrained("bert-base-uncased")
        if dropout != 0:
            cfg.attention_probs_dropout_prob = dropout
            cfg.hidden_dropout_prob = dropout
        return cls.from_pretrained("bert-base-uncased", config=cfg)

    def forward(self, input_ids, attention_mask):
        """Return (sequence_output, pooled_output, hidden_states) where
        pooled_output is the [CLS] vector and hidden_states is always None."""
        hidden_states = None
        # NOTE(review): tuple-unpacking the parent's return assumes an older
        # transformers version that returns a tuple; newer versions return a
        # ModelOutput unless return_dict=False is passed — confirm the pinned
        # transformers version.
        sequence_output, pooled_output = super().forward(input_ids=input_ids,
                                                         attention_mask=attention_mask)
        pooled_output = sequence_output[:, 0, :]
        return sequence_output, pooled_output, hidden_states

    def get_out_size(self):
        """Output embedding dimensionality.

        Bug fix: this class never defines `encode_proj`, so the original
        unconditional attribute access always raised AttributeError. Use a
        getattr fallback so the method returns the config hidden size when no
        projection head exists (matching the DPR encoder it was copied from).
        """
        encode_proj = getattr(self, 'encode_proj', None)
        if encode_proj:
            return encode_proj.out_features
        return self.config.hidden_size
class BiEncoder(nn.Module):
    """ Bi-Encoder model component. Encapsulates query/question and context/passage encoders.
    """

    def __init__(self, args):
        super(BiEncoder, self).__init__()
        # Separate BERT towers for questions and contexts (DPR-style).
        self.question_model = HFBertEncoder.init_encoder(args)
        self.ctx_model = HFBertEncoder.init_encoder(args)

    def query_emb(self, input_ids, attention_mask):
        """Embed a query; returns the question tower's pooled ([CLS]) output."""
        _, pooled, _ = self.question_model(input_ids, attention_mask)
        return pooled

    def body_emb(self, input_ids, attention_mask):
        """Embed a passage; returns the context tower's pooled ([CLS]) output."""
        _, pooled, _ = self.ctx_model(input_ids, attention_mask)
        return pooled

    def forward(self, query_ids, attention_mask_q, input_ids_a=None, attention_mask_a=None,
                input_ids_b=None, attention_mask_b=None):
        q_embs = self.query_emb(query_ids, attention_mask_q)
        a_embs = self.body_emb(input_ids_a, attention_mask_a)
        if input_ids_b is None:
            # Encoding-only mode: return the raw embedding pair.
            return (q_embs, a_embs)
        b_embs = self.body_emb(input_ids_b, attention_mask_b)
        scores = torch.cat([(q_embs * a_embs).sum(-1).unsqueeze(1),
                            (q_embs * b_embs).sum(-1).unsqueeze(1)], dim=1)  # [B, 2]
        log_probs = F.log_softmax(scores, dim=1)
        # NLL of the positive passage (column 0).
        nll = -1.0 * log_probs[:, 0]
        return (nll.mean(),)
# --------------------------------------------------
# Every pretrained-model shortcut name advertised by the config classes below.
# Older transformers versions exposed these via `pretrained_config_archive_map`;
# the hasattr guard keeps this working (as an empty tuple) on versions that
# dropped the attribute.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            RobertaConfig,
        ) if hasattr(conf, 'pretrained_config_archive_map')
    ),
    (),
)

# Default line-processing function for (query, positive, negative) triples.
default_process_fn = triple_process_fn
class MSMarcoConfig:
    """Bundle describing one supported setup: model class, tokenizer/config
    classes, line-processing function, and pooling choice."""

    def __init__(self, name, model, process_fn=default_process_fn, use_mean=True,
                 tokenizer_class=RobertaTokenizer, config_class=RobertaConfig):
        self.name = name
        self.model_class = model
        self.process_fn = process_fn
        self.use_mean = use_mean
        self.tokenizer_class = tokenizer_class
        self.config_class = config_class
# Registry of supported model setups, keyed by the --model_type CLI option.
configs = [
    MSMarcoConfig(name="rdot_nll",
                  model=RobertaDot_NLL_LN,
                  use_mean=False,
                  ),
    MSMarcoConfig(name="rdot_nll_multi_chunk",
                  model=RobertaDot_CLF_ANN_NLL_MultiChunk,
                  use_mean=False,
                  ),
    MSMarcoConfig(name="dpr",
                  model=BiEncoder,
                  tokenizer_class=BertTokenizer,
                  config_class=BertConfig,
                  use_mean=False,
                  ),
]

# Name -> config lookup used by the command-line entry points.
MSMarcoConfigDict = {cfg.name: cfg for cfg in configs}
| 11,058 | 35.863333 | 144 | py |
container | container-main/main.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
import models
import utils
def get_args_parser():
    """Build the argument parser for DeiT-style training and evaluation.

    Returns an ``argparse.ArgumentParser`` created with ``add_help=False`` so
    it can be composed as a parent parser; call ``parse_args`` to get the run
    configuration.

    Fixes over the original: the ``--aa`` help string contained literal
    ``" + \\`` characters left over from a string concatenation attempted
    *inside* a single-quoted literal, and the statement carried a stray
    trailing comma that created a pointless one-element tuple expression.
    """
    parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
    parser.add_argument('--batch-size', default=64, type=int)
    parser.add_argument('--epochs', default=300, type=int)

    # Model parameters
    parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    # --model-ema / --no-model-ema toggle the same 'model_ema' flag (default on).
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=True)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')

    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')

    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine"')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')

    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    # Bug fix: help text was mangled by a concatenation attempted inside the
    # string literal; the argument name, type, and default are unchanged.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". '
                             '(default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    # --repeated-aug / --no-repeated-aug toggle the same flag (default on).
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)

    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

    # Distillation parameters
    parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "regnety_160"')
    parser.add_argument('--teacher-path', type=str, default='')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
    parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
    parser.add_argument('--distillation-tau', default=1.0, type=float, help="")

    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')

    # Dataset parameters
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
                        type=str, help='Image Net dataset path')
    parser.add_argument('--inat-category', default='name',
                        choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
                        type=str, help='semantic granularity')

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    # --pin-mem / --no-pin-mem toggle the same flag (default on).
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
                        help='')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def main(args):
    """Train and/or evaluate a DeiT-style model end to end.

    Builds datasets and loaders, the student model (plus optional EMA,
    DDP wrapper and distillation teacher), then either runs evaluation only
    (``--eval``) or the full training loop with per-epoch checkpointing and
    JSON logging.

    Args:
        args: namespace from get_args_parser(); mutated in place
            (nb_classes, lr, start_epoch, and distributed fields).
    """
    utils.init_distributed_mode(args)
    print(args)
    if args.distillation_type != 'none' and args.finetune and not args.eval:
        raise NotImplementedError("Finetuning with distillation not yet supported")
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()  # offset by rank so each process gets distinct randomness
    torch.manual_seed(seed)
    np.random.seed(seed)
    # random.seed(seed)
    cudnn.benchmark = True
    dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
    dataset_val, _ = build_dataset(is_train=False, args=args)
    if True:  # args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.repeated_aug:
            # Repeated-augmentation sampler: each image is drawn several times
            # per epoch with different augmentations.
            sampler_train = RASampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        else:
            sampler_train = torch.utils.data.DistributedSampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    # Eval can use a larger batch (1.5x) since no gradients are stored.
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=int(1.5 * args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)
    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path,
        drop_block_rate=None,
    )
    if args.finetune:
        if args.finetune.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.finetune, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.finetune, map_location='cpu')
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # Drop classifier heads whose shape differs (e.g. different nb_classes).
        for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
        # only the position tokens are interpolated
        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
        pos_tokens = torch.nn.functional.interpolate(
            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
        checkpoint_model['pos_embed'] = new_pos_embed
        model.load_state_dict(checkpoint_model, strict=False)
    model.to(device)
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(
            model,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume='')
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # Linear LR scaling rule relative to a base global batch size of 512.
    linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
    args.lr = linear_scaled_lr
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()
    lr_scheduler, _ = create_scheduler(args, optimizer)
    # Criterion selection: mixup provides soft targets, else label smoothing,
    # else plain cross-entropy.
    criterion = LabelSmoothingCrossEntropy()
    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    teacher_model = None
    if args.distillation_type != 'none':
        assert args.teacher_path, 'need to specify teacher-path when using distillation'
        print(f"Creating teacher model: {args.teacher_model}")
        teacher_model = create_model(
            args.teacher_model,
            pretrained=False,
            num_classes=args.nb_classes,
            global_pool='avg',
        )
        if args.teacher_path.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.teacher_path, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.teacher_path, map_location='cpu')
        teacher_model.load_state_dict(checkpoint['model'])
        teacher_model.to(device)
        teacher_model.eval()
    # wrap the criterion in our custom DistillationLoss, which
    # just dispatches to the original criterion if args.distillation_type is 'none'
    criterion = DistillationLoss(
        criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
    )
    output_dir = Path(args.output_dir)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        # Restore optimizer/scheduler/EMA/scaler state only when resuming training.
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
            if args.model_ema:
                utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
            if 'scaler' in checkpoint:
                loss_scaler.load_state_dict(checkpoint['scaler'])
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards per epoch so each rank sees a different subset.
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, model_ema, mixup_fn,
            set_training_mode=args.finetune == ''  # keep in eval mode during finetuning
        )
        lr_scheduler.step(epoch)
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'scaler': loss_scaler.state_dict(),
                    'args': args,
                }, checkpoint_path)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Compose the CLI from get_args_parser() so wrappers can reuse its flags.
    parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        # Create the output directory up front so checkpoint/log writes cannot fail later.
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
| 20,346 | 47.330166 | 119 | py |
container | container-main/losses.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
"""
import torch
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
    """Criterion wrapper that augments a base loss with knowledge distillation.

    Depending on ``distillation_type`` the teacher signal is either ignored
    ('none'), matched with a temperature-scaled KL divergence ('soft'), or
    matched with cross-entropy against the teacher's argmax ('hard'). The
    final loss is a convex combination controlled by ``alpha``.
    """
    def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
                 distillation_type: str, alpha: float, tau: float):
        super().__init__()
        assert distillation_type in ['none', 'soft', 'hard']
        self.base_criterion = base_criterion
        self.teacher_model = teacher_model
        self.distillation_type = distillation_type
        self.alpha = alpha
        self.tau = tau

    def forward(self, inputs, outputs, labels):
        """Compute the combined loss.

        Args:
            inputs: the original inputs, forwarded to the teacher model.
            outputs: student output; either a Tensor, or a (outputs,
                outputs_kd) tuple with the distillation head second.
            labels: targets for the base criterion.
        """
        outputs_kd = None
        if not isinstance(outputs, torch.Tensor):
            # assume that the model outputs a tuple of [outputs, outputs_kd]
            outputs, outputs_kd = outputs
        base_loss = self.base_criterion(outputs, labels)
        if self.distillation_type == 'none':
            return base_loss
        if outputs_kd is None:
            raise ValueError("When knowledge distillation is enabled, the model is "
                             "expected to return a Tuple[Tensor, Tensor] with the output of the "
                             "class_token and the dist_token")
        # The teacher is frozen supervision only: no gradient flows through it.
        with torch.no_grad():
            teacher_outputs = self.teacher_model(inputs)
        if self.distillation_type == 'soft':
            temperature = self.tau
            # KL between temperature-softened student and teacher distributions,
            # scaled by T^2 to keep gradient magnitudes comparable (Hinton et al.).
            distillation_loss = F.kl_div(
                F.log_softmax(outputs_kd / temperature, dim=1),
                F.log_softmax(teacher_outputs / temperature, dim=1),
                reduction='sum',
                log_target=True
            ) * (temperature * temperature) / outputs_kd.numel()
        else:  # 'hard': only remaining value after the __init__ assert
            distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
        return base_loss * (1 - self.alpha) + distillation_loss * self.alpha
| 2,771 | 41.646154 | 114 | py |
container | container-main/engine.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from losses import DistillationLoss
import utils
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
                    set_training_mode=True):
    """Train ``model`` for one epoch over ``data_loader``.

    AMP autocast wraps the forward pass; ``loss_scaler`` performs the scaled
    backward, optional gradient clipping (``max_norm``) and the optimizer
    step. Exits the process if the loss becomes non-finite.

    Returns:
        dict mapping metric name (e.g. 'loss', 'lr') to its global average.
    """
    model.train(set_training_mode)
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            # DistillationLoss also receives the raw inputs (for the teacher forward).
            loss = criterion(samples, outputs, targets)
        loss_value = loss.item()
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)
        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=is_second_order)
        torch.cuda.synchronize()
        if model_ema is not None:
            # Update the exponential moving average copy after each step.
            model_ema.update(model)
        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate ``model`` on ``data_loader``.

    Returns:
        dict with global-average 'acc1', 'acc5' and 'loss' (cross-entropy).
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    # switch to evaluation mode
    model.eval()
    for images, target in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        # Accuracy meters are weighted by batch size so the last (possibly
        # smaller) batch does not skew the averages.
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| 3,508 | 35.175258 | 98 | py |
container | container-main/hubconf.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from models import *
# torch.hub entry point: `dependencies` lists the pip packages that must be
# importable before models in this repo can be loaded via torch.hub.load().
dependencies = ["torch", "torchvision", "timm"]
| 138 | 22.166667 | 47 | py |
container | container-main/utils.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # The deque remembers only the last `window_size` raw values, while
        # total/count accumulate over the whole series.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        # `n` lets one call stand for n samples (e.g. a whole batch).
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        # torch.median returns the lower middle element for even-sized windows.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
class MetricLogger(object):
    """Collects named SmoothedValue meters, pretty-prints them, and can
    synchronize their counts/totals across distributed processes.

    Unknown attribute access falls through to the meters dict, so
    ``logger.loss`` returns the 'loss' meter.
    """
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        # Accepts scalars or 0-d tensors; tensors are converted via .item().
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Only called when normal lookup fails: expose meters as attributes.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # Reduces each meter's count/total across ranks (deques stay local;
        # see SmoothedValue.synchronize_between_processes).
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from ``iterable``, printing progress (ETA, meters,
        iteration/data timing and CUDA peak memory) every ``print_freq``
        iterations and a total-time summary at the end."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent waiting on the data loader vs. total iteration time.
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins
    original_print = builtins.print

    def gated_print(*args, **kwargs):
        # `force=True` lets a non-master rank print anyway (e.g. for errors).
        if kwargs.pop('force', False) or is_master:
            original_print(*args, **kwargs)

    # Monkeypatch the builtin so every module's print() is gated.
    builtins.print = gated_print
def is_dist_avail_and_initialized():
    # True only when torch.distributed is both compiled in and a process
    # group has actually been initialized.
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    # Fall back to a world size of 1 when running without torch.distributed.
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    # Rank 0 is the single-process fallback when distributed is inactive.
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    # Rank 0 is the designated writer for logs and checkpoints.
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # Only rank 0 serializes, avoiding concurrent writes to the same file.
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables.

    Supports torchrun/torch.distributed.launch (RANK/WORLD_SIZE/LOCAL_RANK)
    and SLURM (SLURM_PROCID); falls back to single-process mode otherwise.
    Sets args.rank / args.world_size / args.gpu / args.distributed /
    args.dist_backend as a side effect.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # Launched via torchrun / torch.distributed.launch.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # Launched under SLURM: derive the local GPU from the global proc id.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence stdout on non-master ranks (see setup_for_distributed).
    setup_for_distributed(args.rank == 0)
| 7,067 | 28.573222 | 94 | py |
container | container-main/datasets.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
    """iNaturalist dataset reader.

    Builds ``self.samples`` from the per-year annotation json files under
    ``root``; category ids are remapped to contiguous targets at the chosen
    semantic granularity (``category``). __getitem__/__len__ are inherited
    from ImageFolder and operate on the samples list built here.
    """
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        split = "train" if train else "val"
        with open(os.path.join(root, f'{split}{year}.json')) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        with open(os.path.join(root, f"train{year}.json")) as json_file:
            data_for_targeter = json.load(json_file)
        # Map category names (at the requested granularity) to contiguous ids,
        # in first-appearance order over the *train* annotations so that the
        # train and val label spaces agree.
        targeter = {}
        for elem in data_for_targeter['annotations']:
            name = data_catg[int(elem['category_id'])][category]
            if name not in targeter:
                targeter[name] = len(targeter)
        self.nb_classes = len(targeter)
        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            # Translate the raw category id to the remapped contiguous target.
            target_current_true = targeter[data_catg[target_current][category]]
            self.samples.append((path_current, target_current_true))
    # __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
    """Instantiate the dataset selected by ``args.data_set``.

    Returns:
        (dataset, nb_classes) for the requested split.
    """
    tfm = build_transform(is_train, args)
    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=tfm)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        # ImageNet directory layout: <data_path>/{train,val}/<class>/<img>
        split_root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = datasets.ImageFolder(split_root, transform=tfm)
        nb_classes = 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=tfm)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=tfm)
        nb_classes = dataset.nb_classes
    return dataset, nb_classes
def build_transform(is_train, args):
    """Return the torchvision transform pipeline for train or eval."""
    resize_im = args.input_size > 32
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # Small images (e.g. CIFAR): use padded RandomCrop instead of
            # RandomResizedCropAndInterpolation.
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform

    eval_ops = []
    if resize_im:
        # Resize to 256/224 of the crop size to maintain the same ratio
        # w.r.t. standard 224px evaluation.
        scaled = int((256 / 224) * args.input_size)
        eval_ops.append(transforms.Resize(scaled, interpolation=3))
    eval_ops.append(transforms.CenterCrop(args.input_size))
    eval_ops.append(transforms.ToTensor())
    eval_ops.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(eval_ops)
| 4,114 | 36.409091 | 105 | py |
container | container-main/models.py | import torch
import torch.nn as nn
from functools import partial
import math
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath, to_2tuple
import pdb
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384', 'container_light'
]
class Mlp(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """Two-layer token MLP: Linear -> activation -> dropout -> Linear -> dropout."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Falsy hidden/out sizes default to the input width.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        h = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(h))
class CMlp(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """MLP built from 1x1 convolutions so it operates on (B, C, H, W) maps."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Falsy hidden/out sizes default to the input channel count.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden, out, 1)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        h = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(h))
class Attention(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """Standard multi-head self-attention over a (B, N, C) token sequence.

    Projects tokens to fused q/k/v with one Linear, applies scaled dot-product
    attention per head, then projects back to C channels.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        # BUG FIX: removed a leftover `pdb.set_trace()` debugging breakpoint
        # that halted every forward pass through this module.
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Attention_pure(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """Self-attention over a pre-computed qkv sequence.

    Expects input of shape (B, N, 3*dim): the q/k/v projections are produced
    by the caller (see MixBlock.conv1); output has shape (B, N, dim).
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, triple_c = x.shape
        C = int(triple_c // 3)
        qkv = x.reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        scores = scores.softmax(dim=-1)
        scores = self.attn_drop(scores)
        out = (scores @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(out)
class MixBlock(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """Container mixing block: fuses global self-attention with a depthwise
    conv branch via a learned sigmoid gate (`sa_weight`), followed by a conv
    MLP. Operates on (B, C, H, W) feature maps.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        # Depthwise 3x3 conv acts as a (conditional) positional encoding.
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = nn.BatchNorm2d(dim)
        # 1x1 conv producing concatenated q,k,v (3*dim channels).
        self.conv1 = nn.Conv2d(dim, 3 * dim, 1)
        self.conv2 = nn.Conv2d(dim, dim, 1)
        # Depthwise 5x5 conv branch; fed by the `v` slice of conv1's output.
        self.conv = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        self.attn = Attention_pure(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = nn.BatchNorm2d(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        # Scalar gate (sigmoid'ed in forward) balancing attention vs conv branch.
        self.sa_weight = nn.Parameter(torch.Tensor([0.0]))
    def forward(self, x):
        x = x + self.pos_embed(x)
        B, _, H, W = x.shape
        residual = x
        x = self.norm1(x)
        qkv = self.conv1(x)
        # The last `dim` channels (the `v` slice) also feed the conv branch.
        conv = qkv[:, 2 * self.dim:, :, :]
        conv = self.conv(conv)
        # Flatten the spatial grid into a token sequence for self-attention.
        sa = qkv.flatten(2).transpose(1, 2)
        sa = self.attn(sa)
        sa = sa.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        # Sigmoid-gated blend of the attention and conv outputs, projected by conv2.
        x = residual + self.drop_path(self.conv2(torch.sigmoid(self.sa_weight) * sa + (1 - torch.sigmoid(self.sa_weight)) * conv))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class CBlock(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """Pure-convolution block: depthwise positional conv, a 1x1->5x5 depthwise
    ->1x1 'attention' branch, and a conv MLP, each with a residual connection.
    Operates on (B, C, H, W) feature maps.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        # Depthwise 3x3 conv acts as a (conditional) positional encoding.
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = nn.BatchNorm2d(dim)
        self.conv1 = nn.Conv2d(dim, dim, 1)
        self.conv2 = nn.Conv2d(dim, dim, 1)
        # Depthwise 5x5 conv stands in for self-attention in this block.
        self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = nn.BatchNorm2d(dim)
        self.mlp = CMlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                        act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        branch = self.conv2(self.attn(self.conv1(self.norm1(x))))
        x = x + self.drop_path(branch)
        return x + self.drop_path(self.mlp(self.norm2(x)))
class Block(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """Pre-norm transformer encoder block: MHSA then MLP, each wrapped with a
    residual connection and optional stochastic depth."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """ Image to Patch Embedding

    Projects non-overlapping patches with a strided conv, applies LayerNorm
    over the channel dimension, and returns a (B, embed_dim, H', W') map.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.norm = nn.LayerNorm(embed_dim)
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        B, C, H, W = x.shape
        # Flatten to tokens for the LayerNorm, then restore the spatial layout.
        tokens = x.flatten(2).transpose(1, 2)
        tokens = self.norm(tokens)
        return tokens.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
class HybridEmbed(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """CNN-backbone patch embedding: runs `backbone`, takes its last feature
    map, and projects it to `embed_dim` tokens with a 1x1 conv.

    When `feature_size` is not given, the output resolution and channel dim
    are discovered with a dry forward pass on a zero tensor.
    """
    def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        self.img_size = img_size
        self.backbone = backbone
        if feature_size is None:
            with torch.no_grad():
                # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
                # map for all networks, the feature metadata has reliable channel and stride info, but using
                # stride to calc feature dim requires info about padding of each stage that isn't captured.
                training = backbone.training
                if training:
                    backbone.eval()
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))
                if isinstance(o, (list, tuple)):
                    o = o[-1]  # last feature if backbone outputs list/tuple of features
                feature_size = o.shape[-2:]
                feature_dim = o.shape[1]
                # Restore the backbone's original train/eval state after probing.
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            if hasattr(self.backbone, 'feature_info'):
                feature_dim = self.backbone.feature_info.channels()[-1]
            else:
                feature_dim = self.backbone.num_features
        self.num_patches = feature_size[0] * feature_size[1]
        self.proj = nn.Conv2d(feature_dim, embed_dim, 1)
    def forward(self, x):
        x = self.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features
        # (B, C, H, W) -> (B, H*W, embed_dim) token sequence.
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class VisionTransformer(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`  -
        https://arxiv.org/abs/2010.11929

    This 4-stage variant uses per-stage lists for img_size / patch_size /
    embed_dim / depth / mlp_ratio: stages 1-3 use convolutional CBlocks and
    stage 4 uses MixBlocks.
    """
    def __init__(self, img_size=[224, 56, 28, 14], patch_size=[4, 2, 2, 2], in_chans=3, num_classes=1000, embed_dim=[64, 128, 320, 512], depth=[3, 4, 8, 3],
                 num_heads=12, mlp_ratio=[8, 8, 4, 4], qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None):
        """
        Args:
            img_size (list of int): input image size of each stage
            patch_size (list of int): patch size of each stage
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (list of int): embedding dimension of each stage
            depth (list of int): number of blocks in each stage
            num_heads (int): number of attention heads
            mlp_ratio (list of int): ratio of mlp hidden dim to embedding dim per stage
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.depth = depth

        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(
                hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
        else:
            # one patch-embedding per stage; each stage halves/quarters the resolution
            self.patch_embed1 = PatchEmbed(
                img_size=img_size[0], patch_size=patch_size[0], in_chans=in_chans, embed_dim=embed_dim[0])
            self.patch_embed2 = PatchEmbed(
                img_size=img_size[1], patch_size=patch_size[1], in_chans=embed_dim[0], embed_dim=embed_dim[1])
            self.patch_embed3 = PatchEmbed(
                img_size=img_size[2], patch_size=patch_size[2], in_chans=embed_dim[1], embed_dim=embed_dim[2])
            self.patch_embed4 = PatchEmbed(
                img_size=img_size[3], patch_size=patch_size[3], in_chans=embed_dim[2], embed_dim=embed_dim[3])

        num_patches1 = self.patch_embed1.num_patches
        num_patches2 = self.patch_embed2.num_patches
        num_patches3 = self.patch_embed3.num_patches
        num_patches4 = self.patch_embed4.num_patches

        self.pos_drop = nn.Dropout(p=drop_rate)
        self.mixture = True
        # stochastic depth decay rule: linearly increasing drop-path over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]
        self.blocks1 = nn.ModuleList([
            CBlock(
                dim=embed_dim[0], num_heads=num_heads, mlp_ratio=mlp_ratio[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth[0])])
        self.blocks2 = nn.ModuleList([
            CBlock(
                dim=embed_dim[1], num_heads=num_heads, mlp_ratio=mlp_ratio[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]], norm_layer=norm_layer)
            for i in range(depth[1])])
        self.blocks3 = nn.ModuleList([
            CBlock(
                dim=embed_dim[2], num_heads=num_heads, mlp_ratio=mlp_ratio[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]], norm_layer=norm_layer)
            for i in range(depth[2])])
        self.blocks4 = nn.ModuleList([
            MixBlock(
                dim=embed_dim[3], num_heads=num_heads, mlp_ratio=mlp_ratio[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]+depth[2]], norm_layer=norm_layer)
            for i in range(depth[3])])
        self.norm = nn.BatchNorm2d(embed_dim[-1])

        # Representation layer
        if representation_size:
            self.num_features = representation_size
            # Bug fix: embed_dim is a per-stage list; the pre-logits layer
            # consumes the last stage's width (original passed the whole list).
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim[-1], representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head
        self.head = nn.Linear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear layers, unit-affine for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        # Bug fix: self.embed_dim is a per-stage list; the classifier consumes
        # the final stage's features (matches the constructor's head).
        self.head = nn.Linear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        B = x.shape[0]
        x = self.patch_embed1(x)
        x = self.pos_drop(x)
        for blk in self.blocks1:
            x = blk(x)
        x = self.patch_embed2(x)
        for blk in self.blocks2:
            x = blk(x)
        x = self.patch_embed3(x)
        for blk in self.blocks3:
            x = blk(x)
        x = self.patch_embed4(x)
        for blk in self.blocks4:
            x = blk(x)
        x = self.norm(x)
        x = self.pre_logits(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        # global average pool over the spatial positions, then classify
        x = x.flatten(2).mean(-1)
        x = self.head(x)
        return x
@register_model
def container_v1_light(pretrained=False, **kwargs):
    """Container-Light: conv blocks in stages 1-3, mixed attention in stage 4."""
    model = VisionTransformer(
        img_size=[224, 56, 28, 14], patch_size=[4, 2, 2, 2], embed_dim=[64, 128, 320, 512],
        depth=[3, 4, 8, 3], num_heads=16, mlp_ratio=[8, 8, 4, 4], qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        # NOTE(review): this URL points at DeiT-tiny weights, which do not match
        # this architecture's state_dict — confirm before relying on pretrained=True.
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(state["model"])
    return model
| 18,794 | 44.071942 | 164 | py |
container | container-main/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
    """Distributed sampler with repeated augmentation.

    Each sample appears three times per epoch, and the copies are spread
    round-robin across processes so that every augmented version of a sample
    is visible to a different process (GPU).
    Heavily based on torch.utils.data.DistributedSampler.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # three augmented repeats of the dataset, split evenly over replicas
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        # per-epoch quota, rounded down to a multiple of 256 before splitting
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        # deterministic shuffle keyed on the epoch so every replica agrees
        gen = torch.Generator()
        gen.manual_seed(self.epoch)
        if self.shuffle:
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))
        # repeat every index three times, then pad to an even split
        repeated = []
        for idx in order:
            repeated.extend((idx, idx, idx))
        repeated += repeated[:(self.total_size - len(repeated))]
        assert len(repeated) == self.total_size
        # round-robin subsample: this replica takes every num_replicas-th entry
        own = repeated[self.rank:self.total_size:self.num_replicas]
        assert len(own) == self.num_samples
        return iter(own[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
| 2,292 | 37.216667 | 103 | py |
MAgent | MAgent-master/python/magent/builtin/mx_model/base.py | import os
import mxnet as mx
from magent.utility import has_gpu
from magent.model import BaseModel
class MXBaseModel(BaseModel):
    """Common base for MXNet models: device selection and checkpoint save/load."""

    def __init__(self, env, handle, name, subclass_name):
        """init a model

        Parameters
        ----------
        env: magent.Environment
        handle: handle (ctypes.c_int32)
        name: str
        subclass_name: str
            name of subclass
        """
        BaseModel.__init__(self, env, handle)
        self.name = name
        self.subclass_name = subclass_name

    def _get_ctx(self):
        """return correct context , priority: gpu > cpu

        Returns
        -------
        ctx: mx.context
        """
        return mx.gpu() if has_gpu() else mx.cpu()

    def save(self, dir_name, epoch):
        """save model to dir

        Parameters
        ----------
        dir_name: str
            name of the directory
        epoch: int
        """
        # checkpoints live in <dir_name>/<model name>/<subclass name>-<epoch>
        out_dir = dir_name
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        out_dir = os.path.join(out_dir, self.name)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        prefix = os.path.join(out_dir, self.subclass_name)
        self.model.save_checkpoint(prefix, epoch, save_optimizer_states=True)

    def load(self, dir_name, epoch=0, name=None):
        """load model from dir

        Parameters
        ----------
        dir_name: str
            name of the directory
        epoch: int
        """
        name = name or self.name
        prefix = os.path.join(dir_name, name, self.subclass_name)
        _, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
        self.model.set_params(arg_params, aux_params, force_init=True)
| 1,779 | 25.567164 | 74 | py |
MAgent | MAgent-master/python/magent/builtin/mx_model/a2c.py | """advantage actor critic"""
import os
import time
import numpy as np
import mxnet as mx
from .base import MXBaseModel
class AdvantageActorCritic(MXBaseModel):
    """Advantage actor-critic (A2C) with a shared policy/value trunk."""

    def __init__(self, env, handle, name, eval_obs=None,
                 batch_size=64, reward_decay=0.99, learning_rate=1e-3,
                 train_freq=1, value_coef=0.1, ent_coef=0.1,
                 custom_view_space=None, custom_feature_space=None,
                 *args, **kwargs):
        """init a model

        Parameters
        ----------
        env: Environment
            environment
        handle: Handle (ctypes.c_int32)
            handle of this group, can be got by env.get_handles
        name: str
            name of this model
        learning_rate: float
        batch_size: int
        reward_decay: float
            reward_decay in TD
        eval_obs: numpy array
            evaluation set of observation
        train_freq: int
            mean training times of a sample
        ent_coef: float
            weight of entropy loss in total loss
        value_coef: float
            weight of value loss in total loss
        custom_feature_space: tuple
            customized feature space
        custom_view_space: tuple
            customized view space
        """
        MXBaseModel.__init__(self, env, handle, name, "mxa2c")
        # ======================== set config ========================
        self.env = env
        self.handle = handle
        self.view_space = custom_view_space or env.get_view_space(handle)
        self.feature_space = custom_feature_space or env.get_feature_space(handle)
        self.num_actions = env.get_action_space(handle)[0]

        self.reward_decay = reward_decay
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.train_freq = train_freq  # train time of every sample (s,a,r,s')
        self.eval_obs = eval_obs

        self.value_coef = value_coef
        self.ent_coef = ent_coef
        self.train_ct = 0

        # ======================= build network =======================
        self.ctx = self._get_ctx()

        self.input_view = mx.sym.var("input_view")
        self.input_feature = mx.sym.var("input_feature")
        policy, value = self._create_network(self.input_view, self.input_feature)

        log_policy = mx.sym.log(policy)
        out_policy = mx.sym.BlockGrad(policy)
        # negated entropy: minimizing this loss maximizes policy entropy
        neg_entropy = ent_coef * mx.sym.sum(policy * log_policy, axis=1)
        neg_entropy = mx.sym.MakeLoss(data=neg_entropy)

        self.sym = mx.sym.Group([log_policy, value, neg_entropy, out_policy])
        self.model = mx.mod.Module(self.sym, data_names=['input_view', 'input_feature'],
                                   label_names=None, context=self.ctx)

        # bind (set initial batch size)
        self.bind_size = batch_size
        self.model.bind(data_shapes=[('input_view', (batch_size,) + self.view_space),
                                     ('input_feature', (batch_size,) + self.feature_space)],
                        label_shapes=None)

        # init params
        self.model.init_params(initializer=mx.init.Xavier())
        self.model.init_optimizer(optimizer='adam', optimizer_params={
            'learning_rate': learning_rate,
            'clip_gradient': 10,
        })

        # init training buffers (resized on demand in train())
        self.view_buf = np.empty((1,) + self.view_space)
        self.feature_buf = np.empty((1,) + self.feature_space)
        self.action_buf = np.empty(1, dtype=np.int32)
        self.advantage_buf, self.value_buf = np.empty(1), np.empty(1)
        # Bug fix: np.bool was removed in NumPy 1.24; builtin bool is the exact
        # replacement (np.bool used to be an alias of it).
        self.terminal_buf = np.empty(1, dtype=bool)

        # print("parameters", self.model.get_params())
        # mx.viz.plot_network(self.output).view()

    def _create_network(self, input_view, input_feature):
        """define computation graph of network

        Parameters
        ----------
        input_view: mx.symbol
        input_feature: mx.symbol
            the input symbols

        Returns
        -------
        policy, value: mx.symbol
            softmax policy head and scalar value head on a shared trunk
        """
        kernel_num = [32, 32]
        hidden_size = [256]

        # NOTE: the conv branch is disabled; a flat MLP is used on the view.
        if False:
            h_conv1 = mx.sym.Convolution(data=input_view, kernel=(3, 3),
                                         num_filter=kernel_num[0], layout="NHWC")
            h_conv1 = mx.sym.Activation(data=h_conv1, act_type="relu")
            h_conv2 = mx.sym.Convolution(data=h_conv1, kernel=(3, 3),
                                         num_filter=kernel_num[1], layout="NHWC")
            h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
        else:
            input_view = mx.sym.flatten(data=input_view)
            h_conv2 = mx.sym.FullyConnected(input_view, num_hidden=hidden_size[0])
            h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")

        flatten_view = mx.sym.flatten(data=h_conv2)
        h_view = mx.sym.FullyConnected(data=flatten_view, num_hidden=hidden_size[0])
        h_view = mx.sym.Activation(data=h_view, act_type="relu")

        h_emb = mx.sym.FullyConnected(data=input_feature, num_hidden=hidden_size[0])
        h_emb = mx.sym.Activation(data=h_emb, act_type="relu")

        dense = h_view + h_emb

        policy = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions, no_bias=True)
        policy = mx.sym.SoftmaxActivation(data=policy)
        # clip away 0/1 so log(policy) stays finite
        policy = mx.sym.clip(data=policy, a_min=1e-5, a_max=1 - 1e-5)
        value = mx.sym.FullyConnected(data=dense, num_hidden=1)

        return policy, value

    def infer_action(self, raw_obs, ids, policy="e_greedy", eps=0):
        """infer action for a batch of agents

        Parameters
        ----------
        raw_obs: tuple(numpy array, numpy array)
            raw observation of agents tuple(views, features)
        ids: numpy array
            ids of agents

        Returns
        -------
        acts: numpy array of int32
            actions for agents
        """
        view, feature = raw_obs[0], raw_obs[1]
        n = len(view)

        ret = np.empty(n, dtype=np.int32)
        self._reset_bind_size(n)
        data_batch = mx.io.DataBatch(data=[mx.nd.array(view), mx.nd.array(feature)])
        self.model.forward(data_batch, is_train=False)
        policy = self.model.get_outputs()[3].asnumpy()
        actions = np.arange(self.num_actions)

        # sample stochastically from the softmax policy
        for i in range(n):
            ret[i] = np.random.choice(actions, p=policy[i])

        return ret

    def train(self, sample_buffer, print_every=1000):
        """feed new data sample and train

        Parameters
        ----------
        sample_buffer: magent.utility.EpisodesBuffer
            buffer contains samples

        Returns
        -------
        loss: list
            policy gradient loss, critic loss, entropy loss
        value: float
            estimated state value
        """
        # calc buffer size (for unterminated episodes the last step has no
        # bootstrapped target yet, so it is dropped)
        n = 0
        for episode in sample_buffer.episodes():
            if episode.terminal:
                n += len(episode.rewards)
            else:
                n += len(episode.rewards) - 1

        if n == 0:
            return [0.0, 0.0, 0.0], 0.0

        # resize to the new size
        self.view_buf.resize((n,) + self.view_space)
        self.feature_buf.resize((n,) + self.feature_space)
        self.action_buf.resize(n)
        self.value_buf.resize(n)
        self.advantage_buf.resize(n)
        view, feature = self.view_buf, self.feature_buf
        action, value = self.action_buf, self.value_buf
        advantage = self.advantage_buf

        ct = 0
        gamma = self.reward_decay
        # collect episodes from multiple separate buffers to a continuous buffer
        for episode in sample_buffer.episodes():
            v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards
            m = len(episode.rewards)

            self._reset_bind_size(m)
            data_batch = mx.io.DataBatch(data=[mx.nd.array(v), mx.nd.array(f)])
            self.model.forward(data_batch, is_train=False)
            value = self.model.get_outputs()[1].asnumpy().flatten()

            # one-step TD residuals delta_t = r + gamma * V(s') - V(s)
            delta_t = np.empty(m)
            if episode.terminal:
                delta_t[:m-1] = r[:m-1] + gamma * value[1:m] - value[:m-1]
                delta_t[m-1] = r[m-1] + gamma * 0 - value[m-1]
            else:
                delta_t[:m-1] = r[:m-1] + gamma * value[1:m] - value[:m-1]
                m -= 1
                v, f, a = v[:-1], f[:-1], a[:-1]

            if m == 0:
                continue

            # discount advantage (GAE-style discounted sum of residuals)
            keep = 0
            for i in reversed(range(m)):
                keep = keep * gamma + delta_t[i]
                advantage[ct+i] = keep

            view[ct:ct+m] = v
            feature[ct:ct+m] = f
            action[ct:ct+m] = a
            ct += m
        assert n == ct

        n = len(advantage)
        neg_advantage = -advantage

        # scatter -A(s,a) into the chosen-action slots: the gradient of the
        # log-policy output is exactly the policy-gradient update
        neg_advs_np = np.zeros((n, self.num_actions), dtype=np.float32)
        neg_advs_np[np.arange(n), action] = neg_advantage
        neg_advs = mx.nd.array(neg_advs_np)

        # the grads of values are exactly negative advantages
        v_grads = mx.nd.array(self.value_coef * (neg_advantage[:, np.newaxis]))
        data_batch = mx.io.DataBatch(data=[mx.nd.array(view), mx.nd.array(feature)])

        self._reset_bind_size(n)
        self.model.forward(data_batch, is_train=True)
        self.model.backward(out_grads=[neg_advs, v_grads])
        self.model.update()

        log_policy, value, entropy_loss, _ = self.model.get_outputs()
        value = mx.nd.mean(value).asnumpy()[0]
        log_policy = log_policy.asnumpy()[np.arange(n), action]

        pg_loss = np.mean(neg_advantage * log_policy)
        entropy_loss = np.mean(entropy_loss.asnumpy())
        value_loss = self.value_coef * np.mean(np.square(advantage))
        print("sample %d %.4f %.4f %.4f %.4f" % (n, pg_loss, value_loss, entropy_loss, value))

        return [pg_loss, value_loss, entropy_loss], value

    def _reset_bind_size(self, new_size):
        """reset input shape of the model

        Parameters
        ----------
        new_size: int
            new batch size
        """
        if self.bind_size == new_size:
            return
        else:
            self.bind_size = new_size
            self.model.reshape(
                data_shapes=[
                    ('input_view', (new_size,) + self.view_space),
                    ('input_feature', (new_size,) + self.feature_space)],
            )

    def get_info(self):
        """get information of the model

        Returns
        -------
        info: string
        """
        # Bug fix: this is the a2c model; the string previously said "mx dqn"
        # (copy-paste from the DQN model).
        return "mx a2c train_time: %d" % (self.train_ct)
| 10,630 | 34.674497 | 95 | py |
MAgent | MAgent-master/python/magent/builtin/mx_model/dqn.py | import time
import numpy as np
import mxnet as mx
from .base import MXBaseModel
from ..common import ReplayBuffer
from ...utility import has_gpu
class DeepQNetwork(MXBaseModel):
    """Deep Q-network with optional dueling heads and double-Q targets."""

    def __init__(self, env, handle, name,
                 batch_size=64, learning_rate=1e-4, reward_decay=0.99,
                 train_freq=1, target_update=2000, memory_size=2 ** 20, eval_obs=None,
                 use_dueling=True, use_double=True, infer_batch_size=8192,
                 custom_view_space=None, custom_feature_space=None, num_gpu=1):
        """init a model

        Parameters
        ----------
        env: Environment
            environment
        handle: Handle (ctypes.c_int32)
            handle of this group, can be got by env.get_handles
        name: str
            name of this model
        learning_rate: float
        batch_size: int
        reward_decay: float
            reward_decay in TD
        train_freq: int
            mean training times of a sample
        target_update: int
            target will update every target_update batches
        memory_size: int
            capacity of the replay buffer
        eval_obs: numpy array
            evaluation set of observation
        use_dueling: bool
            whether use dueling q network
        use_double: bool
            whether use double q network
        num_gpu: int
            number of gpu
        infer_batch_size: int
            batch size while inferring actions
        custom_feature_space: tuple
            customized feature space
        custom_view_space: tuple
            customized view space
        """
        MXBaseModel.__init__(self, env, handle, name, "mxdqn")
        # ======================== set config ========================
        self.env = env
        self.handle = handle
        self.view_space = custom_view_space or env.get_view_space(handle)
        self.feature_space = custom_feature_space or env.get_feature_space(handle)
        self.num_actions = env.get_action_space(handle)[0]

        self.batch_size = batch_size
        self.infer_batch_size = infer_batch_size
        self.learning_rate = learning_rate
        self.train_freq = train_freq  # train time of every sample (s,a,r,s')
        self.target_update = target_update  # update frequency of target network
        self.eval_obs = eval_obs
        self.num_gpu = num_gpu

        self.use_dueling = use_dueling
        self.use_double = use_double

        self.train_ct = 0

        # ======================= build network =======================
        self.ctx = self._get_ctx()
        if self.num_gpu > 1 and self.ctx == mx.gpu():
            self.ctx = []
            for i in range(self.num_gpu):
                self.ctx.append(mx.gpu(i))

        self.input_view = mx.sym.var("input_view")
        self.input_feature = mx.sym.var("input_feature")
        self.mask = mx.sym.var("mask")
        self.action = mx.sym.var("action")
        self.target = mx.sym.var("target")

        self.qvalues = self._create_network(self.input_view, self.input_feature)
        self.gamma = reward_decay

        # masked mean squared TD error over the chosen actions
        self.action_onehot = mx.sym.one_hot(self.action, depth=self.num_actions)
        td_error = mx.sym.square(self.target - mx.sym.sum(self.qvalues * self.action_onehot, axis=1))
        self.loss = mx.sym.sum(td_error * self.mask) / mx.sym.sum(self.mask)
        self.loss = mx.sym.MakeLoss(data=self.loss)

        self.out_qvalues = mx.sym.BlockGrad(self.qvalues)
        self.output = mx.sym.Group([self.out_qvalues, self.loss])

        self.model = mx.mod.Module(self.output,
                                   data_names=['input_view', 'input_feature'],
                                   label_names=['action', 'target', 'mask'], context=self.ctx)
        self.target_model = mx.mod.Module(self.qvalues,
                                          data_names=['input_view', 'input_feature'],
                                          label_names=[], context=self.ctx)

        # bind (set initial batch size)
        self.bind_size = batch_size
        self.model.bind(data_shapes=[('input_view', (batch_size,) + self.view_space),
                                     ('input_feature', (batch_size,) + self.feature_space)],
                        label_shapes=[('action', (batch_size,)),
                                      ('target', (batch_size,)),
                                      ('mask', (batch_size,))])
        self.target_model.bind(data_shapes=[('input_view', (batch_size,) + self.view_space),
                                            ('input_feature', (batch_size,) + self.feature_space)])

        # init params
        self.model.init_params(initializer=mx.init.Xavier())
        self.model.init_optimizer(optimizer='adam', optimizer_params={
            'learning_rate': learning_rate,
            'clip_gradient': 10.0})
        self._copy_network(self.target_model, self.model)

        # init replay buffers
        self.replay_buf_len = 0
        self.memory_size = memory_size
        self.replay_buf_view = ReplayBuffer(shape=(memory_size,) + self.view_space)
        self.replay_buf_feature = ReplayBuffer(shape=(memory_size,) + self.feature_space)
        self.replay_buf_action = ReplayBuffer(shape=(memory_size,), dtype=np.int32)
        self.replay_buf_reward = ReplayBuffer(shape=(memory_size,))
        # Bug fix: np.bool was removed in NumPy 1.24; builtin bool is the
        # exact replacement (np.bool used to be an alias of it).
        self.replay_buf_terminal = ReplayBuffer(shape=(memory_size,), dtype=bool)
        self.replay_buf_mask = ReplayBuffer(shape=(memory_size,))
        # if mask[i] == 0, then the item is used for padding, not for training

        # print("parameters", self.model.get_params())
        # mx.viz.plot_network(self.loss).view()

    def _create_network(self, input_view, input_feature, use_conv=True):
        """define computation graph of network

        Parameters
        ----------
        input_view: mx.symbol
        input_feature: mx.symbol
            the input tensor
        use_conv: bool
            use convolution trunk for the view input, otherwise a flat MLP
        """
        kernel_num = [32, 32]
        hidden_size = [256]

        if use_conv:
            input_view = mx.sym.transpose(data=input_view, axes=[0, 3, 1, 2])
            h_conv1 = mx.sym.Convolution(data=input_view, kernel=(3, 3),
                                         num_filter=kernel_num[0], layout="NCHW")
            h_conv1 = mx.sym.Activation(data=h_conv1, act_type="relu")
            h_conv2 = mx.sym.Convolution(data=h_conv1, kernel=(3, 3),
                                         num_filter=kernel_num[1], layout="NCHW")
            h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
        else:
            input_view = mx.sym.flatten(data=input_view)
            h_conv2 = mx.sym.FullyConnected(input_view, num_hidden=hidden_size[0])
            h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")

        flatten_view = mx.sym.flatten(data=h_conv2)
        h_view = mx.sym.FullyConnected(data=flatten_view, num_hidden=hidden_size[0])
        h_view = mx.sym.Activation(data=h_view, act_type="relu")

        h_emb = mx.sym.FullyConnected(data=input_feature, num_hidden=hidden_size[0])
        h_emb = mx.sym.Activation(data=h_emb, act_type="relu")

        dense = mx.sym.concat(h_view, h_emb)

        if self.use_dueling:
            # dueling heads: Q(s,a) = V(s) + A(s,a) - mean_a A(s,a)
            value = mx.sym.FullyConnected(data=dense, num_hidden=1)
            advantage = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions)

            mean = mx.sym.mean(advantage, axis=1, keepdims=True)
            advantage = mx.sym.broadcast_sub(advantage, mean)
            qvalues = mx.sym.broadcast_add(advantage, value)
        else:
            qvalues = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions)

        return qvalues

    def infer_action(self, raw_obs, ids, policy="e_greedy", eps=0):
        """infer action for a batch of agents

        Parameters
        ----------
        raw_obs: tuple(numpy array, numpy array)
            raw observation of agents tuple(views, features)
        ids: numpy array
            ids of agents
        policy: str
            can be eps-greedy or greedy
        eps: float
            used when policy is eps-greedy

        Returns
        -------
        acts: numpy array of int32
            actions for agents
        """
        view, feature = raw_obs[0], raw_obs[1]

        if policy == 'e_greedy':
            eps = eps
        elif policy == 'greedy':
            eps = 0

        n = len(view)
        # pad so every GPU gets at least one sample
        if n < self.num_gpu:
            view = np.tile(view, (self.num_gpu, 1, 1, 1))
            feature = np.tile(feature, (self.num_gpu, 1))

        batch_size = min(len(view), self.infer_batch_size)
        self._reset_bind_size(batch_size)
        best_actions = []
        infer_iter = mx.io.NDArrayIter(data=[view, feature], batch_size=batch_size)
        for batch in infer_iter:
            self.model.forward(batch, is_train=False)
            qvalue_batch = self.model.get_outputs()[0]
            batch_action = mx.nd.argmax(qvalue_batch, axis=1)
            best_actions.append(batch_action)
        best_actions = np.array([x.asnumpy() for x in best_actions]).flatten()
        best_actions = best_actions[:n]

        # eps-greedy: replace a random eps-fraction of the greedy actions
        random = np.random.randint(self.num_actions, size=(n,))
        cond = np.random.uniform(0, 1, size=(n,)) < eps
        ret = np.where(cond, random, best_actions)

        return ret.astype(np.int32)

    def _calc_target(self, next_view, next_feature, rewards, terminal):
        """calculate target value"""
        n = len(rewards)
        data_batch = mx.io.DataBatch(data=[mx.nd.array(next_view), mx.nd.array(next_feature)])
        self._reset_bind_size(n)
        if self.use_double:
            # double-Q: online net picks the action, target net scores it
            self.target_model.forward(data_batch, is_train=False)
            self.model.forward(data_batch, is_train=False)
            t_qvalues = self.target_model.get_outputs()[0].asnumpy()
            qvalues = self.model.get_outputs()[0].asnumpy()
            next_value = t_qvalues[np.arange(n), np.argmax(qvalues, axis=1)]
        else:
            self.target_model.forward(data_batch, is_train=False)
            t_qvalues = self.target_model.get_outputs()[0].asnumpy()
            next_value = np.max(t_qvalues, axis=1)

        target = np.where(terminal, rewards, rewards + self.gamma * next_value)

        return target

    def _add_to_replay_buffer(self, sample_buffer):
        """add samples in sample_buffer to replay buffer"""
        n = 0
        for episode in sample_buffer.episodes():
            v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards

            m = len(r)

            mask = np.ones((m,))
            # Bug fix: np.bool was removed in NumPy 1.24; use builtin bool.
            terminal = np.zeros((m,), dtype=bool)
            if episode.terminal:
                terminal[-1] = True
            else:
                # last transition of an unfinished episode has no target yet
                mask[-1] = 0

            self.replay_buf_view.put(v)
            self.replay_buf_feature.put(f)
            self.replay_buf_action.put(a)
            self.replay_buf_reward.put(r)
            self.replay_buf_terminal.put(terminal)
            self.replay_buf_mask.put(mask)

            n += m

        self.replay_buf_len = min(self.memory_size, self.replay_buf_len + n)
        return n

    def train(self, sample_buffer, print_every=1000):
        """ add new samples in sample_buffer to replay buffer and train

        Parameters
        ----------
        sample_buffer: magent.utility.EpisodesBuffer
            buffer contains samples
        print_every: int
            print log every print_every batches

        Returns
        -------
        loss: float
            bellman residual loss
        value: float
            estimated state value
        """
        add_num = self._add_to_replay_buffer(sample_buffer)

        batch_size = self.batch_size
        total_loss = 0

        n_batches = int(self.train_freq * add_num / batch_size)
        if n_batches == 0:
            return 0, 0

        print("batch number: %d  add: %d  replay_len: %d/%d" %
              (n_batches, add_num, self.replay_buf_len, self.memory_size))

        start_time = time.time()
        ct = 0
        for i in range(n_batches):
            # fetch a batch (index+1 is the successor state, so sample below len-1)
            index = np.random.choice(self.replay_buf_len - 1, batch_size)

            batch_view = self.replay_buf_view.get(index)
            batch_feature = self.replay_buf_feature.get(index)
            batch_action = self.replay_buf_action.get(index)
            batch_reward = self.replay_buf_reward.get(index)
            batch_terminal = self.replay_buf_terminal.get(index)
            batch_mask = self.replay_buf_mask.get(index)

            batch_next_view = self.replay_buf_view.get(index+1)
            batch_next_feature = self.replay_buf_feature.get(index+1)

            batch_target = self._calc_target(batch_next_view, batch_next_feature,
                                             batch_reward, batch_terminal)

            self._reset_bind_size(batch_size)
            batch = mx.io.DataBatch(data=[mx.nd.array(batch_view),
                                          mx.nd.array(batch_feature)],
                                    label=[mx.nd.array(batch_action),
                                           mx.nd.array(batch_target),
                                           mx.nd.array(batch_mask)])
            self.model.forward(batch, is_train=True)
            self.model.backward()
            self.model.update()
            loss = np.mean(self.model.get_outputs()[1].asnumpy())
            total_loss += loss

            if ct % self.target_update == 0:
                self._copy_network(self.target_model, self.model)

            if ct % print_every == 0:
                print("batch %5d,  loss %.6f, eval %.6f" % (ct, loss, self._eval(batch_target)))
            ct += 1
            self.train_ct += 1

        total_time = time.time() - start_time
        step_average = total_time / max(1.0, (ct / 1000.0))
        print("batches: %d,  total time: %.2f,  1k average: %.2f" % (ct, total_time, step_average))

        return total_loss / ct if ct != 0 else 0, self._eval(batch_target)

    def _reset_bind_size(self, new_size):
        """reset batch size"""
        if self.bind_size == new_size:
            return
        else:
            self.bind_size = new_size

            def _reshape(model, is_target):
                data_shapes = [('input_view', (new_size,) + self.view_space),
                               ('input_feature', (new_size,) + self.feature_space)]
                label_shapes = [('action', (new_size,)),
                                ('target', (new_size,)),
                                ('mask', (new_size,))]
                if is_target:
                    label_shapes = None
                model.reshape(data_shapes=data_shapes, label_shapes=label_shapes)

            _reshape(self.model, False)
            _reshape(self.target_model, True)

    def _copy_network(self, dest, source):
        """copy to target network"""
        arg_params, aux_params = source.get_params()
        dest.set_params(arg_params, aux_params)

    def _eval(self, target):
        """evaluate estimated q value"""
        if self.eval_obs is None:
            return np.mean(target)
        else:
            self._reset_bind_size(len(self.eval_obs[0]))
            with self.ctx:
                batch = mx.io.DataBatch(data=[mx.nd.array(self.eval_obs[0]),
                                              mx.nd.array(self.eval_obs[1])])
                self.model.forward(batch, is_train=False)
                return np.mean(self.model.get_outputs()[0].asnumpy())

    def get_info(self):
        """Return a short description string for logging."""
        return "mx dqn train_time: %d" % (self.train_ct)
cwn | cwn-main/mp/cell_mp.py | """
Based on https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/nn/conv/message_passing.py
MIT License
Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from inspect import Parameter
from typing import List, Optional, Set
from torch_geometric.typing import Adj, Size
import torch
from torch import Tensor
from torch_sparse import SparseTensor
from torch_scatter import gather_csr, scatter, segment_csr
from torch_geometric.nn.conv.utils.helpers import expand_left
from mp.cell_mp_inspector import CellularInspector
class CochainMessagePassing(torch.nn.Module):
    """The base class for building message passing models on cochain complexes.

    # TODO: Add support for co-boundary adjacencies
    The class considers three types of adjacencies:
    boundary, upper and lower adjacencies.

    Args:
        up_msg_size (int): The dimensionality of the messages coming from the upper adjacent cells.
        down_msg_size (int): The dimensionality of the messages coming from the
            lower adjacent cells.
        aggr_up (string, optional): The aggregation scheme to use for upper-adjacencies
            (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"` or :obj:`None`).
            (default: :obj:`"add"`)
        aggr_down (string, optional): The aggregation scheme to use for lower-adjacencies
            (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"` or :obj:`None`).
            (default: :obj:`"add"`)
        aggr_boundary (string, optional): The aggregation scheme to use for boundary adjacencies.
        flow (string, optional): The flow adjacency of message passing
            (:obj:`"source_to_target"` or :obj:`"target_to_source"`).
            (default: :obj:`"source_to_target"`)
        node_dim (int, optional): The axis along which to propagate.
            (default: :obj:`-2`)
        boundary_msg_size (int, optional): The dimensionality of the messages coming from the
            boundary cells.
        use_down_msg (bool): Whether to propagate messages via the lower adjacencies.
        use_boundary_msg (bool): Whether to propagate messages via the boundary adjacencies.
    """

    # Argument names that __collect__ fills in automatically; they are excluded
    # from the user-defined parameters of the message/aggregate hooks.
    special_args: Set[str] = {
        'up_index', 'up_adj_t', 'up_index_i', 'up_index_j', 'up_size',
        'up_size_i', 'up_size_j', 'up_ptr', 'agg_up_index', 'up_dim_size',
        'down_index', 'down_adj_t', 'down_index_i', 'down_index_j', 'down_size',
        'down_size_i', 'down_size_j', 'down_ptr', 'agg_down_index', 'down_dim_size',
        'boundary_index', 'boundary_adj_t', 'boundary_index_i', 'boundary_index_j', 'boundary_size',
        'boundary_size_i', 'boundary_size_j', 'boundary_ptr', 'agg_boundary_index', 'boundary_dim_size',
    }
    def __init__(self,
                 up_msg_size,
                 down_msg_size,
                 aggr_up: Optional[str] = "add",
                 aggr_down: Optional[str] = "add",
                 aggr_boundary: Optional[str] = "add",
                 flow: str = "source_to_target",
                 node_dim: int = -2,
                 boundary_msg_size=None,
                 use_down_msg=True,
                 use_boundary_msg=True):
        """Configure aggregators and introspect the message/aggregate hooks."""
        super(CochainMessagePassing, self).__init__()
        self.up_msg_size = up_msg_size
        self.down_msg_size = down_msg_size
        self.use_boundary_msg = use_boundary_msg
        self.use_down_msg = use_down_msg
        # Use the same out dimension for boundaries as for down adjacency by default
        self.boundary_msg_size = down_msg_size if boundary_msg_size is None else boundary_msg_size
        self.aggr_up = aggr_up
        self.aggr_down = aggr_down
        self.aggr_boundary = aggr_boundary
        assert self.aggr_up in ['add', 'mean', 'max', None]
        assert self.aggr_down in ['add', 'mean', 'max', None]
        self.flow = flow
        assert self.flow in ['source_to_target', 'target_to_source']
        # This is the dimension in which nodes live in the feature matrix x.
        # i.e. if x has shape [N, in_channels], then node_dim = 0 or -2
        self.node_dim = node_dim
        self.inspector = CellularInspector(self)
        # This stores the parameters of these functions. If pop_first_n is non-zero
        # the first n parameters are not stored (this drops `self` and, for
        # `update`, the three aggregated-message arguments).
        self.inspector.inspect(self.message_up)
        self.inspector.inspect(self.message_down)
        self.inspector.inspect(self.message_boundary)
        self.inspector.inspect(self.aggregate_up, pop_first_n=1)
        self.inspector.inspect(self.aggregate_down, pop_first_n=1)
        self.inspector.inspect(self.aggregate_boundary, pop_first_n=1)
        self.inspector.inspect(self.message_and_aggregate_up, pop_first_n=1)
        self.inspector.inspect(self.message_and_aggregate_down, pop_first_n=1)
        self.inspector.inspect(self.message_and_aggregate_boundary, pop_first_n=1)
        self.inspector.inspect(self.update, pop_first_n=3)
        # Return the parameter name for these functions minus those specified in special_args
        # TODO(Cris): Split user args by type of adjacency to make sure no bugs are introduced.
        self.__user_args__ = self.inspector.keys(
            ['message_up', 'message_down', 'message_boundary', 'aggregate_up',
             'aggregate_down', 'aggregate_boundary']).difference(self.special_args)
        self.__fused_user_args__ = self.inspector.keys(
            ['message_and_aggregate_up',
             'message_and_aggregate_down',
             'message_and_aggregate_boundary']).difference(self.special_args)
        self.__update_user_args__ = self.inspector.keys(
            ['update']).difference(self.special_args)
        # Support for "fused" message passing (only used with SparseTensor inputs).
        self.fuse_up = self.inspector.implements('message_and_aggregate_up')
        self.fuse_down = self.inspector.implements('message_and_aggregate_down')
        self.fuse_boundary = self.inspector.implements('message_and_aggregate_boundary')
def __check_input_together__(self, index_up, index_down, size_up, size_down):
# If we have both up and down adjacency, then check the sizes agree.
if (index_up is not None and index_down is not None
and size_up is not None and size_down is not None):
assert size_up[0] == size_down[0]
assert size_up[1] == size_down[1]
def __check_input_separately__(self, index, size):
"""This gets an up or down index and the size of the assignment matrix"""
the_size: List[Optional[int]] = [None, None]
if isinstance(index, Tensor):
assert index.dtype == torch.long
assert index.dim() == 2
assert index.size(0) == 2
if size is not None:
the_size[0] = size[0]
the_size[1] = size[1]
return the_size
elif isinstance(index, SparseTensor):
if self.flow == 'target_to_source':
raise ValueError(
('Flow adjacency "target_to_source" is invalid for '
'message propagation via `torch_sparse.SparseTensor`. If '
'you really want to make use of a reverse message '
'passing flow, pass in the transposed sparse tensor to '
'the message passing module, e.g., `adj_t.t()`.'))
the_size[0] = index.sparse_size(1)
the_size[1] = index.sparse_size(0)
return the_size
elif index is None:
return the_size
raise ValueError(
('`MessagePassing.propagate` only supports `torch.LongTensor` of '
'shape `[2, num_messages]` or `torch_sparse.SparseTensor` for '
'argument `edge_index`.'))
def __set_size__(self, size: List[Optional[int]], dim: int, src: Tensor):
the_size = size[dim]
if the_size is None:
size[dim] = src.size(self.node_dim)
elif the_size != src.size(self.node_dim):
raise ValueError(
(f'Encountered tensor with size {src.size(self.node_dim)} in '
f'dimension {self.node_dim}, but expected size {the_size}.'))
def __lift__(self, src, index, dim):
if isinstance(index, Tensor):
index = index[dim]
return src.index_select(self.node_dim, index)
elif isinstance(index, SparseTensor):
if dim == 1:
rowptr = index.storage.rowptr()
rowptr = expand_left(rowptr, dim=self.node_dim, dims=src.dim())
return gather_csr(src, rowptr)
elif dim == 0:
col = index.storage.col()
return src.index_select(self.node_dim, col)
raise ValueError
    def __collect__(self, args, index, size, adjacency, kwargs):
        """Gather the concrete values for the hook parameters named in *args*.

        Builds the dict that `inspector.distribute` later uses to call the
        message/aggregate hooks for one adjacency type.
        """
        # i = target endpoint, j = source endpoint of each message.
        i, j = (1, 0) if self.flow == 'source_to_target' else (0, 1)
        assert adjacency in ['up', 'down', 'boundary']
        out = {}
        for arg in args:
            # Here the x_i and x_j parameters are automatically extracted
            # from an argument having the prefix x.
            if arg[-2:] not in ['_i', '_j']:
                out[arg] = kwargs.get(arg, Parameter.empty)
            elif index is not None:
                dim = 0 if arg[-2:] == '_j' else 1
                # Extract any part up to _j or _i. So for x_j extract x.
                # Only arguments prefixed with the current adjacency are filled here.
                if adjacency == 'up' and arg.startswith('up_'):
                    data = kwargs.get(arg[3:-2], Parameter.empty)
                    size_data = data
                elif adjacency == 'down' and arg.startswith('down_'):
                    data = kwargs.get(arg[5:-2], Parameter.empty)
                    size_data = data
                elif adjacency == 'boundary' and arg.startswith('boundary_'):
                    if dim == 0:
                        # We need to use the boundary attribute matrix (i.e. boundary_attr) for the features
                        # And we need to use the x matrix to extract the number of parent cells
                        data = kwargs.get('boundary_attr', Parameter.empty)
                        size_data = kwargs.get(arg[9:-2], Parameter.empty)
                    else:
                        data = kwargs.get(arg[9:-2], Parameter.empty)
                        size_data = data
                else:
                    continue
                # This was used before for the case when data is supplied directly
                # as (x_i, x_j) as opposed to a matrix X [N, in_channels]
                # (the 2nd case is handled by the next if)
                if isinstance(data, (tuple, list)):
                    raise ValueError('This format is not supported for cellular message passing')
                # This is the usual case when we get a feature matrix of shape [N, in_channels]
                if isinstance(data, Tensor):
                    # Same size checks as above.
                    self.__set_size__(size, dim, size_data)
                    # Select the features of the nodes indexed by i or j from the data matrix
                    data = self.__lift__(data, index, j if arg[-2:] == '_j' else i)
                out[arg] = data
        # Automatically builds some default parameters that can be used in the message passing
        # functions as needed. This was modified to be discriminative of upper and lower adjacency.
        if isinstance(index, Tensor):
            out[f'{adjacency}_adj_t'] = None
            out[f'{adjacency}_ptr'] = None
            out[f'{adjacency}_index'] = index
            out[f'{adjacency}_index_i'] = index[i]
            out[f'{adjacency}_index_j'] = index[j]
        elif isinstance(index, SparseTensor):
            # NOTE(review): 'edge_index' looks like a leftover from the PyG base
            # class — presumably it should be f'{adjacency}_index'; confirm.
            out['edge_index'] = None
            out[f'{adjacency}_adj_t'] = index
            out[f'{adjacency}_index_i'] = index.storage.row()
            out[f'{adjacency}_index_j'] = index.storage.col()
            out[f'{adjacency}_ptr'] = index.storage.rowptr()
            out[f'{adjacency}_weight'] = index.storage.value()
            out[f'{adjacency}_attr'] = index.storage.value()
            out[f'{adjacency}_type'] = index.storage.value()
        # We need this if in contrast to pyg because index can be None for some adjacencies.
        if isinstance(index, Tensor) or isinstance(index, SparseTensor):
            # This is the old `index` argument used for aggregation of the messages.
            out[f'agg_{adjacency}_index'] = out[f'{adjacency}_index_i']
            out[f'{adjacency}_size'] = size
            out[f'{adjacency}_size_i'] = size[1] or size[0]
            out[f'{adjacency}_size_j'] = size[0] or size[1]
            out[f'{adjacency}_dim_size'] = out[f'{adjacency}_size_i']
        return out
def get_msg_and_agg_func(self, adjacency):
if adjacency == 'up':
return self.message_and_aggregate_up
if adjacency == 'down':
return self.message_and_aggregate_down
elif adjacency == 'boundary':
return self.message_and_aggregate_boundary
else:
return None
def get_msg_func(self, adjacency):
if adjacency == 'up':
return self.message_up
elif adjacency == 'down':
return self.message_down
elif adjacency == 'boundary':
return self.message_boundary
else:
return None
def get_agg_func(self, adjacency):
if adjacency == 'up':
return self.aggregate_up
elif adjacency == 'down':
return self.aggregate_down
elif adjacency == 'boundary':
return self.aggregate_boundary
else:
return None
def get_fuse_boolean(self, adjacency):
if adjacency == 'up':
return self.fuse_up
elif adjacency == 'down':
return self.fuse_down
elif adjacency == 'boundary':
return self.fuse_boundary
else:
return None
    def __message_and_aggregate__(self, index: Adj,
                                  adjacency: str,
                                  size: List[Optional[int]] = None,
                                  **kwargs):
        """Run message passing for one adjacency type ('up', 'down' or 'boundary')."""
        assert adjacency in ['up', 'down', 'boundary']
        # Fused message and aggregation (only available with SparseTensor input
        # and when the subclass implements the fused hook).
        fuse = self.get_fuse_boolean(adjacency)
        if isinstance(index, SparseTensor) and fuse:
            # Collect the objects to pass to the function params in __user_arg.
            coll_dict = self.__collect__(self.__fused_user_args__, index, size, adjacency, kwargs)
            # message and aggregation are fused in a single function
            msg_aggr_kwargs = self.inspector.distribute(
                f'message_and_aggregate_{adjacency}', coll_dict)
            message_and_aggregate = self.get_msg_and_agg_func(adjacency)
            return message_and_aggregate(index, **msg_aggr_kwargs)
        # Otherwise, run message and aggregation in separation.
        elif isinstance(index, Tensor) or not fuse:
            # Collect the objects to pass to the function params in __user_arg.
            coll_dict = self.__collect__(self.__user_args__, index, size, adjacency, kwargs)
            # Compute the messages, then aggregate them per target cell.
            msg_kwargs = self.inspector.distribute(f'message_{adjacency}', coll_dict)
            message = self.get_msg_func(adjacency)
            out = message(**msg_kwargs)
            aggr_kwargs = self.inspector.distribute(f'aggregate_{adjacency}', coll_dict)
            aggregate = self.get_agg_func(adjacency)
            return aggregate(out, **aggr_kwargs)
    def propagate(self, up_index: Optional[Adj],
                  down_index: Optional[Adj],
                  boundary_index: Optional[Adj],  # The None default does not work here!
                  up_size: Size = None,
                  down_size: Size = None,
                  boundary_size: Size = None,
                  **kwargs):
        """The initial call to start propagating messages.

        Runs up, down and boundary message passing (each only when the
        corresponding index is available/enabled) and combines the three
        aggregated outputs via :meth:`update`.
        """
        up_size = self.__check_input_separately__(up_index, up_size)
        down_size = self.__check_input_separately__(down_index, down_size)
        boundary_size = self.__check_input_separately__(boundary_index, boundary_size)
        self.__check_input_together__(up_index, down_index, up_size, down_size)
        up_out, down_out = None, None
        # Up messaging and aggregation
        if up_index is not None:
            up_out = self.__message_and_aggregate__(up_index, 'up', up_size, **kwargs)
        # Down messaging and aggregation
        if self.use_down_msg and down_index is not None:
            down_out = self.__message_and_aggregate__(down_index, 'down', down_size, **kwargs)
        # Boundary messaging and aggregation: requires boundary features to be present.
        boundary_out = None
        if self.use_boundary_msg and 'boundary_attr' in kwargs and kwargs['boundary_attr'] is not None:
            boundary_out = self.__message_and_aggregate__(boundary_index, 'boundary', boundary_size, **kwargs)
        # Collect update() arguments from the up and down adjacencies only.
        coll_dict = {}
        up_coll_dict = self.__collect__(self.__update_user_args__, up_index, up_size, 'up',
                                        kwargs)
        down_coll_dict = self.__collect__(self.__update_user_args__,
                                          down_index, down_size, 'down', kwargs)
        coll_dict.update(up_coll_dict)
        coll_dict.update(down_coll_dict)
        update_kwargs = self.inspector.distribute('update', coll_dict)
        return self.update(up_out, down_out, boundary_out, **update_kwargs)
def message_up(self, up_x_j: Tensor, up_attr: Tensor) -> Tensor:
r"""Constructs upper messages from cell :math:`j` to cell :math:`i` for each edge in
:obj:`up_index`. This function can take any argument as input which was initially
passed to :meth:`propagate`. Furthermore, tensors passed to :meth:`propagate` can be mapped
to the respective cells :math:`i` and :math:`j` by appending :obj:`_i` or
:obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. The parameter
:obj:`up_attr` includes the features of the shared coboundary cell.
"""
return up_x_j
def message_down(self, down_x_j: Tensor, down_attr: Tensor) -> Tensor:
r"""Constructs lower messages from cell :math:`j` to cell :math:`i` for each edge in
:obj:`down_index`. This function can take any argument as input which was initially
passed to :meth:`propagate`. Furthermore, tensors passed to :meth:`propagate` can be mapped
to the respective cells :math:`i` and :math:`j` by appending :obj:`_i` or
:obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. The parameter
:obj:`down_attr` includes the features of the shared boundary cell.
"""
return down_x_j
def message_boundary(self, boundary_x_j: Tensor):
r"""Constructs boundary messages from cell :math:`j` to cell :math:`i` for each edge in
:obj:`boundary_index`. This function can take any argument as input which was initially
passed to :meth:`propagate`. Furthermore, tensors passed to :meth:`propagate` can be mapped
to the respective cells :math:`i` and :math:`j` by appending :obj:`_i` or
:obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`.
"""
return boundary_x_j
def aggregate_up(self, inputs: Tensor, agg_up_index: Tensor,
up_ptr: Optional[Tensor] = None,
up_dim_size: Optional[int] = None) -> Tensor:
r"""Aggregates messages from upper adjacent cells.
Takes in the output of message computation as first argument and any
argument which was initially passed to :meth:`propagate`.
By default, this function will delegate its call to scatter functions
that support "add", "mean" and "max" operations as specified in
:meth:`__init__` by the :obj:`aggr` argument.
"""
if up_ptr is not None:
up_ptr = expand_left(up_ptr, dim=self.node_dim, dims=inputs.dim())
return segment_csr(inputs, up_ptr, reduce=self.aggr_up)
else:
return scatter(inputs, agg_up_index, dim=self.node_dim, dim_size=up_dim_size,
reduce=self.aggr_up)
def aggregate_down(self, inputs: Tensor, agg_down_index: Tensor,
down_ptr: Optional[Tensor] = None,
down_dim_size: Optional[int] = None) -> Tensor:
r"""Aggregates messages from lower adjacent cells.
Takes in the output of message computation as first argument and any
argument which was initially passed to :meth:`propagate`.
By default, this function will delegate its call to scatter functions
that support "add", "mean" and "max" operations as specified in
:meth:`__init__` by the :obj:`aggr` argument.
"""
if down_ptr is not None:
down_ptr = expand_left(down_ptr, dim=self.node_dim, dims=inputs.dim())
return segment_csr(inputs, down_ptr, reduce=self.aggr_down)
else:
return scatter(inputs, agg_down_index, dim=self.node_dim, dim_size=down_dim_size,
reduce=self.aggr_down)
def aggregate_boundary(self, inputs: Tensor, agg_boundary_index: Tensor,
boundary_ptr: Optional[Tensor] = None,
boundary_dim_size: Optional[int] = None) -> Tensor:
r"""Aggregates messages from the boundary cells.
Takes in the output of message computation as first argument and any
argument which was initially passed to :meth:`propagate`.
By default, this function will delegate its call to scatter functions
that support "add", "mean" and "max" operations as specified in
:meth:`__init__` by the :obj:`aggr` argument.
"""
# import pdb; pdb.set_trace()
if boundary_ptr is not None:
down_ptr = expand_left(boundary_ptr, dim=self.node_dim, dims=inputs.dim())
return segment_csr(inputs, down_ptr, reduce=self.aggr_boundary)
else:
return scatter(inputs, agg_boundary_index, dim=self.node_dim, dim_size=boundary_dim_size,
reduce=self.aggr_boundary)
def message_and_aggregate_up(self, up_adj_t: SparseTensor) -> Tensor:
r"""Fuses computations of :func:`message_up` and :func:`aggregate_up` into a
single function.
If applicable, this saves both time and memory since messages do not
explicitly need to be materialized.
This function will only gets called in case it is implemented and
propagation takes place based on a :obj:`torch_sparse.SparseTensor`.
"""
raise NotImplementedError
def message_and_aggregate_down(self, down_adj_t: SparseTensor) -> Tensor:
r"""Fuses computations of :func:`message_down` and :func:`aggregate_down` into a
single function.
If applicable, this saves both time and memory since messages do not
explicitly need to be materialized.
This function will only gets called in case it is implemented and
propagation takes place based on a :obj:`torch_sparse.SparseTensor`.
"""
raise NotImplementedError
def message_and_aggregate_boundary(self, boundary_adj_t: SparseTensor) -> Tensor:
r"""Fuses computations of :func:`message_boundary` and :func:`aggregate_boundary` into a
single function.
If applicable, this saves both time and memory since messages do not
explicitly need to be materialized.
This function will only gets called in case it is implemented and
propagation takes place based on a :obj:`torch_sparse.SparseTensor`.
"""
raise NotImplementedError
def update(self, up_inputs: Optional[Tensor], down_inputs: Optional[Tensor],
boundary_inputs: Optional[Tensor], x: Tensor) -> (Tensor, Tensor, Tensor):
r"""Updates cell embeddings. Takes in the output of the aggregations from different
adjacencies as the first three arguments and any argument which was initially passed to
:meth:`propagate`.
"""
if up_inputs is None:
up_inputs = torch.zeros(x.size(0), self.up_msg_size).to(device=x.device)
if down_inputs is None:
down_inputs = torch.zeros(x.size(0), self.down_msg_size).to(device=x.device)
if boundary_inputs is None:
boundary_inputs = torch.zeros(x.size(0), self.boundary_msg_size).to(device=x.device)
return up_inputs, down_inputs, boundary_inputs
class CochainMessagePassingParams:
    """Bundles the inputs needed by :meth:`CochainMessagePassing.propagate`.

    Plays the role of PyTorch Geometric's ``(x, edge_index)`` pair for a single
    cochain. Extra inputs such as ``boundary_index`` / ``boundary_attr``
    currently travel through ``kwargs``.
    TODO: promote the essential keyword arguments to first-class parameters.

    Args:
        x: The features of the cochain where message passing will be performed.
        up_index: The index for the upper adjacencies of the cochain.
        down_index: The index for the lower adjacencies of the cochain.
    """
    def __init__(self, x: Tensor, up_index: Adj = None, down_index: Adj = None, **kwargs):
        self.x = x
        self.up_index = up_index
        self.down_index = down_index
        self.kwargs = kwargs
        # Optional boundary inputs default to None when not supplied.
        self.boundary_index = self.kwargs.get('boundary_index', None)
        self.boundary_attr = self.kwargs.get('boundary_attr', None)
cwn | cwn-main/mp/test_layers.py | import torch
import torch.optim as optim
from mp.layers import (
DummyCellularMessagePassing, CINConv, OrientedConv, InitReduceConv, EmbedVEWithReduce)
from data.dummy_complexes import get_house_complex, get_molecular_complex
from torch import nn
from data.datasets.flow import load_flow_dataset
def test_dummy_cellular_message_passing_with_down_msg():
    """Check vertex/edge/triangle outputs of dummy message passing (up+down msgs) on the house complex."""
    house_complex = get_house_complex()
    v_params = house_complex.get_cochain_params(dim=0)
    e_params = house_complex.get_cochain_params(dim=1)
    t_params = house_complex.get_cochain_params(dim=2)
    dsmp = DummyCellularMessagePassing()
    v_x, e_x, t_x = dsmp.forward(v_params, e_params, t_params)
    # Hand-computed expected sums for the house complex (see data.dummy_complexes).
    expected_v_x = torch.tensor([[12], [9], [25], [25], [23]], dtype=torch.float)
    assert torch.equal(v_x, expected_v_x)
    expected_e_x = torch.tensor([[10], [20], [47], [22], [42], [37]], dtype=torch.float)
    assert torch.equal(e_x, expected_e_x)
    expected_t_x = torch.tensor([[1]], dtype=torch.float)
    assert torch.equal(t_x, expected_t_x)
def test_dummy_cellular_message_passing_with_boundary_msg():
    """Check outputs when boundary messages are enabled and down messages disabled."""
    house_complex = get_house_complex()
    v_params = house_complex.get_cochain_params(dim=0)
    e_params = house_complex.get_cochain_params(dim=1)
    t_params = house_complex.get_cochain_params(dim=2)
    dsmp = DummyCellularMessagePassing(use_boundary_msg=True, use_down_msg=False)
    v_x, e_x, t_x = dsmp.forward(v_params, e_params, t_params)
    # Hand-computed expected sums for the house complex with boundary-only messages.
    expected_v_x = torch.tensor([[12], [9], [25], [25], [23]], dtype=torch.float)
    assert torch.equal(v_x, expected_v_x)
    expected_e_x = torch.tensor([[4], [7], [23], [9], [25], [24]], dtype=torch.float)
    assert torch.equal(e_x, expected_e_x)
    expected_t_x = torch.tensor([[15]], dtype=torch.float)
    assert torch.equal(t_x, expected_t_x)
def test_dummy_cellular_message_passing_on_molecular_cell_complex():
    """Check dummy message passing (boundary + down msgs) on the molecular cell complex."""
    molecular_complex = get_molecular_complex()
    v_params = molecular_complex.get_cochain_params(dim=0)
    e_params = molecular_complex.get_cochain_params(dim=1)
    ring_params = molecular_complex.get_cochain_params(dim=2)
    dsmp = DummyCellularMessagePassing(use_boundary_msg=True, use_down_msg=True)
    v_x, e_x, ring_x = dsmp.forward(v_params, e_params, ring_params)
    expected_v_x = torch.tensor([[12], [24], [24], [15], [25], [31], [47], [24]],
                                dtype=torch.float)
    assert torch.equal(v_x, expected_v_x)
    expected_e_x = torch.tensor([[35], [79], [41], [27], [66], [70], [92], [82], [53]],
                                dtype=torch.float)
    assert torch.equal(e_x, expected_e_x)
    # The first cell feature is given by 1[x] + 0[up] + (2+2)[down] + (1+2+3+4)[boundaries] = 15
    # The 2nd cell is given by 2[x] + 0[up] + (1+2)[down] + (2+5+6+7+8)[boundaries] = 33
    expected_ring_x = torch.tensor([[15], [33]], dtype=torch.float)
    assert torch.equal(ring_x, expected_ring_x)
def test_cin_conv_training():
    """Run one SGD step through CINConv on the house complex; all parameters must change."""
    msg_net = nn.Sequential(nn.Linear(2, 1))
    update_net = nn.Sequential(nn.Linear(1, 3))
    cin_conv = CINConv(1, 1, msg_net, msg_net, update_net, 0.05)
    # Snapshot parameters before the optimisation step.
    all_params_before = []
    for p in cin_conv.parameters():
        all_params_before.append(p.clone().data)
    assert len(all_params_before) > 0
    house_complex = get_house_complex()
    v_params = house_complex.get_cochain_params(dim=0)
    e_params = house_complex.get_cochain_params(dim=1)
    t_params = house_complex.get_cochain_params(dim=2)
    yv = house_complex.get_labels(dim=0)
    ye = house_complex.get_labels(dim=1)
    yt = house_complex.get_labels(dim=2)
    y = torch.cat([yv, ye, yt])
    optimizer = optim.SGD(cin_conv.parameters(), lr=0.001)
    optimizer.zero_grad()
    out_v, out_e, out_t = cin_conv.forward(v_params, e_params, t_params)
    out = torch.cat([out_v, out_e, out_t], dim=0)
    criterion = nn.CrossEntropyLoss()
    loss = criterion(out, y)
    loss.backward()
    optimizer.step()
    all_params_after = []
    for p in cin_conv.parameters():
        all_params_after.append(p.clone().data)
    assert len(all_params_after) == len(all_params_before)
    # Check that parameters have been updated.
    for i, _ in enumerate(all_params_before):
        assert not torch.equal(all_params_before[i], all_params_after[i])
def test_orient_conv_on_flow_dataset():
    """Smoke-test OrientedConv: output must have one row per edge and 4 channels."""
    import numpy as np
    # Seed numpy so the generated flow dataset is deterministic.
    np.random.seed(4)
    update_up = nn.Sequential(nn.Linear(1, 4))
    update_down = nn.Sequential(nn.Linear(1, 4))
    update = nn.Sequential(nn.Linear(1, 4))
    train, _, G = load_flow_dataset(num_points=400, num_train=3, num_test=3)
    number_of_edges = G.number_of_edges()
    model = OrientedConv(1, 1, 1, update_up_nn=update_up, update_down_nn=update_down,
                         update_nn=update, act_fn=torch.tanh)
    model.eval()
    out = model.forward(train[0])
    assert out.size(0) == number_of_edges
    assert out.size(1) == 4
def test_init_reduce_conv_on_house_complex():
    """InitReduceConv with 'add' must sum boundary features into each higher cell."""
    house_complex = get_house_complex()
    v_params = house_complex.get_cochain_params(dim=0)
    e_params = house_complex.get_cochain_params(dim=1)
    t_params = house_complex.get_cochain_params(dim=2)
    conv = InitReduceConv(reduce='add')
    # Each edge feature = sum of its two endpoint vertex features.
    ex = conv.forward(v_params.x, e_params.boundary_index)
    expected_ex = torch.tensor([[3], [5], [7], [5], [9], [8]], dtype=torch.float)
    assert torch.equal(expected_ex, ex)
    # The triangle feature = sum of its three boundary edge features.
    tx = conv.forward(e_params.x, t_params.boundary_index)
    expected_tx = torch.tensor([[14]], dtype=torch.float)
    assert torch.equal(expected_tx, tx)
def test_embed_with_reduce_layer_on_house_complex():
    """EmbedVEWithReduce must emit 10-dim features for all three cochain dimensions."""
    house_complex = get_house_complex()
    cochains = house_complex.cochains
    params = house_complex.get_all_cochain_params()
    embed_layer = nn.Embedding(num_embeddings=32, embedding_dim=10)
    init_reduce = InitReduceConv()
    conv = EmbedVEWithReduce(embed_layer, None, init_reduce)
    # Simulate the lack of features in these dimensions.
    params[1].x = None
    params[2].x = None
    xs = conv.forward(*params)
    assert len(xs) == 3
    assert xs[0].dim() == 2
    assert xs[0].size(0) == cochains[0].num_cells
    assert xs[0].size(1) == 10
    assert xs[1].size(0) == cochains[1].num_cells
    assert xs[1].size(1) == 10
    assert xs[2].size(0) == cochains[2].num_cells
    assert xs[2].size(1) == 10
| 6,243 | 34.276836 | 96 | py |
cwn | cwn-main/mp/cell_mp_inspector.py | """
Based on https://github.com/rusty1s/pytorch_geometric/blob/76d61eaa9fc8702aa25f29dfaa5134a169d0f1f6/torch_geometric/nn/conv/utils/inspector.py
MIT License
Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import inspect
from collections import OrderedDict
from typing import Dict, Any, Callable
from torch_geometric.nn.conv.utils.inspector import Inspector
class CellularInspector(Inspector):
    """PyTorch Geometric ``Inspector`` adapted for cochain message passing."""

    def __implements__(self, cls, func_name: str) -> bool:
        # The base CochainMessagePassing never counts as an implementation:
        # only genuine subclass overrides do.
        if cls.__name__ == 'CochainMessagePassing':
            return False
        if func_name in cls.__dict__:
            return True
        return any(self.__implements__(base, func_name) for base in cls.__bases__)

    def inspect(self, func: Callable, pop_first_n: int = 0) -> Dict[str, Any]:
        """Record *func*'s signature, dropping its first *pop_first_n* parameters."""
        signature_params = OrderedDict(inspect.signature(func).parameters)
        for _ in range(pop_first_n):
            # popitem(last=False) removes parameters from the front (e.g. `self`).
            signature_params.popitem(last=False)
        self.params[func.__name__] = signature_params
| 2,143 | 41.88 | 142 | py |
cwn | cwn-main/mp/ring_exp_models.py | import torch
from mp.layers import SparseCINConv
from mp.nn import get_nonlinearity, get_graph_norm
from data.complex import ComplexBatch
from torch.nn import Linear, Sequential
from torch_geometric.nn import GINConv
class RingSparseCIN(torch.nn.Module):
    """
    A simple cellular version of GIN employed for Ring experiments.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, num_input_features, num_classes, num_layers, hidden,
                 max_dim: int = 2, nonlinearity='relu', train_eps=False, use_coboundaries=False,
                 graph_norm='id'):
        super(RingSparseCIN, self).__init__()
        self.max_dim = max_dim
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.init_layer = Linear(num_input_features, num_input_features)
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        self.graph_norm = get_graph_norm(graph_norm)
        for i in range(num_layers):
            # First layer consumes raw input features; later layers consume `hidden`.
            layer_dim = num_input_features if i == 0 else hidden
            self.convs.append(
                SparseCINConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                              boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None, passed_msg_up_nn=None,
                              passed_update_up_nn=None, passed_update_boundaries_nn=None,
                              train_eps=train_eps, max_dim=self.max_dim,
                              hidden=hidden, act_module=act_module, layer_dim=layer_dim,
                              graph_norm=self.graph_norm, use_coboundaries=use_coboundaries))
        self.lin1 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise all learnable parameters in place."""
        self.init_layer.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()

    def forward(self, data: ComplexBatch, include_partial=False):
        """Classify the masked target node of each complex in the batch.

        NOTE: mutates `data` in place (overwrites node features and layer outputs).
        When `include_partial` is True, also returns the per-layer, per-dimension
        intermediate features keyed as "layer{c}_{k}".
        """
        xs = None
        res = {}
        data.nodes.x = self.init_layer(data.nodes.x)
        for c, conv in enumerate(self.convs):
            params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
            xs = conv(*params)
            data.set_xs(xs)
            if include_partial:
                for k in range(len(xs)):
                    res[f"layer{c}_{k}"] = xs[k]
        x = xs[0]
        # Extract the target node from each graph
        mask = data.nodes.mask
        x = self.lin1(x[mask])
        if include_partial:
            res['out'] = x
            return x, res
        return x

    def __repr__(self):
        return self.__class__.__name__
class RingGIN(torch.nn.Module):
    """Plain GIN baseline for the Ring experiments (classifies a masked target node)."""

    def __init__(self, num_features, num_layers, hidden, num_classes, nonlinearity='relu',
                 graph_norm='bn'):
        super(RingGIN, self).__init__()
        self.nonlinearity = nonlinearity
        conv_nonlinearity = get_nonlinearity(nonlinearity, return_module=True)
        self.init_linear = Linear(num_features, num_features)
        self.graph_norm = get_graph_norm(graph_norm)
        # BN is needed to make GIN work empirically beyond 2 layers for the ring experiments.
        self.conv1 = GINConv(
            Sequential(
                Linear(num_features, hidden),
                self.graph_norm(hidden),
                conv_nonlinearity(),
                Linear(hidden, hidden),
                self.graph_norm(hidden),
                conv_nonlinearity(),
            ), train_eps=False)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(
                GINConv(
                    Sequential(
                        Linear(hidden, hidden),
                        self.graph_norm(hidden),
                        conv_nonlinearity(),
                        Linear(hidden, hidden),
                        self.graph_norm(hidden),
                        conv_nonlinearity(),
                    ), train_eps=False))
        self.lin1 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise all learnable parameters in place."""
        self.init_linear.reset_parameters()
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()

    def forward(self, data):
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        x, edge_index, mask = data.x, data.edge_index, data.mask
        x = self.init_linear(x)
        x = act(self.conv1(x, edge_index))
        # NOTE(review): later convs are not wrapped in `act` (the GIN MLPs end in
        # an activation already) — presumably intentional; confirm.
        for conv in self.convs:
            x = conv(x, edge_index)
        # Select the target node of each graph in the batch
        x = x[mask]
        x = self.lin1(x)
        return x

    def __repr__(self):
        return self.__class__.__name__
| 4,771 | 35.151515 | 102 | py |
cwn | cwn-main/mp/test_permutation.py | import torch
from data.utils import compute_ring_2complex
from data.perm_utils import permute_graph, generate_permutation_matrices
from data.dummy_complexes import get_mol_testing_complex_list, convert_to_graph
from data.complex import ComplexBatch
from mp.models import SparseCIN
def test_sparse_cin0_perm_invariance_on_dummy_mol_complexes():
    """SparseCIN embeddings must be invariant to node permutations."""
    # Generate the reference graph list from the dummy molecular complexes.
    complexes = get_mol_testing_complex_list()
    graphs = [convert_to_graph(c) for c in complexes]
    for graph in graphs:
        graph.edge_attr = None
    # Convert back to complexes to regenerate signals on edges and rings,
    # fixing max_k to 7.
    complexes = [
        compute_ring_2complex(graph.x, graph.edge_index, None, graph.num_nodes,
                              max_k=7, include_down_adj=False, init_method='sum',
                              init_edges=True, init_rings=True)
        for graph in graphs]

    model = SparseCIN(num_input_features=1, num_classes=16, num_layers=3, hidden=32,
                      use_coboundaries=True, nonlinearity='elu')
    model.eval()

    # Reference embeddings on the unpermuted complexes.
    ref_embeddings = [
        model.forward(ComplexBatch.from_complex_list([c], max_dim=c.dimension))
        for c in complexes]

    # Invariance under multiple random permutations per graph.
    for ref_emb, graph in zip(ref_embeddings, graphs):
        for perm in generate_permutation_matrices(graph.num_nodes, 5):
            perm_graph = permute_graph(graph, perm)
            perm_complex = compute_ring_2complex(
                perm_graph.x, perm_graph.edge_index, None, perm_graph.num_nodes,
                max_k=7, include_down_adj=False, init_method='sum',
                init_edges=True, init_rings=True)
            perm_emb = model.forward(
                ComplexBatch.from_complex_list([perm_complex], max_dim=perm_complex.dimension))
            assert torch.allclose(ref_emb, perm_emb, atol=1e-6)
| 1,983 | 52.621622 | 127 | py |
cwn | cwn-main/mp/molec_models.py | import torch
import torch.nn.functional as F
from torch.nn import Linear, Embedding, Sequential, BatchNorm1d as BN
from torch_geometric.nn import JumpingKnowledge, GINEConv
from mp.layers import InitReduceConv, EmbedVEWithReduce, OGBEmbedVEWithReduce, SparseCINConv, CINppConv
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from data.complex import ComplexBatch
from mp.nn import pool_complex, get_pooling_fn, get_nonlinearity, get_graph_norm
class EmbedSparseCIN(torch.nn.Module):
    """
    A cellular version of GIN with some tailoring to nimbly work on molecules from the ZINC database.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, atom_types, bond_types, out_size, num_layers, hidden,
                 dropout_rate: float = 0.5, max_dim: int = 2, jump_mode=None, nonlinearity='relu',
                 readout='sum', train_eps=False, final_hidden_multiplier: int = 2,
                 readout_dims=(0, 1, 2), final_readout='sum', apply_dropout_before='lin2',
                 init_reduce='sum', embed_edge=False, embed_dim=None, use_coboundaries=False,
                 graph_norm='bn'):
        super(EmbedSparseCIN, self).__init__()
        self.max_dim = max_dim
        # Only keep readout dimensions that can actually exist in the complex.
        if readout_dims is not None:
            self.readout_dims = tuple([dim for dim in readout_dims if dim <= max_dim])
        else:
            self.readout_dims = list(range(max_dim+1))
        if embed_dim is None:
            embed_dim = hidden
        # Initial embeddings: atoms always, bonds only when embed_edge is set.
        self.v_embed_init = Embedding(atom_types, embed_dim)
        self.e_embed_init = None
        if embed_edge:
            self.e_embed_init = Embedding(bond_types, embed_dim)
        self.reduce_init = InitReduceConv(reduce=init_reduce)
        self.init_conv = EmbedVEWithReduce(self.v_embed_init, self.e_embed_init, self.reduce_init)
        self.final_readout = final_readout
        self.dropout_rate = dropout_rate
        self.apply_dropout_before = apply_dropout_before
        self.jump_mode = jump_mode
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.readout = readout
        self.graph_norm = get_graph_norm(graph_norm)
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        for i in range(num_layers):
            layer_dim = embed_dim if i == 0 else hidden
            self.convs.append(
                SparseCINConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                    boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None,
                    passed_msg_up_nn=None, passed_update_up_nn=None,
                    passed_update_boundaries_nn=None, train_eps=train_eps, max_dim=self.max_dim,
                    hidden=hidden, act_module=act_module, layer_dim=layer_dim,
                    graph_norm=self.graph_norm, use_coboundaries=use_coboundaries))
        self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
        self.lin1s = torch.nn.ModuleList()
        for _ in range(max_dim + 1):
            if jump_mode == 'cat':
                # These layers don't use a bias. Then, in case a level is not present the output
                # is just zero and it is not given by the biases.
                self.lin1s.append(Linear(num_layers * hidden, final_hidden_multiplier * hidden,
                                         bias=False))
            else:
                self.lin1s.append(Linear(hidden, final_hidden_multiplier * hidden))
        self.lin2 = Linear(final_hidden_multiplier * hidden, out_size)

    def reset_parameters(self):
        """Re-initialise all learnable parameters."""
        for conv in self.convs:
            conv.reset_parameters()
        if self.jump_mode is not None:
            self.jump.reset_parameters()
        self.init_conv.reset_parameters()
        # BUGFIX: ModuleList has no reset_parameters() method, so calling it on
        # self.lin1s raised AttributeError. Reset each Linear individually.
        for lin in self.lin1s:
            lin.reset_parameters()
        self.lin2.reset_parameters()

    def jump_complex(self, jump_xs):
        # Perform JumpingKnowledge at each level of the complex.
        xs = []
        for jumpx in jump_xs:
            xs += [self.jump(jumpx)]
        return xs

    def forward(self, data: ComplexBatch, include_partial=False):
        """Embed, convolve, pool and classify a batch of cell complexes.

        When ``include_partial`` is True, also returns a dict of intermediate
        per-layer/per-dimension and pooled features.
        """
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        xs, jump_xs = None, None
        res = {}

        # Check input node/edge features are scalars.
        assert data.cochains[0].x.size(-1) == 1
        if 1 in data.cochains and data.cochains[1].x is not None:
            assert data.cochains[1].x.size(-1) == 1

        # Embed and populate higher-levels
        params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
        xs = list(self.init_conv(*params))

        # Apply dropout on the input features like all models do on ZINC.
        for i, x in enumerate(xs):
            xs[i] = F.dropout(xs[i], p=self.dropout_rate, training=self.training)

        data.set_xs(xs)

        for c, conv in enumerate(self.convs):
            params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
            start_to_process = 0
            xs = conv(*params, start_to_process=start_to_process)
            data.set_xs(xs)

            if include_partial:
                for k in range(len(xs)):
                    res[f"layer{c}_{k}"] = xs[k]

            if self.jump_mode is not None:
                if jump_xs is None:
                    jump_xs = [[] for _ in xs]
                for i, x in enumerate(xs):
                    jump_xs[i] += [x]

        if self.jump_mode is not None:
            xs = self.jump_complex(jump_xs)

        xs = pool_complex(xs, data, self.max_dim, self.readout)
        # Select the dimensions we want at the end.
        xs = [xs[i] for i in self.readout_dims]

        if include_partial:
            for k in range(len(xs)):
                res[f"pool_{k}"] = xs[k]

        new_xs = []
        for i, x in enumerate(xs):
            if self.apply_dropout_before == 'lin1':
                x = F.dropout(x, p=self.dropout_rate, training=self.training)
            new_xs.append(act(self.lin1s[self.readout_dims[i]](x)))

        x = torch.stack(new_xs, dim=0)

        if self.apply_dropout_before == 'final_readout':
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        if self.final_readout == 'mean':
            x = x.mean(0)
        elif self.final_readout == 'sum':
            x = x.sum(0)
        else:
            raise NotImplementedError
        if self.apply_dropout_before not in ['lin1', 'final_readout']:
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        x = self.lin2(x)

        if include_partial:
            res['out'] = x
            return x, res
        return x

    def __repr__(self):
        return self.__class__.__name__
class EmbedCINpp(EmbedSparseCIN):
    """EmbedSparseCIN variant that additionally passes messages from
    lower-adjacent cells (CIN++)."""

    def __init__(self, atom_types, bond_types, out_size, num_layers, hidden,
                 dropout_rate: float = 0.5, max_dim: int = 2, jump_mode=None,
                 nonlinearity='relu', readout='sum', train_eps=False,
                 final_hidden_multiplier: int = 2, readout_dims=(0, 1, 2),
                 final_readout='sum', apply_dropout_before='lin2', init_reduce='sum',
                 embed_edge=False, embed_dim=None, use_coboundaries=False, graph_norm='bn'):
        super(EmbedCINpp, self).__init__(
            atom_types, bond_types, out_size, num_layers, hidden, dropout_rate,
            max_dim, jump_mode, nonlinearity, readout, train_eps,
            final_hidden_multiplier, readout_dims, final_readout,
            apply_dropout_before, init_reduce, embed_edge, embed_dim,
            use_coboundaries, graph_norm)
        if embed_dim is None:
            embed_dim = hidden
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        # Replace the parent's SparseCINConv stack with CINppConv layers,
        # which also receive down-adjacency messages.
        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers):
            layer_dim = embed_dim if layer == 0 else hidden
            self.convs.append(
                CINppConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                          boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None,
                          passed_msg_up_nn=None, passed_msg_down_nn=None,
                          passed_update_up_nn=None, passed_update_down_nn=None,
                          passed_update_boundaries_nn=None, train_eps=train_eps,
                          max_dim=self.max_dim, hidden=hidden, act_module=act_module,
                          layer_dim=layer_dim, graph_norm=self.graph_norm,
                          use_coboundaries=use_coboundaries))
class OGBEmbedSparseCIN(torch.nn.Module):
    """
    A cellular version of GIN with some tailoring to nimbly work on molecules from the ogbg-mol* dataset.
    It uses OGB atom and bond encoders.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, out_size, num_layers, hidden, dropout_rate: float = 0.5,
                 indropout_rate: float = 0.0, max_dim: int = 2, jump_mode=None,
                 nonlinearity='relu', readout='sum', train_eps=False, final_hidden_multiplier: int = 2,
                 readout_dims=(0, 1, 2), final_readout='sum', apply_dropout_before='lin2',
                 init_reduce='sum', embed_edge=False, embed_dim=None, use_coboundaries=False,
                 graph_norm='bn'):
        super(OGBEmbedSparseCIN, self).__init__()
        self.max_dim = max_dim
        # Only keep readout dimensions that can actually exist in the complex.
        if readout_dims is not None:
            self.readout_dims = tuple([dim for dim in readout_dims if dim <= max_dim])
        else:
            self.readout_dims = list(range(max_dim+1))
        if embed_dim is None:
            embed_dim = hidden
        # OGB-specific encoders for atom (node) and bond (edge) features.
        self.v_embed_init = AtomEncoder(embed_dim)
        self.e_embed_init = None
        if embed_edge:
            self.e_embed_init = BondEncoder(embed_dim)
        self.reduce_init = InitReduceConv(reduce=init_reduce)
        self.init_conv = OGBEmbedVEWithReduce(self.v_embed_init, self.e_embed_init, self.reduce_init)
        self.final_readout = final_readout
        self.dropout_rate = dropout_rate
        self.in_dropout_rate = indropout_rate
        self.apply_dropout_before = apply_dropout_before
        self.jump_mode = jump_mode
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.readout = readout
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        self.graph_norm = get_graph_norm(graph_norm)
        for i in range(num_layers):
            layer_dim = embed_dim if i == 0 else hidden
            self.convs.append(
                SparseCINConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                    boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None,
                    passed_msg_up_nn=None, passed_update_up_nn=None,
                    passed_update_boundaries_nn=None, train_eps=train_eps, max_dim=self.max_dim,
                    hidden=hidden, act_module=act_module, layer_dim=layer_dim,
                    graph_norm=self.graph_norm, use_coboundaries=use_coboundaries))
        self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
        self.lin1s = torch.nn.ModuleList()
        for _ in range(max_dim + 1):
            if jump_mode == 'cat':
                # These layers don't use a bias. Then, in case a level is not present the output
                # is just zero and it is not given by the biases.
                self.lin1s.append(Linear(num_layers * hidden, final_hidden_multiplier * hidden,
                                         bias=False))
            else:
                self.lin1s.append(Linear(hidden, final_hidden_multiplier * hidden))
        self.lin2 = Linear(final_hidden_multiplier * hidden, out_size)

    def reset_parameters(self):
        """Re-initialise all learnable parameters."""
        for conv in self.convs:
            conv.reset_parameters()
        if self.jump_mode is not None:
            self.jump.reset_parameters()
        self.init_conv.reset_parameters()
        # BUGFIX: ModuleList has no reset_parameters() method, so calling it on
        # self.lin1s raised AttributeError. Reset each Linear individually.
        for lin in self.lin1s:
            lin.reset_parameters()
        self.lin2.reset_parameters()

    def jump_complex(self, jump_xs):
        # Perform JumpingKnowledge at each level of the complex.
        xs = []
        for jumpx in jump_xs:
            xs += [self.jump(jumpx)]
        return xs

    def forward(self, data: ComplexBatch, include_partial=False):
        """Embed, convolve, pool and classify a batch of cell complexes.

        When ``include_partial`` is True, also returns a dict of intermediate
        per-layer/per-dimension and pooled features.
        """
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        xs, jump_xs = None, None
        res = {}

        # Embed and populate higher-levels
        params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
        xs = list(self.init_conv(*params))

        # Apply dropout on the input features
        for i, x in enumerate(xs):
            xs[i] = F.dropout(xs[i], p=self.in_dropout_rate, training=self.training)

        data.set_xs(xs)

        for c, conv in enumerate(self.convs):
            params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
            start_to_process = 0
            xs = conv(*params, start_to_process=start_to_process)
            # Apply dropout on the output of the conv layer
            for i, x in enumerate(xs):
                xs[i] = F.dropout(xs[i], p=self.dropout_rate, training=self.training)
            data.set_xs(xs)

            if include_partial:
                for k in range(len(xs)):
                    res[f"layer{c}_{k}"] = xs[k]

            if self.jump_mode is not None:
                if jump_xs is None:
                    jump_xs = [[] for _ in xs]
                for i, x in enumerate(xs):
                    jump_xs[i] += [x]

        if self.jump_mode is not None:
            xs = self.jump_complex(jump_xs)

        xs = pool_complex(xs, data, self.max_dim, self.readout)
        # Select the dimensions we want at the end.
        xs = [xs[i] for i in self.readout_dims]

        if include_partial:
            for k in range(len(xs)):
                res[f"pool_{k}"] = xs[k]

        new_xs = []
        for i, x in enumerate(xs):
            if self.apply_dropout_before == 'lin1':
                x = F.dropout(x, p=self.dropout_rate, training=self.training)
            new_xs.append(act(self.lin1s[self.readout_dims[i]](x)))

        x = torch.stack(new_xs, dim=0)

        if self.apply_dropout_before == 'final_readout':
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        if self.final_readout == 'mean':
            x = x.mean(0)
        elif self.final_readout == 'sum':
            x = x.sum(0)
        else:
            raise NotImplementedError
        if self.apply_dropout_before not in ['lin1', 'final_readout']:
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        x = self.lin2(x)

        if include_partial:
            res['out'] = x
            return x, res
        return x

    def __repr__(self):
        return self.__class__.__name__
class OGBEmbedCINpp(OGBEmbedSparseCIN):
    """OGBEmbedSparseCIN variant that additionally passes messages from
    lower-adjacent cells (CIN++)."""

    def __init__(self, out_size, num_layers, hidden, dropout_rate: float = 0.5,
                 indropout_rate: float = 0, max_dim: int = 2, jump_mode=None,
                 nonlinearity='relu', readout='sum', train_eps=False,
                 final_hidden_multiplier: int = 2, readout_dims=(0, 1, 2),
                 final_readout='sum', apply_dropout_before='lin2', init_reduce='sum',
                 embed_edge=False, embed_dim=None, use_coboundaries=False, graph_norm='bn'):
        super().__init__(out_size, num_layers, hidden, dropout_rate, indropout_rate,
                         max_dim, jump_mode, nonlinearity, readout, train_eps,
                         final_hidden_multiplier, readout_dims, final_readout,
                         apply_dropout_before, init_reduce, embed_edge, embed_dim,
                         use_coboundaries, graph_norm)
        if embed_dim is None:
            embed_dim = hidden
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        # Replace the parent's SparseCINConv stack with CINppConv layers,
        # which also receive down-adjacency messages.
        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers):
            layer_dim = embed_dim if layer == 0 else hidden
            self.convs.append(
                CINppConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                          boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None,
                          passed_msg_up_nn=None, passed_msg_down_nn=None,
                          passed_update_up_nn=None, passed_update_down_nn=None,
                          passed_update_boundaries_nn=None, train_eps=train_eps,
                          max_dim=self.max_dim, hidden=hidden, act_module=act_module,
                          layer_dim=layer_dim, graph_norm=self.graph_norm,
                          use_coboundaries=use_coboundaries))
class EmbedSparseCINNoRings(torch.nn.Module):
    """
    CIN model on cell complexes up to dimension 1 (edges). It does not make use of rings.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, atom_types, bond_types, out_size, num_layers, hidden,
                 dropout_rate: float = 0.5, nonlinearity='relu',
                 readout='sum', train_eps=False, final_hidden_multiplier: int = 2,
                 final_readout='sum', apply_dropout_before='lin2',
                 init_reduce='sum', embed_edge=False, embed_dim=None, use_coboundaries=False,
                 graph_norm='bn'):
        super(EmbedSparseCINNoRings, self).__init__()
        # This model is fixed to nodes and edges only (no rings).
        self.max_dim = 1
        self.readout_dims = [0, 1]
        if embed_dim is None:
            embed_dim = hidden
        self.v_embed_init = Embedding(atom_types, embed_dim)
        self.e_embed_init = None
        if embed_edge:
            self.e_embed_init = Embedding(bond_types, embed_dim)
        self.reduce_init = InitReduceConv(reduce=init_reduce)
        self.init_conv = EmbedVEWithReduce(self.v_embed_init, self.e_embed_init, self.reduce_init)
        self.final_readout = final_readout
        self.dropout_rate = dropout_rate
        self.apply_dropout_before = apply_dropout_before
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.readout = readout
        self.graph_norm = get_graph_norm(graph_norm)
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        for i in range(num_layers):
            layer_dim = embed_dim if i == 0 else hidden
            self.convs.append(
                SparseCINConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                    boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None,
                    passed_msg_up_nn=None, passed_update_up_nn=None,
                    passed_update_boundaries_nn=None, train_eps=train_eps, max_dim=self.max_dim,
                    hidden=hidden, act_module=act_module, layer_dim=layer_dim,
                    graph_norm=self.graph_norm, use_coboundaries=use_coboundaries))
        self.lin1s = torch.nn.ModuleList()
        for _ in range(self.max_dim + 1):
            self.lin1s.append(Linear(hidden, final_hidden_multiplier * hidden))
        self.lin2 = Linear(final_hidden_multiplier * hidden, out_size)

    def reset_parameters(self):
        """Re-initialise all learnable parameters."""
        for conv in self.convs:
            conv.reset_parameters()
        self.init_conv.reset_parameters()
        # BUGFIX: ModuleList has no reset_parameters() method, so calling it on
        # self.lin1s raised AttributeError. Reset each Linear individually.
        for lin in self.lin1s:
            lin.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data: ComplexBatch):
        """Embed, convolve (nodes and edges only), pool and classify."""
        act = get_nonlinearity(self.nonlinearity, return_module=False)

        # Check input node/edge features are scalars.
        assert data.cochains[0].x.size(-1) == 1
        if 1 in data.cochains and data.cochains[1].x is not None:
            assert data.cochains[1].x.size(-1) == 1

        # Extract node and edge params
        params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
        # Make the upper index of the edges None to ignore the rings. Even though max_dim = 1
        # our current code does extract upper adjacencies for edges if rings are present.
        if len(params) > 1:
            params[1].up_index = None

        # Embed the node and edge features
        xs = list(self.init_conv(*params))

        # Apply dropout on the input features
        for i, x in enumerate(xs):
            xs[i] = F.dropout(xs[i], p=self.dropout_rate, training=self.training)

        data.set_xs(xs)

        for c, conv in enumerate(self.convs):
            params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
            if len(params) > 1:
                params[1].up_index = None
            xs = conv(*params)
            data.set_xs(xs)

        xs = pool_complex(xs, data, self.max_dim, self.readout)
        # Select the dimensions we want at the end.
        xs = [xs[i] for i in self.readout_dims]

        new_xs = []
        for i, x in enumerate(xs):
            if self.apply_dropout_before == 'lin1':
                x = F.dropout(x, p=self.dropout_rate, training=self.training)
            new_xs.append(act(self.lin1s[self.readout_dims[i]](x)))

        x = torch.stack(new_xs, dim=0)

        if self.apply_dropout_before == 'final_readout':
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        if self.final_readout == 'mean':
            x = x.mean(0)
        elif self.final_readout == 'sum':
            x = x.sum(0)
        else:
            raise NotImplementedError
        if self.apply_dropout_before not in ['lin1', 'final_readout']:
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        x = self.lin2(x)
        return x

    def __repr__(self):
        return self.__class__.__name__
class EmbedGIN(torch.nn.Module):
    """
    GIN operating on cell complex inputs, used to test our pipeline.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, atom_types, bond_types, out_size, num_layers, hidden,
                 dropout_rate: float = 0.5, nonlinearity='relu',
                 readout='sum', train_eps=False, apply_dropout_before='lin2',
                 init_reduce='sum', embed_edge=False, embed_dim=None):
        super(EmbedGIN, self).__init__()
        self.max_dim = 1
        if embed_dim is None:
            embed_dim = hidden

        # Initial embeddings: atoms always, bonds only when requested.
        self.v_embed_init = Embedding(atom_types, embed_dim)
        self.e_embed_init = Embedding(bond_types, embed_dim) if embed_edge else None
        self.reduce_init = InitReduceConv(reduce=init_reduce)
        self.init_conv = EmbedVEWithReduce(self.v_embed_init, self.e_embed_init, self.reduce_init)

        self.dropout_rate = dropout_rate
        self.apply_dropout_before = apply_dropout_before
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.pooling_fn = get_pooling_fn(readout)

        act_module = get_nonlinearity(nonlinearity, return_module=True)
        for layer in range(num_layers):
            in_dim = embed_dim if layer == 0 else hidden
            # MLP performing the GIN `update` function.
            update_mlp = Sequential(
                Linear(in_dim, hidden),
                BN(hidden),
                act_module(),
                Linear(hidden, hidden),
                BN(hidden),
                act_module(),
            )
            self.convs.append(GINEConv(update_mlp, train_eps=train_eps))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, out_size)

    def reset_parameters(self):
        """Re-initialise every learnable sub-module."""
        for module in (*self.convs, self.init_conv, self.lin1, self.lin2):
            module.reset_parameters()

    def forward(self, data: ComplexBatch):
        act = get_nonlinearity(self.nonlinearity, return_module=False)

        # Input node/edge features must be scalar (categorical) codes.
        assert data.cochains[0].x.size(-1) == 1
        if 1 in data.cochains and data.cochains[1].x is not None:
            assert data.cochains[1].x.size(-1) == 1

        # Embed the node and edge features.
        params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
        xs = list(self.init_conv(*params))
        # Apply dropout on the input node features.
        xs[0] = F.dropout(xs[0], p=self.dropout_rate, training=self.training)
        data.set_xs(xs)

        # GIN itself only consumes the node-level (dimension 0) inputs.
        node_params = data.get_all_cochain_params(max_dim=0, include_down_features=False)[0]
        x = node_params.x
        edge_index = node_params.up_index
        edge_attr = node_params.kwargs['up_attr']
        if edge_index is None:
            # Edge case: graphs without any edges at all.
            edge_index = torch.LongTensor([[], []])
            edge_attr = torch.FloatTensor([[0]*x.size(-1)])

        for conv in self.convs:
            x = conv(x=x, edge_index=edge_index, edge_attr=edge_attr)

        # Pool only over nodes.
        batch_size = data.cochains[0].batch.max() + 1
        x = self.pooling_fn(x, data.nodes.batch, size=batch_size)

        if self.apply_dropout_before == 'lin1':
            x = F.dropout(x, p=self.dropout_rate, training=self.training)
        x = act(self.lin1(x))
        if self.apply_dropout_before in ['final_readout', 'lin2']:
            x = F.dropout(x, p=self.dropout_rate, training=self.training)
        return self.lin2(x)

    def __repr__(self):
        return self.__class__.__name__
| 26,185 | 42.140033 | 106 | py |
cwn | cwn-main/mp/test_models.py | import torch
import pytest
import itertools
from data.complex import ComplexBatch
from data.dummy_complexes import get_testing_complex_list
from mp.models import CIN0, EdgeCIN0, SparseCIN
from data.data_loading import DataLoader, load_dataset
def test_cin_model_with_batching():
    """Batched and unbatched evaluation must produce the same CIN0 predictions."""
    data_list = get_testing_complex_list()

    # Sweep batch sizes together with batch/model max dimensions.
    dims = [1, 2, 3]
    batch_sizes = range(2, len(data_list) + 1)
    for batch_size, batch_max_dim, model_max_dim in itertools.product(batch_sizes, dims, dims):
        if batch_max_dim > model_max_dim:
            continue
        loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        model = CIN0(num_input_features=1, num_classes=3, num_layers=3, hidden=5,
                     jump_mode='cat', max_dim=model_max_dim)
        # Eval mode avoids problems with batch norm.
        model.eval()

        batched_preds = torch.cat([model.forward(batch) for batch in loader], dim=0)
        preds = torch.cat(
            [model.forward(ComplexBatch.from_complex_list([c], max_dim=batch_max_dim))
             for c in data_list], dim=0)

        # Atol was reduced from 1e-6 to 1e-5 to remove flakiness.
        assert preds.size() == batched_preds.size()
        assert torch.allclose(preds, batched_preds, atol=1e-5)
def test_edge_cin0_model_with_batching():
    """Batched and unbatched evaluation must produce the same EdgeCIN0 predictions."""
    data_list = get_testing_complex_list()
    for top_features in (True, False):
        loader = DataLoader(data_list, batch_size=4)
        model = EdgeCIN0(num_input_features=1, num_classes=3, num_layers=3, hidden=5,
                         jump_mode='cat', include_top_features=top_features)
        # Eval mode avoids problems with batch norm.
        model.eval()

        batched_preds = torch.cat([model.forward(batch) for batch in loader], dim=0)
        single_preds = torch.cat(
            [model.forward(ComplexBatch.from_complex_list([c])) for c in data_list], dim=0)

        assert torch.allclose(single_preds, batched_preds, atol=1e-6)
def test_edge_cin0_model_with_batching_while_including_top_features_and_max_dim_one():
    """Including vs. excluding top features must change the model output."""
    data_list = get_testing_complex_list()
    data_loader = DataLoader(data_list, batch_size=4)

    def batched_predictions(include_top):
        model = EdgeCIN0(num_input_features=1, num_classes=3, num_layers=3, hidden=5,
                         jump_mode='cat', include_top_features=include_top)
        # Eval mode avoids problems with batch norm.
        model.eval()
        return torch.cat([model.forward(batch) for batch in data_loader], dim=0)

    preds_with_top = batched_predictions(True)
    preds_without_top = batched_predictions(False)

    # Check excluding the top features provides a different output compared
    # to the model that includes them.
    assert not torch.equal(preds_with_top, preds_without_top)
def test_cin_model_with_batching_over_complexes_missing_two_cells():
    """Check this runs without errors and that the max_dim=2 and max_dim=1
    models give different outputs."""
    data_list = get_testing_complex_list()

    # Run using a model that works up to two_cells.
    loader_2d = DataLoader(data_list, batch_size=2)
    model_2d = CIN0(num_input_features=1, num_classes=3, num_layers=3, hidden=5, max_dim=2,
                    jump_mode='max')
    # Eval mode avoids problems with batch norm.
    model_2d.eval()
    preds1 = torch.cat([model_2d.forward(batch) for batch in loader_2d], dim=0)

    # Run using a model that works up to edges only.
    model_1d = CIN0(num_input_features=1, num_classes=3, num_layers=3, hidden=5, max_dim=1,
                    jump_mode='max')
    model_1d.eval()
    loader_1d = DataLoader(data_list, batch_size=2, max_dim=1)
    preds2 = torch.cat([model_1d.forward(batch) for batch in loader_1d], dim=0)

    # The two models must disagree: the model using two_cells sets the
    # two_cell outputs to zero, so the readout output should also differ.
    assert not torch.equal(preds1, preds2)
def test_sparse_cin0_model_with_batching():
    """Batched and unbatched evaluation must agree on all intermediate outputs."""
    data_list = get_testing_complex_list()

    # Sweep batch sizes together with batch/model max dimensions.
    dims = [1, 2, 3]
    batch_sizes = range(2, len(data_list) + 1)
    torch.manual_seed(0)
    for batch_size, batch_max_dim, model_max_dim in itertools.product(batch_sizes, dims, dims):
        if batch_max_dim > model_max_dim:
            continue
        loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        model = SparseCIN(num_input_features=1, num_classes=3, num_layers=3, hidden=5,
                          jump_mode='cat', max_dim=model_max_dim)
        # Eval mode avoids problems with batch norm.
        model.eval()

        batched_res = {}
        for batch in loader:
            _, res = model.forward(batch, include_partial=True)
            for key, value in res.items():
                batched_res.setdefault(key, []).append(value)
        batched_res = {k: torch.cat(v, dim=0) for k, v in batched_res.items()}

        unbatched_res = {}
        for complex in data_list:
            _, res = model.forward(
                ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim),
                include_partial=True)
            for key, value in res.items():
                unbatched_res.setdefault(key, []).append(value)
        unbatched_res = {k: torch.cat(v, dim=0) for k, v in unbatched_res.items()}

        for key in set(unbatched_res) | set(batched_res):
            assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
                print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
@pytest.mark.data
def test_sparse_cin0_model_with_batching_on_proteins():
    """Batched and unbatched evaluation must agree on the PROTEINS dataset."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111

    max_dim = 3
    torch.manual_seed(0)
    loader = DataLoader(dataset, batch_size=32, max_dim=max_dim)
    model = SparseCIN(num_input_features=dataset.num_features_in_dim(0),
                      num_classes=2, num_layers=3, hidden=5, jump_mode=None, max_dim=max_dim)
    # Eval mode avoids problems with batch norm.
    model.eval()

    batched_res = {}
    for batch in loader:
        _, res = model.forward(batch, include_partial=True)
        for key, value in res.items():
            batched_res.setdefault(key, []).append(value)
    batched_res = {k: torch.cat(v, dim=0) for k, v in batched_res.items()}

    unbatched_res = {}
    for complex in dataset:
        _, res = model.forward(ComplexBatch.from_complex_list([complex], max_dim=max_dim),
                               include_partial=True)
        for key, value in res.items():
            unbatched_res.setdefault(key, []).append(value)
    unbatched_res = {k: torch.cat(v, dim=0) for k, v in unbatched_res.items()}

    for key in set(unbatched_res) | set(batched_res):
        assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
            print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
| 9,019 | 37.712446 | 99 | py |
cwn | cwn-main/mp/layers.py | import torch
from typing import Any, Callable, Optional
from torch import Tensor
from mp.cell_mp import CochainMessagePassing, CochainMessagePassingParams
from torch_geometric.nn.inits import reset
from torch.nn import Linear, Sequential, BatchNorm1d as BN, Identity
from data.complex import Cochain
from torch_scatter import scatter
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from abc import ABC, abstractmethod
class DummyCochainMessagePassing(CochainMessagePassing):
    """Parameter-free cochain message-passing layer used only for testing."""

    def __init__(self, up_msg_size, down_msg_size, boundary_msg_size=None,
                 use_boundary_msg=False, use_down_msg=True):
        super(DummyCochainMessagePassing, self).__init__(
            up_msg_size, down_msg_size,
            boundary_msg_size=boundary_msg_size,
            use_boundary_msg=use_boundary_msg,
            use_down_msg=use_down_msg)

    def message_up(self, up_x_j: Tensor, up_attr: Tensor) -> Tensor:
        # (num_up_adj, x_feature_dim) + (num_up_adj, up_feat_dim).
        # We assume the feature dim is the same across all levels.
        return up_x_j + up_attr

    def message_down(self, down_x_j: Tensor, down_attr: Tensor) -> Tensor:
        # Same assumption as in message_up, for the down adjacencies.
        return down_x_j + down_attr

    def forward(self, cochain: CochainMessagePassingParams):
        msg_up, msg_down, msg_boundary = self.propagate(
            cochain.up_index, cochain.down_index, cochain.boundary_index,
            x=cochain.x,
            up_attr=cochain.kwargs['up_attr'],
            down_attr=cochain.kwargs['down_attr'],
            boundary_attr=cochain.kwargs['boundary_attr'])
        # Unused message types come back as zeros, so summing is safe.
        return cochain.x + msg_up + msg_down + msg_boundary
class DummyCellularMessagePassing(torch.nn.Module):
    """Stacks one DummyCochainMessagePassing per dimension of the complex."""

    def __init__(self, input_dim=1, max_dim: int = 2, use_boundary_msg=False, use_down_msg=True):
        super(DummyCellularMessagePassing, self).__init__()
        self.max_dim = max_dim
        # One (identical) dummy layer per cochain dimension 0..max_dim.
        self.mp_levels = torch.nn.ModuleList(
            DummyCochainMessagePassing(input_dim, input_dim, boundary_msg_size=input_dim,
                                       use_boundary_msg=use_boundary_msg,
                                       use_down_msg=use_down_msg)
            for _ in range(max_dim + 1))

    def forward(self, *cochain_params: CochainMessagePassingParams):
        assert len(cochain_params) <= self.max_dim + 1
        # Apply the dimension-specific layer to each supplied cochain.
        return [self.mp_levels[d].forward(params)
                for d, params in enumerate(cochain_params)]
class CINCochainConv(CochainMessagePassing):
    """CIN convolution for the cochains of a single dimension.

    Aggregates messages from upper and lower neighbours (boundary messages are
    disabled), adds a GIN-style (1 + eps) self-contribution to each aggregate,
    and applies an update network to their sum.
    """
    def __init__(self, up_msg_size: int, down_msg_size: int,
                 msg_up_nn: Callable, msg_down_nn: Callable, update_nn: Callable,
                 eps: float = 0., train_eps: bool = False):
        super(CINCochainConv, self).__init__(up_msg_size, down_msg_size, use_boundary_msg=False)
        self.msg_up_nn = msg_up_nn
        self.msg_down_nn = msg_down_nn
        self.update_nn = update_nn
        self.initial_eps = eps
        # eps is learnable only when train_eps is set; otherwise it is a buffer
        # so it still follows the module across devices.
        if train_eps:
            self.eps = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps', torch.Tensor([eps]))
        self.reset_parameters()
    def forward(self, cochain: CochainMessagePassingParams):
        # Boundary messages are disabled, so the third output is ignored.
        out_up, out_down, _ = self.propagate(cochain.up_index, cochain.down_index,
                                             None, x=cochain.x,
                                             up_attr=cochain.kwargs['up_attr'],
                                             down_attr=cochain.kwargs['down_attr'])
        # GIN-style central-cell contribution, applied to both aggregations.
        out_up += (1 + self.eps) * cochain.x
        out_down += (1 + self.eps) * cochain.x
        return self.update_nn(out_up + out_down)
    def reset_parameters(self):
        reset(self.msg_up_nn)
        reset(self.msg_down_nn)
        reset(self.update_nn)
        self.eps.data.fill_(self.initial_eps)
    def message_up(self, up_x_j: Tensor, up_attr: Tensor) -> Tensor:
        # Concatenate the shared upper cell's features when they are available.
        if up_attr is not None:
            x = torch.cat([up_x_j, up_attr], dim=-1)
            return self.msg_up_nn(x)
        else:
            return self.msg_up_nn(up_x_j)
    def message_down(self, down_x_j: Tensor, down_attr: Tensor) -> Tensor:
        x = torch.cat([down_x_j, down_attr], dim=-1)
        return self.msg_down_nn(x)
class CINConv(torch.nn.Module):
    """Applies a CINCochainConv to each dimension of the complex.

    The same message/update networks are shared across all dimensions up to
    ``max_dim``.
    """
    def __init__(self, up_msg_size: int, down_msg_size: int,
                 msg_up_nn: Callable, msg_down_nn: Callable, update_nn: Callable,
                 eps: float = 0., train_eps: bool = False, max_dim: int = 2):
        super(CINConv, self).__init__()
        self.max_dim = max_dim
        self.mp_levels = torch.nn.ModuleList()
        for dim in range(max_dim+1):
            mp = CINCochainConv(up_msg_size, down_msg_size,
                                msg_up_nn, msg_down_nn, update_nn, eps, train_eps)
            self.mp_levels.append(mp)

    def reset_parameters(self):
        # BUGFIX: CIN0.reset_parameters() calls conv.reset_parameters() on these
        # convs, but torch.nn.Module provides no such method. Delegate to the
        # per-dimension cochain layers, which do implement it.
        for mp in self.mp_levels:
            mp.reset_parameters()

    def forward(self, *cochain_params: CochainMessagePassingParams):
        assert len(cochain_params) <= self.max_dim+1

        out = []
        for dim in range(len(cochain_params)):
            out.append(self.mp_levels[dim].forward(cochain_params[dim]))
        return out
class EdgeCINConv(torch.nn.Module):
    """
    CIN convolutional layer which performs cochain message passing only
    _up to_ 1-dimensional cells (edges).
    """
    def __init__(self, up_msg_size: int, down_msg_size: int,
                 v_msg_up_nn: Callable, e_msg_down_nn: Callable, e_msg_up_nn: Callable,
                 v_update_nn: Callable, e_update_nn: Callable, eps: float = 0., train_eps=False):
        super(EdgeCINConv, self).__init__()
        self.max_dim = 1
        self.mp_levels = torch.nn.ModuleList()
        # Vertices have no lower neighbours, hence the constant-None down nn.
        v_mp = CINCochainConv(up_msg_size, down_msg_size,
                              v_msg_up_nn, lambda *args: None, v_update_nn, eps, train_eps)
        e_mp = CINCochainConv(up_msg_size, down_msg_size,
                              e_msg_up_nn, e_msg_down_nn, e_update_nn, eps, train_eps)
        self.mp_levels.extend([v_mp, e_mp])

    def reset_parameters(self):
        # BUGFIX: EdgeCIN0.reset_parameters() calls conv.reset_parameters() on
        # these convs, but torch.nn.Module provides no such method. Delegate to
        # the vertex/edge cochain layers, which do implement it.
        for mp in self.mp_levels:
            mp.reset_parameters()

    def forward(self, *cochain_params: CochainMessagePassingParams):
        assert len(cochain_params) <= self.max_dim+1

        out = []
        for dim in range(len(cochain_params)):
            out.append(self.mp_levels[dim].forward(cochain_params[dim]))
        return out
class SparseCINCochainConv(CochainMessagePassing):
    """CIN cochain layer that operates on boundaries and upper adjacent cells.

    Lower-adjacency messages are disabled (hence "sparse"). Upper and boundary
    aggregations each get a GIN-style (1 + eps) self-contribution and their own
    update network; the two results are fused by combine_nn.
    """
    def __init__(self, dim: int,
                 up_msg_size: int,
                 down_msg_size: int,
                 boundary_msg_size: Optional[int],
                 msg_up_nn: Callable,
                 msg_boundaries_nn: Callable,
                 update_up_nn: Callable,
                 update_boundaries_nn: Callable,
                 combine_nn: Callable,
                 eps: float = 0.,
                 train_eps: bool = False):
        super(SparseCINCochainConv, self).__init__(up_msg_size, down_msg_size, boundary_msg_size=boundary_msg_size,
                                               use_down_msg=False)
        # Dimension of the cells this layer operates on.
        self.dim = dim
        self.msg_up_nn = msg_up_nn
        self.msg_boundaries_nn = msg_boundaries_nn
        self.update_up_nn = update_up_nn
        self.update_boundaries_nn = update_boundaries_nn
        self.combine_nn = combine_nn
        self.initial_eps = eps
        # Separate eps for the upper (eps1) and boundary (eps2) aggregations.
        if train_eps:
            self.eps1 = torch.nn.Parameter(torch.Tensor([eps]))
            self.eps2 = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps1', torch.Tensor([eps]))
            self.register_buffer('eps2', torch.Tensor([eps]))
        self.reset_parameters()
    def forward(self, cochain: CochainMessagePassingParams):
        # Down messages are disabled, so the second output is ignored.
        out_up, _, out_boundaries = self.propagate(cochain.up_index, cochain.down_index,
                                  cochain.boundary_index, x=cochain.x,
                                  up_attr=cochain.kwargs['up_attr'],
                                  boundary_attr=cochain.kwargs['boundary_attr'])
        # As in GIN, we can learn an injective update function for each multi-set
        out_up += (1 + self.eps1) * cochain.x
        out_boundaries += (1 + self.eps2) * cochain.x
        out_up = self.update_up_nn(out_up)
        out_boundaries = self.update_boundaries_nn(out_boundaries)
        # We need to combine the two such that the output is injective
        # Because the cross product of countable spaces is countable, then such a function exists.
        # And we can learn it with another MLP.
        return self.combine_nn(torch.cat([out_up, out_boundaries], dim=-1))
    def reset_parameters(self):
        reset(self.msg_up_nn)
        reset(self.msg_boundaries_nn)
        reset(self.update_up_nn)
        reset(self.update_boundaries_nn)
        reset(self.combine_nn)
        self.eps1.data.fill_(self.initial_eps)
        self.eps2.data.fill_(self.initial_eps)
    def message_up(self, up_x_j: Tensor, up_attr: Tensor) -> Tensor:
        # The message nn receives a (neighbour, coboundary-attr) tuple.
        return self.msg_up_nn((up_x_j, up_attr))
    def message_boundary(self, boundary_x_j: Tensor) -> Tensor:
        return self.msg_boundaries_nn(boundary_x_j)
class CINppCochainConv(SparseCINCochainConv):
    """CINpp cochain layer.

    Extends SparseCINCochainConv with messages from lower adjacent cells,
    adding a third eps and a dedicated down-message/update network pair.
    """
    def __init__(self, dim: int, up_msg_size: int, down_msg_size: int, boundary_msg_size: int,
                 msg_up_nn: Callable[..., Any], msg_boundaries_nn: Callable[..., Any], msg_down_nn: Callable[..., Any],
                 update_up_nn: Callable[..., Any], update_boundaries_nn: Callable[..., Any], update_down_nn: Callable[..., Any],
                 combine_nn: Callable[..., Any], eps: float = 0, train_eps: bool = False):
        super(CINppCochainConv, self).__init__(dim, up_msg_size, down_msg_size, boundary_msg_size,
                                               msg_up_nn, msg_boundaries_nn,
                                               update_up_nn, update_boundaries_nn,
                                               combine_nn, eps, train_eps)
        # NOTE(review): the parent constructor passes use_down_msg=False to
        # CochainMessagePassing, yet this subclass consumes down messages in
        # forward() -- confirm the base class still produces them here.
        self.msg_down_nn = msg_down_nn
        self.update_down_nn = update_down_nn
        if train_eps:
            self.eps3 = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps3', torch.Tensor([eps]))
        reset(self.msg_down_nn)
        reset(self.update_down_nn)
        self.eps3.data.fill_(self.initial_eps)
    def message_down(self, down_x_j: Tensor, down_attr: Tensor) -> Tensor:
        # The message nn receives a (neighbour, shared-boundary-attr) tuple.
        return self.msg_down_nn((down_x_j, down_attr))
    def forward(self, cochain: CochainMessagePassingParams):
        # NOTE(review): propagate is not given down_attr here, unlike up_attr
        # and boundary_attr -- verify message_down receives what it expects.
        out_up, out_down, out_boundaries = self.propagate(cochain.up_index, cochain.down_index,
                                  cochain.boundary_index, x=cochain.x,
                                  up_attr=cochain.kwargs['up_attr'],
                                  boundary_attr=cochain.kwargs['boundary_attr'])
        # As in GIN, we can learn an injective update function for each multi-set
        out_up += (1 + self.eps1) * cochain.x
        out_down += (1 + self.eps2) * cochain.x
        out_boundaries += (1 + self.eps3) * cochain.x
        out_up = self.update_up_nn(out_up)
        out_down = self.update_down_nn(out_down)
        out_boundaries = self.update_boundaries_nn(out_boundaries)
        # We need to combine the three such that the output is injective
        # Because the cross product of countable spaces is countable, then such a function exists.
        # And we can learn it with another MLP.
        return self.combine_nn(torch.cat([out_up, out_down, out_boundaries], dim=-1))
class Catter(torch.nn.Module):
    """Module wrapper around ``torch.cat`` along the last (feature) dimension."""

    def __init__(self):
        super(Catter, self).__init__()

    def forward(self, x):
        # x is a sequence of tensors; concatenate them feature-wise.
        return torch.cat(x, dim=-1)
class SparseCINConv(torch.nn.Module):
    """A cellular version of GIN which performs message passing from cellular upper
    neighbors and boundaries, but not from lower neighbors (hence why "Sparse").

    Builds one SparseCINCochainConv per dimension. Any of the message/update
    networks left as None is replaced by a default MLP built from the kwargs
    ('layer_dim', 'hidden', 'act_module').
    """

    # TODO: Refactor the way we pass networks externally to allow for different networks per dim.
    def __init__(self, up_msg_size: int, down_msg_size: int, boundary_msg_size: Optional[int],
                 passed_msg_up_nn: Optional[Callable], passed_msg_boundaries_nn: Optional[Callable],
                 passed_update_up_nn: Optional[Callable],
                 passed_update_boundaries_nn: Optional[Callable],
                 eps: float = 0., train_eps: bool = False, max_dim: int = 2,
                 graph_norm=BN, use_coboundaries=False, **kwargs):
        super(SparseCINConv, self).__init__()
        self.max_dim = max_dim
        self.mp_levels = torch.nn.ModuleList()
        for dim in range(max_dim+1):
            msg_up_nn = passed_msg_up_nn
            if msg_up_nn is None:
                if use_coboundaries:
                    # Message combines the neighbour with the shared coboundary.
                    msg_up_nn = Sequential(
                            Catter(),
                            Linear(kwargs['layer_dim'] * 2, kwargs['layer_dim']),
                            kwargs['act_module']())
                else:
                    # Otherwise the message is just the neighbour's features.
                    msg_up_nn = lambda xs: xs[0]

            msg_boundaries_nn = passed_msg_boundaries_nn
            if msg_boundaries_nn is None:
                # Identity: boundary features are passed through unchanged.
                msg_boundaries_nn = lambda x: x

            update_up_nn = passed_update_up_nn
            if update_up_nn is None:
                update_up_nn = Sequential(
                    Linear(kwargs['layer_dim'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module'](),
                    Linear(kwargs['hidden'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module']()
                )

            update_boundaries_nn = passed_update_boundaries_nn
            if update_boundaries_nn is None:
                update_boundaries_nn = Sequential(
                    Linear(kwargs['layer_dim'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module'](),
                    Linear(kwargs['hidden'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module']()
                )

            # Fuses the up and boundary streams (input is their concatenation).
            combine_nn = Sequential(
                Linear(kwargs['hidden']*2, kwargs['hidden']),
                graph_norm(kwargs['hidden']),
                kwargs['act_module']())

            mp = SparseCINCochainConv(dim, up_msg_size, down_msg_size, boundary_msg_size=boundary_msg_size,
                msg_up_nn=msg_up_nn, msg_boundaries_nn=msg_boundaries_nn, update_up_nn=update_up_nn,
                update_boundaries_nn=update_boundaries_nn, combine_nn=combine_nn, eps=eps,
                train_eps=train_eps)
            self.mp_levels.append(mp)

    def forward(self, *cochain_params: CochainMessagePassingParams, start_to_process=0):
        """Runs each cochain through its dimension's layer.

        Dimensions below `start_to_process` are passed through unchanged.
        """
        assert len(cochain_params) <= self.max_dim+1

        out = []
        for dim in range(len(cochain_params)):
            if dim < start_to_process:
                out.append(cochain_params[dim].x)
            else:
                out.append(self.mp_levels[dim].forward(cochain_params[dim]))
        return out
class CINppConv(SparseCINConv):
    """CINpp version of SparseCINConv: also passes messages from lower neighbours.

    The parent constructor builds SparseCINCochainConv levels; they are then
    discarded and replaced here by CINppCochainConv levels with an additional
    down-message path (combine_nn therefore takes 3x hidden inputs).
    """
    def __init__(self, up_msg_size: int, down_msg_size: int, boundary_msg_size: Optional[int],
                 passed_msg_up_nn: Optional[Callable], passed_msg_down_nn: Optional[Callable],
                 passed_msg_boundaries_nn: Optional[Callable],
                 passed_update_up_nn: Optional[Callable],
                 passed_update_down_nn: Optional[Callable],
                 passed_update_boundaries_nn: Optional[Callable],
                 eps: float = 0., train_eps: bool = False, max_dim: int = 2,
                 graph_norm=BN, use_coboundaries=False, **kwargs):
        super(CINppConv, self).__init__(up_msg_size, down_msg_size, boundary_msg_size,
                                        passed_msg_up_nn, passed_msg_boundaries_nn,
                                        passed_update_up_nn, passed_update_boundaries_nn,
                                        eps, train_eps, max_dim, graph_norm, use_coboundaries, **kwargs)
        self.max_dim = max_dim
        # Rebuild the per-dimension layers with the extra down path.
        self.mp_levels = torch.nn.ModuleList()
        for dim in range(max_dim+1):
            msg_up_nn = passed_msg_up_nn
            if msg_up_nn is None:
                if use_coboundaries:
                    msg_up_nn = Sequential(
                            Catter(),
                            Linear(kwargs['layer_dim'] * 2, kwargs['layer_dim']),
                            kwargs['act_module']())
                else:
                    msg_up_nn = lambda xs: xs[0]

            msg_down_nn = passed_msg_down_nn
            if msg_down_nn is None:
                if use_coboundaries:
                    msg_down_nn = Sequential(
                            Catter(),
                            Linear(kwargs['layer_dim'] * 2, kwargs['layer_dim']),
                            kwargs['act_module']())
                else:
                    msg_down_nn = lambda xs: xs[0]

            msg_boundaries_nn = passed_msg_boundaries_nn
            if msg_boundaries_nn is None:
                msg_boundaries_nn = lambda x: x

            update_up_nn = passed_update_up_nn
            if update_up_nn is None:
                update_up_nn = Sequential(
                    Linear(kwargs['layer_dim'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module'](),
                    Linear(kwargs['hidden'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module']()
                )

            update_down_nn = passed_update_down_nn
            if update_down_nn is None:
                update_down_nn = Sequential(
                    Linear(kwargs['layer_dim'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module'](),
                    Linear(kwargs['hidden'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module']()
                )

            update_boundaries_nn = passed_update_boundaries_nn
            if update_boundaries_nn is None:
                update_boundaries_nn = Sequential(
                    Linear(kwargs['layer_dim'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module'](),
                    Linear(kwargs['hidden'], kwargs['hidden']),
                    graph_norm(kwargs['hidden']),
                    kwargs['act_module']()
                )

            # Three streams (up, down, boundaries) are concatenated before fusing.
            combine_nn = Sequential(
                Linear(kwargs['hidden']*3, kwargs['hidden']),
                graph_norm(kwargs['hidden']),
                kwargs['act_module']())

            mp = CINppCochainConv(dim, up_msg_size, down_msg_size, boundary_msg_size=boundary_msg_size,
                msg_up_nn=msg_up_nn, msg_down_nn=msg_down_nn, msg_boundaries_nn=msg_boundaries_nn, update_up_nn=update_up_nn,
                update_down_nn=update_down_nn, update_boundaries_nn=update_boundaries_nn, combine_nn=combine_nn, eps=eps,
                train_eps=train_eps)
            self.mp_levels.append(mp)
class OrientedConv(CochainMessagePassing):
    """Message-passing layer for oriented cochains.

    Orientation signs are smuggled through the up/down attributes (see TODO
    below) and multiply the messages when `orient` is True.
    """
    def __init__(self, dim: int, up_msg_size: int, down_msg_size: int,
                 update_up_nn: Optional[Callable], update_down_nn: Optional[Callable],
                 update_nn: Optional[Callable], act_fn, orient=True):
        super(OrientedConv, self).__init__(up_msg_size, down_msg_size, use_boundary_msg=False)
        self.dim = dim
        self.update_up_nn = update_up_nn
        self.update_down_nn = update_down_nn
        self.update_nn = update_nn
        self.act_fn = act_fn
        self.orient = orient
    def forward(self, cochain: Cochain):
        # Sanity checks: one orientation sign per adjacency, indices in range.
        assert len(cochain.upper_orient) == cochain.upper_index.size(1)
        assert len(cochain.lower_orient) == cochain.lower_index.size(1)
        assert cochain.upper_index.max() < len(cochain.x)
        assert cochain.lower_index.max() < len(cochain.x)
        out_up, out_down, _ = self.propagate(cochain.upper_index, cochain.lower_index, None, x=cochain.x,
                              up_attr=cochain.upper_orient.view(-1, 1), down_attr=cochain.lower_orient.view(-1, 1))
        out_up = self.update_up_nn(out_up)
        out_down = self.update_down_nn(out_down)
        # The central cell's own contribution goes through a separate network.
        x = self.update_nn(cochain.x)
        return self.act_fn(x + out_up + out_down)
    def reset_parameters(self):
        reset(self.update_up_nn)
        reset(self.update_down_nn)
        reset(self.update_nn)
    # TODO: As a temporary hack, we pass the orientation through the up and down attributes.
    def message_up(self, up_x_j: Tensor, up_attr: Tensor) -> Tensor:
        if self.orient:
            return up_x_j * up_attr
        return up_x_j
    def message_down(self, down_x_j: Tensor, down_attr: Tensor) -> Tensor:
        if self.orient:
            return down_x_j * down_attr
        return down_x_j
class InitReduceConv(torch.nn.Module):

    def __init__(self, reduce='add'):
        """
        Args:
            reduce (str): Way to aggregate boundaries. Can be "sum, add, mean, min, max"
        """
        super(InitReduceConv, self).__init__()
        self.reduce = reduce

    def forward(self, boundary_x, boundary_index):
        # Row 0 of boundary_index selects which boundary-cell feature feeds
        # each (boundary, cell) pair; row 1 says which cell it aggregates into.
        gathered = boundary_x.index_select(0, boundary_index[0])
        num_cells = boundary_index[1, :].max() + 1
        return scatter(gathered, boundary_index[1], dim=0, dim_size=num_cells,
                       reduce=self.reduce)
class AbstractEmbedVEWithReduce(torch.nn.Module, ABC):
    """Embeds vertex (and optionally edge) features and initialises the
    features of higher-dimensional cells by reducing over their boundaries.

    Subclasses define how the raw vertex/edge inputs are converted for their
    specific embedding layer.
    """

    def __init__(self,
                 v_embed_layer: Callable,
                 e_embed_layer: Optional[Callable],
                 init_reduce: InitReduceConv):
        """
        Args:
            v_embed_layer: Layer to embed the integer features of the vertices
            e_embed_layer: Layer (potentially None) to embed the integer features of the edges.
            init_reduce: Layer to initialise the 2D cell features and potentially the edge features.
        """
        super(AbstractEmbedVEWithReduce, self).__init__()
        self.v_embed_layer = v_embed_layer
        self.e_embed_layer = e_embed_layer
        self.init_reduce = init_reduce

    @abstractmethod
    def _prepare_v_inputs(self, v_params):
        # Converts the raw vertex cochain features for the embedding layer.
        pass

    @abstractmethod
    def _prepare_e_inputs(self, e_params):
        # Converts the raw edge cochain features for the embedding layer.
        pass

    def forward(self, *cochain_params: CochainMessagePassingParams):
        """Returns a list of features: [vertices], [vertices, edges] or
        [vertices, edges, two_cells], depending on how many cochains are given."""
        assert 1 <= len(cochain_params) <= 3
        v_params = cochain_params[0]
        e_params = cochain_params[1] if len(cochain_params) >= 2 else None
        c_params = cochain_params[2] if len(cochain_params) == 3 else None

        vx = self.v_embed_layer(self._prepare_v_inputs(v_params))
        out = [vx]

        if e_params is None:
            assert c_params is None
            return out

        # Edge features default to the reduction of their endpoint embeddings,
        # unless explicit edge features are provided.
        reduced_ex = self.init_reduce(vx, e_params.boundary_index)
        ex = reduced_ex

        if e_params.x is not None:
            ex = self.e_embed_layer(self._prepare_e_inputs(e_params))
            # The output of this should be the same size as the vertex features.
            assert ex.size(1) == vx.size(1)
        out.append(ex)

        if c_params is not None:
            # We divide by two in case this was obtained from node aggregation.
            # The division should not do any harm if this is an aggregation of learned embeddings.
            cx = self.init_reduce(reduced_ex, c_params.boundary_index) / 2.
            out.append(cx)

        return out

    def reset_parameters(self):
        reset(self.v_embed_layer)
        reset(self.e_embed_layer)
class EmbedVEWithReduce(AbstractEmbedVEWithReduce):
    """Embeds single-column integer vertex/edge labels via torch.nn.Embedding."""

    def __init__(self,
                 v_embed_layer: torch.nn.Embedding,
                 e_embed_layer: Optional[torch.nn.Embedding],
                 init_reduce: InitReduceConv):
        super(EmbedVEWithReduce, self).__init__(v_embed_layer, e_embed_layer, init_reduce)

    def _prepare_v_inputs(self, v_params):
        features = v_params.x
        assert features is not None
        assert features.dim() == 2
        assert features.size(1) == 1
        # Embedding layers index with longs: drop the feature dim and cast.
        return features.squeeze(1).to(dtype=torch.long)

    def _prepare_e_inputs(self, e_params):
        assert self.e_embed_layer is not None
        features = e_params.x
        assert features.dim() == 2
        assert features.size(1) == 1
        # Embedding layers index with longs: drop the feature dim and cast.
        return features.squeeze(1).to(dtype=torch.long)
class OGBEmbedVEWithReduce(AbstractEmbedVEWithReduce):
    """Variant using the OGB atom/bond encoders, which take multi-column inputs."""

    def __init__(self,
                 v_embed_layer: AtomEncoder,
                 e_embed_layer: Optional[BondEncoder],
                 init_reduce: InitReduceConv):
        super(OGBEmbedVEWithReduce, self).__init__(v_embed_layer, e_embed_layer, init_reduce)

    def _prepare_v_inputs(self, v_params):
        assert v_params.x is not None
        assert v_params.x.dim() == 2
        # ogbg-mol* inputs are already long; the cast only matters when this
        # layer is exercised with other datasets.
        return v_params.x.to(dtype=torch.long)

    def _prepare_e_inputs(self, e_params):
        assert self.e_embed_layer is not None
        assert e_params.x.dim() == 2
        # Same casting rationale as for the vertices.
        return e_params.x.to(dtype=torch.long)
| 26,386 | 43.422559 | 130 | py |
cwn | cwn-main/mp/nn.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool
from torch.nn import BatchNorm1d as BN, LayerNorm as LN, Identity
def get_nonlinearity(nonlinearity, return_module=True):
    """Maps a nonlinearity name to a torch module class or a callable.

    Args:
        nonlinearity (str): one of 'relu', 'elu', 'id', 'sigmoid', 'tanh'.
        return_module (bool): if True return the torch.nn module class,
            otherwise return the corresponding function.

    Raises:
        NotImplementedError: if the name is not supported.
    """
    if nonlinearity == 'relu':
        module = torch.nn.ReLU
        function = F.relu
    elif nonlinearity == 'elu':
        module = torch.nn.ELU
        function = F.elu
    elif nonlinearity == 'id':
        module = torch.nn.Identity
        function = lambda x: x
    elif nonlinearity == 'sigmoid':
        module = torch.nn.Sigmoid
        # torch.sigmoid instead of F.sigmoid, which is deprecated.
        function = torch.sigmoid
    elif nonlinearity == 'tanh':
        module = torch.nn.Tanh
        function = torch.tanh
    else:
        raise NotImplementedError('Nonlinearity {} is not currently supported.'.format(nonlinearity))
    if return_module:
        return module
    return function
def get_pooling_fn(readout):
    """Returns the graph-level pooling function for the given readout name."""
    pooling_fns = {'sum': global_add_pool, 'mean': global_mean_pool}
    if readout not in pooling_fns:
        raise NotImplementedError('Readout {} is not currently supported.'.format(readout))
    return pooling_fns[readout]
def get_graph_norm(norm):
    """Returns the normalisation layer class for the given shorthand name."""
    norm_classes = {'bn': BN, 'ln': LN, 'id': Identity}
    if norm not in norm_classes:
        raise ValueError(f'Graph Normalisation {norm} not currently supported')
    return norm_classes[norm]
def pool_complex(xs, data, max_dim, readout_type):
    """Pools the per-cell features of each dimension into per-complex features.

    Returns a tensor of shape [max_dim + 1, batch_size, feature_dim]; rows for
    dimensions not present in `xs` remain zero.
    """
    pooling_fn = get_pooling_fn(readout_type)
    # All complexes have nodes so we can extract the batch size from cochains[0]
    batch_size = data.cochains[0].batch.max() + 1
    # The MP output is of shape [message_passing_dim, batch_size, feature_dim]
    # NOTE: batch_size is a 0-dim tensor here, so .device is well-defined.
    pooled_xs = torch.zeros(max_dim+1, batch_size, xs[0].size(-1),
                            device=batch_size.device)
    for i in range(len(xs)):
        # It's very important that size is supplied.
        pooled_xs[i, :, :] = pooling_fn(xs[i], data.cochains[i].batch, size=batch_size)
    return pooled_xs
| 2,030 | 32.295082 | 101 | py |
cwn | cwn-main/mp/models.py | import torch
import torch.nn.functional as F
from torch.nn import Linear, Sequential, BatchNorm1d as BN
from torch_geometric.nn import JumpingKnowledge
from mp.layers import (
CINConv, EdgeCINConv, SparseCINConv, CINppConv,DummyCellularMessagePassing, OrientedConv)
from mp.nn import get_nonlinearity, get_pooling_fn, pool_complex, get_graph_norm
from data.complex import ComplexBatch, CochainBatch
class CIN0(torch.nn.Module):
    """
    A cellular version of GIN.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, num_input_features, num_classes, num_layers, hidden,
                 dropout_rate: float = 0.5,
                 max_dim: int = 2, jump_mode=None, nonlinearity='relu', readout='sum'):
        super(CIN0, self).__init__()

        self.max_dim = max_dim
        self.dropout_rate = dropout_rate
        self.jump_mode = jump_mode
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.pooling_fn = get_pooling_fn(readout)
        conv_nonlinearity = get_nonlinearity(nonlinearity, return_module=True)
        for i in range(num_layers):
            # Only the first layer consumes raw input features.
            layer_dim = num_input_features if i == 0 else hidden
            conv_update = Sequential(
                Linear(layer_dim, hidden),
                conv_nonlinearity(),
                Linear(hidden, hidden),
                conv_nonlinearity(),
                BN(hidden))
            # Up/down message nets consume [neighbour || shared-cell] features.
            conv_up = Sequential(
                Linear(layer_dim * 2, layer_dim),
                conv_nonlinearity(),
                BN(layer_dim))
            conv_down = Sequential(
                Linear(layer_dim * 2, layer_dim),
                conv_nonlinearity(),
                BN(layer_dim))
            self.convs.append(
                CINConv(layer_dim, layer_dim,
                    conv_up, conv_down, conv_update, train_eps=False, max_dim=self.max_dim))
        self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
        if jump_mode == 'cat':
            self.lin1 = Linear(num_layers * hidden, hidden)
        else:
            self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        if self.jump_mode is not None:
            self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def pool_complex(self, xs, data):
        """Pools per-cell features into one row per (dimension, complex)."""
        # All complexes have nodes so we can extract the batch size from cochains[0]
        batch_size = data.cochains[0].batch.max() + 1
        # The MP output is of shape [message_passing_dim, batch_size, feature_dim]
        pooled_xs = torch.zeros(self.max_dim + 1, batch_size, xs[0].size(-1),
                                device=batch_size.device)
        for i in range(len(xs)):
            # It's very important that size is supplied.
            pooled_xs[i, :, :] = self.pooling_fn(xs[i], data.cochains[i].batch, size=batch_size)
        return pooled_xs

    def jump_complex(self, jump_xs):
        # Perform JumpingKnowledge at each level of the complex
        xs = []
        for jumpx in jump_xs:
            xs += [self.jump(jumpx)]
        return xs

    def forward(self, data: ComplexBatch):
        """Classifies each complex: sums pooled features across dimensions,
        then applies the MLP head."""
        model_nonlinearity = get_nonlinearity(self.nonlinearity, return_module=False)
        xs, jump_xs = None, None
        for c, conv in enumerate(self.convs):
            params = data.get_all_cochain_params(max_dim=self.max_dim)
            xs = conv(*params)
            # Write updated features back so the next layer sees them.
            data.set_xs(xs)

            if self.jump_mode is not None:
                if jump_xs is None:
                    jump_xs = [[] for _ in xs]
                for i, x in enumerate(xs):
                    jump_xs[i] += [x]

        if self.jump_mode is not None:
            xs = self.jump_complex(jump_xs)

        pooled_xs = self.pool_complex(xs, data)
        x = pooled_xs.sum(dim=0)

        x = model_nonlinearity(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        x = self.lin2(x)
        return x

    def __repr__(self):
        return self.__class__.__name__
class SparseCIN(torch.nn.Module):
    """
    A cellular version of GIN using only upper-adjacency and boundary messages.

    This model is based on
    https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
    """

    def __init__(self, num_input_features, num_classes, num_layers, hidden,
                 dropout_rate: float = 0.5,
                 max_dim: int = 2, jump_mode=None, nonlinearity='relu', readout='sum',
                 train_eps=False, final_hidden_multiplier: int = 2, use_coboundaries=False,
                 readout_dims=(0, 1, 2), final_readout='sum', apply_dropout_before='lin2',
                 graph_norm='bn'):
        super(SparseCIN, self).__init__()

        self.max_dim = max_dim
        # Only read out dimensions that actually exist in the complex.
        if readout_dims is not None:
            self.readout_dims = tuple([dim for dim in readout_dims if dim <= max_dim])
        else:
            self.readout_dims = list(range(max_dim+1))
        self.final_readout = final_readout
        self.dropout_rate = dropout_rate
        # Where to apply dropout: 'lin1', 'final_readout' or 'lin2' (default).
        self.apply_dropout_before = apply_dropout_before
        self.jump_mode = jump_mode
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.pooling_fn = get_pooling_fn(readout)
        self.graph_norm = get_graph_norm(graph_norm)
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        for i in range(num_layers):
            layer_dim = num_input_features if i == 0 else hidden
            self.convs.append(
                SparseCINConv(up_msg_size=layer_dim, down_msg_size=layer_dim,
                    boundary_msg_size=layer_dim, passed_msg_boundaries_nn=None, passed_msg_up_nn=None,
                    passed_update_up_nn=None, passed_update_boundaries_nn=None,
                    train_eps=train_eps, max_dim=self.max_dim,
                    hidden=hidden, act_module=act_module, layer_dim=layer_dim,
                    graph_norm=self.graph_norm, use_coboundaries=use_coboundaries))
        self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
        # One readout head per dimension.
        self.lin1s = torch.nn.ModuleList()
        for _ in range(max_dim + 1):
            if jump_mode == 'cat':
                # These layers don't use a bias. Then, in case a level is not present the output
                # is just zero and it is not given by the biases.
                self.lin1s.append(Linear(num_layers * hidden, final_hidden_multiplier * hidden,
                                         bias=False))
            else:
                self.lin1s.append(Linear(hidden, final_hidden_multiplier * hidden))
        self.lin2 = Linear(final_hidden_multiplier * hidden, num_classes)

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        if self.jump_mode is not None:
            self.jump.reset_parameters()
        # BUGFIX: ModuleList has no reset_parameters(); reset each Linear head.
        for lin1 in self.lin1s:
            lin1.reset_parameters()
        self.lin2.reset_parameters()

    def pool_complex(self, xs, data):
        """Pools per-cell features into a list of [batch_size, feat] tensors,
        one per dimension (zeros for dimensions missing from xs)."""
        # All complexes have nodes so we can extract the batch size from cochains[0]
        batch_size = data.cochains[0].batch.max() + 1
        # The MP output is of shape [message_passing_dim, batch_size, feature_dim]
        pooled_xs = torch.zeros(self.max_dim + 1, batch_size, xs[0].size(-1),
                                device=batch_size.device)
        for i in range(len(xs)):
            # It's very important that size is supplied.
            pooled_xs[i, :, :] = self.pooling_fn(xs[i], data.cochains[i].batch, size=batch_size)

        new_xs = []
        for i in range(self.max_dim + 1):
            new_xs.append(pooled_xs[i])
        return new_xs

    def jump_complex(self, jump_xs):
        # Perform JumpingKnowledge at each level of the complex
        xs = []
        for jumpx in jump_xs:
            xs += [self.jump(jumpx)]
        return xs

    def forward(self, data: ComplexBatch, include_partial=False):
        """Classifies each complex in the batch.

        When include_partial is True, also returns a dict of intermediate
        per-layer and pooled representations (used for testing/inspection).
        """
        act = get_nonlinearity(self.nonlinearity, return_module=False)

        xs, jump_xs = None, None
        res = {}
        for c, conv in enumerate(self.convs):
            params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
            start_to_process = 0
            xs = conv(*params, start_to_process=start_to_process)
            # Write updated features back so the next layer sees them.
            data.set_xs(xs)

            if include_partial:
                for k in range(len(xs)):
                    res[f"layer{c}_{k}"] = xs[k]

            if self.jump_mode is not None:
                if jump_xs is None:
                    jump_xs = [[] for _ in xs]
                for i, x in enumerate(xs):
                    jump_xs[i] += [x]

        if self.jump_mode is not None:
            xs = self.jump_complex(jump_xs)

        xs = self.pool_complex(xs, data)
        # Select the dimensions we want at the end.
        xs = [xs[i] for i in self.readout_dims]

        if include_partial:
            for k in range(len(xs)):
                res[f"pool_{k}"] = xs[k]

        new_xs = []
        for i, x in enumerate(xs):
            if self.apply_dropout_before == 'lin1':
                x = F.dropout(x, p=self.dropout_rate, training=self.training)
            new_xs.append(act(self.lin1s[self.readout_dims[i]](x)))

        x = torch.stack(new_xs, dim=0)

        if self.apply_dropout_before == 'final_readout':
            x = F.dropout(x, p=self.dropout_rate, training=self.training)
        # Combine the per-dimension representations into one vector per complex.
        if self.final_readout == 'mean':
            x = x.mean(0)
        elif self.final_readout == 'sum':
            x = x.sum(0)
        else:
            raise NotImplementedError
        if self.apply_dropout_before not in ['lin1', 'final_readout']:
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        x = self.lin2(x)

        if include_partial:
            res['out'] = x
            return x, res
        return x

    def __repr__(self):
        return self.__class__.__name__
class CINpp(SparseCIN):
    """CINpp: a SparseCIN whose layers also pass messages from lower neighbours."""

    def __init__(self, num_input_features, num_classes, num_layers, hidden,
                 dropout_rate: float = 0.5, max_dim: int = 2, jump_mode=None,
                 nonlinearity='relu', readout='sum', train_eps=False,
                 final_hidden_multiplier: int = 2, use_coboundaries=False,
                 readout_dims=(0, 1, 2), final_readout='sum',
                 apply_dropout_before='lin2', graph_norm='bn'):
        super(CINpp, self).__init__(num_input_features, num_classes, num_layers, hidden,
                                    dropout_rate, max_dim, jump_mode, nonlinearity,
                                    readout, train_eps, final_hidden_multiplier,
                                    use_coboundaries, readout_dims, final_readout,
                                    apply_dropout_before, graph_norm)
        # Swap the SparseCIN convolutions for CINpp ones (which additionally
        # use down adjacencies); everything else from SparseCIN is reused.
        act_module = get_nonlinearity(nonlinearity, return_module=True)
        convs = []
        for layer_idx in range(num_layers):
            in_dim = num_input_features if layer_idx == 0 else hidden
            convs.append(
                CINppConv(up_msg_size=in_dim, down_msg_size=in_dim,
                          boundary_msg_size=in_dim, passed_msg_boundaries_nn=None,
                          passed_msg_up_nn=None, passed_msg_down_nn=None,
                          passed_update_up_nn=None, passed_update_down_nn=None,
                          passed_update_boundaries_nn=None, train_eps=train_eps,
                          max_dim=self.max_dim, hidden=hidden, act_module=act_module,
                          layer_dim=in_dim, graph_norm=self.graph_norm,
                          use_coboundaries=use_coboundaries))
        self.convs = torch.nn.ModuleList(convs)
class EdgeCIN0(torch.nn.Module):
"""
A variant of CIN0 operating up to edge level. It may optionally ignore two_cell features.
This model is based on
https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
"""
def __init__(self, num_input_features, num_classes, num_layers, hidden,
dropout_rate: float = 0.5,
jump_mode=None, nonlinearity='relu', include_top_features=True,
update_top_features=True,
readout='sum'):
super(EdgeCIN0, self).__init__()
self.max_dim = 1
self.include_top_features = include_top_features
# If the top features are included, then they can be updated by a network.
self.update_top_features = include_top_features and update_top_features
self.dropout_rate = dropout_rate
self.jump_mode = jump_mode
self.convs = torch.nn.ModuleList()
self.update_top_nns = torch.nn.ModuleList()
self.nonlinearity = nonlinearity
self.pooling_fn = get_pooling_fn(readout)
conv_nonlinearity = get_nonlinearity(nonlinearity, return_module=True)
for i in range(num_layers):
layer_dim = num_input_features if i == 0 else hidden
v_conv_update = Sequential(
Linear(layer_dim, hidden),
conv_nonlinearity(),
Linear(hidden, hidden),
conv_nonlinearity(),
BN(hidden))
e_conv_update = Sequential(
Linear(layer_dim, hidden),
conv_nonlinearity(),
Linear(hidden, hidden),
conv_nonlinearity(),
BN(hidden))
v_conv_up = Sequential(
Linear(layer_dim * 2, layer_dim),
conv_nonlinearity(),
BN(layer_dim))
e_conv_down = Sequential(
Linear(layer_dim * 2, layer_dim),
conv_nonlinearity(),
BN(layer_dim))
e_conv_inp_dim = layer_dim * 2 if include_top_features else layer_dim
e_conv_up = Sequential(
Linear(e_conv_inp_dim, layer_dim),
conv_nonlinearity(),
BN(layer_dim))
self.convs.append(
EdgeCINConv(layer_dim, layer_dim, v_conv_up, e_conv_down, e_conv_up,
v_conv_update, e_conv_update, train_eps=False))
if self.update_top_features and i < num_layers - 1:
self.update_top_nns.append(Sequential(
Linear(layer_dim, hidden),
conv_nonlinearity(),
Linear(hidden, hidden),
conv_nonlinearity(),
BN(hidden))
)
self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
if jump_mode == 'cat':
self.lin1 = Linear(num_layers * hidden, hidden)
else:
self.lin1 = Linear(hidden, hidden)
self.lin2 = Linear(hidden, num_classes)
def reset_parameters(self):
    """Re-initialise every learnable submodule of the model."""
    for module in self.convs:
        module.reset_parameters()
    if self.jump_mode is not None:
        self.jump.reset_parameters()
    for linear in (self.lin1, self.lin2):
        linear.reset_parameters()
    for mlp in self.update_top_nns:
        mlp.reset_parameters()
def pool_complex(self, xs, data):
    """Pool the per-cell features of each dimension into per-complex vectors.

    Returns a tensor of shape [max_dim + 1, batch_size, feature_dim] where
    row i holds the pooled features of the i-dimensional cells.
    """
    # All complexes have nodes so we can extract the batch size from cochains[0]
    # (batch_size is a 0-dim tensor here, so .device below is valid).
    batch_size = data.cochains[0].batch.max() + 1
    # The MP output is of shape [message_passing_dim, batch_size, feature_dim]
    pooled_xs = torch.zeros(self.max_dim + 1, batch_size, xs[0].size(-1),
                            device=batch_size.device)
    for i in range(len(xs)):
        # It's very important that size is supplied.
        # Otherwise, when some complexes lack cells at a dimension, the wrong
        # output size could be inferred from data.cochains[i].batch.
        pooled_xs[i, :, :] = self.pooling_fn(xs[i], data.cochains[i].batch, size=batch_size)
    return pooled_xs
def jump_complex(self, jump_xs):
    """Apply the JumpingKnowledge module independently at every dimension
    of the complex."""
    return [self.jump(per_dim_xs) for per_dim_xs in jump_xs]
def forward(self, data: ComplexBatch):
    """Run message passing over a batch of complexes and classify them.

    Returns logits of shape [batch_size, num_classes].
    """
    model_nonlinearity = get_nonlinearity(self.nonlinearity, return_module=False)
    xs, jump_xs = None, None
    for c, conv in enumerate(self.convs):
        params = data.get_all_cochain_params(max_dim=self.max_dim,
                                             include_top_features=self.include_top_features)
        xs = conv(*params)
        # If we are at the last convolutional layer, we do not need to update after
        # We also check two_cell features do indeed exist in this batch before doing this.
        if self.update_top_features and c < len(self.convs) - 1 and 2 in data.cochains:
            top_x = self.update_top_nns[c](data.cochains[2].x)
            data.set_xs(xs + [top_x])
        else:
            data.set_xs(xs)
        if self.jump_mode is not None:
            # Accumulate the per-layer outputs separately for each dimension.
            if jump_xs is None:
                jump_xs = [[] for _ in xs]
            for i, x in enumerate(xs):
                jump_xs[i] += [x]
    if self.jump_mode is not None:
        xs = self.jump_complex(jump_xs)
    pooled_xs = self.pool_complex(xs, data)
    # Reduce across the dimensions of the complex.
    x = pooled_xs.sum(dim=0)
    x = model_nonlinearity(self.lin1(x))
    x = F.dropout(x, p=self.dropout_rate, training=self.training)
    x = self.lin2(x)
    return x
def __repr__(self):
    """Show only the class name, like the other models in this file."""
    return type(self).__name__
class Dummy(torch.nn.Module):
    """A dummy cellular network model.

    The convolutional layers carry no parameters, readout at every layer is
    by summation, and the output is produced by one single linear layer.
    """

    def __init__(self, num_input_features, num_classes, num_layers, max_dim: int = 2,
                 readout='sum'):
        super(Dummy, self).__init__()
        self.max_dim = max_dim
        self.convs = torch.nn.ModuleList(
            DummyCellularMessagePassing(max_dim=max_dim) for _ in range(num_layers))
        self.pooling_fn = get_pooling_fn(readout)
        self.lin = Linear(num_input_features, num_classes)

    def reset_parameters(self):
        # Only the final linear layer has learnable parameters.
        self.lin.reset_parameters()

    def forward(self, data: ComplexBatch):
        xs = None
        for conv in self.convs:
            xs = conv(*data.get_all_cochain_params())
            data.set_xs(xs)
        # Every complex has vertices, so the batch size can be read off the
        # 0-dimensional cochain. Note this is a 0-dim tensor, hence .device.
        n_complexes = data.cochains[0].batch.max() + 1
        # One row of pooled features per message-passing dimension; all layers
        # share the same feature size, and dimensions where no cells exist in
        # this batch are simply left at zero.
        # TODO: shall we retain the device as an attribute of self? then
        # `device=n_complexes.device` would become `device=self.device`.
        pooled = torch.zeros(self.max_dim + 1, n_complexes, xs[0].size(-1),
                             device=n_complexes.device)
        for dim in range(len(xs)):
            # Supplying size= is essential: otherwise, for complexes with no
            # cells at some dimension, the wrong shape could be inferred
            # automatically from data.cochains[dim].batch.
            pooled[dim, :, :] = self.pooling_fn(xs[dim], data.cochains[dim].batch,
                                                size=n_complexes)
        # Reduce across the dimensions of the complexes, then classify.
        return self.lin(pooled.sum(dim=0))

    def __repr__(self):
        return type(self).__name__
class EdgeOrient(torch.nn.Module):
    """
    A model for edge-defined signals taking edge orientation into account.
    """
    def __init__(self, num_input_features, num_classes, num_layers, hidden,
                 dropout_rate: float = 0.0, jump_mode=None, nonlinearity='id', readout='sum',
                 fully_invar=False):
        """Build the model.

        Args:
            num_input_features: Feature size of the input edge signal.
            num_classes: Output size of the final linear layer.
            num_layers: Number of OrientedConv layers.
            hidden: Hidden feature size.
            dropout_rate: Dropout probability before the final layer.
            jump_mode: JumpingKnowledge mode or None to disable it.
            nonlinearity: Activation name resolved via get_nonlinearity.
            readout: Pooling function name resolved via get_pooling_fn.
            fully_invar: If True, orientation is discarded up-front by taking
                absolute values of the input; otherwise it is preserved through
                the convolutions and invariance is enforced only at readout.
        """
        super(EdgeOrient, self).__init__()
        # Message passing happens at the edge level only.
        self.max_dim = 1
        self.fully_invar = fully_invar
        orient = not self.fully_invar
        self.dropout_rate = dropout_rate
        self.jump_mode = jump_mode
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.pooling_fn = get_pooling_fn(readout)
        for i in range(num_layers):
            layer_dim = num_input_features if i == 0 else hidden
            # !!!!! Biases must be set to false. Otherwise, the model is not equivariant !!!!
            update_up = Linear(layer_dim, hidden, bias=False)
            update_down = Linear(layer_dim, hidden, bias=False)
            update = Linear(layer_dim, hidden, bias=False)
            self.convs.append(
                OrientedConv(dim=1, up_msg_size=layer_dim, down_msg_size=layer_dim,
                             update_up_nn=update_up, update_down_nn=update_down, update_nn=update,
                             act_fn=get_nonlinearity(nonlinearity, return_module=False), orient=orient))
        self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise all learnable parameters."""
        for conv in self.convs:
            conv.reset_parameters()
        if self.jump_mode is not None:
            self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data: CochainBatch, include_partial=False):
        """Classify a batch of edge cochains.

        When ``include_partial`` is True, also return the per-cell output of
        the last convolutional layer (before the invariant readout).
        """
        if self.fully_invar:
            data.x = torch.abs(data.x)
        for c, conv in enumerate(self.convs):
            x = conv(data)
            data.x = x
        cell_pred = x
        # To obtain orientation invariance, we take the absolute value of the features.
        # Unless we did that already before the first layer.
        batch_size = data.batch.max() + 1
        if not self.fully_invar:
            x = torch.abs(x)
        x = self.pooling_fn(x, data.batch, size=batch_size)
        # At this point we have invariance: we can use any non-linearity we like.
        # Here, independently from previous non-linearities, we choose ReLU.
        # Note that this makes the model non-linear even when employing identity
        # in previous layers.
        x = torch.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        x = self.lin2(x)
        if include_partial:
            return x, cell_pred
        return x

    def __repr__(self):
        return self.__class__.__name__
class EdgeMPNN(torch.nn.Module):
    """
    An MPNN operating in the line graph.
    """
    def __init__(self, num_input_features, num_classes, num_layers, hidden,
                 dropout_rate: float = 0.0, jump_mode=None, nonlinearity='relu', readout='sum',
                 fully_invar=True):
        """Build the model.

        Args:
            num_input_features: Feature size of the input edge signal.
            num_classes: Output size of the final linear layer.
            num_layers: Number of OrientedConv layers.
            hidden: Hidden feature size.
            dropout_rate: Dropout probability before the final layer.
            jump_mode: JumpingKnowledge mode or None to disable it.
            nonlinearity: Activation name resolved via get_nonlinearity.
            readout: Pooling function name resolved via get_pooling_fn.
            fully_invar: If True (default here), orientation is discarded
                up-front by taking absolute values of the input.
        """
        super(EdgeMPNN, self).__init__()
        self.max_dim = 1
        self.dropout_rate = dropout_rate
        self.fully_invar = fully_invar
        orient = not self.fully_invar
        self.jump_mode = jump_mode
        self.convs = torch.nn.ModuleList()
        self.nonlinearity = nonlinearity
        self.pooling_fn = get_pooling_fn(readout)
        for i in range(num_layers):
            layer_dim = num_input_features if i == 0 else hidden
            # We pass this lambda function to discard upper adjacencies
            update_up = lambda x: 0
            update_down = Linear(layer_dim, hidden, bias=False)
            update = Linear(layer_dim, hidden, bias=False)
            self.convs.append(
                OrientedConv(dim=1, up_msg_size=layer_dim, down_msg_size=layer_dim,
                             update_up_nn=update_up, update_down_nn=update_down, update_nn=update,
                             act_fn=get_nonlinearity(nonlinearity, return_module=False), orient=orient))
        self.jump = JumpingKnowledge(jump_mode) if jump_mode is not None else None
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise all learnable parameters."""
        for conv in self.convs:
            conv.reset_parameters()
        if self.jump_mode is not None:
            self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data: CochainBatch, include_partial=False):
        """Classify a batch of edge cochains.

        When ``include_partial`` is True, also return the per-cell output of
        the last convolutional layer (before readout).
        """
        if self.fully_invar:
            data.x = torch.abs(data.x)
        for c, conv in enumerate(self.convs):
            x = conv(data)
            data.x = x
        cell_pred = x
        batch_size = data.batch.max() + 1
        if not self.fully_invar:
            x = torch.abs(x)
        x = self.pooling_fn(x, data.batch, size=batch_size)
        # At this point we have invariance: we can use any non-linearity we like.
        # Here, independently from previous non-linearities, we choose ReLU.
        # Note that this makes the model non-linear even when employing identity
        # in previous layers.
        x = torch.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        x = self.lin2(x)
        if include_partial:
            return x, cell_pred
        return x

    def __repr__(self):
        return self.__class__.__name__
class MessagePassingAgnostic(torch.nn.Module):
    """A baseline model which does not perform any message passing.

    Initial simplicial/cell representations are obtained by applying a dense
    layer per dimension instead. Sort of resembles a 'DeepSets'-like
    architecture, but on simplicial/cell complexes.
    """

    def __init__(self, num_input_features, num_classes, hidden, dropout_rate: float = 0.5,
                 max_dim: int = 2, nonlinearity='relu', readout='sum'):
        super(MessagePassingAgnostic, self).__init__()
        self.max_dim = max_dim
        self.dropout_rate = dropout_rate
        self.readout_type = readout
        self.act = get_nonlinearity(nonlinearity, return_module=False)
        # One embedding layer for each dimension of the complex.
        self.lin0s = torch.nn.ModuleList(
            Linear(num_input_features, hidden) for _ in range(max_dim + 1))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        for embed in self.lin0s:
            embed.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data: ComplexBatch):
        params = data.get_all_cochain_params(max_dim=self.max_dim, include_down_features=False)
        # Embed each dimension independently; no cross-dimension messages.
        xs = [self.act(self.lin0s[dim](params[dim].x)) for dim in range(len(params))]
        pooled_xs = pool_complex(xs, data, self.max_dim, self.readout_type)
        pooled_xs = self.act(self.lin1(pooled_xs))
        # Reduce across the dimensions of the complexes, then classify.
        x = pooled_xs.sum(dim=0)
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        return self.lin2(x)

    def __repr__(self):
        return type(self).__name__
| 27,151 | 40.015106 | 102 | py |
cwn | cwn-main/mp/test_orientation.py | import torch
import numpy as np
from data.datasets.flow import load_flow_dataset
from mp.models import EdgeOrient, EdgeMPNN
from mp.layers import OrientedConv
from data.complex import CochainBatch
from data.data_loading import DataLoader
from data.datasets.flow_utils import build_cochain
def generate_oriented_flow_pair():
    """Return the same flow cochain under two orientations, plus the diagonal
    orientation-change matrix T2 that relates them.

    This is the complex from slide 19 of https://crisbodnar.github.io/files/mml_talk.pdf
    """
    # Vertex-to-edge boundary matrix.
    boundary1 = np.array([
        [-1, -1, 0, 0, 0, 0],
        [+1, 0, -1, 0, 0, +1],
        [ 0, +1, 0, -1, 0, -1],
        [ 0, 0, +1, +1, -1, 0],
        [ 0, 0, 0, 0, +1, 0],
    ])
    # Edge-to-triangle boundary matrix.
    boundary2 = np.array([
        [-1, 0],
        [+1, 0],
        [ 0, +1],
        [ 0, -1],
        [ 0, 0],
        [+1, +1],
    ])
    flow = np.array([[1.0], [0.0], [0.0], [1.0], [1.0], [-1.0]])
    # First cochain keeps the reference orientation; the second one flips the
    # orientation of the last two edges.
    keep_orient = np.identity(flow.shape[0])
    flip_orient = np.diag([+1.0, +1.0, +1.0, +1.0, -1.0, -1.0])
    cochain1 = build_cochain(boundary1, boundary2, keep_orient, flow, 0)
    cochain2 = build_cochain(boundary1, boundary2, flip_orient, flow, 0)
    return cochain1, cochain2, torch.tensor(flip_orient, dtype=torch.float)
def test_edge_orient_model_on_flow_dataset_with_batching():
    """Batched and one-by-one inference must produce identical predictions."""
    dataset, _, _ = load_flow_dataset(num_points=100, num_train=20, num_test=2)
    np.random.seed(4)
    data_loader = DataLoader(dataset, batch_size=16)
    model = EdgeOrient(num_input_features=1, num_classes=2, num_layers=2, hidden=5)
    # We use the model in eval mode to test its inference behavior.
    model.eval()
    batched_preds = []
    for batch in data_loader:
        batched_pred = model.forward(batch)
        batched_preds.append(batched_pred)
    batched_preds = torch.cat(batched_preds, dim=0)
    preds = []
    for cochain in dataset:
        # Predict each cochain individually via a singleton batch.
        pred = model.forward(CochainBatch.from_cochain_list([cochain]))
        preds.append(pred)
    preds = torch.cat(preds, dim=0)
    assert (preds.size() == batched_preds.size())
    # Batching must not change the numerics beyond floating-point tolerance.
    assert torch.allclose(preds, batched_preds, atol=1e-5)
def test_edge_orient_conv_is_orientation_equivariant():
    """A bare OrientedConv propagation must commute with the orientation change T2."""
    cochain1, cochain2, T2 = generate_oriented_flow_pair()
    # The adjacency structure does not depend on orientation.
    assert torch.equal(cochain1.lower_index, cochain2.lower_index)
    assert torch.equal(cochain1.upper_index, cochain2.upper_index)
    layer = OrientedConv(dim=1, up_msg_size=1, down_msg_size=1, update_up_nn=None,
                         update_down_nn=None, update_nn=None, act_fn=None)
    out_up1, out_down1, _ = layer.propagate(cochain1.upper_index, cochain1.lower_index, None, x=cochain1.x,
                                            up_attr=cochain1.upper_orient.view(-1, 1), down_attr=cochain1.lower_orient.view(-1, 1))
    out_up2, out_down2, _ = layer.propagate(cochain2.upper_index, cochain2.lower_index, None, x=cochain2.x,
                                            up_attr=cochain2.upper_orient.view(-1, 1), down_attr=cochain2.lower_orient.view(-1, 1))
    # Equivariance: re-orienting the first outputs yields the second outputs.
    assert torch.equal(T2 @ out_up1, out_up2)
    assert torch.equal(T2 @ out_down1, out_down2)
    assert torch.equal(T2 @ (cochain1.x + out_up1 + out_down1), cochain2.x + out_up2 + out_down2)
def test_edge_orient_model_with_tanh_is_orientation_equivariant_and_invariant_at_readout():
    """With an odd nonlinearity (tanh), the model is equivariant per-cell and
    invariant after the absolute-value readout."""
    cochain1, cochain2, T2 = generate_oriented_flow_pair()
    assert torch.equal(cochain1.lower_index, cochain2.lower_index)
    assert torch.equal(cochain1.upper_index, cochain2.upper_index)
    model = EdgeOrient(num_input_features=1, num_classes=2, num_layers=2, hidden=5,
                       nonlinearity='tanh', dropout_rate=0.0)
    model.eval()
    final1, pred1 = model.forward(CochainBatch.from_cochain_list([cochain1]), include_partial=True)
    final2, pred2 = model.forward(CochainBatch.from_cochain_list([cochain2]), include_partial=True)
    # Check equivariant.
    assert torch.equal(T2 @ pred1, pred2)
    # Check invariant after readout.
    assert torch.equal(final1, final2)
def test_edge_orient_model_with_id_is_orientation_equivariant_and_invariant_at_readout():
    """With the identity nonlinearity, the model is equivariant per-cell and
    invariant after the absolute-value readout."""
    cochain1, cochain2, T2 = generate_oriented_flow_pair()
    assert torch.equal(cochain1.lower_index, cochain2.lower_index)
    assert torch.equal(cochain1.upper_index, cochain2.upper_index)
    model = EdgeOrient(num_input_features=1, num_classes=2, num_layers=2, hidden=5,
                       nonlinearity='id', dropout_rate=0.0)
    model.eval()
    final1, pred1 = model.forward(CochainBatch.from_cochain_list([cochain1]), include_partial=True)
    final2, pred2 = model.forward(CochainBatch.from_cochain_list([cochain2]), include_partial=True)
    # Check equivariant.
    assert torch.equal(T2 @ pred1, pred2)
    # Check invariant after readout.
    assert torch.equal(final1, final2)
def test_edge_orient_model_with_relu_is_not_orientation_equivariant_or_invariant():
    """ReLU is not an odd function, so it must break both equivariance and
    invariance — this is the negative control for the two tests above."""
    cochain1, cochain2, T2 = generate_oriented_flow_pair()
    assert torch.equal(cochain1.lower_index, cochain2.lower_index)
    assert torch.equal(cochain1.upper_index, cochain2.upper_index)
    model = EdgeOrient(num_input_features=1, num_classes=2, num_layers=2, hidden=5,
                       nonlinearity='relu', dropout_rate=0.0)
    model.eval()
    _, pred1 = model.forward(CochainBatch.from_cochain_list([cochain1]), include_partial=True)
    _, pred2 = model.forward(CochainBatch.from_cochain_list([cochain2]), include_partial=True)
    # Check not equivariant.
    assert not torch.equal(T2 @ pred1, pred2)
    # Check not invariant.
    assert not torch.equal(pred1, pred2)
def test_edge_mpnn_model_is_orientation_invariant():
    """EdgeMPNN (fully invariant by default) must give identical per-cell
    outputs regardless of the input orientation."""
    cochain1, cochain2, T2 = generate_oriented_flow_pair()
    assert torch.equal(cochain1.lower_index, cochain2.lower_index)
    assert torch.equal(cochain1.upper_index, cochain2.upper_index)
    model = EdgeMPNN(num_input_features=1, num_classes=2, num_layers=2, hidden=5,
                     nonlinearity='id', dropout_rate=0.0)
    model.eval()
    _, pred1 = model.forward(CochainBatch.from_cochain_list([cochain1]), include_partial=True)
    _, pred2 = model.forward(CochainBatch.from_cochain_list([cochain2]), include_partial=True)
    # Check the model is orientation invariant.
    assert torch.equal(pred1, pred2)
| 5,926 | 39.047297 | 107 | py |
cwn | cwn-main/mp/graph_models.py | """
Code based on https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/gin.py
Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn.functional as F
from torch.nn import Linear, Sequential, BatchNorm1d as BN
from torch_geometric.nn import GINConv, JumpingKnowledge
from mp.nn import get_nonlinearity, get_pooling_fn
class GIN0(torch.nn.Module):
    """A GIN model with eps fixed to zero (train_eps=False) for graph classification."""

    def __init__(self, num_features, num_layers, hidden, num_classes, readout='sum',
                 dropout_rate=0.5, nonlinearity='relu'):
        super(GIN0, self).__init__()
        self.pooling_fn = get_pooling_fn(readout)
        self.nonlinearity = nonlinearity
        self.dropout_rate = dropout_rate
        act = get_nonlinearity(nonlinearity, return_module=True)

        def make_mlp(in_dim):
            # The two-layer MLP used inside every GINConv.
            return Sequential(
                Linear(in_dim, hidden), BN(hidden), act(),
                Linear(hidden, hidden), BN(hidden), act())

        self.conv1 = GINConv(make_mlp(num_features), train_eps=False)
        self.convs = torch.nn.ModuleList(
            GINConv(make_mlp(hidden), train_eps=False) for _ in range(num_layers - 1))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise every learnable submodule."""
        for module in [self.conv1, *self.convs, self.lin1, self.lin2]:
            module.reset_parameters()

    def forward(self, data):
        """Return class logits of shape [num_graphs, num_classes]."""
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.conv1(x, edge_index)
        for conv in self.convs:
            x = conv(x, edge_index)
        # Graph-level readout, then the classification head.
        x = self.pooling_fn(x, batch)
        x = act(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        return self.lin2(x)

    def __repr__(self):
        return type(self).__name__
class GIN0WithJK(torch.nn.Module):
    """A GIN model with eps fixed to zero, plus Jumping Knowledge across layers."""

    def __init__(self, num_features, num_layers, hidden, num_classes, mode='cat', readout='sum',
                 dropout_rate=0.5, nonlinearity='relu'):
        super(GIN0WithJK, self).__init__()
        self.pooling_fn = get_pooling_fn(readout)
        self.dropout_rate = dropout_rate
        self.nonlinearity = nonlinearity
        act = get_nonlinearity(nonlinearity, return_module=True)

        def make_mlp(in_dim):
            # The two-layer MLP used inside every GINConv.
            return Sequential(
                Linear(in_dim, hidden), BN(hidden), act(),
                Linear(hidden, hidden), BN(hidden), act())

        self.conv1 = GINConv(make_mlp(num_features), train_eps=False)
        self.convs = torch.nn.ModuleList(
            GINConv(make_mlp(hidden), train_eps=False) for _ in range(num_layers - 1))
        self.jump = JumpingKnowledge(mode)
        # 'cat' concatenates all layer outputs, so lin1 must be widened.
        self.lin1 = Linear(num_layers * hidden if mode == 'cat' else hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise every learnable submodule."""
        for module in [self.conv1, *self.convs, self.jump, self.lin1, self.lin2]:
            module.reset_parameters()

    def forward(self, data):
        """Return class logits of shape [num_graphs, num_classes]."""
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.conv1(x, edge_index)
        layer_outputs = [x]
        for conv in self.convs:
            x = conv(x, edge_index)
            layer_outputs.append(x)
        # Aggregate the per-layer representations before readout.
        x = self.jump(layer_outputs)
        x = self.pooling_fn(x, batch)
        x = act(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        return self.lin2(x)

    def __repr__(self):
        return type(self).__name__
class GIN(torch.nn.Module):
    """A GIN model with a learnable eps (train_eps=True) for graph classification."""

    def __init__(self, num_features, num_layers, hidden, num_classes, readout='sum',
                 dropout_rate=0.5, nonlinearity='relu'):
        super(GIN, self).__init__()
        self.pooling_fn = get_pooling_fn(readout)
        self.dropout_rate = dropout_rate
        self.nonlinearity = nonlinearity
        act = get_nonlinearity(nonlinearity, return_module=True)

        def make_mlp(in_dim):
            # The two-layer MLP used inside every GINConv.
            return Sequential(
                Linear(in_dim, hidden), BN(hidden), act(),
                Linear(hidden, hidden), BN(hidden), act())

        self.conv1 = GINConv(make_mlp(num_features), train_eps=True)
        self.convs = torch.nn.ModuleList(
            GINConv(make_mlp(hidden), train_eps=True) for _ in range(num_layers - 1))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise every learnable submodule."""
        for module in [self.conv1, *self.convs, self.lin1, self.lin2]:
            module.reset_parameters()

    def forward(self, data):
        """Return class logits of shape [num_graphs, num_classes]."""
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.conv1(x, edge_index)
        for conv in self.convs:
            x = conv(x, edge_index)
        # Graph-level readout, then the classification head.
        x = self.pooling_fn(x, batch)
        x = act(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        return self.lin2(x)

    def __repr__(self):
        return type(self).__name__
class GINWithJK(torch.nn.Module):
    """A GIN model with learnable eps, plus Jumping Knowledge across layers."""

    def __init__(self, num_features, num_layers, hidden, num_classes, mode='cat', readout='sum',
                 dropout_rate=0.5, nonlinearity='relu'):
        super(GINWithJK, self).__init__()
        self.pooling_fn = get_pooling_fn(readout)
        self.dropout_rate = dropout_rate
        self.nonlinearity = nonlinearity
        act = get_nonlinearity(nonlinearity, return_module=True)

        def make_mlp(in_dim):
            # The two-layer MLP used inside every GINConv.
            return Sequential(
                Linear(in_dim, hidden), BN(hidden), act(),
                Linear(hidden, hidden), BN(hidden), act())

        self.conv1 = GINConv(make_mlp(num_features), train_eps=True)
        self.convs = torch.nn.ModuleList(
            GINConv(make_mlp(hidden), train_eps=True) for _ in range(num_layers - 1))
        self.jump = JumpingKnowledge(mode)
        # 'cat' concatenates all layer outputs, so lin1 must be widened.
        self.lin1 = Linear(num_layers * hidden if mode == 'cat' else hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)

    def reset_parameters(self):
        """Re-initialise every learnable submodule."""
        for module in [self.conv1, *self.convs, self.jump, self.lin1, self.lin2]:
            module.reset_parameters()

    def forward(self, data):
        """Return class logits of shape [num_graphs, num_classes]."""
        act = get_nonlinearity(self.nonlinearity, return_module=False)
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.conv1(x, edge_index)
        layer_outputs = [x]
        for conv in self.convs:
            x = conv(x, edge_index)
            layer_outputs.append(x)
        # Aggregate the per-layer representations before readout.
        x = self.jump(layer_outputs)
        x = self.pooling_fn(x, batch)
        x = act(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        return self.lin2(x)

    def __repr__(self):
        return type(self).__name__
| 10,168 | 37.086142 | 96 | py |
cwn | cwn-main/mp/test_molec_models.py | import torch
import itertools
import pytest
from data.complex import ComplexBatch
from data.dummy_complexes import get_testing_complex_list
from mp.molec_models import EmbedSparseCIN, OGBEmbedSparseCIN, EmbedSparseCINNoRings, EmbedGIN
from data.data_loading import DataLoader, load_dataset
def test_zinc_sparse_cin0_model_with_batching():
    """Check this runs without errors and that batching and no batching produce the same output."""
    data_list = get_testing_complex_list()
    # Try multiple parameters
    dims = [1, 2]
    bs = list(range(2, len(data_list)+1))
    params = itertools.product(bs, dims, dims)
    torch.manual_seed(0)
    for batch_size, batch_max_dim, model_max_dim in params:
        # A model cannot operate on batches of a higher dimension than its own.
        if batch_max_dim > model_max_dim:
            continue
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        model = EmbedSparseCIN(atom_types=32, bond_types=4, out_size=3, num_layers=3, hidden=5,
                               jump_mode='cat', max_dim=model_max_dim)
        # We use the model in eval mode to avoid problems with batch norm.
        model.eval()
        batched_res = {}
        for batch in data_loader:
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            batched_pred, res = model.forward(batch, include_partial=True)
            for key in res:
                if key not in batched_res:
                    batched_res[key] = []
                batched_res[key].append(res[key])
        for key in batched_res:
            batched_res[key] = torch.cat(batched_res[key], dim=0)
        unbatched_res = {}
        for complex in data_list:
            # Run the same complexes one at a time via singleton batches.
            batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            pred, res = model.forward(batch, include_partial=True)
            for key in res:
                if key not in unbatched_res:
                    unbatched_res[key] = []
                unbatched_res[key].append(res[key])
        for key in unbatched_res:
            unbatched_res[key] = torch.cat(unbatched_res[key], dim=0)
        # Compare every intermediate representation, not just the final output.
        # NOTE(review): print(...) returns None, so the assertion message is
        # always None; the call is used only for its console output on failure.
        for key in set(list(unbatched_res.keys()) + list(batched_res.keys())):
            assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
                print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
def test_embed_sparse_cin_no_rings_model_with_batching():
    """Check this runs without errors and that batching and no batching produce the same output."""
    data_list = get_testing_complex_list()
    # Try multiple parameters. This ring-free model only operates up to dim 1.
    dims = [1]
    bs = list(range(2, len(data_list)+1))
    params = itertools.product(bs, dims, dims)
    torch.manual_seed(0)
    for batch_size, batch_max_dim, model_max_dim in params:
        if batch_max_dim > model_max_dim:
            continue
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        model = EmbedSparseCINNoRings(atom_types=32, bond_types=4, out_size=3, num_layers=3, hidden=5)
        # We use the model in eval mode to avoid problems with batch norm.
        model.eval()
        batched_res = []
        for batch in data_loader:
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            batched_pred = model.forward(batch)
            batched_res.append(batched_pred)
        batched_res = torch.cat(batched_res, dim=0)
        unbatched_res = []
        for complex in data_list:
            # Run the same complexes one at a time via singleton batches.
            batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            pred = model.forward(batch)
            unbatched_res.append(pred)
        unbatched_res = torch.cat(unbatched_res, dim=0)
        assert torch.allclose(unbatched_res, batched_res, atol=1e-6)
def test_embed_gin_model_with_batching():
    """Check this runs without errors and that batching and no batching produce the same output."""
    data_list = get_testing_complex_list()
    # Try multiple parameters. The GIN baseline only operates up to dim 1.
    dims = [1]
    bs = list(range(2, len(data_list)+1))
    params = itertools.product(bs, dims, dims)
    torch.manual_seed(0)
    for batch_size, batch_max_dim, model_max_dim in params:
        if batch_max_dim > model_max_dim:
            continue
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        model = EmbedGIN(atom_types=32, bond_types=4, out_size=3, num_layers=3, hidden=5)
        # We use the model in eval mode to avoid problems with batch norm.
        model.eval()
        batched_res = []
        for batch in data_loader:
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            batched_pred = model.forward(batch)
            batched_res.append(batched_pred)
        batched_res = torch.cat(batched_res, dim=0)
        unbatched_res = []
        for complex in data_list:
            # Run the same complexes one at a time via singleton batches.
            batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            pred = model.forward(batch)
            unbatched_res.append(pred)
        unbatched_res = torch.cat(unbatched_res, dim=0)
        assert torch.allclose(unbatched_res, batched_res, atol=1e-6)
@pytest.mark.data
def test_zinc_sparse_cin0_model_with_batching_on_proteins():
    """Check this runs without errors and that batching and no batching produce the same output."""
    dataset = load_dataset('PROTEINS', max_dim=2, fold=0, init_method='mean')
    # Sanity-check the expected dataset and validation-split sizes.
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    max_dim = 2
    torch.manual_seed(0)
    data_loader = DataLoader(dataset, batch_size=32, max_dim=max_dim)
    model = EmbedSparseCIN(atom_types=64, bond_types=4, out_size=3, num_layers=3, hidden=5,
                           jump_mode='cat', max_dim=max_dim)
    model.eval()
    batched_res = {}
    for batch in data_loader:
        # Simulate no edge and two_cell features to test init layer
        batch.cochains[1].x = None
        if len(batch.cochains) == 3:
            batch.cochains[2].x = None
        # ZincSparseCIN assumes features are unidimensional like in ZINC
        batch.cochains[0].x = batch.cochains[0].x[:, :1]
        batched_pred, res = model.forward(batch, include_partial=True)
        for key in res:
            if key not in batched_res:
                batched_res[key] = []
            batched_res[key].append(res[key])
    for key in batched_res:
        batched_res[key] = torch.cat(batched_res[key], dim=0)
    unbatched_res = {}
    for complex in dataset:
        # Run the same complexes one at a time via singleton batches.
        batch = ComplexBatch.from_complex_list([complex], max_dim=max_dim)
        # Simulate no edge and two_cell features to test init layer
        batch.cochains[1].x = None
        if len(batch.cochains) == 3:
            batch.cochains[2].x = None
        # ZincSparseCIN assumes features are unidimensional like in ZINC
        batch.cochains[0].x = batch.cochains[0].x[:, :1]
        pred, res = model.forward(batch, include_partial=True)
        for key in res:
            if key not in unbatched_res:
                unbatched_res[key] = []
            unbatched_res[key].append(res[key])
    for key in unbatched_res:
        unbatched_res[key] = torch.cat(unbatched_res[key], dim=0)
    # Compare every intermediate representation, not just the final output.
    # NOTE(review): print(...) returns None, so the assertion message is
    # always None; the call is used only for its console output on failure.
    for key in set(list(unbatched_res.keys()) + list(batched_res.keys())):
        assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
            print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
def test_ogb_sparse_cin0_model_with_batching():
    """Check this runs without errors and that batching and no batching produce the same output."""
    data_list = get_testing_complex_list()
    # Try multiple parameters
    dims = [1, 2]
    bs = list(range(2, len(data_list)+1))
    params = itertools.product(bs, dims, dims)
    torch.manual_seed(0)
    for batch_size, batch_max_dim, model_max_dim in params:
        # A model cannot operate on batches of a higher dimension than its own.
        if batch_max_dim > model_max_dim:
            continue
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        model = OGBEmbedSparseCIN(out_size=3, num_layers=3, hidden=5,
                                  jump_mode=None, max_dim=model_max_dim)
        # We use the model in eval mode to avoid problems with batch norm.
        model.eval()
        batched_res = {}
        for batch in data_loader:
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            batched_pred, res = model.forward(batch, include_partial=True)
            for key in res:
                if key not in batched_res:
                    batched_res[key] = []
                batched_res[key].append(res[key])
        for key in batched_res:
            batched_res[key] = torch.cat(batched_res[key], dim=0)
        unbatched_res = {}
        for complex in data_list:
            # Run the same complexes one at a time via singleton batches.
            batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
            # Simulate no edge and two_cell features to test init layer
            if len(batch.cochains) >= 2:
                batch.cochains[1].x = None
            if len(batch.cochains) == 3:
                batch.cochains[2].x = None
            pred, res = model.forward(batch, include_partial=True)
            for key in res:
                if key not in unbatched_res:
                    unbatched_res[key] = []
                unbatched_res[key].append(res[key])
        for key in unbatched_res:
            unbatched_res[key] = torch.cat(unbatched_res[key], dim=0)
        # Compare every intermediate representation, not just the final output.
        # NOTE(review): print(...) returns None, so the assertion message is
        # always None; the call is used only for its console output on failure.
        for key in set(list(unbatched_res.keys()) + list(batched_res.keys())):
            assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
                print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
| 11,149 | 38.122807 | 102 | py |
cwn | cwn-main/mp/test_cell_mp.py | import pytest
import torch
from data.helper_test import check_edge_index_are_the_same, check_edge_attr_are_the_same
from mp.cell_mp import CochainMessagePassing
from torch_geometric.nn.conv import MessagePassing
from data.dummy_complexes import (get_square_dot_complex, get_house_complex,
get_colon_complex, get_fullstop_complex,
get_bridged_complex, convert_to_graph)
from data.utils import compute_ring_2complex
def test_edge_propagate_in_cmp():
    """We build a graph in the shape of a house (a triangle on top of a square)
    and test propagation at the edge level."""
    house = get_house_complex()
    edge_params = house.get_cochain_params(dim=1)
    assert edge_params.kwargs['boundary_index'] is not None, edge_params.kwargs['boundary_index']

    # Run one round of message passing over the edge-level cochain.
    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_out, down_out, boundary_out = layer.propagate(
        edge_params.up_index, edge_params.down_index, edge_params.boundary_index,
        x=edge_params.x,
        up_attr=edge_params.kwargs['up_attr'],
        down_attr=edge_params.kwargs['down_attr'],
        boundary_attr=edge_params.kwargs['boundary_attr'])

    # Compare against the hand-computed aggregates.
    assert torch.equal(down_out,
                       torch.tensor([[6], [10], [17], [9], [13], [10]], dtype=torch.float))
    assert torch.equal(up_out,
                       torch.tensor([[0], [0], [11], [0], [9], [8]], dtype=torch.float))
    assert torch.equal(boundary_out,
                       torch.tensor([[3], [5], [7], [5], [9], [8]], dtype=torch.float))
def test_propagate_at_vertex_level_in_cmp():
    """We build a graph in the shape of a house (a triangle on top of a square)
    and test propagation at the vertex level. This makes sure propagate works when
    down_index is None.
    """
    house = get_house_complex()
    vertex_params = house.get_cochain_params(dim=0)

    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_out, down_out, boundary_out = layer.propagate(
        vertex_params.up_index, vertex_params.down_index, vertex_params.boundary_index,
        x=vertex_params.x,
        up_attr=vertex_params.kwargs['up_attr'],
        down_attr=vertex_params.kwargs['down_attr'],
        boundary_attr=vertex_params.kwargs['boundary_attr'])

    # Vertices only receive upper messages; down and boundary messages are zero.
    assert torch.equal(up_out, torch.tensor([[6], [4], [11], [9], [7]], dtype=torch.float))
    assert torch.equal(down_out, torch.zeros(5, 1))
    assert torch.equal(boundary_out, torch.zeros(5, 1))
def test_propagate_at_two_cell_level_in_cmp_when_there_is_a_single_one():
    """We build a graph in the shape of a house (a triangle on top of a square)
    and test propagation at the two_cell level. This makes sure that propagate works when
    up_index is None."""
    house = get_house_complex()
    cell_params = house.get_cochain_params(dim=2)

    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_out, down_out, boundary_out = layer.propagate(
        cell_params.up_index, cell_params.down_index, cell_params.boundary_index,
        x=cell_params.x,
        up_attr=cell_params.kwargs['up_attr'],
        down_attr=cell_params.kwargs['down_attr'],
        boundary_attr=cell_params.kwargs['boundary_attr'])

    # A single two-cell has no upper/lower adjacency, only boundary messages.
    assert torch.equal(up_out, torch.zeros(1, 1))
    assert torch.equal(down_out, torch.zeros(1, 1))
    assert torch.equal(boundary_out, torch.tensor([[14]], dtype=torch.float))
def test_propagate_at_two_cell_level_in_cmp():
    """We build a graph formed of two triangles sharing an edge.
    This makes sure that propagate works when up_index is None."""
    # TODO: Refactor this test to use the kite complex
    # When there is a single two_cell, there is no upper or lower adjacency
    up_index = None
    down_index = torch.tensor([[0, 1],
                               [1, 0]], dtype=torch.long)
    # Add features for the edges shared by the triangles
    down_attr = torch.tensor([[1], [1]])

    # We initialise the vertices with dummy scalar features
    x = torch.tensor([[32], [17]], dtype=torch.float)

    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_out, down_out, _ = layer.propagate(up_index, down_index, None, x=x, down_attr=down_attr)

    # The two cells exchange features through their shared edge.
    assert torch.equal(up_out + down_out, torch.tensor([[17], [32]], dtype=torch.float))
def test_smp_messaging_with_isolated_nodes():
    """
    This checks how pyG handles messages for isolated nodes. This shows that it sends a zero vector.
    """
    square_dot = get_square_dot_complex()
    params = square_dot.get_cochain_params(dim=0)

    mp = MessagePassing()
    out = mp.propagate(edge_index=params.up_index, x=params.x)

    # This confirms pyG returns a zero message to isolated vertices
    assert torch.equal(out[4], torch.zeros_like(out[4]))
    for node in range(4):
        assert not torch.equal(out[node], torch.zeros_like(out[node]))

    # Our framework should agree with pyG and produce no lower messages here.
    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_msg, down_msg, _ = layer.propagate(up_index=params.up_index, down_index=None,
                                          boundary_index=None, x=params.x, up_attr=None)
    assert torch.equal(out, up_msg)
    assert torch.equal(down_msg, torch.zeros_like(down_msg))
def test_cmp_messaging_with_isolated_node_only():
    """
    This checks how pyG handles messages for one isolated node.
    """
    params = get_fullstop_complex().get_cochain_params(dim=0)
    no_edges = torch.LongTensor([[], []])

    mp = MessagePassing()
    mp_out = mp.propagate(edge_index=no_edges, x=params.x)
    # This confirms pyG returns a zero message when edge_index is empty
    assert torch.equal(mp_out, torch.zeros_like(mp_out))

    # This confirms behavior is consistent with our framework
    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_msg, _, _ = layer.propagate(up_index=params.up_index, down_index=None,
                                   boundary_index=None, x=params.x, up_attr=None)
    assert torch.equal(mp_out, up_msg)
def test_cmp_messaging_with_two_isolated_nodes_only():
    """
    This checks how pyG handles messages for two isolated nodes.
    """
    params = get_colon_complex().get_cochain_params(dim=0)
    no_edges = torch.LongTensor([[], []])

    mp = MessagePassing()
    mp_out = mp.propagate(edge_index=no_edges, x=params.x)
    # This confirms pyG returns a zero message when edge_index is empty
    assert torch.equal(mp_out, torch.zeros_like(mp_out))

    # This confirms behavior is consistent with our framework
    layer = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    up_msg, _, _ = layer.propagate(up_index=params.up_index, down_index=None,
                                   boundary_index=None, x=params.x, up_attr=None)
    assert torch.equal(mp_out, up_msg)
def test_cmp_messaging_with_replicated_adjs():
    """
    This checks message passing works as expected in case cells/simplices
    share more than one (co)boundary.
    """
    # The "bridged" complex contains cells that are adjacent through more than
    # one shared (co)boundary, so neighbour features must be counted once per
    # shared (co)boundary, not once per neighbour.
    bridged_complex = get_bridged_complex()
    bridged_graph = convert_to_graph(bridged_complex)
    bridged_complex_from_graph = compute_ring_2complex(
        bridged_graph.x, bridged_graph.edge_index, bridged_graph.edge_attr, bridged_graph.num_nodes,
        bridged_graph.y, init_method='sum', init_edges=True, init_rings=True)
    # Sanity check: the complex lifted from the graph matches the hand-built one
    # up to a permutation of the cells.
    check_edge_index_are_the_same(bridged_complex_from_graph.edges.upper_index, bridged_complex.edges.upper_index)
    check_edge_index_are_the_same(bridged_complex_from_graph.two_cells.lower_index, bridged_complex.two_cells.lower_index)
    check_edge_attr_are_the_same(bridged_complex.cochains[1].boundary_index, bridged_complex.cochains[1].x, bridged_graph.edge_index, bridged_graph.edge_attr)
    check_edge_attr_are_the_same(bridged_complex_from_graph.cochains[1].boundary_index, bridged_complex_from_graph.cochains[1].x, bridged_graph.edge_index, bridged_graph.edge_attr)

    # verify up-messaging with multiple shared coboundaries
    e = bridged_complex.get_cochain_params(dim=1)
    cmp = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    e_up_msg, e_down_msg, e_boundary_msg = cmp.propagate(e.up_index, e.down_index,
                                                         e.boundary_index, x=e.x,
                                                         up_attr=e.kwargs['up_attr'],
                                                         down_attr=e.kwargs['down_attr'],
                                                         boundary_attr=e.kwargs['boundary_attr'])
    # Each row sums the features of all upper-adjacent edges, with neighbours
    # appearing once per shared ring.
    expected_e_up_msg = torch.tensor([[4+5+6+2+3+4],  # edge 0
                                      [3+5+6+1+3+4],  # edge 1
                                      [2+5+6+1+2+4],  # edge 2
                                      [1+5+6+1+2+3],  # edge 3
                                      [1+4+6+2+3+6],  # edge 4
                                      [1+4+5+2+3+5]], # edge 5
                                     dtype=torch.float)
    assert torch.equal(e_up_msg, expected_e_up_msg)

    # same but start from graph instead
    e = bridged_complex_from_graph.get_cochain_params(dim=1)
    cmp = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    e_up_msg, e_down_msg, e_boundary_msg = cmp.propagate(e.up_index, e.down_index,
                                                         e.boundary_index, x=e.x,
                                                         up_attr=e.kwargs['up_attr'],
                                                         down_attr=e.kwargs['down_attr'],
                                                         boundary_attr=e.kwargs['boundary_attr'])
    # Same expectations as above, but rows are reordered to follow the edge
    # ordering produced by the graph lifting (original index in parentheses).
    expected_e_up_msg = torch.tensor([[4+5+6+2+3+4],  # edge 0-1 (0)
                                      [1+5+6+1+2+3],  # edge 0-3 (3)
                                      [3+5+6+1+3+4],  # edge 1-2 (1)
                                      [1+4+5+2+3+5],  # edge 1-4 (5)
                                      [2+5+6+1+2+4],  # edge 2-3 (2)
                                      [1+4+6+2+3+6]], # edge 3-4 (4)
                                     dtype=torch.float)
    assert torch.equal(e_up_msg, expected_e_up_msg)

    # verify down-messaging with multiple shared boundaries
    t = bridged_complex.get_cochain_params(dim=2)
    cmp = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    t_up_msg, t_down_msg, t_boundary_msg = cmp.propagate(t.up_index, t.down_index,
                                                         t.boundary_index, x=t.x,
                                                         up_attr=t.kwargs['up_attr'],
                                                         down_attr=t.kwargs['down_attr'],
                                                         boundary_attr=t.kwargs['boundary_attr'])
    # Rings sharing two edges receive each other's feature twice.
    expected_t_down_msg = torch.tensor([[2+2+3+3], # ring 0
                                        [1+1+3+3], # ring 1
                                        [1+1+2+2]], # ring 2
                                       dtype=torch.float)
    assert torch.equal(t_down_msg, expected_t_down_msg)

    expected_t_boundary_msg = torch.tensor([[1+6+5+4], # ring 0
                                        [2+3+5+6], # ring 1
                                        [1+2+3+4]], # ring 2
                                       dtype=torch.float)
    assert torch.equal(t_boundary_msg, expected_t_boundary_msg)

    # same but start from graph instead
    t = bridged_complex_from_graph.get_cochain_params(dim=2)
    cmp = CochainMessagePassing(up_msg_size=1, down_msg_size=1)
    t_up_msg, t_down_msg, t_boundary_msg = cmp.propagate(t.up_index, t.down_index,
                                                         t.boundary_index, x=t.x,
                                                         up_attr=t.kwargs['up_attr'],
                                                         down_attr=t.kwargs['down_attr'],
                                                         boundary_attr=t.kwargs['boundary_attr'])
    # Ring features from the 'sum' initialisation over their vertices.
    t0_x = 1+2+4+5  # 12
    t1_x = 2+3+4+5  # 14
    t2_x = 1+2+3+4  # 10
    expected_t_down_msg = torch.tensor([[t1_x + t1_x + t2_x + t2_x], # ring 0-1-4-3 (0)
                                        [t0_x + t0_x + t1_x + t1_x], # ring 0-1-2-3 (2)
                                        [t0_x + t0_x + t2_x + t2_x]], # ring 1-2-3-4 (1)
                                       dtype=torch.float)
    assert torch.equal(t_down_msg, expected_t_down_msg)

    expected_t_boundary_msg = torch.tensor([[1+6+5+4], # ring 0
                                            [1+2+3+4], # ring 2
                                            [2+3+5+6]], # ring 1
                                           dtype=torch.float)
    assert torch.equal(t_boundary_msg, expected_t_boundary_msg)
cwn | cwn-main/data/perm_utils.py | import torch
import numpy as np
from scipy import sparse as sp
from torch_geometric.data import Data
def permute_graph(graph: Data, P: np.ndarray) -> Data:
    """Return a new graph whose nodes (features, labels, adjacency) are permuted by P.

    Args:
        graph: The input graph. Must not carry edge features (unsupported).
        P: An (n, n) permutation matrix over the graph's n nodes.

    Returns:
        A new ``Data`` object equivalent to ``graph`` with nodes relabelled by P.

    Raises:
        AssertionError: If ``graph`` has edge features or P is not a valid
            permutation matrix for n nodes.
    """
    # TODO: support edge features and their permutation
    assert graph.edge_attr is None

    # Check validity of permutation matrix
    n = graph.x.size(0)
    if not is_valid_permutation_matrix(P, n):
        # Previously raised a bare AssertionError; include a diagnostic message.
        raise AssertionError(
            f'Invalid permutation matrix of shape {P.shape} for a graph with {n} nodes.')

    # Apply permutation to features
    x = graph.x.numpy()
    x_perm = torch.FloatTensor(P @ x)

    # Apply perm to labels, if per-node
    if graph.y is None:
        y_perm = None
    elif graph.y.size(0) == n:
        y = graph.y.numpy()
        y_perm = torch.tensor(P @ y)
    else:
        # Graph-level label: unaffected by node permutations.
        y_perm = graph.y.clone().detach()

    # Apply permutation to adjacencies, if any
    if graph.edge_index.size(1) > 0:
        inps = (np.ones(graph.edge_index.size(1)),
                (graph.edge_index[0].numpy(), graph.edge_index[1].numpy()))
        A = sp.csr_matrix(inps, shape=(n, n))
        P = sp.csr_matrix(P)
        # Permute rows and columns: A' = P A P^T.
        A_perm = P.dot(A).dot(P.transpose()).tocoo()
        edge_index_perm = torch.LongTensor(np.vstack((A_perm.row, A_perm.col)))
    else:
        edge_index_perm = graph.edge_index.clone().detach()

    # Instantiate new graph
    graph_perm = Data(x=x_perm, edge_index=edge_index_perm, y=y_perm)
    return graph_perm
def is_valid_permutation_matrix(P: np.ndarray, n: int):
    """Return whether P is an n x n permutation matrix different from the identity.

    All conditions are accumulated (no short-circuiting): P must be 2D with n
    rows, every row and column must sum to one and have maximum one, and — for
    n > 1 — minimum zero, while being distinct from the identity matrix.
    """
    ones = np.ones(n)
    ok = P.ndim == 2
    ok &= P.shape[0] == n
    ok &= np.all(P.sum(0) == ones)
    ok &= np.all(P.sum(1) == ones)
    ok &= np.all(P.max(0) == ones)
    ok &= np.all(P.max(1) == ones)
    if n > 1:
        zeros = np.zeros(n)
        ok &= np.all(P.min(0) == zeros)
        ok &= np.all(P.min(1) == zeros)
        # Reject the identity: we want permutations that actually move nodes.
        ok &= not np.array_equal(P, np.eye(n))
    return ok
def generate_permutation_matrices(size, amount=10, seed=43):
    """Sample `amount` valid (non-identity) permutation matrices of the given size.

    Sampling is deterministic for a fixed seed; identity draws are rejected
    and redrawn until `amount` valid matrices have been collected.
    """
    rng = np.random.RandomState(seed)
    identity = np.eye(size)
    matrices = list()
    while len(matrices) < amount:
        candidate = identity[rng.permutation(size)]
        if is_valid_permutation_matrix(candidate, size):
            matrices.append(candidate)
    return matrices
cwn | cwn-main/data/test_data.py | import torch
from data.dummy_complexes import get_house_complex
def test_up_and_down_feature_extraction_on_house_complex():
    house_complex = get_house_complex()

    # Vertex-level upper features are taken from the connecting edges.
    v_params = house_complex.get_cochain_params(dim=0)
    assert torch.equal(
        v_params.kwargs['up_attr'],
        torch.tensor([[1], [1], [4], [4], [2], [2], [3], [3], [6], [6], [5], [5]],
                     dtype=torch.float))

    # Edge-level features: up from the two-cells, down from shared vertices.
    e_params = house_complex.get_cochain_params(dim=1)
    assert torch.equal(e_params.kwargs['up_attr'],
                       torch.tensor([[1], [1], [1], [1], [1], [1]], dtype=torch.float))
    assert torch.equal(
        e_params.kwargs['down_attr'],
        torch.tensor([[2], [2], [1], [1], [3], [3], [3], [3], [4], [4], [4], [4],
                      [3], [3], [4], [4], [5], [5]], dtype=torch.float))

    # The single two-cell has no upper or lower neighbours.
    t_params = house_complex.get_cochain_params(dim=2)
    assert t_params.kwargs['up_attr'] is None
    assert t_params.kwargs['down_attr'] is None
def test_get_all_cochain_params_with_max_dim_one_and_no_top_features():
    house_complex = get_house_complex()
    params = house_complex.get_all_cochain_params(max_dim=1, include_top_features=False)
    assert len(params) == 2
    v_params, e_params = params

    assert torch.equal(
        v_params.kwargs['up_attr'],
        torch.tensor([[1], [1], [4], [4], [2], [2], [3], [3], [6], [6], [5], [5]],
                     dtype=torch.float))

    # Top features are excluded, but the upper index itself must still exist.
    assert e_params.kwargs['up_attr'] is None
    assert e_params.up_index is not None
    assert e_params.up_index.size(1) == 6

    assert torch.equal(
        e_params.kwargs['down_attr'],
        torch.tensor([[2], [2], [1], [1], [3], [3], [3], [3], [4], [4], [4], [4],
                      [3], [3], [4], [4], [5], [5]], dtype=torch.float))
cwn | cwn-main/data/helper_test.py | import itertools
import torch
import networkx as nx
from torch_geometric.utils import convert
from torch_geometric.data import Data
def check_edge_index_are_the_same(upper_index, edge_index):
    """Checks that two edge/cell indexes describe the same set of undirected edges."""
    # These two tensors should have the same content but in different order.
    assert upper_index.size() == edge_index.size()

    total = edge_index.size(1)
    edges_a = set()
    edges_b = set()
    for col in range(total):
        # Canonicalise each column as a sorted endpoint pair.
        edges_a.add(tuple(sorted((edge_index[0, col].item(), edge_index[1, col].item()))))
        edges_b.add(tuple(sorted((upper_index[0, col].item(), upper_index[1, col].item()))))
    assert edges_a == edges_b
def get_table(boundary_index):
    """Indexes each cell based on the boundary index.

    Returns a dict mapping each cell id (row 1) to the list of its
    boundary ids (row 0), in column order.
    """
    mapping = dict()
    for col in range(boundary_index.size(1)):
        cell = boundary_index[1, col].item()
        mapping.setdefault(cell, []).append(boundary_index[0, col].item())
    return mapping
def check_edge_attr_are_the_same(boundary_index, ex, edge_index, edge_attr):
    """Checks that a pairs of edge attributes are identical."""
    # The maximum node that has an edge must be the same.
    assert boundary_index[0, :].max() == edge_index.max()
    # The number of edges present in both tensors should be the same.
    assert boundary_index.size(1) == edge_index.size(1)

    # Invert the cell -> vertices table into a vertices -> cell lookup.
    edge_to_id = {tuple(sorted(vertices)): cell_id
                  for cell_id, vertices in get_table(boundary_index).items()}

    for col in range(boundary_index.size(1)):
        endpoints = tuple(sorted((edge_index[0, col].item(), edge_index[1, col].item())))
        # NB: edge feats may be multidimensional, so we cannot
        # generally use the `==` operator here
        assert torch.equal(ex[edge_to_id[endpoints]].squeeze(), edge_attr[col].squeeze())
def get_rings(n, edge_index, max_ring):
    """Extracts the induced cycles from a graph using networkx."""
    data = Data(torch.zeros((n, 1)), edge_index=edge_index)
    graph = convert.to_networkx(data)

    def adjacent_in_cycle(i1, i2, cycle):
        # Consecutive positions, or the wrap-around pair (first, last).
        return i2 == i1 + 1 or (i1 == 0 and i2 == len(cycle) - 1)

    def chordless(cycle):
        for (i1, v1), (i2, v2) in itertools.combinations(enumerate(cycle), 2):
            if not adjacent_in_cycle(i1, i2, cycle) and graph.has_edge(v1, v2):
                return False
        return True

    rings = set()
    for cycle in nx.simple_cycles(graph):
        # Because we need to use a DiGraph for this method, it will also return each edge
        # as a cycle. So we skip these together with cycles above the maximum length.
        # We also skip the cycles with chords and store each ring in a canonical
        # (sorted) form.
        if 2 < len(cycle) <= max_ring and chordless(cycle):
            rings.add(tuple(sorted(cycle)))
    return rings
def get_complex_rings(r_boundary_index, e_boundary_index):
    """Extracts the vertices that are part of a ring."""
    # Construct the edge and ring tables.
    ring_to_edges = get_table(r_boundary_index)
    edge_to_vertices = get_table(e_boundary_index)

    rings = set()
    for edges in ring_to_edges.values():
        # Compose the two tables to obtain the ring's vertices, dropping
        # duplicates, then canonicalise by sorting.
        vertices = {v for edge in edges for v in edge_to_vertices[edge]}
        rings.add(tuple(sorted(vertices)))
    return rings
def compare_complexes(yielded, expected, include_down_adj):
    """Checks that two cell complexes are the same.

    Args:
        yielded: The complex produced by the code under test.
        expected: The reference complex to compare against.
        include_down_adj: Whether lower adjacencies (and shared boundaries)
            are expected to be populated.
    """
    assert yielded.dimension == expected.dimension
    assert torch.equal(yielded.y, expected.y)
    for dim in range(expected.dimension + 1):
        y_cochain = yielded.cochains[dim]
        e_cochain = expected.cochains[dim]

        assert y_cochain.num_cells == e_cochain.num_cells
        # NOTE: the original asserted num_cells_up twice; the duplicate was removed.
        assert y_cochain.num_cells_up == e_cochain.num_cells_up
        assert y_cochain.num_cells_down == e_cochain.num_cells_down, dim
        assert torch.equal(y_cochain.x, e_cochain.x)

        if dim > 0:
            assert torch.equal(y_cochain.boundary_index, e_cochain.boundary_index)
            if include_down_adj:
                # Lower adjacency must either be absent in both or match exactly.
                if y_cochain.lower_index is None:
                    assert e_cochain.lower_index is None
                    assert y_cochain.shared_boundaries is None
                    assert e_cochain.shared_boundaries is None
                else:
                    assert torch.equal(y_cochain.lower_index, e_cochain.lower_index)
                    assert torch.equal(y_cochain.shared_boundaries, e_cochain.shared_boundaries)
        else:
            # Vertices have no boundaries, so all boundary-related fields are empty.
            assert y_cochain.boundary_index is None and e_cochain.boundary_index is None
            assert y_cochain.lower_index is None and e_cochain.lower_index is None
            assert y_cochain.shared_boundaries is None and e_cochain.shared_boundaries is None

        if dim < expected.dimension:
            # Upper adjacency must either be absent in both or match exactly.
            if y_cochain.upper_index is None:
                assert e_cochain.upper_index is None
                assert y_cochain.shared_coboundaries is None
                assert e_cochain.shared_coboundaries is None
            else:
                assert torch.equal(y_cochain.upper_index, e_cochain.upper_index)
                assert torch.equal(y_cochain.shared_coboundaries, e_cochain.shared_coboundaries)
        else:
            # Top-dimensional cells cannot have coboundaries.
            assert y_cochain.upper_index is None and e_cochain.upper_index is None
            assert y_cochain.shared_coboundaries is None and e_cochain.shared_coboundaries is None
def compare_complexes_without_2feats(yielded, expected, include_down_adj):
    """Checks that two cell complexes are the same, except for the features of the 2-cells.

    Args:
        yielded: The complex produced by the code under test.
        expected: The reference complex to compare against.
        include_down_adj: Whether lower adjacencies (and shared boundaries)
            are expected to be populated.
    """
    assert yielded.dimension == expected.dimension
    assert torch.equal(yielded.y, expected.y)
    for dim in range(expected.dimension + 1):
        y_cochain = yielded.cochains[dim]
        e_cochain = expected.cochains[dim]

        assert y_cochain.num_cells == e_cochain.num_cells
        # NOTE: the original asserted num_cells_up twice; the duplicate was removed.
        assert y_cochain.num_cells_up == e_cochain.num_cells_up
        assert y_cochain.num_cells_down == e_cochain.num_cells_down, dim

        if dim > 0:
            assert torch.equal(y_cochain.boundary_index, e_cochain.boundary_index)
            if include_down_adj:
                # Lower adjacency must either be absent in both or match exactly.
                if y_cochain.lower_index is None:
                    assert e_cochain.lower_index is None
                    assert y_cochain.shared_boundaries is None
                    assert e_cochain.shared_boundaries is None
                else:
                    assert torch.equal(y_cochain.lower_index, e_cochain.lower_index)
                    assert torch.equal(y_cochain.shared_boundaries, e_cochain.shared_boundaries)
        else:
            # Vertices have no boundaries, so all boundary-related fields are empty.
            assert y_cochain.boundary_index is None and e_cochain.boundary_index is None
            assert y_cochain.lower_index is None and e_cochain.lower_index is None
            assert y_cochain.shared_boundaries is None and e_cochain.shared_boundaries is None

        if dim < expected.dimension:
            # Upper adjacency must either be absent in both or match exactly.
            if y_cochain.upper_index is None:
                assert e_cochain.upper_index is None
                assert y_cochain.shared_coboundaries is None
                assert e_cochain.shared_coboundaries is None
            else:
                assert torch.equal(y_cochain.upper_index, e_cochain.upper_index)
                assert torch.equal(y_cochain.shared_coboundaries, e_cochain.shared_coboundaries)
        else:
            # Top-dimensional cells cannot have coboundaries.
            assert y_cochain.upper_index is None and e_cochain.upper_index is None
            assert y_cochain.shared_coboundaries is None and e_cochain.shared_coboundaries is None

        # 2-cell features are deliberately excluded from the comparison: both
        # complexes are expected to have none at dimension 2.
        if dim != 2:
            assert torch.equal(y_cochain.x, e_cochain.x)
        else:
            assert y_cochain.x is None and e_cochain.x is None
| 8,463 | 41.532663 | 98 | py |
cwn | cwn-main/data/data_loading.py | """
Code is adapted from https://github.com/rusty1s/pytorch_geometric/blob/6442a6e287563b39dae9f5fcffc52cd780925f89/torch_geometric/data/dataloader.py
Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import torch
from torch.utils.data.dataloader import default_collate
from torch_geometric.data import Data, Batch
from torch._six import container_abcs, string_classes, int_classes
from definitions import ROOT_DIR
from data.complex import Cochain, CochainBatch, Complex, ComplexBatch
from data.datasets import (
load_sr_graph_dataset, load_tu_graph_dataset, load_zinc_graph_dataset, load_ogb_graph_dataset,
load_ring_transfer_dataset, load_ring_lookup_dataset, load_pep_f_graph_dataset, load_pep_s_graph_dataset)
from data.datasets import (
SRDataset, ClusterDataset, TUDataset, ComplexDataset, FlowDataset,
OceanDataset, ZincDataset, CSLDataset, OGBDataset, RingTransferDataset, RingLookupDataset,
DummyDataset, DummyMolecularDataset, PeptidesFunctionalDataset, PeptidesStructuralDataset)
class Collater(object):
    """Object that converts python lists of objects into the appropriate storage format.

    Args:
        follow_batch: Creates assignment batch vectors for each key in the list.
        max_dim: The maximum dimension of the cochains considered from the supplied list.
    """
    def __init__(self, follow_batch, max_dim=2):
        self.follow_batch = follow_batch
        self.max_dim = max_dim

    def collate(self, batch):
        """Converts a data list in the right storage format."""
        sample = batch[0]
        # Dispatch on the type of the first element. The order of the checks
        # matters: e.g. Cochain/Complex lists must be handled before the
        # generic sequence/mapping cases.
        if isinstance(sample, Cochain):
            return CochainBatch.from_cochain_list(batch, self.follow_batch)
        if isinstance(sample, Complex):
            return ComplexBatch.from_complex_list(batch, self.follow_batch, max_dim=self.max_dim)
        if isinstance(sample, Data):
            return Batch.from_data_list(batch, self.follow_batch)
        if isinstance(sample, torch.Tensor):
            return default_collate(batch)
        if isinstance(sample, float):
            return torch.tensor(batch, dtype=torch.float)
        if isinstance(sample, int_classes):
            return torch.tensor(batch)
        if isinstance(sample, string_classes):
            return batch
        if isinstance(sample, container_abcs.Mapping):
            # Collate each key across the whole batch recursively.
            return {key: self.collate([d[key] for d in batch]) for key in sample}
        if isinstance(sample, tuple) and hasattr(sample, '_fields'):
            # Named tuples are rebuilt field by field.
            return type(sample)(*(self.collate(items) for items in zip(*batch)))
        if isinstance(sample, container_abcs.Sequence):
            return [self.collate(items) for items in zip(*batch)]

        raise TypeError('DataLoader found invalid type: {}'.format(type(sample)))

    def __call__(self, batch):
        return self.collate(batch)
class DataLoader(torch.utils.data.DataLoader):
    r"""Data loader which merges cochain complexes into to a mini-batch.

    Args:
        dataset (Dataset): The dataset from which to load the data.
        batch_size (int, optional): How many samples per batch to load.
            (default: :obj:`1`)
        shuffle (bool, optional): If set to :obj:`True`, the data will be
            reshuffled at every epoch. (default: :obj:`False`)
        follow_batch (list or tuple, optional): Creates assignment batch
            vectors for each key in the list. (default: :obj:`[]`)
        max_dim (int): The maximum dimension of the chains to be used in the batch.
            (default: 2)
    """
    def __init__(self, dataset, batch_size=1, shuffle=False, follow_batch=[],
                 max_dim=2, **kwargs):
        # Any user-supplied collate_fn is always overridden by our Collater.
        kwargs.pop("collate_fn", None)

        # Save for Pytorch Lightning...
        self.follow_batch = follow_batch

        super(DataLoader, self).__init__(
            dataset, batch_size, shuffle,
            collate_fn=Collater(follow_batch, max_dim), **kwargs)
def load_dataset(name, root=os.path.join(ROOT_DIR, 'datasets'), max_dim=2, fold=0,
                 init_method='sum', n_jobs=2, **kwargs) -> ComplexDataset:
    """Returns a ComplexDataset with the specified name and initialised with the given params.

    Args:
        name: Dataset identifier (e.g. 'ZINC', 'MOLHIV', a TU dataset name, an
            'sr...' family, 'FLOW', 'OCEAN', 'CSL', ...).
        root: Directory under which each dataset stores its files.
        max_dim: Maximum dimension of the cochains in the complexes.
        fold: Cross-validation fold, for datasets that support folds.
        init_method: Aggregation used to initialise higher-cell features.
        n_jobs: Number of parallel jobs used when lifting graphs to complexes.
        **kwargs: Dataset-specific options, e.g. `max_ring_size`,
            `include_down_adj`, `use_edge_features`, `simple_features`,
            `flow_points`, `train_orient`/`test_orient`.

    Raises:
        NotImplementedError: If `name` does not match any known dataset.
    """
    # Strongly-regular graph families.
    if name.startswith('sr'):
        dataset = SRDataset(os.path.join(root, 'SR_graphs'), name, max_dim=max_dim,
                            num_classes=16, max_ring_size=kwargs.get('max_ring_size', None),
                            n_jobs=n_jobs, init_method=init_method)
    elif name == 'CLUSTER':
        dataset = ClusterDataset(os.path.join(root, 'CLUSTER'), max_dim)
    # TU benchmark datasets. Social datasets (IMDB*) use node degrees as tags;
    # the bio/chemical ones come with their own node labels.
    elif name == 'IMDBBINARY':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=True, init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'IMDBMULTI':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=3,
                            fold=fold, degree_as_tag=True, init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'REDDITBINARY':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=False, init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'REDDITMULTI5K':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=5,
                            fold=fold, degree_as_tag=False, init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    # NOTE(review): the following TU branches additionally read
    # kwargs['include_down_adj'] (a required key), unlike the ones above.
    elif name == 'PROTEINS':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=False, include_down_adj=kwargs['include_down_adj'],
                            init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'NCI1':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=False, include_down_adj=kwargs['include_down_adj'],
                            init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'NCI109':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=False, include_down_adj=kwargs['include_down_adj'],
                            init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'PTC':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=False, include_down_adj=kwargs['include_down_adj'],
                            init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    elif name == 'MUTAG':
        dataset = TUDataset(os.path.join(root, name), name, max_dim=max_dim, num_classes=2,
                            fold=fold, degree_as_tag=False, include_down_adj=kwargs['include_down_adj'],
                            init_method=init_method, max_ring_size=kwargs.get('max_ring_size', None))
    # Trajectory/orientation-flow benchmarks.
    elif name == 'FLOW':
        dataset = FlowDataset(os.path.join(root, name), name, num_points=kwargs['flow_points'],
            train_samples=1000, val_samples=200, train_orient=kwargs['train_orient'],
            test_orient=kwargs['test_orient'], n_jobs=n_jobs)
    elif name == 'OCEAN':
        dataset = OceanDataset(os.path.join(root, name), name, train_orient=kwargs['train_orient'],
                               test_orient=kwargs['test_orient'])
    # Synthetic ring benchmarks.
    elif name == 'RING-TRANSFER':
        dataset = RingTransferDataset(os.path.join(root, name), nodes=kwargs['max_ring_size'])
    elif name == 'RING-LOOKUP':
        dataset = RingLookupDataset(os.path.join(root, name), nodes=kwargs['max_ring_size'])
    # Molecular benchmarks.
    elif name == 'ZINC':
        dataset = ZincDataset(os.path.join(root, name), max_ring_size=kwargs['max_ring_size'],
                              include_down_adj=kwargs['include_down_adj'],
                              use_edge_features=kwargs['use_edge_features'], n_jobs=n_jobs)
    elif name == 'ZINC-FULL':
        dataset = ZincDataset(os.path.join(root, name), subset=False, max_ring_size=kwargs['max_ring_size'],
                              include_down_adj=kwargs['include_down_adj'],
                              use_edge_features=kwargs['use_edge_features'], n_jobs=n_jobs)
    elif name == 'CSL':
        dataset = CSLDataset(os.path.join(root, name), max_ring_size=kwargs['max_ring_size'],
                             fold=fold, n_jobs=n_jobs)
    # OGB graph-property-prediction datasets (mapped to their official names).
    elif name in ['MOLHIV', 'MOLPCBA', 'MOLTOX21', 'MOLTOXCAST', 'MOLMUV',
                  'MOLBACE', 'MOLBBBP', 'MOLCLINTOX', 'MOLSIDER', 'MOLESOL',
                  'MOLFREESOLV', 'MOLLIPO']:
        official_name = 'ogbg-'+name.lower()
        dataset = OGBDataset(os.path.join(root, name), official_name, max_ring_size=kwargs['max_ring_size'],
                             use_edge_features=kwargs['use_edge_features'], simple=kwargs['simple_features'],
                             include_down_adj=kwargs['include_down_adj'], init_method=init_method, n_jobs=n_jobs)
    # Small dummy datasets used in tests.
    elif name == 'DUMMY':
        dataset = DummyDataset(os.path.join(root, name))
    elif name == 'DUMMYM':
        dataset = DummyMolecularDataset(os.path.join(root, name))
    # Long-range peptide benchmarks.
    elif name == 'PEPTIDES-F':
        dataset = PeptidesFunctionalDataset(os.path.join(root, name), max_ring_size=kwargs['max_ring_size'],
                             include_down_adj=kwargs['include_down_adj'], init_method=init_method, n_jobs=n_jobs)
    elif name == 'PEPTIDES-S':
        dataset = PeptidesStructuralDataset(os.path.join(root, name), max_ring_size=kwargs['max_ring_size'],
                             include_down_adj=kwargs['include_down_adj'], init_method=init_method, n_jobs=n_jobs)
    else:
        raise NotImplementedError(name)
    return dataset
def load_graph_dataset(name, root=os.path.join(ROOT_DIR, 'datasets'), fold=0, **kwargs):
    """Returns a graph dataset with the specified name and initialised with the given params.

    Args:
        name: Dataset identifier (e.g. 'ZINC', 'MUTAG', 'MOLHIV', 'RING-TRANSFER', ...).
        root: Directory under which the raw data is stored / downloaded.
        fold: Cross-validation fold to use for the TU benchmarks.
        **kwargs: Extra options; only 'max_ring_size' is read (RING-* datasets).

    Returns:
        A tuple (graph_list, train_ids, val_ids, test_ids, num_classes), where
        num_classes may be None (SR datasets) or the number of targets/classes.
    """
    # The TU benchmarks share one loader; they only differ in whether the node
    # degree is used as the input tag and in the number of classes.
    tu_config = {
        'IMDBBINARY': (True, 2),
        'IMDBMULTI': (True, 3),
        'REDDITBINARY': (False, 2),
        'REDDITMULTI5K': (False, 5),
        'PROTEINS': (False, 2),
        'NCI1': (False, 2),
        'NCI109': (False, 2),
        'PTC': (False, 2),
        'MUTAG': (False, 2),
    }
    ogb_names = ['MOLHIV', 'MOLPCBA', 'MOLTOX21', 'MOLTOXCAST', 'MOLMUV',
                 'MOLBACE', 'MOLBBBP', 'MOLCLINTOX', 'MOLSIDER', 'MOLESOL',
                 'MOLFREESOLV', 'MOLLIPO']
    if name.startswith('sr'):
        graph_list, train_ids, val_ids, test_ids = load_sr_graph_dataset(name, root=root)
        data = (graph_list, train_ids, val_ids, test_ids, None)
    elif name in tu_config:
        degree_as_tag, num_classes = tu_config[name]
        graph_list, train_ids, val_ids, test_ids = load_tu_graph_dataset(
            name, root=root, degree_as_tag=degree_as_tag, fold=fold, seed=0)
        data = (graph_list, train_ids, val_ids, test_ids, num_classes)
    elif name == 'ZINC':
        # A single regression target.
        graph_list, train_ids, val_ids, test_ids = load_zinc_graph_dataset(root=root)
        data = (graph_list, train_ids, val_ids, test_ids, 1)
    elif name == 'ZINC-FULL':
        graph_list, train_ids, val_ids, test_ids = load_zinc_graph_dataset(root=root, subset=False)
        data = (graph_list, train_ids, val_ids, test_ids, 1)
    elif name == 'PEPTIDES-F':
        graph_list, train_ids, val_ids, test_ids = load_pep_f_graph_dataset(root=root)
        data = (graph_list, train_ids, val_ids, test_ids, 2)
    elif name == 'PEPTIDES-S':
        graph_list, train_ids, val_ids, test_ids = load_pep_s_graph_dataset(root=root)
        data = (graph_list, train_ids, val_ids, test_ids, 2)
    elif name in ogb_names:
        graph_list, train_ids, val_ids, test_ids = load_ogb_graph_dataset(
            os.path.join(root, name), 'ogbg-' + name.lower())
        data = (graph_list, train_ids, val_ids, test_ids, graph_list.num_tasks)
    elif name == 'RING-TRANSFER':
        graph_list, train_ids, val_ids, test_ids = load_ring_transfer_dataset(
            nodes=kwargs['max_ring_size'], num_classes=5)
        data = (graph_list, train_ids, val_ids, test_ids, 5)
    elif name == 'RING-LOOKUP':
        graph_list, train_ids, val_ids, test_ids = load_ring_lookup_dataset(
            nodes=kwargs['max_ring_size'])
        data = (graph_list, train_ids, val_ids, test_ids, kwargs['max_ring_size'] - 1)
    else:
        # Include the name in the error, consistently with the complex loader above.
        raise NotImplementedError(name)
    return data
| 14,860 | 56.378378 | 146 | py |
cwn | cwn-main/data/tu_utils.py | """
Based on code from https://github.com/weihua916/powerful-gnns/blob/master/util.py
MIT License
Copyright (c) 2021 Weihua Hu
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import networkx as nx
import numpy as np
import torch
import torch_geometric
from torch_geometric.data import Data
from sklearn.model_selection import StratifiedKFold
class S2VGraph(object):
    """Lightweight container for a labelled graph, as used by the GIN codebase.

    Attributes (mirroring the original powerful-gnns implementation):
        g: a networkx graph
        label: an integer graph label
        node_tags: a list of integer node tags
        node_features: a torch float tensor (one-hot tag encoding), filled in later
        edge_mat: a torch long tensor containing the edge list, filled in later
        neighbors: adjacency lists (without self-loops), filled in later
        max_neighbor: maximum node degree, filled in later
    """

    def __init__(self, g, label, node_tags=None, node_features=None):
        # Only the graph, label and tags are stored at construction time; the
        # remaining fields are populated by `load_data` after parsing. Note the
        # `node_features` argument is accepted but not stored, matching the
        # upstream implementation.
        self.g = g
        self.label = label
        self.node_tags = node_tags
        self.neighbors = []
        self.node_features = 0
        self.edge_mat = 0
        self.max_neighbor = 0
def load_data(path, dataset, degree_as_tag):
    """Parses a TU-style text file into a list of S2VGraph objects.

    Args:
        path: directory containing '<dataset>.txt'.
        dataset: name of the dataset (the file stem).
        degree_as_tag: if True, node degrees replace the node tags read from
            the file before the one-hot feature encoding is built.

    Returns:
        A tuple (g_list, num_classes) where g_list is a list of S2VGraph and
        num_classes is the number of distinct graph labels found.
    """
    print('loading data')
    g_list = []
    label_dict = {}
    feat_dict = {}
    with open('%s/%s.txt' % (path, dataset), 'r') as f:
        # First line: number of graphs in the file.
        n_g = int(f.readline().strip())
        for i in range(n_g):
            # Per-graph header: "<num_nodes> <graph_label>".
            row = f.readline().strip().split()
            n, l = [int(w) for w in row]
            if not l in label_dict:
                # Remap raw labels to contiguous ids in order of appearance.
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            node_features = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                # Node line: "<tag> <num_neighbours> <neighbours...> [attrs...]".
                row = f.readline().strip().split()
                tmp = int(row[1]) + 2
                if tmp == len(row):
                    # no node attributes
                    row = [int(w) for w in row]
                    attr = None
                else:
                    row, attr = [int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]])
                if not row[0] in feat_dict:
                    # Remap raw tags to contiguous ids in order of appearance.
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])
                # NOTE(review): after the truncation above len(row) == tmp in both
                # branches, so this condition is never True and node attributes
                # are silently dropped (node_features stays empty). This matches
                # the upstream powerful-gnns code; verify if attributes are needed.
                if tmp > len(row):
                    node_features.append(attr)
                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])
            if node_features != []:
                node_features = np.stack(node_features)
                node_feature_flag = True
            else:
                node_features = None
                node_feature_flag = False
            assert len(g) == n
            g_list.append(S2VGraph(g, l, node_tags))
    #add labels and edge_mat
    for g in g_list:
        # Build symmetric adjacency lists (no self-loops, per nx.Graph edges).
        g.neighbors = [[] for i in range(len(g.g))]
        for i, j in g.g.edges():
            g.neighbors[i].append(j)
            g.neighbors[j].append(i)
        degree_list = []
        for i in range(len(g.g)):
            g.neighbors[i] = g.neighbors[i]  # NOTE(review): no-op assignment, kept from upstream
            degree_list.append(len(g.neighbors[i]))
        g.max_neighbor = max(degree_list)
        # Replace the raw label with its contiguous id.
        g.label = label_dict[g.label]
        # Duplicate each undirected edge in both directions for the edge matrix.
        edges = [list(pair) for pair in g.g.edges()]
        edges.extend([[i, j] for j, i in edges])
        deg_list = list(dict(g.g.degree(range(len(g.g)))).values()) # <- this might not be used...!?
        g.edge_mat = torch.LongTensor(edges).transpose(0,1)
    if degree_as_tag:
        for g in g_list:
            g.node_tags = list(dict(g.g.degree).values())
            # ^^^ !? it should probably be replaced by the following one:
            # g.node_tags = [g.g.degree[node] for node in range(len(g.g))]
    #Extracting unique tag labels
    tagset = set([])
    for g in g_list:
        tagset = tagset.union(set(g.node_tags))
    tagset = list(tagset)
    tag2index = {tagset[i]:i for i in range(len(tagset))}
    # One-hot encode the node tags into the feature matrices.
    for g in g_list:
        g.node_features = torch.zeros(len(g.node_tags), len(tagset))
        g.node_features[range(len(g.node_tags)), [tag2index[tag] for tag in g.node_tags]] = 1
    # ==================
    # Here we recompute degree encodings with external code,
    # as we observed some unexpected behaviors likely due to
    # incompatibilities w.r.t. python versions
    # ==================
    def get_node_degrees(graph):
        # Returns a [num_nodes, 1] tensor of node degrees for one graph.
        edge_index = graph.edge_mat
        if edge_index.shape[1] == 0: # just isolated nodes
            degrees = torch.zeros((graph.node_features.shape[0],1))
        else:
            degrees = torch_geometric.utils.degree(edge_index[0]).unsqueeze(1)
        return degrees
    if degree_as_tag:
        # 1. cumulate node degrees
        degs = torch.cat([get_node_degrees(graph) for graph in g_list], 0)
        # 2. compute unique values
        uniques, corrs = np.unique(degs, return_inverse=True, axis=0)
        # 3. encode
        pointer = 0
        for graph in g_list:
            n = graph.node_features.shape[0]
            hots = torch.LongTensor(corrs[pointer:pointer+n])
            graph.node_features = torch.nn.functional.one_hot(hots, len(uniques)).float()
            pointer += n
    # ====================
    print('# classes: %d' % len(label_dict))
    print('# maximum node tag: %d' % len(tagset))
    print("# data: %d" % len(g_list))
    return g_list, len(label_dict)
def S2V_to_PyG(data):
    """Converts an S2VGraph into a torch_geometric `Data` object.

    Args:
        data: an S2VGraph with `edge_mat`, `node_features` and `label` populated.

    Returns:
        A PyG `Data` object with `edge_index`, `x`, `num_nodes` and `y` set.
    """
    new_data = Data()
    # Direct attribute assignment is equivalent to the original setattr calls
    # with literal attribute names, and more idiomatic.
    new_data.edge_index = data.edge_mat
    new_data.x = data.node_features
    new_data.num_nodes = data.node_features.shape[0]
    # Store the label as a 1-element long tensor, as PyG expects for graph labels.
    new_data.y = torch.tensor(data.label).unsqueeze(0).long()
    return new_data
def separate_data(graph_list, seed, fold_idx):
    """Splits graphs into train/test lists via stratified 10-fold CV.

    Args:
        graph_list: list of S2VGraph objects (each exposing an int `label`).
        seed: random seed for the shuffled fold assignment.
        fold_idx: which of the 10 folds to return (0-9).

    Returns:
        (train_graph_list, test_graph_list) for the requested fold.
    """
    assert 0 <= fold_idx and fold_idx < 10, "fold_idx must be from 0 to 9."
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    labels = [graph.label for graph in graph_list]
    # Materialise all folds, then pick the requested one.
    folds = list(skf.split(np.zeros(len(labels)), labels))
    train_idx, test_idx = folds[fold_idx]
    train_graph_list = [graph_list[i] for i in train_idx]
    test_graph_list = [graph_list[i] for i in test_idx]
    return train_graph_list, test_graph_list
def separate_data_given_split(graph_list, path, fold_idx):
    """Splits data based on pre-computed splits stored on disk.

    Args:
        graph_list: list of graphs to split.
        path: dataset root containing a '10fold_idx' directory with
            'train_idx-<k>.txt' / 'test_idx-<k>.txt' files, where k = fold_idx + 1.
        fold_idx: fold to load; -1 is deliberately admitted by the check below
            and selects the '-0' index files.

    Returns:
        (train_graph_list, test_graph_list)
    """
    # The condition admits -1 (mapped to index files with suffix 0); the
    # original assertion message incorrectly said "from 0 to 9".
    assert -1 <= fold_idx and fold_idx < 10, "fold_idx must be from -1 to 9."
    train_filename = os.path.join(path, '10fold_idx', 'train_idx-{}.txt'.format(fold_idx + 1))
    test_filename = os.path.join(path, '10fold_idx', 'test_idx-{}.txt'.format(fold_idx + 1))
    train_idx = np.loadtxt(train_filename, dtype=int)
    test_idx = np.loadtxt(test_filename, dtype=int)
    train_graph_list = [graph_list[i] for i in train_idx]
    test_graph_list = [graph_list[i] for i in test_idx]
    return train_graph_list, test_graph_list
def get_fold_indices(complex_list, seed, fold_idx):
    """Computes stratified 10-fold train/test indices over a list of complexes.

    Args:
        complex_list: list of complexes; each must expose a scalar tensor `y`.
        seed: random seed for the shuffled fold assignment.
        fold_idx: which of the 10 folds to return (0-9).

    Returns:
        (train_idx, test_idx) as plain Python lists of ints.
    """
    assert 0 <= fold_idx and fold_idx < 10, "fold_idx must be from 0 to 9."
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    # Stratify on the complex labels. The loop variable is deliberately not
    # named `complex`, which would shadow the builtin.
    labels = [comp.y.item() for comp in complex_list]
    folds = list(skf.split(np.zeros(len(labels)), labels))
    train_idx, test_idx = folds[fold_idx]
    return train_idx.tolist(), test_idx.tolist()
| 8,611 | 34.883333 | 119 | py |
cwn | cwn-main/data/utils.py | import graph_tool as gt
import graph_tool.topology as top
import numpy as np
import torch
import gudhi as gd
import itertools
import networkx as nx
from tqdm import tqdm
from data.complex import Cochain, Complex
from typing import List, Dict, Optional, Union
from torch import Tensor
from torch_geometric.typing import Adj
from torch_scatter import scatter
from data.parallel import ProgressParallel
from joblib import delayed
def pyg_to_simplex_tree(edge_index: Tensor, size: int):
    """Constructs a gudhi simplex tree from a PyG graph.

    Args:
        edge_index: The edge_index of the graph (a tensor of shape [2, num_edges])
        size: The number of nodes in the graph.

    Returns:
        A gudhi SimplexTree containing every vertex as a 0-simplex and every
        edge as a 1-simplex.
    """
    tree = gd.SimplexTree()
    # Insert each vertex as a 0-simplex.
    for vertex in range(size):
        tree.insert([vertex])
    # Insert each edge as a 1-simplex.
    pairs = edge_index.numpy()
    for col in range(pairs.shape[1]):
        tree.insert([pairs[0][col], pairs[1][col]])
    return tree
def get_simplex_boundaries(simplex):
    """Returns the codimension-1 faces of a simplex as a list of tuples."""
    faces = itertools.combinations(simplex, len(simplex) - 1)
    return [tuple(face) for face in faces]
def build_tables(simplex_tree, size):
    """Builds, per dimension, the list of simplices and a simplex -> id map.

    Args:
        simplex_tree: a gudhi simplex tree.
        size: the number of vertices in the underlying graph.

    Returns:
        (simplex_tables, id_maps) where simplex_tables[d] lists the d-simplices
        (as vertex lists) and id_maps[d] maps each d-simplex (as a tuple) to
        its contiguous id within dimension d.
    """
    complex_dim = simplex_tree.dimension()
    # Each of these data structures has a separate entry per dimension.
    # (An unused `boundaries_tables` local present in the original was removed.)
    id_maps = [{} for _ in range(complex_dim+1)]  # simplex -> id
    simplex_tables = [[] for _ in range(complex_dim+1)]  # matrix of simplices
    # Vertices keep their original ids.
    simplex_tables[0] = [[v] for v in range(size)]
    id_maps[0] = {tuple([v]): v for v in range(size)}
    for simplex, _ in simplex_tree.get_simplices():
        dim = len(simplex) - 1
        if dim == 0:
            continue
        # Assign this simplex the next unused ID within its dimension.
        next_id = len(simplex_tables[dim])
        id_maps[dim][tuple(simplex)] = next_id
        simplex_tables[dim].append(simplex)
    return simplex_tables, id_maps
def extract_boundaries_and_coboundaries_from_simplex_tree(simplex_tree, id_maps, complex_dim: int):
    """Build two maps simplex -> its coboundaries and simplex -> its boundaries.

    Args:
        simplex_tree: a gudhi simplex tree.
        id_maps: per-dimension dicts mapping a simplex (tuple) to its id.
        complex_dim: the dimension of the complex.

    Returns:
        (boundaries_tables, boundaries, coboundaries) where
        boundaries_tables[d][i] lists the ids of the boundaries of the i-th
        d-simplex, and boundaries / coboundaries map each simplex tuple to
        the adjacent simplex tuples one dimension below / above.
    """
    # The extra dimension is added just for convenience to avoid treating it as a special case.
    boundaries = [{} for _ in range(complex_dim+2)] # simplex -> boundaries
    coboundaries = [{} for _ in range(complex_dim+2)] # simplex -> coboundaries
    boundaries_tables = [[] for _ in range(complex_dim+1)]
    for simplex, _ in simplex_tree.get_simplices():
        # Extract the relevant boundary and coboundary maps
        simplex_dim = len(simplex) - 1
        level_coboundaries = coboundaries[simplex_dim]
        level_boundaries = boundaries[simplex_dim + 1]
        # Add the boundaries of the simplex to the boundaries table
        if simplex_dim > 0:
            boundaries_ids = [id_maps[simplex_dim-1][boundary] for boundary in get_simplex_boundaries(simplex)]
            boundaries_tables[simplex_dim].append(boundaries_ids)
        # This operation should be roughly be O(dim_complex), so that is very efficient for us.
        # For details see pages 6-7 https://hal.inria.fr/hal-00707901v1/document
        simplex_coboundaries = simplex_tree.get_cofaces(simplex, codimension=1)
        for coboundary, _ in simplex_coboundaries:
            assert len(coboundary) == len(simplex) + 1
            # Record the coface as a coboundary of this simplex...
            if tuple(simplex) not in level_coboundaries:
                level_coboundaries[tuple(simplex)] = list()
            level_coboundaries[tuple(simplex)].append(tuple(coboundary))
            # ...and, symmetrically, this simplex as a boundary of the coface.
            if tuple(coboundary) not in level_boundaries:
                level_boundaries[tuple(coboundary)] = list()
            level_boundaries[tuple(coboundary)].append(tuple(simplex))
    return boundaries_tables, boundaries, coboundaries
def build_adj(boundaries: List[Dict], coboundaries: List[Dict], id_maps: List[Dict], complex_dim: int,
              include_down_adj: bool):
    """Builds the upper and lower adjacency data structures of the complex

    Args:
        boundaries: A list of dictionaries of the form
            boundaries[dim][simplex] -> List[simplex] (the boundaries)
        coboundaries: A list of dictionaries of the form
            coboundaries[dim][simplex] -> List[simplex] (the coboundaries)
        id_maps: A dictionary from simplex -> simplex_id
        complex_dim: the dimension of the complex.
        include_down_adj: whether the lower adjacencies (cells sharing a
            boundary) should be computed as well.

    Returns:
        (all_shared_boundaries, all_shared_coboundaries, lower_indexes,
        upper_indexes), each a per-dimension list; *_indexes hold [id1, id2]
        pairs and all_shared_* hold, aligned pairwise, the id of the shared
        (co)boundary responsible for each adjacency.
    """
    def initialise_structure():
        # One empty list per dimension of the complex.
        return [[] for _ in range(complex_dim+1)]
    upper_indexes, lower_indexes = initialise_structure(), initialise_structure()
    all_shared_boundaries, all_shared_coboundaries = initialise_structure(), initialise_structure()
    # Go through all dimensions of the complex
    for dim in range(complex_dim+1):
        # Go through all the simplices at that dimension
        for simplex, id in id_maps[dim].items():
            # Add the upper adjacent neighbours from the level below
            if dim > 0:
                for boundary1, boundary2 in itertools.combinations(boundaries[dim][simplex], 2):
                    id1, id2 = id_maps[dim - 1][boundary1], id_maps[dim - 1][boundary2]
                    # Both directions are added, so the adjacency is symmetric.
                    upper_indexes[dim - 1].extend([[id1, id2], [id2, id1]])
                    all_shared_coboundaries[dim - 1].extend([id, id])
            # Add the lower adjacent neighbours from the level above
            if include_down_adj and dim < complex_dim and simplex in coboundaries[dim]:
                for coboundary1, coboundary2 in itertools.combinations(coboundaries[dim][simplex], 2):
                    id1, id2 = id_maps[dim + 1][coboundary1], id_maps[dim + 1][coboundary2]
                    lower_indexes[dim + 1].extend([[id1, id2], [id2, id1]])
                    all_shared_boundaries[dim + 1].extend([id, id])
    return all_shared_boundaries, all_shared_coboundaries, lower_indexes, upper_indexes
def construct_features(vx: Tensor, cell_tables, init_method: str) -> List:
    """Combines the features of the component vertices to initialise the cell features.

    For every dimension above 0, each cell's feature vector is the
    `init_method` reduction (e.g. 'sum' or 'mean') of its vertices' features.
    """
    features = [vx]
    for dim in range(1, len(cell_tables)):
        vertex_ids = []
        cell_ids = []
        # Flatten the cell table into parallel (vertex, cell) index lists.
        for cell_id, cell in enumerate(cell_tables[dim]):
            vertex_ids.extend(cell)
            cell_ids.extend([cell_id] * len(cell))
        node_cell_index = torch.LongTensor([vertex_ids, cell_ids])
        gathered = vx.index_select(0, node_cell_index[0])
        # Aggregate the gathered vertex features per cell.
        features.append(scatter(gathered, node_cell_index[1], dim=0,
                                dim_size=len(cell_tables[dim]), reduce=init_method))
    return features
def extract_labels(y, size):
    """Splits a label tensor into a vertex-level part and a complex-level part.

    A length-1 tensor is interpreted as a label for the whole complex (graph
    classification); a length-`size` tensor as per-vertex labels.

    Returns:
        (v_y, complex_y), exactly one of which is non-None when y is given.
    """
    if y is None:
        return None, None
    first_dim = list(y.size())[0]
    if first_dim == 1:
        # A single label: it belongs to the whole complex.
        return None, y
    # Otherwise these must be per-vertex labels.
    assert first_dim == size
    return y, None
def generate_cochain(dim, x, all_upper_index, all_lower_index,
                     all_shared_boundaries, all_shared_coboundaries, cell_tables, boundaries_tables,
                     complex_dim, y=None):
    """Builds a Cochain given all the adjacency data extracted from the complex.

    Args:
        dim: the dimension of the cells in this cochain.
        x: feature matrix for the cells of this dimension (may be None).
        all_upper_index / all_lower_index: per-dimension [id1, id2] pair lists.
        all_shared_boundaries / all_shared_coboundaries: per-dimension lists of
            shared (co)boundary cell ids, aligned with the pair lists.
        cell_tables: per-dimension lists of cells (used for cell counts).
        boundaries_tables: per-dimension lists of boundary ids for each cell.
        complex_dim: the dimension of the whole complex.
        y: optional labels for the cells of this dimension.

    Returns:
        The Cochain for dimension `dim`.
    """
    if dim == 0:
        # Vertices have no boundaries, so no lower adjacencies can exist.
        assert len(all_lower_index[dim]) == 0
        assert len(all_shared_boundaries[dim]) == 0
    # Cell counts one dimension below / above (used by the checks further down).
    num_cells_down = len(cell_tables[dim-1]) if dim > 0 else None
    num_cells_up = len(cell_tables[dim+1]) if dim < complex_dim else 0
    # Convert the Python lists into tensors; empty adjacencies become None.
    up_index = (torch.tensor(all_upper_index[dim], dtype=torch.long).t()
                if len(all_upper_index[dim]) > 0 else None)
    down_index = (torch.tensor(all_lower_index[dim], dtype=torch.long).t()
                  if len(all_lower_index[dim]) > 0 else None)
    shared_coboundaries = (torch.tensor(all_shared_coboundaries[dim], dtype=torch.long)
                           if len(all_shared_coboundaries[dim]) > 0 else None)
    shared_boundaries = (torch.tensor(all_shared_boundaries[dim], dtype=torch.long)
                         if len(all_shared_boundaries[dim]) > 0 else None)
    boundary_index = None
    if len(boundaries_tables[dim]) > 0:
        # Build a [2, num_boundary_connections] index: row 0 holds boundary
        # cell ids, row 1 the id of the cell they bound.
        boundary_index = [list(), list()]
        for s, cell in enumerate(boundaries_tables[dim]):
            for boundary in cell:
                boundary_index[1].append(s)
                boundary_index[0].append(boundary)
        boundary_index = torch.LongTensor(boundary_index)
    # Sanity checks on the assembled adjacency data.
    if num_cells_down is None:
        assert shared_boundaries is None
    if num_cells_up == 0:
        assert shared_coboundaries is None
    if up_index is not None:
        assert up_index.size(1) == shared_coboundaries.size(0)
        assert num_cells_up == shared_coboundaries.max() + 1
    if down_index is not None:
        assert down_index.size(1) == shared_boundaries.size(0)
        assert num_cells_down >= shared_boundaries.max() + 1
    return Cochain(dim=dim, x=x, upper_index=up_index,
                   lower_index=down_index, shared_coboundaries=shared_coboundaries,
                   shared_boundaries=shared_boundaries, y=y, num_cells_down=num_cells_down,
                   num_cells_up=num_cells_up, boundary_index=boundary_index)
def compute_clique_complex_with_gudhi(x: Tensor, edge_index: Adj, size: int,
                                      expansion_dim: int = 2, y: Tensor = None,
                                      include_down_adj=True,
                                      init_method: str = 'sum') -> Complex:
    """Generates a clique complex of a pyG graph via gudhi.

    Args:
        x: The feature matrix for the nodes of the graph
        edge_index: The edge_index of the graph (a tensor of shape [2, num_edges])
        size: The number of nodes in the graph
        expansion_dim: The dimension to expand the simplex to.
        y: Labels for the graph nodes or a label for the whole graph.
        include_down_adj: Whether to add down adj in the complex or not
        init_method: How to initialise features at higher levels.

    Returns:
        A Complex with one Cochain per dimension of the resulting clique complex.
    """
    assert x is not None
    assert isinstance(edge_index, Tensor) # Support only tensor edge_index for now
    # Creates the gudhi-based simplicial complex
    simplex_tree = pyg_to_simplex_tree(edge_index, size)
    simplex_tree.expansion(expansion_dim) # Computes the clique complex up to the desired dim.
    complex_dim = simplex_tree.dimension() # See what is the dimension of the complex now.
    # Builds tables of the simplicial complexes at each level and their IDs
    simplex_tables, id_maps = build_tables(simplex_tree, size)
    # Extracts the boundaries and coboundaries of each simplex in the complex
    boundaries_tables, boundaries, co_boundaries = (
        extract_boundaries_and_coboundaries_from_simplex_tree(simplex_tree, id_maps, complex_dim))
    # Computes the adjacencies between all the simplexes in the complex
    shared_boundaries, shared_coboundaries, lower_idx, upper_idx = build_adj(boundaries, co_boundaries, id_maps,
                                                                            complex_dim, include_down_adj)
    # Construct features for the higher dimensions
    # TODO: Make this handle edge features as well and add alternative options to compute this.
    xs = construct_features(x, simplex_tables, init_method)
    # Initialise the node / complex labels
    v_y, complex_y = extract_labels(y, size)
    cochains = []
    for i in range(complex_dim+1):
        # Only dimension 0 can carry per-cell (vertex) labels.
        y = v_y if i == 0 else None
        cochain = generate_cochain(i, xs[i], upper_idx, lower_idx, shared_boundaries, shared_coboundaries,
                                   simplex_tables, boundaries_tables, complex_dim=complex_dim, y=y)
        cochains.append(cochain)
    return Complex(*cochains, y=complex_y, dimension=complex_dim)
def convert_graph_dataset_with_gudhi(dataset, expansion_dim: int, include_down_adj=True,
                                     init_method: str = 'sum'):
    """Converts a dataset of graphs into clique complexes via gudhi.

    Returns:
        (complexes, max_dimension, num_features_per_dimension).
    """
    # TODO(Cris): Add parallelism to this code like in the cell complex conversion code.
    max_dim = -1
    converted = []
    num_features = [None] * (expansion_dim + 1)
    for data in tqdm(dataset):
        cplx = compute_clique_complex_with_gudhi(
            data.x, data.edge_index, data.num_nodes, expansion_dim=expansion_dim,
            y=data.y, include_down_adj=include_down_adj, init_method=init_method)
        max_dim = max(max_dim, cplx.dimension)
        # Track (and validate) the feature width at each dimension.
        for dim in range(cplx.dimension + 1):
            width = cplx.cochains[dim].num_features
            if num_features[dim] is None:
                num_features[dim] = width
            else:
                assert num_features[dim] == width
        converted.append(cplx)
    return converted, max_dim, num_features[:max_dim + 1]
# ---- support for rings as cells
def get_rings(edge_index, max_k=7):
    """Finds all induced cycles (rings) of length 3..max_k in a graph via graph-tool.

    Args:
        edge_index: edge list as a [2, num_edges] tensor or numpy array.
        max_k: maximum ring length to search for.

    Returns:
        A list of rings, each a tuple of vertex ids in their cyclic order.
    """
    if isinstance(edge_index, torch.Tensor):
        edge_index = edge_index.numpy()
    edge_list = edge_index.T
    graph_gt = gt.Graph(directed=False)
    graph_gt.add_edge_list(edge_list)
    # Self-loops and parallel edges would yield spurious isomorphisms below.
    gt.stats.remove_self_loops(graph_gt)
    gt.stats.remove_parallel_edges(graph_gt)
    # We represent rings with their original node ordering
    # so that we can easily read out the boundaries
    # The use of the `sorted_rings` set allows to discard
    # different isomorphisms which are however associated
    # to the same original ring – this happens due to the intrinsic
    # symmetries of cycles
    rings = set()
    sorted_rings = set()
    for k in range(3, max_k+1):
        # Match an induced k-cycle pattern against the graph.
        pattern = nx.cycle_graph(k)
        pattern_edge_list = list(pattern.edges)
        pattern_gt = gt.Graph(directed=False)
        pattern_gt.add_edge_list(pattern_edge_list)
        sub_isos = top.subgraph_isomorphism(pattern_gt, graph_gt, induced=True, subgraph=True,
                                            generator=True)
        sub_iso_sets = map(lambda isomorphism: tuple(isomorphism.a), sub_isos)
        for iso in sub_iso_sets:
            # Deduplicate isomorphisms over the same vertex set.
            if tuple(sorted(iso)) not in sorted_rings:
                rings.add(iso)
                sorted_rings.add(tuple(sorted(iso)))
    rings = list(rings)
    return rings
def build_tables_with_rings(edge_index, simplex_tree, size, max_k):
    """Builds cell tables and id maps for a graph, adding induced cycles as 2-cells.

    Returns:
        (cell_tables, id_maps), extended to dimension 2 when rings are present.
    """
    # Vertices and edges are handled by the simplicial-complex code.
    cell_tables, id_maps = build_tables(simplex_tree, size)
    # Induced cycles of length 3..max_k become the 2-cells.
    rings = get_rings(edge_index, max_k=max_k)
    if len(rings) > 0:
        id_maps.append({})
        cell_tables.append([])
        assert len(cell_tables) == 3, cell_tables
        for ring in rings:
            # Assign the ring the next unused 2-cell id.
            ring_id = len(cell_tables[2])
            id_maps[2][ring] = ring_id
            cell_tables[2].append(list(ring))
    return cell_tables, id_maps
def get_ring_boundaries(ring):
    """Returns the sorted list of boundary edges of a ring, each as a sorted vertex pair."""
    edges = []
    size = len(ring)
    for pos in range(size):
        succ = (pos + 1) % size  # wrap around to close the cycle
        # We represent the boundaries in lexicographic order
        # so to be compatible with 0- and 1- dim cells
        # extracted as simplices with gudhi
        edges.append(tuple(sorted([ring[pos], ring[succ]])))
    return sorted(edges)
def extract_boundaries_and_coboundaries_with_rings(simplex_tree, id_maps):
    """Build two maps: cell -> its coboundaries and cell -> its boundaries.

    Handles complexes whose 2-cells are rings: the 0/1-dimensional structure
    comes from the simplex tree; ring boundary/coboundary data is added here.
    """
    # Find boundaries and coboundaries up to edges by conveniently
    # invoking the code for simplicial complexes
    assert simplex_tree.dimension() <= 1
    boundaries_tables, boundaries, coboundaries = extract_boundaries_and_coboundaries_from_simplex_tree(
        simplex_tree, id_maps, simplex_tree.dimension())
    assert len(id_maps) <= 3
    if len(id_maps) == 3:
        # Extend tables with boundary and coboundary information of rings
        boundaries += [{}]
        coboundaries += [{}]
        boundaries_tables += [[]]
        for cell in id_maps[2]:
            cell_boundaries = get_ring_boundaries(cell)
            boundaries[2][cell] = list()
            boundaries_tables[2].append([])
            for boundary in cell_boundaries:
                # Every ring edge must already exist as a 1-cell.
                assert boundary in id_maps[1], boundary
                boundaries[2][cell].append(boundary)
                # Register the ring as a coboundary of each of its edges.
                if boundary not in coboundaries[1]:
                    coboundaries[1][boundary] = list()
                coboundaries[1][boundary].append(cell)
                boundaries_tables[2][-1].append(id_maps[1][boundary])
    return boundaries_tables, boundaries, coboundaries
def compute_ring_2complex(x: Union[Tensor, np.ndarray], edge_index: Union[Tensor, np.ndarray],
                          edge_attr: Optional[Union[Tensor, np.ndarray]],
                          size: int, y: Optional[Union[Tensor, np.ndarray]] = None, max_k: int = 7,
                          include_down_adj=True, init_method: str = 'sum',
                          init_edges=True, init_rings=False) -> Complex:
    """Generates a ring 2-complex of a pyG graph via graph-tool.

    Args:
        x: The feature matrix for the nodes of the graph (shape [num_vertices, num_v_feats])
        edge_index: The edge_index of the graph (a tensor of shape [2, num_edges])
        edge_attr: The feature matrix for the edges of the graph (shape [num_edges, num_e_feats])
        size: The number of nodes in the graph
        y: Labels for the graph nodes or a label for the whole graph.
        max_k: maximum length of rings to look for.
        include_down_adj: Whether to add down adj in the complex or not
        init_method: How to initialise features at higher levels.
        init_edges: Whether to initialise features for the 1-cells (edges).
        init_rings: Whether to initialise features for the 2-cells (rings).

    Returns:
        A Complex of dimension at most 2.
    """
    assert x is not None
    assert isinstance(edge_index, np.ndarray) or isinstance(edge_index, Tensor)
    # For parallel processing with joblib we need to pass numpy arrays as inputs
    # Therefore, we convert here everything back to a tensor.
    if isinstance(x, np.ndarray):
        x = torch.tensor(x)
    if isinstance(edge_index, np.ndarray):
        edge_index = torch.tensor(edge_index)
    if isinstance(edge_attr, np.ndarray):
        edge_attr = torch.tensor(edge_attr)
    if isinstance(y, np.ndarray):
        y = torch.tensor(y)
    # Creates the gudhi-based simplicial complex up to edges
    simplex_tree = pyg_to_simplex_tree(edge_index, size)
    assert simplex_tree.dimension() <= 1
    if simplex_tree.dimension() == 0:
        # A 0-dimensional tree can only arise from an edgeless graph.
        assert edge_index.size(1) == 0
    # Builds tables of the cellular complexes at each level and their IDs
    cell_tables, id_maps = build_tables_with_rings(edge_index, simplex_tree, size, max_k)
    assert len(id_maps) <= 3
    complex_dim = len(id_maps)-1
    # Extracts the boundaries and coboundaries of each cell in the complex
    boundaries_tables, boundaries, co_boundaries = extract_boundaries_and_coboundaries_with_rings(simplex_tree, id_maps)
    # Computes the adjacencies between all the cells in the complex;
    # here we force complex dimension to be 2
    shared_boundaries, shared_coboundaries, lower_idx, upper_idx = build_adj(boundaries, co_boundaries, id_maps,
                                                                            complex_dim, include_down_adj)
    # Construct features for the higher dimensions
    xs = [x, None, None]
    constructed_features = construct_features(x, cell_tables, init_method)
    if simplex_tree.dimension() == 0:
        assert len(constructed_features) == 1
    if init_rings and len(constructed_features) > 2:
        xs[2] = constructed_features[2]
    if init_edges and simplex_tree.dimension() >= 1:
        if edge_attr is None:
            xs[1] = constructed_features[1]
        # If we have edge-features we simply use them for 1-cells
        else:
            # If edge_attr is a list of scalar features, make it a matrix
            if edge_attr.dim() == 1:
                edge_attr = edge_attr.view(-1, 1)
            # Retrieve feats and check edge features are undirected
            ex = dict()
            for e, edge in enumerate(edge_index.numpy().T):
                canon_edge = tuple(sorted(edge))
                edge_id = id_maps[1][canon_edge]
                edge_feats = edge_attr[e]
                if edge_id in ex:
                    # Both directions of an undirected edge must carry the same features.
                    assert torch.equal(ex[edge_id], edge_feats)
                else:
                    ex[edge_id] = edge_feats
            # Build edge feature matrix
            max_id = max(ex.keys())
            edge_feats = []
            assert len(cell_tables[1]) == max_id + 1
            for id in range(max_id + 1):
                edge_feats.append(ex[id])
            xs[1] = torch.stack(edge_feats, dim=0)
            assert xs[1].dim() == 2
            assert xs[1].size(0) == len(id_maps[1])
            assert xs[1].size(1) == edge_attr.size(1)
    # Initialise the node / complex labels
    v_y, complex_y = extract_labels(y, size)
    cochains = []
    for i in range(complex_dim + 1):
        # Only dimension 0 can carry per-cell (vertex) labels.
        y = v_y if i == 0 else None
        cochain = generate_cochain(i, xs[i], upper_idx, lower_idx, shared_boundaries, shared_coboundaries,
                                   cell_tables, boundaries_tables, complex_dim=complex_dim, y=y)
        cochains.append(cochain)
    return Complex(*cochains, y=complex_y, dimension=complex_dim)
def convert_graph_dataset_with_rings(dataset, max_ring_size=7, include_down_adj=False,
                                     init_method: str = 'sum', init_edges=True, init_rings=False,
                                     n_jobs=1):
    """Converts a dataset of graphs into ring 2-complexes, in parallel via joblib.

    Args:
        dataset: an indexable collection of PyG `Data` graphs.
        max_ring_size: maximum ring length used as 2-cells.
        include_down_adj: whether to compute lower adjacencies.
        init_method: reduction used to initialise higher-dim cell features.
        init_edges / init_rings: whether to initialise 1- / 2-cell features.
        n_jobs: number of joblib workers.

    Returns:
        (complexes, max_dimension, num_features_per_dimension).
    """
    dimension = -1
    num_features = [None, None, None]
    def maybe_convert_to_numpy(x):
        # Tensors are converted to numpy before being shipped to the workers
        # (see the note on joblib performance below).
        if isinstance(x, Tensor):
            return x.numpy()
        return x
    # Process the dataset in parallel
    parallel = ProgressParallel(n_jobs=n_jobs, use_tqdm=True, total=len(dataset))
    # It is important we supply a numpy array here. tensors seem to slow joblib down significantly.
    complexes = parallel(delayed(compute_ring_2complex)(
        maybe_convert_to_numpy(data.x), maybe_convert_to_numpy(data.edge_index),
        maybe_convert_to_numpy(data.edge_attr),
        data.num_nodes, y=maybe_convert_to_numpy(data.y), max_k=max_ring_size,
        include_down_adj=include_down_adj, init_method=init_method,
        init_edges=init_edges, init_rings=init_rings) for data in dataset)
    # NB: here we perform additional checks to verify the order of complexes
    # corresponds to that of input graphs after _parallel_ conversion
    for c, complex in enumerate(complexes):
        # Handle dimension and number of features
        if complex.dimension > dimension:
            dimension = complex.dimension
        for dim in range(complex.dimension + 1):
            if num_features[dim] is None:
                num_features[dim] = complex.cochains[dim].num_features
            else:
                assert num_features[dim] == complex.cochains[dim].num_features
        # Validate against graph
        graph = dataset[c]
        if complex.y is None:
            assert graph.y is None
        else:
            assert torch.equal(complex.y, graph.y)
        assert torch.equal(complex.cochains[0].x, graph.x)
        if complex.dimension >= 1:
            # Each undirected edge appears twice in edge_index but once as a 1-cell.
            assert complex.cochains[1].x.size(0) == (graph.edge_index.size(1) // 2)
    return complexes, dimension, num_features[:dimension+1]
| 23,431 | 41.915751 | 120 | py |
cwn | cwn-main/data/complex.py | """
Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Copyright (c) 2021 The CWN Project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import logging
import copy
from torch import Tensor
from torch_sparse import SparseTensor
from mp.cell_mp import CochainMessagePassingParams
from torch_geometric.typing import Adj
from typing import List
class Cochain(object):
    """
    Class representing a cochain on k-dim cells (i.e. vector-valued signals on k-dim cells).
    Args:
        dim: dim of the cells in the cochain
        x: feature matrix, shape [num_cells, num_features]; may not be available
        upper_index: upper adjacency, matrix, shape [2, num_upper_connections];
            may not be available, e.g. when `dim` is the top level dim of a complex
        lower_index: lower adjacency, matrix, shape [2, num_lower_connections];
            may not be available, e.g. when `dim` is 0
        shared_boundaries: a tensor of shape (num_lower_adjacencies,) specifying the indices of
            the shared boundary for each lower adjacency
        shared_coboundaries: a tensor of shape (num_upper_adjacencies,) specifying the indices of
            the shared coboundary for each upper adjacency
        boundary_index: boundary adjacency, matrix, shape [2, num_boundaries_connections];
            may not be available, e.g. when `dim` is 0
        upper_orient: a tensor of shape (num_upper_adjacencies,) specifying the relative
            orientation (+-1) with respect to the cells from upper_index
        lower_orient: a tensor of shape (num_lower_adjacencies,) specifying the relative
            orientation (+-1) with respect to the cells from lower_index
        y: labels over cells in the cochain, shape [num_cells,]
    """
    def __init__(self, dim: int, x: Tensor = None, upper_index: Adj = None, lower_index: Adj = None,
                 shared_boundaries: Tensor = None, shared_coboundaries: Tensor = None, mapping: Tensor = None,
                 boundary_index: Adj = None, upper_orient=None, lower_orient=None, y=None, **kwargs):
        # 0-dimensional cells (nodes) have nothing below them, so lower/boundary
        # structures must not be provided.
        if dim == 0:
            assert lower_index is None
            assert shared_boundaries is None
            assert boundary_index is None
        # Note, everything that is not of form __smth__ is made None during batching
        # So dim must be stored like this.
        self.__dim__ = dim
        # TODO: check default for x
        # Stored as a class-private (name-mangled) attribute so it can be validated
        # through the `x` property setter below.
        self.__x = x
        self.upper_index = upper_index
        self.lower_index = lower_index
        self.boundary_index = boundary_index
        self.y = y
        self.shared_boundaries = shared_boundaries
        self.shared_coboundaries = shared_coboundaries
        self.upper_orient = upper_orient
        self.lower_orient = lower_orient
        self.__oriented__ = False
        self.__hodge_laplacian__ = None
        # TODO: Figure out what to do with mapping.
        self.__mapping = mapping
        # Extra keyword arguments become attributes, except the cell-count hints,
        # which are routed to their dedicated storage / setters.
        for key, item in kwargs.items():
            if key == 'num_cells':
                self.__num_cells__ = item
            elif key == 'num_cells_down':
                self.num_cells_down = item
            elif key == 'num_cells_up':
                self.num_cells_up = item
            else:
                self[key] = item
    @property
    def dim(self):
        """Returns the dimension of the cells in this cochain.
        This field should not have a setter. The dimension of a cochain cannot be changed.
        """
        return self.__dim__
    @property
    def x(self):
        """Returns the vector values (features) associated with the cells."""
        return self.__x
    @x.setter
    def x(self, new_x):
        """Sets the vector values (features) associated with the cells."""
        if new_x is None:
            logging.warning("Cochain features were set to None. ")
        else:
            # The new features must provide one row per cell.
            assert self.num_cells == len(new_x)
        self.__x = new_x
    @property
    def keys(self):
        """Returns all names of cochain attributes."""
        # Only attributes with a non-None value are reported, and dunder-style
        # (`__smth__`) bookkeeping attributes are filtered out. Note that the
        # name-mangled feature attribute `_Cochain__x` passes this filter, so the
        # cell features are part of `keys` and therefore take part in batching.
        keys = [key for key in self.__dict__.keys() if self[key] is not None]
        keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']
        return keys
    def __getitem__(self, key):
        """Gets the data of the attribute :obj:`key`."""
        return getattr(self, key, None)
    def __setitem__(self, key, value):
        """Sets the attribute :obj:`key` to :obj:`value`."""
        setattr(self, key, value)
    def __contains__(self, key):
        """Returns :obj:`True`, if the attribute :obj:`key` is present in the data."""
        return key in self.keys
    def __cat_dim__(self, key, value):
        """
        Returns the dimension for which :obj:`value` of attribute
        :obj:`key` will get concatenated when creating batches.
        """
        # Adjacency-like attributes are [*, num_connections] matrices/vectors and
        # are concatenated along the last dimension.
        if key in ['upper_index', 'lower_index', 'shared_boundaries',
                   'shared_coboundaries', 'boundary_index']:
            return -1
        # by default, concatenate sparse matrices diagonally.
        elif isinstance(value, SparseTensor):
            return (0, 1)
        return 0
    def __inc__(self, key, value):
        """
        Returns the incremental count to cumulatively increase the value
        of the next attribute of :obj:`key` when creating batches.
        """
        # TODO: value is not used in this method. Can it be removed?
        if key in ['upper_index', 'lower_index']:
            # Cell-to-cell adjacencies index cells of this dimension.
            inc = self.num_cells
        elif key in ['shared_boundaries']:
            # Shared boundaries index cells one dimension below.
            inc = self.num_cells_down
        elif key == 'shared_coboundaries':
            # Shared coboundaries index cells one dimension above.
            inc = self.num_cells_up
        elif key == 'boundary_index':
            # Row 0 indexes (dim-1)-cells, row 1 indexes dim-cells, so each row
            # needs its own increment.
            boundary_inc = self.num_cells_down if self.num_cells_down is not None else 0
            cell_inc = self.num_cells if self.num_cells is not None else 0
            inc = [[boundary_inc], [cell_inc]]
        else:
            inc = 0
        if inc is None:
            inc = 0
        return inc
    def __call__(self, *keys):
        """
        Iterates over all attributes :obj:`*keys` in the cochain, yielding
        their attribute names and content.
        If :obj:`*keys` is not given this method will iterative over all
        present attributes.
        """
        for key in sorted(self.keys) if not keys else keys:
            if key in self:
                yield key, self[key]
    @property
    def num_cells(self):
        """Returns the number of cells in the cochain."""
        # Priority: explicit count > feature-matrix rows > boundary_index maximum.
        if hasattr(self, '__num_cells__'):
            return self.__num_cells__
        if self.x is not None:
            return self.x.size(self.__cat_dim__('x', self.x))
        if self.boundary_index is not None:
            # Row 1 of boundary_index holds cell ids of this dimension.
            return int(self.boundary_index[1,:].max()) + 1
        assert self.upper_index is None and self.lower_index is None
        return None
    @num_cells.setter
    def num_cells(self, num_cells):
        """Sets the number of cells in the cochain."""
        # TODO: Add more checks here
        self.__num_cells__ = num_cells
    @property
    def num_cells_up(self):
        """Returns the number of cells in the higher-dimensional cochain of co-dimension 1."""
        if hasattr(self, '__num_cells_up__'):
            return self.__num_cells_up__
        elif self.shared_coboundaries is not None:
            assert self.upper_index is not None
            # Infer the count from the largest coboundary id referenced.
            return int(self.shared_coboundaries.max()) + 1
        assert self.upper_index is None
        return 0
    @num_cells_up.setter
    def num_cells_up(self, num_cells_up):
        """Sets the number of cells in the higher-dimensional cochain of co-dimension 1."""
        # TODO: Add more checks here
        self.__num_cells_up__ = num_cells_up
    @property
    def num_cells_down(self):
        """Returns the number of cells in the lower-dimensional cochain of co-dimension 1."""
        # Nodes (dim 0) have no cells below them.
        if self.dim == 0:
            return None
        if hasattr(self, '__num_cells_down__'):
            return self.__num_cells_down__
        if self.lower_index is None:
            return 0
        # Unlike num_cells_up, this count cannot be inferred from lower_index alone.
        raise ValueError('Cannot infer the number of cells in the cochain below.')
    @num_cells_down.setter
    def num_cells_down(self, num_cells_down):
        """Sets the number of cells in the lower-dimensional cochain of co-dimension 1."""
        # TODO: Add more checks here
        self.__num_cells_down__ = num_cells_down
    @property
    def num_features(self):
        """Returns the number of features per cell in the cochain."""
        if self.x is None:
            return 0
        return 1 if self.x.dim() == 1 else self.x.size(1)
    def __apply__(self, item, func):
        # Recursively applies `func` to tensors nested in lists/tuples/dicts;
        # anything else is returned unchanged.
        if torch.is_tensor(item):
            return func(item)
        elif isinstance(item, SparseTensor):
            # Not all apply methods are supported for `SparseTensor`, e.g.,
            # `contiguous()`. We can get around it by capturing the exception.
            try:
                return func(item)
            except AttributeError:
                return item
        elif isinstance(item, (tuple, list)):
            return [self.__apply__(v, func) for v in item]
        elif isinstance(item, dict):
            return {k: self.__apply__(v, func) for k, v in item.items()}
        else:
            return item
    def apply(self, func, *keys):
        """
        Applies the function :obj:`func` to all tensor attributes
        :obj:`*keys`. If :obj:`*keys` is not given, :obj:`func` is applied to
        all present attributes.
        """
        for key, item in self(*keys):
            self[key] = self.__apply__(item, func)
        return self
    def contiguous(self, *keys):
        """
        Ensures a contiguous memory layout for all attributes :obj:`*keys`.
        If :obj:`*keys` is not given, all present attributes are ensured to
        have a contiguous memory layout.
        """
        return self.apply(lambda x: x.contiguous(), *keys)
    def to(self, device, *keys, **kwargs):
        """
        Performs tensor dtype and/or device conversion to all attributes
        :obj:`*keys`.
        If :obj:`*keys` is not given, the conversion is applied to all present
        attributes.
        """
        return self.apply(lambda x: x.to(device, **kwargs), *keys)
    def clone(self):
        """Returns a deep copy of this cochain (tensors cloned, other values deep-copied)."""
        return self.__class__.from_dict({
            k: v.clone() if torch.is_tensor(v) else copy.deepcopy(v)
            for k, v in self.__dict__.items()
        })
    @property
    def mapping(self):
        # NOTE(review): purpose of `mapping` is undecided (see TODO in __init__).
        return self.__mapping
class CochainBatch(Cochain):
    """A datastructure for storing a batch of cochains.
    Similarly to PyTorch Geometric, the batched cochain consists of a big cochain formed of multiple
    independent cochains on sets of disconnected cells.
    Attributes:
        batch: assignment vector mapping each cell to the index of its original cochain.
        ptr: cumulative cell counts delimiting the individual cochains (starts at 0,
            so it has ``num_cochains + 1`` entries).
    """
    def __init__(self, dim, batch=None, ptr=None, **kwargs):
        super(CochainBatch, self).__init__(dim, **kwargs)
        # Re-apply kwargs: unlike Cochain.__init__, num_cells_up/num_cells_down
        # hints are stored verbatim here.
        for key, item in kwargs.items():
            if key == 'num_cells':
                self.__num_cells__ = item
            else:
                self[key] = item
        self.batch = batch
        self.ptr = ptr
        # Bookkeeping populated by from_cochain_list; stored as dunder attributes
        # so they survive (are excluded from) the `keys`-based machinery.
        self.__data_class__ = Cochain
        self.__slices__ = None
        self.__cumsum__ = None
        self.__cat_dims__ = None
        self.__num_cells_list__ = None
        self.__num_cells_down_list__ = None
        self.__num_cells_up_list__ = None
        self.__num_cochains__ = None
    @classmethod
    def from_cochain_list(cls, data_list, follow_batch=None):
        """
        Constructs a batch object from a python list holding
        :class:`Cochain` objects.
        The assignment vector :obj:`batch` is created on the fly.
        Additionally, creates assignment batch vectors for each key in
        :obj:`follow_batch`.
        Args:
            data_list: non-empty list of :class:`Cochain` objects of the same dimension.
            follow_batch: optional list of attribute names for which per-cell batch
                assignment vectors are also created. Defaults to no keys.
        Returns:
            A contiguous :class:`CochainBatch`.
        """
        # A None sentinel is used instead of a mutable `[]` default argument.
        if follow_batch is None:
            follow_batch = []
        keys = list(set.union(*[set(data.keys) for data in data_list]))
        assert 'batch' not in keys and 'ptr' not in keys
        batch = cls(data_list[0].dim)
        # Clear all non-dunder attributes set by the Cochain constructor; they will
        # be rebuilt from the concatenated per-cochain values below.
        for key in data_list[0].__dict__.keys():
            if key[:2] != '__' and key[-2:] != '__':
                batch[key] = None
        batch.__num_cochains__ = len(data_list)
        batch.__data_class__ = data_list[0].__class__
        for key in keys + ['batch']:
            batch[key] = []
        batch['ptr'] = [0]
        device = None
        slices = {key: [0] for key in keys}
        cumsum = {key: [0] for key in keys}
        cat_dims = {}
        num_cells_list = []
        num_cells_up_list = []
        num_cells_down_list = []
        for i, data in enumerate(data_list):
            for key in keys:
                item = data[key]
                if item is not None:
                    # Increase values by `cumsum` value.
                    cum = cumsum[key][-1]
                    if isinstance(item, Tensor) and item.dtype != torch.bool:
                        if not isinstance(cum, int) or cum != 0:
                            item = item + cum
                    elif isinstance(item, SparseTensor):
                        value = item.storage.value()
                        if value is not None and value.dtype != torch.bool:
                            if not isinstance(cum, int) or cum != 0:
                                value = value + cum
                            item = item.set_value(value, layout='coo')
                    elif isinstance(item, (int, float)):
                        item = item + cum
                    # Treat 0-dimensional tensors as 1-dimensional.
                    if isinstance(item, Tensor) and item.dim() == 0:
                        item = item.unsqueeze(0)
                    batch[key].append(item)
                    # Gather the size of the `cat` dimension.
                    size = 1
                    cat_dim = data.__cat_dim__(key, data[key])
                    cat_dims[key] = cat_dim
                    if isinstance(item, Tensor):
                        size = item.size(cat_dim)
                        device = item.device
                    elif isinstance(item, SparseTensor):
                        size = torch.tensor(item.sizes())[torch.tensor(cat_dim)]
                        device = item.device()
                    # TODO: do we really need slices, and, are we managing them correctly?
                    slices[key].append(size + slices[key][-1])
                    if key in follow_batch:
                        if isinstance(size, Tensor):
                            for j, size in enumerate(size.tolist()):
                                tmp = f'{key}_{j}_batch'
                                batch[tmp] = [] if i == 0 else batch[tmp]
                                batch[tmp].append(
                                    torch.full((size, ), i, dtype=torch.long,
                                               device=device))
                        else:
                            tmp = f'{key}_batch'
                            batch[tmp] = [] if i == 0 else batch[tmp]
                            batch[tmp].append(
                                torch.full((size, ), i, dtype=torch.long,
                                           device=device))
                    inc = data.__inc__(key, item)
                    if isinstance(inc, (tuple, list)):
                        inc = torch.tensor(inc)
                    cumsum[key].append(inc + cumsum[key][-1])
            if hasattr(data, '__num_cells__'):
                num_cells_list.append(data.__num_cells__)
            else:
                num_cells_list.append(None)
            if hasattr(data, '__num_cells_up__'):
                num_cells_up_list.append(data.__num_cells_up__)
            else:
                num_cells_up_list.append(None)
            if hasattr(data, '__num_cells_down__'):
                num_cells_down_list.append(data.__num_cells_down__)
            else:
                num_cells_down_list.append(None)
            num_cells = data.num_cells
            if num_cells is not None:
                item = torch.full((num_cells, ), i, dtype=torch.long,
                                  device=device)
                batch.batch.append(item)
                batch.ptr.append(batch.ptr[-1] + num_cells)
        # Fix initial slice values:
        # `s[1] - s[1]` is 0 as an int, or a zero tensor when sizes are tensors,
        # so the initial entry gets the same type as the appended ones.
        for key in keys:
            slices[key][0] = slices[key][1] - slices[key][1]
        batch.batch = None if len(batch.batch) == 0 else batch.batch
        batch.ptr = None if len(batch.ptr) == 1 else batch.ptr
        batch.__slices__ = slices
        batch.__cumsum__ = cumsum
        batch.__cat_dims__ = cat_dims
        batch.__num_cells_list__ = num_cells_list
        batch.__num_cells_up_list__ = num_cells_up_list
        batch.__num_cells_down_list__ = num_cells_down_list
        ref_data = data_list[0]
        # Concatenate the per-cochain pieces into single tensors.
        for key in batch.keys:
            items = batch[key]
            item = items[0]
            if isinstance(item, Tensor):
                batch[key] = torch.cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, SparseTensor):
                batch[key] = torch.cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, (int, float)):
                batch[key] = torch.tensor(items)
        return batch.contiguous()
    def __getitem__(self, idx):
        if isinstance(idx, str):
            return super(CochainBatch, self).__getitem__(idx)
        elif isinstance(idx, int):
            # TODO: is the 'get_example' method needed for now?
            #return self.get_example(idx)
            raise NotImplementedError
        else:
            # TODO: is the 'index_select' method needed for now?
            # return self.index_select(idx)
            raise NotImplementedError
    def to_cochain_list(self) -> List[Cochain]:
        r"""Reconstructs the list of :class:`torch_geometric.data.Data` objects
        from the batch object.
        The batch object must have been created via :meth:`from_data_list` in
        order to be able to reconstruct the initial objects."""
        # TODO: is the 'to_cochain_list' method needed for now?
        #return [self.get_example(i) for i in range(self.num_cochains)]
        raise NotImplementedError
    @property
    def num_cochains(self) -> int:
        """Returns the number of cochains in the batch."""
        if self.__num_cochains__ is not None:
            return self.__num_cochains__
        # `ptr` stores num_cochains + 1 cumulative offsets (starting at 0), so the
        # number of cochains is one *less* than its length. The previous `+ 1`
        # over-counted by two whenever `__num_cochains__` was unset.
        return self.ptr.numel() - 1
class Complex(object):
    """Class representing a cochain complex or an attributed cellular complex.
    Args:
        cochains: A list of cochains forming the cochain complex
        y: A tensor of shape (1,) containing a label for the complex for complex-level tasks.
        dimension: The dimension of the complex.
    """
    def __init__(self, *cochains: Cochain, y: torch.Tensor = None, dimension: int = None):
        if len(cochains) == 0:
            raise ValueError('At least one cochain is required.')
        # If no dimension is given, use the top dimension implied by the cochains.
        if dimension is None:
            dimension = len(cochains) - 1
        if len(cochains) < dimension + 1:
            raise ValueError(f'Not enough cochains passed, '
                             f'expected {dimension + 1}, received {len(cochains)}')
        self.dimension = dimension
        self.cochains = {i: cochains[i] for i in range(dimension + 1)}
        # Convenience aliases for the lowest three dimensions.
        self.nodes = cochains[0]
        self.edges = cochains[1] if dimension >= 1 else None
        self.two_cells = cochains[2] if dimension >= 2 else None
        self.y = y
        self._consolidate()
        return
    def _consolidate(self):
        # Cross-checks (and fills in) each cochain's num_cells_up/num_cells_down
        # against the cell counts of the adjacent cochains in the complex.
        for dim in range(self.dimension+1):
            cochain = self.cochains[dim]
            assert cochain.dim == dim
            if dim < self.dimension:
                upper_cochain = self.cochains[dim + 1]
                num_cells_up = upper_cochain.num_cells
                assert num_cells_up is not None
                if 'num_cells_up' in cochain:
                    assert cochain.num_cells_up == num_cells_up
                else:
                    cochain.num_cells_up = num_cells_up
            if dim > 0:
                lower_cochain = self.cochains[dim - 1]
                num_cells_down = lower_cochain.num_cells
                assert num_cells_down is not None
                if 'num_cells_down' in cochain:
                    assert cochain.num_cells_down == num_cells_down
                else:
                    cochain.num_cells_down = num_cells_down
    def to(self, device, **kwargs):
        """Performs tensor dtype and/or device conversion to cochains and label y, if set."""
        # TODO: handle device conversion for specific attributes via `*keys` parameter
        for dim in range(self.dimension + 1):
            self.cochains[dim] = self.cochains[dim].to(device, **kwargs)
        if self.y is not None:
            self.y = self.y.to(device, **kwargs)
        return self
    def get_cochain_params(self,
                           dim : int,
                           max_dim : int=2,
                           include_top_features=True,
                           include_down_features=True,
                           include_boundary_features=True) -> CochainMessagePassingParams:
        """
        Conveniently constructs all necessary input parameters to perform higher-dim
        message passing on the cochain of specified `dim`.
        Args:
            dim: The dimension from which to extract the parameters
            max_dim: The maximum dimension of interest.
                This is only used in conjunction with include_top_features.
            include_top_features: Whether to include the top features from level max_dim+1.
            include_down_features: Include the features for down adjacency
            include_boundary_features: Include the features for the boundary
        Returns:
            An object of type CochainMessagePassingParams
        """
        if dim in self.cochains:
            cells = self.cochains[dim]
            x = cells.x
            # Add up features
            upper_index, upper_features = None, None
            # We also check that dim+1 does exist in the current complex. This cochain might have been
            # extracted from a higher dimensional complex by a batching operation, and dim+1
            # might not exist anymore even though cells.upper_index is present.
            if cells.upper_index is not None and (dim+1) in self.cochains:
                upper_index = cells.upper_index
                if self.cochains[dim + 1].x is not None and (dim < max_dim or include_top_features):
                    # One feature row per upper adjacency, selected via the shared coboundary ids.
                    upper_features = torch.index_select(self.cochains[dim + 1].x, 0,
                                                        self.cochains[dim].shared_coboundaries)
            # Add down features
            lower_index, lower_features = None, None
            if include_down_features and cells.lower_index is not None:
                lower_index = cells.lower_index
                if dim > 0 and self.cochains[dim - 1].x is not None:
                    # One feature row per lower adjacency, selected via the shared boundary ids.
                    lower_features = torch.index_select(self.cochains[dim - 1].x, 0,
                                                        self.cochains[dim].shared_boundaries)
            # Add boundary features
            boundary_index, boundary_features = None, None
            if include_boundary_features and cells.boundary_index is not None:
                boundary_index = cells.boundary_index
                if dim > 0 and self.cochains[dim - 1].x is not None:
                    boundary_features = self.cochains[dim - 1].x
            inputs = CochainMessagePassingParams(x, upper_index, lower_index,
                                                 up_attr=upper_features, down_attr=lower_features,
                                                 boundary_attr=boundary_features, boundary_index=boundary_index)
        else:
            raise NotImplementedError(
                'Dim {} is not present in the complex or not yet supported.'.format(dim))
        return inputs
    def get_all_cochain_params(self,
                               max_dim:int=2,
                               include_top_features=True,
                               include_down_features=True,
                               include_boundary_features=True) -> List[CochainMessagePassingParams]:
        """Extracts the cochain parameters for message passing on the cochains up to max_dim.
        Args:
            max_dim: The maximum dimension of the complex for which to extract the parameters.
            include_top_features: Whether to include the features from level max_dim+1.
            include_down_features: Include the features for down adjacent cells.
            include_boundary_features: Include the features for the boundary cells.
        Returns:
            A list of elements of type CochainMessagePassingParams.
        """
        all_params = []
        # Never go above the dimension actually present in this complex.
        return_dim = min(max_dim, self.dimension)
        for dim in range(return_dim+1):
            all_params.append(self.get_cochain_params(dim, max_dim=max_dim,
                                                      include_top_features=include_top_features,
                                                      include_down_features=include_down_features,
                                                      include_boundary_features=include_boundary_features))
        return all_params
    def get_labels(self, dim=None):
        """Returns target labels.
        If `dim`==k (integer in [0, self.dimension]) then the labels over k-cells are returned.
        In the case `dim` is None the complex-wise label is returned.
        """
        if dim is None:
            y = self.y
        else:
            if dim in self.cochains:
                y = self.cochains[dim].y
            else:
                raise NotImplementedError(
                    'Dim {} is not present in the complex or not yet supported.'.format(dim))
        return y
    def set_xs(self, xs: List[Tensor]):
        """Sets the features of the cochains to the values in the list"""
        # Fewer feature matrices than cochains is allowed; extras are not.
        assert (self.dimension + 1) >= len(xs)
        for i, x in enumerate(xs):
            self.cochains[i].x = x
    @property
    def keys(self):
        """Returns all names of complex attributes."""
        # Same filtering convention as Cochain.keys: non-None, non-dunder only.
        keys = [key for key in self.__dict__.keys() if self[key] is not None]
        keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']
        return keys
    def __getitem__(self, key):
        """Gets the data of the attribute :obj:`key`."""
        return getattr(self, key, None)
    def __setitem__(self, key, value):
        """Sets the attribute :obj:`key` to :obj:`value`."""
        setattr(self, key, value)
    def __contains__(self, key):
        """Returns :obj:`True`, if the attribute :obj:`key` is present in the data."""
        return key in self.keys
class ComplexBatch(Complex):
    """Class representing a batch of cochain complexes.
    This is stored as a single cochain complex formed of batched cochains.
    Args:
        cochains: A list of cochain batches that will be put together in a complex batch
        dimension: The dimension of the resulting complex.
        y: A tensor of labels for the complexes in the batch.
        num_complexes: The number of complexes in the batch.
    """
    def __init__(self,
                 *cochains: CochainBatch,
                 dimension: int,
                 y: torch.Tensor = None,
                 num_complexes: int = None):
        super(ComplexBatch, self).__init__(*cochains, y=y)
        self.num_complexes = num_complexes
        # Complex.__init__ infers the dimension from len(cochains); override it
        # here since the batch may have been truncated to a smaller max_dim.
        self.dimension = dimension
    @classmethod
    def from_complex_list(cls, data_list: List[Complex], follow_batch=None, max_dim: int = 2):
        """Constructs a ComplexBatch from a list of complexes.
        Args:
            data_list: a list of complexes from which the batch is built.
            follow_batch: creates assignment batch vectors for each key in
                :obj:`follow_batch`. Defaults to no keys.
            max_dim: the maximum cochain dimension considered when constructing the batch.
        Returns:
            A ComplexBatch object.
        """
        # A None sentinel is used instead of a mutable `[]` default argument.
        if follow_batch is None:
            follow_batch = []
        # The batch dimension is the largest dimension present, capped at max_dim.
        dimension = max([data.dimension for data in data_list])
        dimension = min(dimension, max_dim)
        cochains = [list() for _ in range(dimension + 1)]
        label_list = list()
        per_complex_labels = True
        for comp in data_list:
            for dim in range(dimension+1):
                if dim not in comp.cochains:
                    # If a dim-cochain is not present for the current complex, we instantiate one.
                    cochains[dim].append(Cochain(dim=dim))
                    if dim-1 in comp.cochains:
                        # If the cochain below exists in the complex, we need to add the number of
                        # boundaries to the newly initialised complex, otherwise batching will not work.
                        cochains[dim][-1].num_cells_down = comp.cochains[dim - 1].num_cells
                else:
                    cochains[dim].append(comp.cochains[dim])
            # A batch-level y is only produced if *every* complex carries a label.
            per_complex_labels &= comp.y is not None
            if per_complex_labels:
                label_list.append(comp.y)
        batched_cochains = [CochainBatch.from_cochain_list(cochain_list, follow_batch=follow_batch)
                            for cochain_list in cochains]
        y = None if not per_complex_labels else torch.cat(label_list, 0)
        batch = cls(*batched_cochains, y=y, num_complexes=len(data_list), dimension=dimension)
        return batch
| 30,723 | 41.145405 | 110 | py |
cwn | cwn-main/data/test_tu_utils.py | import pytest
import os
import numpy as np
import torch
import random
from data.tu_utils import get_fold_indices, load_data, S2V_to_PyG
from torch_geometric.utils import degree
from definitions import ROOT_DIR
@pytest.fixture
def imdbbinary_graphs():
    """Fixture: IMDBBINARY graphs as PyG Data objects (third load_data arg True;
    presumably enables degree-as-tag features -- confirm against load_data)."""
    data, num_classes = load_data(os.path.join(ROOT_DIR, 'datasets', 'IMDBBINARY', 'raw'), 'IMDBBINARY', True)
    graph_list = [S2V_to_PyG(datum) for datum in data]
    return graph_list
@pytest.fixture
def imdbbinary_nonattributed_graphs():
    """Fixture: IMDBBINARY graphs loaded with the third load_data arg False
    (presumably disables degree-as-tag features -- confirm against load_data)."""
    data, num_classes = load_data(os.path.join(ROOT_DIR, 'datasets', 'IMDBBINARY', 'raw'), 'IMDBBINARY', False)
    graph_list = [S2V_to_PyG(datum) for datum in data]
    return graph_list
@pytest.fixture
def proteins_graphs():
    """Fixture: PROTEINS graphs as PyG Data objects (third load_data arg True;
    presumably enables degree-as-tag features -- confirm against load_data)."""
    data, num_classes = load_data(os.path.join(ROOT_DIR, 'datasets', 'PROTEINS', 'raw'), 'PROTEINS', True)
    graph_list = [S2V_to_PyG(datum) for datum in data]
    return graph_list
def validate_degree_as_tag(graphs):
    """Checks that each graph's node features one-hot encode the node degrees.
    The encoding is validated against a codebook built from the sorted set of
    all degrees observed across the given graphs.
    """
    # First pass: collect the degree sequence of every graph and the global degree set.
    observed_degrees = set()
    per_graph_degrees = dict()
    for idx, graph in enumerate(graphs):
        degree_seq = degree(graph.edge_index[0]).numpy().astype(int).tolist()
        observed_degrees.update(degree_seq)
        per_graph_degrees[idx] = degree_seq
    # Codebook: each distinct degree maps to its rank in sorted order.
    encoder = {value: code for code, value in enumerate(sorted(observed_degrees))}
    # Second pass: features must be valid one-hot rows whose argmax matches the encoding.
    for idx, graph in enumerate(graphs):
        feats = graph.x
        _unused_edge_index = graph.edge_index
        assert feats.shape[1] == len(encoder)
        assert torch.equal(torch.sum(feats, 1), torch.ones(feats.shape[0]))
        tags = torch.argmax(feats, 1)
        encoded = torch.LongTensor([encoder[value] for value in per_graph_degrees[idx]])
        assert torch.equal(tags, encoded), '{}\n{}'.format(tags, encoded)
def validate_get_fold_indices(graphs):
    """Checks get_fold_indices is deterministic w.r.t. its own seed/fold arguments,
    robust to the global RNG state, partitions the dataset, and varies across
    consecutive (fold, seed) combinations.
    """
    seeds = [0, 42, 43, 666]
    folds = list(range(10))
    last_train, last_test = None, None
    for fold in folds:
        for seed in seeds:
            # Compute the split twice under two different global RNG states.
            splits = []
            for global_seed in (43, 0):
                torch.manual_seed(global_seed)
                np.random.seed(global_seed)
                random.seed(global_seed)
                splits.append(get_fold_indices(graphs, seed, fold))
            (train_a, test_a), (train_b, test_b) = splits
            # check the splitting procedure is deterministic and robust w.r.t. global seeds
            assert np.all(np.equal(train_a, train_b))
            assert np.all(np.equal(test_a, test_b))
            # check test and train form a partition
            assert len(set(train_a) & set(test_a)) == 0
            assert len(set(train_a) | set(test_a)) == len(graphs)
            # check idxs are different across seeds
            if last_train is not None:
                assert np.any(~np.equal(train_a, last_train))
                assert np.any(~np.equal(test_a, last_test))
            last_train, last_test = train_a, test_a
def validate_constant_scalar_features(graphs):
    """Checks every graph carries a single constant (all-ones) scalar feature per node."""
    for graph in graphs:
        feats = graph.x
        num_nodes = feats.shape[0]
        # Feature dimension must be non-zero...
        assert feats.shape[1]
        # ...and the matrix must equal a (num_nodes, 1) column of ones.
        assert torch.equal(feats, torch.ones(num_nodes, 1))
@pytest.mark.data
def test_get_fold_indices_on_imdbbinary(imdbbinary_graphs):
    """Runs the fold-splitting checks on the IMDBBINARY fixture graphs."""
    validate_get_fold_indices(imdbbinary_graphs)
@pytest.mark.data
def test_degree_as_tag_on_imdbbinary(imdbbinary_graphs):
    """Runs the degree-as-tag feature checks on the IMDBBINARY fixture graphs."""
    validate_degree_as_tag(imdbbinary_graphs)
@pytest.mark.data
def test_constant_scalar_features_on_imdbbinary_without_tags(imdbbinary_nonattributed_graphs):
    """Runs the constant-feature checks on IMDBBINARY loaded without degree tags."""
    validate_constant_scalar_features(imdbbinary_nonattributed_graphs)
@pytest.mark.data
def test_degree_as_tag_on_proteins(proteins_graphs):
    """Runs the degree-as-tag feature checks on the PROTEINS fixture graphs."""
    validate_degree_as_tag(proteins_graphs)
| 3,720 | 32.827273 | 111 | py |
cwn | cwn-main/data/test_batching.py | import torch
import pytest
import itertools
from data.dummy_complexes import (get_house_complex, get_square_complex, get_pyramid_complex,
get_square_dot_complex, get_kite_complex)
from data.complex import ComplexBatch
from data.dummy_complexes import get_testing_complex_list
from data.data_loading import DataLoader, load_dataset
def validate_double_house(batch):
    """Checks a batch of two 'house' complexes against hand-computed expected tensors.
    The second house's indices are offset relative to the first: nodes by 5,
    edges by 6 and two-cells by 1 (see the per-level batch vectors below).
    """
    # --- Expected node-level (dim 0) tensors ---
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4, 5, 6, 5, 8, 6, 7, 7, 8, 7, 9, 8, 9],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3, 6, 5, 8, 5, 7, 6, 8, 7, 9, 7, 9, 8]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4, 6, 6, 9, 9, 7, 7, 8, 8, 11, 11, 10, 10], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected edge-level (dim 1) tensors ---
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5, 8, 10, 8, 11, 10, 11],
                                        [4, 2, 5, 2, 5, 4, 10, 8, 11, 8, 11, 10]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5, 6, 7, 6, 9, 7, 8, 7, 11, 8, 9, 8, 10, 8, 11, 9, 10, 10, 11],
                                        [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4, 7, 6, 9, 6, 8, 7, 11, 7, 9, 8, 10, 8, 11, 8, 10, 9, 11, 10]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4, 6, 6, 5, 5, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 8, 8, 9, 9],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected two-cell-level (dim 2) tensors: one two-cell per house ---
    expected_two_cell_x = torch.tensor([[1], [1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2, 2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0, 1], dtype=torch.long)
    # Nodes: only upper adjacencies exist (nothing lies below dim 0).
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    # Edges: both upper (via two-cells) and lower (via shared nodes) adjacencies.
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # Two-cells: top dimension, so no adjacency structure at all.
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_square_dot_and_square(batch):
    """Checks a batch of a 'square + isolated dot' complex and a 'square' complex.
    Neither complex has two-cells, so edges have no upper adjacency. The second
    complex's indices are offset by 5 nodes / 4 edges relative to the first.
    """
    # --- Expected node-level (dim 0) tensors ---
    expected_node_upper = torch.tensor([ [0, 1, 0, 3, 1, 2, 2, 3, 5, 6, 5, 8, 6, 7, 7, 8],
                                         [1, 0, 3, 0, 2, 1, 3, 2, 6, 5, 8, 5, 7, 6, 8, 7]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 4, 4, 7, 7, 5, 5, 6, 6], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected edge-level (dim 1) tensors: lower adjacencies only ---
    expected_edge_lower = torch.tensor([ [0, 1, 0, 3, 1, 2, 2, 3, 4, 5, 4, 7, 5, 6, 6, 7],
                                         [1, 0, 3, 0, 2, 1, 3, 2, 5, 4, 7, 4, 6, 5, 7, 6]], dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 3, 3, 6, 6, 5, 5, 7, 7, 8, 8],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [1], [2], [3], [4]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1,], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # Nodes: only upper adjacencies exist (nothing lies below dim 0).
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    # Edges: no two-cells above, hence no upper adjacency.
    assert batch.edges.upper_index is None
    assert batch.edges.shared_coboundaries is None
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
def validate_kite_and_house(batch):
    """Validate a batch of [kite complex, house complex].

    Expected tensors are built per complex and concatenated, with the house
    complex's indices shifted by the kite's cell counts (5 nodes, 6 edges,
    2 two-cells).  Checks all three cochain dims of ``batch``.
    """
    kite_node_upper = torch.tensor([[0, 1, 0, 2, 1, 2, 1, 3, 2, 3, 3, 4],
                                    [1, 0, 2, 0, 2, 1, 3, 1, 3, 2, 4, 3]], dtype=torch.long)
    shifted_house_node_upper = 5 + torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
                                                 [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
    expected_node_upper = torch.cat([kite_node_upper, shifted_house_node_upper], 1)
    kite_node_shared_coboundaries = torch.tensor([0, 0, 2, 2, 1, 1, 3, 3, 4, 4, 5, 5], dtype=torch.long)
    shifted_house_node_shared_coboundaries = 6 + torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4], dtype=torch.long)
    expected_node_shared_coboundaries = torch.cat([kite_node_shared_coboundaries, shifted_house_node_shared_coboundaries], 0)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=torch.long)
    # Edge-level upper adjacencies come from two-cells; house edge ids shift by 6.
    kite_edge_upper = torch.tensor([[0, 1, 0, 2, 1, 2, 1, 3, 1, 4, 3, 4],
                                    [1, 0, 2, 0, 2, 1, 3, 1, 4, 1, 4, 3]], dtype=torch.long)
    shifted_house_edge_upper = 6 + torch.tensor([[2, 4, 2, 5, 4, 5],
                                                 [4, 2, 5, 2, 5, 4]], dtype=torch.long)
    expected_edge_upper = torch.cat([kite_edge_upper, shifted_house_edge_upper], 1)
    kite_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    shifted_house_edge_shared_coboundaries = 2 + torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.cat([kite_edge_shared_coboundaries, shifted_house_edge_shared_coboundaries], 0)
    kite_edge_lower = torch.tensor([ [0, 1, 0, 3, 1, 3, 0, 2, 1, 2, 2, 4, 1, 4, 3, 4, 3, 5, 4, 5],
                                     [1, 0, 3, 0, 3, 1, 2, 0, 2, 1, 4, 2, 4, 1, 4, 3, 5, 3, 5, 4]], dtype=torch.long)
    shifted_house_lower = 6 + torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5],
                                            [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4]], dtype=torch.long)
    expected_edge_lower = torch.cat([kite_edge_lower, shifted_house_lower], 1)
    kite_edge_shared_boundaries = torch.tensor([1, 1, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3], dtype=torch.long)
    shifted_house_edge_shared_boundaries = 5 + torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4], dtype=torch.long)
    expected_edge_shared_boundaries = torch.cat([kite_edge_shared_boundaries, shifted_house_edge_shared_boundaries], 0)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    # Two-cells: only the kite's two triangles are lower-adjacent (shared edge).
    expected_two_cell_lower = torch.tensor([[0, 1],
                                            [1, 0]], dtype=torch.long)
    expected_two_cell_shared_boundaries = torch.tensor([1, 1], dtype=torch.long)
    expected_two_cell_x = torch.tensor([[1], [2], [1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2, 2, 2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0, 0, 1], dtype=torch.long)
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # Two-cells are top-dimensional here, so no upper adjacency exists.
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert torch.equal(expected_two_cell_lower, batch.two_cells.lower_index)
    assert torch.equal(expected_two_cell_shared_boundaries, batch.two_cells.shared_boundaries)
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_house_and_square(batch):
    """Validate a batch of [house complex, square complex].

    The square's node indices are shifted by 5 and its edge indices by 6
    (the house's cell counts).  Only the house contributes a two-cell, so
    the batched two-cell cochain has a single cell with no adjacencies.
    """
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4, 5, 6, 5, 8, 6, 7, 7, 8],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3, 6, 5, 8, 5, 7, 6, 8, 7]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4, 6, 6, 9, 9, 7, 7, 8, 8], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # Only the house's triangle gives edge-level upper adjacencies.
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5],
                                        [4, 2, 5, 2, 5, 4]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_lower = torch.tensor([ [0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5, 6, 7, 6, 9, 7, 8, 8, 9],
                                         [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4, 7, 6, 9, 6, 8, 7, 9, 8]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4, 6, 6, 5, 5, 7, 7, 8, 8],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    expected_two_cell_x = torch.tensor([[1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0], dtype=torch.long)
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # Single two-cell in the whole batch => no adjacency structure at dim 2.
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_house_square_house(batch):
    """Validate a batch of [house, square, house] complexes.

    Index shifts accumulate across the three complexes (house: 5 nodes /
    6 edges / 1 two-cell; square: 4 nodes / 4 edges / 0 two-cells), so the
    second house occupies nodes 9-13, edges 10-15 and two-cell 1.
    """
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4, 5, 6, 5, 8, 6, 7, 7, 8, 9, 10, 9, 12, 10, 11, 11, 12, 11, 13, 12, 13],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3, 6, 5, 8, 5, 7, 6, 8, 7, 10, 9, 12, 9, 11, 10, 12, 11, 13, 11, 13, 12]],
                                       dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4, 6, 6, 9, 9, 7, 7, 8, 8, 10, 10, 13, 13, 11, 11, 12, 12, 15, 15, 14, 14],
                                                     dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4], [1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2], dtype=torch.long)
    # Edge-level upper adjacencies come from the two house triangles only.
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5, 12, 14, 12, 15, 14, 15],
                                        [4, 2, 5, 2, 5, 4, 14, 12, 15, 12, 15, 14]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_lower = torch.tensor([ [0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5, 6, 7, 6, 9, 7, 8, 8, 9, 10, 11, 10, 13, 11, 12, 11, 15, 12, 13, 12, 14, 12, 15, 13, 14, 14, 15],
                                         [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4, 7, 6, 9, 6, 8, 7, 9, 8, 11, 10, 13, 10, 12, 11, 15, 11, 13, 12, 14, 12, 15, 12, 14, 13, 15, 14]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4, 6, 6, 5, 5, 7, 7, 8, 8, 10, 10, 9, 9, 11, 11, 11, 11, 12, 12, 12, 12, 11, 11, 12, 12, 13, 13],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4], [1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=torch.long)
    expected_two_cell_x = torch.tensor([[1], [1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2, 2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0, 2], dtype=torch.long)
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # The two two-cells live in different complexes, so no adjacency at dim 2.
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_house_no_batching(batch):
    """Validate a batch containing a single house complex.

    Used when a loader's final batch holds only one complex: all cell
    indices are unshifted and every ``batch`` vector is all zeros.
    """
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5],
                                        [4, 2, 5, 2, 5, 4]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5],
                                        [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_two_cell_x = torch.tensor([[1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0], dtype=torch.long)
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # Single two-cell => no adjacency structure at dim 2.
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def test_double_house_batching():
    """Batching two identical house complexes shifts the second one's cells.

       4         9
      / \       / \
     3---2     8---7
     |   |     |   |
     0---1     5---6

       .         .
      4 5      10 11
     . 2 .     . 8 .
     3   1     9   7
     . 0 .     . 6 .

       .         .
      /0\       /1\
     .---.     .---.
     |   |     |   |
     .---.     .---.
    """
    houses = [get_house_complex(), get_house_complex()]
    validate_double_house(ComplexBatch.from_complex_list(houses))
def test_house_and_square_batching():
    """Batching a house with a square complex concatenates their cochains.

       4
      / \
     3---2     8---7
     |   |     |   |
     0---1     5---6

       .
      4 5
     . 2 .     . 8 .
     3   1     9   7
     . 0 .     . 6 .

       .
      /0\
     .---.     .---.
     |   |     |   |
     .---.     .---.
    """
    pair = [get_house_complex(), get_square_complex()]
    validate_house_and_square(ComplexBatch.from_complex_list(pair))
def test_house_square_house_batching():
    """Batching house + square + house accumulates index shifts per complex.

       4                   13
      / \                  / \
     3---2     8---7     12--11
     |   |     |   |     |   |
     0---1     5---6     9---10

       .                    .
      4 5                 14 15
     . 2 .     . 8 .      . 12.
     3   1     9   7     13   11
     . 0 .     . 6 .      . 10 .

       .                    .
      /0\                  /1\
     .---.     .---.      .---.
     |   |     |   |      |   |
     .---.     .---.      .---.
    """
    triple = [get_house_complex(), get_square_complex(), get_house_complex()]
    validate_house_square_house(ComplexBatch.from_complex_list(triple))
def test_square_dot_square_batching():
    """Batching a square-with-isolated-dot complex with a plain square.

    3---2     8---7
    |   |     |   |
    0---1  4  5---6

    . 2 .     . 6 .
    3   1     7   5
    . 0 .  .  . 4 .

    .---.     .---.
    |   |     |   |
    .---.  .  .---.
    """
    pair = [get_square_dot_complex(), get_square_complex()]
    validate_square_dot_and_square(ComplexBatch.from_complex_list(pair))
def test_kite_house_batching():
    """Batching a kite complex with a house complex.

      2---3---4          9
     / \ /              / \
    0---1              8---7
                       |   |
                       5---6

     . 4 . 5 .          .
     2 1 3            10 11
     . 0 .             . 8 .
                       9   7
                       . 6 .

     .---.---.          .
     /0\1/             /2\
     .---.            .---.
                      |   |
                      .---.
    """
    pair = [get_kite_complex(), get_house_complex()]
    validate_kite_and_house(ComplexBatch.from_complex_list(pair))
def test_data_loader():
    """DataLoader must batch complexes correctly for several list layouts."""
    list_a = [get_house_complex(), get_house_complex(),
              get_house_complex(), get_square_complex()]
    list_b = [get_house_complex(), get_square_complex(),
              get_house_complex(), get_house_complex()]
    list_c = [get_house_complex(), get_square_complex(),
              get_pyramid_complex(), get_pyramid_complex()]
    list_d = [get_square_dot_complex(), get_square_complex(),
              get_kite_complex(), get_house_complex(), get_house_complex()]

    loader_a = DataLoader(list_a, batch_size=2)
    loader_b = DataLoader(list_b, batch_size=3)
    loader_c = DataLoader(list_c, batch_size=3, max_dim=3)
    loader_d = DataLoader(list_d, batch_size=2)

    # Two full batches of two: (house, house) then (house, square).
    seen = 0
    for seen, batch in enumerate(loader_a, start=1):
        if seen == 1:
            validate_double_house(batch)
        elif seen == 2:
            validate_house_and_square(batch)
    assert seen == 2

    # A batch of three followed by a singleton house.
    seen = 0
    for seen, batch in enumerate(loader_b, start=1):
        if seen == 1:
            validate_house_square_house(batch)
        elif seen == 2:
            validate_house_no_batching(batch)
    assert seen == 2

    # Only the batch count is checked for the pyramid-containing list.
    seen = 0
    for _ in loader_c:
        seen += 1
    assert seen == 2

    # Three batches: two pairs plus a trailing singleton house.
    seen = 0
    for seen, batch in enumerate(loader_d, start=1):
        if seen == 1:
            validate_square_dot_and_square(batch)
        elif seen == 2:
            validate_kite_and_house(batch)
        else:
            validate_house_no_batching(batch)
    assert seen == 3
def test_set_for_features_in_batch():
    """ComplexBatch.set_xs must install the given features per dimension."""
    complexes = [get_house_complex(), get_square_complex(), get_house_complex()]
    batch = ComplexBatch.from_complex_list(complexes)

    # One feature column per cell: 14 nodes, 16 edges, 2 two-cells in total.
    node_feats = torch.arange(21, 35, dtype=torch.float).view(14, 1)
    edge_feats = torch.arange(21, 37, dtype=torch.float).view(16, 1)
    two_cell_feats = torch.arange(21, 23, dtype=torch.float).view(2, 1)
    batch.set_xs([node_feats, edge_feats, two_cell_feats])

    assert torch.equal(batch.cochains[0].x, node_feats)
    assert torch.equal(batch.cochains[1].x, edge_feats)
    assert torch.equal(batch.cochains[2].x, two_cell_feats)
def test_set_xs_does_not_mutate_dataset():
    """Batches should be copied, so mutating them must not change the dataset."""
    data_list = get_testing_complex_list()
    loader = DataLoader(data_list, batch_size=5, max_dim=2)

    def snapshot():
        # Concatenate the features of every batch, per dimension.  We track
        # up to dim 3 because the list contains pyramid complexes.
        per_dim = [[] for _ in range(4)]
        for batch in loader:
            for dim in range(batch.dimension + 1):
                per_dim[dim].append(batch.cochains[dim].x)
        return [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
                for chunks in per_dim]

    before = snapshot()

    # Overwrite every batch's features with zeros.
    for batch in loader:
        zeroed = [torch.zeros_like(batch.cochains[dim].x)
                  for dim in range(batch.dimension + 1)]
        batch.set_xs(zeroed)

    # The dataset must be unaffected by the mutation above.
    after = snapshot()
    for dim in range(4):
        if after[dim] is None:
            assert before[dim] is None
        else:
            assert torch.equal(after[dim], before[dim])
def test_batching_returns_the_same_features():
    """Concatenated per-dim features from batches must equal the unbatched ones."""
    data_list = get_testing_complex_list()
    # Try multiple parameters
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    params = itertools.product(bs, dims)
    for batch_size, batch_max_dim, in params:
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        # Gather features dim-by-dim across all batches.
        # NOTE(review): `params` is reassigned below, shadowing the outer
        # product iterator; iteration is unaffected (the for-loop already
        # captured it) but the reuse is confusing.
        batched_x = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            params = batch.get_all_cochain_params()
            assert len(params) <= batch_max_dim+1
            for dim in range(len(params)):
                batched_x[dim].append(params[dim].x)
        batched_xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x[i]) > 0:
                batched_xs[i] = torch.cat(batched_x[i], dim=0)
        # Gather the same features straight from the unbatched complexes.
        x = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            params = complex.get_all_cochain_params()
            for dim in range(min(len(params), batch_max_dim+1)):
                x[dim].append(params[dim].x)
        xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x[i]) > 0:
                xs[i] = torch.cat(x[i], dim=0)
        # Batched and unbatched concatenations must match exactly per dim.
        for i in range(batch_max_dim+1):
            if xs[i] is None or batched_xs[i] is None:
                assert xs[i] == batched_xs[i]
            else:
                assert torch.equal(batched_xs[i], xs[i])
@pytest.mark.data
def test_batching_returns_the_same_features_on_proteins():
    """On PROTEINS (valid split), batched per-dim features equal unbatched ones."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Gather features dim-by-dim across all batches.
    batched_x = [[] for _ in range(batch_max_dim+1)]
    for batch in data_loader:
        params = batch.get_all_cochain_params()
        assert len(params) <= batch_max_dim+1
        for dim in range(len(params)):
            batched_x[dim].append(params[dim].x)
    batched_xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(batched_x[i]) > 0:
            batched_xs[i] = torch.cat(batched_x[i], dim=0)
    # Gather the same features straight from the unbatched complexes.
    x = [[] for _ in range(batch_max_dim+1)]
    for complex in dataset:
        params = complex.get_all_cochain_params()
        for dim in range(min(len(params), batch_max_dim+1)):
            x[dim].append(params[dim].x)
    xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(x[i]) > 0:
            xs[i] = torch.cat(x[i], dim=0)
    # Batched and unbatched concatenations must match exactly per dim.
    for i in range(batch_max_dim+1):
        if xs[i] is None or batched_xs[i] is None:
            assert xs[i] == batched_xs[i]
        else:
            assert torch.equal(batched_xs[i], xs[i])
@pytest.mark.data
def test_batching_returns_the_same_features_on_ring_proteins():
    """Same feature check as above, but on ring-lifted PROTEINS (max_dim=2)."""
    dataset = load_dataset('PROTEINS', max_dim=2, fold=0, init_method='mean',
                           max_ring_size=7)
    assert len(dataset) == 1113
    assert dataset.max_dim == 2
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    # Deliberately larger than the dataset's max_dim (2): dim-3 slots stay None.
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    batched_x = [[] for _ in range(batch_max_dim+1)]
    for batch in data_loader:
        params = batch.get_all_cochain_params()
        assert len(params) <= batch_max_dim+1
        for dim in range(len(params)):
            batched_x[dim].append(params[dim].x)
    batched_xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(batched_x[i]) > 0:
            batched_xs[i] = torch.cat(batched_x[i], dim=0)
    # Gather the same features straight from the unbatched complexes.
    x = [[] for _ in range(batch_max_dim+1)]
    for complex in dataset:
        params = complex.get_all_cochain_params()
        for dim in range(min(len(params), batch_max_dim+1)):
            x[dim].append(params[dim].x)
    xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(x[i]) > 0:
            xs[i] = torch.cat(x[i], dim=0)
    for i in range(batch_max_dim+1):
        if xs[i] is None or batched_xs[i] is None:
            assert xs[i] == batched_xs[i]
        else:
            assert torch.equal(batched_xs[i], xs[i])
@pytest.mark.data
def test_batching_returns_the_same_up_attr_on_proteins():
    """On PROTEINS (valid split), batched `up_attr` tensors equal unbatched ones."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Batched
    batched_x = [[] for _ in range(batch_max_dim+1)]
    for batch in data_loader:
        params = batch.get_all_cochain_params()
        assert len(params) <= batch_max_dim+1
        for dim in range(len(params)):
            if params[dim].kwargs['up_attr'] is not None:
                batched_x[dim].append(params[dim].kwargs['up_attr'])
    batched_xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(batched_x[i]) > 0:
            batched_xs[i] = torch.cat(batched_x[i], dim=0)
    # Un-batched
    x = [[] for _ in range(batch_max_dim+1)]
    for complex in dataset:
        params = complex.get_all_cochain_params()
        for dim in range(min(len(params), batch_max_dim+1)):
            # TODO: Modify test after merging the top_feature branch
            # Right now, the last level cannot have top features
            if params[dim].kwargs['up_attr'] is not None and dim < batch_max_dim:
                x[dim].append(params[dim].kwargs['up_attr'])
    xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(x[i]) > 0:
            xs[i] = torch.cat(x[i], dim=0)
    # Batched and unbatched up_attr concatenations must match per dim.
    for i in range(batch_max_dim+1):
        if xs[i] is None or batched_xs[i] is None:
            assert xs[i] == batched_xs[i]
        else:
            assert torch.equal(xs[i], batched_xs[i])
def test_batching_returns_the_same_up_attr():
    """Batched `up_attr` tensors must equal the unbatched ones, per dimension."""
    data_list = get_testing_complex_list()
    # Try multiple parameters
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    params = itertools.product(bs, dims)
    for batch_size, batch_max_dim, in params:
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        # Batched
        batched_x = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            params = batch.get_all_cochain_params()
            assert len(params) <= batch_max_dim+1
            for dim in range(len(params)):
                if params[dim].kwargs['up_attr'] is not None:
                    batched_x[dim].append(params[dim].kwargs['up_attr'])
        batched_xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x[i]) > 0:
                batched_xs[i] = torch.cat(batched_x[i], dim=0)
        # Un-batched
        x = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            params = complex.get_all_cochain_params()
            for dim in range(min(len(params), batch_max_dim+1)):
                # TODO: Modify test after merging the top_feature branch
                # Right now, the last level cannot have top features
                if params[dim].kwargs['up_attr'] is not None and dim < batch_max_dim:
                    x[dim].append(params[dim].kwargs['up_attr'])
        xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x[i]) > 0:
                xs[i] = torch.cat(x[i], dim=0)
        # Batched and unbatched up_attr concatenations must match per dim.
        for i in range(batch_max_dim+1):
            if xs[i] is None or batched_xs[i] is None:
                assert xs[i] == batched_xs[i]
            else:
                assert torch.equal(xs[i], batched_xs[i])
@pytest.mark.data
def test_batching_returns_the_same_down_attr_on_proteins():
    """On PROTEINS (valid split), batched `down_attr` tensors equal unbatched ones."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Batched
    batched_x = [[] for _ in range(batch_max_dim+1)]
    for batch in data_loader:
        params = batch.get_all_cochain_params()
        assert len(params) <= batch_max_dim+1
        for dim in range(len(params)):
            if params[dim].kwargs['down_attr'] is not None:
                batched_x[dim].append(params[dim].kwargs['down_attr'])
    batched_xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(batched_x[i]) > 0:
            batched_xs[i] = torch.cat(batched_x[i], dim=0)
    # Un-batched
    x = [[] for _ in range(batch_max_dim+1)]
    for complex in dataset:
        params = complex.get_all_cochain_params()
        for dim in range(min(len(params), batch_max_dim+1)):
            if params[dim].kwargs['down_attr'] is not None:
                x[dim].append(params[dim].kwargs['down_attr'])
    xs = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(x[i]) > 0:
            xs[i] = torch.cat(x[i], dim=0)
    # Batched and unbatched down_attr concatenations must match per dim.
    for i in range(batch_max_dim+1):
        if xs[i] is None or batched_xs[i] is None:
            assert xs[i] == batched_xs[i]
        else:
            assert len(xs[i]) == len(batched_xs[i])
            assert torch.equal(xs[i], batched_xs[i])
def test_batching_returns_the_same_down_attr():
    """Batched `down_attr` tensors must equal the unbatched ones, per dimension."""
    data_list = get_testing_complex_list()
    # Try multiple parameters
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    params = itertools.product(bs, dims)
    for batch_size, batch_max_dim, in params:
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        # Batched
        batched_x = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            params = batch.get_all_cochain_params()
            assert len(params) <= batch_max_dim+1
            for dim in range(len(params)):
                if params[dim].kwargs['down_attr'] is not None:
                    batched_x[dim].append(params[dim].kwargs['down_attr'])
        batched_xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x[i]) > 0:
                batched_xs[i] = torch.cat(batched_x[i], dim=0)
        # Un-batched
        x = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            params = complex.get_all_cochain_params()
            for dim in range(min(len(params), batch_max_dim+1)):
                if params[dim].kwargs['down_attr'] is not None:
                    x[dim].append(params[dim].kwargs['down_attr'])
        xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x[i]) > 0:
                xs[i] = torch.cat(x[i], dim=0)
        # Batched and unbatched down_attr concatenations must match per dim.
        for i in range(batch_max_dim+1):
            if xs[i] is None or batched_xs[i] is None:
                assert xs[i] == batched_xs[i]
            else:
                assert len(xs[i]) == len(batched_xs[i])
                assert torch.equal(xs[i], batched_xs[i])
@pytest.mark.data
def test_batching_of_boundary_index_on_proteins():
    """On PROTEINS (valid split), batched boundary_index gathers equal unbatched ones.

    For each dimension, boundary features and cell features are gathered via
    ``boundary_index`` rows 0/1 respectively, then compared between batched
    and unbatched iteration.
    """
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Batched
    batched_x_boundaries = [[] for _ in range(batch_max_dim+1)]
    batched_x_cells = [[] for _ in range(batch_max_dim+1)]
    for batch in data_loader:
        params = batch.get_all_cochain_params()
        assert len(params) <= batch_max_dim+1
        for dim in range(len(params)):
            if params[dim].kwargs['boundary_attr'] is not None:
                assert params[dim].boundary_index is not None
                boundary_attrs = params[dim].kwargs['boundary_attr']
                # Row 0 indexes boundary cells, row 1 indexes the cells themselves.
                batched_x_boundaries[dim].append(
                    torch.index_select(boundary_attrs, 0, params[dim].boundary_index[0]))
                batched_x_cells[dim].append(
                    torch.index_select(params[dim].x, 0, params[dim].boundary_index[1]))
    batched_xs_boundaries = [None for _ in range(batch_max_dim+1)]
    batched_xs_cells = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(batched_x_boundaries[i]) > 0:
            batched_xs_boundaries[i] = torch.cat(batched_x_boundaries[i], dim=0)
        if len(batched_x_cells[i]) > 0:
            batched_xs_cells[i] = torch.cat(batched_x_cells[i], dim=0)
    # Un-batched
    x_boundaries = [[] for _ in range(batch_max_dim+1)]
    x_cells = [[] for _ in range(batch_max_dim+1)]
    for complex in dataset:
        params = complex.get_all_cochain_params()
        for dim in range(min(len(params), batch_max_dim+1)):
            if params[dim].kwargs['boundary_attr'] is not None:
                assert params[dim].boundary_index is not None
                boundary_attrs = params[dim].kwargs['boundary_attr']
                x_boundaries[dim].append(
                    torch.index_select(boundary_attrs, 0, params[dim].boundary_index[0]))
                x_cells[dim].append(
                    torch.index_select(params[dim].x, 0, params[dim].boundary_index[1]))
    xs_boundaries = [None for _ in range(batch_max_dim+1)]
    xs_cells = [None for _ in range(batch_max_dim+1)]
    for i in range(batch_max_dim+1):
        if len(x_boundaries[i]) > 0:
            xs_boundaries[i] = torch.cat(x_boundaries[i], dim=0)
            xs_cells[i] = torch.cat(x_cells[i], dim=0)
    # Batched and unbatched gathers must match per dim.
    for i in range(batch_max_dim+1):
        if xs_boundaries[i] is None or batched_xs_boundaries[i] is None:
            assert xs_boundaries[i] == batched_xs_boundaries[i]
        else:
            assert len(xs_boundaries[i]) == len(batched_xs_boundaries[i])
            assert torch.equal(xs_boundaries[i], batched_xs_boundaries[i])
        if xs_cells[i] is None or batched_xs_cells[i] is None:
            assert xs_cells[i] == batched_xs_cells[i]
        else:
            assert len(xs_cells[i]) == len(batched_xs_cells[i])
            assert torch.equal(xs_cells[i], batched_xs_cells[i])
def test_batching_of_boundary_index():
    """Batched boundary_index gathers must equal the unbatched ones.

    For each dimension, boundary features and cell features are gathered via
    ``boundary_index`` rows 0/1 respectively, then compared between batched
    and unbatched iteration, over several (batch_size, max_dim) settings.
    """
    data_list = get_testing_complex_list()
    # Try multiple parameters
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    params = itertools.product(bs, dims)
    for batch_size, batch_max_dim, in params:
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        # Batched
        batched_x_boundaries = [[] for _ in range(batch_max_dim+1)]
        batched_x_cells = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            params = batch.get_all_cochain_params()
            assert len(params) <= batch_max_dim+1
            for dim in range(len(params)):
                if params[dim].kwargs['boundary_attr'] is not None:
                    assert params[dim].boundary_index is not None
                    boundary_attrs = params[dim].kwargs['boundary_attr']
                    # Row 0 indexes boundary cells, row 1 the cells themselves.
                    batched_x_boundaries[dim].append(
                        torch.index_select(boundary_attrs, 0, params[dim].boundary_index[0]))
                    batched_x_cells[dim].append(
                        torch.index_select(params[dim].x, 0, params[dim].boundary_index[1]))
        batched_xs_boundaries = [None for _ in range(batch_max_dim+1)]
        batched_xs_cells = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x_boundaries[i]) > 0:
                batched_xs_boundaries[i] = torch.cat(batched_x_boundaries[i], dim=0)
            if len(batched_x_cells[i]) > 0:
                batched_xs_cells[i] = torch.cat(batched_x_cells[i], dim=0)
        # Un-batched
        x_boundaries = [[] for _ in range(batch_max_dim+1)]
        x_cells = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            params = complex.get_all_cochain_params()
            for dim in range(min(len(params), batch_max_dim+1)):
                if params[dim].kwargs['boundary_attr'] is not None:
                    assert params[dim].boundary_index is not None
                    boundary_attrs = params[dim].kwargs['boundary_attr']
                    x_boundaries[dim].append(
                        torch.index_select(boundary_attrs, 0, params[dim].boundary_index[0]))
                    x_cells[dim].append(
                        torch.index_select(params[dim].x, 0, params[dim].boundary_index[1]))
        xs_boundaries = [None for _ in range(batch_max_dim+1)]
        xs_cells = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x_boundaries[i]) > 0:
                xs_boundaries[i] = torch.cat(x_boundaries[i], dim=0)
                xs_cells[i] = torch.cat(x_cells[i], dim=0)
        # Batched and unbatched gathers must match per dim.
        for i in range(batch_max_dim+1):
            if xs_boundaries[i] is None or batched_xs_boundaries[i] is None:
                assert xs_boundaries[i] == batched_xs_boundaries[i]
            else:
                assert len(xs_boundaries[i]) == len(batched_xs_boundaries[i])
                assert torch.equal(xs_boundaries[i], batched_xs_boundaries[i])
            if xs_cells[i] is None or batched_xs_cells[i] is None:
                assert xs_cells[i] == batched_xs_cells[i]
            else:
                assert len(xs_cells[i]) == len(batched_xs_cells[i])
                assert torch.equal(xs_cells[i], batched_xs_cells[i])
@pytest.mark.data
def test_data_loader_shuffling():
    """A shuffled DataLoader must yield labels in a different order."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')

    ordered = torch.cat(
        [data.y for data in DataLoader(dataset, batch_size=32)], dim=0)
    shuffled = torch.cat(
        [data.y for data in DataLoader(dataset, batch_size=32, shuffle=True)], dim=0)

    assert list(ordered.size()) == list(shuffled.size())
    # NOTE(review): in principle a shuffle could reproduce the original order,
    # which would make this assertion flaky; the probability is negligible here.
    assert not torch.equal(ordered, shuffled)
@pytest.mark.data
def test_idx_splitting_works():
    """Slicing a dataset by a split index must match fetching items one by one."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    valid_idx = dataset.get_idx_split()['valid']

    sliced_ys = [data.y for data in dataset[valid_idx]]
    fetched_ys = [dataset.get(i).y for i in valid_idx]

    assert torch.equal(torch.cat(sliced_ys, dim=0), torch.cat(fetched_ys, dim=0))
| 46,797 | 43.065913 | 201 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.