code stringlengths 101 5.91M |
|---|
class EDSR(nn.Module):
    """EDSR super-resolution network: shallow head -> residual body -> upsampling tail.

    Args:
        num_channels: output image channels.
        input_channel: channels of the input tensor.
        factor: upsampling scale factor.
        width: feature channels throughout the body.
        depth: number of residual blocks.
        kernel_size: convolution kernel size.
        conv: convolution-layer factory (signature: in_ch, out_ch, kernel_size).
    """

    def __init__(self, num_channels=3, input_channel=64, factor=4, width=64, depth=16, kernel_size=3, conv=default_conv):
        super(EDSR, self).__init__()
        # Removed redundant local aliasing (n_resblock/n_feats/scale and the
        # no-op `kernel_size = kernel_size`) — parameters are used directly.
        act = nn.ReLU()
        # Shallow feature extraction.
        m_head = [conv(input_channel, width, kernel_size)]
        # `depth` residual blocks followed by one fusion convolution.
        m_body = [ResBlock(conv, width, kernel_size, act=act, res_scale=1.0) for _ in range(depth)]
        m_body.append(conv(width, width, kernel_size))
        # Upsample by `factor`, then project to the output channel count.
        m_tail = [Upsampler(conv, factor, width, act=False), conv(width, num_channels, kernel_size)]
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

    def forward(self, x):
        """Forward pass with a global (long) residual connection around the body."""
        x = self.head(x)
        res = self.body(x)
        res += x
        return self.tail(res)

    def load_state_dict(self, state_dict, strict=True):
        """Copy parameters from `state_dict`, tolerating mismatches in `tail`.

        The tail depends on the upsampling factor, so size mismatches and
        unexpected keys there are skipped; anywhere else they raise.
        """
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    # Only the scale-dependent tail may legitimately mismatch.
                    if name.find('tail') == -1:
                        raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
# NOTE(review): the line below looks like a mangled mock decorator, probably
# `@patch('catx.network_module.CATXHaikuNetwork.__abstractmethods__', set())`
# — confirm against the upstream test and restore the decorator syntax.
('catx.network_module.CATXHaikuNetwork.__abstractmethods__', set())
def test_network_module(key: PRNGKey) -> None:
    """Smoke-test that CATXHaikuNetwork builds inside a haiku transform and
    exposes the expected abstract __call__ interface."""
    def _forward() -> None:
        # haiku modules must be instantiated inside a transformed function.
        network = CATXHaikuNetwork(depth=2)  # BUGFIX: was misspelled `netowrk`
        assert hasattr(network, 'depth')
    forward = hk.transform(_forward)
    params = forward.init(rng=key)
    forward.apply(params, key)
    assert ('__call__' in dir(CATXHaikuNetwork))
    assert CATXHaikuNetwork.__call__.__isabstractmethod__
    assert issubclass(CATXHaikuNetwork, hk.Module)
class TransferNet(nn.Module):
    """Domain-adaptation network: backbone -> optional bottleneck -> classifier,
    trained with a classification loss plus a transfer/adaptation loss.

    Args:
        num_class: number of target classes.
        base_net: backbone name passed to `backbones.get_backbone`.
        transfer_loss: adaptation loss type ('mmd', 'lmmd', 'daan', 'bnm', 'adv', ...).
        use_bottleneck: insert a Linear+ReLU projection before the classifier.
        bottleneck_width: projection width when the bottleneck is used.
        max_iter: schedule horizon forwarded to TransferLoss.
    """

    def __init__(self, num_class, base_net='resnet50', transfer_loss='mmd', use_bottleneck=True, bottleneck_width=256, max_iter=1000, **kwargs):
        super(TransferNet, self).__init__()
        self.num_class = num_class
        self.base_network = backbones.get_backbone(base_net)
        self.use_bottleneck = use_bottleneck
        self.transfer_loss = transfer_loss
        if self.use_bottleneck:
            # Project backbone features down before classification.
            bottleneck_list = [nn.Linear(self.base_network.output_num(), bottleneck_width), nn.ReLU()]
            self.bottleneck_layer = nn.Sequential(*bottleneck_list)
            feature_dim = bottleneck_width
        else:
            feature_dim = self.base_network.output_num()
        self.classifier_layer = nn.Linear(feature_dim, num_class)
        transfer_loss_args = {'loss_type': self.transfer_loss, 'max_iter': max_iter, 'num_class': num_class}
        self.adapt_loss = TransferLoss(**transfer_loss_args)
        self.criterion = torch.nn.CrossEntropyLoss()

    def forward(self, source, target, source_label):
        """Return (classification loss on source, transfer loss between domains)."""
        source = self.base_network(source)
        target = self.base_network(target)
        if self.use_bottleneck:
            source = self.bottleneck_layer(source)
            target = self.bottleneck_layer(target)
        source_clf = self.classifier_layer(source)
        clf_loss = self.criterion(source_clf, source_label)
        # Some losses need extra inputs (labels / logits); assemble them per type.
        kwargs = {}
        if self.transfer_loss == 'lmmd':
            kwargs['source_label'] = source_label
            target_clf = self.classifier_layer(target)
            kwargs['target_logits'] = torch.nn.functional.softmax(target_clf, dim=1)
        elif self.transfer_loss == 'daan':
            # Reuse source_clf computed above (the original recomputed it).
            kwargs['source_logits'] = torch.nn.functional.softmax(source_clf, dim=1)
            target_clf = self.classifier_layer(target)
            kwargs['target_logits'] = torch.nn.functional.softmax(target_clf, dim=1)
        elif self.transfer_loss == 'bnm':
            # BNM operates on the target softmax instead of raw features.
            tar_clf = self.classifier_layer(target)
            target = nn.Softmax(dim=1)(tar_clf)
        transfer_loss = self.adapt_loss(source, target, **kwargs)
        return (clf_loss, transfer_loss)

    def get_parameters(self, initial_lr=1.0):
        """Parameter groups with scaled learning rates (backbone at 0.1x)."""
        params = [{'params': self.base_network.parameters(), 'lr': (0.1 * initial_lr)}, {'params': self.classifier_layer.parameters(), 'lr': (1.0 * initial_lr)}]
        if self.use_bottleneck:
            params.append({'params': self.bottleneck_layer.parameters(), 'lr': (1.0 * initial_lr)})
        # Adversarial-style losses carry trainable sub-networks of their own.
        if self.transfer_loss == 'adv':
            params.append({'params': self.adapt_loss.loss_func.domain_classifier.parameters(), 'lr': (1.0 * initial_lr)})
        elif self.transfer_loss == 'daan':
            params.append({'params': self.adapt_loss.loss_func.domain_classifier.parameters(), 'lr': (1.0 * initial_lr)})
            params.append({'params': self.adapt_loss.loss_func.local_classifiers.parameters(), 'lr': (1.0 * initial_lr)})
        return params

    def predict(self, x):
        """Class logits for `x`.

        BUGFIX: the original applied `self.bottleneck_layer` unconditionally,
        which raises AttributeError when constructed with use_bottleneck=False.
        """
        features = self.base_network(x)
        if self.use_bottleneck:
            features = self.bottleneck_layer(features)
        return self.classifier_layer(features)

    def epoch_based_processing(self, *args, **kwargs):
        """Per-epoch hook; only DAAN needs its dynamic factor refreshed."""
        if self.transfer_loss == 'daan':
            self.adapt_loss.loss_func.update_dynamic_factor(*args, **kwargs)
def test_gather_commands(ing):
    """gather_commands() yields commands from the ingredient and its sub-ingredients.

    NOTE(review): the `@ing.command` / `@ing2.command` decorators were mangled
    in the source (a stray `.command` line remained); restored so that foo/bar
    actually register as commands — confirm against the upstream sacred test.
    """
    ing2 = Ingredient('other', ingredients=[ing])

    @ing.command
    def foo():
        pass

    @ing2.command
    def bar():
        pass

    commands = list(ing2.gather_commands())
    assert ('other.bar', bar) in commands
    assert ('tickle.foo', foo) in commands
class WNConv2d(nn.Conv2d):
    """Conv2d whose kernel is re-standardized on every forward pass.

    Each output filter has its mean subtracted and is divided by its own
    (flattened) standard deviation plus a small epsilon before convolving.
    NOTE(review): despite the name this is weight *standardization* rather
    than weight normalization — confirm intent against the callers.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(WNConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, x):
        w = self.weight
        # Per-output-filter mean over (in_channels, kH, kW), one dim at a time.
        per_filter_mean = w.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        centered = w - per_filter_mean
        # Per-filter std of the flattened kernel, with epsilon for stability.
        per_filter_std = centered.view(centered.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-05
        standardized = centered / per_filter_std.expand_as(centered)
        return F.conv2d(x, standardized, self.bias, self.stride, self.padding, self.dilation, self.groups)
def fake_environment(time_limit: int=10) -> FakeEnvironment:
    """Factory for a FakeEnvironment configured with the given `time_limit`."""
    return FakeEnvironment(time_limit=time_limit)
def VarGRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, noise_in=None, noise_hidden=None):
    """Single GRU-cell step with optional multiplicative (variational-dropout) noise.

    The three gate pre-activations (reset, input, new) are computed in one
    batched matmul by replicating input/hidden along a leading dimension of 3.

    NOTE(review): assumes w_ih/w_hh are per-gate weight stacks compatible with
    baddbmm (leading dim 3) and b_ih/b_hh are stacked biases; when noise_* is
    supplied it must broadcast against the unsqueezed input/hidden — confirm
    against the caller.
    """
    # Replicate (noise-free) or noise-mask the inputs once per gate.
    input = (input.expand(3, *input.size()) if (noise_in is None) else (input.unsqueeze(0) * noise_in))
    hx = (hidden.expand(3, *hidden.size()) if (noise_hidden is None) else (hidden.unsqueeze(0) * noise_hidden))
    # Batched affine transforms: gi/gh hold all three gate pre-activations.
    gi = torch.baddbmm(b_ih.unsqueeze(1), input, w_ih)
    gh = torch.baddbmm(b_hh.unsqueeze(1), hx, w_hh)
    # Unpack along the leading gate dimension.
    (i_r, i_i, i_n) = gi
    (h_r, h_i, h_n) = gh
    resetgate = torch.sigmoid((i_r + h_r))
    inputgate = torch.sigmoid((i_i + h_i))
    newgate = torch.tanh((i_n + (resetgate * h_n)))
    # Standard GRU blend; note it interpolates with the *un-noised* `hidden`.
    hy = (newgate + (inputgate * (hidden - newgate)))
    return hy
class Decoder(nn.Module):
    """Convolutional decoder head mapping memory features to per-pixel
    object-class logits (spatial size preserved throughout)."""

    def __init__(self, feat_dim, n_obj_classes):
        super(Decoder, self).__init__()

        def conv_bn_relu(c_in, c_out, k, pad):
            # Standard conv -> batchnorm -> relu stack (conv bias folded into BN).
            return [nn.Conv2d(c_in, c_out, kernel_size=k, stride=1, padding=pad, bias=False),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(inplace=True)]

        trunk = (conv_bn_relu(feat_dim, 128, 7, 3)
                 + conv_bn_relu(128, 64, 3, 1)
                 + conv_bn_relu(64, 48, 3, 1))
        self.layer = nn.Sequential(*trunk)
        head = conv_bn_relu(48, 48, 3, 1) + [nn.Conv2d(48, n_obj_classes, kernel_size=1, stride=1, padding=0, bias=True)]
        self.obj_layer = nn.Sequential(*head)

    def forward(self, memory):
        """Return object-class logits for the given feature map."""
        features = self.layer(memory)
        return self.obj_layer(features)
class Benchmark(ABC):
    """
    Abstract benchmark runner measuring inference/training speed and memory for
    a set of models across batch sizes and sequence lengths.

    Subclasses implement the framework-specific `_inference_*` / `_train_*`
    hooks and `framework_version`; `run()` drives the measurement grid,
    prints/saves the requested reports, and returns a BenchmarkOutput.
    """

    args: BenchmarkArguments
    configs: PretrainedConfig
    framework: str

    def __init__(self, args: BenchmarkArguments=None, configs: PretrainedConfig=None):
        self.args = args
        if configs is None:
            # Resolve one pretrained config per benchmarked model name.
            self.config_dict = {model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names}
        else:
            self.config_dict = {model_name: config for (model_name, config) in zip(self.args.model_names, configs)}
        # BUGFIX: os.getenv returns a string (or None); the original compared it
        # to the int 0, which can never be true, so the warning was unreachable.
        if self.args.memory and (os.getenv('TRANSFORMERS_USE_MULTIPROCESSING') == '0'):
            logger.warning("Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing.")
        # Lazily-populated caches backing the properties below.
        self._print_fn = None
        self._framework_version = None
        self._environment_info = None

    @property
    def print_fn(self):
        """Printing callable: plain `print`, or print + append-to-log when
        `args.log_print` is set.

        BUGFIX: restored the missing @property — `run()` invokes
        `self.print_fn(...)` directly, so this must resolve to a callable,
        not a bound method expecting no arguments.
        """
        if self._print_fn is None:
            if self.args.log_print:
                def print_and_log(*args):
                    with open(self.args.log_filename, 'a') as log_file:
                        log_file.write((''.join(args) + '\n'))
                    print(*args)
                self._print_fn = print_and_log
            else:
                self._print_fn = print
        return self._print_fn

    @property
    def framework_version(self):
        """Framework version string; subclass hook. Restored @property — it is
        read as a plain attribute in `environment_info`."""
        pass

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Framework-specific inference-speed measurement; subclass hook."""
        pass

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Framework-specific training-speed measurement; subclass hook."""
        pass

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        """Framework-specific inference-memory measurement; subclass hook."""
        pass

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        """Framework-specific training-memory measurement; subclass hook."""
        pass

    def inference_speed(self, *args, **kwargs) -> float:
        """Run `_inference_speed`, optionally isolated in a separate process."""
        return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)

    def train_speed(self, *args, **kwargs) -> float:
        """Run `_train_speed`, optionally isolated in a separate process."""
        return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)

    def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
        """Run `_inference_memory`, optionally isolated in a separate process."""
        return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)

    def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
        """Run `_train_memory`, optionally isolated in a separate process."""
        return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)

    def run(self):
        """Benchmark every model over the batch-size x sequence-length grid and
        return a BenchmarkOutput with the collected time/memory results."""
        result_dict = {model_name: {} for model_name in self.args.model_names}
        inference_result_time = copy.deepcopy(result_dict)
        inference_result_memory = copy.deepcopy(result_dict)
        train_result_time = copy.deepcopy(result_dict)
        train_result_memory = copy.deepcopy(result_dict)
        for (c, model_name) in enumerate(self.args.model_names):
            self.print_fn(f'{(c + 1)} / {len(self.args.model_names)}')
            model_dict = {'bs': self.args.batch_sizes, 'ss': self.args.sequence_lengths, 'result': {i: {} for i in self.args.batch_sizes}}
            inference_result_time[model_name] = copy.deepcopy(model_dict)
            inference_result_memory[model_name] = copy.deepcopy(model_dict)
            train_result_time[model_name] = copy.deepcopy(model_dict)
            train_result_memory[model_name] = copy.deepcopy(model_dict)
            # Summaries are per-model; only the last (bs, ss) pair's survives.
            inference_summary = train_summary = None
            for batch_size in self.args.batch_sizes:
                for sequence_length in self.args.sequence_lengths:
                    if self.args.inference:
                        if self.args.memory:
                            (memory, inference_summary) = self.inference_memory(model_name, batch_size, sequence_length)
                            inference_result_memory[model_name]['result'][batch_size][sequence_length] = memory
                        if self.args.speed:
                            time = self.inference_speed(model_name, batch_size, sequence_length)
                            inference_result_time[model_name]['result'][batch_size][sequence_length] = time
                    if self.args.training:
                        if self.args.memory:
                            (memory, train_summary) = self.train_memory(model_name, batch_size, sequence_length)
                            train_result_memory[model_name]['result'][batch_size][sequence_length] = memory
                        if self.args.speed:
                            time = self.train_speed(model_name, batch_size, sequence_length)
                            train_result_time[model_name]['result'][batch_size][sequence_length] = time
        if self.args.inference:
            if self.args.speed:
                self.print_fn(((('\n' + (20 * '=')) + 'INFERENCE - SPEED - RESULT'.center(40)) + (20 * '=')))
                self.print_results(inference_result_time, type_label='Time in s')
                self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
                if self.args.is_tpu:
                    self.print_fn('TPU was used for inference. Note that the time after compilation stabilized (after ~10 inferences model.forward(..) calls) was measured.')
            if self.args.memory:
                self.print_fn(((('\n' + (20 * '=')) + 'INFERENCE - MEMORY - RESULT'.center(40)) + (20 * '=')))
                self.print_results(inference_result_memory, type_label='Memory in MB')
                self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
            if self.args.trace_memory_line_by_line:
                self.print_fn(((('\n' + (20 * '=')) + 'INFERENCE - MEMOMRY - LINE BY LINE - SUMMARY'.center(40)) + (20 * '=')))
                self.print_memory_trace_statistics(inference_summary)
        if self.args.training:
            if self.args.speed:
                self.print_fn(((('\n' + (20 * '=')) + 'TRAIN - SPEED - RESULTS'.center(40)) + (20 * '=')))
                self.print_results(train_result_time, 'Time in s')
                self.save_to_csv(train_result_time, self.args.train_time_csv_file)
                if self.args.is_tpu:
                    self.print_fn('TPU was used for training. Note that the time after compilation stabilized (after ~10 train loss=model.forward(...) + loss.backward() calls) was measured.')
            if self.args.memory:
                self.print_fn(((('\n' + (20 * '=')) + 'TRAIN - MEMORY - RESULTS'.center(40)) + (20 * '=')))
                self.print_results(train_result_memory, type_label='Memory in MB')
                self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
            if self.args.trace_memory_line_by_line:
                self.print_fn(((('\n' + (20 * '=')) + 'TRAIN - MEMOMRY - LINE BY LINE - SUMMARY'.center(40)) + (20 * '=')))
                self.print_memory_trace_statistics(train_summary)
        if self.args.env_print:
            self.print_fn(((('\n' + (20 * '=')) + 'ENVIRONMENT INFORMATION'.center(40)) + (20 * '=')))
            self.print_fn(('\n'.join([f'- {prop}: {val}' for (prop, val) in self.environment_info.items()]) + '\n'))
        if self.args.save_to_csv:
            with open(self.args.env_info_csv_file, mode='w', newline='') as csv_file:
                writer = csv.writer(csv_file)
                for (key, value) in self.environment_info.items():
                    writer.writerow([key, value])
        return BenchmarkOutput(inference_result_time, inference_result_memory, train_result_time, train_result_memory, inference_summary, train_summary)

    @property
    def environment_info(self):
        """Host/framework/hardware information dict, computed once and cached.

        BUGFIX: restored the missing @property — `run()` reads
        `self.environment_info.items()` without calling it.
        """
        if self._environment_info is None:
            info = {}
            info['transformers_version'] = version
            info['framework'] = self.framework
            if self.framework == 'PyTorch':
                info['use_torchscript'] = self.args.torchscript
            if self.framework == 'TensorFlow':
                info['eager_mode'] = self.args.eager_mode
                info['use_xla'] = self.args.use_xla
            info['framework_version'] = self.framework_version
            info['python_version'] = platform.python_version()
            info['system'] = platform.system()
            info['cpu'] = platform.processor()
            info['architecture'] = platform.architecture()[0]
            info['date'] = datetime.date(datetime.now())
            info['time'] = datetime.time(datetime.now())
            info['fp16'] = self.args.fp16
            info['use_multiprocessing'] = self.args.do_multi_processing
            info['only_pretrain_model'] = self.args.only_pretrain_model
            if is_psutil_available():
                info['cpu_ram_mb'] = bytes_to_mega_bytes(psutil.virtual_memory().total)
            else:
                logger.warning("Psutil not installed, we won't log available CPU memory. Install psutil (pip install psutil) to log available CPU memory.")
                info['cpu_ram_mb'] = 'N/A'
            info['use_gpu'] = self.args.is_gpu
            if self.args.is_gpu:
                # Only a single device is benchmarked.
                info['num_gpus'] = 1
                if is_py3nvml_available():
                    nvml.nvmlInit()
                    handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                    info['gpu'] = nvml.nvmlDeviceGetName(handle)
                    info['gpu_ram_mb'] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
                    info['gpu_power_watts'] = (nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000)
                    info['gpu_performance_state'] = nvml.nvmlDeviceGetPerformanceState(handle)
                    nvml.nvmlShutdown()
                else:
                    logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to log information about GPU.")
                    info['gpu'] = 'N/A'
                    info['gpu_ram_mb'] = 'N/A'
                    info['gpu_power_watts'] = 'N/A'
                    info['gpu_performance_state'] = 'N/A'
            info['use_tpu'] = self.args.is_tpu
            self._environment_info = info
        return self._environment_info

    def print_results(self, result_dict, type_label):
        """Pretty-print a result table (model / batch size / seq length / value)."""
        self.print_fn((80 * '-'))
        self.print_fn(((('Model Name'.center(30) + 'Batch Size'.center(15)) + 'Seq Length'.center(15)) + type_label.center(15)))
        self.print_fn((80 * '-'))
        for model_name in self.args.model_names:
            for batch_size in result_dict[model_name]['bs']:
                for sequence_length in result_dict[model_name]['ss']:
                    result = result_dict[model_name]['result'][batch_size][sequence_length]
                    if isinstance(result, float):
                        # Round to 3 decimals; display a floor for tiny values.
                        result = (round((1000 * result)) / 1000)
                        result = ('< 0.001' if (result == 0.0) else str(result))
                    else:
                        result = str(result)
                    self.print_fn((model_name[:30].center(30) + str(batch_size).center(15)), str(sequence_length).center(15), result.center(15))
        self.print_fn((80 * '-'))

    def print_memory_trace_statistics(self, summary: MemorySummary):
        """Print line-by-line, top, and bottom memory consumers plus the total."""
        self.print_fn(('\nLine by line memory consumption:\n' + '\n'.join((f'{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}' for state in summary.sequential))))
        self.print_fn(('\nLines with top memory consumption:\n' + '\n'.join((f'=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}' for state in summary.cumulative[:6]))))
        self.print_fn(('\nLines with lowest memory consumption:\n' + '\n'.join((f'=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}' for state in summary.cumulative[(- 6):]))))
        self.print_fn(f'''
Total memory increase: {summary.total}''')

    def save_to_csv(self, result_dict, filename):
        """Write one CSV row per (model, batch size, sequence length) result."""
        if not self.args.save_to_csv:
            return
        self.print_fn('Saving results to csv.')
        with open(filename, mode='w') as csv_file:
            # BUGFIX: the failure message referenced self.model_names (missing
            # attribute); it lives on self.args.
            assert (len(self.args.model_names) > 0), f'At least 1 model should be defined, but got {self.args.model_names}'
            fieldnames = ['model', 'batch_size', 'sequence_length']
            writer = csv.DictWriter(csv_file, fieldnames=(fieldnames + ['result']))
            writer.writeheader()
            for model_name in self.args.model_names:
                result_dict_model = result_dict[model_name]['result']
                for bs in result_dict_model:
                    for ss in result_dict_model[bs]:
                        result_model = result_dict_model[bs][ss]
                        writer.writerow({'model': model_name, 'batch_size': bs, 'sequence_length': ss, 'result': ('{}' if (not isinstance(result_model, float)) else '{:.4f}').format(result_model)})
def save_config(config, path):
    """Serialize `config` to YAML at `path`, converting argparse Namespaces first."""
    cfg = to_dict(config) if isinstance(config, argparse.Namespace) else config
    with open(path, 'w') as out_file:
        yaml.dump(cfg, out_file)
def main():
    """Deployment-evaluation entry point: run an exported (TensorRT or
    ONNXRuntime) text detection/recognition model over the configured test
    dataset and print evaluation metrics on rank 0."""
    args = parse_args()
    if (args.device == 'cpu'):
        # Downstream treats None as "no explicit device".
        args.device = None
    cfg = Config.fromfile(args.model_config)
    # Pick the model wrapper by task type and inference backend.
    if (args.model_type == 'det'):
        if (args.backend == 'TensorRT'):
            model = TensorRTDetector(args.model_file, cfg, 0)
        else:
            model = ONNXRuntimeDetector(args.model_file, cfg, 0)
    elif (args.backend == 'TensorRT'):
        # Any non-'det' model type falls through to the recognizer wrappers.
        model = TensorRTRecognizer(args.model_file, cfg, 0)
    else:
        model = ONNXRuntimeRecognizer(args.model_file, cfg, 0)
    samples_per_gpu = 1
    # Disable test-time recognition augmentation for deterministic evaluation.
    cfg = disable_text_recog_aug_test(cfg)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False)
    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader)
    (rank, _) = get_dist_info()
    if (rank == 0):
        kwargs = {}
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # Strip train-loop-only evaluation options before calling evaluate().
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
_module
class Reformat(object):
    """Pack voxelized lidar results into flat data bundles for the model.

    With `double_flip` enabled, also emits y-flip, x-flip, and double-flip
    bundles for test-time augmentation.
    """

    def __init__(self, **kwargs):
        # Whether __call__ should also build the three flipped bundles.
        self.double_flip = kwargs.get('double_flip', False)

    @staticmethod
    def _bundle(meta, points, voxels):
        """Flatten one (points, voxel dict) pair into a single bundle dict."""
        return dict(metadata=meta, points=points, voxels=voxels['voxels'], shape=voxels['shape'], num_points=voxels['num_points'], num_voxels=voxels['num_voxels'], coordinates=voxels['coordinates'])

    def __call__(self, res, info):
        meta = res['metadata']
        lidar = res['lidar']
        data_bundle = self._bundle(meta, lidar['points'], lidar['voxels'])
        mode = res['mode']
        if mode == 'train':
            # Training bundles additionally carry the supervision targets.
            data_bundle.update(lidar['targets'])
        elif mode == 'val':
            data_bundle.update(dict(metadata=meta))
        if not self.double_flip:
            return (data_bundle, info)
        flipped = [self._bundle(meta, lidar[f'{prefix}_points'], lidar[f'{prefix}_voxels'])
                   for prefix in ('yflip', 'xflip', 'double_flip')]
        return ([data_bundle] + flipped, info)
def traverse_dir(root_dir, extension=('mid', 'MID'), amount=None, str_=None, is_pure=False, verbose=False, is_sort=False, is_ext=True):
    """Recursively collect file paths under `root_dir` whose names end with `extension`.

    Args:
        root_dir: directory to walk.
        extension: suffix (or tuple of suffixes) matched with str.endswith.
        amount: per-directory cap — once reached, the rest of the current
            directory's files are skipped (walking continues elsewhere).
        str_: when given, keep only filenames containing this substring.
        is_pure: strip the `root_dir/` prefix from returned paths.
        verbose: print progress information.
        is_sort: sort the resulting list.
        is_ext: when False, strip the trailing extension from each path.
    """
    if verbose:
        print('[*] Scanning...')
    collected = []
    for dirpath, _, filenames in os.walk(root_dir):
        for name in filenames:
            if not name.endswith(extension):
                continue
            # Stop taking files from this directory once the cap is reached.
            if amount is not None and len(collected) == amount:
                break
            if str_ is not None and str_ not in name:
                continue
            full_path = os.path.join(dirpath, name)
            path = full_path[len(root_dir) + 1:] if is_pure else full_path
            if not is_ext:
                ext = path.split('.')[-1]
                path = path[:-(len(ext) + 1)]
            if verbose:
                print(path)
            collected.append(path)
    if verbose:
        print('Total: %d files' % len(collected))
        print('Done!!!')
    if is_sort:
        collected.sort()
    return collected
def build_optimizer_constructor(cfg):
    """Instantiate an optimizer constructor from `cfg`, checking the local
    registry before falling back to MMCV's, and raising KeyError otherwise."""
    constructor_type = cfg.get('type')
    for registry in (OPTIMIZER_BUILDERS, MMCV_OPTIMIZER_BUILDERS):
        if constructor_type in registry:
            return build_from_cfg(cfg, registry)
    raise KeyError(f'{constructor_type} is not registered in the optimizer builder registry.')
class JsonWriter():
    """Accumulate run metrics into a nested `metrics.json` file.

    On-disk layout: {environment: {task: {algorithm: {seed_<seed>: run_data}}}}
    where run_data holds 'absolute_metrics' plus per-evaluation 'step_<i>' dicts.
    The whole file is rewritten after every update.
    """

    def __init__(self, path: str, algorithm_name: str, task_name: str, environment_name: str, seed: int):
        self.path = path
        self.file_name = 'metrics.json'
        self.run_data = {'absolute_metrics': {}}
        full_path = f'{self.path}/{self.file_name}'
        # Merge into an existing metrics file when present.
        if os.path.isfile(full_path):
            with open(full_path, 'r') as f:
                existing = json.load(f)
        else:
            os.makedirs(self.path, exist_ok=True)
            existing = {}
        self.data = existing
        # Create the nested env -> task -> algorithm path, then attach this run.
        algo_entry = self.data.setdefault(environment_name, {}).setdefault(task_name, {}).setdefault(algorithm_name, {})
        algo_entry[f'seed_{seed}'] = self.run_data
        with open(full_path, 'w') as f:
            json.dump(self.data, f, indent=4)

    def write(self, timestep: int, key: str, value: float, evaluation_step=None) -> None:
        """Record `value` under a 'prefix/metric' key.

        'evaluator/...' entries are grouped per evaluation step (with the
        triggering step_count); 'absolute/...' entries go to absolute_metrics.
        """
        (logging_prefix, *metric_parts) = key.split('/')
        metric_key = '/'.join(metric_parts)
        metrics = {metric_key: [value]}
        if logging_prefix == 'evaluator':
            entry = self.run_data.setdefault(f'step_{evaluation_step}', {})
            entry['step_count'] = timestep
            entry.update(metrics)
        if logging_prefix == 'absolute':
            self.run_data['absolute_metrics'].update(metrics)
        # Persist the full document after each write.
        with open(f'{self.path}/{self.file_name}', 'w') as f:
            json.dump(self.data, f, indent=4)
def safe_exp(value):
    """math.exp that saturates to +inf instead of raising OverflowError."""
    try:
        return math.exp(value)
    except OverflowError:
        return float('inf')
class AffNIST(Dataset):
    """affNIST dataset: affine-transformed MNIST digits stored as 40x40 images.

    Downloads the .mat batch archives, converts them once into training.pt /
    test.pt tensors, and serves (PIL image, target) pairs.
    """

    # NOTE(review): the URL string literal was truncated in the source
    # (`url = '`, a syntax error). This is the canonical affNIST location for
    # these archives — confirm before relying on it.
    url = 'http://www.cs.toronto.edu/~tijmen/affNIST/32x/transformed'
    files = ['training_and_validation_batches', 'test_batches']

    def __init__(self, root, train=True, transform=None):
        self.root = osp.expanduser(osp.normpath(root))
        self.raw_dir = osp.join(self.root, 'raw')
        self.processed_dir = osp.join(self.root, 'processed')
        self.transform = transform
        self.download()
        self.process()
        # training.pt for the train split, test.pt otherwise.
        name = (self.processed_files[0] if train else self.processed_files[1])
        (self.data, self.target) = torch.load(name)

    @property
    def raw_files(self):
        """Absolute paths of the downloaded raw archives.

        BUGFIX: restored the missing @property — this attribute is indexed and
        iterated without parentheses throughout the class.
        """
        return [osp.join(self.raw_dir, f) for f in self.files]

    @property
    def processed_files(self):
        """Absolute paths of the serialized train/test tensor files.

        BUGFIX: restored the missing @property (used as an attribute above).
        """
        folder = self.processed_dir
        return [osp.join(folder, f) for f in ['training.pt', 'test.pt']]

    def __getitem__(self, i):
        (img, target) = (self.data[i], self.target[i])
        # Wrap as a grayscale PIL image so torchvision-style transforms apply.
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        return (img, target)

    def __len__(self):
        return self.data.size(0)

    def download(self):
        """Fetch and extract the zip archives unless already present."""
        if all([osp.exists(f) for f in self.raw_files]):
            return
        for f in self.files:
            path = download_url('{}/{}.zip'.format(self.url, f), self.raw_dir)
            extract_zip(path, self.raw_dir)
            os.unlink(path)

    def process(self):
        """Convert raw .mat batches into training.pt / test.pt exactly once."""
        if all([osp.exists(f) for f in self.processed_files]):
            return
        print('Processing...')
        makedirs(self.processed_dir)
        torch.save(self._process(self.raw_files[0]), self.processed_files[0])
        torch.save(self._process(self.raw_files[1]), self.processed_files[1])
        print('Done!')

    def _process(self, folder):
        """Load every .mat in `folder`; return (images [N, 40, 40], targets [N])."""
        (data, target) = ([], [])
        for f in sorted(glob.glob('{}/*.mat'.format(folder))):
            f = loadmat(f)['affNISTdata'][0][0]
            data += [torch.from_numpy(f[2]).t().view((- 1), 40, 40)]
            target.append(torch.from_numpy(f[5]).squeeze())
        return (torch.cat(data, dim=0).contiguous(), torch.cat(target, dim=0))
_module()
class InferencerLoader(BaseTransform):
    """Load inference inputs given an image path, an in-memory ndarray, or an
    already-prepared results dict, dispatching to the matching loader."""

    def __init__(self, **kwargs) -> None:
        super().__init__()
        # Delegate loaders: one for file paths, one for in-memory arrays.
        self.from_file = TRANSFORMS.build(dict(type='LoadImageFromFile', **kwargs))
        self.from_ndarray = TRANSFORMS.build(dict(type='mmdet.LoadImageFromNDArray', **kwargs))

    def transform(self, results: Union[(str, np.ndarray, dict)]) -> dict:
        """Normalize `results` into loader inputs and run the right loader."""
        if isinstance(results, str):
            packed = dict(img_path=results)
        elif isinstance(results, np.ndarray):
            packed = dict(img=results)
        elif isinstance(results, dict):
            packed = results
        else:
            raise NotImplementedError
        loader = self.from_ndarray if 'img' in packed else self.from_file
        return loader(packed)
class ChannelsLast():
    """Test suite for channels_last memory-format support in the Trainer and
    TorchNano wrappers (single-process and subprocess-distributed).

    NOTE(review): the setUp/test_* naming suggests this was a unittest.TestCase
    (or pytest class); the base class / decorators may have been stripped from
    this source — confirm against the original test module.
    """
    # Shared loaders built once at class-definition time from module-level helpers.
    data_loader = create_data_loader(data_dir, batch_size, num_workers, data_transform, subset=dataset_size)
    test_data_loader = create_test_data_loader(data_dir, batch_size, num_workers, data_transform, subset=dataset_size)
    def setUp(self):
        # Point PYTHONPATH at the project root (three levels up) so spawned
        # subprocesses in the distributed tests can import the package.
        test_dir = os.path.dirname(__file__)
        project_test_dir = os.path.abspath(os.path.join(os.path.join(os.path.join(test_dir, '..'), '..'), '..'))
        os.environ['PYTHONPATH'] = project_test_dir
    def test_trainer_lightning_channels_last(self):
        # Smoke test: fit + test a Lightning model with channels_last enabled.
        model = CustomResNet()
        trainer = Trainer(max_epochs=1, channels_last=True)
        trainer.fit(model, self.data_loader, self.test_data_loader)
        trainer.test(model, self.test_data_loader)
    def test_trainer_channels_last_correctness(self):
        # One SGD step on a fixed batch must produce the exact expected weights,
        # proving channels_last does not change numerics.
        model = ConvModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.25)
        loss = torch.nn.MSELoss()
        pl_module = Trainer.compile(model=model, loss=loss, optimizer=optimizer)
        trainer = Trainer(max_epochs=1, channels_last=True)
        x = torch.Tensor([[[[1, 0]], [[1, 0]]], [[[1, 0]], [[2, 0]]], [[[0, 3]], [[1, 0]]], [[[1, 1]], [[2, 1]]]])
        y = torch.Tensor([[0.0], [1.0], [0.0], [1.0]])
        dataset = torch.utils.data.TensorDataset(x, y)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False)
        trainer.fit(pl_module, data_loader)
        # Expected weights after exactly one full-batch SGD update.
        result = torch.tensor([[[[0.0, (- 1.0)]], [[(- 1.25), 0.5]]]])
        assert pl_module.model.conv1.weight.equal(result)
    def test_trainer_lightning_channels_last_subprocess(self):
        # Same smoke test under the 2-process subprocess distributed backend.
        model = CustomResNet()
        trainer = Trainer(max_epochs=1, num_processes=2, distributed_backend='subprocess', channels_last=True)
        trainer.fit(model, self.data_loader, self.test_data_loader)
        trainer.test(model, self.test_data_loader)
    def test_trainer_channels_last_correctness_subprocess(self):
        # Distributed variant of the correctness check; conv weights are reset
        # to ones so the expected result is identical to the single-process case.
        model = ConvModel()
        model.conv1 = torch.nn.Conv2d(2, 1, (1, 2), bias=False)
        model.conv1.weight.data.fill_(1.0)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.25)
        loss = torch.nn.MSELoss()
        pl_module = Trainer.compile(model=model, loss=loss, optimizer=optimizer)
        trainer = Trainer(max_epochs=1, channels_last=True, distributed_backend='subprocess', num_processes=2)
        x = torch.Tensor([[[[1, 0]], [[1, 0]]], [[[1, 0]], [[2, 0]]], [[[0, 3]], [[1, 0]]], [[[1, 1]], [[2, 1]]]])
        y = torch.Tensor([[0], [1], [0], [1]])
        dataset = torch.utils.data.TensorDataset(x, y)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False)
        trainer.fit(pl_module, data_loader)
        result = torch.tensor([[[[0.0, (- 1.0)]], [[(- 1.25), 0.5]]]])
        assert pl_module.model.conv1.weight.equal(result)
    def test_torch_nano_channels_last(self):
        MyNano(channels_last=True).train()
    def test_torch_nano_channels_last_subprocess(self):
        MyNano(num_processes=2, strategy='subprocess', channels_last=True).train()
    def test_torch_nano_channels_last_correctness(self):
        MyNanoChannelsLastCorrectness(channels_last=True).train()
    def test_torch_nano_channels_last_subprocess_correctness(self):
        MyNanoChannelsLastCorrectness(num_processes=2, strategy='subprocess', channels_last=True).train()
def pytest_addoption_shared(parser):
    """Register the shared --make-reports option, guarding against duplicate
    registration when several conftest modules call this hook."""
    option = '--make-reports'
    if option in pytest_opt_registered:
        return
    parser.addoption(option, action='store', default=False, help='generate report files. The value of this option is used as a prefix to report names')
    pytest_opt_registered[option] = 1
def test_decay_period(env):
    """AddGaussianNoise perturbs actions until its sigma decays to min_sigma.

    NOTE(review): assumes each reset() advances the decay schedule by one unit,
    so after decay_period=2 resets sigma hits 0 and actions pass through
    unchanged — confirm against AddGaussianNoise's reset semantics.
    """
    policy = ConstantPolicy(env.action_space.sample())
    exp_policy = AddGaussianNoise(env, policy, max_sigma=1.0, min_sigma=0.0, decay_period=2)
    # Noise still active: exploration action differs from the base action.
    assert (exp_policy.get_action(None)[0] != policy.get_action(None)[0]).all()
    exp_policy.reset()
    assert (exp_policy.get_action(None)[0] != policy.get_action(None)[0]).all()
    exp_policy.reset()
    # Fully decayed: actions match the deterministic base policy exactly.
    assert (exp_policy.get_action(None)[0] == policy.get_action(None)[0]).all()
def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
    """Recursively rebuild `obj`, replacing every _TensorPlaceholder with the
    tensor it indexes in `tensors`; containers are rebuilt, leaves returned as-is.

    NOTE: namedtuples are rebuilt as plain tuples by the tuple branch.
    """
    if isinstance(obj, _TensorPlaceholder):
        return tensors[obj.index]
    def restore(child):
        return _put_tensors_in_obj(child, tensors)
    if isinstance(obj, dict):
        return {key: restore(val) for (key, val) in obj.items()}
    if isinstance(obj, list):
        return [restore(item) for item in obj]
    if isinstance(obj, tuple):
        return tuple(restore(item) for item in obj)
    if isinstance(obj, set):
        return {restore(item) for item in obj}
    return obj
class TestOuterProductMean(unittest.TestCase):
    """Tests for the OuterProductMean evoformer block: output shape, and
    numerical parity against the reference AlphaFold (haiku/jax) implementation."""
    def test_shape(self):
        # Arbitrary hidden size for the outer-product projection.
        c = 31
        opm = OuterProductMean(consts.c_m, consts.c_z, c)
        m = torch.rand((consts.batch_size, consts.n_seq, consts.n_res, consts.c_m))
        mask = torch.randint(0, 2, size=(consts.batch_size, consts.n_seq, consts.n_res))
        m = opm(m, mask=mask, chunk_size=None)
        # Pair representation: (batch, n_res, n_res, c_z).
        self.assertTrue((m.shape == (consts.batch_size, consts.n_res, consts.n_res, consts.c_z)))
    # NOTE(review): this bare call was most likely the decorator
    # `@_utils.skip_unless_alphafold_installed()` on the next test — confirm
    # against the upstream test module.
    _utils.skip_unless_alphafold_installed()
    def test_opm_compare(self):
        # Run the reference AlphaFold OuterProductMean under haiku with
        # pretrained weights, then compare against the openfold module.
        def run_opm(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_evo = config.model.embeddings_and_evoformer.evoformer
            opm = alphafold.model.modules.OuterProductMean(c_evo.outer_product_mean, config.model.global_config, consts.c_z)
            act = opm(act=msa_act, mask=msa_mask)
            return act
        f = hk.transform(run_opm)
        n_res = consts.n_res
        n_seq = consts.n_seq
        c_m = consts.c_m
        # Scaled random activations; binary mask over (n_seq, n_res).
        msa_act = (np.random.rand(n_seq, n_res, c_m).astype(np.float32) * 100)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res)).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/' + 'evoformer_iteration/outer_product_mean'))
        # Strip the leading ensemble/replica axis from each weight array.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.evoformer.blocks[0].core.outer_product_mean(torch.as_tensor(msa_act).cuda(), chunk_size=4, mask=torch.as_tensor(msa_mask).cuda()).cpu()
        # Allow a small numerical tolerance between jax and torch kernels.
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < 0.0005))
def overfeat(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='overfeat'):
    """Build the OverFeat network as a TF-Slim graph.

    Args:
        inputs: input image batch tensor.
        num_classes: number of classes for the final 1x1 conv ('fc8').
        is_training: enables the dropout layers when True.
        dropout_keep_prob: keep probability for dropout6/dropout7.
        spatial_squeeze: if True, squeeze the 1x1 spatial dims of the logits.
        scope: variable-scope name.

    Returns:
        (net, end_points): logits tensor and dict of intermediate activations.
    """
    with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
        end_points_collection = (sc.name + '_end_points')
        # Collect every conv/fc/pool output into one collection so the
        # end_points dict can be built in a single call afterwards.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], outputs_collections=end_points_collection):
            net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.conv2d(net, 512, [3, 3], scope='conv3')
            net = slim.conv2d(net, 1024, [3, 3], scope='conv4')
            net = slim.conv2d(net, 1024, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Fully-connected layers expressed as convolutions so the network
            # also accepts larger spatial inputs.
            with slim.arg_scope([slim.conv2d], weights_initializer=trunc_normal(0.005), biases_initializer=tf.constant_initializer(0.1)):
                net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
                net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')
                net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
                net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
                # Linear classifier head: no activation, no normalization.
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, biases_initializer=tf.zeros_initializer(), scope='fc8')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                # Drop the 1x1 spatial dims so logits are (batch, num_classes).
                net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[(sc.name + '/fc8')] = net
            return (net, end_points)
def train(ep, sess, lr):
    """Run one training epoch over X_train/Y_train, logging every log_interval steps.

    Args:
        ep: epoch number (unused in the body; kept for the caller's loop).
        sess: active tf.Session holding the graph ops used below.
        lr: learning rate fed to the optimizer for this epoch.
    """
    # batch_size is only read here; total_steps is incremented across epochs.
    global batch_size, total_steps
    total_loss = 0
    start_time = time.time()
    correct = 0
    counter = 0
    for (batch_idx, indices) in index_generator(len(X_train), batch_size):
        x = X_train[indices]
        y = Y_train[indices]
        # Append a trailing channel dimension for the network input.
        x = np.reshape(x, (x.shape + (1,)))
        (_, p, l) = sess.run([update_step, predictions, loss], feed_dict={inputs: x, labels: y, learning_rate: lr})
        # Labels are one-hot; compare predictions against the argmax index.
        correct += np.sum((p == np.argmax(y, axis=1)))
        counter += p.size
        total_loss += l.mean()
        total_steps += 1
        if ((total_steps > 0) and ((total_steps % args.log_interval) == 0)):
            avg_loss = (total_loss / args.log_interval)
            elapsed = (time.time() - start_time)
            print('| Train Steps {:5d} | lr {:2.5f} | ms/batch {:5.2f} | train_loss {:5.8f} | train_accuracy {:5.4f}'.format(total_steps, lr, ((elapsed * 1000) / args.log_interval), avg_loss, ((100.0 * correct) / counter)))
            # Reset the windowed statistics after each log line.
            start_time = time.time()
            total_loss = 0
            correct = 0
            counter = 0
def _get_file(tablename: str, quotechar: str="'") -> pd.DataFrame:
    """Load one Lahman table as a DataFrame, from the zip if available,
    otherwise from the local cache directory."""
    lahman_zip = get_lahman_zip()
    member = f'{base_string}/{tablename}'
    if lahman_zip is None:
        source = f'{path.join(cache.config.cache_directory, member)}'
    else:
        source = lahman_zip.open(member)
    return pd.read_csv(source, header=0, sep=',', quotechar=quotechar)
class HPOConfig():
    """Configuration bundle for a hyper-parameter-optimization run."""

    def __init__(self, search_space, searcher='xgb', higher_is_better=True, loss_type='reg', min_train_samples=10, seed=42):
        """Store the HPO settings verbatim.

        Args:
            search_space: description of the space to search over.
            searcher: search-algorithm identifier (default 'xgb').
            higher_is_better: whether larger objective values are better.
            loss_type: objective type tag (default 'reg').
            min_train_samples: minimum samples before the searcher trains.
            seed: RNG seed for reproducibility.
        """
        self.seed = seed
        self.min_train_samples = min_train_samples
        self.loss_type = loss_type
        self.higher_is_better = higher_is_better
        self.searcher = searcher
        self.search_space = search_space
class FlaxTimestepEmbedding(nn.Module):
    """Two-layer MLP (Dense -> silu -> Dense) over timestep embeddings."""
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    def __call__(self, temb):
        """Project `temb` through the embedding MLP and return the result."""
        hidden = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        activated = nn.silu(hidden)
        return nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(activated)
def GetUtteranceGroups(min_duration, merge_within_speakers_only, spk2utt, utt2dur):
    """Group utterances into chunks of at least min_duration seconds.

    First combines utterances within each speaker (via CombineList); unless
    merge_within_speakers_only == 'true', a second pass then merges the
    per-speaker groups across speaker boundaries.

    Args:
        min_duration: minimum duration (seconds) per combined group.
        merge_within_speakers_only: 'true' to keep speaker boundaries.
        spk2utt: list of (speaker, utterance-id-list) pairs.
        utt2dur: dict mapping utterance id -> duration in seconds.

    Returns:
        List of utterance-id groups.
    """
    utt_groups = []
    group_durations = []
    for i in range(len(spk2utt)):
        (spk, utts) = spk2utt[i]
        durations = []
        for utt in utts:
            try:
                durations.append(utt2dur[utt])
            except KeyError:
                # Fix: was a bare `except:`, which also swallowed unrelated
                # errors (including KeyboardInterrupt/SystemExit).
                sys.exit('choose_utts_to_combine.py: no duration available in utt2dur file {0} for utterance {1}'.format(args.utt2dur_in, utt))
        ranges = CombineList(min_duration, durations)
        for (start, end) in ranges:
            utt_groups.append([utts[i] for i in range(start, end)])
            group_durations.append(sum([durations[i] for i in range(start, end)]))
    # Sanity check: combining must preserve total duration (within 0.01%).
    old_dur_sum = sum(utt2dur.values())
    new_dur_sum = sum(group_durations)
    if (abs((old_dur_sum - new_dur_sum)) > (0.0001 * old_dur_sum)):
        print('choose_utts_to_combine.py: large difference in total durations: {0} vs {1} '.format(old_dur_sum, new_dur_sum), file=sys.stderr)
    if (merge_within_speakers_only == 'true'):
        return utt_groups
    else:
        # Second pass: combine the per-speaker groups across speakers.
        new_utt_groups = []
        ranges = CombineList(min_duration, group_durations)
        for (start, end) in ranges:
            this_group = utt_groups[start]
            for i in range((start + 1), end):
                this_group += utt_groups[i]
            new_utt_groups.append(this_group)
        print('choose_utts_to_combine.py: combined {0} utterances to {1} utterances while respecting speaker boundaries, and then to {2} utterances with merging across speaker boundaries.'.format(len(utt2dur), len(utt_groups), len(new_utt_groups)), file=sys.stderr)
        return new_utt_groups
class MyNanoLoadStateDict(TorchNano):
    """TorchNano smoke test: train, round-trip state_dicts, train again.

    Verifies that weights/optimizer state saved from a `setup`-wrapped model
    can be loaded back into the original modules and that training then
    continues to the expected final weight value.
    """

    def train(self, lr):
        """Train a LinearModel for two one-epoch rounds with a state_dict reload in between."""
        # Inputs/targets chosen so SGD drives fc1.weight to exactly 0.25
        # after the two rounds below (checked by the final assert).
        dataset = TensorDataset(torch.tensor([[0.0], [0.0], [1.0], [1.0]]), torch.tensor([[0.0], [0.0], [0.0], [0.0]]))
        train_loader = DataLoader(dataset=dataset, batch_size=2, shuffle=False)
        loss_func = nn.MSELoss()
        origin_model = LinearModel()
        origin_optimizer = torch.optim.SGD(origin_model.parameters(), lr=lr)
        def train_one_epoch(model, optimizer, loss_func, data_loader):
            # Standard loop; self.backward lets TorchNano handle scaling etc.
            for (X, y) in data_loader:
                optimizer.zero_grad()
                loss = loss_func(model(X), y)
                self.backward(loss)
                optimizer.step()
        (model, optimizer, train_loader) = self.setup(origin_model, origin_optimizer, train_loader)
        model.train()
        train_one_epoch(model, optimizer, loss_func, train_loader)
        # Round-trip: copy trained state back into the original objects...
        origin_model.load_state_dict(model.state_dict())
        origin_optimizer.load_state_dict(optimizer.state_dict())
        # ...then re-wrap them and continue training.
        (model, optimizer) = self.setup(origin_model, origin_optimizer)
        model.train()
        train_one_epoch(model, optimizer, loss_func, train_loader)
        assert (model.fc1.weight.data == 0.25), f'wrong weights: {model.fc1.weight.data}'
def consume_token(token, line):
    """Remove `token` from the front of `line`, logging if it is not first.

    Returns everything after the first occurrence of `token` in `line`
    (the empty string when the token does not occur).
    """
    leading = line.split(None, 1)[0]
    if leading != token:
        logger.error("Unexpected token, expected '{0}', got '{1}'.".format(token, line.split(None, 1)[0]))
    return line.partition(token)[2]
class answer_json():
    """Accumulates VQA-style answers as JSON-serializable records."""

    def __init__(self):
        # One {'question_id': ..., 'answer': ...} dict per recorded answer.
        self.answers = []

    def add(self, ques_id, ans):
        """Append one (question id, answer) record to the list."""
        self.answers.append({'question_id': ques_id, 'answer': ans})
class DogGenHillclimberParts():
    """Bag of collaborators needed to run DogGen hill-climbing.

    NOTE(review): this class holds only bare field annotations, so it is
    presumably meant to carry a @dataclass decorator (possibly lost
    upstream) — confirm against version control.
    """
    # The DogGen generative model being optimized.
    model: doggen.DogGen
    # Objective evaluator used to score candidates.
    scorer: opt_utils.PropertyEvaluator
    # Allowed reactants (set for O(1) membership checks).
    reactant_vocab_set: typing.Set[str]
    # Source of randomness for sampling decisions.
    rng: np.random.RandomState
    # Callable that builds dataloaders over candidate batches.
    dataloader_factory: typing.Callable
    # Callable that readies a raw batch for the model/loss.
    prepare_batch: typing.Callable
    # Training loss callable.
    loss_fn: typing.Callable
    # Torch device (or device string) used for computation.
    device: typing.Union[(str, torch.device)]
def actor_loss(imag_states, actions, av_actions, old_policy, advantage, actor, ent_weight):
    """PPO-style actor loss on imagined rollouts.

    Args:
        imag_states: states fed to the actor to obtain fresh logits.
        actions: one-hot actions taken; converted back to indices below.
        av_actions: optional 0/1 availability mask over actions (None = all).
        old_policy: logits of the behavior policy.
        advantage: advantage estimates.
        actor: policy network returning (_, logits).
        ent_weight: entropy bonus coefficient.

    Returns:
        Scalar mean of the PPO loss plus the weighted entropy term.
    """
    (_, new_policy) = actor(imag_states)
    if (av_actions is not None):
        # NOTE(review): `-.0` is negative zero, so this leaves unavailable
        # actions' logits essentially unchanged; a large negative value
        # (e.g. -inf) was presumably intended to mask them out — confirm.
        new_policy[(av_actions == 0)] = (- .0)
    actions = actions.argmax((- 1), keepdim=True)
    # Importance ratio pi_new(a|s) / pi_old(a|s) via log-softmax gather.
    rho = (F.log_softmax(new_policy, dim=(- 1)).gather(2, actions) - F.log_softmax(old_policy, dim=(- 1)).gather(2, actions)).exp()
    (ppo_loss, ent_loss) = calculate_ppo_loss(new_policy, rho, advantage)
    # Log on ~10% of calls to keep wandb traffic cheap.
    if (np.random.randint(10) == 9):
        wandb.log({'Policy/Entropy': ent_loss.mean(), 'Policy/Mean action': actions.float().mean()})
    return (ppo_loss + (ent_loss.unsqueeze((- 1)) * ent_weight)).mean()
def reduce_timeout_pending_node_resource(node: Node):
    """Shrink the CPU/memory request of a long-pending node so it can schedule.

    Returns True when the node should be deleted and relaunched with the
    reduced resources; False when it is released, lacks a creation time,
    requests GPUs (never downsized here), or has not yet timed out.
    """
    now = time.time()
    if (node.is_released or (not node.create_time) or (node.config_resource.gpu_num > 0)):
        return False
    pending_time = (now - node.create_time.timestamp())
    if (pending_time < _dlrover_context.seconds_to_wait_pending_pod):
        return False
    original_cpu = node.config_resource.cpu
    new_cpu = math.ceil((original_cpu / _dlrover_context.factor_to_cut_pending_cpu))
    # Only shrink while staying above the configured minimum CPU.
    if (new_cpu > NodeResourceLimit.MIN_CPU_CORES):
        node.config_resource.cpu = new_cpu
        logger.info('Pod %s pending time %s beyonds %s.Delete and relaunch it with CPU %s', node.name, pending_time, _dlrover_context.seconds_to_wait_pending_pod, new_cpu)
    original_memory = node.config_resource.memory
    new_memory = math.ceil((original_memory / _dlrover_context.factor_to_cut_pending_mem))
    # Same for memory, independently of whether CPU was reduced.
    if (new_memory > NodeResourceLimit.MIN_MEMORY):
        node.config_resource.memory = new_memory
        logger.info('Pod %s pending time %s beyonds %s.Delete and relaunch it with memory %s', node.name, pending_time, _dlrover_context.seconds_to_wait_pending_pod, new_memory)
    # NOTE(review): returns True even when both reductions were skipped at
    # the minimum limits, still triggering a relaunch — confirm intended.
    return True
def parse_key_info(label, anno_file):
    """Parse one OCR annotation line into (text, BIO entity tag list).

    Lines without '===' are plain text (all 'O' tags). Otherwise the line is
    `text===cls===val===pos===...` with one (class, value, occurrence-index,
    spare) quadruple per key/value pair; each value is located in the text
    and tagged B-/I-<cls>.

    Args:
        label: raw annotation line.
        anno_file: source file name, used only in error messages.

    Returns:
        (text, entity): cleaned text and a per-character BIO tag list.

    Raises:
        RuntimeError / AssertionError on malformed annotations.
    """
    if ('===' not in label):
        text = clean_ocr(label)
        entity = (['O'] * len(text))
        return (text, entity)
    info_ = label.split('===')
    assert (len(info_) >= 5), f'''Invalid anno: {label}
file: {anno_file}'''
    assert (((len(info_) - 1) % 4) == 0), f'''Invalid anno: {label}
file: {anno_file}'''
    text = clean_ocr(info_[0])
    entity = (['O'] * len(text))
    kv_num = ((len(info_) - 1) // 4)
    for idx in range(kv_num):
        try:
            entity_cls = info_[((4 * idx) + 1)].upper()
            entity_val = clean_ocr(info_[((4 * idx) + 2)])
            pos_idx = int(info_[((4 * idx) + 3)])
        except Exception as e:
            print(f'''Invalid anno: {label}
file: {anno_file}''')
            raise RuntimeError(e)
        assert ((entity_cls in PRE_DEFINE_KEY) and (entity_val in text)), f'''Invalid anno: {label}
file: {anno_file}'''
        # Locate the pos_idx-th occurrence of the value within the text.
        tmp = text.split(entity_val)
        try:
            st_idx = len(entity_val.join(tmp[:(pos_idx + 1)]))
        except Exception as e:
            # Fix: this branch previously dropped into ipdb (debug leftover)
            # and then fell through with `st_idx` undefined (NameError);
            # fail loudly in the same style as the parse errors above.
            print(f'''Invalid anno: {label}
file: {anno_file}''')
            raise RuntimeError(e)
        end_idx = (st_idx + len(entity_val))
        if (end_idx > len(entity)):
            raise RuntimeError(f'''Invalid anno: {label}
file: {anno_file}''')
        # Refuse overlapping annotations: the span must still be all 'O'.
        for _ in range(st_idx, end_idx):
            assert (entity[_] == 'O'), f'''Invalid anno: {label}
file: {anno_file}'''
        entity[st_idx] = f'B-{entity_cls}'
        for _ in range((st_idx + 1), end_idx):
            entity[_] = f'I-{entity_cls}'
    return (text, entity)
class CenterCrop3D(ImagePreprocessing3D):
    """Center-crop a 3D image to (crop_depth, crop_height, crop_width).

    Thin wrapper: forwards the crop sizes to the backend via the
    ImagePreprocessing3D constructor.
    """

    def __init__(self, crop_depth, crop_height, crop_width, bigdl_type='float'):
        # bigdl_type selects the backend numeric type ('float'/'double').
        super(CenterCrop3D, self).__init__(bigdl_type, crop_depth, crop_height, crop_width)
# Fix: the decorator line was garbled to the bare expression `(version='2.0')`,
# which is a syntax error. NOTE(review): restored on the assumption it was
# `@deprecated(version='2.0')` — confirm against version control.
@deprecated(version='2.0')
def strategy_registry(cls):
    """Class decorator: register a TuneStrategy subclass in EXP_STRATEGIES.

    The registry key is the class name, lower-cased, with the mandatory
    'TuneStrategy' suffix stripped; duplicate names are rejected.
    """
    assert cls.__name__.endswith('TuneStrategy'), "The name of subclass of TuneStrategy should end with 'TuneStrategy' substring."
    if (cls.__name__[:(- len('TuneStrategy'))].lower() in EXP_STRATEGIES):
        raise ValueError('Cannot have two strategies with the same name')
    EXP_STRATEGIES[cls.__name__[:(- len('TuneStrategy'))].lower()] = cls
    return cls
def get_bn(channels):
    """Return a batch-norm layer for `channels`.

    Chooses nn.SyncBatchNorm when the enclosing scope's `use_sync_bn` flag
    is set, otherwise plain nn.BatchNorm2d.
    """
    bn_cls = nn.SyncBatchNorm if use_sync_bn else nn.BatchNorm2d
    return bn_cls(channels)
def test_linacc_changingacc_xyz_accellsrframe_scalarfuncomegaz():
    """Orbits integrated in a linearly-accelerating, rotationally-accelerating
    frame must match the same orbit integrated in the inertial frame.

    The non-inertial frame has time-dependent translational offset x0(t),
    velocity v0(t), acceleration a0(t), and a z rotation with frequency
    Omega(t) = omega + omegadot*t + omegadotdot*t^2/2. Frame-orbit positions
    and velocities are transformed back to the inertial frame before the
    comparison.
    """
    lp = potential.MiyamotoNagaiPotential(normalize=1.0, a=1.0, b=0.2)
    dp = potential.DehnenBarPotential(omegab=1.8, rb=0.5, Af=0.03)
    diskpot = (lp + dp)
    # Frame origin trajectory: position, velocity, acceleration per axis,
    # consistent by construction (v0 = dx0/dt, a0 = dv0/dt).
    x0 = [(lambda t: ((((- 0.03) * (t ** 2.0)) / 2.0) - (((0.03 * (t ** 3.0)) / 6.0) / 20.0))), (lambda t: (((0.04 * (t ** 2.0)) / 2.0) + (((0.08 * (t ** 3.0)) / 6.0) / 20.0))), (lambda t: (((0.02 * (t ** 2.0)) / 2.0) + (((0.03 * (t ** 3.0)) / 6.0) / 20.0)))]
    v0 = [(lambda t: (((- 0.03) * t) - (((0.03 * (t ** 2.0)) / 2.0) / 20.0))), (lambda t: ((0.04 * t) + (((0.08 * (t ** 2.0)) / 2.0) / 20.0))), (lambda t: ((0.02 * t) + (((0.03 * (t ** 2.0)) / 2.0) / 20.0)))]
    a0 = [(lambda t: ((- 0.03) - ((0.03 * t) / 20.0))), (lambda t: (0.04 + ((0.08 * t) / 20.0))), (lambda t: (0.02 + ((0.03 * t) / 20.0)))]
    omega = lp.omegac(1.0)
    omegadot = 0.1
    omegadotdot = 0.01
    # Rotation frequency and its time derivative for the frame force.
    omega_func = (lambda t: ((omega + (omegadot * t)) + ((omegadotdot * (t ** 2.0)) / 2.0)))
    omegadot_func = (lambda t: (omegadot + (omegadotdot * t)))
    framepot = potential.NonInertialFrameForce(x0=x0, v0=v0, a0=a0, Omega=omega_func, Omegadot=omegadot_func)
    diskframepot = (AcceleratingPotentialWrapperPotential(pot=diskpot, x0=x0, omegaz=omega, omegazdot=omegadot, omegazdotdot=omegadotdot) + framepot)
    def check_orbit(method='odeint', tol=1e-09):
        """Integrate in both frames with `method` and compare to `tol`."""
        o = Orbit()
        o.turn_physical_off()
        ts = numpy.linspace(0.0, 20.0, 1001)
        # Reference orbit in the inertial frame.
        o.integrate(ts, diskpot, method=method)
        # Same initial conditions expressed in the rotating frame.
        op = Orbit([o.R(), o.vR(), (o.vT() - (omega * o.R())), o.z(), o.vz(), o.phi()])
        op.integrate(ts, diskframepot, method=method)
        o_xs = o.x(ts)
        o_ys = o.y(ts)
        o_zs = o.z(ts)
        o_vxs = o.vx(ts)
        o_vys = o.vy(ts)
        o_vzs = o.vz(ts)
        # Undo the frame translation, then the accumulated rotation angle.
        op_xs = (op.x(ts) + x0[0](ts))
        op_ys = (op.y(ts) + x0[1](ts))
        op_zs = (op.z(ts) + x0[2](ts))
        (Rp, phip, _) = coords.rect_to_cyl(op_xs, op_ys, op_zs)
        phip += (((omega * ts) + ((omegadot * (ts ** 2.0)) / 2.0)) + ((omegadotdot * (ts ** 3.0)) / 6.0))
        (op_xs, op_ys, _) = coords.cyl_to_rect(Rp, phip, op_zs)
        # Same for velocities, adding back the rotational velocity term.
        op_vxs = (op.vx(ts) + v0[0](ts))
        op_vys = (op.vy(ts) + v0[1](ts))
        op_vzs = (op.vz(ts) + v0[2](ts))
        (vRp, vTp, _) = coords.rect_to_cyl_vec(op_vxs, op_vys, op_vzs, (op.x(ts) + x0[0](ts)), (op.y(ts) + x0[1](ts)), (op.z(ts) + x0[2](ts)))
        vTp += (((omega * Rp) + ((omegadot * ts) * Rp)) + (((omegadotdot * (ts ** 2.0)) / 2.0) * Rp))
        (op_vxs, op_vys, _) = coords.cyl_to_rect_vec(vRp, vTp, op_vzs, phi=phip)
        assert (numpy.amax(numpy.fabs((o_xs - op_xs))) < tol), f'Integrating an orbit in a linearly-accelerating, acceleratingly-rotating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_ys - op_ys))) < tol), f'Integrating an orbit in a linearly-accelerating, acceleratingly-rotating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_zs - op_zs))) < tol), f'Integrating an orbit in a linearly-accelerating, acceleratingly-rotating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_vxs - op_vxs))) < tol), f'Integrating an orbit in a linearly-accelerating, acceleratingly-rotating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_vys - op_vys))) < tol), f'Integrating an orbit in a linearly-accelerating, acceleratingly-rotating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_vzs - op_vzs))) < tol), f'Integrating an orbit in a linearly-accelerating, acceleratingly-rotating frame with constant acceleration does not agree with the equivalent orbit in the inertial frame for method {method}'
    # Looser tolerances for the lower-accuracy integrators.
    check_orbit(method='odeint', tol=1e-05)
    check_orbit(method='dop853', tol=1e-09)
    check_orbit(method='dop853_c', tol=1e-05)
    return None
def _record_scope_outcome(analysis_dict, prefix, key, tup, pred_tuples):
    """File one gold tuple under the `prefix` family of analysis buckets.

    Adds (key, tup) to the base bucket, then classifies the tuple against
    the predicted tuples as correct / wrong-polarity (per match) or
    not-predicted (predicted fields left as None).
    """
    analysis_dict[prefix].add((key, tup))
    matches = get_matching_exp(tup, pred_tuples)
    (_, _, g_exp, g_pol) = tup
    if matches:
        for match in matches:
            (_, _, p_exp, p_pol) = match
            if (g_pol != p_pol):
                analysis_dict[(prefix + '_with_wrong_polarity')].add((key, g_exp, g_pol, p_exp, p_pol))
            else:
                analysis_dict[(prefix + '_correct')].add((key, g_exp, g_pol, p_exp, p_pol))
    else:
        analysis_dict[(prefix + '_not_predicted')].add((key, g_exp, g_pol, None, None))


def perform_analysis(sent_keys, gold_sents, pred_sents, negation_sents, element='Polar_expression'):
    """Bucket gold opinions by negation scope and prediction outcome.

    For every sentence key, each gold opinion tuple is classified as inside
    or outside negation scope (based on its `element` span) and then, against
    the predicted tuples, as correct, wrong-polarity, or not-predicted.
    The two scope branches were previously duplicated verbatim; both now go
    through _record_scope_outcome.

    Returns:
        Dict of eight sets keyed
        '(not_)in_neg_scope[_with_wrong_polarity|_not_predicted|_correct]'.
    """
    analysis_dict = {'in_neg_scope': set(), 'in_neg_scope_with_wrong_polarity': set(), 'in_neg_scope_not_predicted': set(), 'in_neg_scope_correct': set(), 'not_in_neg_scope': set(), 'not_in_neg_scope_with_wrong_polarity': set(), 'not_in_neg_scope_not_predicted': set(), 'not_in_neg_scope_correct': set()}
    for key in sent_keys:
        gold = gold_sents[key]
        pred = pred_sents[key]
        neg = negation_sents[key]
        neg_range = get_neg_range(neg)
        gold_tuples = convert_opinion_to_tuple(gold)
        pred_tuples = convert_opinion_to_tuple(pred)
        for (opinion, tup) in zip(gold['opinions'], gold_tuples):
            prefix = ('in_neg_scope' if in_neg_scope(opinion[element], neg_range) else 'not_in_neg_scope')
            _record_scope_outcome(analysis_dict, prefix, key, tup, pred_tuples)
    return analysis_dict
def log_prior_gaussian(z, Mu=0.0, Sigma=1.0):
    """Per-row log density of z under an isotropic Gaussian N(Mu, Sigma^2).

    Computes the element-wise Gaussian log pdf and sums it over axis 1,
    returning one value per sample.
    """
    norm_const = ((0.5 * np.log((2 * np.pi))) + tf.log(Sigma))
    quadratic = (0.5 * (((z - Mu) / Sigma) ** 2))
    return tf.reduce_sum(((- norm_const) - quadratic), 1)
def check_valid(annots: list[str]) -> bool:
    """Validate a BIO tag sequence.

    Every tag must be 'O', 'B-<type>', or 'I-<type>', and every 'I-' tag
    must continue a span of the same type (preceded by a B-/I- tag with an
    identical type suffix). An implicit leading 'O' pads the sequence.
    """
    allowed_pattern = re.compile('^(O$|B-.+$|I-.+$)')
    padded = (['O'] + annots)
    for tag in padded:
        if allowed_pattern.match(tag) is None:
            return False
    for (prev, cur) in zip(padded, padded[1:]):
        if cur.startswith('I-') and ((prev == 'O') or (prev[2:] != cur[2:])):
            return False
    return True
def test_slog_to_array():
    """array_from_slog must invert the signed-log representation."""
    (expected, slog_repr) = _get_array_and_slog_vals()
    recovered = helpers.array_from_slog(slog_repr)
    assert_pytree_allclose(recovered, expected)
def get_vocab(vocab_root_path, text_min_count):
    """Read the vocabulary list for the given minimum-count threshold.

    Loads `<vocab_root_path>/vocab_new/vocab-<text_min_count>.txt` and
    returns its newline-separated entries as a list.
    """
    vocab_file = os.path.join(vocab_root_path, 'vocab_new', (('vocab-' + str(text_min_count)) + '.txt'))
    with open(vocab_file) as vocab_fh:
        print('geting vocab')
        contents = vocab_fh.read()
    vocab = contents.split('\n')
    print('the length of vocab is: ', len(vocab))
    return vocab
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute `k` to `v` on the `quantizer` submodule of `mod`.

    When `mod` has no attribute named `quantizer`, a warning is logged
    (tagged with `name`) instead of failing.
    """
    target = getattr(mod, quantizer, None)
    if target is None:
        logger.warning(f'{name} has no {quantizer}')
        return
    assert hasattr(target, k)
    setattr(target, k, v)
def eval():
    """Render a 5x10 grid of conditional-GAN digit samples from the latest checkpoint.

    NOTE: shadows the builtin `eval`; renaming would touch external callers.
    """
    with tf.Graph().as_default() as g:
        noise = tf.random.normal(mean=0.0, stddev=1.0, shape=(50, NOISE_DIM))
        step = tf.train.get_or_create_global_step()
        with tf.variable_scope('Generator'):
            # Condition vectors: five copies of the digits 0-9, one-hot encoded.
            one_hot = tf.one_hot(tf.concat(([tf.range(0, 10)] * 5), axis=0), 10)
            fake_img = conditional_generator((noise, one_hot), is_training=False)
            # Rescale generator output to [0, 256) pixel values
            # (presumably tanh output in [-1, 1] — confirm).
            fake_img = ((fake_img * 128.0) + 128.0)
            fake_img = tf.cast(fake_img, tf.uint8)
            tiled = tfgan.eval.image_grid(fake_img, grid_shape=(5, 10), image_shape=(28, 28), num_channels=1)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            # Restore the most recent checkpoint before sampling.
            ckpt = tf.train.latest_checkpoint(MODEL_DIR)
            saver.restore(sess, ckpt)
            (outputs, step_value) = sess.run([tiled, step])
            plt.imsave('./image_{}.png'.format(step_value), np.squeeze(outputs), cmap='gray')
class MyDataParallel(torch_geometric.nn.DataParallel):
    """DataParallel wrapper that forwards attribute access to the wrapped module.

    Lets callers write `wrapper.some_attr` instead of `wrapper.module.some_attr`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getattr__(self, name):
        # Only reached when normal lookup fails; nn.Module stores submodules
        # in self._modules rather than the instance __dict__, so 'module'
        # must be resolved explicitly to avoid infinite recursion.
        if (name == 'module'):
            return self._modules['module']
        else:
            # Delegate everything else to the wrapped module; `self.module`
            # re-enters this __getattr__ via the branch above.
            return getattr(self.module, name)
class DataTrainingArguments():
    """Arguments controlling the SQuAD data pipeline for training/evaluation.

    NOTE(review): the fields use `dataclasses.field` defaults, so this class
    is presumably meant to carry a `@dataclass` decorator (possibly lost
    upstream) — confirm against version control.
    """
    data_dir: Optional[str] = field(default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'})
    use_tfds: Optional[bool] = field(default=True, metadata={'help': 'If TFDS should be used or not.'})
    max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
    max_query_length: int = field(default=64, metadata={'help': 'The maximum number of tokens for the question. Questions longer than this will be truncated to this length.'})
    max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'})
    null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    # Fix: help text was copy-pasted from null_score_diff_threshold; it now
    # describes what n_best_size actually controls.
    n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate in the nbest_predictions.json output file.'})
    lang_id: int = field(default=0, metadata={'help': 'language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'})
def subsample_indices(indices: List[int], n: int, split_seed=0):
    """Deterministically pick `n` of the given indices, returned sorted.

    A seeded RandomState makes the same inputs always yield the same
    subsample. When n exceeds len(indices), a warning is printed and all
    indices are returned (sorted).
    """
    if (n > len(indices)):
        print('Warning: n == {} > len(indices) == {}'.format(n, len(indices)))
    rng = np.random.RandomState(split_seed)
    shuffled = rng.permutation(indices)
    return sorted(shuffled[:n].tolist())
def create_path_model(context, model_params, ds_train, path_output, train_onehotencoder):
    """Create the model output directory and persist FiLM-related artifacts.

    Args:
        context: config mapping; provides the model name.
        model_params: model config; checked for active FiLM layers.
        ds_train: training dataset; its first sample's input metadata may be saved.
        path_output: root output directory.
        train_onehotencoder: fitted encoder, saved when FiLM layers are active.
    """
    path_model = Path(path_output, context[ConfigKW.MODEL_NAME])
    if (not path_model.is_dir()):
        logger.info(f'Creating model directory: {path_model}')
        path_model.mkdir(parents=True)
        # Persist the one-hot encoder (and metadata dict when present) only
        # when at least one FiLM layer is enabled.
        if ((ModelParamsKW.FILM_LAYERS in model_params) and any(model_params[ModelParamsKW.FILM_LAYERS])):
            joblib.dump(train_onehotencoder, path_model.joinpath('one_hot_encoder.joblib'))
            if (MetadataKW.METADATA_DICT in ds_train[0][MetadataKW.INPUT_METADATA][0]):
                metadata_dict = ds_train[0][MetadataKW.INPUT_METADATA][0][MetadataKW.METADATA_DICT]
                joblib.dump(metadata_dict, path_model.joinpath('metadata_dict.joblib'))
    else:
        # Nothing to do; artifacts are only written on first creation.
        logger.info(f'Model directory already exists: {path_model}')
def test_mnist():
    """End-to-end check of the LFC W1A1/W1A2 classifiers (HW and SW runtimes).

    All four classifier variants must predict class 3 for the bundled MNIST
    test image; the Xlnk buffer manager is reset at the end.
    """
    BNN_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
    test_image_mnist = os.path.join(BNN_ROOT_DIR, 'Test_image', '3.image-idx3-ubyte')
    # Hardware runtime, 1-bit weights / 1-bit activations.
    classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1, 'mnist', bnn.RUNTIME_HW)
    out = classifier.classify_mnist(test_image_mnist)
    print('Inferred class: ', out)
    assert (out == 3), 'MNIST HW test failed for LFCW1A1'
    # Hardware runtime, 2-bit activations.
    classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A2, 'mnist', bnn.RUNTIME_HW)
    out = classifier.classify_mnist(test_image_mnist)
    print('Inferred class: ', out)
    assert (out == 3), 'MNIST HW test failed for LFCW1A2'
    # Software runtime variants of the same two networks.
    w1a1 = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1, 'mnist', bnn.RUNTIME_SW)
    out = w1a1.classify_mnist(test_image_mnist)
    print('Inferred class: ', out)
    assert (out == 3), 'MNIST SW test failed for LFC W1A1'
    w1a2 = bnn.LfcClassifier(bnn.NETWORK_LFCW1A2, 'mnist', bnn.RUNTIME_SW)
    out = w1a2.classify_mnist(test_image_mnist)
    print('Inferred class: ', out)
    assert (out == 3), 'MNIST SW test failed for LFC W1A2'
    print('test finished with no errors!')
    # Reset the contiguous-memory allocator used by the accelerators.
    xlnk = Xlnk()
    xlnk.xlnk_reset()
def allocate(lengths: np.ndarray, numseqs: np.ndarray, lengths_cumsum: np.ndarray, rank: int, c: int, n: int):
    """Pack sample lengths into batches of n bins of capacity c, keeping one bin per rank.

    Repeatedly binary-searches for the largest prefix of the remaining
    samples that the FFD check says fits into n bins, materializes that
    packing, and keeps only bin `rank` from each batch.

    Args:
        lengths: per-sample lengths (same order as lengths_cumsum).
        numseqs: per-sample sequence counts, summed per emitted batch.
        lengths_cumsum: cumulative sum of `lengths`.
        rank: which of the n bins this worker keeps.
        c: capacity per bin.
        n: number of bins (world size).

    Returns:
        (this rank's batches, total seq count per batch, consumed length sum,
         total token budget = len(result) * c * n).
    """
    s = 0
    start_index = 0
    result = []
    result_totseqs = []
    while True:
        # Upper bound on the candidate prefix: total length cannot exceed
        # the budget s + c*n.
        l = 1
        r = (1 + np.searchsorted(lengths_cumsum[start_index:], (s + (c * n)), 'right'))
        # Binary search for the largest prefix m with a feasible FFD packing.
        while ((r - l) > 1):
            m = ((l + r) // 2)
            if ffd_check(lengths[start_index:(start_index + m)], c, n):
                l = m
            else:
                r = m
        batch = ffd_with_result(lengths[start_index:(start_index + l)], c, start_index)
        if (len(batch) < n):
            # Not enough full bins left for every rank: drop the remainder.
            break
        start_index += l
        s = lengths_cumsum[(start_index - 1)]
        # Keep only this rank's bin out of the n produced.
        result.append(batch[rank])
        totseq = 0
        for indices in batch:
            for idx in indices:
                totseq += numseqs[idx]
        result_totseqs.append(totseq)
    return (result, result_totseqs, s, ((len(result) * c) * n))
def func_io(file_name, param, out_type):
    """Write the nonzero entries of a 2-D parameter matrix to a text file.

    The file starts with a blank line, a 'num <count>' line, and three more
    blank lines; when out_type == 'two', one ' i  j  value ' row follows per
    nonzero entry (row-major order). Any other out_type writes the header only.

    Args:
        file_name: output path.
        param: 2-D array-like of parameters.
        out_type: 'two' to emit the per-entry rows.
    """
    print(' output interaction = ', file_name)
    (rows, cols) = np.nonzero(param)
    num_param = len(rows)
    # Fix: `with` guarantees the handle is closed even if a write fails
    # (the original opened/closed manually and leaked on error).
    with open(file_name, 'wt') as f:
        f.write(('' + '\n'))
        f.write((('num ' + '{0:8d}'.format(num_param)) + '\n'))
        f.write(('' + '\n'))
        f.write(('' + '\n'))
        f.write(('' + '\n'))
        # Fix: iterate the row/column index arrays together instead of the
        # original manual counter into np.nonzero(param)[1].
        for (all_i, all_j) in zip(rows, cols):
            if (out_type == 'two'):
                tmp_param = param[all_i][all_j]
                f.write((((' {0:8d} '.format(all_i) + ' {0:8d} '.format(all_j)) + ' {0:8f} '.format(tmp_param)) + '\n'))
# NOTE(review): `_module()` looks like the truncated remnant of a registry
# decorator (e.g. `@HEADS.register_module()`) — confirm against upstream.
_module()
class Mask2FormerHead(MaskFormerHead):
    """Mask2Former-style head: a multi-scale pixel decoder plus a
    masked-attention transformer decoder that predicts per-query class
    scores and segmentation masks at every decoder layer.
    """

    def __init__(self, in_channels, feat_channels, out_channels, num_things_classes=80, num_stuff_classes=53, num_queries=100, num_transformer_feat_level=3, pixel_decoder=None, enforce_decoder_input_project=False, transformer_decoder=None, positional_encoding=None, loss_cls=None, loss_mask=None, loss_dice=None, train_cfg=None, test_cfg=None, init_cfg=None, **kwargs):
        """Build the pixel decoder, transformer decoder, query embeddings,
        prediction heads and losses.

        Note: deliberately skips MaskFormerHead.__init__ by invoking the
        grandparent (AnchorFreeHead's parent) initializer.
        """
        super(AnchorFreeHead, self).__init__(init_cfg)
        self.num_things_classes = num_things_classes
        self.num_stuff_classes = num_stuff_classes
        self.num_classes = (self.num_things_classes + self.num_stuff_classes)
        self.num_queries = num_queries
        self.num_transformer_feat_level = num_transformer_feat_level
        self.num_heads = transformer_decoder.transformerlayers.attn_cfgs.num_heads
        self.num_transformer_decoder_layers = transformer_decoder.num_layers
        # The pixel decoder must consume exactly the feature levels fed to
        # the transformer decoder.
        assert (pixel_decoder.encoder.transformerlayers.attn_cfgs.num_levels == num_transformer_feat_level)
        pixel_decoder_ = copy.deepcopy(pixel_decoder)
        pixel_decoder_.update(in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels)
        self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1]
        self.transformer_decoder = build_transformer_layer_sequence(transformer_decoder)
        self.decoder_embed_dims = self.transformer_decoder.embed_dims
        # Per-level 1x1 projections into the decoder width (identity when the
        # widths already match and projection is not enforced).
        self.decoder_input_projs = ModuleList()
        for _ in range(num_transformer_feat_level):
            if ((self.decoder_embed_dims != feat_channels) or enforce_decoder_input_project):
                self.decoder_input_projs.append(Conv2d(feat_channels, self.decoder_embed_dims, kernel_size=1))
            else:
                self.decoder_input_projs.append(nn.Identity())
        self.decoder_positional_encoding = build_positional_encoding(positional_encoding)
        # Learnable query positional embeddings, query features, and
        # per-feature-level embeddings.
        self.query_embed = nn.Embedding(self.num_queries, feat_channels)
        self.query_feat = nn.Embedding(self.num_queries, feat_channels)
        self.level_embed = nn.Embedding(self.num_transformer_feat_level, feat_channels)
        # Classification head (+1 for the no-object class) and mask-embedding MLP.
        self.cls_embed = nn.Linear(feat_channels, (self.num_classes + 1))
        self.mask_embed = nn.Sequential(nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels))
        self.test_cfg = test_cfg
        self.train_cfg = train_cfg
        if train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            self.sampler = build_sampler(self.train_cfg.sampler, context=self)
            # Point-sampling hyperparameters for the mask losses.
            self.num_points = self.train_cfg.get('num_points', 12544)
            self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0)
            self.importance_sample_ratio = self.train_cfg.get('importance_sample_ratio', 0.75)
        self.class_weight = loss_cls.class_weight
        self.loss_cls = build_loss(loss_cls)
        self.loss_mask = build_loss(loss_mask)
        self.loss_dice = build_loss(loss_dice)

    def init_weights(self):
        """Initialize projection convs, the pixel decoder, and decoder weights."""
        for m in self.decoder_input_projs:
            if isinstance(m, Conv2d):
                caffe2_xavier_init(m, bias=0)
        self.pixel_decoder.init_weights()
        for p in self.transformer_decoder.parameters():
            if (p.dim() > 1):
                nn.init.xavier_normal_(p)

    def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, img_metas):
        """Compute assignment-based targets for one image.

        Returns (labels, label_weights, mask_targets, mask_weights,
        pos_inds, neg_inds); unmatched queries get the no-object label.
        """
        num_queries = cls_score.shape[0]
        num_gts = gt_labels.shape[0]
        # Assign on a shared random point sample of predictions and GT masks
        # instead of the full-resolution masks.
        point_coords = torch.rand((1, self.num_points, 2), device=cls_score.device)
        mask_points_pred = point_sample(mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, 1)).squeeze(1)
        gt_points_masks = point_sample(gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, 1)).squeeze(1)
        assign_result = self.assigner.assign(cls_score, mask_points_pred, gt_labels, gt_points_masks, img_metas)
        sampling_result = self.sampler.sample(assign_result, mask_pred, gt_masks)
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        # Default every query to the background/no-object class.
        labels = gt_labels.new_full((self.num_queries,), self.num_classes, dtype=torch.long)
        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
        label_weights = gt_labels.new_ones((self.num_queries,))
        mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]
        # Only positive queries contribute to the mask losses.
        mask_weights = mask_pred.new_zeros((self.num_queries,))
        mask_weights[pos_inds] = 1.0
        return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds)

    def loss_single(self, cls_scores, mask_preds, gt_labels_list, gt_masks_list, img_metas):
        """Compute classification, mask (BCE-style), and dice losses for one
        decoder layer's predictions over the whole batch."""
        num_imgs = cls_scores.size(0)
        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
        mask_preds_list = [mask_preds[i] for i in range(num_imgs)]
        (labels_list, label_weights_list, mask_targets_list, mask_weights_list, num_total_pos, num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, gt_labels_list, gt_masks_list, img_metas)
        labels = torch.stack(labels_list, dim=0)
        label_weights = torch.stack(label_weights_list, dim=0)
        mask_targets = torch.cat(mask_targets_list, dim=0)
        mask_weights = torch.stack(mask_weights_list, dim=0)
        # Flatten batch and query dims for the classification loss.
        cls_scores = cls_scores.flatten(0, 1)
        labels = labels.flatten(0, 1)
        label_weights = label_weights.flatten(0, 1)
        class_weight = cls_scores.new_tensor(self.class_weight)
        loss_cls = self.loss_cls(cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum())
        # Average mask losses over the (distributed-mean) positive count.
        num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos]))
        num_total_masks = max(num_total_masks, 1)
        # Keep only positive-query mask predictions.
        mask_preds = mask_preds[(mask_weights > 0)]
        if (mask_targets.shape[0] == 0):
            # No GT in the batch: zero-valued losses that keep the graph alive.
            loss_dice = mask_preds.sum()
            loss_mask = mask_preds.sum()
            return (loss_cls, loss_mask, loss_dice)
        with torch.no_grad():
            points_coords = get_uncertain_point_coords_with_randomness(mask_preds.unsqueeze(1), None, self.num_points, self.oversample_ratio, self.importance_sample_ratio)
            mask_point_targets = point_sample(mask_targets.unsqueeze(1).float(), points_coords).squeeze(1)
        mask_point_preds = point_sample(mask_preds.unsqueeze(1), points_coords).squeeze(1)
        loss_dice = self.loss_dice(mask_point_preds, mask_point_targets, avg_factor=num_total_masks)
        mask_point_preds = mask_point_preds.reshape((- 1))
        mask_point_targets = mask_point_targets.reshape((- 1))
        loss_mask = self.loss_mask(mask_point_preds, mask_point_targets, avg_factor=(num_total_masks * self.num_points))
        return (loss_cls, loss_mask, loss_dice)

    def forward_head(self, decoder_out, mask_feature, attn_mask_target_size):
        """Predict class scores, masks, and the next layer's attention mask
        from the decoder output of one layer."""
        decoder_out = self.transformer_decoder.post_norm(decoder_out)
        decoder_out = decoder_out.transpose(0, 1)
        cls_pred = self.cls_embed(decoder_out)
        mask_embed = self.mask_embed(decoder_out)
        # Dot each query's mask embedding with the pixel features.
        mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature)
        attn_mask = F.interpolate(mask_pred, attn_mask_target_size, mode='bilinear', align_corners=False)
        # Replicate per attention head, then mask out positions where the
        # predicted mask probability is below 0.5 (masked attention).
        attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat((1, self.num_heads, 1, 1)).flatten(0, 1)
        attn_mask = (attn_mask.sigmoid() < 0.5)
        attn_mask = attn_mask.detach()
        return (cls_pred, mask_pred, attn_mask)

    def forward(self, feats, img_metas):
        """Run the full head; returns per-decoder-layer lists of class and
        mask predictions (layer 0 is the pre-decoder prediction)."""
        batch_size = len(img_metas)
        (mask_features, multi_scale_memorys) = self.pixel_decoder(feats)
        decoder_inputs = []
        decoder_positional_encodings = []
        # Project each feature level, add its level embedding, and build its
        # positional encoding; tensors become (hw, batch, c).
        for i in range(self.num_transformer_feat_level):
            decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])
            decoder_input = decoder_input.flatten(2).permute(2, 0, 1)
            level_embed = self.level_embed.weight[i].view(1, 1, (- 1))
            decoder_input = (decoder_input + level_embed)
            mask = decoder_input.new_zeros(((batch_size,) + multi_scale_memorys[i].shape[(- 2):]), dtype=torch.bool)
            decoder_positional_encoding = self.decoder_positional_encoding(mask)
            decoder_positional_encoding = decoder_positional_encoding.flatten(2).permute(2, 0, 1)
            decoder_inputs.append(decoder_input)
            decoder_positional_encodings.append(decoder_positional_encoding)
        query_feat = self.query_feat.weight.unsqueeze(1).repeat((1, batch_size, 1))
        query_embed = self.query_embed.weight.unsqueeze(1).repeat((1, batch_size, 1))
        cls_pred_list = []
        mask_pred_list = []
        # Initial prediction from the learned queries, before any decoder layer.
        (cls_pred, mask_pred, attn_mask) = self.forward_head(query_feat, mask_features, multi_scale_memorys[0].shape[(- 2):])
        cls_pred_list.append(cls_pred)
        mask_pred_list.append(mask_pred)
        for i in range(self.num_transformer_decoder_layers):
            # Feature levels are cycled round-robin across decoder layers.
            level_idx = (i % self.num_transformer_feat_level)
            # A query whose mask covers every key would make the attention
            # softmax degenerate; unmask such rows entirely.
            attn_mask[torch.where((attn_mask.sum((- 1)) == attn_mask.shape[(- 1)]))] = False
            layer = self.transformer_decoder.layers[i]
            attn_masks = [attn_mask, None]
            query_feat = layer(query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], attn_masks=attn_masks, query_key_padding_mask=None, key_padding_mask=None)
            (cls_pred, mask_pred, attn_mask) = self.forward_head(query_feat, mask_features, multi_scale_memorys[((i + 1) % self.num_transformer_feat_level)].shape[(- 2):])
            cls_pred_list.append(cls_pred)
            mask_pred_list.append(mask_pred)
        return (cls_pred_list, mask_pred_list)
def scan_reform(data):
    """Flatten rows of sentence spans into one example per non-empty sentence.

    Args:
        data: iterable of dicts with keys ``'sentence_span'`` (a sequence of
            ``(tokens, label)`` pairs) and ``'token_ev_labels'`` (a sequence
            aligned index-for-index with ``'sentence_span'``).

    Returns:
        List of dicts with keys ``'sentence_span'`` (the tokens), ``'y'``
        (the span label) and ``'token_ev_labels'`` (aligned token labels).
    """
    xy = []
    for row in data:
        # enumerate replaces the original manual `i = -1; i += 1` counter;
        # the index must advance even for skipped (empty) sentences so it
        # stays aligned with token_ev_labels.
        for i, s in enumerate(row['sentence_span']):
            if len(s[0]) != 0:
                xy.append({'sentence_span': s[0], 'y': s[1], 'token_ev_labels': row['token_ev_labels'][i]})
    return xy
class ModelFedCon_noheader(nn.Module):
    """MOON-style model without a projection head: backbone + linear classifier.

    ``forward`` returns ``(h, h, y)``: the pooled backbone feature twice (for
    interface compatibility with the projection-head variant) plus the class
    logits. ``out_dim`` is accepted but unused in this head-less variant.
    """

    def __init__(self, base_model, out_dim, n_classes, net_configs=None):
        super(ModelFedCon_noheader, self).__init__()
        if base_model == 'resnet18':
            basemodel = models.resnet18(pretrained=False)
            self.features = nn.Sequential(*list(basemodel.children())[:-1])
            num_ftrs = basemodel.fc.in_features
        elif base_model in ('resnet', 'resnet50-cifar10', 'resnet50-cifar100', 'resnet50-smallkernel'):
            basemodel = ResNet50_cifar10()
            self.features = nn.Sequential(*list(basemodel.children())[:-1])
            num_ftrs = basemodel.fc.in_features
        elif base_model == 'resnet18-cifar10':
            basemodel = ResNet18_cifar10()
            self.features = nn.Sequential(*list(basemodel.children())[:-1])
            num_ftrs = basemodel.fc.in_features
        elif base_model == 'mlp':
            self.features = MLP_header(input_dim=net_configs[0], hidden_dims=net_configs[1:-1])
            num_ftrs = net_configs[-2]
        elif base_model == 'simple-cnn':
            self.features = SimpleCNN_header(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=n_classes)
            num_ftrs = 84
        elif base_model == 'simple-cnn-mnist':
            self.features = SimpleCNNMNIST_header(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=n_classes)
            num_ftrs = 84
        else:
            # BUG FIX: an unknown base_model previously fell through and
            # crashed later with a NameError on num_ftrs; fail loudly instead.
            raise ValueError('Unknown base_model: {}'.format(base_model))
        self.l3 = nn.Linear(num_ftrs, n_classes)
        self.num_ftrs = num_ftrs

    def _get_basemodel(self, model_name):
        """Look up ``model_name`` in ``self.model_dict``.

        Raises:
            ValueError: if the name is unknown (or no model_dict is defined).
        """
        try:
            return self.model_dict[model_name]
        except (AttributeError, KeyError):
            # BUG FIX: the original raised a bare string, which is itself a
            # TypeError in Python 3; raise a proper exception instead. The
            # bare `except:` is narrowed to the two failures that can occur.
            raise ValueError('Invalid model name. Check the config file and pass one of: resnet18 or resnet50')

    def forward(self, x):
        """Return (feature, feature, logits) for an input batch x."""
        h = self.features(x)
        h = h.reshape(-1, self.num_ftrs)
        y = self.l3(h)
        return (h, h, y)
def create_optimizer(args, model, filter_bias_and_bn=True):
    """Create an optimizer from ``args.opt`` (timm-style factory).

    The name may carry a ``lookahead_`` prefix (wraps the result in Lookahead)
    and/or a ``fused`` prefix (requires NVIDIA APEX + CUDA).

    Args:
        args: namespace providing ``opt``, ``lr``, ``weight_decay``,
            ``momentum`` and ``opt_eps``.
        model: the model whose parameters will be optimized.
        filter_bias_and_bn: when True and weight decay is active, split the
            parameters via ``add_weight_decay`` so bias/norm params get no decay.

    Returns:
        A configured optimizer instance (possibly wrapped in Lookahead).

    Raises:
        ValueError: if the optimizer name is not recognized.
    """
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if ('adamw' in opt_lower) or ('radam' in opt_lower):
        # NOTE(review): dividing decoupled weight decay by the LR — assumed
        # to be an intentional normalization for AdamW/RAdam; confirm.
        weight_decay /= args.lr
    if weight_decay and filter_bias_and_bn:
        # add_weight_decay returns param groups with decay already assigned,
        # so zero the factory-level value to avoid applying it twice.
        parameters = add_weight_decay(model, weight_decay)
        weight_decay = 0.0
    else:
        parameters = model.parameters()
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    # BUG FIX: opt_split was previously computed only inside the 'fused'
    # branch, so any prefixed non-fused name (e.g. 'lookahead_adam') crashed
    # with a NameError at the Lookahead check below. Split unconditionally.
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower in ('sgd', 'nesterov'):
        optimizer = optim.SGD(parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=True)
    elif opt_lower == 'momentum':
        optimizer = optim.SGD(parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=False)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'adamw':
        optimizer = AdamW(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, lr=args.lr, alpha=0.9, eps=args.opt_eps, momentum=args.momentum, weight_decay=weight_decay)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, lr=args.lr, alpha=0.9, eps=args.opt_eps, momentum=args.momentum, weight_decay=weight_decay)
    elif opt_lower == 'novograd':
        optimizer = NovoGrad(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'fusedsgd':
        optimizer = FusedSGD(parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=True)
    elif opt_lower == 'fusedmomentum':
        optimizer = FusedSGD(parameters, lr=args.lr, momentum=args.momentum, weight_decay=weight_decay, nesterov=False)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, lr=args.lr, adam_w_mode=False, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, lr=args.lr, adam_w_mode=True, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, lr=args.lr, weight_decay=weight_decay, eps=args.opt_eps)
    elif opt_lower == 'fusednovograd':
        optimizer = FusedNovoGrad(parameters, lr=args.lr, betas=(0.95, 0.98), weight_decay=weight_decay, eps=args.opt_eps)
    else:
        # BUG FIX: was `assert (False and 'Invalid optimizer')` which drops
        # the message and disappears under `python -O`; raise explicitly.
        raise ValueError('Invalid optimizer: {}'.format(args.opt))
    if len(opt_split) > 1 and opt_split[0] == 'lookahead':
        optimizer = Lookahead(optimizer)
    return optimizer
class ResClassifier(nn.Module):
    """Classifier head: 2048-d input -> 1000-d bottleneck -> class logits.

    When ``extract`` is True, ``forward`` also returns the bottleneck
    embedding alongside the logits.
    """

    def __init__(self, class_num=12, extract=False, dropout_p=0.5):
        super(ResClassifier, self).__init__()
        bottleneck_layers = [
            nn.Linear(2048, 1000),
            nn.BatchNorm1d(1000, affine=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout_p),
        ]
        self.fc1 = nn.Sequential(*bottleneck_layers)
        self.fc2 = nn.Linear(1000, class_num)
        self.extract = extract
        self.dropout_p = dropout_p

    def forward(self, x):
        embedding = self.fc1(x)
        if self.training:
            # In-place rescale so the activation magnitude during training
            # matches evaluation mode (dropout keep-probability compensation).
            embedding.mul_(math.sqrt(1 - self.dropout_p))
        logit = self.fc2(embedding)
        if self.extract:
            return (embedding, logit)
        return logit
class DQNModel(nn.Module):
    """Dueling DQN: shared conv/MLP trunk with separate value and advantage heads.

    NOTE(review): ``Flatten`` is a project-local module (not ``nn.Flatten``).
    The trunk takes 6-channel input and the first Linear expects the conv
    output to flatten to 9*9*32 — presumably 84x84 frames; confirm against
    the environment wrapper.
    """

    def __init__(self, num_outputs):
        super().__init__()

        def init_weights(m):
            # Xavier-initialize every Linear layer; biases start at zero.
            if (type(m) == nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                m.bias.data.fill_(0)
        self.trunk = nn.Sequential(nn.Conv2d(6, 32, 8, stride=4), nn.ReLU(True), nn.Conv2d(32, 32, 4, stride=2), nn.ReLU(True), nn.Conv2d(32, 32, 1), Flatten(), nn.ReLU(), nn.Linear(((9 ** 2) * 32), 512), nn.ReLU(), nn.Linear(512, 512), nn.ReLU())
        # 'adventage' spelling kept: it is a state-dict key (checkpoint compat).
        self.adventage = nn.Linear(512, num_outputs)
        self.value = nn.Linear(512, 1)
        self.apply(init_weights)

    def forward(self, inputs):
        # Dueling combination: Q(s, a) = V(s) + A(s, a) - mean(A).
        # NOTE(review): the mean here is taken over the whole batch *and*
        # action dimensions; the standard dueling formulation uses
        # mean(dim=1, keepdim=True) — confirm this is intentional.
        features = inputs
        features = self.trunk(features)
        value = self.value(features)
        adventage = self.adventage(features)
        features = ((adventage + value) - adventage.mean())
        return features
def _test_exact_gpr(config: ConfigDense, model: GPR, Xnew: tf.Tensor) -> tf.Tensor:
    """Draw pathwise (Matheron-rule) posterior samples from an exact GPR model.

    Samples are generated shard-by-shard (``config.shard_size`` at a time)
    until ``config.num_samples`` joint prior draws have been corrected into
    posterior draws at the test inputs ``Xnew``.
    """
    (X, y) = model.data
    # Covariance at the training inputs with likelihood noise added to the
    # diagonal, plus its Cholesky factor, reused for every exact update.
    Kyy = model.kernel(X, full_cov=True)
    Kyy = tf.linalg.set_diag(Kyy, (tf.linalg.diag_part(Kyy) + model.likelihood.variance))
    Lyy = tf.linalg.cholesky(Kyy)
    count = 0
    L_joint = None  # Cholesky of the joint (X, Xnew) prior; cached across shards.
    samples = []
    while (count < config.num_samples):
        size = min(config.shard_size, (config.num_samples - count))
        # Joint prior draws at training (f) and test (fnew) locations.
        ((f, fnew), L_joint) = common.sample_joint(model.kernel, X, Xnew, num_samples=size, L=L_joint)
        # Matheron update: correct the prior draws toward the observations y.
        update_fns = exact_update(model.kernel, X, y, (f + model.mean_function(X)), L=Lyy, diag=model.likelihood.variance)
        samples.append((fnew + update_fns(Xnew)))
        count += size
    samples = tf.concat(samples, axis=0)
    # NOTE(review): mean_function is already called unconditionally inside the
    # loop, so this guard looks always-true — confirm whether a None
    # mean_function is actually supported here.
    if (model.mean_function is not None):
        samples += model.mean_function(Xnew)
    return samples
def resnet152(pretrained=False):
    """Build a ResNet-152; optionally load pretrained weights from model_zoo."""
    net = ResNet(Bottleneck, [3, 8, 36, 3])
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
class TrainingSummary():
    """Holds the metadata of a training run and renders it as a Hub model card.

    NOTE(review): in upstream transformers this class is a ``@dataclass`` and
    ``from_trainer``/``from_keras`` are ``@classmethod``s; the decorators
    appear to have been stripped here (``__post_init__`` and the bare ``cls``
    parameters only make sense with them) — confirm before relying on this.
    """
    # Dataclass-style field declarations describing the run.
    model_name: str
    language: Optional[Union[(str, List[str])]] = None
    license: Optional[str] = None
    tags: Optional[Union[(str, List[str])]] = None
    finetuned_from: Optional[str] = None
    tasks: Optional[Union[(str, List[str])]] = None
    dataset: Optional[Union[(str, List[str])]] = None
    dataset_tags: Optional[Union[(str, List[str])]] = None
    dataset_args: Optional[Union[(str, List[str])]] = None
    eval_results: Optional[Dict[(str, float)]] = None
    eval_lines: Optional[List[str]] = None
    hyperparameters: Optional[Dict[(str, Any)]] = None
    source: Optional[str] = 'trainer'

    def __post_init__(self):
        """Backfill ``license`` from the Hub tags of the base checkpoint."""
        # Only query the Hub when online and a base checkpoint name is known.
        if ((self.license is None) and (not is_offline_mode()) and (self.finetuned_from is not None) and (len(self.finetuned_from) > 0)):
            try:
                info = model_info(self.finetuned_from)
                for tag in info.tags:
                    if tag.startswith('license:'):
                        self.license = tag[8:]
            except requests.exceptions.HTTPError:
                # Best effort only: leave license unset if the Hub is unreachable.
                pass

    def create_model_index(self, metric_mapping):
        """Build the ``model-index`` card section (task x dataset result grid).

        Args:
            metric_mapping: dict mapping metric tag -> metric display name.

        Returns:
            A one-element list wrapping the model-index dict.
        """
        model_index = {'name': self.model_name}
        dataset_names = _listify(self.dataset)
        dataset_tags = _listify(self.dataset_tags)
        dataset_args = _listify(self.dataset_args)
        # Pad dataset_args with None so it zips cleanly against dataset_tags.
        if (len(dataset_args) < len(dataset_tags)):
            dataset_args = (dataset_args + ([None] * (len(dataset_tags) - len(dataset_args))))
        dataset_mapping = {tag: name for (tag, name) in zip(dataset_tags, dataset_names)}
        dataset_arg_mapping = {tag: arg for (tag, arg) in zip(dataset_tags, dataset_args)}
        task_mapping = {task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if (task in TASK_TAG_TO_NAME_MAPPING)}
        model_index['results'] = []
        if ((len(task_mapping) == 0) and (len(dataset_mapping) == 0)):
            return [model_index]
        # None placeholders keep the cartesian product below non-empty when
        # only one of task/dataset is known.
        if (len(task_mapping) == 0):
            task_mapping = {None: None}
        if (len(dataset_mapping) == 0):
            dataset_mapping = {None: None}
        all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping]
        for (task_tag, ds_tag) in all_possibilities:
            result = {}
            if (task_tag is not None):
                result['task'] = {'name': task_mapping[task_tag], 'type': task_tag}
            if (ds_tag is not None):
                result['dataset'] = {'name': dataset_mapping[ds_tag], 'type': ds_tag}
                if (dataset_arg_mapping[ds_tag] is not None):
                    result['dataset']['args'] = dataset_arg_mapping[ds_tag]
            if (len(metric_mapping) > 0):
                result['metrics'] = []
                for (metric_tag, metric_name) in metric_mapping.items():
                    result['metrics'].append({'name': metric_name, 'type': metric_tag, 'value': self.eval_results[metric_name]})
            # The Hub schema requires task, dataset and metrics all together.
            if (('task' in result) and ('dataset' in result) and ('metrics' in result)):
                model_index['results'].append(result)
            else:
                logger.info(f'''Dropping the following result as it does not have all the necessary fields:
{result}''')
        return [model_index]

    def create_metadata(self):
        """Assemble the YAML metadata dict for the card front matter."""
        metric_mapping = infer_metric_tags_from_eval_results(self.eval_results)
        metadata = {}
        metadata = _insert_values_as_list(metadata, 'language', self.language)
        metadata = _insert_value(metadata, 'license', self.license)
        metadata = _insert_values_as_list(metadata, 'tags', self.tags)
        metadata = _insert_values_as_list(metadata, 'datasets', self.dataset_tags)
        metadata = _insert_values_as_list(metadata, 'metrics', list(metric_mapping.keys()))
        metadata['model-index'] = self.create_model_index(metric_mapping)
        return metadata

    def to_model_card(self):
        """Render the complete model card (YAML front matter + markdown body)."""
        model_card = ''
        metadata = yaml.dump(self.create_metadata(), sort_keys=False)
        if (len(metadata) > 0):
            model_card = f'''---
{metadata}---
'''
        # Banner depends on which pipeline generated the summary.
        if (self.source == 'trainer'):
            model_card += AUTOGENERATED_TRAINER_COMMENT
        else:
            model_card += AUTOGENERATED_KERAS_COMMENT
        model_card += f'''
# {self.model_name}
'''
        if (self.finetuned_from is None):
            model_card += 'This model was trained from scratch on '
        else:
            model_card += f'This model is a fine-tuned version of [{self.finetuned_from}]( on '
        if (self.dataset is None):
            model_card += 'an unknown dataset.'
        elif isinstance(self.dataset, str):
            model_card += f'the {self.dataset} dataset.'
        elif (isinstance(self.dataset, (tuple, list)) and (len(self.dataset) == 1)):
            model_card += f'the {self.dataset[0]} dataset.'
        else:
            model_card += (', '.join([f'the {ds}' for ds in self.dataset[:(- 1)]]) + f' and the {self.dataset[(- 1)]} datasets.')
        if (self.eval_results is not None):
            model_card += '\nIt achieves the following results on the evaluation set:\n'
            model_card += '\n'.join([f'- {name}: {_maybe_round(value)}' for (name, value) in self.eval_results.items()])
            model_card += '\n'
        model_card += '\n## Model description\n\nMore information needed\n'
        model_card += '\n## Intended uses & limitations\n\nMore information needed\n'
        model_card += '\n## Training and evaluation data\n\nMore information needed\n'
        model_card += '\n## Training procedure\n'
        model_card += '\n### Training hyperparameters\n'
        if (self.hyperparameters is not None):
            model_card += '\nThe following hyperparameters were used during training:\n'
            model_card += '\n'.join([f'- {name}: {value}' for (name, value) in self.hyperparameters.items()])
            model_card += '\n'
        else:
            model_card += '\nMore information needed\n'
        if (self.eval_lines is not None):
            model_card += '\n### Training results\n\n'
            model_card += make_markdown_table(self.eval_lines)
            model_card += '\n'
        model_card += '\n### Framework versions\n\n'
        model_card += f'''- Transformers {__version__}
'''
        if ((self.source == 'trainer') and is_torch_available()):
            import torch
            model_card += f'''- Pytorch {torch.__version__}
'''
        elif ((self.source == 'keras') and is_tf_available()):
            import tensorflow as tf
            model_card += f'''- TensorFlow {tf.__version__}
'''
        if is_datasets_available():
            import datasets
            model_card += f'''- Datasets {datasets.__version__}
'''
        if is_tokenizers_available():
            import tokenizers
            model_card += f'''- Tokenizers {tokenizers.__version__}
'''
        return model_card

    def from_trainer(cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None):
        """Build a TrainingSummary from a transformers ``Trainer``.

        NOTE(review): presumably a @classmethod upstream — confirm.
        """
        # Prefer the train dataset when inferring dataset metadata.
        one_dataset = (trainer.train_dataset if (trainer.train_dataset is not None) else trainer.eval_dataset)
        if (is_hf_dataset(one_dataset) and ((dataset_tags is None) or (dataset_args is None))):
            default_tag = one_dataset.builder_name
            # Generic loaders (csv/json/...) carry no useful dataset identity.
            if (default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']):
                if (dataset_tags is None):
                    dataset_tags = [default_tag]
                if (dataset_args is None):
                    dataset_args = [one_dataset.config_name]
        if ((dataset is None) and (dataset_tags is not None)):
            dataset = dataset_tags
        # A local directory path is not a Hub checkpoint name; skip it.
        if ((finetuned_from is None) and hasattr(trainer.model.config, '_name_or_path') and (not os.path.isdir(trainer.model.config._name_or_path))):
            finetuned_from = trainer.model.config._name_or_path
        if (tasks is None):
            model_class_name = trainer.model.__class__.__name__
            for (task, mapping) in TASK_MAPPING.items():
                if (model_class_name in _get_mapping_values(mapping)):
                    tasks = task
        if (model_name is None):
            model_name = Path(trainer.args.output_dir).name
        # Ensure the 'generated_from_trainer' tag is always present.
        if (tags is None):
            tags = ['generated_from_trainer']
        elif (isinstance(tags, str) and (tags != 'generated_from_trainer')):
            tags = [tags, 'generated_from_trainer']
        elif ('generated_from_trainer' not in tags):
            tags.append('generated_from_trainer')
        (_, eval_lines, eval_results) = parse_log_history(trainer.state.log_history)
        hyperparameters = extract_hyperparameters_from_trainer(trainer)
        return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters)

    def from_keras(cls, model, model_name, keras_history=None, language=None, license=None, tags=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None):
        """Build a TrainingSummary from a Keras model (and optional History).

        NOTE(review): presumably a @classmethod upstream — confirm.
        """
        if (dataset is not None):
            if (is_hf_dataset(dataset) and ((dataset_tags is None) or (dataset_args is None))):
                default_tag = dataset.builder_name
                # Generic loaders carry no useful dataset identity.
                if (default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']):
                    if (dataset_tags is None):
                        dataset_tags = [default_tag]
                    if (dataset_args is None):
                        dataset_args = [dataset.config_name]
        if ((dataset is None) and (dataset_tags is not None)):
            dataset = dataset_tags
        if ((finetuned_from is None) and hasattr(model.config, '_name_or_path') and (not os.path.isdir(model.config._name_or_path))):
            finetuned_from = model.config._name_or_path
        if (tasks is None):
            model_class_name = model.__class__.__name__
            for (task, mapping) in TASK_MAPPING.items():
                if (model_class_name in _get_mapping_values(mapping)):
                    tasks = task
        # Ensure the 'generated_from_keras_callback' tag is always present.
        if (tags is None):
            tags = ['generated_from_keras_callback']
        elif (isinstance(tags, str) and (tags != 'generated_from_keras_callback')):
            tags = [tags, 'generated_from_keras_callback']
        elif ('generated_from_keras_callback' not in tags):
            tags.append('generated_from_keras_callback')
        if (keras_history is not None):
            (_, eval_lines, eval_results) = parse_keras_history(keras_history)
        else:
            eval_lines = []
            eval_results = dict()
        hyperparameters = extract_hyperparameters_from_keras(model)
        return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, source='keras')
def adaptive_clip_grad(parameters, clip_factor=0.01, eps=0.001, norm_type=2.0):
    """Adaptive Gradient Clipping: rescale gradients whose unit-wise norm
    exceeds ``clip_factor`` times the corresponding parameter norm.

    Args:
        parameters: a tensor or an iterable of tensors with ``.grad`` set.
        clip_factor: maximum allowed ratio of grad norm to parameter norm.
        eps: floor applied to the parameter norm before scaling.
        norm_type: p-norm order passed through to ``unitwise_norm``.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    for param in parameters:
        grad = param.grad
        if grad is None:
            continue
        weights = param.detach()
        grads = grad.detach()
        # Per-unit clipping threshold: clip_factor * max(||w||, eps).
        limit = unitwise_norm(weights, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor)
        grad_norms = unitwise_norm(grads, norm_type=norm_type)
        rescaled = grads * (limit / grad_norms.clamp(min=1e-06))
        # Keep gradients already under the limit; rescale the rest in place.
        param.grad.detach().copy_(torch.where(grad_norms < limit, grads, rescaled))
class RoIPointPool3dFunction(Function):
    """Autograd wrapper around the CUDA RoI point-pooling op (forward only).

    NOTE(review): torch autograd ``Function``s normally declare ``forward``/
    ``backward`` as ``@staticmethod``; the decorators appear to have been
    stripped here — confirm.
    """

    def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512):
        """Pool per-point features inside (enlarged) 3D boxes.

        Args:
            points: (B, N, 3) point coordinates (asserted below).
            point_features: (B, N, C) per-point features.
            boxes3d: (B, M, 7) boxes.
            pool_extra_width: margin used to enlarge each box before pooling.
            num_sampled_points: number of points kept per box.

        Returns:
            pooled_features: (B, M, num_sampled_points, 3 + C) tensor.
            pooled_empty_flag: (B, M) int tensor flag per box.
        """
        assert ((points.shape.__len__() == 3) and (points.shape[2] == 3))
        (batch_size, boxes_num, feature_len) = (points.shape[0], boxes3d.shape[1], point_features.shape[2])
        # Enlarge boxes by pool_extra_width before pooling.
        pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view((- 1), 7), pool_extra_width).view(batch_size, (- 1), 7)
        pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, (3 + feature_len)))
        pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()
        # The CUDA kernel fills pooled_features / pooled_empty_flag in place.
        roipoint_pool3d_cuda.forward(points.contiguous(), pooled_boxes3d.contiguous(), point_features.contiguous(), pooled_features, pooled_empty_flag)
        return (pooled_features, pooled_empty_flag)

    def backward(ctx, grad_out):
        # Gradients are intentionally unsupported for this pooling op.
        raise NotImplementedError
def get_batch_indices(array, batch_size):
    """Compute chunk boundaries so each chunk's summed lengths stay near batch_size.

    Args:
        array: sequence of 0-dim tensors (anything exposing ``.item()``)
            giving per-element lengths.
        batch_size: soft cap on the running total per chunk.

    Returns:
        List of boundary indices, always starting at 0 and ending at
        ``len(array)``; a new boundary opens when the running total exceeds
        ``batch_size``.
    """
    boundaries = [0]
    running = 0
    for position, length in enumerate(array):
        running += length.item()
        if running > batch_size:
            # Close the current chunk before this element and restart the
            # running total with the element that overflowed it.
            boundaries.append(position)
            running = length.item()
    boundaries.append(len(array))
    return boundaries
class PrecisionRecallCurve(PytorchMetric):
    """Streaming precision-recall curve backed by torchmetrics."""

    def __init__(self):
        # Imported lazily so torchmetrics is only required when this metric
        # is actually instantiated.
        import torchmetrics
        self.internal_curve = torchmetrics.PrecisionRecallCurve()

    def __call__(self, preds, targets):
        # torchmetrics expects integer class targets.
        self.internal_curve.update(preds, targets.long())

    def compute(self):
        return self.internal_curve.compute()
def temporal_padding(x, padding=(1, 1)):
    """Zero-pad the middle (time) axis of a 3D tensor.

    ``padding`` gives the (before, after) pad widths along axis 1; the first
    and last axes are left untouched.
    """
    assert (len(padding) == 2)
    before, after = padding
    return tf.pad(x, [[0, 0], [before, after], [0, 0]])
class GeneralTask(AbstractTask):
def __init__(self, task, config, prompt, seed=42):
    """Set up a promptsource-based task: resolve the template and metrics.

    Args:
        task: dataset name (e.g. 'super_glue').
        config: dataset config name, 'none' for configless datasets, or
            'skip' to bypass template/metric resolution entirely.
        prompt: promptsource template name within the dataset.
        seed: RNG seed stored on the task.
    """
    self.task = task
    self.name = task
    self.config = config
    self.seed = seed
    self.prompt = prompt
    # Debug trace of the requested task/config/prompt.
    print('')
    print(self.task)
    print()
    print(self.config)
    print()
    print(self.prompt)
    print('')
    if (self.config == 'skip'):
        print('Pass through ')
    else:
        # Resolve the promptsource template: 'task/config' when a config is
        # given, plain 'task' otherwise.
        if (self.config != 'none'):
            self.promptList = DatasetTemplates(f'{task}/{config}')[self.prompt]
        else:
            self.promptList = DatasetTemplates(f'{task}')[self.prompt]
        name = task
        metric = []
        if (task == 'trivia_qa'):
            # trivia_qa is special-cased: SQuAD-style metric only.
            metric_names = ['squad']
            metric.append(metrics.squad)
        else:
            # Metric names come from the template metadata; accuracy, ROUGE
            # and BLEU are always computed in addition to those listed.
            metric_names = [x.lower() for x in self.promptList.metadata.metrics]
            metric.append(metrics.accuracy)
            metric.append(metrics.calculate_rouge)
            metric.append(metrics.bleu)
            for x in metric_names:
                if (x == 'other'):
                    continue
                elif (x == 'matthews_correlation'):
                    metric.append(metrics.matthews_corrcoef)
                elif (x == 'em'):
                    metric.append(metrics.exact_match)
                elif (x == 'f1'):
                    metric.append(metrics.f1_score_with_invalid)
                elif (x == 'spearmanr'):
                    metric.append(metrics.spearman_corrcoef)
                elif (x == 'f1_multiclass'):
                    metric.append(metrics.mean_multiclass_f1(num_classes=3))
                elif (x == 'squad'):
                    metric.append(metrics.squad)
        self.metric_names = metric_names
        self.metric = metric
# Mapping from logical split names to the dataset's actual split names.
split_to_data_split = {'train': 'train', 'validation': 'validation', 'test': 'test'}

def load_dataset(self, split: str):
    """Load the requested split, patching in a validation split for datasets
    that ship only train/test (or oddly named) splits.

    NOTE(review): the ``self.task == 'super_glue'`` branch below is only
    reachable when ``self.config == 'none'`` (the ``elif self.config !=
    'none'`` above it catches everything else), so its inner 'copa_gen'
    check looks unreachable — confirm whether that branch is dead code.
    """
    print('####')
    print(self.task)
    print('####')
    if (self.task == 'anli'):
        # anli uses the config name (e.g. a round-specific split) as the key.
        return datasets.load_dataset(self.task, cache_dir='cache', script_version='master')[self.config]
    elif (self.task == 'rotten_tomatoes'):
        x = datasets.load_dataset(self.task, split=split, cache_dir='cache', script_version='master')
        x = x.rename_column('label', 'labels')
        return x
    elif (self.task == 'app_reviews'):
        x = datasets.load_dataset(self.task, cache_dir='cache', script_version='master')
        # app_reviews has no validation split: carve one out of train.
        x['validation'] = x['train'][10000:10200]
        return x[split]
    elif (self.task == 'wiki_bio'):
        x = datasets.load_dataset(self.task, cache_dir='cache', script_version='master')
        # wiki_bio names its validation split 'val'.
        x['validation'] = x['val']
        return x[split]
    elif (self.task == 'yelp_review_full'):
        x = datasets.load_dataset(self.task, cache_dir='cache', script_version='master')
        x['validation'] = x['test']
        x = x.remove_columns('label')
        return x[split]
    elif (self.task == 'imdb'):
        print('####')
        print(self.task)
        print('####')
        x = datasets.load_dataset(self.task, cache_dir='cache', script_version='master')
        x['validation'] = x['test']
        return x[split]
    elif (self.task == 'ag_news'):
        x = datasets.load_dataset(self.task, cache_dir='cache', script_version='master')
        x['validation'] = x['test']
        return x[split]
    elif (self.task == 'dbpedia_14'):
        x = datasets.load_dataset(self.task)
        x['validation'] = x['test']
        return x[split]
    elif (self.task == 'trec'):
        x = datasets.load_dataset(self.task, cache_dir='cache', script_version='master')
        x['validation'] = x['test']
        return x[split]
    elif (self.config != 'none'):
        x = datasets.load_dataset(self.task, self.config, split=split, cache_dir='cache', script_version='master')
        return x
    elif (self.task == 'super_glue'):
        if (self.config == 'copa_gen'):
            return datasets.load_dataset(self.task, 'copa', cache_dir='cache', split=split, script_version='master')
    elif (self.task == 'xsum'):
        return datasets.load_dataset('xsum', split=split)
    else:
        return datasets.load_dataset(self.task, split=split, cache_dir='cache', script_version='master')
def preprocessor(self, example, add_prefix=True):
tmp = self.promptList.apply(example)
src_texts = tmp[:(- 1)]
tgt_texts = tmp[(- 1)]
result = {}
result['source'] = ' '.join(src_texts)
result['target'] = tgt_texts
result['task'] = self.name
result['extra_fields'] = {}
if ('super_glue' in self.name):
if (self.config == 'copa'):
result['labels_list'] = [example['choice1'], example['choice2']]
elif (self.config == 'cb'):
if (self.prompt == 'can we infer'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'claim true/false/inconclusive'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'MNLI crowdsource'):
result['labels_list'] = ['Correct', 'Inconclusive', 'Incorrect']
elif (self.prompt == 'should assume'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'does it follow that'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'GPT-3 style'):
result['labels_list'] = ['True', 'False', 'Neither']
elif (self.prompt == 'based on the previous passage'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'justified in saying'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'take the following as truth'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'must be true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'guaranteed/possible/impossible'):
result['labels_list'] = ['Guaranteed', 'Possible', 'Impossible']
elif (self.prompt == 'always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'does this imply'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'consider always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'guaranteed true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.config == 'rte'):
if (self.prompt == 'GPT-3 style'):
result['labels_list'] = ['True', 'False']
else:
result['labels_list'] = ['Yes', 'No']
elif (self.config == 'wsc.fixed'):
if (self.prompt == 'p is/are r'):
result['labels_list'] = ['False', 'True']
elif (self.prompt == 'the pronoun refers to'):
result['labels_list'] = ['False', 'True']
elif (self.prompt == 'in other words'):
result['labels_list'] = ['False', 'True']
else:
result['labels_list'] = ['No', 'Yes']
elif (self.config == 'wic'):
if (self.prompt == 'affirmation_true_or_false'):
result['labels_list'] = ['False', 'True']
else:
result['labels_list'] = ['No', 'Yes']
elif (self.name == 'winogrande'):
if (self.config == 'winogrande_xl'):
if (self.prompt == 'True or False'):
result['labels_list'] = ['True', 'False']
else:
result['labels_list'] = [example['option1'], example['option2']]
elif (self.name == 'hellaswag'):
if (self.prompt == 'Predict ending with hint'):
result['labels_list'] = [item for item in example['endings']]
elif (self.prompt == 'complete_first_then'):
result['labels_list'] = [item for item in example['endings']]
elif (self.prompt == 'Randomized prompts template'):
result['labels_list'] = [item for item in example['endings']]
elif (self.prompt == 'Appropriate continuation - Yes or No'):
result['labels_list'] = ['Yes', 'No']
elif (self.prompt == 'Reversed appropriate continuation - Yes or No'):
result['labels_list'] = ['Yes', 'No']
elif (self.prompt == 'how_ends'):
result['labels_list'] = ['Ending 1', 'Ending 2', 'Ending 3', 'Ending 4']
elif (self.prompt == 'if_begins_how_continues'):
result['labels_list'] = ['Ending 1', 'Ending 2', 'Ending 3', 'Ending 4']
elif (self.name == 'anli_r1'):
if (self.prompt == 'can we infer'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'claim true/false/inconclusive'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'MNLI crowdsource'):
result['labels_list'] = ['Correct', 'Inconclusive', 'Incorrect']
elif (self.prompt == 'should assume'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'does it follow that'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'GPT-3 style'):
result['labels_list'] = ['True', 'False', 'Neither']
elif (self.prompt == 'based on the previous passage'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'justified in saying'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'take the following as truth'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'must be true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'guaranteed/possible/impossible'):
result['labels_list'] = ['Guaranteed', 'Possible', 'Impossible']
elif (self.prompt == 'always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'does this imply'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'consider always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'guaranteed true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.name == 'anli_r2'):
if (self.prompt == 'can we infer'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'claim true/false/inconclusive'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'MNLI crowdsource'):
result['labels_list'] = ['Correct', 'Inconclusive', 'Incorrect']
elif (self.prompt == 'should assume'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'does it follow that'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'GPT-3 style'):
result['labels_list'] = ['True', 'False', 'Neither']
elif (self.prompt == 'based on the previous passage'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'justified in saying'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'take the following as truth'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'must be true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'guaranteed/possible/impossible'):
result['labels_list'] = ['Guaranteed', 'Possible', 'Impossible']
elif (self.prompt == 'always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'does this imply'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'consider always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'guaranteed true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.name == 'anli_r3'):
if (self.prompt == 'can we infer'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'claim true/false/inconclusive'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'MNLI crowdsource'):
result['labels_list'] = ['Correct', 'Inconclusive', 'Incorrect']
elif (self.prompt == 'should assume'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'does it follow that'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'GPT-3 style'):
result['labels_list'] = ['True', 'False', 'Neither']
elif (self.prompt == 'based on the previous passage'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'justified in saying'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'take the following as truth'):
result['labels_list'] = ['True', 'Inconclusive', 'False']
elif (self.prompt == 'must be true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'guaranteed/possible/impossible'):
result['labels_list'] = ['Guaranteed', 'Possible', 'Impossible']
elif (self.prompt == 'always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'does this imply'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
elif (self.prompt == 'consider always/sometimes/never'):
result['labels_list'] = ['Always', 'Sometimes', 'Never']
elif (self.prompt == 'guaranteed true'):
result['labels_list'] = ['Yes', 'Maybe', 'No']
if use_verbalizer:
if (self.name == 'cos_e'):
if (self.prompt == 'question_description_option_text'):
result['labels_list'] = [item for item in example['choices']]
elif (self.prompt == 'question_description_option_id'):
result['labels_list'] = ['A', 'B', 'C', 'D', 'E']
elif (self.prompt == 'question_option_description_text'):
result['labels_list'] = [item for item in example['choices']]
elif (self.prompt == 'description_question_option_id'):
result['labels_list'] = ['A', 'B', 'C', 'D', 'E']
elif (self.prompt == 'description_question_option_text'):
result['labels_list'] = [item for item in example['choices']]
elif (self.prompt == 'question_option_description_id'):
result['labels_list'] = ['A', 'B', 'C', 'D', 'E']
elif (self.name == 'commonsense_qa'):
if (self.prompt == 'answer_given_question_without_options'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'question_answering'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'question_to_answer_index'):
result['labels_list'] = [item for item in example['choices']['label']]
elif (self.prompt == 'most_suitable_answer'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.name == 'dream'):
if (self.prompt == 'baseline'):
result['labels_list'] = [item for item in example['choice']]
elif (self.prompt == 'read_the_following_conversation_and_answer_the_question'):
result['labels_list'] = [item for item in example['choice']]
elif (self.name == 'quail'):
if (self.prompt == 'context_question_answer_description_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_question_answer_description_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'description_context_question_answer_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_question_description_answer_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'context_question_description_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'context_description_question_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'context_question_description_answer_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'no_prompt_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_description_question_answer_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'description_context_question_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'no_prompt_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'context_description_question_answer_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.prompt == 'description_context_question_answer_text'):
result['labels_list'] = [item for item in example['answers']]
elif (self.name == 'quartz'):
if (self.prompt == 'use_info_from_question_paragraph'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'paragraph_question_plain_concat'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'use_info_from_paragraph_question'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'answer_question_based_on'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'answer_question_below'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'read_passage_below_choose'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'having_read_above_passage'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'given_the_fact_answer_the_q'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.name == 'social_i_qa'):
if (self.prompt == 'I was wondering'):
result['labels_list'] = [example['answerA'], example['answerB'], example['answerC']]
elif (self.prompt == 'Show choices and generate answer'):
result['labels_list'] = [example['answerA'], example['answerB'], example['answerC']]
elif (self.prompt == 'Check if a random answer is valid or not'):
result['labels_list'] = ['Yes', 'No']
elif (self.prompt == 'Generate answer'):
result['labels_list'] = [example['answerA'], example['answerB'], example['answerC']]
elif (self.prompt == 'Show choices and generate index'):
result['labels_list'] = ['A', 'B', 'C']
elif (self.name == 'wiqa'):
if (self.prompt == 'effect_with_string_answer'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'which_of_the_following_is_the_supposed_perturbation'):
result['labels_list'] = ['indirectly impacting a step of the process', 'not impacting any step of the process']
elif (self.prompt == 'effect_with_label_answer'):
result['labels_list'] = ['A', 'B', 'C']
elif (self.prompt == 'does_the_supposed_perturbation_have_an_effect'):
result['labels_list'] = ['yes', 'no']
elif (self.name == 'cosmos_qa'):
if (self.prompt == 'description_context_question_answer_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'description_context_question_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'description_context_question_answer_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_description_question_answer_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'no_prompt_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_question_description_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'no_prompt_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'context_description_question_answer_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_question_description_answer_id'):
result['labels_list'] = ['A', 'B', 'C', 'D']
elif (self.prompt == 'context_description_question_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'context_question_description_answer_text'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.prompt == 'only_question_answer'):
result['labels_list'] = [example['answer0'], example['answer1'], example['answer2'], example['answer3']]
elif (self.name == 'qasc'):
if (self.prompt == 'is_correct_1'):
result['labels_list'] = ['Yes', 'No']
elif (self.prompt == 'qa_with_separated_facts_1'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'qa_with_separated_facts_3'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'qa_with_separated_facts_4'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'qa_with_separated_facts_5'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'qa_with_combined_facts_1'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.prompt == 'is_correct_2'):
result['labels_list'] = ['Yes', 'No']
elif (self.prompt == 'qa_with_separated_facts_2'):
result['labels_list'] = [item for item in example['choices']['text']]
elif (self.name == 'quarel'):
if (self.prompt == 'do_not_use'):
result['labels_list'] = [example['world_literals']['world1'][0], example['world_literals']['world2'][0]]
elif (self.prompt == 'logic_test'):
result['labels_list'] = [example['world_literals']['world1'][0], example['world_literals']['world2'][0]]
elif (self.prompt == 'heres_a_story'):
result['labels_list'] = [example['world_literals']['world1'][0], example['world_literals']['world2'][0]]
elif (self.prompt == 'choose_between'):
result['labels_list'] = [example['world_literals']['world1'][0], example['world_literals']['world2'][0]]
elif (self.prompt == 'testing_students'):
result['labels_list'] = [example['world_literals']['world1'][0], example['world_literals']['world2'][0]]
elif (self.name == 'sciq'):
if (self.prompt == 'Direct Question (Closed Book)'):
result['labels_list'] = [example['distractor1'], example['distractor2'], example['distractor3'], example['correct_answer']]
elif (self.prompt == 'Multiple Choice (Closed Book)'):
result['labels_list'] = [example['distractor1'], example['distractor2'], example['distractor3'], example['correct_answer']]
elif (self.prompt == 'Multiple Choice Question First'):
result['labels_list'] = [example['distractor1'], example['distractor2'], example['distractor3'], example['correct_answer']]
elif (self.prompt == 'Multiple Choice'):
result['labels_list'] = [example['distractor1'], example['distractor2'], example['distractor3'], example['correct_answer']]
elif (self.prompt == 'Direct Question'):
result['labels_list'] = [example['distractor1'], example['distractor2'], example['distractor3'], example['correct_answer']]
elif (self.name == 'app_reviews'):
if (self.prompt == 'categorize_rating_using_review'):
result['labels_list'] = ['Not at all', 'No', 'Maybe', 'Yes', 'Definitely']
elif (self.prompt == 'convert_to_star_rating'):
result['labels_list'] = ['', '', '', '', '']
elif (self.name == 'imdb'):
if (self.prompt == 'Movie Expressed Sentiment 2'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Opinion bad good choices'):
result['labels_list'] = ['bad', 'good']
elif (self.prompt == 'Sentiment with choices '):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Sentiment Feeling'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Writer Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Movie Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Text Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Negation template for positive and negative'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Enjoyment Yes No'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Reviewer Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Enjoyment'):
result['labels_list'] = ["They didn't like it!", 'They loved it']
elif (self.name == 'rotten_tomatoes'):
if (self.prompt == 'Reviewer Opinion bad good choices'):
result['labels_list'] = ['bad', 'good']
elif (self.prompt == 'Text Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Sentiment with choices '):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Enjoyment Yes No'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Reviewer Enjoyment'):
result['labels_list'] = ["They didn't like it", 'They loved it']
elif (self.prompt == 'Movie Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Writer Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Movie Expressed Sentiment 2'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Expressed Sentiment'):
result['labels_list'] = ['negative', 'positive']
elif (self.prompt == 'Reviewer Sentiment Feeling'):
result['labels_list'] = ['negative', 'positive']
elif (self.name == 'paws'):
if (self.prompt == 'task_description-no-label'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Meaning'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'context-question-no-label'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Rewrite-no-label'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'context-question'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Concatenation'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Concatenation-no-label'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'Meaning-no-label'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'PAWS-ANLI GPT3'):
result['labels_list'] = ['False', 'True']
elif (self.prompt == 'Rewrite'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'PAWS-ANLI GPT3-no-label'):
result['labels_list'] = ['No', 'Yes']
elif (self.name == 'glue_qqp'):
if (self.prompt == 'quora'):
result['labels_list'] = ['no', 'yes']
elif (self.prompt == 'duplicate or not'):
result['labels_list'] = ['not duplicates', 'duplicates']
elif (self.prompt == 'same thing'):
result['labels_list'] = ['no', 'yes']
elif (self.prompt == 'answer'):
result['labels_list'] = ['no', 'yes']
elif (self.prompt == 'meaning'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'duplicate'):
result['labels_list'] = ['no', 'yes']
elif (self.name == 'glue_mrpc'):
if (self.prompt == 'want to know'):
result['labels_list'] = ['no', 'yes']
elif (self.prompt == 'paraphrase'):
result['labels_list'] = ['no', 'yes']
elif (self.prompt == 'equivalent'):
result['labels_list'] = ['not equivalent', 'equivalent']
elif (self.prompt == 'replace'):
result['labels_list'] = ['no', 'yes']
elif (self.prompt == 'same thing'):
result['labels_list'] = ['no', 'yes']
elif (self.name == 'ag_news'):
if (self.prompt == 'classify_question_first'):
result['labels_list'] = ['World politics', 'Sports', 'Business', 'Science and technology']
elif (self.prompt == 'classify_with_choices_question_first'):
result['labels_list'] = ['World politics', 'Sports', 'Business', 'Science and technology']
elif (self.prompt == 'recommend'):
result['labels_list'] = ['Politician', 'Athlete', 'Business executive', 'Scientist']
elif (self.prompt == 'which_section_choices'):
result['labels_list'] = ['World News', 'Sports', 'Business', 'Science and Technology']
elif (self.prompt == 'which_section'):
result['labels_list'] = ['World News', 'Sports', 'Business', 'Science and Technology']
elif (self.prompt == 'classify_with_choices'):
result['labels_list'] = ['World politics', 'Sports', 'Business', 'Science and technology']
elif (self.prompt == 'classify'):
result['labels_list'] = ['World politics', 'Sports', 'Business', 'Science and technology']
elif (self.name == 'dbpedia_14'):
if (self.prompt == 'given_list_what_category_does_the_paragraph_belong_to'):
result['labels_list'] = ['Company', 'Educational Institution', 'Artist', 'Athlete', 'Office Holder', 'Mean Of Transportation', 'Building', 'Natural Place', 'Village', 'Animal', 'Plant', 'Album', 'Film', 'Written Work']
elif (self.prompt == 'pick_one_category_for_the_following_text'):
result['labels_list'] = ['Company', 'Educational Institution', 'Artist', 'Athlete', 'Office Holder', 'Mean Of Transportation', 'Building', 'Natural Place', 'Village', 'Animal', 'Plant', 'Album', 'Film', 'Written Work']
elif (self.prompt == 'given_a_choice_of_categories'):
result['labels_list'] = ['Company', 'Educational Institution', 'Artist', 'Athlete', 'Office Holder', 'Mean Of Transportation', 'Building', 'Natural Place', 'Village', 'Animal', 'Plant', 'Album', 'Film', 'Written Work']
elif (self.prompt == 'given_a_list_of_category_what_does_the_title_belong_to'):
result['labels_list'] = ['Company', 'Educational Institution', 'Artist', 'Athlete', 'Office Holder', 'Mean Of Transportation', 'Building', 'Natural Place', 'Village', 'Animal', 'Plant', 'Album', 'Film', 'Written Work']
elif (self.name == 'trec'):
if (self.prompt == 'what_category_best_describe'):
result['labels_list'] = ['Description', 'Entity', 'Abbreviation', 'Person', 'Quantity', 'Location']
elif (self.prompt == 'fine_grained_LOC'):
result['labels_list'] = ['city', 'country', 'mountain', 'state', 'other location']
elif (self.prompt == 'fine_grained_NUM_context_first'):
result['labels_list'] = ['code', 'count', 'date', 'distance', 'price', 'order', 'period of time', 'percentage', 'speed', 'temperature', 'size', 'weight', 'other number']
elif (self.prompt == 'fine_grained_ENTY'):
result['labels_list'] = ['an animal', 'an organ of the body', 'a color', 'creative piece', 'currency', 'disease or medicine', 'event', 'food', 'musical instrument', 'language', 'letter', 'plant', 'product', 'religion', 'sport', 'substance', 'symbol', 'technique', 'term', 'vehicle', 'word', 'other entity']
elif (self.prompt == 'fine_grained_NUM'):
result['labels_list'] = ['code', 'count', 'date', 'distance', 'price', 'order', 'period of time', 'percentage', 'speed', 'temperature', 'size', 'weight', 'other number']
elif (self.prompt == 'pick_the_best_descriptor'):
result['labels_list'] = ['Description', 'Entity', 'Abbreviation', 'Person', 'Quantity', 'Location']
elif (self.prompt == 'fine_grained_open_context_first'):
result['labels_list'] = ['Manner', 'Creative Piece', 'Animal', 'Expression abbreviated', 'Individual', 'Group', 'Title', 'Defintion', 'Date', 'Reason', 'Event', 'State', 'Description', 'Count', 'Other', 'Letter', 'Religion', 'Food', 'Country', 'Color', 'Term', 'City', 'Organ of the body', 'Disease or medicine', 'Mountain', 'Price', 'Product', 'Period', 'Substance', 'Sport', 'Plant', 'Technique', 'Size', 'Instrument', 'Abbreviation', 'Speed', 'Word', 'Language', 'Percentage', 'Code', 'Distance', 'Temperature', 'Symbol', 'Order', 'Vehicle', 'Weight', 'Currency']
elif (self.prompt == 'fine_grained_LOC_context_first'):
result['labels_list'] = ['city', 'country', 'mountain', 'state', 'other location']
elif (self.prompt == 'which_category_best_describes'):
result['labels_list'] = ['Description', 'Entity', 'Abbreviation', 'Person', 'Quantity', 'Location']
elif (self.prompt == 'fine_grained_DESC'):
result['labels_list'] = ['definition', 'description', 'manner of action', 'reason']
elif (self.prompt == 'trec1'):
result['labels_list'] = ['Description', 'Entity', 'Abbreviation', 'Person', 'Quantity', 'Location']
elif (self.prompt == 'fine_grained_ABBR'):
result['labels_list'] = ['abbreviation', 'expression abbreviated']
elif (self.prompt == 'fine_grained_ABBR_context_first'):
result['labels_list'] = ['abbreviation', 'expression abbreviated']
elif (self.prompt == 'trec2'):
result['labels_list'] = ['Description', 'Entity', 'Abbreviation', 'Person', 'Quantity', 'Location']
elif (self.prompt == 'fine_grained_HUM'):
result['labels_list'] = ['group', 'individual', 'title', 'description']
elif (self.prompt == 'fine_grained_open'):
result['labels_list'] = ['Manner', 'Creative Piece', 'Animal', 'Expression abbreviated', 'Individual', 'Group', 'Title', 'Defintion', 'Date', 'Reason', 'Event', 'State', 'Description', 'Count', 'Other', 'Letter', 'Religion', 'Food', 'Country', 'Color', 'Term', 'City', 'Organ of the body', 'Disease or medicine', 'Mountain', 'Price', 'Product', 'Period', 'Substance', 'Sport', 'Plant', 'Technique', 'Size', 'Instrument', 'Abbreviation', 'Speed', 'Word', 'Language', 'Percentage', 'Code', 'Distance', 'Temperature', 'Symbol', 'Order', 'Vehicle', 'Weight', 'Currency']
elif (self.prompt == 'fine_grained_HUM_context_first'):
result['labels_list'] = ['group', 'individual', 'title', 'description']
elif (self.prompt == 'fine_grained_DESC_context_first'):
result['labels_list'] = ['definition', 'description', 'manner of action', 'reason']
elif (self.name == 'hotpot_qa'):
if (self.prompt == 'classify_question_type'):
result['labels_list'] = ['comparison', 'bridge']
elif (self.name == 'wiki_qa'):
if (self.prompt == 'Is This True?'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'automatic_system'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'found_on_google'):
result['labels_list'] = ['No', 'Yes']
elif (self.prompt == 'exercise'):
result['labels_list'] = ['False', 'True']
elif (self.prompt == 'Decide_good_answer'):
result['labels_list'] = ['No', 'Yes']
return result |
def CleanseComments(line):
    """Strip comments from a single line of C/C++ source.

    Removes a trailing // comment (unless the // sits inside a string
    literal) and any complete /* ... */ spans matched by the module-level
    cleanup pattern.
    """
    slash_pos = line.find('//')
    if slash_pos != -1 and not IsCppString(line[:slash_pos]):
        line = line[:slash_pos].rstrip()
    # Erase full /* ... */ comments remaining on the line.
    return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class AdamW(Optimizer):
    """Adam with decoupled weight decay (AdamW, arXiv:1711.05101).

    Args:
        params: iterable of parameters or dicts defining parameter groups.
        lr: learning rate, must be >= 0.
        betas: coefficients for the running averages of the gradient and
            its square; each must lie in [0, 1).
        eps: term added to the denominator for numerical stability.
        weight_decay: decoupled weight-decay coefficient (applied directly
            to the weights, not through the gradient).
        correct_bias: apply Adam's bias correction (some BERT recipes
            disable it to reproduce the original TF implementation).
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-06, weight_decay=0.0, correct_bias=True):
        if (lr < 0.0):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[1]))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy per-parameter state initialization.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # Exponential moving averages of the gradient and its square.
                # Fix: use keyword `alpha=`/`value=` forms -- the positional
                # scalar-first signatures (`add_(scalar, tensor)` etc.) were
                # deprecated and later removed from PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2))
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = (1.0 - (beta1 ** state['step']))
                    bias_correction2 = (1.0 - (beta2 ** state['step']))
                    step_size = ((step_size * math.sqrt(bias_correction2)) / bias_correction1)
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
                # Decoupled weight decay, applied after the Adam update.
                if (group['weight_decay'] > 0.0):
                    p.data.add_(p.data, alpha=((- group['lr']) * group['weight_decay']))
        return loss
# NOTE(review): `_module()` looks like a truncated registration call (likely a
# decorator such as `@HEADS.register_module()` mangled during extraction) --
# confirm against the upstream source before relying on this line.
_module()
class DAFormerHead(BaseDecodeHead):
    """DAFormer decode head.

    Embeds each selected input feature level, upsamples every embedding to
    the resolution of the first level, and fuses the channel-wise
    concatenation into segmentation logits.
    """

    def __init__(self, **kwargs):
        super(DAFormerHead, self).__init__(input_transform='multiple_select', **kwargs)
        assert not self.align_corners
        decoder_params = kwargs['decoder_params']
        embed_dims = decoder_params['embed_dims']
        if isinstance(embed_dims, int):
            # A single int means the same embedding width for every level.
            embed_dims = [embed_dims] * len(self.in_index)
        embed_cfg = decoder_params['embed_cfg']
        embed_neck_cfg = decoder_params['embed_neck_cfg']
        if embed_neck_cfg == 'same_as_embed_cfg':
            embed_neck_cfg = embed_cfg
        fusion_cfg = decoder_params['fusion_cfg']
        # ASPP-style layers must know the interpolation mode in use.
        for cfg in (embed_cfg, embed_neck_cfg, fusion_cfg):
            if cfg is not None and 'aspp' in cfg['type']:
                cfg['align_corners'] = self.align_corners
        layers = {}
        for idx, in_ch, dim in zip(self.in_index, self.in_channels, embed_dims):
            # The deepest level gets the (possibly different) neck config.
            cfg = embed_neck_cfg if idx == self.in_index[-1] else embed_cfg
            layers[str(idx)] = build_layer(in_ch, dim, **cfg)
        self.embed_layers = nn.ModuleDict(layers)
        self.fuse_layer = build_layer(sum(embed_dims), self.channels, **fusion_cfg)

    def forward(self, inputs):
        feats = inputs
        batch = feats[-1].shape[0]
        target_size = feats[0].size()[2:]
        embedded = {}
        for idx in self.in_index:
            out = self.embed_layers[str(idx)](feats[idx])
            if out.dim() == 3:
                # (N, L, C) token sequence -> (N, C, H, W) feature map.
                out = out.permute(0, 2, 1).contiguous().reshape(batch, -1, feats[idx].shape[2], feats[idx].shape[3])
            if out.size()[2:] != target_size:
                out = resize(out, size=target_size, mode='bilinear', align_corners=self.align_corners)
            embedded[idx] = out
        fused = self.fuse_layer(torch.cat(list(embedded.values()), dim=1))
        return self.cls_seg(fused)
class TemporalDataset(BaseDataset):
    """Cityscapes video-clip dataset.

    Each item is a clip of consecutive frames containing background-masked RGB
    images, semantic maps, instance maps, and a background mask, each stacked
    along the channel dimension, plus the source file paths.
    """

    def initialize(self, opt):
        """Index every clip under the image/semantic/instance (and optional
        static-map) roots for the current phase ('train' or 'val')."""
        assert (opt.dataset == 'cityscapes')
        self.opt = opt
        # Cityscapes frames have a 2:1 width:height aspect ratio.
        self.height = int((opt.loadSize / 2.0))
        self.width = opt.loadSize
        self.isTrain = opt.isTrain
        self.static = opt.static
        if (opt.isTrain == True):
            phase = 'train'
        else:
            phase = 'val'
        self.all_image_paths = self.load_all_image_paths(((opt.ImagesRoot + phase) + '/'), ((opt.SemanticRoot + phase) + '/'), ((opt.InstanceRoot + phase) + '/'), ((opt.StaticMapDir + phase) + '/'))
        self.n_of_seqs = len(self.all_image_paths)
        print(('Load number of video paths = %d' % self.n_of_seqs))
        self.seq_len_max = max([len(A[0]) for A in self.all_image_paths])
        # NOTE(review): load_all_image_paths collects only 4 frames per clip,
        # yet start indices are drawn from n_frames_total = 27 below; this only
        # works when tIn + tOut stays within the per-clip frame count -- verify
        # against the original data preparation.
        self.n_frames_total = 27

    def __getitem__(self, index):
        """Return a dict of stacked clip tensors and the source paths."""
        tIn = self.opt.tIn
        tOut = self.opt.tOut
        image_paths = self.all_image_paths[(index % self.n_of_seqs)][0]
        semantic_paths = self.all_image_paths[(index % self.n_of_seqs)][1]
        instance_paths = self.all_image_paths[(index % self.n_of_seqs)][2]
        if (self.static is True):
            static_paths = self.all_image_paths[(index % self.n_of_seqs)][3]
            nonrigid_paths = self.all_image_paths[(index % self.n_of_seqs)][4]
            small_paths = self.all_image_paths[(index % self.n_of_seqs)][5]
        if (self.isTrain == True):
            # Training: sample a random window covering input + output frames.
            tAll = (tIn + tOut)
            start_idx = np.random.randint(0, ((self.n_frames_total - tAll) + 1))
        else:
            # Validation: always take the first tIn frames.
            tAll = tIn
            start_idx = 0
        (origin_w, origin_h) = Image.open(image_paths[start_idx]).size
        params = get_img_params(self.opt, (origin_w, origin_h))
        transform_scale_bicubic = get_transform(self.opt, params)
        transform_scale_nearest = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        Images = 0
        Semantics = 0
        Instancs = 0
        Back_mask = 0
        for i in range(tAll):
            image_path = image_paths[(start_idx + i)]
            semantic_path = semantic_paths[(start_idx + i)]
            instance_path = instance_paths[(start_idx + i)]
            semantic_PIL = self.get_resize_PIL(semantic_path, Image.NEAREST)
            Semantici = self.numpy2tensor(semantic_PIL, transform_scale_nearest, True)
            image_PIL = self.get_resize_PIL(image_path)
            if self.static:
                # Bug fix: nonrigid_paths/small_paths only exist when static
                # maps are enabled, so they must be indexed inside this branch
                # (previously this raised NameError whenever self.static was
                # False).
                nonrigid_path = nonrigid_paths[(start_idx + i)]
                small_path = small_paths[(start_idx + i)]
                non_rigid = self.read_non_rigid(nonrigid_path)
                small = self.read_non_rigid(small_path)
                static_path = static_paths[(start_idx + i)]
                static_PIL = self.get_resize_PIL(static_path)
                static = self.delete_non_rigid(static_PIL, non_rigid, small)
                Imagei = self.get_image_back(image_PIL, transform_scale_bicubic, static)
                statici = self.mask2tensor(static, transform_scale_nearest)
                Back_mask = (statici if (i == 0) else torch.cat([Back_mask, statici], dim=0))
            else:
                # Without static maps, derive the background mask from the
                # semantic labels instead.
                back = self.compute_back(semantic_PIL)
                Imagei = self.get_image_back(image_PIL, transform_scale_bicubic, back)
                backmaski = self.mask2tensor(back, transform_scale_nearest)
                Back_mask = (backmaski if (i == 0) else torch.cat([Back_mask, backmaski], dim=0))
            instance_pil = self.get_resize_PIL(instance_path)
            Instancei = self.mask2tensor(instance_pil, transform_scale_nearest)
            Images = (Imagei if (i == 0) else torch.cat([Images, Imagei], dim=0))
            Semantics = (Semantici if (i == 0) else torch.cat([Semantics, Semantici], dim=0))
            Instancs = (Instancei if (i == 0) else torch.cat([Instancs, Instancei], dim=0))
        return_list = {'Image': Images, 'Semantic': Semantics, 'Instance': Instancs, 'back_mask': Back_mask, 'Image_path': image_paths, 'Semantic_path': semantic_paths}
        return return_list

    def numpy2tensor(self, arr, transform_scaleA, is_label=False):
        """Apply a transform; label maps are rescaled back to raw IDs (x255)."""
        A_scaled = transform_scaleA(arr)
        if is_label:
            A_scaled *= 255
        return A_scaled

    def get_resize_PIL(self, path, method=Image.BICUBIC):
        """Open an image and resize it to (self.width, self.height) if needed."""
        img = Image.open(path)
        if (img.size[1] != self.height):
            img = img.resize((self.width, self.height), resample=method)
        return img

    def delete_non_rigid(self, static_pil, non_rigid, small):
        """Remove (dilated) non-rigid regions from a static map, re-adding
        small-object regions."""
        static = (1.0 - (np.array(static_pil) / 255.0))
        kernel = np.ones((7, 7), np.int32)
        # Dilate so the mask also covers object boundaries.
        non_rigid = cv2.dilate(non_rigid, kernel, iterations=1)
        static[(non_rigid == 1)] = 0
        static[(small == 1)] = 1
        return static

    def get_image_back(self, image_pil, transform_scaleA, backmask):
        """Zero out foreground pixels of an RGB image, then apply a transform."""
        A_img = (np.array(image_pil) * np.tile(np.expand_dims(backmask, axis=2), [1, 1, 3]))
        A_img = Image.fromarray(A_img.astype(np.uint8))
        A_scaled = transform_scaleA(A_img)
        return A_scaled

    def compute_non_rigid(self, semantic_PIL):
        """Binary mask of non-rigid classes (person, rider, motorcycle, bicycle
        in Cityscapes train IDs)."""
        non_rigid = np.zeros((self.height, self.width))
        non_rigid_idx = [11, 12, 17, 18]
        semantic_npy = np.array(semantic_PIL)
        for b in range(len(non_rigid_idx)):
            non_rigid[(semantic_npy == non_rigid_idx[b])] = 1
        return non_rigid

    def read_non_rigid(self, non_rigid_path):
        """Load a binary mask PNG (values 0/255) as a 0/1 array."""
        mask = (np.array(Image.open(non_rigid_path).resize((self.width, self.height), resample=Image.NEAREST)) / 255)
        return mask

    def compute_back(self, arr):
        """Background = semantic IDs below 11 (everything before 'person')."""
        semantic = np.array(arr)
        back = (semantic < 11)
        return back.astype(np.int32)

    def mask2tensor(self, mask, transform_scaleA):
        """Transform a 0/1 mask (ndarray or PIL) and rescale to raw values."""
        if isinstance(mask, np.ndarray):
            mask = Image.fromarray(mask.astype(np.uint8))
        mask_tensor = transform_scaleA(mask)
        mask_tensor *= 255
        return mask_tensor

    def __len__(self):
        return self.n_of_seqs

    def name(self):
        return 'TemporalDataset'

    def load_all_image_paths(self, image_dir, semantic_dir, instance_dir, static_map_dir):
        """Walk the per-city directories and group frames into 4-frame clips.

        Returns a list of tuples: (image, semantic, instance[, static,
        non_rigid, small_object]) path lists, one tuple per clip.
        """
        non_rigid_dir = self.opt.non_rigid_dir
        small_object_mask_dir = self.opt.small_object_mask_dir
        city_dir = os.listdir(image_dir)
        city_dir.sort()
        video = []
        video_cnt = 0
        for i in range(len(city_dir)):
            frame_dir = (image_dir + city_dir[i])
            frame_list = os.listdir(frame_dir)
            frame_list.sort()
            # Cityscapes sequences come in 30-frame snippets; keep the first
            # 4 frames of each snippet as one clip.
            for j in range((len(frame_list) // 30)):
                image = []
                semantic = []
                instance = []
                static_paths = []
                non_rigid_paths = []
                small_object_mask = []
                for k in range((j * 30), ((j * 30) + 4)):
                    full_image_path = ((frame_dir + '/') + frame_list[k])
                    full_semantic_path = (((semantic_dir + city_dir[i]) + '/') + frame_list[k])
                    full_instance_path = (((instance_dir + city_dir[i]) + '/') + frame_list[k])
                    assert os.path.isfile(full_image_path)
                    assert os.path.isfile(full_semantic_path)
                    image.append(full_image_path)
                    semantic.append(full_semantic_path)
                    instance.append(full_instance_path)
                    if (self.static is True):
                        full_static_path = ((static_map_dir + ('%04d/' % video_cnt)) + ('pred_dynamic_%02d.png' % (k - (j * 30))))
                        assert os.path.isfile(full_static_path)
                        static_paths.append(full_static_path)
                        full_nonrigid_path = ((non_rigid_dir + ('%04d/' % video_cnt)) + ('non_rigid_mask_%02d.png' % (k - (j * 30))))
                        assert os.path.isfile(full_nonrigid_path)
                        non_rigid_paths.append(full_nonrigid_path)
                        full_small_object_path = ((small_object_mask_dir + ('%04d/' % video_cnt)) + ('small_object_mask_%02d.png' % (k - (j * 30))))
                        assert os.path.isfile(full_small_object_path)
                        small_object_mask.append(full_small_object_path)
                if (self.static is True):
                    video.append((image, semantic, instance, static_paths, non_rigid_paths, small_object_mask))
                    video_cnt = (video_cnt + 1)
                else:
                    video.append((image, semantic, instance))
        return video
class DPRReaderState(DPRState):
    """Converts a DPR reader training checkpoint into a HF ``DPRReader``."""

    def load_dpr_model(self):
        """Build a ``DPRReader`` and populate it from ``self.src_file``."""
        config = DPRConfig(**BertConfig.get_config_dict('bert-base-uncased')[0])
        model = DPRReader(config)
        print(f'Loading DPR reader from {self.src_file}')
        saved_state = load_states_from_checkpoint(self.src_file)
        encoder_prefix = 'encoder.'
        proj_prefix = 'encoder.encode_proj'
        # Seed with the buffer the checkpoint does not carry.
        state_dict = {'encoder.bert_model.embeddings.position_ids': model.span_predictor.encoder.bert_model.embeddings.position_ids}
        for key, value in saved_state.model_dict.items():
            # Rename plain encoder weights into the bert_model namespace;
            # projection weights keep their original names.
            if key.startswith(encoder_prefix) and not key.startswith(proj_prefix):
                key = 'encoder.bert_model.' + key[len(encoder_prefix):]
            state_dict[key] = value
        model.span_predictor.load_state_dict(state_dict)
        return model
def logs2pil(logs, keys=['sample']):
    """Convert a dict of image arrays/tensors into PIL images.

    4-dim entries (batched) are converted from their first element, 3-dim
    entries directly; anything else (or a conversion failure) maps to None.

    NOTE(review): `keys` is accepted but never used -- kept for backward
    compatibility; confirm whether filtering by it was intended.
    """
    imgs = {}
    for key, value in logs.items():
        try:
            ndim = len(value.shape)
            if ndim == 4:
                img = custom_to_pil(value[0, ...])
            elif ndim == 3:
                img = custom_to_pil(value)
            else:
                print(f'Unknown format for key {key}. ')
                img = None
        except Exception:
            # Fix: was a bare `except:` that also swallowed SystemExit and
            # KeyboardInterrupt.
            img = None
        imgs[key] = img
    return imgs
class PyPrint(PyStatement):
    """A Python ``print`` statement wrapping a single expression."""

    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        arg = self.arg
        if isinstance(arg, PyStrAppend):
            # An append to the output variable renders as a plain print of
            # the other operand; otherwise print both sides in sequence.
            try:
                if arg.left.name == VAR_OUT:
                    return 'print %s' % str(arg.right)
                if arg.right.name == VAR_OUT:
                    return 'print %s' % str(arg.left)
            except AttributeError:
                pass
            return '%s; %s' % (str(PyPrint(arg.left)), str(PyPrint(arg.right)))
        return 'print %s' % str(arg)
class SelectAdaptivePool2d(nn.Module):
    """Adaptive 2D pooling selectable by name.

    Supported pool types: 'avg' (default), 'max', 'avgmax', 'catavgmax'.
    Optionally flattens the spatial dimensions of the pooled output.
    """

    def __init__(self, output_size=1, pool_type='avg', flatten=False):
        super(SelectAdaptivePool2d, self).__init__()
        self.output_size = output_size
        self.pool_type = pool_type
        self.flatten = flatten
        if pool_type == 'avgmax':
            pool = AdaptiveAvgMaxPool2d(output_size)
        elif pool_type == 'catavgmax':
            pool = AdaptiveCatAvgMaxPool2d(output_size)
        elif pool_type == 'max':
            pool = nn.AdaptiveMaxPool2d(output_size)
        elif pool_type == 'avg':
            pool = nn.AdaptiveAvgPool2d(output_size)
        else:
            assert False, 'Invalid pool type: %s' % pool_type
        self.pool = pool

    def forward(self, x):
        pooled = self.pool(x)
        return pooled.flatten(1) if self.flatten else pooled

    def feat_mult(self):
        """Feature-channel multiplier implied by the chosen pool type."""
        return adaptive_pool_feat_mult(self.pool_type)

    def __repr__(self):
        return f'{self.__class__.__name__} (output_size={self.output_size}, pool_type={self.pool_type})'
def compute_ard_masks(module, *, prefix='', **kwargs):
    """Collect relevance masks for a module, keyed by '<name>.mask'.

    Non-Module inputs yield an empty dict.
    """
    if not isinstance(module, torch.nn.Module):
        return {}
    masks = {}
    for name, mask in named_relevance(module, prefix=prefix, **kwargs):
        key = name + '.mask' if name else 'mask'
        masks[key] = mask
    return masks
def saveToFile(images_processed, results, outFile):
    """Append per-image results to ``outFile``.

    If ``results[0][0] == -1`` (no class label available) each line is
    "<image> <score>", otherwise "<image> <label> <score>".

    Args:
        images_processed: list of image identifiers.
        results: parallel list of (label, score) pairs.
        outFile: path to the output file (opened in append mode).
    """
    if not results:
        # Nothing to write; the original indexed results[0] and crashed here.
        return
    include_label = results[0][0] != -1
    # Fix: use a context manager so the file is closed even if a write fails
    # (previously open()/close() with no try/finally).
    with open(outFile, 'a') as f:
        for image, res in zip(images_processed, results):
            if include_label:
                f.write('{} {} {}\n'.format(image, res[0], res[1]))
            else:
                f.write('{} {}\n'.format(image, res[1]))
def parse_args():
    """Parse command-line arguments for the recommendation experiment.

    Fixes over the original argparse usage (defaults are unchanged, so
    programmatic callers see identical results):
      * ``--category_balance`` used ``type=bool``: any non-empty string,
        including "False", parsed as True. A real string-to-bool parser is
        used instead.
      * ``--k_list`` used ``type=list``: the argument string was split into
        individual characters. ``nargs='+'`` with ``type=int`` is used instead.
    """
    def str_to_bool(value):
        # Accept the usual spellings of a boolean flag value.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='TaoBao', type=str, help='Dataset to use')
    parser.add_argument('--seed', default=2022, type=int, help='seed for experiment')
    parser.add_argument('--embed_size', default=32, type=int, help='embedding size for all layer')
    parser.add_argument('--lr', default=0.05, type=float, help='learning rate')
    parser.add_argument('--weight_decay', default=8e-08, type=float, help='weight decay for adam optimizer')
    parser.add_argument('--model', default='dgrec', type=str, help='model selection')
    parser.add_argument('--epoch', default=1000, type=int, help='epoch number')
    parser.add_argument('--patience', default=10, type=int, help='early_stop validation')
    parser.add_argument('--batch_size', default=2048, type=int, help='batch size')
    parser.add_argument('--layers', default=1, type=int, help='layer number')
    parser.add_argument('--gpu', default=0, type=int, help='-1 for cpu, 0 for gpu:0')
    parser.add_argument('--k_list', default=[100, 300], nargs='+', type=int, help='topk evaluation')
    parser.add_argument('--k', default=20, type=int, help='neighbor number in each GNN aggregation')
    parser.add_argument('--neg_number', default=4, type=int, help='negative sampler number for each positive pair')
    parser.add_argument('--metrics', default=['recall', 'hit_ratio', 'coverage'])
    parser.add_argument('--sigma', default=1.0, type=float, help='sigma for gaussian kernel')
    parser.add_argument('--gamma', default=2.0, type=float, help='gamma for gaussian kernel')
    parser.add_argument('--category_balance', default=True, type=str_to_bool, help='whether make loss category balance')
    parser.add_argument('--beta_class', default=0.9, type=float, help='class re-balanced loss beta')
    args = parser.parse_args()
    return args
def test_copy():
    """A shallow copy of a Config is a new object sharing the same storage."""
    original = Config.fromfile(osp.join(data_path, 'config/n.py'))
    duplicate = copy.copy(original)
    assert isinstance(duplicate, Config)
    assert duplicate is not original
    # Shallow copy: the underlying dict is shared, not duplicated.
    assert duplicate._cfg_dict is original._cfg_dict
    assert duplicate._filename == original._filename
    assert duplicate._text == original._text
# NOTE(review): the bare `_torch` below looks like a decorator mangled during
# extraction (presumably `@require_torch`) -- confirm against the original
# test module before running.
_torch
class TransfoXLModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-suite tests for TransfoXLModel / TransfoXLLMHeadModel."""
    all_model_classes = ((TransfoXLModel, TransfoXLLMHeadModel) if is_torch_available() else ())
    all_generative_model_classes = ((TransfoXLLMHeadModel,) if is_torch_available() else ())
    # Common-test features disabled for this architecture.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    class TransfoXLModelTester(object):
        """Builds a tiny random config + inputs and checks output shapes."""
        def __init__(self, parent, batch_size=13, seq_length=7, mem_len=30, clamp_len=15, is_training=True, use_labels=True, vocab_size=99, cutoffs=[10, 50, 80], hidden_size=32, d_embed=32, num_attention_heads=4, d_head=8, d_inner=128, div_val=2, num_hidden_layers=5, scope=None, seed=1, eos_token_id=0):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.mem_len = mem_len
            # Attention keys span the current sequence plus the memory.
            self.key_length = (seq_length + mem_len)
            self.clamp_len = clamp_len
            self.is_training = is_training
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.cutoffs = cutoffs
            self.hidden_size = hidden_size
            self.d_embed = d_embed
            self.num_attention_heads = num_attention_heads
            self.d_head = d_head
            self.d_inner = d_inner
            self.div_val = div_val
            self.num_hidden_layers = num_hidden_layers
            self.scope = scope
            self.seed = seed
            self.eos_token_id = eos_token_id
        def prepare_config_and_inputs(self):
            """Return (config, input batch 1, input batch 2, optional LM labels)."""
            input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            lm_labels = None
            if self.use_labels:
                lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            config = TransfoXLConfig(vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_ids=self.eos_token_id)
            return (config, input_ids_1, input_ids_2, lm_labels)
        def set_seed(self):
            """Fix both RNGs so shape/value checks are reproducible."""
            random.seed(self.seed)
            torch.manual_seed(self.seed)
        def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
            """Run the base model twice, feeding the first call's mems into the second."""
            model = TransfoXLModel(config)
            model.to(torch_device)
            model.eval()
            (hidden_states_1, mems_1) = model(input_ids_1)
            (hidden_states_2, mems_2) = model(input_ids_2, mems_1)
            outputs = {'hidden_states_1': hidden_states_1, 'mems_1': mems_1, 'hidden_states_2': hidden_states_2, 'mems_2': mems_2}
            return outputs
        def check_transfo_xl_model_output(self, result):
            """Hidden states: (batch, seq, hidden); mems: (mem_len, batch, hidden) per layer."""
            self.parent.assertListEqual(list(result['hidden_states_1'].size()), [self.batch_size, self.seq_length, self.hidden_size])
            self.parent.assertListEqual(list(result['hidden_states_2'].size()), [self.batch_size, self.seq_length, self.hidden_size])
            self.parent.assertListEqual(list((list(mem.size()) for mem in result['mems_1'])), ([[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
            self.parent.assertListEqual(list((list(mem.size()) for mem in result['mems_2'])), ([[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
        def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
            """Exercise the LM-head model with and without labels, chaining mems."""
            model = TransfoXLLMHeadModel(config)
            model.to(torch_device)
            model.eval()
            (lm_logits_1, mems_1) = model(input_ids_1)
            (loss_1, _, mems_1) = model(input_ids_1, labels=lm_labels)
            (lm_logits_2, mems_2) = model(input_ids_2, mems=mems_1)
            (loss_2, _, mems_2) = model(input_ids_2, labels=lm_labels, mems=mems_1)
            outputs = {'loss_1': loss_1, 'mems_1': mems_1, 'lm_logits_1': lm_logits_1, 'loss_2': loss_2, 'mems_2': mems_2, 'lm_logits_2': lm_logits_2}
            return outputs
        def check_transfo_xl_lm_head_output(self, result):
            """Losses are per-token (batch, seq); logits add a vocab dimension."""
            self.parent.assertListEqual(list(result['loss_1'].size()), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result['lm_logits_1'].size()), [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(list((list(mem.size()) for mem in result['mems_1'])), ([[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
            self.parent.assertListEqual(list(result['loss_2'].size()), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result['lm_logits_2'].size()), [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(list((list(mem.size()) for mem in result['mems_2'])), ([[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
        def prepare_config_and_inputs_for_common(self):
            """Adapter for the shared ModelTesterMixin tests (single-input form)."""
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
            inputs_dict = {'input_ids': input_ids_1}
            return (config, inputs_dict)
    def setUp(self):
        self.model_tester = TransfoXLModelTest.TransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs)
        self.model_tester.check_transfo_xl_model_output(output_result)
    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs)
        self.model_tester.check_transfo_xl_lm_head_output(output_result)
    def test_model_from_pretrained(self):
        # Only the first archive entry is exercised to keep the test fast.
        for model_name in list(TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = TransfoXLModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
            self.assertIsNotNone(model)
class TFOptimization():
    """Drives Intel Neural Compressor (INC) optimizations -- quantization,
    pruning, distillation and auto-distillation -- for a TensorFlow
    ``PreTrainedModel``.

    The constructor exports the model as a SavedModel under
    ``get_filepath(TMPPATH, task_type, task_id)`` and records its input/output
    tensor names for later graph-level evaluation.

    NOTE(review): the extracted original had lost the ``@property`` /
    ``@<name>.setter`` decorators (bare ``_names.setter`` expressions would
    raise ``NameError`` at class-definition time, and the paired getter/setter
    defs shadowed each other). They are restored here to match the private
    attributes (``self._eval_dataset`` etc.) that the rest of the class reads.
    """

    def __init__(self, model: PreTrainedModel, args, train_dataset=None, eval_dataset=None, compute_metrics: Optional[Callable]=None, criterion=None, optimizer=None, task_type=None, task_id=None, strategy=None):
        """Store configuration and export the model as a SavedModel."""
        self.model = model
        self.teacher_model = None
        self.component = None
        self.eval_dataset = eval_dataset
        self.train_dataset = train_dataset
        self._eval_func = None
        self._train_func = None
        self.quant_config = None
        self.pruning_config = None
        self.distillation_config = None
        self.pruner = None
        self.quantizer = None
        self.distiller = None
        self.in_training = False
        self._input_names = None
        self._output_names = None
        self._inputs = None
        self.compute_metrics = compute_metrics
        self.args = args
        self.optimizer = optimizer
        self.task_type = task_type
        self.task_id = task_id
        # Fall back to the model's own loss when no criterion is given.
        self.criterion = (criterion if (criterion is not None) else (self.model.loss if hasattr(self.model, 'loss') else None))
        self.model.save_pretrained(get_filepath(TMPPATH, self.task_type, self.task_id), saved_model=True)
        (_, self.input_names, self.output_names) = saved_model_session(os.path.join(get_filepath(TMPPATH, self.task_type, self.task_id), 'saved_model/1'), input_tensor_names=[], output_tensor_names=[])
        self.eval_distributed = False
        self.strategy = strategy

    @property
    def inputs(self):
        return self._inputs

    @inputs.setter
    def inputs(self, inputs: dict):
        self._inputs = inputs

    @property
    def input_names(self):
        return self._input_names

    @input_names.setter
    def input_names(self, input_names: List):
        self._input_names = input_names

    @property
    def output_names(self):
        return self._output_names

    @output_names.setter
    def output_names(self, output_names: List):
        self._output_names = output_names

    @property
    def eval_func(self):
        return self._eval_func

    @eval_func.setter
    def eval_func(self, func: Callable):
        self._eval_func = func

    @property
    def train_func(self):
        return self._train_func

    @train_func.setter
    def train_func(self, func: Callable):
        self._train_func = func

    @property
    def train_dataset(self):
        return self._train_dataset

    @train_dataset.setter
    def train_dataset(self, train_dataset):
        assert (isinstance(train_dataset, tf.data.Dataset) or (train_dataset is None)), 'train_dataset should be obj of tf.data.Dataset'
        self._train_dataset = train_dataset

    @property
    def eval_dataset(self):
        return self._eval_dataset

    @eval_dataset.setter
    def eval_dataset(self, eval_dataset):
        assert (isinstance(eval_dataset, tf.data.Dataset) or (eval_dataset is None)), 'eval_dataset should be obj of tf.data.Dataset'
        self._eval_dataset = eval_dataset

    def builtin_eval_func(self, model):
        """Default INC evaluation function.

        Handles two model kinds: a keras SavedModel (evaluated through its
        'serving_default' signature) and a frozen TF graph (evaluated through a
        tf.compat.v1 session on the recorded input/output tensor names).
        Returns the configured metric value (``self.metrics`` is set by
        ``init_quantizer`` / ``init_pruner`` / ``init_distiller`` first).
        """
        model_type = None
        label_ids: np.ndarray = None
        try:
            model_type = get_model_type(model)
        except ValueError:
            logger.info('use keras savedModel')
        num_examples = sum((1 for _ in (self._eval_dataset.unbatch() if hasattr(self._eval_dataset, 'unbatch') else self._eval_dataset)))
        logger.info(f'***** Running Evaluation *****')
        logger.info(f'  Num examples in dataset = {num_examples}')
        logger.info(f'  Batch size = {self.args.per_device_eval_batch_size}')
        if (model_type is None):
            # keras SavedModel path: run the serving signature batch by batch.
            preds: np.ndarray = None
            infer = model.signatures['serving_default']
            for (idx, (inputs, labels)) in enumerate(self._eval_dataset):
                for name in inputs:
                    inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype)
                results = infer(**inputs)
                for val in results:
                    if (preds is None):
                        preds = results[val].numpy()
                    else:
                        preds = np.append(preds, results[val].numpy(), axis=0)
                if (label_ids is None):
                    label_ids = (labels[0].numpy() if isinstance(labels, list) else labels.numpy())
                else:
                    label_ids = np.append(label_ids, (labels[0].numpy() if isinstance(labels, list) else labels.numpy()), axis=0)
            test_predictions = {'logits': preds}
            eval_metrics = self.compute_metrics(test_predictions, label_ids)
            acc = eval_metrics['accuracy']
            return acc
        else:
            # Frozen-graph path: feed recorded input tensors through a session.
            from neural_compressor.adaptor.tf_utils.util import get_tensor_by_name
            input_tensor = [get_tensor_by_name(model, x) for x in self.input_names]
            output_tensor = [get_tensor_by_name(model, x) for x in self.output_names]
            logger.info('Start to evaluate the TensorFlow model.')
            total_time = 0
            config = tf.compat.v1.ConfigProto()
            config.use_per_session_threads = 1
            config.inter_op_parallelism_threads = 1
            sess = tf.compat.v1.Session(graph=model, config=config)
            feed_dict = {}
            label_ids: np.ndarray = None
            preds: np.ndarray = None
            for (idx, (inputs, labels)) in enumerate(self._eval_dataset):
                assert (len(input_tensor) == len(inputs)), 'inputs len must equal with input_tensor'
                feed_dict = {}
                # Match dataset input names to graph tensors ('name:0' -> 'name').
                for name in inputs:
                    for tensor in input_tensor:
                        pos = tensor.name.rfind(':')
                        t_name = (tensor.name if (pos < 0) else tensor.name[:pos])
                        if (name == t_name):
                            feed_dict[tensor] = inputs[name].numpy()
                            break
                start = time.time()
                logits = sess.run(output_tensor, feed_dict)
                total_time += (time.time() - start)
                if (not self.args.prediction_loss_only):
                    if isinstance(logits, tuple):
                        logits = logits[0]
                    if isinstance(labels, tuple):
                        labels = labels[0].numpy()
                    if (isinstance(logits, list) and (len(logits) > 1)):
                        for val in logits:
                            if (preds is None):
                                preds = val
                            else:
                                preds = np.append(preds, val, axis=0)
                        for val in labels:
                            if (label_ids is None):
                                label_ids = val.numpy()
                            else:
                                label_ids = np.append(label_ids, val.numpy(), axis=0)
                    else:
                        if (preds is None):
                            preds = (logits[0] if isinstance(logits, list) else logits)
                        else:
                            preds = np.append(preds, (logits[0] if isinstance(logits, list) else logits), axis=0)
                        if (label_ids is None):
                            label_ids = (labels[0].numpy() if isinstance(labels, list) else labels.numpy())
                        else:
                            label_ids = np.append(label_ids, (labels[0].numpy() if isinstance(labels, list) else labels.numpy()), axis=0)
            if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
                try:
                    loss = (self.criterion(label_ids, preds) if (self.criterion is not None) else None)
                except Exception as e:
                    logger.info(e)
                    logger.info('There is no loss function or loss compute error, Please compute loss in compute_metrics function')
                    loss = None
                results = self.compute_metrics({'logits': preds}, label_ids)
                if (loss is not None):
                    results['loss'] = loss.numpy()
                # self.metrics may be a list (weighted combination) or a single Metric.
                if isinstance(self.metrics, list):
                    nums = len(self.metrics)
                    for metric in self.metrics:
                        assert (metric.name in results.keys()), 'Please set metric from {}'.format(results.keys())
                    if (nums == 1):
                        result = results.get(self.metrics[0].name)
                    else:
                        result = 0
                        for metric in self.metrics:
                            assert (metric.weight_ratio is not None), 'Please set weights for metric if you want to use more than one metric'
                            result += (results[metric.name] * metric.weighted)
                    logger.info('metric Accuracy: {}'.format(result))
                elif isinstance(self.metrics, Metric):
                    assert (self.metrics.name in results.keys()), 'Please set metric from {}'.format(results.keys())
                    result = results.get(self.metrics.name)
                    logger.info('metric Accuracy: {}'.format(result))
                else:
                    assert False, 'Please set the correct metrics format from the README'
            else:
                result = 0
            logger.info('Throughput: {} samples/sec'.format((num_examples / total_time)))
            return result

    def init_quantizer(self, quant_config):
        """Create and store an INC Quantization object for the exported model."""
        from neural_compressor.experimental import Quantization
        self.quant_config = (QuantizationConfig() if (quant_config is None) else quant_config)
        self.quant_config.framework = 'tensorflow'
        self.metrics = self.quant_config.metrics
        quantizer = Quantization(self.quant_config.inc_config)
        quantizer.model = common.Model(os.path.join(get_filepath(TMPPATH, self.task_type, self.task_id), 'saved_model/1'), modelType='saved_model')
        self.quantizer = quantizer
        return quantizer

    def _inc_quantize(self, quant_config):
        """Run INC quantization; returns the optimized model (also saved to output_dir)."""
        if (self.quantizer is None):
            self.init_quantizer(quant_config=quant_config)
        if (self._eval_func is not None):
            self.quantizer.eval_func = self._eval_func
        else:
            assert (self.metrics is not None), 'Please pass the metrics to QuantizationConfig.metrics!'
            self.quantizer.eval_func = self.builtin_eval_func
        if (self.quant_config.approach == QuantizationMode.POSTTRAININGSTATIC.value):
            # Static PTQ needs a calibration dataloader; prefer the train set.
            if (self._train_dataset is not None):
                self.quantizer.calib_dataloader = TFDataloader(self._train_dataset, batch_size=self.args.per_device_train_batch_size)
            elif (self._eval_dataset is not None):
                self.quantizer.calib_dataloader = TFDataloader(self._eval_dataset, batch_size=self.args.per_device_eval_batch_size)
            else:
                assert False, 'Please pass calibration dataset to TFNoTrainerOptimizer.calib_dataloader'
        elif (self.quant_config.approach == QuantizationMode.QUANTIZATIONAWARETRAINING.value):
            assert False, 'Unsupport quantization aware training for tensorflow framework'
        opt_model = self.quantizer.fit()
        opt_model.save(self.args.output_dir)
        logger.info('quantized model have saved to {}'.format(self.args.output_dir))
        return opt_model.model

    def quantize(self, quant_config: QuantizationConfig=None, eval_func: Optional[Callable]=None, train_func: Optional[Callable]=None, train_dataset=None, eval_dataset=None):
        """Public entry point: optionally override funcs/datasets, then quantize."""
        if (eval_func is not None):
            self._eval_func = eval_func
        if (train_func is not None):
            self._train_func = train_func
        if (train_dataset is not None):
            self.train_dataset = train_dataset
        if (eval_dataset is not None):
            self.eval_dataset = eval_dataset
        return self._inc_quantize(quant_config=quant_config)

    def init_pruner(self, pruning_config=None):
        """Create and store an INC Pruning object for the exported model."""
        from neural_compressor.experimental import Pruning
        if (pruning_config.framework != 'tensorflow'):
            logger.warning('pruning_config.framework is {}, should be tensorflow'.format(pruning_config.framework))
            pruning_config.framework = 'tensorflow'
        self.pruning_config = pruning_config
        self.metrics = self.pruning_config.metrics
        assert isinstance(self.pruning_config, PruningConfig), 'please pass a instance of PruningConfig to trainer.prune!'
        pruner = Pruning(self.pruning_config.inc_config)
        pruner.model = os.path.join(get_filepath(TMPPATH, self.task_type, self.task_id), 'saved_model/1')
        pruner.model.model_type = 'saved_model'
        self.pruner = pruner
        self.component = pruner
        return pruner

    def prune(self, pruning_config=None, eval_func: Optional[Callable]=None, train_func: Optional[Callable]=None, train_dataset=None, eval_dataset=None):
        """Run INC pruning; returns the pruned model (also saved to output_dir)."""
        if (self.pruner is None):
            self.init_pruner(pruning_config=pruning_config)
        if (eval_func is not None):
            self.eval_func = eval_func
        if (train_func is not None):
            self.train_func = train_func
        if (train_dataset is not None):
            self.train_dataset = train_dataset
        if (eval_dataset is not None):
            self.eval_dataset = eval_dataset
        if (self._eval_func is not None):
            self.pruner.eval_func = self._eval_func
        else:
            assert (self.metrics is not None), 'Please pass the metrics to PruningConfig.metrics!'
            self.pruner.eval_func = self.builtin_eval_func
        # INC renamed `pruning_func` to `train_func` after release 1.12.
        if (self.train_func is not None):
            if (version.parse(__version__) <= version.parse('1.12')):
                self.pruner.pruning_func = self._train_func
            else:
                self.pruner.train_func = self._train_func
        elif (version.parse(__version__) <= version.parse('1.12')):
            self.pruner.pruning_func = self.build_train_func
        else:
            self.pruner.train_func = self.build_train_func
        opt_model = self.pruner.fit()
        (stats, sparsity) = opt_model.report_sparsity()
        logger.info(stats)
        logger.info(sparsity)
        opt_model.save(self.args.output_dir)
        logger.info('pruned model have saved to {}'.format(self.args.output_dir))
        return opt_model.model

    def init_distiller(self, distillation_config, teacher_model: PreTrainedModel):
        """Create an INC Distillation object and hook a distillation-aware
        train_step onto the student model."""
        from neural_compressor.experimental import Distillation
        assert isinstance(distillation_config, DistillationConfig), 'please pass a instance of DistillationConfig to trainer.distill!'

        def train_step(data):
            # Keras custom train step that adds the distillation loss term.
            if (len(data) == 3):
                (x, y, sample_weight) = data
            else:
                sample_weight = None
                (x, y) = data
            with tf.GradientTape() as tape:
                y_pred = self.model(x)
                teacher_outputs = self.distiller.criterion.teacher_model_forward(input=x, teacher_model=teacher_model)
                loss = self.model.compute_loss(x, y, y_pred, sample_weight)
                loss = self.distiller.on_after_compute_loss(x, y_pred.logits, loss, teacher_outputs.logits)
            self.model._validate_target_and_loss(y, loss)
            self.model.optimizer.minimize(loss, self.model.trainable_variables, tape=tape)
            return self.model.compute_metrics(x, y, y_pred, sample_weight)

        self.model.train_step = train_step
        self.model.compile(optimizer=self.model.optimizer, loss=self.model.loss, metrics=self.model.compiled_metrics._user_metrics)
        if (distillation_config.framework != 'tensorflow'):
            logger.warning('distillation_config.framework is {}, should be tensorflow'.format(distillation_config.framework))
            distillation_config.framework = 'tensorflow'
        self.distillation_config = distillation_config
        self.metrics = self.distillation_config.metrics
        self.teacher_model = teacher_model
        distiller = Distillation(self.distillation_config.inc_config)
        distiller.model = os.path.join(TMPPATH, 'saved_model/1')
        distiller.model.model_type = 'saved_model'
        self.teacher_model.save_pretrained(TEACHERPATH, saved_model=True)
        distiller.teacher_model = os.path.join(TEACHERPATH, 'saved_model/1')
        distiller.teacher_model.model_type = 'saved_model'
        self.distiller = distiller
        self.component = distiller
        return distiller

    def distill(self, distillation_config, teacher_model: PreTrainedModel, eval_func: Optional[Callable]=None, train_func: Optional[Callable]=None):
        """Run INC distillation; returns the distilled model (also saved)."""
        if (self.distiller is None):
            self.init_distiller(distillation_config=distillation_config, teacher_model=teacher_model)
        if (eval_func is not None):
            self._eval_func = eval_func
        if (train_func is not None):
            self._train_func = train_func
        else:
            self._train_func = self.build_train_func
        self.distiller.eval_func = self._eval_func
        self.distiller.train_func = self._train_func
        self.distiller.create_criterion()
        opt_model = self.distiller.fit()
        opt_model.save(self.args.output_dir)
        logger.info('distilled model have saved to {}'.format(self.args.output_dir))
        return opt_model.model

    def model_builder_builtin(self, arch_paras=None, model_cls=None):
        """Build a model of ``model_cls`` from this model's config, overriding
        any config fields named in ``arch_paras``."""
        config = self.model.config
        if (arch_paras is not None):
            assert isinstance(arch_paras, dict), 'Expect arch_paras to be a dict.'
            for k in arch_paras:
                if hasattr(config, k):
                    config.__setattr__(k, arch_paras[k])
                    # MobileBERT-style configs mirror this field under another name.
                    if (k == 'intra_bottleneck_size'):
                        config.__setattr__('true_hidden_size', arch_paras[k])
        return model_cls.from_config(config)

    def autodistill(self, autodistillation_config, teacher_model: PreTrainedModel, model_builder: Optional[Callable]=None, model_cls: Optional[Callable]=None, eval_func: Optional[Callable]=None, train_func: Optional[Callable]=None):
        """Run INC AutoDistillation search; returns the search result."""
        self.autodistillation_config = autodistillation_config
        if (model_builder is None):
            assert (model_cls is not None), (('Must specify model_cls to use the built-in ' + 'model_builder, e.g. model_cls=AutoModelForPreTraining, or you can use ') + 'the customized model_builder.')
            model_builder = partial(self.model_builder_builtin, model_cls=model_cls)
        agent = AutoDistillation(model_builder, self.autodistillation_config, framework='tensorflow')

        def train_func_builtin(model):
            # Default training function: runs flash then regular distillers.
            def run_distillers(model, distillers, train_steps, block_names, presentation='flash distillation'):
                for (i, elements) in enumerate(zip(distillers, train_steps, block_names)):
                    (distiller, ts, bln) = elements
                    logger.info(' '.join([('=' * 30), 'Step {} of'.format((i + 1)), presentation, ('=' * 30)]))

                    def train_step(data):
                        if (len(data) == 3):
                            (x, y, sample_weight) = data
                        else:
                            sample_weight = None
                            (x, y) = data
                        with tf.GradientTape() as tape:
                            y_pred = model(x)
                            teacher_outputs = distiller.criterion.teacher_model_forward(input=x, teacher_model=teacher_model)
                            loss = model.compute_loss(x, y, y_pred, sample_weight)
                            loss = distiller.on_after_compute_loss(x, y_pred.logits, loss, teacher_outputs.logits)
                        model._validate_target_and_loss(y, loss)
                        optimizer = self.model.optimizer
                        optimizer.minimize(loss, model.trainable_variables, tape=tape)
                        return model.compute_metrics(x, y, y_pred, sample_weight)

                    model.save_pretrained(get_filepath(TMPPATH, self.task_type, self.task_id), saved_model=True)
                    # Rebuild a fresh optimizer with the same public settings.
                    opt_kwargs = {}
                    for (k, v) in self.model.optimizer.__dict__.items():
                        if (not k.startswith('_')):
                            opt_kwargs[k] = v
                    optimizer = self.model.optimizer.__class__(**opt_kwargs)
                    if self.strategy:
                        with self.strategy.scope():
                            model = model_cls.from_pretrained(get_filepath(TMPPATH, self.task_type, self.task_id))
                            model.compile(optimizer=optimizer, loss=self.model.loss, metrics=self.model.compiled_metrics._user_metrics)
                            model.train_step = train_step
                    else:
                        model.train_step = train_step
                        model.compile(optimizer=optimizer, loss=self.model.loss, metrics=self.model.compiled_metrics._user_metrics)
                    self.model = model
                    distiller.model = os.path.join(TMPPATH, 'saved_model/1')
                    distiller.model.model_type = 'saved_model'
                    teacher_model.save_pretrained(TEACHERPATH, saved_model=True)
                    distiller.teacher_model = os.path.join(TEACHERPATH, 'saved_model/1')
                    distiller.teacher_model.model_type = 'saved_model'
                    if (eval_func is not None):
                        self._eval_func = eval_func
                    else:
                        self._eval_func = self.builtin_eval_func
                    if (train_func is not None):
                        self._train_func = train_func
                    else:
                        self._train_func = self.build_train_func
                    distiller.eval_func = self._eval_func
                    distiller.train_func = self._train_func
                    distiller.create_criterion()
                    self.component = self.distiller = distiller
                    opt_model = distiller.fit()
                    opt_model.save(self.args.output_dir)
                    return opt_model

            agent.create_distillers()
            ori_model = model
            if agent.flash_distillers:
                model = run_distillers(ori_model, agent.flash_distillers, agent.flash_train_steps, agent.flash_block_names)
            if agent.regular_distillers:
                model = run_distillers(ori_model, agent.regular_distillers, agent.regular_train_steps, agent.regular_block_names, presentation='regular distillation')
            return model.model

        def eval_func_builtin(model):
            if self._eval_func:
                result = self._eval_func(model)
            else:
                result = self.builtin_eval_func(model)
            return {'metric': result}

        agent.framework = 'tensorflow'
        agent.train_func = (train_func if train_func else train_func_builtin)
        agent.eval_func = (eval_func if eval_func else eval_func_builtin)
        os.makedirs(self.args.output_dir, exist_ok=True)
        return agent.search(self.args.output_dir, model_cls)

    def build_train_func(self, model):
        """Default training function: fit the keras model while forwarding INC
        component hooks through a keras Callback."""
        tf.random.set_seed(1)
        epochs = 1
        component = self.component
        prune_model = self.model
        model_path = get_filepath(TMPPATH, self.task_type, self.task_id)
        if ('distillation' in self.component.cfg):
            epochs = max(epochs, self.component.cfg.distillation.train.get('epoch', 1))
            hooks = self.component.hooks
        if ('pruning' in self.component.cfg):
            epochs = max(epochs, self.component.cfg.pruning.train.get('epoch', 1))
            callbacks = self.pruner.callbacks
            hooks = callbacks['tf_pruning'](self.pruner.model, self.model, self.pruner.hooks)

        class callback(tf.keras.callbacks.Callback):
            # INC renamed its hook entry points after release 1.12; dispatch on version.
            def on_train_begin(self, logs=None):
                if (version.parse(__version__) <= version.parse('1.12')):
                    hooks['pre_epoch_begin']()
                else:
                    hooks['on_train_begin']()

            def on_train_end(self, logs=None):
                if (version.parse(__version__) <= version.parse('1.12')):
                    hooks['post_epoch_end']()
                else:
                    hooks['on_train_end']()

            def on_epoch_begin(self, epoch, logs=None):
                hooks['on_epoch_begin'](epoch)

            def on_epoch_end(self, epoch, logs=None):
                # Re-export the (possibly updated) model so the INC component
                # evaluates the current weights.
                component.model._session = None
                prune_model.save_pretrained(model_path, saved_model=True)
                component.model = os.path.join(model_path, 'saved_model/1')
                component.model.model_type = 'saved_model'
                component.model.sess
                hooks['on_epoch_end']()

            def on_train_batch_begin(self, batch, logs=None):
                if (version.parse(__version__) <= version.parse('1.12')):
                    hooks['on_batch_begin'](batch)
                else:
                    hooks['on_step_begin'](batch)

            def on_train_batch_end(self, batch, logs=None):
                if (version.parse(__version__) <= version.parse('1.12')):
                    hooks['on_batch_end']()
                else:
                    hooks['on_step_end']()

        self.model.fit(self.train_dataset, validation_data=self.eval_dataset, epochs=epochs, callbacks=[callback()])
        self.component.model._session = None
        self.model.save_pretrained(get_filepath(TMPPATH, self.task_type, self.task_id), saved_model=True)
def parse_fast(line, grammar, grammar_len, sparse_matches=False):
    """Match every candidate grammar construction at every position of ``line``.

    Args:
        line: sequence of units; each unit is indexable by feature position.
        grammar: grammar structure consumed by ``_get_candidates``.
        grammar_len: total number of constructions in the grammar.
        sparse_matches: if True, return a list of matched grammar indices
            (with repeats); otherwise return a dense match-count vector of
            length ``grammar_len``.

    Idiom fixes over the original: dead ``matches = None`` initializer removed,
    ``[0 for x in range(n)]`` -> ``[0] * n``, ``match == True`` -> ``match``,
    and ``enumerate``/direct iteration instead of ``range(len(...))``.
    """
    matches = [] if sparse_matches else [0] * grammar_len
    line_len = len(line)
    for line_index, unit in enumerate(line):
        for construction, grammar_index in _get_candidates(unit, grammar):
            # Offsets start at 1 -- offset 0 is presumably already matched by
            # _get_candidates (TODO confirm against its implementation).
            match = True
            for j in range(1, len(construction)):
                if construction[j] == (0, 0):
                    # (0, 0) marks the end of a shorter construction.
                    break
                if line_index + j >= line_len:
                    match = False
                    break
                if line[line_index + j][construction[j][0] - 1] != construction[j][1]:
                    match = False
                    break
            if match:
                if sparse_matches:
                    matches.append(grammar_index)
                else:
                    matches[grammar_index] += 1
    return matches
def squareform(tensor):
    """Convert a condensed (1-D) distance tensor to a symmetric square matrix.

    TensorFlow analogue of ``scipy.spatial.distance.squareform`` for the
    vector-to-matrix direction only; the matrix-to-vector direction is not
    implemented.

    Raises:
        ValueError: if the vector length is not a binomial coefficient
            n-choose-2 for some integer n >= 2.
        NotImplementedError: for non-vector input.
    """
    assert isinstance(tensor, tf.Tensor), 'tensor_utils.squareform: Input must be a `tensorflow.Tensor` instance.'
    tensor_shape = tensor.shape.as_list()
    n_elements = tensor_shape[0]
    if _is_vector(tensor):
        if (n_elements == 0):
            # Mirrors scipy: an empty condensed vector maps to a 1x1 zero matrix.
            return tf.zeros((1, 1), dtype=tensor.dtype)
        # Solve n*(n-1)/2 == n_elements for the matrix dimension n.
        dimension = int(np.ceil(np.sqrt((n_elements * 2))))
        if ((dimension * (dimension - 1)) != (n_elements * 2)):
            raise ValueError('Incompatible vector size. It must be a binomial coefficient n choose 2 for some integer n >=2.')
        n_total_elements_matrix = (dimension ** 2)
        n_diagonal_zeros = dimension
        n_fill_zeros = ((n_total_elements_matrix - n_elements) - n_diagonal_zeros)
        condensed_distance_tensor = tf.reshape(tensor, shape=(n_elements, 1))
        diagonal_zeros = tf.zeros(shape=(n_diagonal_zeros, 1), dtype=condensed_distance_tensor.dtype)
        fill_zeros = tf.zeros(shape=(n_fill_zeros, 1), dtype=condensed_distance_tensor.dtype)
        def upper_triangular_indices(dimension):
            # Yields the flat (row-major) indices of the strictly-upper triangle.
            assert (dimension > 0), 'tensor_utils.upper_triangular_indices: Dimension must be positive integer!'
            for row in range(dimension):
                for column in range((row + 1), dimension):
                    element_index = ((dimension * row) + column)
                    (yield element_index)
        all_indices = set(range(n_total_elements_matrix))
        diagonal_indices = list(range(0, n_total_elements_matrix, (dimension + 1)))
        upper_triangular = list(upper_triangular_indices(dimension))
        remaining_indices = all_indices.difference(set(diagonal_indices).union(upper_triangular))
        # Scatter zeros on the diagonal, the condensed distances into the
        # strictly-upper triangle, and zeros everywhere else, then fold the
        # flat result back into a (dimension x dimension) matrix.
        data = (diagonal_zeros, condensed_distance_tensor, fill_zeros)
        indices = (tuple(diagonal_indices), tuple(upper_triangular), tuple(remaining_indices))
        stitch_vector = tf.dynamic_stitch(data=data, indices=indices)
        upper_triangular = tf.reshape(stitch_vector, (dimension, dimension))
        lower_triangular = tf.transpose(upper_triangular)
        # Adding the transpose symmetrizes the matrix (lower triangle was zero).
        return (upper_triangular + lower_triangular)
    else:
        raise NotImplementedError('tensor_utils.squareform: Only 1-d (vector) input is supported!')
def SkipConnectFastGRUCell(input, hidden, hidden_skip, w_ih, w_hh, b_ih=None, b_hh=None, noise_in=None, noise_hidden=None):
    """One GRU step whose recurrent input is ``[hidden | hidden_skip]``.

    Optional multiplicative noise masks are applied to the input and to the
    concatenated recurrent state (variational-dropout style -- confirm with
    caller). Returns the new hidden state.
    """
    x = input if noise_in is None else input * noise_in
    state = torch.cat([hidden, hidden_skip], dim=1)
    if noise_hidden is not None:
        state = state * noise_hidden
    # Input and recurrent projections, each split into the 3 stacked gates.
    in_r, in_z, in_n = F.linear(x, w_ih, b_ih).chunk(3, 1)
    rec_r, rec_z, rec_n = F.linear(state, w_hh, b_hh).chunk(3, 1)
    reset = torch.sigmoid(in_r + rec_r)
    update = torch.sigmoid(in_z + rec_z)
    candidate = torch.tanh(in_n + reset * rec_n)
    # Interpolate between the candidate and the previous hidden state.
    return candidate + update * (hidden - candidate)
def test_quad_double_track(vrblvl=0):
    """Track the mickey-mouse example in quad double precision with 2 tasks.

    Returns 0 when exactly 4 solutions are found with a small error sum,
    1 otherwise.
    """
    def show(header, items, numbered):
        # Print a section header followed by its items, optionally numbered.
        print(header)
        if numbered:
            for pos, item in enumerate(items, start=1):
                print('Solution', pos, ':')
                print(item)
        else:
            for item in items:
                print(item)

    mickey = ['x^2 + 4*y^2 - 4;', '2*y^2 - x;']
    (start, startsols) = total_degree_start_system(mickey, vrblvl=vrblvl)
    show('the start system :', start, numbered=False)
    show('the start solutions :', startsols, numbered=True)
    (gamma, sols) = quad_double_track(mickey, start, startsols, tasks=2, vrblvl=vrblvl)
    show('the solutions :', sols, numbered=True)
    err = verify(mickey, sols, vrblvl)
    verbose = vrblvl > 0
    if verbose:
        print('the error sum :', err)
    err_ok = abs(err.real + err.imag) < 1e-10
    if len(sols) == 4 and err_ok:
        if verbose:
            print('Found 4 solutions and error is okay.')
        return 0
    if len(sols) != 4:
        if verbose:
            print('Number of solutions is not 4 :', len(sols))
        return 1
    if verbose:
        print('The error is too large.')
    return 1
class BatchTensorToVars(object):
    """Callable that wraps every tensor in a batch dict as a Variable.

    Non-tensor entries pass through untouched; tensors are wrapped with
    ``requires_grad=False`` and moved to GPU when ``use_cuda`` is set.
    """

    def __init__(self, use_cuda=True):
        self.use_cuda = use_cuda

    def __call__(self, batch):
        converted = {}
        for key, value in batch.items():
            if not isinstance(value, torch.Tensor):
                converted[key] = value
            else:
                wrapped = Variable(value, requires_grad=False)
                converted[key] = wrapped.cuda() if self.use_cuda else wrapped
        return converted
class __DisplMixin():
    """Mixin adding a compact display of one sample next to its annotation."""

    def displ_item(self, index):
        """Return an ordered view pairing annotation fields with loaded images."""
        sample = self.__getitem__(index)
        ann = self.annotation[index]
        return OrderedDict({
            'file_L': ann['images'][0],
            'file_R': ann['images'][1],
            'sentence': ann['sentence'],
            'label': ann['label'],
            'image': [sample['image0'], sample['image1']],
        })
class ExportForecastingPipeline(nn.Module):
    """Bundle preprocess -> inference -> postprocess into one exportable module."""

    def __init__(self, preprocess: nn.Module, inference: nn.Module, postprocess: nn.Module) -> None:
        super().__init__()
        # The three pipeline stages, applied in order in forward().
        self.preprocess = preprocess
        self.inference = inference
        self.postprocess = postprocess

    def forward(self, data):
        """Run the input through all three stages and return the final output."""
        return self.postprocess(self.inference(self.preprocess(data)))
class nnUNetTrainerV2_Loss_TopK10(nnUNetTrainerV2):
    # nnUNetTrainerV2 variant that trains with a top-k loss instead of the
    # parent's default loss; everything else is inherited unchanged.
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)
        # Override the loss after parent init so it is not clobbered.
        # k=10 presumably means "keep the hardest 10% of pixels" — TODO
        # confirm against the TopKLoss implementation.
        self.loss = TopKLoss(k=10)
class StructuralDataset(GraphDataset):
    """GraphDataset variant that converts edge lists into dense matrices."""

    def __init__(self, distance_matrix_key='distance_matrix', feature_matrix_key='feature_matrix', **kwargs):
        super().__init__(**kwargs)
        # Keys under which the dense matrices are stored in each item.
        self.distance_matrix_key = distance_matrix_key
        self.feature_matrix_key = feature_matrix_key

    def __getitem__(self, index):
        """Fetch the base item and replace edge lists with dense matrices."""
        sample = super().__getitem__(index)
        n_nodes = int(sample[self.num_nodes_key])
        edge_index = sample.pop(self.edges_key)
        raw_node_feats = sample.pop(self.node_features_key)
        raw_edge_feats = sample.pop(self.edge_features_key)
        (node_matrix, distances, edge_matrix) = preprocess_data(n_nodes, edge_index, raw_node_feats, raw_edge_feats)
        sample[self.node_features_key] = node_matrix
        sample[self.distance_matrix_key] = distances
        sample[self.feature_matrix_key] = edge_matrix
        return sample
@_registry(operator_type='Where')
class Where(Operator):
    """Operator wrapper for the `Where` op.

    NOTE(review): the original source had `_registry(operator_type='Where')`
    as a bare statement directly above the class (the decorator `@` was
    apparently stripped), so the registration never actually happened —
    restored as a class decorator; confirm against the registry helper.
    """

    def __init__(self):
        super().__init__()

    def set_attr(self, framework, node):
        """Record the fill value for masked positions from the torch node."""
        if framework == 'torch':
            mask_value = node.inputsAt(2).toIValue()
            # A plain python float is stored directly; tensor-like IValues
            # are reduced to a python scalar via .item().
            if isinstance(mask_value, float):
                self._attr['mask_value'] = mask_value
            else:
                self._attr['mask_value'] = mask_value.item()
def compute_cov_a(a, classname, layer_info, fast_cnn):
    """Compute the (uncentered) covariance of activations, as used by K-FAC.

    Args:
        a: activation tensor; the first dimension is the batch.
        classname: layer class name ('Conv2d', 'AddBias', or anything else
            for plain linear-style activations).
        layer_info: extra arguments forwarded to `_extract_patches` for Conv2d.
        fast_cnn: for Conv2d, average patches over spatial locations instead
            of folding them into the batch dimension.

    Returns:
        A square matrix `a.T @ a / batch_size` after the layer-specific
        reshaping of `a`.
    """
    batch_size = a.size(0)
    if classname == 'Conv2d':
        if fast_cnn:
            a = _extract_patches(a, *layer_info)
            a = a.view(a.size(0), -1, a.size(-1))
            a = a.mean(1)
        else:
            a = _extract_patches(a, *layer_info)
            # Fold spatial locations into the batch and normalize by the
            # number of locations (sizes read from `a` before the view).
            a = a.view(-1, a.size(-1)).div_(a.size(1)).div_(a.size(2))
    elif classname == 'AddBias':
        # Bias covariance only depends on the batch size; keep the device.
        is_cuda = a.is_cuda
        a = torch.ones(a.size(0), 1)
        if is_cuda:
            a = a.cuda()
    # Fix: the original `a.t() (a / batch_size)` was missing the `@` matmul
    # operator and would raise "'Tensor' object is not callable" at runtime.
    return a.t() @ (a / batch_size)
class CaffeEltWiseLayer(CaffeLayerGenerator):
    """Generator for a Caffe `Eltwise` layer with a configurable operation."""

    def __init__(self, name, operation):
        super(CaffeEltWiseLayer, self).__init__(name, 'Eltwise')
        # Elementwise operation name written into eltwise_param.
        self.operation = operation

    def write(self, f):
        """Write the layer's prototxt, adding the second bottom and op."""
        extra = f'\n bottom: "{self.bottom[1]}"\n eltwise_param{{\n operation: {self.operation}\n }}'
        f.write(self.get_template().format(extra))
def auto_tune(input_graph_path, batch_size):
    """Run post-training static quantization with accuracy-driven auto-tuning.

    Args:
        input_graph_path: path to the TensorFlow graph to quantize.
        batch_size: batch size for the calibration/evaluation dataloader.

    Returns:
        The quantized model produced by `fit`.
    """
    # One dataloader serves both calibration and evaluation.
    loader = DataLoader(framework='tensorflow', dataset=Dataset(), batch_size=batch_size)
    accuracy = AccuracyCriterion(higher_is_better=True, criterion='relative', tolerable_loss=0.01)
    quant_config = PostTrainingQuantConfig(
        approach='static',
        tuning_criterion=TuningCriterion(max_trials=100),
        accuracy_criterion=accuracy,
    )
    return fit(model=input_graph_path, conf=quant_config, calib_dataloader=loader, eval_dataloader=loader)
class Base(torch.nn.Module):
    """Module base class that broadcasts configuration values to all submodules."""

    def __init__(self):
        super().__init__()

    def _set_child_attribute(self, attr, value):
        """Set `attr` to `value` on every (sub)module that already has it.

        `self.modules()` yields `self` first, so the original's separate
        `hasattr(self, attr)` check was redundant and has been removed —
        behavior is identical.
        """
        for module in self.modules():
            if hasattr(module, attr):
                setattr(module, attr, value)
        return self

    def set_temperature(self, value):
        # Broadcast a new temperature to every module that defines one.
        self._set_child_attribute('temperature', value)

    def enable_hard_round(self, mode=True):
        self._set_child_attribute('hard_round', mode)

    def disable_hard_round(self, mode=True):
        # Convenience inverse of enable_hard_round.
        self.enable_hard_round(not mode)
# NOTE(review): a stray `_torch` line preceded this class — almost certainly
# the residue of a stripped decorator (e.g. `@require_torch`). Removed here
# because evaluating the bare name would raise NameError at import time.
class PipelineTesterMixin():
    """Mixin with tests common to all diffusion-pipeline test classes.

    Child classes must provide `pipeline_class`, `params`, `batch_params`
    (and `callback_cfg_params` for CFG-callback tests) and implement the
    `get_dummy_components` / `get_dummy_inputs` factories.

    NOTE(review): several `skipIf(...)`-style decorators in the original had
    their `@` and function name stripped, leaving bare `(cond, reason=...)`
    tuples that are syntax errors. They are restored below as early-return
    guards inside the affected test methods (same skip condition).
    """

    # Optional `__call__` arguments every pipeline is expected to accept.
    required_optional_params = frozenset(['num_inference_steps', 'num_images_per_prompt', 'generator', 'latents', 'output_type', 'return_dict'])
    test_attention_slicing = True
    test_xformers_attention = True

    def get_generator(self, seed):
        # torch.Generator has no 'mps' backend; seed on CPU in that case.
        device = (torch_device if (torch_device != 'mps') else 'cpu')
        generator = torch.Generator(device).manual_seed(seed)
        return generator

    @property
    def pipeline_class(self) -> Union[(Callable, DiffusionPipeline)]:
        # NOTE(review): restored `@property` — this attribute is used without
        # being called (e.g. `self.pipeline_class(**components)`), which only
        # works when children override it with a class attribute / property.
        raise NotImplementedError('You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. See existing pipeline tests for reference.')

    def get_dummy_components(self):
        raise NotImplementedError('You need to implement `get_dummy_components(self)` in the child test class. See existing pipeline tests for reference.')

    def get_dummy_inputs(self, device, seed=0):
        raise NotImplementedError('You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. See existing pipeline tests for reference.')

    @property
    def params(self) -> frozenset:
        # NOTE(review): restored `@property` — `self.params` is iterated
        # directly elsewhere in this mixin.
        raise NotImplementedError("You need to set the attribute `params` in the child test class. `params` are checked for if all values are present in `__call__`'s signature. You can set `params` using one of the common set of parameters defined in `pipeline_params.py` e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to image pipelines, including prompts and prompt embedding overrides.If your pipeline's set of arguments has minor changes from one of the common sets of arguments, do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline with non-configurable height and width arguments should set the attribute as `params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. See existing pipeline tests for reference.")

    @property
    def batch_params(self) -> frozenset:
        # NOTE(review): restored `@property` — see `params` above.
        raise NotImplementedError("You need to set the attribute `batch_params` in the child test class. `batch_params` are the parameters required to be batched when passed to the pipeline's `__call__` method. `pipeline_params.py` provides some common sets of parameters such as `TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's set of batch arguments has minor changes from one of the common sets of batch arguments, do not make modifications to the existing common sets of batch arguments. I.e. a text to image pipeline `negative_prompt` is not batched should set the attribute as `batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. See existing pipeline tests for reference.")

    @property
    def callback_cfg_params(self) -> frozenset:
        # NOTE(review): restored `@property` — see `params` above.
        raise NotImplementedError("You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. `callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback function when dynamically adjusting `guidance_scale`. They are variables that require specialtreatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's set of cfg arguments has minor changes from one of the common sets of cfg arguments, do not make modifications to the existing common sets of cfg arguments. I.e. for inpaint pipeine, you need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`")

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_save_load_local(self, expected_max_difference=0.0005):
        """Saving and reloading a pipeline must not change its output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        logger = logging.get_logger('diffusers.pipelines.pipeline_utils')
        logger.setLevel(diffusers.logging.INFO)
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            with CaptureLogger(logger) as cap_logger:
                pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            # Every non-optional component must be mentioned in the load log.
            for name in pipe_loaded.components.keys():
                if (name not in pipe_loaded._optional_components):
                    assert (name in str(cap_logger))
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs((to_np(output) - to_np(output_loaded))).max()
        self.assertLess(max_diff, expected_max_difference)

    def test_pipeline_call_signature(self):
        """`__call__` must expose all required (and required-optional) params."""
        self.assertTrue(hasattr(self.pipeline_class, '__call__'), f'{self.pipeline_class} should have a `__call__` method')
        parameters = inspect.signature(self.pipeline_class.__call__).parameters
        optional_parameters = set()
        for (k, v) in parameters.items():
            if (v.default != inspect._empty):
                optional_parameters.add(k)
        parameters = set(parameters.keys())
        parameters.remove('self')
        parameters.discard('kwargs')
        remaining_required_parameters = set()
        for param in self.params:
            if (param not in parameters):
                remaining_required_parameters.add(param)
        self.assertTrue((len(remaining_required_parameters) == 0), f'Required parameters not present: {remaining_required_parameters}')
        remaining_required_optional_parameters = set()
        for param in self.required_optional_params:
            if (param not in optional_parameters):
                remaining_required_optional_parameters.add(param)
        self.assertTrue((len(remaining_required_optional_parameters) == 0), f'Required optional parameters not present: {remaining_required_optional_parameters}')

    def test_inference_batch_consistent(self, batch_sizes=[2]):
        self._test_inference_batch_consistent(batch_sizes=batch_sizes)

    def _test_inference_batch_consistent(self, batch_sizes=[2], additional_params_copy_to_batched_inputs=['num_inference_steps']):
        """Batched calls must return one output per batch element."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['generator'] = self.get_generator(0)
        logger = logging.get_logger(pipe.__module__)
        logger.setLevel(level=diffusers.logging.FATAL)
        batched_inputs = []
        for batch_size in batch_sizes:
            batched_input = {}
            batched_input.update(inputs)
            for name in self.batch_params:
                if (name not in inputs):
                    continue
                value = inputs[name]
                if (name == 'prompt'):
                    len_prompt = len(value)
                    # Deliberately vary prompt lengths; the last one is very long.
                    batched_input[name] = [value[:(len_prompt // i)] for i in range(1, (batch_size + 1))]
                    batched_input[name][(- 1)] = (100 * 'very long')
                else:
                    batched_input[name] = (batch_size * [value])
            if ('generator' in inputs):
                batched_input['generator'] = [self.get_generator(i) for i in range(batch_size)]
            if ('batch_size' in inputs):
                batched_input['batch_size'] = batch_size
            batched_inputs.append(batched_input)
        logger.setLevel(level=diffusers.logging.WARNING)
        for (batch_size, batched_input) in zip(batch_sizes, batched_inputs):
            output = pipe(**batched_input)
            assert (len(output[0]) == batch_size)

    def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=0.0001):
        self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)

    def _test_inference_batch_single_identical(self, batch_size=2, expected_max_diff=0.0001, additional_params_copy_to_batched_inputs=['num_inference_steps']):
        """A batched run must reproduce the single-sample result element-wise."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # Fix: the original shadowed `components` with the loop variable.
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['generator'] = self.get_generator(0)
        logger = logging.get_logger(pipe.__module__)
        logger.setLevel(level=diffusers.logging.FATAL)
        batched_inputs = {}
        batched_inputs.update(inputs)
        for name in self.batch_params:
            if (name not in inputs):
                continue
            value = inputs[name]
            if (name == 'prompt'):
                len_prompt = len(value)
                batched_inputs[name] = [value[:(len_prompt // i)] for i in range(1, (batch_size + 1))]
                batched_inputs[name][(- 1)] = (100 * 'very long')
            else:
                batched_inputs[name] = (batch_size * [value])
        if ('generator' in inputs):
            batched_inputs['generator'] = [self.get_generator(i) for i in range(batch_size)]
        if ('batch_size' in inputs):
            batched_inputs['batch_size'] = batch_size
        for arg in additional_params_copy_to_batched_inputs:
            batched_inputs[arg] = inputs[arg]
        output = pipe(**inputs)
        output_batch = pipe(**batched_inputs)
        assert (output_batch[0].shape[0] == batch_size)
        max_diff = np.abs((to_np(output_batch[0][0]) - to_np(output[0][0]))).max()
        assert (max_diff < expected_max_diff)

    def test_dict_tuple_outputs_equivalent(self, expected_max_difference=0.0001):
        """`return_dict=False` must produce the same tensors as the dict output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator_device = 'cpu'
        output = pipe(**self.get_dummy_inputs(generator_device))[0]
        output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]
        max_diff = np.abs((to_np(output) - to_np(output_tuple))).max()
        self.assertLess(max_diff, expected_max_difference)

    def test_components_function(self):
        """`pipe.components` must expose exactly the module components."""
        init_components = self.get_dummy_components()
        init_components = {k: v for (k, v) in init_components.items() if (not isinstance(v, (str, int, float)))}
        pipe = self.pipeline_class(**init_components)
        self.assertTrue(hasattr(pipe, 'components'))
        self.assertTrue((set(pipe.components.keys()) == set(init_components.keys())))

    def test_float16_inference(self, expected_max_diff=0.05):
        """fp16 inference must stay close to fp32 inference."""
        # NOTE(review): restored from a stripped skip decorator:
        # "float16 requires CUDA".
        if (torch_device != 'cuda'):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        components = self.get_dummy_components()
        pipe_fp16 = self.pipeline_class(**components)
        for component in pipe_fp16.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe_fp16.to(torch_device, torch.float16)
        pipe_fp16.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        if ('generator' in inputs):
            inputs['generator'] = self.get_generator(0)
        output = pipe(**inputs)[0]
        fp16_inputs = self.get_dummy_inputs(torch_device)
        if ('generator' in fp16_inputs):
            fp16_inputs['generator'] = self.get_generator(0)
        output_fp16 = pipe_fp16(**fp16_inputs)[0]
        max_diff = np.abs((to_np(output) - to_np(output_fp16))).max()
        self.assertLess(max_diff, expected_max_diff, 'The outputs of the fp16 and fp32 pipelines are too different.')

    def test_save_load_float16(self, expected_max_diff=0.01):
        """Saving/loading an fp16 pipeline must preserve dtype and output."""
        # NOTE(review): restored from a stripped skip decorator:
        # "float16 requires CUDA".
        if (torch_device != 'cuda'):
            return
        components = self.get_dummy_components()
        for (name, module) in components.items():
            if hasattr(module, 'half'):
                components[name] = module.to(torch_device).half()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)
            for component in pipe_loaded.components.values():
                if hasattr(component, 'set_default_attn_processor'):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        for (name, component) in pipe_loaded.components.items():
            if hasattr(component, 'dtype'):
                self.assertTrue((component.dtype == torch.float16), f'`{name}.dtype` switched from `float16` to {component.dtype} after loading.')
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs((to_np(output) - to_np(output_loaded))).max()
        self.assertLess(max_diff, expected_max_diff, 'The output of the fp16 pipeline changed after saving and loading.')

    def test_save_load_optional_components(self, expected_max_difference=0.0001):
        """Optional components set to None must stay None after a round-trip."""
        if (not hasattr(self.pipeline_class, '_optional_components')):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        generator_device = 'cpu'
        inputs = self.get_dummy_inputs(generator_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            for component in pipe_loaded.components.values():
                if hasattr(component, 'set_default_attn_processor'):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue((getattr(pipe_loaded, optional_component) is None), f'`{optional_component}` did not stay set to None after loading.')
        inputs = self.get_dummy_inputs(generator_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs((to_np(output) - to_np(output_loaded))).max()
        self.assertLess(max_diff, expected_max_difference)

    def test_to_device(self):
        """Moving the pipeline between devices must keep outputs finite."""
        # NOTE(review): restored from a stripped skip decorator:
        # "CUDA and CPU are required to switch devices".
        if (torch_device != 'cuda'):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)
        pipe.to('cpu')
        model_devices = [component.device.type for component in components.values() if hasattr(component, 'device')]
        self.assertTrue(all(((device == 'cpu') for device in model_devices)))
        output_cpu = pipe(**self.get_dummy_inputs('cpu'))[0]
        self.assertTrue((np.isnan(output_cpu).sum() == 0))
        pipe.to('cuda')
        model_devices = [component.device.type for component in components.values() if hasattr(component, 'device')]
        self.assertTrue(all(((device == 'cuda') for device in model_devices)))
        output_cuda = pipe(**self.get_dummy_inputs('cuda'))[0]
        self.assertTrue((np.isnan(to_np(output_cuda)).sum() == 0))

    def test_to_dtype(self):
        """`pipe.to(torch_dtype=...)` must convert every component's dtype."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)
        model_dtypes = [component.dtype for component in components.values() if hasattr(component, 'dtype')]
        self.assertTrue(all(((dtype == torch.float32) for dtype in model_dtypes)))
        pipe.to(torch_dtype=torch.float16)
        model_dtypes = [component.dtype for component in components.values() if hasattr(component, 'dtype')]
        self.assertTrue(all(((dtype == torch.float16) for dtype in model_dtypes)))

    def test_attention_slicing_forward_pass(self, expected_max_diff=0.001):
        self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff)

    def _test_attention_slicing_forward_pass(self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=0.001):
        """Attention slicing must not change inference results."""
        if (not self.test_attention_slicing):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator_device = 'cpu'
        inputs = self.get_dummy_inputs(generator_device)
        output_without_slicing = pipe(**inputs)[0]
        pipe.enable_attention_slicing(slice_size=1)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing = pipe(**inputs)[0]
        if test_max_difference:
            max_diff = np.abs((to_np(output_with_slicing) - to_np(output_without_slicing))).max()
            self.assertLess(max_diff, expected_max_diff, 'Attention slicing should not affect the inference results')
        if test_mean_pixel_difference:
            assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0]))

    def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=0.0001):
        """Sequential CPU offload must not change inference results."""
        # NOTE(review): restored from a stripped skip decorator: "CPU offload
        # is only available with CUDA and `accelerate v0.14.0` or higher".
        if ((torch_device != 'cuda') or (not is_accelerate_available()) or is_accelerate_version('<', '0.14.0')):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator_device = 'cpu'
        inputs = self.get_dummy_inputs(generator_device)
        output_without_offload = pipe(**inputs)[0]
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_dummy_inputs(generator_device)
        output_with_offload = pipe(**inputs)[0]
        max_diff = np.abs((to_np(output_with_offload) - to_np(output_without_offload))).max()
        self.assertLess(max_diff, expected_max_diff, 'CPU offloading should not affect the inference results')

    def test_model_cpu_offload_forward_pass(self, expected_max_diff=0.0002):
        """Model CPU offload must not change results and must offload modules."""
        # NOTE(review): restored from a stripped skip decorator: "CPU offload
        # is only available with CUDA and `accelerate v0.17.0` or higher".
        if ((torch_device != 'cuda') or (not is_accelerate_available()) or is_accelerate_version('<', '0.17.0')):
            return
        generator_device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(generator_device)
        output_without_offload = pipe(**inputs)[0]
        pipe.enable_model_cpu_offload()
        inputs = self.get_dummy_inputs(generator_device)
        output_with_offload = pipe(**inputs)[0]
        max_diff = np.abs((to_np(output_with_offload) - to_np(output_without_offload))).max()
        self.assertLess(max_diff, expected_max_diff, 'CPU offloading should not affect the inference results')
        offloaded_modules = [v for (k, v) in pipe.components.items() if (isinstance(v, torch.nn.Module) and (k not in pipe._exclude_from_cpu_offload))]
        # Fix: the original wrapped assertTrue and the message in a tuple,
        # so the failure message was never passed to the assertion.
        self.assertTrue(all(((v.device.type == 'cpu') for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if (v.device.type != 'cpu')]}")

    def test_xformers_attention_forwardGenerator_pass(self):
        # NOTE(review): restored from a stripped skip decorator: "XFormers
        # attention is only available with CUDA and `xformers` installed".
        if ((torch_device != 'cuda') or (not is_xformers_available())):
            return
        self._test_xformers_attention_forwardGenerator_pass()

    def _test_xformers_attention_forwardGenerator_pass(self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=0.0001):
        """xFormers memory-efficient attention must not change results."""
        if (not self.test_xformers_attention):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, 'set_default_attn_processor'):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output_without_offload = pipe(**inputs)[0]
        output_without_offload = (output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload)
        pipe.enable_xformers_memory_efficient_attention()
        inputs = self.get_dummy_inputs(torch_device)
        output_with_offload = pipe(**inputs)[0]
        # Fix: the original's else-branch returned `output_without_offload`,
        # silently comparing the baseline against itself.
        output_with_offload = (output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload)
        if test_max_difference:
            max_diff = np.abs((to_np(output_with_offload) - to_np(output_without_offload))).max()
            self.assertLess(max_diff, expected_max_diff, 'XFormers attention should not affect the inference results')
        if test_mean_pixel_difference:
            assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])

    def test_progress_bar(self):
        """The progress bar must run to completion and be disableable."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        inputs = self.get_dummy_inputs(torch_device)
        with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
            _ = pipe(**inputs)
            stderr = stderr.getvalue()
            max_steps = re.search('/(.*?) ', stderr).group(1)
            self.assertTrue(((max_steps is not None) and (len(max_steps) > 0)))
            self.assertTrue((f'{max_steps}/{max_steps}' in stderr), 'Progress bar should be enabled and stopped at the max step')
        pipe.set_progress_bar_config(disable=True)
        with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
            _ = pipe(**inputs)
            self.assertTrue((stderr.getvalue() == ''), 'Progress bar should be disabled')

    def test_num_images_per_prompt(self):
        """Output count must equal batch_size * num_images_per_prompt."""
        sig = inspect.signature(self.pipeline_class.__call__)
        if ('num_images_per_prompt' not in sig.parameters):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_sizes = [1, 2]
        num_images_per_prompts = [1, 2]
        for batch_size in batch_sizes:
            for num_images_per_prompt in num_images_per_prompts:
                inputs = self.get_dummy_inputs(torch_device)
                for key in inputs.keys():
                    if (key in self.batch_params):
                        inputs[key] = (batch_size * [inputs[key]])
                images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
                assert (images.shape[0] == (batch_size * num_images_per_prompt))

    def test_cfg(self):
        """Classifier-free guidance must not change the output shape."""
        sig = inspect.signature(self.pipeline_class.__call__)
        if ('guidance_scale' not in sig.parameters):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['guidance_scale'] = 1.0
        out_no_cfg = pipe(**inputs)[0]
        inputs['guidance_scale'] = 7.5
        out_cfg = pipe(**inputs)[0]
        assert (out_cfg.shape == out_no_cfg.shape)

    def test_callback_inputs(self):
        """`callback_on_step_end` must receive and be able to mutate tensors."""
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = ('callback_on_step_end_tensor_inputs' in sig.parameters)
        has_callback_step_end = ('callback_on_step_end' in sig.parameters)
        if (not (has_callback_tensor_inputs and has_callback_step_end)):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(hasattr(pipe, '_callback_tensor_inputs'), f' {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs')

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # Every tensor passed in must be a declared callback input.
            for (tensor_name, tensor_value) in callback_kwargs.items():
                assert (tensor_name in pipe._callback_tensor_inputs)
            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            # All declared inputs must be passed, and nothing else.
            for tensor_name in pipe._callback_tensor_inputs:
                assert (tensor_name in callback_kwargs)
            for (tensor_name, tensor_value) in callback_kwargs.items():
                assert (tensor_name in pipe._callback_tensor_inputs)
            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)
        inputs['callback_on_step_end'] = callback_inputs_subset
        inputs['callback_on_step_end_tensor_inputs'] = ['latents']
        inputs['output_type'] = 'latent'
        output = pipe(**inputs)[0]
        inputs['callback_on_step_end'] = callback_inputs_all
        inputs['callback_on_step_end_tensor_inputs'] = pipe._callback_tensor_inputs
        inputs['output_type'] = 'latent'
        output = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            # Zero the latents on the last step; the output must then be zero.
            is_last = (i == (pipe.num_timesteps - 1))
            if is_last:
                callback_kwargs['latents'] = torch.zeros_like(callback_kwargs['latents'])
            return callback_kwargs

        inputs['callback_on_step_end'] = callback_inputs_change_tensor
        inputs['callback_on_step_end_tensor_inputs'] = pipe._callback_tensor_inputs
        inputs['output_type'] = 'latent'
        output = pipe(**inputs)[0]
        assert (output.abs().sum() == 0)

    def test_callback_cfg(self):
        """The callback must be able to adjust `guidance_scale` per step."""
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = ('callback_on_step_end_tensor_inputs' in sig.parameters)
        has_callback_step_end = ('callback_on_step_end' in sig.parameters)
        if (not (has_callback_tensor_inputs and has_callback_step_end)):
            return
        if ('guidance_scale' not in sig.parameters):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(hasattr(pipe, '_callback_tensor_inputs'), f' {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs')

        def callback_increase_guidance(pipe, i, t, callback_kwargs):
            pipe._guidance_scale += 1.0
            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)
        inputs['guidance_scale'] = 2.0
        inputs['callback_on_step_end'] = callback_increase_guidance
        inputs['callback_on_step_end_tensor_inputs'] = pipe._callback_tensor_inputs
        _ = pipe(**inputs)[0]
        # Guidance was incremented once per timestep.
        assert (pipe.guidance_scale == (inputs['guidance_scale'] + pipe.num_timesteps))
def run_network_check(config, entrypoint):
    """Run the network diagnosis script, retrying once on failure.

    Fix: the original returned inside *both* branches of the first loop
    iteration, so the `range(2)` retry loop never actually retried. The
    failure path now logs and loops again; only the final result is returned.

    Returns:
        True if any attempt passes, otherwise the last (falsy) result.
    """
    cmd_args = ['-m', 'dlrover.trainer.torch.run_network_check']
    success = False
    for _ in range(2):
        success = network_check(config=config, entrypoint=entrypoint, args=cmd_args)
        if success:
            logger.info('Network check pass.')
            return success
        logger.error('Network of the cluster is not available because of abnormal node.')
    return success
def get_feature_importance(RBM, data, weights=None, Nchains=500, Nthermalize=1000, Nstep=10, Lchains=100, init='data'):
    """Score hidden features by contrasting data statistics with RBM samples.

    Returns the per-feature difference between the average cumulant
    generating function on the data and a log-sum-exp estimate on samples
    generated from the model.
    """
    if init == 'data':
        # Seed the chains at diverse data points chosen via k-means++ on the
        # hidden activities — presumably to decorrelate the chains; confirm
        # against KMPP_choose_centroids.
        hidden_activity = RBM.mean_hiddens(data)
        starting_configs = data[KMPP_choose_centroids(hidden_activity, Nchains)]
    else:
        starting_configs = []
    (samples, _) = RBM.gen_data(Nthermalize=Nthermalize, Nchains=Nchains, Nstep=Nstep, Lchains=Lchains, config_init=starting_configs)
    cgf_data = RBM.hlayer.cgf_from_inputs(RBM.input_hiddens(data))
    cgf_model = RBM.hlayer.cgf_from_inputs(RBM.input_hiddens(samples))
    DeltaL = (utilities.average(cgf_data, weights=weights) + utilities.logsumexp((- cgf_model), axis=0)) - np.log(len(samples))
    return DeltaL
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.