code stringlengths 101 5.91M |
|---|
_module()
class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
    """Momentum updater that anneals momentum along a cosine curve.

    Exactly one of ``min_momentum`` / ``min_momentum_ratio`` must be given:
    the annealing target is either that absolute value or that fraction of
    the base momentum.
    """

    def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
        # XOR: exactly one way of specifying the momentum floor is allowed.
        assert (min_momentum is None) ^ (min_momentum_ratio is None)
        self.min_momentum = min_momentum
        self.min_momentum_ratio = min_momentum_ratio
        super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        """Return the cosine-annealed momentum for the current progress."""
        if self.by_epoch:
            current, total = runner.epoch, runner.max_epochs
        else:
            current, total = runner.iter, runner.max_iters
        if self.min_momentum_ratio is not None:
            floor = base_momentum * self.min_momentum_ratio
        else:
            floor = self.min_momentum
        return annealing_cos(base_momentum, floor, current / total)
class UnitTest(unittest.TestCase):
    """End-to-end checks for the audio plugin HTTP service (ASR + TTS)."""

    def setUp(self) -> None:
        # The plugin instances below are configured for CPU only; skip elsewhere.
        device = get_device_type()
        if (device != 'cpu'):
            self.skipTest('Only test this UT case on Intel CPU.')
        # Register and instantiate the TTS and ASR plugins the service routes to.
        plugins['tts']['class'] = TextToSpeech
        plugins['tts']['enable'] = True
        plugins['asr']['class'] = AudioSpeechRecognition
        plugins['asr']['enable'] = True
        plugins['tts']['instance'] = plugins['tts']['class'](device='cpu', voice='default', stream_mode=False, output_audio_path='./output_audio.wav')
        plugins['asr']['instance'] = plugins['asr']['class'](device='cpu', model_name_or_path='openai/whisper-small')
        # Materialize the base64-embedded fixture as a real wav file.
        base64_to_audio(sample_audio_base64, './sample_audio.wav')

    def tearDown(self) -> None:
        # Remove every wav produced by setUp/tests plus the temp byte dump.
        for filename in os.listdir('.'):
            if filename.endswith('.wav'):
                os.remove(filename)
        if os.path.exists('./tmp_audio_bytes'):
            os.remove('./tmp_audio_bytes')

    def test_plugin_as_service(self):
        # ASR: upload the sample wav and expect the known transcription.
        with open('./sample_audio.wav', 'rb') as file:
            response = client.post('/plugin/audio/asr', files={'file': file})
        print(response.text)
        assert (response.status_code == 200)
        assert (response.json()['asr_result'] == 'welcome to neural chat')
        # TTS: synthesize a short utterance and expect HTTP success.
        request_data = {'text': 'Hello', 'voice': 'default', 'knowledge_id': 'default'}
        tts_response = client.post('/plugin/audio/tts', json=request_data)
        assert (tts_response.status_code == 200)
def main():
    """Convert per-speaker alignment files into one word-alignment TSV per split."""
    for split in splits:
        # One column list per entry in WORD_TABLE_COLUMNS ('id', 'word_time', 'word_text').
        word_table = {c: [] for c in WORD_TABLE_COLUMNS}
        split_path = os.path.join(seg_path, (split + '_align'))
        speaker_dirs = os.listdir(split_path)
        # Speaker directories are numeric; sort them numerically, not lexically.
        speaker_dirs = list(filter((lambda x: str.isdigit(x)), speaker_dirs))
        speaker_dirs.sort(key=(lambda x: int(x)))
        pbar = tqdm.tqdm(range(len(speaker_dirs)))
        for speaker in speaker_dirs:
            pbar.update()
            speaker_dir = os.path.join(split_path, speaker)
            if os.path.isdir(speaker_dir):
                align_files = os.listdir(speaker_dir)
                # Sort by the trailing utterance index in '<name>_<idx>.<ext>'.
                align_files.sort(key=(lambda x: int(x.split('.')[0].split('_')[(- 1)])))
                for align_file in align_files:
                    (word_time, word_text) = convert(os.path.join(speaker_dir, align_file))
                    index = align_file.split('.')[0]
                    word_table['id'].append(index)
                    word_table['word_time'].append(word_time)
                    word_table['word_text'].append(word_text)
        word_df = pd.DataFrame.from_dict(word_table)
        save_df_to_tsv(word_df, os.path.join(split_path, (split + '_word_align.tsv')))
def simpleperf_abi_dir_names(abi):
    """Translate an Android ABI string into simpleperf's binary directory name.

    Raises KeyError for unsupported ABIs.
    """
    abi_to_dir = {'armeabi-v7a': 'arm', 'arm64-v8a': 'arm64'}
    return abi_to_dir[abi]
def advantage(A):
    """Standardize a reward tensor into detached advantages; NaNs become 0."""
    # Epsilon-padded std avoids division by zero; empty input gets denom 1.
    denom = (0.0001 + A.std()) if len(A) > 0 else 1
    normalized = ((A - A.mean()) / denom).detach()
    # x != x holds only for NaN (e.g. std of a single-element tensor).
    normalized[normalized != normalized] = 0
    return normalized
def load_wikigold_data(file_name):
    """Load wikigold-style JSON and return parallel (sentences, BIO labels) lists."""
    with open(file_name, 'r') as f:
        instances = json.load(f)
    sent_list = []
    labels_list = []
    for instance in instances:
        sent_list.append(instance['text'])
        labels_list.append(formalize_bio(instance['labels']))
    # Sanity check: every sentence must align 1:1 with its label sequence.
    for sentence, labels in zip(sent_list, labels_list):
        assert len(sentence) == len(labels)
    return (sent_list, labels_list)
def test_categorical_exact_exclude_parents(X):
    """Check that _categorical_exact honours per-node parent exclusions."""
    # Node 1 may not use node 2 as a parent; node 3 may not use node 1.
    exclude_parents = ((), (2,), (), (1,))
    structure = _categorical_exact(X, exclude_parents=exclude_parents)
    assert_tuple_equal(structure, ((), (), (0,), (0, 2)))
    # Capping to one parent per node changes the optimal structure.
    structure = _categorical_exact(X, exclude_parents=exclude_parents, max_parents=1)
    assert_tuple_equal(structure, ((), (0,), (), ()))
    # Additionally excluding node 0 as a parent of node 3 forces a new graph.
    exclude_parents = ((), (2,), (), (0, 1))
    structure = _categorical_exact(X, exclude_parents=exclude_parents)
    assert_tuple_equal(structure, ((2, 3), (), (), (2,)))
class MessagePassing(torch.nn.Module):
    """Base class for message-passing graph layers.

    Subclasses override ``message`` and ``update``; their parameter names are
    introspected so ``propagate`` can gather the right tensors. Arguments
    suffixed ``_i`` / ``_j`` are indexed by the source / target rows of
    ``edge_index`` respectively.
    """

    def __init__(self, aggr='add'):
        super(MessagePassing, self).__init__()
        # BUG FIX: inspect.getargspec was removed in Python 3.11;
        # getfullargspec is the drop-in replacement for positional args.
        self.message_args = inspect.getfullargspec(self.message)[0][1:]  # skip self
        self.update_args = inspect.getfullargspec(self.update)[0][2:]  # skip self, aggr_out

    def propagate(self, aggr, edge_index, **kwargs):
        """Gather message args, aggregate messages per node, and apply update."""
        assert (aggr in ['add', 'mean', 'max'])
        kwargs['edge_index'] = edge_index
        size = None
        message_args = []
        for arg in self.message_args:
            if (arg[(- 2):] == '_i'):
                # '<name>_i' -> rows of kwargs['<name>'] at edge sources.
                tmp = kwargs[arg[:(- 2)]]
                size = tmp.size(0)
                message_args.append(tmp[edge_index[0]])
            elif (arg[(- 2):] == '_j'):
                # '<name>_j' -> rows of kwargs['<name>'] at edge targets.
                tmp = kwargs[arg[:(- 2)]]
                size = tmp.size(0)
                message_args.append(tmp[edge_index[1]])
            else:
                message_args.append(kwargs[arg])
        update_args = [kwargs[arg] for arg in self.update_args]
        out = self.message(*message_args)
        # Aggregate messages onto destination nodes (scatter by source index).
        out = scatter_(aggr, out, edge_index[0], dim_size=size)
        out = self.update(out, *update_args)
        return out

    def message(self, x_j):
        """Default message: pass neighbour features through unchanged."""
        return x_j

    def update(self, aggr_out):
        """Default update: return the aggregated messages unchanged."""
        return aggr_out
class Deconvolution2D(KerasLayer):
    """Keras-style transposed 2D convolution (deconvolution) wrapper.

    Only ``border_mode='valid'`` is supported; any other value raises via
    ``invalidInputError``.
    """

    def __init__(self, nb_filter, nb_row, nb_col, output_shape, init='glorot_uniform', activation=None, border_mode='valid', subsample=(1, 1), dim_ordering='th', W_regularizer=None, b_regularizer=None, bias=True, input_shape=None, **kwargs):
        if (border_mode != 'valid'):
            invalidInputError(False, "For Deconvolution2D, only border_mode='valid' is supported for now")
        # NOTE(review): ``output_shape`` is accepted but not forwarded to the
        # backend constructor below — presumably inferred there; confirm.
        super(Deconvolution2D, self).__init__(None, nb_filter, nb_row, nb_col, init, activation, subsample, dim_ordering, W_regularizer, b_regularizer, bias, (list(input_shape) if input_shape else None), **kwargs)
def update_scale(qmodel, model, data_distill, graph, bottoms, res, targ_layer, num_epoch=1000):
    """Tune per-layer quantization scale parameters of ``qmodel`` by distillation.

    Minimizes the KL divergence between the float model's logits and the
    quantized model's logits over ``data_distill``, optimizing only the
    parameters whose name contains 'scale'. Fused BN statistics in ``graph``
    are rescaled after every step. Returns the tuned ``qmodel``.
    """
    print('Start updating scale')
    writer = SummaryWriter('./tensorboard/exp_{}/'.format(round(time.time())))
    qmodel = qmodel.eval().cuda()
    model = model.eval().cuda()
    # Distillation inputs are fixed; no gradients flow into the data.
    for idx in range(len(data_distill)):
        data_distill[idx].requires_grad = False
    # Keep pre-optimization BN stats so rescaling is always relative to the
    # original values, never compounded step over step.
    graph_original = copy.deepcopy(graph)
    optimizer = torch.optim.Adam([p for (n, p) in qmodel.named_parameters() if ('scale' in n)], lr=0.001)
    terminate = False
    hooks = []
    hook_handle = []
    # Attach gradient-masking hooks to every target layer that has a scale.
    for (name, module) in qmodel.named_modules():
        if ((type(module) in targ_layer) and hasattr(module, 'scale')):
            grad_hook = GradHook(module.weight, (module.scale if hasattr(module, 'scale') else None), (module.scale_prev if hasattr(module, 'scale_prev') else None), (module.merge_scale if hasattr(module, 'scale') else None), (module.merge_scale_prev if hasattr(module, 'scale_prev') else None))
            hooks.append(grad_hook)
            hook_handle.append(module.scale.register_hook(grad_hook.hook_mask_grad_tensor))
    try:
        for epoch in range(num_epoch):
            for it in range(len(data_distill)):
                data = data_distill[it].cuda()
                # Teacher (float model) forward pass, no grad needed.
                with torch.no_grad():
                    logit = model(data)
                # Swap in quantized ops only for the student forward pass.
                replace_op()
                qlogit = qmodel(data)
                restore_op()
                klloss = kl_categorical(qlogit, logit)
                normloss = 0
                for (idx, hook) in enumerate(hooks):
                    normloss += norm2(hook.get_weight_scaled(), idx, writer, (((epoch * len(data_distill)) + it) + 1))
                # NOTE(review): normloss is computed and logged but excluded
                # from the optimized objective — confirm this is intentional.
                loss = klloss
                writer.add_scalar('loss', loss.data, (((epoch * len(data_distill)) + it) + 1))
                writer.add_scalar('norm', normloss.data, (((epoch * len(data_distill)) + it) + 1))
                writer.add_scalar('kldiv', klloss.data, (((epoch * len(data_distill)) + it) + 1))
                print('loss: {}, klloss: {}, norm: {}, iter: {}, epoch: {}'.format(loss.data, klloss.data, normloss.data, (it + 1), (epoch + 1)))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Propagate the updated scales into the fused BN parameters,
                # always starting from the original (pre-training) stats.
                for rr in res:
                    (layer_first, _, bn_idx) = rr.get_idxs()
                    scale = graph[layer_first].scale.detach().data.view((- 1))
                    graph[bn_idx].fake_weight.copy_((graph_original[bn_idx].fake_weight * scale))
                    graph[bn_idx].fake_bias.copy_((graph_original[bn_idx].fake_bias * scale))
                set_quant_minmax(graph, bottoms, verbose=False)
                # Early stop once the distillation loss is small enough.
                if (loss.data < 0.02):
                    terminate = True
                    break
            if terminate:
                break
    except KeyboardInterrupt:
        # On manual interrupt, still leave BN stats consistent with the
        # latest scales before returning.
        for rr in res:
            (layer_first, _, bn_idx) = rr.get_idxs()
            scale = graph[layer_first].scale.detach().data.view((- 1))
            graph[bn_idx].fake_weight.copy_((graph_original[bn_idx].fake_weight * scale))
            graph[bn_idx].fake_bias.copy_((graph_original[bn_idx].fake_bias * scale))
    # Detach the gradient hooks before handing the model back.
    for handle in hook_handle:
        handle.remove()
    return qmodel
_BUILDERS.register_module()
class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
    """Optimizer constructor applying layer-wise / stage-wise LR decay.

    Designed for ConvNeXt backbones: each parameter joins a group whose LR is
    ``base_lr * decay_rate ** (num_layers - layer_id - 1)`` and whose weight
    decay is zeroed for biases, 1-D params, and embedding tokens.
    """

    def add_params(self, params, module, **kwargs):
        """Populate ``params`` with per-layer parameter groups for ``module``."""
        logger = get_root_logger()
        parameter_groups = {}
        logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
        # +2 accounts for the stem and head pseudo-layers.  NOTE(review):
        # assumes 'num_layers' is present in paramwise_cfg — confirm.
        num_layers = (self.paramwise_cfg.get('num_layers') + 2)
        decay_rate = self.paramwise_cfg.get('decay_rate')
        decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')
        logger.info(f'Build LearningRateDecayOptimizerConstructor {decay_type} {decay_rate} - {num_layers}')
        weight_decay = self.base_wd
        for (name, param) in module.named_parameters():
            if (not param.requires_grad):
                continue
            # No weight decay for norm/bias (1-D) params and embedding tokens.
            if ((len(param.shape) == 1) or name.endswith('.bias') or (name in ('pos_embed', 'cls_token'))):
                group_name = 'no_decay'
                this_weight_decay = 0.0
            else:
                group_name = 'decay'
                this_weight_decay = weight_decay
            if ('layer_wise' in decay_type):
                if ('ConvNeXt' in module.backbone.__class__.__name__):
                    layer_id = get_layer_id_for_convnext(name, self.paramwise_cfg.get('num_layers'))
                    logger.info(f'set param {name} as id {layer_id}')
                else:
                    raise NotImplementedError()
            elif (decay_type == 'stage_wise'):
                if ('ConvNeXt' in module.backbone.__class__.__name__):
                    layer_id = get_stage_id_for_convnext(name, num_layers)
                    logger.info(f'set param {name} as id {layer_id}')
                else:
                    raise NotImplementedError()
            group_name = f'layer_{layer_id}_{group_name}'
            # Lazily create the group the first time its name is seen.
            if (group_name not in parameter_groups):
                scale = (decay_rate ** ((num_layers - layer_id) - 1))
                parameter_groups[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'param_names': [], 'lr_scale': scale, 'group_name': group_name, 'lr': (scale * self.base_lr)}
            parameter_groups[group_name]['params'].append(param)
            parameter_groups[group_name]['param_names'].append(name)
        (rank, _) = get_dist_info()
        # Log resulting groups (names + lr/wd) on rank 0 only.
        if (rank == 0):
            to_display = {}
            for key in parameter_groups:
                to_display[key] = {'param_names': parameter_groups[key]['param_names'], 'lr_scale': parameter_groups[key]['lr_scale'], 'lr': parameter_groups[key]['lr'], 'weight_decay': parameter_groups[key]['weight_decay']}
            logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')
        params.extend(parameter_groups.values())
def dino_xcit_medium_24_p16(pretrained=True, **kwargs):
    """XCiT-M24/16 backbone, optionally loading DINO self-supervised weights.

    Args:
        pretrained: when True, download and load the official DINO checkpoint.
        **kwargs: forwarded to the torch.hub model constructor.

    Returns:
        The (optionally pretrained) headless model (``num_classes=0``).
    """
    model = torch.hub.load('facebookresearch/xcit:main', 'xcit_medium_24_p16', num_classes=0, **kwargs)
    if pretrained:
        # BUG FIX: the checkpoint URL string was garbled/truncated, producing
        # a syntax error; restored to the official DINO release checkpoint.
        state_dict = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth', map_location='cpu')
        model.load_state_dict(state_dict, strict=True)
    return model
def make_data_loader(cfg, shuffle_train=True):
    """Build train/val dataloaders for (possibly multiple) re-ID datasets.

    Merges every dataset in cfg.DATASETS.TRAIN with identity-id offsets,
    optionally folds test splits into training (COMBINEALL), trims
    long-tailed classes, and wraps everything in DataLoaders using the
    configured sampler.

    Returns:
        (train_loader, val_loader, num_query, num_classes, dataset)
    """
    train_transforms = build_transforms(cfg, is_train=shuffle_train)
    val_transforms = build_transforms(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS
    dataset = BaseImageDataset()
    print(cfg.DATASETS.TRAIN)
    if isinstance(cfg.DATASETS.TRAIN, str):
        # Single training dataset: use it directly.
        cur_dataset = init_dataset(cfg.DATASETS.TRAIN, root=cfg.DATASETS.ROOT_DIR)
        dataset = cur_dataset
    else:
        # Multiple datasets: concatenate, offsetting ids so they stay unique.
        for (i, dataset_name) in enumerate(cfg.DATASETS.TRAIN):
            cur_dataset = init_dataset(dataset_name, root=cfg.DATASETS.ROOT_DIR)
            (min_id, max_id) = dataset.get_id_range(dataset.train)
            dataset.train.extend(apply_id_bias(cur_dataset.train, id_bias=(max_id + 1)))
            dataset.train_tracks += cur_dataset.train_tracks
            if cfg.DATASETS.COMBINEALL:
                # Also absorb this dataset's query+gallery ids into training.
                (min_id, max_id) = dataset.get_id_range(dataset.train)
                to_merge_train = dataset.relabel((cur_dataset.query + cur_dataset.gallery))
                dataset.train.extend(apply_id_bias(to_merge_train, id_bias=(max_id + 1)))
                dataset.train_tracks += cur_dataset.test_tracks
        dataset.train = dataset.relabel(dataset.train)
    if cfg.INPUT.CUTOFF_LONGTAILED:
        # Drop identities with too few instances.
        dataset.train = dataset.longtail_data_process(dataset.train, NUM_INSTANCE_PER_CLS=cfg.INPUT.LONGTAILED_THR)
    if isinstance(cfg.DATASETS.TEST, str):
        cur_dataset = init_dataset(cfg.DATASETS.TEST, root=cfg.DATASETS.ROOT_DIR)
        (dataset.query, dataset.gallery) = (cur_dataset.query, cur_dataset.gallery)
        dataset.test_tracks = cur_dataset.test_tracks
        dataset.query_orientation = cur_dataset.query_orientation
        dataset.gallery_orientation = cur_dataset.gallery_orientation
    else:
        # Multiple test datasets: bias ids per dataset to avoid collisions.
        (dataset.query, dataset.gallery) = ([], [])
        for (i, dataset_name) in enumerate(cfg.DATASETS.TEST):
            cur_dataset = init_dataset(dataset_name, root=cfg.DATASETS.ROOT_DIR)
            dataset.query.extend(apply_id_bias(cur_dataset.query, id_bias=(i * 10000)))
            dataset.gallery.extend(apply_id_bias(cur_dataset.gallery, id_bias=(i * 10000)))
            dataset.test_tracks += cur_dataset.test_tracks
            # NOTE(review): orientations keep only the last dataset's values.
            dataset.query_orientation = cur_dataset.query_orientation
            dataset.gallery_orientation = cur_dataset.gallery_orientation
    dataset.print_dataset_statistics(dataset.train, dataset.query, dataset.gallery)
    (num_train_pids, num_train_imgs, num_train_cams) = dataset.get_imagedata_info(dataset.train)
    num_classes = num_train_pids
    train_set = ImageDataset(dataset.train, train_transforms)
    if (cfg.DATALOADER.SAMPLER == 'softmax'):
        train_loader = DataLoader(train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, shuffle=shuffle_train, num_workers=num_workers, collate_fn=train_collate_fn)
    elif (cfg.DATALOADER.SAMPLER == 'm_per_class'):
        train_loader = DataLoader(train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, sampler=MPerClassSampler(dataset.train, cfg.SOLVER.IMS_PER_BATCH, cfg.DATALOADER.NUM_INSTANCE), num_workers=num_workers, collate_fn=train_collate_fn)
    else:
        # Default: random-identity (P x K) sampling.
        train_loader = DataLoader(train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, sampler=RandomIdentitySampler(dataset.train, cfg.SOLVER.IMS_PER_BATCH, cfg.DATALOADER.NUM_INSTANCE), num_workers=num_workers, collate_fn=train_collate_fn)
    val_set = ImageDataset((dataset.query + dataset.gallery), val_transforms)
    val_loader = DataLoader(val_set, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=False, num_workers=num_workers, collate_fn=val_collate_fn)
    return (train_loader, val_loader, len(dataset.query), num_classes, dataset)
class PROBAVDataModule(BaseDataModule):
    """Data module for the PROBA-V super-resolution dataset.

    Loads low-res/high-res pairs for one spectral band and splits the
    training data into train/val according to ``self.val_split``.
    """

    def __init__(self, root: str='.data/probav', band: str='RED', lr_transform: T.Compose=T.Compose([ToTensor(), ToDtype(torch.float32)]), hr_transform: T.Compose=T.Compose([ToTensor(), ToDtype(torch.float32)]), *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.root = root
        self.band = band
        self.lr_transform = lr_transform
        self.hr_transform = hr_transform

    def setup(self, stage: Optional[str]=None):
        """Instantiate train/val/test datasets."""
        train_dataset = PROBAV(root=self.root, split='train', band=self.band, lr_transform=self.lr_transform, hr_transform=self.hr_transform)
        (self.train_dataset, self.val_dataset) = dataset_split(train_dataset, val_pct=self.val_split)
        self.test_dataset = PROBAV(root=self.root, split='test', band=self.band, lr_transform=self.lr_transform, hr_transform=self.hr_transform)

    def on_before_batch_transfer(self, batch, dataloader_idx):
        """Fold an extra depth dimension into the batch dimension if present."""
        # assumes lr batches are (b, t, d, c, h, w) and hr (b, d, c, h, w)
        # when the extra dim exists — TODO confirm against PROBAV outputs.
        if (batch['lr'].ndim == 6):
            batch['lr'] = rearrange(batch['lr'], 'b t d c h w -> (b d) t c h w')
        if (batch['hr'].ndim == 5):
            batch['hr'] = rearrange(batch['hr'], 'b d c h w -> (b d) c h w')
        return batch
def quaddobl_newton_power_series(pols, lser, idx=1, maxdeg=4, nbr=4, checkin=True, verbose=True):
    """Run Newton's method on power series in quad double precision.

    Args:
        pols: polynomial system as a list of strings.
        lser: leading terms of the series (list of strings) to start from.
        idx: index of the series parameter among the variables.
        maxdeg: maximal degree of the computed series.
        nbr: number of Newton steps.
        checkin: validate inputs first; on failure return ``lser`` unchanged.
        verbose: print progress information.

    Returns:
        The computed series with the parameter symbol substituted.
    """
    from phcpy.solver import number_of_symbols
    from phcpy.interface import store_quaddobl_system, load_quaddobl_system
    from phcpy.phcpy2c3 import py2c_quaddobl_Newton_power_series as newton
    from phcpy.phcpy2c3 import py2c_syspool_quaddobl_init
    from phcpy.phcpy2c3 import py2c_syspool_quaddobl_create
    from phcpy.phcpy2c3 import py2c_syspool_quaddobl_size as poolsize
    from phcpy.phcpy2c3 import py2c_syspool_copy_to_quaddobl_container
    from phcpy.phcpy2c3 import py2c_syspool_quaddobl_clear
    nbsym = number_of_symbols(pols)
    if verbose:
        print('the polynomials :')
        for pol in pols:
            print(pol)
        print('Number of variables :', nbsym)
    if checkin:
        if (not checkin_newton_power_series(nbsym, lser, idx)):
            return lser
    # Store the start series (one variable) and set up a pool of size one.
    store_quaddobl_system(lser, nbvar=1)
    py2c_syspool_quaddobl_init(1)
    py2c_syspool_quaddobl_create(1)
    store_quaddobl_system(pols, nbvar=nbsym)
    fail = newton(idx, maxdeg, nbr, int(verbose))
    # size == -1 signals failure inside the C interface.
    size = ((- 1) if fail else poolsize())
    if verbose:
        if (size == (- 1)):
            print("An error occurred in the execution of Newton's method.")
        else:
            print('Computed one series solution.')
    # Fetch the series from the pool and substitute the parameter symbol.
    py2c_syspool_copy_to_quaddobl_container(1)
    result = load_quaddobl_system()
    result = substitute_symbol(result, idx)
    py2c_syspool_quaddobl_clear()
    return result
class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject):
    """Placeholder that raises a helpful error when TensorFlow is unavailable."""
    # Backends that must be installed before the real class can be used.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def train(model, train_loaders, optimizer, tokenizer, epoch, global_step, device, scheduler, scaler, config):
    """One epoch of contrastive (ita) + matching (itm) training over mixed media.

    Iterates all media-type loaders jointly via MetaLoader, runs AMP
    forward/backward with optional gradient clipping, steps the LR scheduler
    per iteration, and logs metrics (optionally to wandb).

    Returns:
        The updated ``global_step``.
    """
    model.train()
    metric_logger = MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', SmoothedValue(window=1, fmt='{value:.6f}'))
    metric_logger.add_meter('temperature', SmoothedValue(window=1, fmt='{value:.4f}'))
    loss_names = ['loss_ita', 'loss_itm']
    media_types = [loader.dataset.media_type for loader in train_loaders]
    # One meter per (media type, loss) pair.
    for name in loss_names:
        for m in media_types:
            metric_logger.add_meter(f'{m}-{name}', SmoothedValue(window=1, fmt='{value:.4f}'))
    header = f'Train Epoch: [{epoch}]'
    log_freq = config.log_freq
    if config.distributed:
        # Reshuffle each DistributedSampler for the new epoch.
        for d in train_loaders:
            d.sampler.set_epoch(epoch)
    train_loader = MetaLoader(name2loader=dict(list(zip(media_types, train_loaders))))
    model_without_ddp = (model.module if config.distributed else model)
    iterator = metric_logger.log_every(train_loader, log_freq, header)
    for (i, (media_type, (image, text, idx))) in enumerate(iterator):
        image = image.to(device, non_blocking=True)
        idx = idx.to(device, non_blocking=True)
        text_input = tokenizer(text, padding='max_length', truncation=True, max_length=config.max_txt_l, return_tensors='pt').to(device)
        # Mixed-precision forward; total loss is the sum of all loss terms.
        with torch.cuda.amp.autocast(enabled=config.fp16):
            loss_dict = model(image, text_input, idx=idx)
            loss = sum(loss_dict.values())
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        if (config.optimizer.max_grad_norm > 0):
            # Unscale first so the clip norm is measured in true units.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.optimizer.max_grad_norm)
        scaler.step(optimizer)
        scaler.update()
        scheduler.step()
        # Update per-loss meters for this batch's media type.
        for name in loss_names:
            value = loss_dict[name]
            value = (value if isinstance(value, float) else value.item())
            metric_logger.update(**{f'{media_type}-{name}': value})
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(temperature=model_without_ddp.temp.item())
        if (is_main_process() and config.wandb.enable and ((global_step % log_freq) == 0)):
            logs = metric_logger.get_global_avg_dict()
            log_dict_to_wandb(logs, step=global_step, prefix='train/')
        global_step += 1
        # In debug mode, stop after every 5th iteration.
        if (config.debug and (((i + 1) % 5) == 0)):
            break
    # Aggregate meters across ranks before reporting epoch averages.
    metric_logger.synchronize_between_processes()
    logger.info(f'Averaged train stats: {metric_logger.global_avg()}')
    return global_step
def move(nrow):
    """Return the 8 flat-index offsets of a cell's neighbours on a grid
    where consecutive flat indices along one axis differ by ``nrow``."""
    step, row = 1, nrow
    return np.array([
        step,
        row + step,
        row,
        row - step,
        -step,
        -row - step,
        -row,
        -row + step,
    ])
_module()
class DetectionTransformer(BaseDetector, metaclass=ABCMeta):
    """Abstract base class for DETR-style detection transformers.

    Data flow: backbone (+ optional neck) -> pre_transformer ->
    forward_encoder -> pre_decoder -> forward_decoder -> bbox_head.
    The transformer-specific stage methods at the bottom are stubs that
    concrete subclasses must implement.
    """

    def __init__(self, backbone: ConfigType, neck: OptConfigType=None, encoder: OptConfigType=None, decoder: OptConfigType=None, bbox_head: OptConfigType=None, positional_encoding: OptConfigType=None, num_queries: int=100, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(data_preprocessor=data_preprocessor, init_cfg=init_cfg)
        # The bbox head needs the train/test configs (assigner, NMS, ...).
        bbox_head.update(train_cfg=train_cfg)
        bbox_head.update(test_cfg=test_cfg)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # Stored as raw configs; subclasses build modules in _init_layers().
        self.encoder = encoder
        self.decoder = decoder
        self.positional_encoding = positional_encoding
        self.num_queries = num_queries
        self.backbone = MODELS.build(backbone)
        if (neck is not None):
            self.neck = MODELS.build(neck)
        self.bbox_head = MODELS.build(bbox_head)
        self._init_layers()

    def _init_layers(self) -> None:
        """Build encoder/decoder/positional-encoding modules (subclass hook)."""
        pass

    def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Union[(dict, list)]:
        """Compute detection losses for a batch."""
        img_feats = self.extract_feat(batch_inputs)
        head_inputs_dict = self.forward_transformer(img_feats, batch_data_samples)
        losses = self.bbox_head.loss(**head_inputs_dict, batch_data_samples=batch_data_samples)
        return losses

    def predict(self, batch_inputs: Tensor, batch_data_samples: SampleList, rescale: bool=True) -> SampleList:
        """Predict detections and attach them to the data samples."""
        img_feats = self.extract_feat(batch_inputs)
        head_inputs_dict = self.forward_transformer(img_feats, batch_data_samples)
        results_list = self.bbox_head.predict(**head_inputs_dict, rescale=rescale, batch_data_samples=batch_data_samples)
        batch_data_samples = self.add_pred_to_datasample(batch_data_samples, results_list)
        return batch_data_samples

    def _forward(self, batch_inputs: Tensor, batch_data_samples: OptSampleList=None) -> Tuple[List[Tensor]]:
        """Raw network forward without post-processing (tracing/FLOPs path)."""
        img_feats = self.extract_feat(batch_inputs)
        head_inputs_dict = self.forward_transformer(img_feats, batch_data_samples)
        results = self.bbox_head.forward(**head_inputs_dict)
        return results

    def forward_transformer(self, img_feats: Tuple[Tensor], batch_data_samples: OptSampleList=None) -> Dict:
        """Run the full encoder/decoder pipeline and collect head inputs."""
        (encoder_inputs_dict, decoder_inputs_dict) = self.pre_transformer(img_feats, batch_data_samples)
        encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)
        (tmp_dec_in, head_inputs_dict) = self.pre_decoder(**encoder_outputs_dict)
        decoder_inputs_dict.update(tmp_dec_in)
        decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)
        head_inputs_dict.update(decoder_outputs_dict)
        return head_inputs_dict

    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
        """Backbone (+ neck, when present) feature extraction."""
        x = self.backbone(batch_inputs)
        if self.with_neck:
            x = self.neck(x)
        return x

    def pre_transformer(self, img_feats: Tuple[Tensor], batch_data_samples: OptSampleList=None) -> Tuple[(Dict, Dict)]:
        """Prepare encoder/decoder inputs from image features (subclass hook)."""
        pass

    def forward_encoder(self, feat: Tensor, feat_mask: Tensor, feat_pos: Tensor, **kwargs) -> Dict:
        """Transformer encoder forward (subclass hook)."""
        pass

    def pre_decoder(self, memory: Tensor, **kwargs) -> Tuple[(Dict, Dict)]:
        """Prepare decoder inputs from encoder memory (subclass hook)."""
        pass

    def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, **kwargs) -> Dict:
        """Transformer decoder forward (subclass hook)."""
        pass
class SparseLeNet(nn.Module):
    """LeNet-style CNN whose activations are sparsified after each layer."""

    def __init__(self, sparsities, sparse_func='reg'):
        super(SparseLeNet, self).__init__()
        # Convolutional feature extractor.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected classifier head (10 classes).
        self.fc1 = nn.Linear(256, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        # Sparsifiers: configurable 2D variants for conv maps, 1D for fc outputs.
        self.sparse1 = models.sparse_func_dict[sparse_func](sparsities[0])
        self.sparse2 = models.sparse_func_dict[sparse_func](sparsities[1])
        self.sparse3 = models.Sparsify1D(sparsities[2])
        self.sparse4 = models.Sparsify1D(sparsities[3])
        self.relu = nn.ReLU()

    def forward(self, x):
        h = self.sparse1(self.conv1(x))
        h = F.max_pool2d(h, 2)
        h = self.sparse2(self.conv2(h))
        h = F.max_pool2d(h, 2)
        # Flatten spatial maps before the fully connected head.
        h = h.view(h.size(0), -1)
        h = self.sparse3(self.fc1(h))
        h = self.sparse4(self.fc2(h))
        return self.fc3(h)
class DCNv2Pooling(nn.Module):
    """Deformable RoI pooling (DCNv2) module wrapping DCNv2PoolingFunction."""

    def __init__(self, spatial_scale, pooled_size, output_dim, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0):
        super(DCNv2Pooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.pooled_size = pooled_size
        self.output_dim = output_dim
        self.no_trans = no_trans
        self.group_size = group_size
        # Part size defaults to the pooled size when not given.
        self.part_size = (pooled_size if (part_size is None) else part_size)
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std
        self.func = DCNv2PoolingFunction(self.spatial_scale, self.pooled_size, self.output_dim, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)

    def forward(self, data, rois, offset):
        # Without the transform branch the offset input is ignored; pass an
        # empty tensor of data's type instead of the caller's offset.
        if self.no_trans:
            offset = data.new()
        return self.func(data, rois, offset)
def dropout(inputs, is_training, scope, keep_prob=0.5, noise_shape=None):
    """Apply dropout only when ``is_training`` is true (graph-mode tf.cond)."""
    with tf.variable_scope(scope):
        return tf.cond(
            is_training,
            lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
            lambda: inputs,
        )
def query_point_sampling_complex(draw) -> complex:
    """Hypothesis-style composite strategy body: draw a random complex point.

    Args:
        draw: the Hypothesis draw callable supplied by @st.composite.

    Returns:
        A complex number with independently drawn real/imaginary parts.
    """
    # BUG FIX: ``np.complex`` (used both as annotation and constructor) was
    # an alias removed in NumPy 1.24; the builtin ``complex`` is the
    # documented replacement.  The annotation is evaluated at def time, so
    # the old code crashed on import with modern NumPy.
    real_part = draw(float_sampling())
    imaginary_part = draw(float_sampling())
    return complex(real_part, imaginary_part)
def add_words_to_word_vec_dict(word_vec_dict, word_set, dictionary, translations=None):
    """Copy embedding vectors for ``word_set`` out of ``dictionary`` in place.

    Each word is first mapped through ``translations`` when given; words (or
    translations) missing from ``dictionary`` are counted but skipped.
    Mutates ``word_vec_dict`` and prints hit/miss statistics; asserts that at
    least one vector was found.
    """
    succeeded_to_find_in_src_list = 0
    failed_to_find_in_src_list = 0
    for word in word_set:
        try:
            translation = (word if (translations is None) else translations[word])
            word_vec_dict[translation] = dictionary[translation]
            succeeded_to_find_in_src_list += 1
        except KeyError:
            # Word (or its translation) missing from the embedding dictionary.
            # (Fixed: the exception was previously bound to an unused name.)
            failed_to_find_in_src_list += 1
    assert (len(word_vec_dict) > 0)
    print(('# src: %d - %d' % (succeeded_to_find_in_src_list, failed_to_find_in_src_list)))
    print(('source list size: %d' % len(word_set)))
    print(('word_vec_dict size: %d' % len(word_vec_dict)))
def test_quad_double_syspool(vrblvl=0):
    """Exercise the quad double systems pool: store three systems, read back.

    Returns 0 when the pool size is 3 (success), 1 otherwise.
    """
    initialize_quad_double_syspool(3, vrblvl)
    dim = size_quad_double_syspool(vrblvl)
    print('The size of the systems pool :', dim)
    # Store three one-variable systems at pool slots 1..3.
    pol1 = ['t - 1/3;']
    set_quad_double_system(1, pol1, vrblvl)
    copy_to_quad_double_syspool(1)
    pol2 = ['t - 2/3;']
    set_quad_double_system(1, pol2, vrblvl)
    copy_to_quad_double_syspool(2)
    pol3 = ['t - 1;']
    set_quad_double_system(1, pol3, vrblvl)
    copy_to_quad_double_syspool(3)
    # Retrieve each stored system back from the pool and print it.
    for i in range(1, (dim + 1)):
        clear_quad_double_system(vrblvl)
        copy_from_quad_double_syspool(i)
        pols = get_quad_double_system(vrblvl)
        print('system at', i, 'in the pool :', pols)
    clear_quad_double_syspool(vrblvl)
    return int((dim != 3))
class PermutationInvariantSolution(Solution):
    """Evolution-strategy solution wrapping a PermutationInvariantNetwork policy."""

    def __init__(self, n_embeddings=16, proj_dim=32, hidden_size=8):
        # Keep kwargs so clone() can rebuild an identically-shaped policy.
        self.kwargs = {'n_embeddings': n_embeddings, 'proj_dim': proj_dim, 'hidden_size': hidden_size}
        self.policy = PermutationInvariantNetwork(n_embeddings=n_embeddings, proj_dim=proj_dim, hidden_size=hidden_size)
        self.dtype = torch.float32
        self.policy.to(self.dtype)
        self.policy.eval()
        # The previous action is fed back into the policy on every step.
        self.prev_action = 0

    def clone(self):
        """Return a new solution carrying a copy of this policy's weights."""
        old_policy = self.policy
        new_solution = self.__class__(**self.kwargs)
        new_solution.policy.load_state_dict(old_policy.state_dict())
        return new_solution

    def get_action(self, obs):
        """Map a numpy observation to a scalar action, updating feedback state."""
        y = self.policy(torch.from_numpy(obs).to(self.dtype), self.prev_action)
        action = y.item()
        self.prev_action = action
        return action

    def reset(self):
        """Reset recurrent state and action feedback between episodes."""
        self.policy.attention_neuron.hx = None
        # BUG FIX: this previously assigned ``self.previous_action`` (a typo),
        # leaving the ``self.prev_action`` used by get_action() un-reset.
        self.prev_action = 0

    def get_n_features(self):
        return None
class PrefetchLoader(object):
    """Wraps a DataLoader, prefetching the next batch to GPU on a side CUDA stream."""

    def __init__(self, loader):
        self.loader = loader
        # Dedicated stream so host->device copies overlap with compute.
        self.stream = torch.cuda.Stream()

    def __iter__(self):
        loader_it = iter(self.loader)
        self.preload(loader_it)
        batch = self.next(loader_it)
        while (batch is not None):
            (yield batch)
            batch = self.next(loader_it)

    def __len__(self):
        return len(self.loader)

    def preload(self, it):
        """Fetch the next batch and start its async copy to the GPU."""
        try:
            self.batch = next(it)
        except StopIteration:
            # None is the sentinel that stops iteration in __iter__.
            self.batch = None
            return
        with torch.cuda.stream(self.stream):
            self.batch = move_to_cuda(self.batch)

    def next(self, it):
        """Return the prefetched batch and kick off loading the following one."""
        # Make the compute stream wait until the prefetch copy has finished.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        if (batch is not None):
            # Keep the tensors alive on the current stream until it is done.
            record_cuda_stream(batch)
        self.preload(it)
        return batch

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped loader.
        method = self.loader.__getattribute__(name)
        return method
def _kinematics_from_tokens(helper: PredictHelper, instance: str, sample: str) -> KinematicsData:
    """Assemble an agent's kinematics tuple (pose, velocity components,
    acceleration components, speed, yaw rate, yaw), zeroing NaN estimates."""
    annotation = helper.get_sample_annotation(instance, sample)
    x, y, _ = annotation['translation']
    yaw = quaternion_yaw(Quaternion(annotation['rotation']))

    velocity = helper.get_velocity_for_agent(instance, sample)
    acceleration = helper.get_acceleration_for_agent(instance, sample)
    yaw_rate = helper.get_heading_change_rate_for_agent(instance, sample)
    # The helper returns NaN when a quantity cannot be estimated.
    velocity = 0.0 if np.isnan(velocity) else velocity
    acceleration = 0.0 if np.isnan(acceleration) else acceleration
    yaw_rate = 0.0 if np.isnan(yaw_rate) else yaw_rate

    # Project speed/acceleration magnitudes onto the heading direction.
    hx, hy = np.cos(yaw), np.sin(yaw)
    vx, vy = velocity * hx, velocity * hy
    ax, ay = acceleration * hx, acceleration * hy
    return (x, y, vx, vy, ax, ay, velocity, yaw_rate, acceleration, yaw)
def parse_results(experiments, save_dir):
    """Collect parsed log results for each experiment.

    Entries whose timestamp starts with 'TODO' are recorded as pending with
    empty results; all others must have an existing info.log under
    ``save_dir/log/<exp>/<timestamp>/``.
    """
    log_results = {}
    for exp_name, meta in experiments.items():
        timestamp = meta['timestamp']
        if timestamp.startswith('TODO'):
            # Experiment not run yet: record a placeholder entry.
            log_results[exp_name] = {'timestamp': 'TODO', 'results': {}}
            continue
        log_path = Path(save_dir) / 'log' / exp_name / timestamp / 'info.log'
        assert log_path.exists(), f'missing log file for {exp_name}: {log_path}'
        log_results[exp_name] = {'timestamp': timestamp, 'results': parse_log(log_path)}
    return log_results
def add_pip(src, dst, flags=0):
    """Register a programmable interconnect point (pip) from ``src`` to ``dst``.

    Updates the module-level routing tables: ``wire_downhill``/``wire_uphill``
    adjacency sets and ``pip_xy`` metadata. The pip inherits the (x, y)
    location of the most recently added entry in ``switches``.
    """
    (x, y, _, _) = switches[-1]
    # setdefault replaces the check-then-insert dance on both adjacency maps.
    wire_downhill.setdefault(src, set()).add(dst)
    wire_uphill.setdefault(dst, set()).add(src)
    pip_xy[(src, dst)] = (x, y, 0, len(switches) - 1, flags)
class SceneGraphTrainer(DefaultTrainer):
    """DefaultTrainer specialization for scene-graph detection and evaluation."""

    def __init__(self, cfg):
        super(SceneGraphTrainer, self).__init__(cfg)

    # FIX: these overrides take ``cls`` and are declared as classmethods on
    # detectron2's DefaultTrainer, but were missing the decorator here, so
    # class-level calls (e.g. SceneGraphTrainer.test(cfg, model)) misbound
    # their arguments.
    @classmethod
    def build_train_loader(cls, cfg):
        """Train loader using the scene-graph dataset mapper."""
        return build_detection_train_loader(cfg, mapper=SceneGraphDatasetMapper(cfg, True))

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        """Test loader using the scene-graph dataset mapper."""
        return build_detection_test_loader(cfg, dataset_name, mapper=SceneGraphDatasetMapper(cfg, False))

    def build_hooks(self):
        """Standard detectron2 hooks plus periodic checkpointing/eval/writers."""
        cfg = self.cfg.clone()
        cfg.defrost()
        # PreciseBN's loader must run in-process.
        cfg.DATALOADER.NUM_WORKERS = 0
        ret = [hooks.IterationTimer(), hooks.LRScheduler(self.optimizer, self.scheduler), (hooks.PreciseBN(cfg.TEST.EVAL_PERIOD, self.model, self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER) if (cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)) else None)]
        if comm.is_main_process():
            ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_to_keep=100))

        def test_and_save_results():
            self._last_eval_results = self.test(self.cfg, self.model)
            return self._last_eval_results
        ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
        if comm.is_main_process():
            ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
        return ret

    @classmethod
    def test(cls, cfg, model, evaluators=None):
        """Evaluate ``model`` on every cfg.DATASETS.TEST dataset.

        Returns a dict of per-dataset results (unwrapped when there is only
        one test dataset).
        """
        logger = logging.getLogger(__name__)
        results = OrderedDict()
        for (idx, dataset_name) in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
            evaluator = SceneGraphEvaluator(dataset_name, cfg, True, output_folder)
            results_i = scenegraph_inference_on_dataset(cfg, model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(results_i, dict), 'Evaluator must return a dict on the main process. Got {} instead.'.format(results_i)
                logger.info('Evaluation results for {} in csv format:'.format(dataset_name))
                print_csv_format(results_i)
        comm.synchronize()
        # Unwrap when there is a single test dataset.
        if (len(results) == 1):
            results = list(results.values())[0]
        return results
def is_rst_docstring(docstring):
    """Heuristically detect whether a docstring is written in reST."""
    # Any one reST marker (special words, double backquotes, example block)
    # is enough to classify the docstring as reST.
    patterns = (_re_rst_special_words, _re_double_backquotes, _re_rst_example)
    return any(pattern.search(docstring) is not None for pattern in patterns)
def Perlin(nrow, specs=None):
    """Generate a flattened nrow x nrow Perlin-noise landscape scaled to [0, 1].

    Args:
        nrow: number of samples per axis.
        specs: optional dict; ``specs['size']`` (default 5, must be > 0) sets
            the extent/period of the sampled square.

    Returns:
        A 1-D numpy array of length nrow * nrow with values in [0, 1].
    """
    # FIX: the signature used a mutable default ``specs={}``; a None sentinel
    # avoids the shared-mutable-default pitfall while keeping behavior.
    specs = {} if specs is None else specs
    size = specs.get('size', 5)
    assert (size > 0)
    x = y = np.linspace(0, size, nrow)
    n = [[noise.pnoise2(i, j, repeatx=size, repeaty=size) for j in y] for i in x]
    # Shift to zero minimum, then normalize by the maximum.
    landscape = (n - np.min(n))
    landscape /= landscape.max()
    return landscape.ravel()
def batch_to_device(batch, device='cuda:0'):
    """Return a copy of a namedtuple batch with every field moved to ``device``."""
    moved = (to_device(getattr(batch, name), device) for name in batch._fields)
    return type(batch)(*moved)
def get_optimized_training_schedule(task, optimizer):
    """Return ``(epochs, lr, LearningRateScheduler)`` tuned per (task, optimizer).

    The learning rates below are hard-coded results of a prior grid search
    over powers of 3 (and a few absolute values for weight-decay variants).

    NOTE(review): several branches can leave ``lr`` unbound before use
    (e.g. ``task == 'C10-CNN2'`` with an optimizer not in the listed cases,
    or ``'C10-resnet'`` with an unlisted optimizer), raising
    ``UnboundLocalError``; ``'C100-WRN'`` with an unlisted optimizer falls
    through and returns ``None``. Presumably only the listed combinations
    are ever requested — confirm against callers.
    """
    if (task in ['C10-CNN1', 'C100-resnet', 'tiny-CNN']):
        # Pick lr first, then build the per-task schedule below.
        if ('layca' in optimizer):
            lr = ((3 ** (- 5)) if (optimizer in ['Adam_layca', 'SGD_AMom_layca']) else (3 ** (- 3)))
        elif ((task in ['C10-CNN1', 'C100-resnet']) and (optimizer == 'SGD_normalized')):
            lr = (3 ** (- 2))
        elif ((task, optimizer) == ('C10-CNN1', 'RMSprop')):
            lr = (3 ** (- 6))
        elif ((task, optimizer) == ('C100-resnet', 'Adam')):
            lr = (3 ** (- 5))
        elif ((task, optimizer) == ('tiny-CNN', 'Adagrad')):
            lr = (3 ** (- 4))
        else:
            lr = (3 ** (- 1))
        if (task == 'C10-CNN1'):
            return (100, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [80, 90, 97])))
        elif (task == 'C100-resnet'):
            return (100, lr, LearningRateScheduler(lr_schedule(lr, 0.1, [70, 90, 97])))
        elif (task == 'tiny-CNN'):
            if (optimizer == 'SGD_weight_decay'):
                return (100, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [70, 90, 97])))
            else:
                return (80, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [70])))
    elif (task == 'C10-CNN2'):
        if (optimizer in ['SGD_weight_decay', 'RMSprop_weight_decay']):
            lr = (0.0003 if (optimizer == 'RMSprop_weight_decay') else 0.5)
            # Halve lr every 25 epochs.
            return (250, lr, LearningRateScheduler(lr_schedule(lr, 0.5, [(i * 25) for i in range(1, 100)])))
        else:
            if ('layca' in optimizer):
                lr = ((3 ** (- 5)) if (optimizer in ['Adam_layca', 'SGD_AMom_layca']) else (3 ** (- 3)))
            elif (optimizer == 'SGD'):
                lr = (3 ** (- 1))
            elif (optimizer == 'SGD_normalized'):
                lr = (3 ** (- 1))
            # NOTE(review): lr is unbound here for any other optimizer.
            return (250, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [100, 170, 220])))
    elif (task == 'C100-WRN'):
        if (optimizer in ['SGD_weight_decay', 'Adam_weight_decay']):
            lr = (0.0003 if (optimizer == 'Adam_weight_decay') else 0.1)
            return (200, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [60, 120, 160])))
        elif ('layca' in optimizer):
            lr = ((3 ** (- 5)) if (optimizer in ['Adam_layca', 'SGD_AMom_layca']) else (3 ** (- 3)))
            return (250, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [100, 170, 220])))
        elif ((optimizer == 'SGD_normalized') or (optimizer == 'SGD')):
            lr = (3 ** (- 2))
            return (250, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [100, 170, 220])))
    elif (task == 'C10-resnet'):
        if (optimizer in ['SGD', 'SGD_weight_decay']):
            lr = (3 ** (- 1))
        elif (optimizer in ['SGD_layca']):
            lr = (3 ** (- 3))
        # NOTE(review): lr is unbound here for any other optimizer.
        return (200, lr, LearningRateScheduler(lr_schedule(lr, 0.2, [60, 120, 160])))
class CityscapesDataset(Pix2pixDataset):
    """Pix2pix-style dataset wrapper for Cityscapes semantic-label to photo
    translation, with optional ADDA-style source/target roots."""

    def modify_commandline_options(parser, is_train):
        """Install Cityscapes-specific option defaults (512px fixed crop,
        19 label classes, 2:1 aspect ratio)."""
        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
        parser.set_defaults(preprocess_mode='fixed')
        parser.set_defaults(load_size=512)
        parser.set_defaults(crop_size=512)
        parser.set_defaults(display_winsize=512)
        parser.set_defaults(label_nc=19)
        parser.set_defaults(aspect_ratio=2.0)
        # Only bump num_upsampling_layers when the generator exposes it.
        (opt, _) = parser.parse_known_args()
        if hasattr(opt, 'num_upsampling_layers'):
            parser.set_defaults(num_upsampling_layers='more')
        return parser

    def get_paths(self, opt, adda_mode='normal'):
        """Collect (label, image, instance) path lists.

        ``adda_mode`` selects which dataroot to read: 'source'/'target' for
        domain adaptation, anything else uses ``opt.dataroot``.
        """
        if (adda_mode == 'source'):
            root = opt.dataroot_source
        elif (adda_mode == 'target'):
            root = opt.dataroot_target
        else:
            root = opt.dataroot
        # Cityscapes has no held-out test labels; 'test' maps to the val split.
        phase = ('val' if (opt.phase == 'test') else 'train')
        if opt.eval_spade:
            label_dir = os.path.join(root, 'gtFinePredProb', phase)
        else:
            label_dir = os.path.join(root, 'gtFine', phase)
        label_paths_all = make_dataset(label_dir, recursive=True)
        label_paths = [p for p in label_paths_all if p.endswith('_labelIds.png')]
        image_dir = os.path.join(root, 'leftImg8bit', phase)
        image_paths = make_dataset(image_dir, recursive=True)
        if (not opt.no_instance):
            instance_paths = [p for p in label_paths_all if p.endswith('_instanceIds.png')]
        else:
            instance_paths = []
        return (label_paths, image_paths, instance_paths)

    def paths_match(self, path1, path2):
        """True when both basenames share the Cityscapes 'city_seq_frame'
        prefix (first three underscore-separated tokens)."""
        name1 = os.path.basename(path1)
        name2 = os.path.basename(path2)
        return ('_'.join(name1.split('_')[:3]) == '_'.join(name2.split('_')[:3]))
def is_syntactic_correct(code):
    """Return True iff *code* parses as a valid Java compilation unit.

    Any failure inside the javalang parser (syntax error, tokenizer error,
    etc.) is treated as "not syntactically correct".
    """
    try:
        javalang.parse.parse(code)
    except Exception:
        return False
    return True
def test_digits_cosine_greedi_ll_object():
    # Regression test: a GreeDi optimizer built from two 'lazy' sub-optimizers
    # (seeded) must reproduce the precomputed ranking, gains, and subset on
    # the digits fixture X_digits.
    model = FacilityLocationSelection(100, 'cosine', optimizer=GreeDi(optimizer1='lazy', optimizer2='lazy', random_state=0))
    model.fit(X_digits)
    # Only the first 30 selections are pinned exactly; later ties may vary.
    assert_array_equal(model.ranking[:30], digits_cosine_greedi_ranking[:30])
    assert_array_almost_equal(model.gains[:30], digits_cosine_greedi_gains[:30], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
def set_batch_nodeID(mol_batch, vocab):
    """Assign batch-wide sequential node ids in place.

    Every node across all trees in *mol_batch* receives a running index
    ``node.idx`` (0-based, in tree order) and its vocabulary id
    ``node.wid`` looked up from ``vocab.get_index(node.smiles)``.
    """
    flat_nodes = (node for tree in mol_batch for node in tree.nodes)
    for running_id, node in enumerate(flat_nodes):
        node.idx = running_id
        node.wid = vocab.get_index(node.smiles)
def run_interpretation_summary(x_unvec, y, contrib_sums_D, contrib_sums_D2, contrib_sums, idx_feat_dict, idx_class_dict, icd9_descript_dict, pairs, num_sample, full_out_dir):
    """Build and persist RIDDLE interpretation artifacts to *full_out_dir*.

    Combines per-feature contribution sums (first/second order and totals)
    with class frequencies into ordering summaries, then writes both the
    per-class tables and the combined summary to disk. Returns None.
    """
    # Imported lazily so riddle is only required when interpretation runs.
    from riddle import feature_importance, frequency, ordering
    feat_importance_summary = feature_importance.FeatureImportanceSummary(contrib_sums_D, contrib_sums_D2, idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict, icd9_descript_dict=icd9_descript_dict, pairs=pairs, num_sample=num_sample)
    feat_class_freq_table = frequency.get_frequency_table(x_unvec, y, idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict)
    ordering_summary = ordering.summarize_orderings(contrib_sums, feat_class_freq_table, idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict, icd9_descript_dict=icd9_descript_dict)
    ordering_summary.save_individual_tables(idx_class_dict, full_out_dir)
    ordering_summary.save(full_out_dir)
def paren_colors():
    """Return the parenthesis-highlight colour cycle for the module-level
    ``color_scheme`` ('dark' or 'light'); raise RuntimeError otherwise."""
    if color_scheme == 'light':
        return ['blue', 'red', 'magenta', 'green', 'cyan']
    if color_scheme == 'dark':
        return ['red', 'green', 'cyan', 'magenta', 'yellow']
    raise RuntimeError('Unknown color scheme: %s' % color_scheme)
def mobilenetv3_small_w7d20(**kwargs):
    """Factory for MobileNetV3-Small with width scale 7/20 (0.35).

    Extra keyword arguments are forwarded to ``get_mobilenetv3`` unchanged.
    """
    return get_mobilenetv3(version='small', width_scale=0.35, model_name='mobilenetv3_small_w7d20', **kwargs)
def relu_or_hswish(name):
    """Map an activation tag to its ``torch.nn`` module class.

    'RE' -> ``nn.ReLU``, 'HS' -> ``nn.Hardswish``; any other tag raises
    ``IOError`` (kept for backward compatibility with existing callers).
    """
    activations = {'RE': nn.ReLU, 'HS': nn.Hardswish}
    if name not in activations:
        raise IOError(f'{name} does not exist')
    return activations[name]
class VTUAVDataset(BaseDataset):
    """VTUAV RGB-T tracking test set (short-term 'st' or long-term 'lt').

    Each sequence pairs an RGB frame directory with an IR frame directory
    and concatenates the two per-frame ground-truth boxes into one
    8-column annotation.
    """

    def __init__(self, subset):
        super().__init__()
        if (subset == 'st'):
            self.base_path = os.path.join(self.env_settings.vtuav_path, 'short-term')
        elif (subset == 'lt'):
            self.base_path = os.path.join(self.env_settings.vtuav_path, 'long-term')
        else:
            raise ValueError(f'No {subset} subset in VTUAV TEST!')
        self.sequence_list = self._get_sequence_list(subset)
        self.subset = subset

    def get_sequence_list(self):
        """Materialise all sequences of the chosen subset."""
        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])

    def _construct_sequence(self, sequence_name):
        """Build one Sequence: paired (rgb, ir) frame paths plus the
        horizontally concatenated RGB+IR ground-truth boxes."""
        anno_path_rgb = '{}/{}/rgb.txt'.format(self.base_path, sequence_name)
        anno_path_x = '{}/{}/ir.txt'.format(self.base_path, sequence_name)
        ground_truth_rect_rgb = load_text(str(anno_path_rgb), delimiter=' ', dtype=np.float64)
        ground_truth_rect_x = load_text(str(anno_path_x), delimiter=' ', dtype=np.float64)
        # 4 RGB box columns + 4 IR box columns -> 8 columns per frame.
        ground_truth_rect = np.concatenate([ground_truth_rect_rgb, ground_truth_rect_x], axis=1)
        rgb_frames_path = '{}/{}/{}'.format(self.base_path, sequence_name, 'rgb')
        rgb_frame_list = sorted([frame for frame in os.listdir(rgb_frames_path) if frame.endswith('.jpg')])
        rgb_frames_list = [os.path.join(rgb_frames_path, frame) for frame in rgb_frame_list]
        x_frames_path = '{}/{}/{}'.format(self.base_path, sequence_name, 'ir')
        x_frame_list = sorted([frame for frame in os.listdir(x_frames_path) if frame.endswith('.jpg')])
        x_frames_list = [os.path.join(x_frames_path, frame) for frame in x_frame_list]
        # NOTE(review): assumes rgb/ and ir/ contain the same number of
        # frames in matching sorted order — confirm against the dataset.
        frames_list = list(zip(rgb_frames_list, x_frames_list))
        return Sequence(sequence_name, frames_list, 'vtuav_{}'.format(self.subset), ground_truth_rect.reshape((- 1), 8))

    def __len__(self):
        return len(self.sequence_list)

    def _get_sequence_list(self, subset):
        """Return the hard-coded sequence names for 'st' or 'lt'."""
        if (subset == 'st'):
            sequence_list = ['test_ST_001/animal_001', 'test_ST_001/bike_003', 'test_ST_001/bike_005', 'test_ST_001/bike_006', 'test_ST_001/bike_008', 'test_ST_001/bus_001', 'test_ST_001/bus_004', 'test_ST_001/bus_006', 'test_ST_001/bus_007', 'test_ST_001/bus_026', 'test_ST_001/bus_028', 'test_ST_002/bus_029', 'test_ST_002/c-vehicle_003', 'test_ST_002/cable_002', 'test_ST_002/car_004', 'test_ST_002/car_005', 'test_ST_002/car_006', 'test_ST_002/car_007', 'test_ST_002/car_012', 'test_ST_002/car_020', 'test_ST_002/car_022', 'test_ST_002/car_027', 'test_ST_002/car_042', 'test_ST_002/car_049', 'test_ST_002/car_053', 'test_ST_002/car_056', 'test_ST_002/car_059', 'test_ST_003/car_060', 'test_ST_003/car_061', 'test_ST_003/car_063', 'test_ST_003/car_064', 'test_ST_003/car_065', 'test_ST_003/car_067', 'test_ST_003/car_072', 'test_ST_003/car_075', 'test_ST_003/car_077', 'test_ST_003/car_079', 'test_ST_003/car_096', 'test_ST_003/car_097', 'test_ST_003/car_101', 'test_ST_003/car_106', 'test_ST_003/car_109', 'test_ST_003/car_110', 'test_ST_004/bus_014', 'test_ST_004/bus_019', 'test_ST_004/bus_021', 'test_ST_004/car_112', 'test_ST_004/car_123', 'test_ST_004/car_128', 'test_ST_004/car_129', 'test_ST_004/car_132', 'test_ST_004/elebike_002', 'test_ST_004/elebike_004', 'test_ST_004/elebike_005', 'test_ST_004/elebike_006', 'test_ST_004/elebike_007', 'test_ST_004/elebike_008', 'test_ST_004/elebike_010', 'test_ST_004/elebike_011', 'test_ST_004/elebike_018', 'test_ST_004/elebike_019', 'test_ST_004/tricycle_027', 'test_ST_004/tricycle_032', 'test_ST_004/tricycle_035', 'test_ST_004/tricycle_037', 'test_ST_004/truck_004', 'test_ST_004/truck_007', 'test_ST_004/truck_008', 'test_ST_005/excavator_001', 'test_ST_005/pedestrian_001', 'test_ST_005/pedestrian_005', 'test_ST_005/pedestrian_006', 'test_ST_005/pedestrian_007', 'test_ST_005/pedestrian_010', 'test_ST_005/pedestrian_015', 'test_ST_005/pedestrian_016', 'test_ST_005/pedestrian_017', 'test_ST_006/pedestrian_038', 'test_ST_006/pedestrian_041',
                             'test_ST_006/pedestrian_044', 'test_ST_006/pedestrian_046', 'test_ST_006/pedestrian_050', 'test_ST_006/pedestrian_051', 'test_ST_006/pedestrian_052', 'test_ST_006/pedestrian_053', 'test_ST_006/pedestrian_056', 'test_ST_006/pedestrian_058', 'test_ST_006/pedestrian_060', 'test_ST_006/pedestrian_062', 'test_ST_006/pedestrian_064', 'test_ST_007/pedestrian_077', 'test_ST_007/pedestrian_079', 'test_ST_007/pedestrian_080', 'test_ST_007/pedestrian_088', 'test_ST_007/pedestrian_089', 'test_ST_007/pedestrian_093', 'test_ST_007/pedestrian_095', 'test_ST_007/pedestrian_098', 'test_ST_007/pedestrian_109', 'test_ST_007/pedestrian_110', 'test_ST_007/pedestrian_111', 'test_ST_007/pedestrian_112', 'test_ST_007/pedestrian_113', 'test_ST_007/pedestrian_117', 'test_ST_007/pedestrian_119', 'test_ST_007/pedestrian_120', 'test_ST_007/pedestrian_121', 'test_ST_007/pedestrian_122', 'test_ST_007/pedestrian_123', 'test_ST_007/pedestrian_127', 'test_ST_007/pedestrian_130', 'test_ST_007/pedestrian_134', 'test_ST_007/pedestrian_136', 'test_ST_007/pedestrian_138', 'test_ST_008/pedestrian_139', 'test_ST_008/pedestrian_142', 'test_ST_008/pedestrian_143', 'test_ST_008/pedestrian_148', 'test_ST_008/pedestrian_149', 'test_ST_008/pedestrian_150', 'test_ST_008/pedestrian_151', 'test_ST_008/pedestrian_152', 'test_ST_008/pedestrian_153', 'test_ST_008/pedestrian_154', 'test_ST_009/pedestrian_173', 'test_ST_009/pedestrian_179', 'test_ST_009/pedestrian_183', 'test_ST_009/pedestrian_185', 'test_ST_009/pedestrian_192', 'test_ST_009/pedestrian_195', 'test_ST_009/pedestrian_196', 'test_ST_009/pedestrian_209', 'test_ST_009/pedestrian_211', 'test_ST_009/pedestrian_213', 'test_ST_009/pedestrian_215', 'test_ST_010/ship_001', 'test_ST_010/train_003', 'test_ST_010/train_004', 'test_ST_010/tricycle_003', 'test_ST_010/tricycle_004', 'test_ST_010/tricycle_005', 'test_ST_010/tricycle_006', 'test_ST_010/tricycle_007', 'test_ST_010/tricycle_008', 'test_ST_010/tricycle_009', 'test_ST_010/tricycle_010',
                             'test_ST_010/tricycle_011', 'test_ST_010/tricycle_016', 'test_ST_010/tricycle_017', 'test_ST_010/tricycle_019', 'test_ST_010/tricycle_023', 'test_ST_011/pedestrian_162', 'test_ST_011/pedestrian_163', 'test_ST_011/pedestrian_164', 'test_ST_011/pedestrian_217', 'test_ST_011/pedestrian_227', 'test_ST_011/pedestrian_229', 'test_ST_011/pedestrian_230', 'test_ST_011/pedestrian_234', 'test_ST_012/pedestrian_023', 'test_ST_012/pedestrian_025', 'test_ST_012/pedestrian_026', 'test_ST_012/pedestrian_155', 'test_ST_012/pedestrian_156', 'test_ST_012/pedestrian_161', 'test_ST_013/bus_010', 'test_ST_013/bus_012', 'test_ST_013/elebike_031', 'test_ST_013/elebike_032', 'test_ST_013/pedestrian_019', 'test_ST_013/pedestrian_020', 'test_ST_013/pedestrian_027', 'test_ST_013/pedestrian_028', 'test_ST_013/pedestrian_033', 'test_ST_013/pedestrian_034', 'test_ST_013/pedestrian_036']
        elif (subset == 'lt'):
            sequence_list = ['test_LT_001/bus_025', 'test_LT_001/bus_032', 'test_LT_001/car_001', 'test_LT_001/car_008', 'test_LT_001/car_054', 'test_LT_001/car_055', 'test_LT_001/car_057', 'test_LT_001/car_070', 'test_LT_002/animal_003', 'test_LT_002/animal_004', 'test_LT_002/bike_001', 'test_LT_002/car_010', 'test_LT_002/car_015', 'test_LT_002/car_018', 'test_LT_002/car_036', 'test_LT_002/car_046', 'test_LT_003/car_073', 'test_LT_003/car_091', 'test_LT_003/car_095', 'test_LT_003/car_103', 'test_LT_003/car_119', 'test_LT_003/car_125', 'test_LT_003/car_127', 'test_LT_003/elebike_003', 'test_LT_004/elebike_009', 'test_LT_004/elebike_012', 'test_LT_004/elebike_013', 'test_LT_004/elebike_014', 'test_LT_004/elebike_027', 'test_LT_004/elebike_029', 'test_LT_004/pedestrian_002', 'test_LT_004/pedestrian_009', 'test_LT_004/pedestrian_013', 'test_LT_004/pedestrian_014', 'test_LT_004/pedestrian_021', 'test_LT_004/pedestrian_024', 'test_LT_005/pedestrian_037', 'test_LT_005/pedestrian_055', 'test_LT_005/pedestrian_132', 'test_LT_005/pedestrian_137', 'test_LT_005/pedestrian_140', 'test_LT_005/pedestrian_141', 'test_LT_005/pedestrian_144', 'test_LT_005/pedestrian_145', 'test_LT_006/pedestrian_168', 'test_LT_006/pedestrian_178', 'test_LT_006/pedestrian_182', 'test_LT_006/pedestrian_184', 'test_LT_006/pedestrian_187', 'test_LT_006/pedestrian_188', 'test_LT_006/pedestrian_190', 'test_LT_006/pedestrian_194', 'test_LT_007/pedestrian_208', 'test_LT_007/pedestrian_220', 'test_LT_007/pedestrian_221', 'test_LT_008/pedestrian_214', 'test_LT_008/pedestrian_218', 'test_LT_008/pedestrian_219', 'test_LT_009/pedestrian_199', 'test_LT_009/pedestrian_201', 'test_LT_009/pedestrian_204', 'test_LT_009/pedestrian_205', 'test_LT_009/tricycle_002', 'test_LT_009/tricycle_012', 'test_LT_009/tricycle_013', 'test_LT_009/tricycle_015', 'test_LT_009/tricycle_018', 'test_LT_009/tricycle_025', 'test_LT_009/tricycle_036', 'test_LT_009/truck_001', 'test_LT_010/pedestrian_207', 'test_LT_010/pedestrian_212',
                             'test_LT_010/pedestrian_226', 'test_LT_010/pedestrian_232']
        else:
            raise ValueError(f'VTUAV has no {subset} subset')
        return sequence_list
# NOTE(review): the leading '@pytest.mark' appears to have been stripped from
# this parametrize decorator by whatever produced this file — restore it.
.parametrize('loss_bbox', [dict(type='L1Loss', loss_weight=1.0), dict(type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0), dict(type='IoULoss', loss_weight=1.0), dict(type='BoundedIoULoss', loss_weight=1.0), dict(type='GIoULoss', loss_weight=1.0), dict(type='DIoULoss', loss_weight=1.0), dict(type='CIoULoss', loss_weight=1.0), dict(type='MSELoss', loss_weight=1.0), dict(type='SmoothL1Loss', loss_weight=1.0), dict(type='BalancedL1Loss', loss_weight=1.0)])
def test_bbox_loss_compatibility(loss_bbox):
    """Smoke-test that Faster R-CNN trains one forward/backward-ready step
    with each supported bbox regression loss and yields a positive loss."""
    config_path = '_base_/models/faster_rcnn_r50_fpn.py'
    cfg_model = _get_detector_cfg(config_path)
    input_shape = (1, 3, 256, 256)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # IoU-family losses operate on decoded boxes, not deltas.
    if ('IoULoss' in loss_bbox['type']):
        cfg_model.roi_head.bbox_head.reg_decoded_bbox = True
    cfg_model.roi_head.bbox_head.loss_bbox = loss_bbox
    from mmdet.models import build_detector
    detector = build_detector(cfg_model)
    loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(loss, dict)
    (loss, _) = detector._parse_losses(loss)
    assert (float(loss.item()) > 0)
def convert_to_submission(source_dir, target_dir):
    """Copy each patient's two sorted frames from *source_dir* into
    *target_dir* as ``<patient>_ED.nii.gz`` / ``<patient>_ES.nii.gz``.

    Patient ids are the first 10 characters of the nifti filenames; the
    first (sorted) file per patient is taken as ED, the second as ES.
    """
    nifti_names = subfiles(source_dir, join=False, suffix='.nii.gz')
    maybe_mkdir_p(target_dir)
    for patient in np.unique([name[:10] for name in nifti_names]):
        patient_files = subfiles(source_dir, prefix=patient, suffix='.nii.gz', join=False)
        assert len(patient_files)
        patient_files.sort()
        ed_file, es_file = patient_files[0], patient_files[1]
        shutil.copy(join(source_dir, ed_file), join(target_dir, patient + '_ED.nii.gz'))
        shutil.copy(join(source_dir, es_file), join(target_dir, patient + '_ES.nii.gz'))
def tf_model_to_tar(tf_model: Model, run_id: int):
    """Save a Keras model to disk, pack it into a gzipped tarball, delete
    the intermediate SavedModel directory, and return the tar filename."""
    saved_model_dir = f'intent-model-{run_id}/1'
    local_tar_name = f'model-{run_id}.tar.gz'
    tf_model.save(filepath=saved_model_dir)
    with tarfile.open(local_tar_name, mode='w:gz') as archive:
        archive.add(saved_model_dir, recursive=True)
    # Remove the top-level 'intent-model-<id>' directory, not just '/1'.
    shutil.rmtree(saved_model_dir.split('/')[0])
    return local_tar_name
def checkpoint_dir(trainer: Trainer) -> str:
    """Return the path under the trainer's log directory where behaviour
    (br) policy checkpoints are stored."""
    return os.path.join(trainer.logdir, 'br_policy_checkpoints')
def get_mask(attention, thr_high, thr_low):
    """Build a 224x224 trimap-style mask from an attention map.

    Starts from a mask filled with 255 (unknown), then marks foreground
    via ``mask_fg`` (attention above *thr_high*) and background via
    ``mask_bg`` (attention below *thr_low*).
    """
    # One single-channel 224x224 mask per batch element, initialised to 255.
    mask = attention.new_zeros((attention.size(0), 1, 224, 224)).fill_(255)
    mask = mask_fg(mask, attention, thr_high)
    mask = mask_bg(mask, attention, thr_low)
    return mask
def save_analysis(chosen_data: list[dict], rejected_data: list[dict], output_dir: Path):
    """Write rejection analysis artifacts for a data-filtering run.

    Produces, under *output_dir*:
      - rejected_data.jsonl: all rejected records sorted by reason,
      - analysis.json: overall / per-language / per-reason counts,
      - examples/<lang>-<reason>-<i>.txt: up to 5 rejected examples per
        (language, reason) pair.

    NOTE(review): ``examples_dir.mkdir()`` raises if the directory already
    exists, and the per-language ratio divides by zero if a language has no
    records at all (cannot happen given how the dicts are built here).
    """
    rejected_data = sorted(rejected_data, key=(lambda x: x['reason']))
    write_jsonl((output_dir / 'rejected_data.jsonl'), rejected_data)
    # Group both partitions by language.
    chosen_data_dict = dict[(str, list[dict])]()
    rejected_data_dict = dict[(str, list[dict])]()
    for d in chosen_data:
        chosen_data_dict.setdefault(d['lang'], []).append(d)
    for d in rejected_data:
        rejected_data_dict.setdefault(d['lang'], []).append(d)
    all_langs = (set(chosen_data_dict.keys()) | set(rejected_data_dict.keys()))
    all_reasons = set((d['reason'] for d in rejected_data))
    analysis_dict = {'overall': {'total': (len(chosen_data) + len(rejected_data)), 'chosen': len(chosen_data), 'rejected': len(rejected_data), 'chosen_ratio': f'{(len(chosen_data) / (len(chosen_data) + len(rejected_data))):.2f}'}, 'lang': {lang: dict(total=((chosen_len := len(chosen_data_dict.get(lang, []))) + (rejected_len := len(rejected_data_dict.get(lang, [])))), chosen=chosen_len, rejected=rejected_len, chosen_ratio=f'{(chosen_len / (chosen_len + rejected_len)):.2f}') for lang in all_langs}, 'reason': {reason: sum((1 for d in rejected_data if (d['reason'] == reason))) for reason in set(all_reasons)}}
    (output_dir / 'analysis.json').write_text(json.dumps(analysis_dict, indent=2))
    max_examples_per_reason = 5
    examples_dir = (output_dir / 'examples')
    examples_dir.mkdir()
    for lang in all_langs:
        for reason in all_reasons:
            # Render each rejected record into a human-readable snippet.
            examples = [f'''[Seed]
{d['seed']}
[Prompt]
[Problem]
{d['problem']}
[Solution]
{d['solution']}''' for d in rejected_data_dict.get(lang, []) if (d['reason'] == reason)]
            examples = examples[:max_examples_per_reason]
            reason_str = reason.replace(' ', '_')
            for (i, example) in enumerate(examples):
                (examples_dir / f'{lang}-{reason_str}-{i}.txt').write_text(example)
class LikGauss(Likelihood):
    """Gaussian likelihood term for the GPML kernel-search grammar.

    ``sf`` is the log noise scale; ``sf == -inf`` encodes an exact
    (noise-free) likelihood with zero effective parameters, and
    ``sf is None`` means "not yet initialised".

    Fixes vs. the original: ``np.Inf`` (removed in NumPy 2.0) replaced by
    ``np.inf``; ``== None`` replaced by ``is None``.
    """

    def __init__(self, sf=None):
        self.sf = sf

    def gpml_function(self):
        # NOTE(review): both branches return the same string in the
        # original; kept as-is pending confirmation of the intended bodies.
        if self.sf > -np.inf:
            return '{}'
        else:
            return '{}'

    def is_thunk(self):
        return True

    def id(self):
        return 'Gauss'

    def param_vector(self):
        """Return the optimisable parameters: [sf], or empty when exact."""
        if self.sf > -np.inf:
            return np.array([self.sf])
        else:
            return np.array([])

    def latex(self):
        return '{\\sc GS}'

    def syntax(self):
        # self.depth is presumably provided by the Likelihood base class.
        return colored('GS', self.depth)

    def effective_params(self):
        """Number of free parameters (0 when the likelihood is exact)."""
        if self.sf == -np.inf:
            return 0
        else:
            return 1

    def gpml_inference_method(self):
        # NOTE(review): both branches identical in the original; kept.
        if self.sf > -np.inf:
            return ''
        else:
            return ''

    def copy(self):
        return LikGauss(sf=self.sf)

    def initialise_params(self, sd=1, data_shape=None):
        """Randomly initialise sf if unset: half the time near the data's
        log noise level, otherwise from a standard prior."""
        if self.sf is None:
            if np.random.rand() < 0.5:
                self.sf = np.random.normal(loc=(data_shape['y_sd'] - np.log(10)), scale=sd)
            else:
                self.sf = np.random.normal(loc=0, scale=sd)

    def __repr__(self):
        return 'LikGauss(sf=%s)' % self.sf

    def pretty_print(self):
        return colored('GS(sf=%s)' % format_if_possible('%1.1f', self.sf), self.depth)

    def load_param_vector(self, params):
        """Inverse of param_vector: empty vector means exact likelihood."""
        if len(params) == 0:
            self.sf = -np.inf
        else:
            (sf,) = params
            self.sf = sf
# NOTE(review): the leading '@' appears to have been stripped from this
# registry decorator by whatever produced this file — restore it.
_LAYERS.register_module()
class SparseInverseConv2d(SparseConvolution):
    """2-D sparse inverse (transposed) convolution; undoes a forward sparse
    conv that shares the same ``indice_key``."""

    def __init__(self, in_channels, out_channels, kernel_size, indice_key=None, bias=True):
        # ndim=2, inverse=True; indice pairs are reused via indice_key.
        super(SparseInverseConv2d, self).__init__(2, in_channels, out_channels, kernel_size, bias=bias, inverse=True, indice_key=indice_key)
def scale_grad(grad):
    """Turn a (N, C, H, W) gradient into an (H, W, N) saliency array.

    Channel-averaged absolute gradients are scaled by their 98th
    percentile and clipped to [0, 1], then returned as a NumPy array.
    """
    saliency = grad.abs().mean(dim=1).detach().permute(1, 2, 0)
    saliency = saliency / saliency.quantile(0.98)
    return saliency.clamp(0, 1).numpy()
class FORCESNLPsolver_final_outputs_ctypes(ctypes.Structure):
    """ctypes mirror of the FORCES NLP solver output struct: 20 horizon
    stages named x01..x20, each an array of 17 doubles."""
    _fields_ = [('x{:02d}'.format(stage), (ctypes.c_double * 17)) for stage in range(1, 21)]
def get_discriminator(model_config):
    """Build the GAN discriminator selected by ``model_config['d_name']``.

    Returns None for 'no_gan', a DataParallel-wrapped module for
    'patch_gan'/'multi_scale', or a dict {'patch': ..., 'full': ...} of two
    wrapped discriminators for 'double_gan'. Raises ValueError otherwise.
    """
    discriminator_name = model_config['d_name']
    if (discriminator_name == 'no_gan'):
        model_d = None
    elif (discriminator_name == 'patch_gan'):
        model_d = NLayerDiscriminator(n_layers=model_config['d_layers'], norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), use_sigmoid=False)
        # NOTE(review): DataParallel hard-codes GPU 0 — confirm intended.
        model_d = nn.DataParallel(model_d, device_ids=[0])
    elif (discriminator_name == 'double_gan'):
        patch_gan = NLayerDiscriminator(n_layers=model_config['d_layers'], norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), use_sigmoid=False)
        patch_gan = nn.DataParallel(patch_gan, device_ids=[0])
        full_gan = get_fullD(model_config)
        full_gan = nn.DataParallel(full_gan, device_ids=[0])
        model_d = {'patch': patch_gan, 'full': full_gan}
    elif (discriminator_name == 'multi_scale'):
        model_d = MultiScaleDiscriminator(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']))
        model_d = nn.DataParallel(model_d, device_ids=[0])
    else:
        raise ValueError(('Discriminator Network [%s] not recognized.' % discriminator_name))
    return model_d
class DataTrainingArguments():
    """HfArgumentParser-style container for data/training file options.

    NOTE(review): presumably decorated with ``@dataclass`` in the original
    file (the decorator seems to have been stripped) — confirm.
    """
    # Index of the CSV column holding the label (required).
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    # Annotation widened to Optional[str]: the default is None.
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def validate_and_save(cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool) -> Tuple[(List[Optional[float]], bool)]:
    """Decide whether to validate and/or checkpoint at the current step.

    Returns ``(valid_losses, should_stop)``; ``valid_losses`` is ``[None]``
    when validation was skipped. Stopping is triggered by reaching
    ``max_update``, exceeding ``stop_time_hours``, or early stopping on the
    first validation loss.
    """
    num_updates = trainer.get_num_updates()
    max_update = (cfg.optimization.max_update or math.inf)
    should_stop = False
    if (num_updates >= max_update):
        should_stop = True
        logger.info(f'Stopping training due to num_updates: {num_updates} >= max_update: {max_update}')
    training_time_hours = (trainer.cumulative_training_time() / (60 * 60))
    if ((cfg.optimization.stop_time_hours > 0) and (training_time_hours > cfg.optimization.stop_time_hours)):
        should_stop = True
        logger.info(f'Stopping training due to cumulative_training_time: {training_time_hours} > stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)')
    # Save at epoch boundaries on the save interval, when stopping, or every
    # save_interval_updates steps (once past validate_after_updates warmup).
    do_save = ((end_of_epoch and ((epoch_itr.epoch % cfg.checkpoint.save_interval) == 0)) or should_stop or ((cfg.checkpoint.save_interval_updates > 0) and (num_updates > 0) and ((num_updates % cfg.checkpoint.save_interval_updates) == 0) and (num_updates >= cfg.dataset.validate_after_updates)))
    # Validate whenever we save mid-epoch, on the epoch validate interval,
    # when stopping, or every validate_interval_updates — unless disabled.
    do_validate = ((((not end_of_epoch) and do_save) or (end_of_epoch and ((epoch_itr.epoch % cfg.dataset.validate_interval) == 0)) or should_stop or ((cfg.dataset.validate_interval_updates > 0) and (num_updates > 0) and ((num_updates % cfg.dataset.validate_interval_updates) == 0))) and (not cfg.dataset.disable_validation))
    valid_losses = [None]
    if do_validate:
        valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
        # Early stopping is judged on the first validation subset's loss.
        should_stop |= should_stop_early(cfg, valid_losses[0])
    if (do_save or should_stop):
        checkpoint_utils.save_checkpoint(cfg.checkpoint, trainer, epoch_itr, valid_losses[0])
    return (valid_losses, should_stop)
class RevGrad(Module):
    """Gradient-reversal layer: identity in the forward pass; the backward
    pass (implemented by the ``revgrad`` autograd function) flips the
    gradient, scaled by ``alpha``."""

    def __init__(self, alpha=1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Stored as a non-trainable tensor so it can move with the module.
        self.alpha = tensor(alpha, requires_grad=False)

    def forward(self, input_):
        return revgrad(input_, self.alpha)
class DenseConvBlock():
    """Thin callable wrapper around a DenseConv2D layer whose channel count
    is ``growth_rate * n_layers`` with ``n_layers`` clamped to [1, 3]."""

    def __init__(self, growth_rate=64, n_layers=1, bottleneck_factor=1, **kwargs):
        # Clamp n_layers into [1, 3] (min first, then max — same as original).
        clamped_layers = np.maximum(np.minimum(n_layers, 3), 1)
        self.dense_conv = DenseConv2D((growth_rate * clamped_layers), bottleneck_factor)

    def __call__(self, inputs):
        return self.call(inputs)

    def call(self, inputs):
        """Apply the wrapped dense convolution."""
        return self.dense_conv(inputs)
# NOTE(review): the route decorator prefix (e.g. '@app.post') appears to have
# been stripped from this line by whatever produced this file — restore it.
('/conv', response_model=List[TurnResponse])
def conversational_entity_linking(config: ConversationConfig):
    """Endpoint: run conversational entity linking for the given
    conversation config; short-circuits to an empty list in DEBUG mode."""
    if DEBUG:
        return []
    return config.response()
def build_transforms(cfg, mode='train'):
    """Compose the image transform pipeline for 'train', 'test', or 'val'.

    The pipeline resizes into [SCALES[0], SCALES[1]], applies the
    mode-specific horizontal-flip probability, converts to tensor,
    normalises with the network's pixel stats (BGR*255), and pads to the
    fixed min/max size.
    """
    assert (mode in ['train', 'test', 'val'])
    min_size = cfg.SCALES[0]
    max_size = cfg.SCALES[1]
    assert (min_size <= max_size)
    # Flip probability differs per split (usually 0 for test/val).
    if (mode == 'train'):
        flip_prob = cfg.TRAIN.FLIP_PROB
    elif (mode == 'test'):
        flip_prob = cfg.TEST.FLIP_PROB
    else:
        flip_prob = cfg.VAL.FLIP_PROB
    to_bgr255 = True
    normalize_transform = T.Normalize(mean=cfg.NETWORK.PIXEL_MEANS, std=cfg.NETWORK.PIXEL_STDS, to_bgr255=to_bgr255)
    transform = T.Compose([T.Resize(min_size, max_size), T.RandomHorizontalFlip(flip_prob), T.ToTensor(), normalize_transform, T.FixPadding(min_size, max_size, pad=0)])
    return transform
# NOTE(review): the '@' prefixes of these click decorators (command/argument/
# option) appear to have been stripped by whatever produced this file.
_group.command('get')
('name')
('path', type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True))
_project(required=True)
def get_sim(name, path, project=None):
    """CLI command: download simulation *name* into directory *path*.

    HTTP failures are reported in red (with the server's JSON body on 400);
    a NameError from the helper is reported as a warning in yellow.
    """
    from cli.sims import download_sim
    try:
        output_path = download_sim(name, path, project)
        click.echo(f"Downloaded sim '{name}' to {output_path}")
    except requests.exceptions.HTTPError as e:
        click.secho(f'Failed to download sim: {e}', fg='red', err=True)
        if (e.response.status_code == 400):
            click.secho(str(e.response.json()), fg='red', err=True)
    except NameError as e:
        click.secho(f'Failed to download sim: {e}', fg='yellow', err=True)
def fc_elu_layer(name, bottom, output_dim, is_train, bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None, dropout=False, dropout_rate=0.3):
    """Fully-connected layer followed by ELU (TF1-style graph op).

    When *dropout* is enabled, dropout is applied to the INPUT (before the
    FC), and only when the ``is_train`` tensor is true at run time.
    """
    if dropout:
        bottom = tf.cond(is_train, (lambda : tf.nn.dropout(bottom, rate=dropout_rate)), (lambda : bottom))
    fc = fc_layer(name, bottom, output_dim, bias_term, weights_initializer, biases_initializer, reuse=reuse)
    elu = tf.nn.elu(fc)
    return elu
def extract_block(content: str, indent_level: int=0) -> str:
    """Extract the first block of *content* written at *indent_level*.

    Collects lines until one appears at a shallower indent; a bare closing
    marker (')', ']', '}', '\"\"\"') at the block's own indent is included
    and terminates the block immediately.

    NOTE(review): if no line is collected the function implicitly returns
    ``None`` despite the ``-> str`` annotation — confirm callers handle it.
    Relies on sibling helpers ``is_empty_line`` / ``find_indent``.
    """
    current_object = []
    lines = content.split('\n')
    end_markers = [')', ']', '}', '"""']
    for (idx, line) in enumerate(lines):
        # The first non-empty line must sit exactly at the requested indent.
        if ((idx == 0) and (indent_level > 0) and (not is_empty_line(line)) and (find_indent(line) != indent_level)):
            raise ValueError(f'When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got {find_indent(line)} instead.')
        # A shallower (non-empty) line ends the block.
        if ((find_indent(line) < indent_level) and (not is_empty_line(line))):
            break
        is_valid_object = (len(current_object) > 0)
        if ((not is_empty_line(line)) and (not line.endswith(':')) and (find_indent(line) == indent_level) and is_valid_object):
            # A lone closing marker closes the block early (and is kept).
            if (line.lstrip() in end_markers):
                current_object.append(line)
                return '\n'.join(current_object)
        else:
            current_object.append(line)
    if (len(current_object) > 0):
        return '\n'.join(current_object)
def test_precompute():
    """Mel-feature precompute smoke test: after running ``precompute`` on
    four UrbanSound8K rows, the expected feature file must exist."""
    settings = dict(feature='mels', samplerate=16000, n_mels=32, fmin=0, fmax=8000, n_fft=512, hop_length=256, augmentations=12)
    dir = './pre2'
    # Start from a clean output directory.
    if os.path.exists(dir):
        shutil.rmtree(dir)
    workdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/'))
    data = urbansound8k.load_dataset()
    urbansound8k.maybe_download_dataset(workdir)
    d = os.path.join(dir, features.settings_id(settings))
    expect_path = features.feature_path(data.iloc[0], d)
    # Sanity: must not exist before, must exist after precompute.
    assert (not os.path.exists(expect_path)), expect_path
    preprocess.precompute(data[0:4], settings, out_dir=d, verbose=0, force=True, n_jobs=2)
    assert os.path.exists(expect_path), expect_path
class TestSparseProductCUDA(unittest.TestCase):
    """CUDA tests for ``sparse_dot_product`` against a dense einsum oracle."""

    # NOTE(review): presumably decorated with @classmethod in the original
    # file (decorator seems stripped) — confirm.
    def setUpClass(cls):
        if (not torch.cuda.is_available()):
            raise unittest.SkipTest('No CUDA capable device detected')

    def test_single_query(self):
        """One query vector: sparse products must match the gathered dense ones."""
        X = torch.randn(1, 1, 1, 32).cuda()
        Y = torch.randn(1, 1, 100, 32).cuda()
        lengths = torch.full((1,), 1, dtype=torch.int32).cuda()
        topk = torch.cumsum((torch.rand(1, 1, 1, 10) * 10), dim=(- 1)).long().cuda()
        products = sparse_dot_product(X, Y, topk)
        all_products = torch.einsum('nhle,nhse->nhls', X, Y)
        self.assertLess(torch.max(torch.abs((products.squeeze() - all_products[(0, 0, 0, topk[(0, 0, 0)])]))), 0.0001)

    def test_simple_product(self):
        """Batched case: compare against dense products at the top-k indices."""
        X = torch.randn(10, 4, 100, 32).cuda()
        Y = torch.randn(10, 4, 100, 32).cuda()
        lengths = torch.full((10,), 100, dtype=torch.int32).cuda()
        topk = torch.cumsum((torch.rand(10, 4, 100, 10) * 10), dim=(- 1)).long().cuda()
        # The cumsum-based topk above is overwritten by real topk indices here.
        A = torch.randn(10, 4, 100, 100).to(X.device).requires_grad_(False)
        (topk_v, topk) = torch.topk(A, 10, dim=(- 1))
        topk = topk.contiguous()
        products = sparse_dot_product(X, Y, topk)
        all_products = torch.einsum('nhle,nhse->nhls', X, Y)
        self.assertLess(torch.max(torch.abs((products - all_products[(torch.arange(10).view(10, 1, 1, 1), torch.arange(4).view(1, 4, 1, 1), torch.arange(100).view(1, 1, 100, 1), topk)]))), 0.0001)

    # NOTE(review): the line below looks like the remnant of a stripped
    # '@unittest.skipUnless(...)' decorator guarding the benchmark.
    (os.getenv('BENCHMARK_TESTS', ''), 'no benchmarks')
    def test_small_benchmark(self):
        """Timed comparison of sparse vs. full products (1000 warmup iters)."""
        N = 12
        H = 8
        L = 1000
        S = 1000
        E = 32
        k = 32
        X = torch.randn(N, H, L, E).cuda()
        Y = torch.randn(N, H, S, E).cuda()
        A = torch.randn(N, H, L, S).to(X.device).requires_grad_(False)
        (topk_v, topk) = torch.topk(A, k, dim=(- 1))
        topk = topk.contiguous()
        # Warm up kernels/caches before timing.
        for i in range(1000):
            products = sparse_dot_product(X, Y, topk)
        torch.cuda.synchronize()
        s = torch.cuda.Event(enable_timing=True)
        e = torch.cuda.Event(enable_timing=True)
        s.record()
        products = sparse_dot_product(X, Y, topk)
        e.record()
        torch.cuda.synchronize()
        t_s = s.elapsed_time(e)
        for i in range(1000):
            torch.einsum('nhle,nhse->nhls', X, Y)
        s = torch.cuda.Event(enable_timing=True)
        e = torch.cuda.Event(enable_timing=True)
        s.record()
        torch.einsum('nhle,nhse->nhls', X, Y)
        e.record()
        torch.cuda.synchronize()
        t_f = s.elapsed_time(e)
        print('Sparse: {}, Full: {}, F/S: {}'.format(t_s, t_f, (t_f / t_s)))
def augmentor_sim(cascade_file, save_aug_file):
    """Augment diffusion cascades by probabilistically adding and deleting nodes.

    Reads tab-separated cascades from `cascade_file` (fields:
    id, root, hop, hop, ..., trailer; each hop is "n1,n2,...:t"), simulates
    synthetic adopters (biased toward high-degree nodes, with reaction times
    mixing the cascade mean and a globally fitted exponential) and removals
    (biased toward leaves under high-degree parents), then writes each
    augmented cascade via the module-level `save_file` helper.
    Depends on module-level FLAGS (max_seq, aug_strength, theta, t_o),
    networkx (nx), numpy (np), random and scipy.stats.expon.
    """
    def calculate_global_time(path):
        # Mean reaction time over the whole file, the number of cascades,
        # and an exponential (loc, scale) fit of all reaction-time gaps.
        all_ts = list()
        i = 0
        with open(path, 'r') as f:
            for line in f:
                i += 1
                last_t = 0
                paths = line.strip().split('\t')
                paths = paths[2:(- 1)]  # drop id/root fields and the trailer
                for path in paths:
                    t = int(path.split(':')[1])  # timestamp after ':'
                    reaction_t = (t - last_t)
                    last_t = t
                    all_ts.append(reaction_t)
        return (np.mean(all_ts), i, expon.fit(all_ts))

    def calculate_local_time(line):
        # Mean reaction time within a single cascade line.
        last_t = 0
        cascade_t = list()
        paths = line.strip().split('\t')
        paths = paths[2:(- 1)]
        for path in paths:
            t = int(path.split(':')[1])
            reaction_t = (t - last_t)
            last_t = t
            cascade_t.append(reaction_t)
        return np.mean(cascade_t)

    def node_degree(line):
        # Build the cascade graph (last two nodes of each hop form an edge);
        # return per-node degree, total degree, and the leaf (degree-1) count.
        g = nx.Graph()
        paths = line.strip().split('\t')[2:(- 1)]
        for path in paths:
            nodes = path.split(':')[0].split(',')
            g.add_edge(nodes[(- 1)], nodes[(- 2)])
        degree = {node: g.degree(node) for node in g.nodes()}
        all_degree = sum([g.degree(node) for node in g.nodes()])
        num_leaf_nodes = sum([1 for (node, degree) in g.degree if (len(list(nx.neighbors(g, node))) == 1)])
        return (degree, all_degree, num_leaf_nodes)

    def parent_node_degree(observation_path):
        # For every leaf, the degree of its single parent; plus the summed
        # parent degree and the number of leaves in the augmented graph.
        g = nx.Graph()
        for path in observation_path[1:(- 1)]:
            nodes = path.split(':')[0].split(',')
            g.add_edge(nodes[(- 1)], nodes[(- 2)])
        parent_degree = {node: nx.degree(g, list(nx.neighbors(g, node))[0]) for (node, degree) in g.degree if (len(list(nx.neighbors(g, node))) == 1)}
        all_parent_degree = sum([nx.degree(g, list(nx.neighbors(g, node))[0]) for (node, degree) in g.degree if (len(list(nx.neighbors(g, node))) == 1)])
        num_added_leaf_nodes = sum([1 for (node, degree) in g.degree if (len(list(nx.neighbors(g, node))) == 1)])
        return (parent_degree, all_parent_degree, num_added_leaf_nodes)

    # (loc, scale) parametrize the exponential used for synthetic reaction times.
    (mean_global_t, num_samples, (loc, scale)) = calculate_global_time(cascade_file)
    with open(cascade_file, 'r') as f, open(save_aug_file, 'w') as save_f:
        i = 0
        for line in f:
            mean_local_t = calculate_local_time(line)
            (degree, all_degree, num_leaf_nodes) = node_degree(line)
            added_nodes = list()
            # Keep the id/root plus at most max_seq hops.
            paths = line.strip().split('\t')[:(- 1)][:(FLAGS.max_seq + 1)]
            num_ori_nodes = (len(paths) - 1)
            # eta scales the expected number of added nodes.
            eta = (FLAGS.aug_strength * num_ori_nodes)
            cascade_id = int(paths[0])
            observation_path = [paths[1]]
            added_node_idx = 1
            for path in paths[2:]:
                nodes = path.split(':')[0].split(',')
                cur_node = nodes[(- 1)]
                observation_path.append(path)
                # Add a synthetic child with probability proportional to the
                # current node's degree share (preferential attachment).
                add_node_prob = (eta * (degree[cur_node] / all_degree))
                if (random.random() > add_node_prob):
                    continue
                # Reaction time: theta-weighted mix of the cascade's mean gap
                # and a sample from the globally fitted exponential.
                t = ((int(path.split(':')[1]) + (FLAGS.theta * mean_local_t)) + ((1 - FLAGS.theta) * expon.rvs(loc, scale, size=1)))
                nodes.append(('-' + str(added_node_idx)))  # synthetic ids carry a '-' prefix
                added_node_idx += 1
                if (t > FLAGS.t_o):
                    t = FLAGS.t_o  # clamp to the observation window
                added_nodes.append(((','.join(nodes) + ':') + str(int(t))))
            observation_path.extend(added_nodes)
            num_added_nodes = len(observation_path)  # NOTE(review): unused below
            (parent_degree, all_parent_degree, num_added_leaf_nodes) = parent_node_degree(observation_path)
            eta = (FLAGS.aug_strength * num_added_leaf_nodes)
            if (num_leaf_nodes != 0):
                # Remove leaves with probability proportional to their
                # parent's degree share; iterating the slice copy makes the
                # .remove() calls on observation_path safe.
                for path in observation_path[1:]:
                    nodes = path.split(':')[0].split(',')
                    cur_node = nodes[(- 1)]
                    try:
                        del_node_prob = (eta * (parent_degree[cur_node] / all_parent_degree))
                    except KeyError:
                        continue  # not a leaf in the augmented graph
                    if (random.random() < del_node_prob):
                        observation_path.remove(path)
            # Keep hops in chronological order before saving.
            observation_path.sort(key=(lambda tup: int(tup.split(':')[1])))
            num_del_nodes = len(observation_path)  # NOTE(review): unused below
            save_file(cascade_id, observation_path, save_f)
            i += 1
def retrofit_eval_fn(original_fn):
    """Wrap an eval function so it accepts one view index or several.

    An int `view_index` is forwarded unchanged.  A list/tuple (given
    explicitly, or taken from the model builder's `view_index` when omitted)
    causes `original_fn` to run once per view; the mean result is returned.
    """
    def wrapper(model_id, *args, **kwargs):
        view_index = kwargs.pop('view_index', None)
        if isinstance(view_index, int):
            # Fast path: a single concrete view was requested.
            return original_fn(model_id, *args, view_index=view_index, **kwargs)
        if view_index is None:
            # Fall back to whatever views the model's builder declares.
            view_index = get_builder(model_id).view_index
            if isinstance(view_index, int):
                return original_fn(model_id, *args, view_index=view_index, **kwargs)
        assert isinstance(view_index, (list, tuple))
        per_view = [original_fn(model_id, *args, view_index=vi, **kwargs) for vi in view_index]
        return np.mean(per_view)
    return wrapper
class URL(object):
    """A Uniform Resource Locator with lazily parsed parts.

    Parts (protocol, username, password, domain, port, path, page, query,
    anchor) are parsed on first access and can be read and assigned as
    attributes.  open() / download() retrieve the resource, mapping common
    HTTP status codes to the module's typed HTTPError subclasses.
    """

    def __init__(self, string='', method=GET, query={}, **kwargs):
        """Create a URL from a string or another URL.

        `query` items and `kwargs` part overrides are merged into the
        parsed parts.  (The mutable default `query` is only read, never
        mutated, so the shared default is safe; kept for compatibility.)
        """
        self.__dict__['method'] = method
        self.__dict__['_string'] = u(string)
        self.__dict__['_parts'] = None      # lazily populated by _parse()
        self.__dict__['_headers'] = None    # cached HTTP response headers
        self.__dict__['_redirect'] = None   # cached redirected URL
        if isinstance(string, URL):
            self.__dict__['method'] = string.method
            self.query.update(string.query)
        if len(query) > 0:
            self.query.update(query)
        if len(kwargs) > 0:
            self.parts.update(kwargs)

    def _parse(self):
        """Split self._string into a {part: value} dict stored in _parts."""
        p = urlsplit(self._string)
        P = {PROTOCOL: p[0],
             USERNAME: '',
             PASSWORD: '',
             DOMAIN: p[1],
             PORT: '',
             PATH: p[2],
             PAGE: '',
             QUERY: urldecode(p[3]),
             ANCHOR: p[4]}
        # Split credentials from the network location.
        # BUG FIX: the userinfo separator is '@'; the previous code tested
        # "'' in P[DOMAIN]" (always true) and then called split('') which
        # raises ValueError.
        if '@' in P[DOMAIN]:
            P[USERNAME], P[PASSWORD] = (p[1].split('@')[0].split(':') + [''])[:2]
            P[DOMAIN] = p[1].split('@')[1]
        # Split off an explicit port, e.g. "example.com:8080"; numeric
        # ports become ints.
        if ':' in P[DOMAIN]:
            P[DOMAIN], P[PORT] = P[DOMAIN].split(':')
            P[PORT] = P[PORT].isdigit() and int(P[PORT]) or P[PORT]
        # Split the path into intermediate folders and the final page.
        if '/' in P[PATH]:
            P[PAGE] = p[2].split('/')[-1]
            P[PATH] = p[2][:len(p[2]) - len(P[PAGE])].strip('/').split('/')
            P[PATH] = list(filter(lambda v: v != '', P[PATH]))
        else:
            P[PAGE] = p[2].strip('/')
            P[PATH] = []
        self.__dict__['_parts'] = P

    def _get_string(self):
        return str(self)

    def _set_string(self, v):
        self.__dict__['_string'] = u(v)
        self.__dict__['_parts'] = None  # invalidate the parsed cache

    # The reassembled URL string; assigning it invalidates the parsed parts.
    string = property(_get_string, _set_string)

    @property
    def parts(self):
        """Dictionary of URL parts, parsed on first access."""
        # BUG FIX: restored @property — the class itself reads self.parts
        # as an attribute (see __init__, __getattr__, __str__).
        if not self._parts:
            self._parse()
        return self._parts

    @property
    def querystring(self):
        """The query part encoded as a "k=v&..." string."""
        # BUG FIX: restored @property — open() and __str__ use
        # self.querystring as a value, not a call.
        s = self.parts[QUERY].items()
        s = dict((bytestring(k), v if v is not None else '') for k, v in s)
        if sys.version > '3':
            s = urlencode(s)
        else:
            # Python 2: UTF-8-encode values before urlencode.
            t = {key: (value.encode('utf-8') if isinstance(value, str) else value) for (key, value) in s.items()}
            s = urlencode(t).decode('utf-8')
        return s

    def __getattr__(self, k):
        # Unknown attributes fall back to the parsed URL parts.
        if k in self.__dict__:
            return self.__dict__[k]
        if k in self.parts:
            return self.__dict__['_parts'][k]
        raise AttributeError("'URL' object has no attribute '%s'" % k)

    def __setattr__(self, k, v):
        # Assignments update existing instance state, the string, or a part.
        if k in self.__dict__:
            self.__dict__[k] = u(v)
            return
        if k == 'string':
            self._set_string(v)
            return
        if k == 'query':
            self.parts[k] = v
            return
        if k in self.parts:
            self.__dict__['_parts'][k] = u(v)
            return
        raise AttributeError("'URL' object has no attribute '%s'" % k)

    def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
        """Return a file-like connection to the URL.

        Local paths are opened directly.  Supports POST data, an optional
        (host, protocol) proxy, cookies and HTTP basic authentication.
        HTTP failures are re-raised as the module's typed exceptions.
        """
        try:
            from http.client import BadStatusLine  # Python 3
        except ImportError:
            from httplib import BadStatusLine      # Python 2
        url = self.string
        if os.path.exists(url):
            # A local file path: open it as-is.
            return urlopen(url)
        post = (self.method == POST) and self.querystring or None
        socket.setdefaulttimeout(timeout)
        handlers = []
        if proxy:
            handlers.append(ProxyHandler({proxy[1]: proxy[0]}))
        handlers.append(HTTPCookieProcessor(cookielib.CookieJar()))
        handlers.append(HTTPHandler)
        install_opener(build_opener(*handlers))
        try:
            request = Request(url, post, {'User-Agent': user_agent, 'Referer': referrer})
            if authentication is not None:
                authentication = tuple(encode_utf8(x) for x in authentication)
                request.add_header('Authorization', 'Basic %s' % decode_utf8(base64.b64encode(b'%s:%s' % authentication)))
            return urlopen(request)
        except UrllibHTTPError as e:
            # Map well-known status codes to typed exceptions.
            if e.code == 301:
                raise HTTP301Redirect(src=e, url=url)
            if e.code == 400:
                raise HTTP400BadRequest(src=e, url=url)
            if e.code == 401:
                raise HTTP401Authentication(src=e, url=url)
            if e.code == 403:
                raise HTTP403Forbidden(src=e, url=url)
            if e.code == 404:
                raise HTTP404NotFound(src=e, url=url)
            if e.code == 414:
                raise HTTP414RequestURITooLong(src=e, url=url)
            if e.code == 420:
                raise HTTP420Error(src=e, url=url)
            if e.code == 429:
                raise HTTP429TooMayRequests(src=e, url=url)
            if e.code == 500:
                raise HTTP500InternalServerError(src=e, url=url)
            if e.code == 503:
                raise HTTP503ServiceUnavailable(src=e, url=url)
            raise HTTPError(str(e), src=e, url=url)
        except BadStatusLine as e:
            # BUG FIX: the exception type was missing here ("except as e:"
            # is a syntax error); a malformed status line is an HTTP error.
            raise HTTPError(str(e), src=e, url=url)
        except socket.timeout as e:
            raise URLTimeout(src=e, url=url)
        except socket.error as e:
            # Padding e.args guards against tuples shorter than 2.
            if 'timed out' in str((e.args + ('', ''))[0]) or 'timed out' in str((e.args + ('', ''))[1]):
                raise URLTimeout(src=e, url=url)
            raise URLError(str(e), src=e, url=url)
        except UrllibURLError as e:
            if 'timed out' in str(e.reason):
                raise URLTimeout(src=e, url=url)
            raise URLError(str(e), src=e, url=url)
        except ValueError as e:
            raise URLError(str(e), src=e, url=url)

    def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
        """Download and return the content, optionally cached and throttled.

        The cache key is the raw URL string for plain GET requests, or the
        repr of the parts otherwise, with any oauth_* parameters stripped
        and a "u" prefix for unicode downloads.
        """
        if self._parts is None and self.method == GET and 'oauth_' not in self._string:
            id = self._string
        else:
            id = repr(self.parts)
            id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", '', id)
        if unicode is True:
            id = 'u' + id
        if cached and id in cache:
            if isinstance(cache, dict):
                return cache[id]
            if unicode is True:
                return cache[id]
            if unicode is False:
                return cache.get(id, unicode=False)
        t = time.time()
        try:
            data = self.open(timeout, proxy, user_agent, referrer, authentication).read()
        except socket.timeout as e:
            raise URLTimeout(src=e, url=self.string)
        if unicode is True:
            data = u(data)
        if cached:
            cache[id] = data
        if throttle:
            # Sleep off whatever part of the throttle window the request
            # itself did not already consume.
            time.sleep(max(throttle - (time.time() - t), 0))
        return data

    def read(self, *args, **kwargs):
        """Open the URL and read (up to *args bytes of) its content."""
        return self.open(**kwargs).read(*args)

    def exists(self, timeout=10):
        """Best-effort reachability check: 404 / URL errors => False."""
        try:
            self.open(timeout)
        except HTTP404NotFound:
            return False
        except HTTPError:
            return True
        except URLTimeout:
            return True
        except URLError:
            return False
        except:
            # Deliberate best-effort: any other failure counts as "exists".
            return True
        return True

    @property
    def mimetype(self, timeout=10):
        """Content-Type of the response without parameters, or None."""
        try:
            return self.headers['content-type'].split(';')[0]
        except KeyError:
            return None

    @property
    def headers(self, timeout=10):
        """Dictionary of HTTP response headers (cached after first fetch)."""
        # BUG FIX: restored @property — mimetype indexes self.headers as a
        # dict. Header fetch failures yield an empty dict.
        if self.__dict__['_headers'] is None:
            try:
                h = dict(self.open(timeout).info())
            except URLError:
                h = {}
            self.__dict__['_headers'] = h
        if 'Content-Type' in self.__dict__['_headers']:
            # Mirror under the lowercase key used elsewhere in the class.
            self.__dict__['_headers']['content-type'] = self.__dict__['_headers']['Content-Type']
        return self.__dict__['_headers']

    @property
    def redirect(self, timeout=10):
        """The URL this one redirects to, or None if it does not redirect."""
        if self.__dict__['_redirect'] is None:
            try:
                r = u(self.open(timeout).geturl())
            except URLError:
                r = None
            # Cache '' (falsy, but not None) when there is no redirect.
            self.__dict__['_redirect'] = (r != self.string) and r or ''
        return self.__dict__['_redirect'] or None

    def __str__(self):
        # Reassemble the URL from its parsed parts.
        P = self.parts
        s = []
        if P[PROTOCOL]:
            s.append('%s://' % P[PROTOCOL])
        if P[USERNAME]:
            # BUG FIX: the format string was the truncated '%s:%' (which
            # raises ValueError); credentials serialize as "user:password@".
            s.append('%s:%s@' % (P[USERNAME], P[PASSWORD]))
        if P[DOMAIN]:
            s.append(P[DOMAIN])
        if P[PORT]:
            s.append(':%s' % P[PORT])
        if P[PORT] or (P[DOMAIN] and not P[PATH] and not P[PAGE]):
            s.append('/')
        if P[PATH]:
            s.append('/%s/' % '/'.join(P[PATH]))
        if P[PAGE] and len(s) > 0:
            s[-1] = s[-1].rstrip('/')
        if P[PAGE]:
            s.append('/%s' % P[PAGE])
        if P[QUERY] and self.method == GET:
            s.append('?%s' % self.querystring)
        if P[ANCHOR]:
            s.append('#%s' % P[ANCHOR])
        s = ''.join(s)
        s = s.lstrip('/')
        return s

    def __repr__(self):
        return 'URL(%s, method=%s)' % (repr(self.string), repr(self.method))

    def copy(self):
        """Return a new URL with the same string, method and query."""
        return URL(self.string, self.method, self.query)
class TrainingSchedule(Callback):
    """Keras callback: wall-clock-based training stop and step LR schedule.

    Training stops once `total_time` seconds have elapsed.  The learning
    rate starts at 0.1, drops to 0.02 after 50% of the time budget and to
    0.004 after 80%.
    """

    def __init__(self, total_time):
        self._total_time = total_time
        self._lr = self._get_lr(0.0)

    def _get_lr(self, progress):
        # Piecewise-constant schedule over normalized progress in [0, 1].
        if progress > 0.8:
            return 0.004
        if progress > 0.5:
            return 0.02
        return 0.1

    def on_train_begin(self, logs={}):
        # Start the clock and push the initial learning rate to the optimizer.
        self._start = time.time()
        self._lr = self._get_lr(0.0)
        K.set_value(self.model.optimizer.lr, self._lr)

    def on_batch_end(self, batch, logs):
        elapsed = time.time() - self._start
        if elapsed >= self._total_time:
            self.model.stop_training = True
        new_lr = self._get_lr(elapsed / self._total_time)
        # Only touch the optimizer when the schedule actually steps.
        if new_lr != self._lr:
            self._lr = new_lr
            K.set_value(self.model.optimizer.lr, self._lr)

    def lr(self):
        """Return the most recently applied learning rate."""
        return self._lr
class PreResActivation(nn.Module):
    """Pre-activation block: BatchNorm2d followed by ReLU.

    Used by pre-activation ResNet variants, where normalization and the
    non-linearity come before the convolution.
    """

    def __init__(self, in_channels, bn_affine=True):
        super(PreResActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels, affine=bn_affine)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        # Normalize, then rectify (ReLU is applied in place on the BN output).
        return self.activ(self.bn(x))
def run_multi_process_init_distributed(codes=None, nproc=2, training_script=None, training_script_args=''):
    """Launch `training_script` under the distributed launcher with `nproc` processes.

    If `codes` is given, it is written to a temporary script that is launched
    instead of `training_script`.  Sets WORLD_SIZE and a free MASTER_PORT in
    the environment as a side effect.  `training_script_args` must be an
    iterable of argument strings (the default '' yields no extra args).
    """
    if codes is not None:
        # BUG FIX: suffix was 'py' (missing dot), producing names such as
        # '/tmp/tmpXXXXpy'; use '.py' so the file is a proper Python source.
        fd, training_script = tempfile.mkstemp(suffix='.py')
        with open(fd, 'w') as f:
            f.write(codes)
    os.environ['WORLD_SIZE'] = '1'
    os.environ['MASTER_PORT'] = str(find_free_port())
    input_args = [f'--nproc_per_node={nproc}', f'{training_script}'] + list(training_script_args)
    args = parse_args(input_args)
    main(args)
_tokenizer('nltk')
class NLTKTokenizer(object):
    """Tokenizer backed by NLTK's word_tokenize.

    `encode` joins the word tokens with single spaces; `decode` is the
    identity, since the tokenization is not reversible.
    """

    def __init__(self, source_lang=None, target_lang=None):
        # Import lazily so nltk is only required when the tokenizer is built.
        try:
            from nltk.tokenize import word_tokenize
        except ImportError:
            raise ImportError('Please install nltk with: pip install nltk')
        self.word_tokenize = word_tokenize

    def encode(self, x: str) -> str:
        tokens = self.word_tokenize(x)
        return ' '.join(tokens)

    def decode(self, x: str) -> str:
        return x
def get_model_list():
    """Refresh all workers, then return the model names sorted by priority.

    Uses the module-level `args.controller_url`, `priority` map and `logger`.
    """
    refresh = requests.post(args.controller_url + '/refresh_all_workers')
    assert refresh.status_code == 200
    listing = requests.post(args.controller_url + '/list_models')
    models = listing.json()['models']
    # Models with a known priority key sort by it; the rest by their own name.
    models.sort(key=lambda name: priority.get(name, name))
    logger.info(f'Models: {models}')
    return models
class JobManager(MsfManager):
    """Thin wrapper around the Metasploit RPC job endpoints."""

    def list(self):
        """Return the currently running jobs."""
        return self.rpc.call(MsfRpcMethod.JobList)

    def stop(self, jobid):
        """Terminate the job with the given id (no return value)."""
        self.rpc.call(MsfRpcMethod.JobStop, [jobid])

    def info(self, jobid):
        """Return detailed information about a job."""
        return self.rpc.call(MsfRpcMethod.JobInfo, [jobid])

    def info_by_uuid(self, uuid):
        """Return module results for the task identified by uuid."""
        return self.rpc.call(MsfRpcMethod.ModuleResults, [uuid])
def bert_tokenize(sent):
    """Split each token of a sentence into BERT subtokens.

    `sent` is a sequence of token dicts with 'text', 'text_with_ws' and
    'lemma' keys (spaCy-style — TODO confirm against the caller).  Each
    subtoken record keeps the source token's fields plus its sentence index
    as `text_id`.  Uses the module-level `tokenizer`.
    """
    out = []
    for idx, tok in enumerate(sent):
        for sub in tokenizer.tokenize(tok['text'].strip()):
            out.append({'text': tok['text'],
                        'text_with_ws': tok['text_with_ws'],
                        'lemma': tok['lemma'],
                        'sub': sub,
                        'text_id': idx})
    return out
def loss_game_nfsp_dqn_params(env: MultiAgentEnv) -> Dict[(str, Any)]:
    """Hyperparameters for the NFSP best-response DQN on medium Oshi-Zumo.

    Overrides the GRL default DQN params with a small 32x32 network,
    valid-actions epsilon-greedy exploration, and NFSP-sized buffers.
    The `env` argument is unused; kept for the common params-fn interface.
    """
    # NOTE(review): 'epsilon_timesteps': int(.0) evaluates to 0, so epsilon
    # drops straight to final_epsilon — looks like a garbled constant; confirm.
    return merge_dicts(GRL_DEFAULT_OSHI_ZUMO_MEDIUM_DQN_PARAMS, {'metrics_smoothing_episodes': 10000, 'exploration_config': {'epsilon_timesteps': int(.0), 'final_epsilon': 0.001, 'initial_epsilon': 0.06, 'type': ValidActionsEpsilonGreedy}, 'model': merge_dicts(MODEL_DEFAULTS, {'fcnet_hiddens': [32, 32]}), 'target_network_update_freq': 100000, 'buffer_size': 100000, 'lr': 0.007, 'rollout_fragment_length': 16, 'train_batch_size': 4096})
def initalizeEnvironment(environment, logger):
    """Set up the datacenter, workload, scheduler and execution environment.

    A non-empty `environment` selects the real-host Framework path (with a
    database and DFW workload); an empty string selects the Simulator with a
    Bitbrain-trace fog and BWGD workload.  Performs the initial container
    deployment/placement and returns (datacenter, workload, scheduler, env,
    stats).  (Function name spelling kept for caller compatibility.)
    """
    if (environment != ''):
        db = Database(DB_NAME, DB_HOST, DB_PORT)
    ' Can be SimpleFog, BitbrainFog // Datacenter '
    if (environment != ''):
        datacenter = Datacenter(HOSTS_IP, environment)
    else:
        datacenter = BitbrainFog(HOSTS)
    ' Can be SWSD, BWGD // DFW '
    if (environment != ''):
        workload = DFW(NEW_CONTAINERS, db)
    else:
        workload = BWGD(NEW_CONTAINERS, 3)
    ' Can be LRMMTR, RF, RL, RM, Random, RLRMMTR, TMMR, TMMTR, GA, GOBI '
    scheduler = RandomScheduler()
    hostlist = datacenter.generateHosts()
    if (environment != ''):
        env = Framework(scheduler, CONTAINERS, INTERVAL_TIME, hostlist, db, environment, logger)
    else:
        env = Simulator(TOTAL_POWER, ROUTER_BW, scheduler, CONTAINERS, INTERVAL_TIME, hostlist)
    # First interval: generate containers, deploy, place and migrate them.
    newcontainerinfos = workload.generateNewContainers(env.interval)
    deployed = env.addContainersInit(newcontainerinfos)
    decision = scheduler.placement(deployed)
    migrations = env.allocateInit(decision)
    workload.updateDeployedContainers(env.getCreationIDs(migrations, deployed))
    print("Deployed containers' creation IDs:", env.getCreationIDs(migrations, deployed))
    print('Containers in host:', env.getContainersInHosts())
    print('Schedule:', env.getActiveContainerList())
    printDecisionAndMigrations(decision, migrations)
    # Record statistics for the initial interval.
    stats = Stats(env, workload, datacenter, scheduler)
    stats.saveStats(deployed, migrations, [], deployed, decision)
    return (datacenter, workload, scheduler, env, stats)
_model
def cspresnet50w(pretrained=False, **kwargs):
    """Build a CSP-ResNet-50w; extra kwargs are forwarded to the factory."""
    return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs)
def dobldobl_set_solution(nvar, sol, verbose=False):
    """Set `sol` (in `nvar` variables) as the start solution for
    double-double-precision Pade continuation.

    Returns the status code of the underlying phcpy C-interface call.
    """
    from phcpy.phcpy2c3 import py2c_padcon_initialize_dobldobl_solution
    from phcpy.interface import store_dobldobl_solutions
    # Store the single solution in the container, then initialize the
    # tracker with the first (index 1) stored solution.
    store_dobldobl_solutions(nvar, [sol])
    return py2c_padcon_initialize_dobldobl_solution(1, int(verbose))
def drn_d_105(BatchNorm, pretrained=True):
    """Build a DRN-D-105 backbone, optionally loading pretrained weights.

    The classifier ('fc') entries are dropped from the downloaded state
    dict before loading.
    """
    model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        # BUG FIX (clarity): use a dedicated name instead of rebinding the
        # boolean `pretrained` parameter to the downloaded state dict.
        state_dict = model_zoo.load_url(model_urls['drn-d-105'])
        del state_dict['fc.weight']
        del state_dict['fc.bias']
        model.load_state_dict(state_dict)
    return model
_module()
class HWFolderMultipleGTDataset(BaseDHDataset):
    """Dehazing dataset over LQ/GT (and optional transmission) frame folders.

    Annotation-file lines are "<key> <sequence_length>", where key is
    "<dataset>/<folder>" and the folder name encodes the haze parameters as
    "..._<light>_<beta>".
    """

    def __init__(self, lq_folder, gt_folder, pipeline, trans_folder=None, ann_file=None, num_input_frames=None, test_mode=True):
        super().__init__(pipeline, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.trans_folder = str(trans_folder)  # becomes the string 'None' when not given
        self.ann_file = ann_file
        if ((num_input_frames is not None) and (num_input_frames <= 0)):
            raise ValueError(f'"num_input_frames" must be None or positive, but got {num_input_frames}.')
        self.num_input_frames = num_input_frames
        self.data_infos = self.load_annotations()

    def _load_annotations_from_file(self):
        """Build one info dict per sequence from the annotation file."""
        data_infos = []
        ann_list = mmcv.list_from_file(self.ann_file)
        for ann in ann_list:
            (key, sequence_length) = ann.strip().split(' ')
            if (self.num_input_frames is None):
                # Use the full sequence; sequence_length is still a str here
                # and is cast to int when building data_info below.
                num_input_frames = sequence_length
            else:
                num_input_frames = self.num_input_frames
            dataset = key.split('/')[(- 2)]
            folder = key.split('/')[(- 1)]
            # Folder name encodes "..._<light>_<beta>"; light is stored in [0, 1].
            haze_beta = float(folder.split('_')[(- 1)])
            haze_light = (float(folder.split('_')[(- 2)]) / 255)
            data_info = dict(lq_path=self.lq_folder, gt_path=self.gt_folder, trans_path=self.trans_folder, key=key, num_input_frames=int(num_input_frames), sequence_length=int(sequence_length), dataset=dataset, folder=folder, haze_beta=haze_beta, haze_light=haze_light)
            data_infos.append(data_info)
        return data_infos

    def load_annotations(self):
        """Load annotations; only the ann_file path is currently supported."""
        if (self.ann_file is not None):
            return self._load_annotations_from_file()
        else:
            raise NotImplementedError
        # NOTE(review): everything below is unreachable — both branches
        # above return or raise. It scans lq_folder/gt_folder directly and
        # looks like a disabled fallback; confirm whether it should replace
        # the NotImplementedError branch.
        logger = get_root_logger()
        datasets = os.listdir(self.lq_folder)
        assert (datasets == os.listdir(self.gt_folder))
        datasets.sort()
        logger.info(f'Datasets ({len(datasets)}): {datasets}')
        data_infos = []
        for dataset in datasets:
            folders = os.listdir(osp.join(osp.join(self.lq_folder, dataset)))
            assert (folders == os.listdir(osp.join(osp.join(self.gt_folder, dataset))))
            folders.sort()
            for folder in folders:
                lq_folder = osp.join(self.lq_folder, dataset, folder)
                sequence_length = len(os.listdir(lq_folder))
                if (self.num_input_frames is None):
                    num_input_frames = sequence_length
                else:
                    num_input_frames = self.num_input_frames
                haze_beta = float(folder.split('_')[(- 1)])
                haze_light = (float(folder.split('_')[(- 2)]) / 255)
                data_info = dict(lq_path=self.lq_folder, gt_path=self.gt_folder, trans_path=self.trans_folder, key=osp.join(dataset, folder), num_input_frames=num_input_frames, sequence_length=sequence_length, dataset=dataset, folder=folder, haze_beta=haze_beta, haze_light=haze_light)
                data_infos.append(data_info)
        return data_infos

    def evaluate(self, results, logger=None):
        """Aggregate per-sample eval results by dataset and haze beta.

        In train mode returns {metric: overall mean}; in test mode returns
        formatted per-dataset/per-beta summary strings.
        """
        if (not isinstance(results, list)):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        eval_results = [res['eval_result'] for res in results]
        eval_result = defaultdict(list)
        # Start from the metrics known to be present in the first result.
        metrics = [metric for metric in AVAILABLE_METRICS if (metric in eval_results[0].keys())]
        # Pre-create all (dataset, beta, metric) buckets.
        for dataset in AVAILABLE_DATASETS:
            for haze_beta in AVAILABLE_BETAS:
                for metric in metrics:
                    eval_result[f'{dataset}/{haze_beta}/{metric}'] = list()
        datasets = []
        haze_betas = []
        # Scatter each sample's values into overall, per-beta and avg buckets.
        for i in range(len(results)):
            res = eval_results[i]
            dataset = results[i]['dataset']
            haze_beta = results[i]['haze_beta']
            for (metric, val) in res.items():
                eval_result[metric].append(val)
                eval_result[f'{dataset}/{haze_beta}/{metric}'].append(val)
                eval_result[f'{dataset}/avg/{metric}'].append(val)
                if (metric not in metrics):
                    metrics.append(metric)
            if (dataset not in datasets):
                datasets.append(dataset)
            if (haze_beta not in haze_betas):
                haze_betas.append(haze_beta)
        # Keep only known datasets/betas, in their canonical order.
        datasets = [dataset for dataset in AVAILABLE_DATASETS if (dataset in datasets)]
        haze_betas = [haze_beta for haze_beta in AVAILABLE_BETAS if (haze_beta in haze_betas)]
        if (not self.test_mode):
            # Training: one overall mean per metric (mean of dataset means).
            fmt_eval_result = {}
            for metric in metrics:
                values = [(sum(eval_result[f'{dataset}/avg/{metric}']) / len(eval_result[f'{dataset}/avg/{metric}'])) for dataset in datasets]
                fmt_eval_result[metric] = (sum(values) / len(values))
            return fmt_eval_result
        fmt_eval_result = {}
        for dataset in (datasets + ['Average']):
            # Sanity-check that every bucket for this dataset is aligned.
            for haze_beta in haze_betas:
                for metric in metrics:
                    assert (len(eval_result[f'{dataset}/{haze_beta}/{metric}']) == len(eval_result[f'{dataset}/{haze_betas[0]}/{metrics[0]}']))
            num_folders = len(eval_result[f'{dataset}/{haze_betas[0]}/{metrics[0]}'])
            assert (len(eval_result[f'{dataset}/avg/{metrics[0]}']) == (num_folders * len(haze_betas)))
            # NOTE(review): `dataset` is a str, so `dataset == ['Average']`
            # is always False and this assert never runs; likely meant
            # `dataset == 'Average'` — confirm.
            if (dataset == ['Average']):
                assert (num_folders == len(datasets))
            key = f'[{dataset:10s} ({num_folders})] '
            info = ''
            for haze_beta in (haze_betas + ['avg']):
                info += f'{haze_beta}: '
                for metric in metrics:
                    val = (sum(eval_result[f'{dataset}/{haze_beta}/{metric}']) / len(eval_result[f'{dataset}/{haze_beta}/{metric}']))
                    # Per-dataset means feed the 'Average' buckets consumed
                    # on the final loop iteration.
                    if ((dataset != 'Average') and (haze_beta != 'avg')):
                        eval_result[f'Average/{haze_beta}/{metric}'].append(val)
                        eval_result[f'Average/avg/{metric}'].append(val)
                    if (metric in ('L1', 'PSNR', 'SSIM')):
                        info += f'{val:.4f}/'
                info = f"{info.rstrip('/')}, "
            fmt_eval_result[key] = info
        return fmt_eval_result
class AutoPipelineForImage2Image(ConfigMixin):
    """Factory that resolves and builds the right image-to-image pipeline.

    Never instantiated directly — use `from_pretrained` or `from_pipe`.
    """
    config_name = 'model_index.json'

    def __init__(self, *args, **kwargs):
        raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_pipe(pipeline)` methods.')

    @classmethod
    def from_pretrained(cls, pretrained_model_or_path, **kwargs):
        """Resolve the pipeline class from the saved config and load it.

        BUG FIX: restored the stripped @classmethod decorator — the method
        takes `cls` and the __init__ error message directs callers to invoke
        it on the class.
        """
        cache_dir = kwargs.pop('cache_dir', DIFFUSERS_CACHE)
        force_download = kwargs.pop('force_download', False)
        resume_download = kwargs.pop('resume_download', False)
        proxies = kwargs.pop('proxies', None)
        use_auth_token = kwargs.pop('use_auth_token', None)
        local_files_only = kwargs.pop('local_files_only', False)
        revision = kwargs.pop('revision', None)
        load_config_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'resume_download': resume_download, 'proxies': proxies, 'use_auth_token': use_auth_token, 'local_files_only': local_files_only, 'revision': revision}
        config = cls.load_config(pretrained_model_or_path, **load_config_kwargs)
        orig_class_name = config['_class_name']
        # A passed controlnet switches to the ControlNet pipeline variant.
        if ('controlnet' in kwargs):
            orig_class_name = config['_class_name'].replace('Pipeline', 'ControlNetPipeline')
        image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name)
        kwargs = {**load_config_kwargs, **kwargs}
        return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs)

    @classmethod
    def from_pipe(cls, pipeline, **kwargs):
        """Build the image-to-image counterpart of an existing pipeline,
        reusing its components.

        BUG FIX: restored the stripped @classmethod decorator (see
        from_pretrained).
        """
        original_config = dict(pipeline.config)
        original_cls_name = pipeline.__class__.__name__
        image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name)
        # Switch the target class to/from the ControlNet variant depending
        # on whether a controlnet component is supplied.
        if ('controlnet' in kwargs):
            if (kwargs['controlnet'] is not None):
                image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace('ControlNet', '').replace('Img2ImgPipeline', 'ControlNetImg2ImgPipeline'))
            else:
                image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace('ControlNetImg2ImgPipeline', 'Img2ImgPipeline'))
        (expected_modules, optional_kwargs) = _get_signature_keys(image_2_image_cls)
        pretrained_model_name_or_path = original_config.pop('_name_or_path', None)
        # Explicitly passed components win over those of the source pipeline.
        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if (k in kwargs)}
        original_class_obj = {k: pipeline.components[k] for (k, v) in pipeline.components.items() if ((k in expected_modules) and (k not in passed_class_obj))}
        passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if (k in kwargs)}
        original_pipe_kwargs = {k: original_config[k] for (k, v) in original_config.items() if ((k in optional_kwargs) and (k not in passed_pipe_kwargs))}
        # Config keys stored with a '_' prefix are optional kwargs too.
        additional_pipe_kwargs = [k[1:] for k in original_config.keys() if (k.startswith('_') and (k[1:] in optional_kwargs) and (k[1:] not in passed_pipe_kwargs))]
        for k in additional_pipe_kwargs:
            original_pipe_kwargs[k] = original_config.pop(f'_{k}')
        image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs}
        # Anything left over is re-registered (with toggled '_' prefix) so
        # it is not lost on the new pipeline's config.
        unused_original_config = {f"{('' if k.startswith('_') else '_')}{k}": original_config[k] for (k, v) in original_config.items() if (k not in image_2_image_kwargs)}
        missing_modules = ((set(expected_modules) - set(pipeline._optional_components)) - set(image_2_image_kwargs.keys()))
        if (len(missing_modules) > 0):
            raise ValueError(f'Pipeline {image_2_image_cls} expected {expected_modules}, but only {set((list(passed_class_obj.keys()) + list(original_class_obj.keys())))} were passed')
        model = image_2_image_cls(**image_2_image_kwargs)
        model.register_to_config(_name_or_path=pretrained_model_name_or_path)
        model.register_to_config(**unused_original_config)
        return model
def load_data(config):
    """Build the test dataloader and label list for the configured dataset.

    `config.data_sign` selects the data processor; an unknown value raises
    ValueError.
    """
    print('-*-' * 10)
    print(f'current data_sign: {config.data_sign}')
    # Dispatch table replaces the long if/elif chain.
    processor_classes = {
        'conll03': Conll03Processor,
        'zh_msra': MSRAProcessor,
        'zh_onto': Onto4ZhProcessor,
        'en_onto': Onto5EngProcessor,
        'genia': GeniaProcessor,
        'ace2004': ACE2004Processor,
        'ace2005': ACE2005Processor,
        'resume': ResumeZhProcessor,
    }
    if config.data_sign not in processor_classes:
        raise ValueError('Please Notice that your data_sign DO NOT exits !!!!!')
    data_processor = processor_classes[config.data_sign]()
    label_list = data_processor.get_labels()
    tokenizer = BertTokenizer4Tagger.from_pretrained(config.bert_model, do_lower_case=config.do_lower_case)
    dataset_loaders = MRCNERDataLoader(config, data_processor, label_list, tokenizer, mode='test', allow_impossible=True)
    test_dataloader = dataset_loaders.get_dataloader(data_sign='test', num_data_processor=config.num_data_processor)
    return (test_dataloader, label_list)
class HifiganVocoder():
    """Wraps a CodeHiFiGAN vocoder that converts unit codes to waveforms."""

    def __init__(self, vocoder_path, vocoder_cfg_path, use_cuda=True):
        with open(vocoder_cfg_path) as f:
            cfg = json.load(f)
        self.vocoder = CodeHiFiGANVocoder(vocoder_path, cfg).eval()
        self.use_cuda = use_cuda
        if self.use_cuda:
            self.vocoder.cuda()

    def code2wav(self, code, speaker_id=0, pred_dur=False):
        """Synthesize one waveform from a code sequence.

        `code` may be a list of ints or a space-separated string of ints.
        """
        if isinstance(code, str):
            code = [int(tok) for tok in code.split()]
        sample = {'code': torch.LongTensor(code).view(1, -1)}
        # Multi-speaker checkpoints additionally need a speaker id tensor.
        if self.vocoder.model.multispkr:
            sample['spkr'] = torch.LongTensor([speaker_id]).view(1, 1)
        if self.use_cuda:
            sample = utils.move_to_cuda(sample)
        return self.vocoder(sample, pred_dur).detach().cpu().numpy()

    def codes2wav(self, codes, speaker_ids=[0, 4], pred_dur=False):
        """Synthesize a stacked pair of waveforms for exactly two code streams."""
        if isinstance(codes, dict):
            codes = list(codes.values())
        assert len(codes) == 2
        first = self.code2wav(codes[0], speaker_ids[0], pred_dur)
        second = self.code2wav(codes[1], speaker_ids[1], pred_dur)
        return np.stack([first, second])
class GCN(torch.nn.Module):
    """Two-layer graph convolutional network with log-softmax output.

    Layer sizes come from the module-level `dataset`
    (num_node_features -> 16 -> num_classes).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = GCNConv(dataset.num_node_features, 16)
        self.conv2 = GCNConv(16, dataset.num_classes)

    def forward(self, data):
        # First graph convolution + ReLU + dropout (active only in training).
        h = self.conv1(data.x, data.edge_index)
        h = F.dropout(F.relu(h), training=self.training)
        # Second graph convolution, then per-node class log-probabilities.
        h = self.conv2(h, data.edge_index)
        return F.log_softmax(h, dim=1)
def hotpot_biattention(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
    """HotpotQA-style bi-attention between paragraph encodings and a query.

    Assumes h is [batch, M, h_len, d] (M paragraphs) and u is
    [batch, u_len, d] with d == 2 * config.hidden_size — TODO confirm from
    callers.  Returns the attended representation concatenated along the
    last axis.  `is_train`, `u_mask` and `tensor_dict` are unused here.
    """
    (h_len, u_len) = (tf.shape(h)[2], tf.shape(u)[1])
    M = tf.shape(h)[1]
    # Broadcast the query across the M paragraphs.
    u_aug = tf.tile(tf.expand_dims(u, 1), [1, M, 1, 1])
    with tf.variable_scope((scope or 'hotpot_biattention')):
        # Trilinear similarity: w1.h + w2.u + (h * w3).u, tiled to
        # [batch, M, h_len, u_len].
        h_dot = tf.squeeze(tf.tile(tf.expand_dims(tf.layers.dense(h, 1), 3), [1, 1, 1, u_len, 1]), axis=(- 1))
        u_dot = tf.squeeze(tf.tile(tf.expand_dims(tf.layers.dense(u_aug, 1), 2), [1, 1, h_len, 1, 1]), axis=(- 1))
        dot_scale = tf.get_variable('dot_scale', [(config.hidden_size * 2)])
        cross_dot = tf.einsum('ijkl,ijml->ijkm', (h * dot_scale), u_aug)
        # Mask padded h positions with a large negative bias before softmax.
        att = (((h_dot + u_dot) + cross_dot) - (1e+30 * (1.0 - tf.cast(tf.tile(tf.expand_dims(h_mask, axis=3), [1, 1, 1, u_len]), 'float32'))))
        # Context-to-query attention, and query-to-context via the max over keys.
        weight_one = tf.nn.softmax(att)
        weight_two = tf.nn.softmax(tf.reduce_max(att, axis=(- 1)))
        output_one = tf.einsum('ijkl,ijlm->ijkm', weight_one, u_aug)
        output_two = tf.einsum('ijk,ijkl->ijl', weight_two, h)
        # Concatenate [h; c2q; h*c2q; q2c*c2q] along the feature axis.
        output = tf.concat([h, output_one, (h * output_one), tf.einsum('ijk,ijlk->ijlk', output_two, output_one)], axis=(- 1))
        return output
class MJOPTION(Structure):
    """ctypes mirror of MuJoCo's mjOption struct (simulation options).

    Field order and types must match the C struct layout exactly; do not
    reorder.
    """
    _fields_ = [
        ('timestep', c_double),
        ('apirate', c_double),
        ('tolerance', c_double),
        ('impratio', c_double),
        ('gravity', (c_double * 3)),
        ('wind', (c_double * 3)),
        ('magnetic', (c_double * 3)),
        ('density', c_double),
        ('viscosity', c_double),
        ('o_margin', c_double),
        ('o_solref', (c_double * 2)),
        ('o_solimp', (c_double * 3)),
        ('mpr_tolerance', c_double),
        ('mpr_iterations', c_int),
        ('integrator', c_int),
        ('collision', c_int),
        ('impedance', c_int),
        ('reference', c_int),
        ('solver', c_int),
        ('iterations', c_int),
        ('disableflags', c_int),
        ('enableflags', c_int),
    ]
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
    """Report multi-line /*...*/ comments and multi-line strings on a line.

    Both confuse this line-based linter, so each is flagged at confidence 5
    through the `error(filename, linenum, category, confidence, msg)` callback.
    """
    line = clean_lines.elided[linenum]
    # Strip escaped backslashes first so that \\" is not miscounted as an
    # escaped quote.
    line = line.replace('\\\\', '')
    opens_comment = line.count('/*') > line.count('*/')
    if opens_comment:
        error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. Lint may give bogus warnings. Consider replacing these with //-style comments, with #if 0...#endif, or with more clearly structured multi-line comments.')
    unescaped_quotes = line.count('"') - line.count('\\"')
    if unescaped_quotes % 2:
        error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t do well with such strings, and may give bogus warnings. Use C++11 raw strings or concatenation instead.')
def split_to_dir(train_idxes, val_idxes, test_idxes, label_list):
    """Write AAPD train/val/test text and label splits under data/AAPD2.

    Also prints, for the val and then test split, how many samples have a
    label combination already present in the training split.
    """
    texts = []
    with open('data/AAPD/text_all', 'r') as f:
        for line in f:
            texts.append(line)

    def write_text(path, idxes):
        # One raw text line per selected index.
        with open(path, 'w') as f:
            for i in idxes:
                f.write(texts[i])

    def write_label(path, idxes):
        # Space-joined labels per selected index.
        with open(path, 'w') as f:
            for i in idxes:
                f.write((' '.join(label_list[i]) + '\n'))

    if not os.path.isdir('data/AAPD2'):
        os.makedirs('data/AAPD2')
    write_text('data/AAPD2/text_train', train_idxes)
    write_text('data/AAPD2/text_val', val_idxes)
    write_text('data/AAPD2/text_test', test_idxes)
    write_label('data/AAPD2/label_train', train_idxes)
    write_label('data/AAPD2/label_val', val_idxes)
    write_label('data/AAPD2/label_test', test_idxes)
    # Collect the label combinations seen in training.
    # BUG FIX: start from an empty set — it was seeded with the integer
    # train indices, which can never equal the label tuples being tested
    # (output is unchanged, but the old code was misleading and wasteful).
    S = set()
    for idx in train_idxes:
        S.add(tuple(label_list[idx]))
    x = 0
    for idx in val_idxes:
        if tuple(label_list[idx]) in S:
            x += 1
    print(x)
    x = 0
    for idx in test_idxes:
        if tuple(label_list[idx]) in S:
            x += 1
    print(x)
class PointSupDatasetMapper():
    """Maps a dataset dict into training tensors with point supervision.

    Applies the configured augmentations to the image, transforms the
    per-instance point annotations accordingly, and samples `sample_points`
    points per instance.
    """

    def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, sample_points: int=0):
        """
        Args:
            is_train: whether this mapper feeds training (True) or inference.
            augmentations: augmentations/transforms to apply to the image.
            image_format: image format handed to utils.read_image.
            sample_points: number of supervision points sampled per instance.
        """
        self.is_train = is_train
        self.augmentations = T.AugmentationList(augmentations)
        self.image_format = image_format
        self.sample_points = sample_points
        logger = logging.getLogger(__name__)
        mode = ('training' if is_train else 'inference')
        logger.info(f'[DatasetMapper] Augmentations used in {mode}: {augmentations}')
        logger.info(f'Point Augmentations used in {mode}: sample {sample_points} points')

    @classmethod
    def from_config(cls, cfg, is_train: bool=True):
        """Build constructor kwargs from a config object.

        BUG FIX: restored the stripped @classmethod decorator — the method
        takes `cls` and never touches instance state.
        """
        augs = utils.build_augmentation(cfg, is_train)
        if cfg.INPUT.CROP.ENABLED and is_train:
            # Crops would invalidate the fixed point annotations.
            raise ValueError('Crop augmentation not supported to point supervision.')
        ret = {'is_train': is_train, 'augmentations': augs, 'image_format': cfg.INPUT.FORMAT, 'sample_points': cfg.INPUT.SAMPLE_POINTS}
        return ret

    def __call__(self, dataset_dict):
        """Transform one dataset dict; the input dict is deep-copied first."""
        dataset_dict = copy.deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict['file_name'], format=self.image_format)
        utils.check_image_size(dataset_dict, image)
        aug_input = T.AugInput(image)
        transforms = self.augmentations(aug_input)
        image = aug_input.image
        image_shape = image.shape[:2]
        # HWC -> CHW tensor.
        dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if not self.is_train:
            # Inference needs no annotations.
            dataset_dict.pop('annotations', None)
            return dataset_dict
        if 'annotations' in dataset_dict:
            for ann in dataset_dict['annotations']:
                # Shift point coords to pixel centers before transforming.
                point_coords_wrt_image = np.array(ann['point_coords']).astype(float)
                point_coords_wrt_image = point_coords_wrt_image + 0.5
                ann['point_coords'] = point_coords_wrt_image
            annos = [transform_instance_annotations(obj, transforms, image_shape) for obj in dataset_dict.pop('annotations') if obj.get('iscrowd', 0) == 0]
            instances = annotations_to_instances(annos, image_shape, sample_points=self.sample_points)
            dataset_dict['instances'] = utils.filter_empty_instances(instances)
        return dataset_dict
class PredefinedPromptExtractor(PromptExtractor):
    """Embeds class names with a fixed set of prompt templates and averages
    the resulting CLIP text features."""

    def __init__(self, templates: List[str]):
        super().__init__()
        # BUG FIX: the `templates` argument was silently ignored and always
        # overwritten by the hard-coded list below. Honor caller-supplied
        # templates; keep the previous list as the default for empty/None
        # input so existing callers that passed nothing meaningful are
        # unaffected.
        self.templates = templates if templates else ['a photo of a {}.', 'This is a photo of a {}', 'There is a {} in the scene', 'There is the {} in the scene', 'a photo of a {} in the scene', 'a photo of a small {}.', 'a photo of a medium {}.', 'a photo of a large {}.', 'This is a photo of a small {}.', 'This is a photo of a medium {}.', 'This is a photo of a large {}.', 'There is a small {} in the scene.', 'There is a medium {} in the scene.', 'There is a large {} in the scene.']

    def init_task_prompt(self, clip_model):
        # Predefined templates need no learned task prompt embeddings.
        self.task_embeddings = None

    def forward(self, noun_list: List[str], clip_model: nn.Module):
        """Return L2-normalized text features, averaged over all templates.

        Output is [len(noun_list), feature_dim] on the CLIP model's device.
        """
        text_features_bucket = []
        for template in self.templates:
            noun_tokens = [clip.tokenize(template.format(noun)) for noun in noun_list]
            # text_projection may be a plain tensor or an nn.Linear module.
            target_device = (clip_model.text_projection.data.device if torch.is_tensor(clip_model.text_projection) else clip_model.text_projection.weight.device)
            text_inputs = torch.cat(noun_tokens).to(target_device)
            text_features = clip_model.encode_text(text_inputs)
            text_features /= text_features.norm(dim=(- 1), keepdim=True)
            text_features_bucket.append(text_features)
            del text_inputs
        # Ensemble: average per-template features, then renormalize.
        text_features = torch.stack(text_features_bucket).mean(dim=0)
        text_features = (text_features / text_features.norm(dim=(- 1), keepdim=True))
        return text_features
# NOTE(review): auto-generated hook; presumably registers the following
# definition with the generator's module registry -- confirm against the
# code generator before relying on it.
_module()
class CenterNet(SingleStageDetector):
    """CenterNet detector: a thin configuration shell over SingleStageDetector.

    All detection logic lives in the parent class; this subclass exists only
    to give the registry a named entry and forwards every component verbatim.
    """

    def __init__(self, backbone: ConfigType, neck: ConfigType, bbox_head: ConfigType, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, data_preprocessor: OptConfigType = None, init_cfg: OptMultiConfig = None) -> None:
        # Collect the standard single-stage components and hand them straight
        # to the parent constructor.
        parent_kwargs = dict(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            data_preprocessor=data_preprocessor,
            init_cfg=init_cfg,
        )
        super().__init__(**parent_kwargs)
class BasicRFB(nn.Module):
    """Receptive Field Block: three parallel branches with increasing
    dilation, fused by a 1x1 conv and combined with a shortcut as
    ``relu(fused * scale + shortcut(x))``.
    """

    def __init__(self, in_planes, out_planes, stride=1, scale=0.1, visual=1):
        super(BasicRFB, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        inter_planes = in_planes // 8
        # Branch 0: 1x1 reduce -> 3x3 dilated (rate = visual).
        self.branch0 = nn.Sequential(
            BasicConv(in_planes, 2 * inter_planes, kernel_size=1, stride=stride),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=visual, dilation=visual, relu=False),
        )
        # Branch 1: 1x1 reduce -> 3x3 -> 3x3 dilated (rate = visual + 1).
        self.branch1 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1),
            BasicConv(inter_planes, 2 * inter_planes, kernel_size=3, stride=stride, padding=1),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=visual + 1, dilation=visual + 1, relu=False),
        )
        # Branch 2: 1x1 reduce -> two 3x3 -> 3x3 dilated (rate = 2*visual + 1).
        self.branch2 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1),
            BasicConv(inter_planes, (inter_planes // 2) * 3, kernel_size=3, stride=1, padding=1),
            BasicConv((inter_planes // 2) * 3, 2 * inter_planes, kernel_size=3, stride=stride, padding=1),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=2 * visual + 1, dilation=2 * visual + 1, relu=False),
        )
        # Fuse the concatenated 6*inter_planes channels back to out_planes.
        self.ConvLinear = BasicConv(6 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
        self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        branch_outputs = [self.branch0(x), self.branch1(x), self.branch2(x)]
        fused = self.ConvLinear(torch.cat(branch_outputs, 1))
        residual = self.shortcut(x)
        return self.relu(fused * self.scale + residual)
class normalize(nn.Module):
    """Stateless module that L2-normalizes its input along dim 1."""

    def __init__(self):
        super(normalize, self).__init__()

    def forward(self, x):
        # Scale each slice along dim 1 to unit Euclidean norm.
        return F.normalize(x, p=2, dim=1)
class Poisson(Distribution):
    """Product of independent per-feature Poisson distributions.

    ``lambdas`` holds one non-negative rate per feature; ``None`` defers
    initialization until the dimensionality is known (``_initialize``).
    ``inertia``/``frozen``/``check_data`` follow the base Distribution
    semantics (presumably pomegranate-style -- confirm against the base
    class).
    """

    def __init__(self, lambdas=None, inertia=0.0, frozen=False, check_data=True):
        super().__init__(inertia=inertia, frozen=frozen, check_data=check_data)
        self.name = 'Poisson'
        # Validate shape (1-D) and non-negativity; stays None for lazy init.
        self.lambdas = _check_parameter(_cast_as_parameter(lambdas), 'lambdas', min_value=0, ndim=1)
        self._initialized = (lambdas is not None)
        # Number of features, known only once lambdas is provided.
        self.d = (self.lambdas.shape[(- 1)] if self._initialized else None)
        self._reset_cache()

    def _initialize(self, d):
        """Lazily allocate zeroed rates for ``d`` features."""
        self.lambdas = _cast_as_parameter(torch.zeros(d, dtype=self.dtype, device=self.device))
        self._initialized = True
        super()._initialize(d)

    def _reset_cache(self):
        """Zero the sufficient-statistic accumulators and refresh log(lambdas)."""
        if (self._initialized == False):
            return
        # Weighted count and weighted sum of observations, per feature.
        self.register_buffer('_w_sum', torch.zeros(self.d, device=self.device))
        self.register_buffer('_xw_sum', torch.zeros(self.d, device=self.device))
        # Cached log-rates used by log_probability.
        self.register_buffer('_log_lambdas', torch.log(self.lambdas))

    def sample(self, n):
        """Draw ``n`` samples, shape (n, d)."""
        return torch.distributions.Poisson(self.lambdas).sample([n])

    def log_probability(self, X):
        """Return per-row log-likelihood: sum_d x*log(lambda) - lambda - lgamma(x+1)."""
        X = _check_parameter(_cast_as_tensor(X), 'X', min_value=0.0, ndim=2, shape=((- 1), self.d), check_parameter=self.check_data)
        return torch.sum((((X * self._log_lambdas) - self.lambdas) - torch.lgamma((X + 1))), dim=(- 1))

    def summarize(self, X, sample_weight=None):
        """Accumulate weighted sufficient statistics for a later M-step."""
        if (self.frozen == True):
            return
        (X, sample_weight) = super().summarize(X, sample_weight=sample_weight)
        _check_parameter(X, 'X', min_value=0, check_parameter=self.check_data)
        # In-place accumulation keeps the registered buffers (not reassigned).
        self._w_sum[:] = (self._w_sum + torch.sum(sample_weight, dim=0))
        self._xw_sum[:] = (self._xw_sum + torch.sum((X * sample_weight), dim=0))

    def from_summaries(self):
        """M-step: MLE rate is the weighted mean, blended via inertia."""
        if (self.frozen == True):
            return
        lambdas = (self._xw_sum / self._w_sum)
        _update_parameter(self.lambdas, lambdas, self.inertia)
        self._reset_cache()
def _find_conditional_parameters(dim, S):
Sig12Sig22inv = []
cond_var = []
for e in range(dim):
S11 = copy.copy(S[e][e])
S12 = S[e][:]
S12 = np.delete(S12, e)
S21 = S[e][:]
S21 = np.delete(S21, e)
S22 = S[:][:]
S22 = np.delete(S22, e, 0)
S22 = np.delete(S22, e, 1)
S22inv = npla.inv(S22)
S12S22inv = S12.dot(S22inv)
Sig12Sig22inv.append(S12S22inv)
cond_var.append((S11 - S12S22inv.dot(S21)))
return (cond_var, Sig12Sig22inv) |
class TrainerConfigCLAM(_TrainerConfig):
    """Configuration holder for CLAM training runs.

    Every declared keyword argument is stored verbatim as an instance
    attribute of the same name; any remaining ``**kwargs`` configure the
    model via ``ModelConfigCLAM``.
    """

    def __init__(self, *, num_splits: int=1, k: int=3, k_start: int=(- 1), k_end: int=(- 1), max_epochs: int=20, lr: float=0.0001, reg: float=1e-05, label_frac: float=1, weighted_sample: bool=False, log_data: bool=False, testing: bool=False, early_stopping: bool=False, subtyping: bool=False, seed: int=1, results_dir: Optional[str]=None, n_classes: Optional[int]=None, split_dir=None, data_root_dir=None, micro_average=False, **kwargs):
        # Reflectively copy every named argument onto self. dict(locals())
        # snapshots the frame before the loop introduces new bindings.
        # NOTE(review): locals() also contains 'self', so this sets
        # `self.self = self` -- harmless-looking but surprising; confirm that
        # nothing downstream (e.g. to_dict) serializes it.
        for (argname, argval) in dict(locals()).items():
            if (argname != 'kwargs'):
                setattr(self, argname, argval)
        # Unrecognized kwargs belong to the model, not the trainer.
        self.model_config = ModelConfigCLAM(**kwargs)

    def _to_clam_args(self):
        """Flatten this config plus its model config into a CLAM_Args object,
        renaming keys to match CLAM's expected argument names."""
        # Imported lazily to avoid a hard dependency at module-import time.
        from ..clam import CLAM_Args
        all_kw = self.to_dict()
        all_kw.update(self.model_config.to_dict())
        # CLAM uses 'model_type'/'drop_out' where this config says 'model'/'dropout'.
        all_kw['model_type'] = all_kw['model']
        all_kw['drop_out'] = all_kw['dropout']
        del all_kw['model']
        del all_kw['dropout']
        # model_kwargs is consumed by ModelConfigCLAM, not by CLAM_Args.
        del all_kw['model_kwargs']
        return CLAM_Args(**all_kw)
class DCNv2(nn.Module):
    """Modulated deformable convolution wrapper.

    A plain conv over ``offset`` predicts per-location sampling offsets and a
    sigmoid modulation mask, which drive the underlying DeformConv2d on ``x``.
    """

    def __init__(self, c1, c2, k, s, p, g=1):
        super().__init__()
        self.dcn = DeformConv2d(c1, c2, k, s, p, groups=g)
        # g*3*k*k channels: two offset groups plus one mask group per kernel tap.
        self.offset_mask = nn.Conv2d(c2, g * 3 * k * k, k, s, p)
        self._init_offset()

    def _init_offset(self):
        # Zero-init: offsets start at 0 and mask logits at 0 (sigmoid -> 0.5).
        self.offset_mask.weight.data.zero_()
        self.offset_mask.bias.data.zero_()

    def forward(self, x, offset):
        prediction = self.offset_mask(offset)
        part_a, part_b, mask_logits = torch.chunk(prediction, 3, dim=1)
        sampling_offsets = torch.cat([part_a, part_b], dim=1)
        modulation = mask_logits.sigmoid()
        return self.dcn(x, sampling_offsets, modulation)
def _get_sampling_method(training_pars: dict) -> Callable[([List[SentenceEvidence], Dict[(str, List[SentenceEvidence])]], List[SentenceEvidence])]:
if (training_pars['sampling_method'] == 'random'):
sampling_ratio = training_pars['sampling_ratio']
logging.info(f'Setting up random sampling with negative/positive ratio = {sampling_ratio}')
def random_sampler(document: List[SentenceEvidence], _: Dict[(str, List[SentenceEvidence])]) -> List[SentenceEvidence]:
positives = list(filter((lambda s: ((s.kls == 1) and (len(s.sentence) > 0))), document))
if any(map((lambda s: (len(s.sentence) == 0)), positives)):
raise ValueError('Some positive sentences are of zero length!')
all_negatives = list(filter((lambda s: ((s.kls == 0) and (len(s.sentence) > 0))), document))
num_negatives = min(len(all_negatives), round((len(positives) * sampling_ratio)))
random_negatives = random.choices(all_negatives, k=num_negatives)
results = sorted((positives + random_negatives))
random.shuffle(results)
return results
return random_sampler
elif (training_pars['sampling_method'] == 'everything'):
def everything_sampler(document: List[SentenceEvidence], _: Dict[(str, List[SentenceEvidence])]) -> List[SentenceEvidence]:
return document
return everything_sampler
else:
raise ValueError(f"Unknown sampling method for training: {training_pars['sampling_method']}") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.