code stringlengths 101 5.91M |
|---|
def normalize(policy_id, score):
    """Min-max normalize *score* against the reference score range.

    The reference min/max are looked up in the module-level ``infos``
    tables under the key '<policy_id>-v0'.
    """
    ref_key = policy_id + '-v0'
    low = infos.REF_MIN_SCORE[ref_key]
    high = infos.REF_MAX_SCORE[ref_key]
    return (score - low) / (high - low)
def _update_model_res_skip(old_model, new_model):
    """Fuse each WaveNet block's separate residual/skip convolutions on
    *new_model* into a single weight-normalized ``res_skip`` Conv1d.

    For layer i, the weight norm of ``res_layers[i]`` and ``skip_layers[i]``
    is removed, their weights and biases are concatenated (residual output
    channels first) into one 1x1 Conv1d, and weight norm is re-applied.
    The last layer has no residual branch, so only skip weights are used.

    NOTE(review): *old_model* is unused — presumably kept for signature
    compatibility with sibling converters; confirm before removing.
    """
    for idx in range(0, len(new_model.WN)):
        wavenet = new_model.WN[idx]
        n_channels = wavenet.n_channels
        n_layers = wavenet.n_layers
        wavenet.res_skip_layers = torch.nn.ModuleList()
        for i in range(0, n_layers):
            # All layers except the last emit residual + skip channels.
            if (i < (n_layers - 1)):
                res_skip_channels = (2 * n_channels)
            else:
                res_skip_channels = n_channels
            res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
            # Strip weight norm so the raw .weight/.bias can be concatenated.
            skip_layer = torch.nn.utils.remove_weight_norm(wavenet.skip_layers[i])
            if (i < (n_layers - 1)):
                res_layer = torch.nn.utils.remove_weight_norm(wavenet.res_layers[i])
                res_skip_layer.weight = torch.nn.Parameter(torch.cat([res_layer.weight, skip_layer.weight]))
                res_skip_layer.bias = torch.nn.Parameter(torch.cat([res_layer.bias, skip_layer.bias]))
            else:
                res_skip_layer.weight = torch.nn.Parameter(skip_layer.weight)
                res_skip_layer.bias = torch.nn.Parameter(skip_layer.bias)
            # Re-apply weight norm to the fused layer.
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            wavenet.res_skip_layers.append(res_skip_layer)
        # The separate layers are now redundant.
        del wavenet.res_layers
        del wavenet.skip_layers
class MyModel(object):
    """Toy ns-3 model that schedules and then handles one simulator event."""

    def Start(self):
        # Capture the current simulation time and fire HandleEvent 10 s later.
        scheduled_at = ns.core.Simulator.Now().GetSeconds()
        ns.core.Simulator.Schedule(ns.core.Seconds(10.0), self.HandleEvent, scheduled_at)

    def HandleEvent(self, value):
        # Report both the firing time and the original scheduling time.
        fired_at = ns.core.Simulator.Now().GetSeconds()
        print('Member method received event at', fired_at, 's started at', value, 's')
def gen_grad_ens(x, logits, y):
    """Sum the cross-entropy losses of an ensemble of logits and return
    (loss, gradient of loss w.r.t. x)."""
    adv_loss = K.categorical_crossentropy(logits[0], y, from_logits=True)
    # Accumulate the remaining ensemble members, if any.
    for member_logits in logits[1:]:
        adv_loss = adv_loss + K.categorical_crossentropy(member_logits, y, from_logits=True)
    (grad,) = K.gradients(adv_loss, [x])
    return adv_loss, grad
def return_diving48():
    """Dataset spec for Diving48.

    Returns:
        (train list file, val list file, frame root dir, frame filename pattern)
    """
    frame_root = 'Diving48/frames'
    train_list = 'Diving48/train_videofolder.txt'
    val_list = 'Diving48/val_videofolder.txt'
    frame_pattern = '{:05d}.jpg'
    return train_list, val_list, frame_root, frame_pattern
def method_impl(name, declarations, is_python_method, module):
    """Generate the C++ binding implementation for the Python method or
    function *name*, covering all of its overload *declarations*.

    Emits a no-argument binding when possible, otherwise a varargs binding
    with one dispatch case per grouped overload signature.
    """
    for declaration in declarations:
        declaration['python_arglists'] = make_python_arglists(declaration, is_python_method)
    pycname = get_pycname(name)
    method_header = ['HANDLE_TH_ERRORS']
    method_header += emit_namedtuple_typedefs(declarations)
    method_header += ([UNPACK_SELF] if is_python_method else [])
    method_footer = ['END_HANDLE_TH_ERRORS']
    check_has_torch_function = (TORCH_FUNCTION_CHECK_NOARGS.substitute(name=(('"' + name) + '"')) if is_python_method else '')
    if is_noarg_binding(declarations):
        # NOTE(review): 'declaration' here is the variable left over from the
        # loop above — correct only if no-arg bindings always have a single
        # declaration; confirm before refactoring.
        dispatch = emit_single_dispatch(declaration, is_python_method)
        return PY_VARIABLE_METHOD_NOARGS.substitute(name=name, pycname=pycname, method_header=method_header, dispatch=dispatch, method_footer=method_footer, check_has_torch_function=check_has_torch_function)
    method_footer = (['Py_RETURN_NONE;'] + method_footer)
    grouped = group_overloads(declarations, is_python_method)
    is_singleton = (len(grouped) == 1)
    signatures = []
    dispatch = []
    for (i, dictionary) in enumerate(grouped):
        signature = dictionary['signature']
        signatures.append('"{}",'.format(signature))
        # A lone overload does not need an index in its dispatch case.
        overload_index = (i if (not is_singleton) else None)
        dispatch.append(emit_dispatch_case(overload_index, dictionary, is_python_method))
    if is_singleton:
        template = PY_VARIABLE_METHOD_VARARGS_SINGLETON
    else:
        template = PY_VARIABLE_METHOD_VARARGS
    if module:
        check_has_torch_function = TORCH_FUNCTION_CHECK.substitute(namespace=NATIVE_NAMESPACE_MAPPING[module], modulename=(('"' + module) + '"'), self_=('self_' if is_python_method else 'nullptr'))
    else:
        check_has_torch_function = TORCH_FUNCTION_CHECK.substitute(namespace='THPVariableClass', modulename='"torch.Tensor"', self_=('self_' if is_python_method else 'nullptr'))
    max_args = max([get_python_argc(decl) for decl in declarations])
    traceable = ('true' if all((should_trace(d) for d in declarations)) else 'false')
    return template.substitute(name=name, pycname=pycname, method_header=method_header, max_args=max_args, signatures=signatures, traceable=traceable, check_has_torch_function=check_has_torch_function, dispatch=dispatch, method_footer=method_footer, self_=('self_' if is_python_method else 'nullptr'))
def default_setup(cfg, args):
    """Perform common setup at the start of a job.

    Creates the output directory, sets up a per-rank logger, logs
    environment/command-line/config information, dumps the merged config
    on the main process, seeds the RNGs and sets the cudnn benchmark flag.

    Args:
        cfg: full config object; must expose OUTPUT_DIR, CUDNN_BENCHMARK
            and dump().
        args: parsed command-line arguments; ``config_file`` and
            ``eval_only`` attributes are honored when present.
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)
    rank = comm.get_rank()
    logger = setup_logger(output_dir, distributed_rank=rank)
    logger.info('Rank of current process: {}. World size: {}'.format(rank, comm.get_world_size()))
    logger.info('Environment info:\n' + collect_env_info())
    logger.info('Command line arguments: ' + str(args))
    if hasattr(args, 'config_file') and args.config_file != '':
        # Fix: read via a context manager — the original leaked the file
        # handle returned by PathManager.open().
        with PathManager.open(args.config_file, 'r') as f:
            config_file_contents = f.read()
        logger.info('Contents of args.config_file={}:\n{}'.format(args.config_file, config_file_contents))
    logger.info('Running with full config:\n{}'.format(cfg))
    if comm.is_main_process() and output_dir:
        # Only the main process persists the merged config.
        path = os.path.join(output_dir, 'config.yaml')
        with PathManager.open(path, 'w') as f:
            f.write(cfg.dump())
        logger.info('Full config saved to {}'.format(os.path.abspath(path)))
    seed_all_rng()
    # cudnn benchmark is costly with dynamic shapes; keep it off for
    # evaluation-only runs.
    if not (hasattr(args, 'eval_only') and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def spawn(cmd, *args):
    """Spawn *cmd* with *args* without waiting and register the child in the
    module-level ``children`` table.

    Returns the child's pid, or None when spawning failed.

    NOTE(review): on failure the exception is only printed; the function
    still logs "spawned pid None ..." and returns None, so callers must
    handle a None pid. ``children`` and ``jobs`` are module-level globals.
    """
    argv = ([cmd] + list(args))
    pid = None
    args_str = ' '.join(argv)
    try:
        pid = os.spawnlp(os.P_NOWAIT, cmd, *argv)
        children[pid] = {'pid': pid, 'cmd': argv}
    except Exception as inst:
        print(f"'{args_str}': {str(inst)}")
    print(f"spawned pid {pid} of nproc={len(children)} njobs={len(jobs)} for '{args_str}'")
    return pid
class GammaAugmentor(Augmentor):
    """Data augmentor that applies a random gamma adjustment to images."""

    def __init__(self, gamma_range=(-0.1, 0.1)):
        # Interval the random gamma factor is drawn from.
        self.gamma_range = gamma_range

    def apply_after_resize(self, tensors, factor=None):
        """Return a copy of *tensors* with the image gamma-adjusted."""
        with tf.name_scope('gamma_augmentor'):
            image = tensors[DataKeys.IMAGES]
            if factor is None:
                factor = self._sample_factor()
            scaled = (1 / math.sqrt(2)) * factor
            # Map the factor into an exponent via a log ratio.
            gamma = tf.log(0.5 + scaled) / tf.log(0.5 - scaled)
            augmented = tensors.copy()
            augmented[DataKeys.IMAGES] = image ** gamma
            return augmented

    def _sample_factor(self):
        # Uniform scalar in [gamma_range[0], gamma_range[1]).
        return tf.random_uniform(shape=[], minval=self.gamma_range[0],
                                 maxval=self.gamma_range[1], dtype=tf.float32)

    def batch_apply_after_resize(self, tensors_batch):
        # Draw one factor and share it across the whole batch.
        shared_factor = self._sample_factor()
        return [self.apply_after_resize(t, shared_factor) for t in tensors_batch]
class GlobalConsistencyError(ConfusionMatrixMetric):
    """Global consistency error (GCE) derived from a confusion matrix."""

    def __init__(self, metric: str = 'GCOERR'):
        super().__init__(metric)

    def calculate(self):
        """Return the GCE, or inf when any marginal sum is zero."""
        cm = self.confusion_matrix
        tp, tn, fp, fn = cm.tp, cm.tn, cm.fp, cm.fn
        # Every marginal appears as a denominator below.
        marginals = (tp + fn, tn + fp, tp + fp, tn + fn)
        if 0 in marginals:
            warnings.warn('Unable to compute global consistency error due to division by zero, returning inf', NotComputableMetricWarning)
            return float('inf')
        n = tp + tn + fp + fn
        e1 = (fn * (fn + 2 * tp) / (tp + fn) + fp * (fp + 2 * tn) / (tn + fp)) / n
        e2 = (fp * (fp + 2 * tp) / (tp + fp) + fn * (fn + 2 * tn) / (tn + fn)) / n
        return min(e1, e2)
def get_dataset(args):
    """Build train/test dataloaders for cifar10, cifar100 or stl10.

    Args:
        args: namespace providing ``dataset`` (name), ``data`` (root dir)
            and ``mbs`` (mini-batch size).

    Returns:
        (trainloader, train_sup_loader, testloader, nb_classes, dim_inp)

    Raises:
        ValueError: for an unknown ``args.dataset`` (the original fell
            through the if/elif chain and crashed with NameError instead).
    """
    # All three datasets share the same normalization statistics.
    normalize_xform = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))
    if args.dataset == 'cifar10':
        transform_train = transforms.Compose([transforms.ToTensor(), normalize_xform])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize_xform])
        train_set = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True, transform=transform_train)
        train_sup_set = train_set  # supervised loader draws from the same data
        testset = torchvision.datasets.CIFAR10(root=args.data, train=False, download=False, transform=transform_test)
        nb_classes = 10
        dim_inp = 32 * 32
    elif args.dataset == 'cifar100':
        transform_train = transforms.Compose([transforms.ToTensor(), normalize_xform])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize_xform])
        train_set = torchvision.datasets.CIFAR100(root=args.data, train=True, download=True, transform=transform_train)
        train_sup_set = train_set
        testset = torchvision.datasets.CIFAR100(root=args.data, train=False, download=False, transform=transform_test)
        nb_classes = 100
        dim_inp = 32 * 32
    elif args.dataset == 'stl10':
        transform_train = transforms.Compose([transforms.RandomResizedCrop(32, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=3), transforms.ToTensor(), normalize_xform])
        transform_test = transforms.Compose([transforms.Resize(32, interpolation=3), transforms.ToTensor(), normalize_xform])
        # Unsupervised training additionally uses the unlabeled split.
        train_set = torchvision.datasets.STL10(root=args.data, split='train+unlabeled', download=True, transform=transform_train)
        train_sup_set = torchvision.datasets.STL10(root=args.data, split='train', download=True, transform=transform_train)
        testset = torchvision.datasets.STL10(root=args.data, split='test', download=False, transform=transform_test)
        nb_classes = 10
        # NOTE(review): images are cropped/resized to 32x32 above, yet the
        # original reported 64*64 here; preserved for compatibility.
        dim_inp = 64 * 64
    else:
        raise ValueError(f'unknown dataset: {args.dataset!r}')
    # Loader construction is identical for every dataset.
    trainloader = torch.utils.data.DataLoader(train_set, batch_size=args.mbs, shuffle=True, num_workers=NUM_WORKERS, pin_memory=True)
    train_sup_loader = torch.utils.data.DataLoader(train_sup_set, batch_size=args.mbs, shuffle=True, num_workers=NUM_WORKERS, pin_memory=True)
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.mbs, shuffle=False, num_workers=NUM_WORKERS)
    return (trainloader, train_sup_loader, testloader, nb_classes, dim_inp)
class ROIMaskHead(torch.nn.Module):
    """Mask head of a two-stage detector: extracts per-proposal features,
    predicts mask logits, and computes the mask loss (training) or runs
    post-processing (inference)."""

    def __init__(self, cfg, in_channels):
        super(ROIMaskHead, self).__init__()
        self.cfg = cfg.clone()
        self.feature_extractor = make_roi_mask_feature_extractor(cfg, in_channels)
        self.predictor = make_roi_mask_predictor(cfg, self.feature_extractor.out_channels)
        self.post_processor = make_roi_mask_post_processor(cfg)
        self.loss_evaluator = make_roi_mask_loss_evaluator(cfg)

    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features: backbone feature maps (or already-pooled box features
                when the box head's extractor is shared).
            proposals: proposal boxes; during training they are reduced to
                the positive ones before computing the loss.
            targets: ground-truth annotations, used only during training.

        Returns:
            (x, proposals, losses) — extracted features, the proposals
            (post-processed results at inference), and a dict containing
            ``loss_mask`` during training (empty dict at inference).
        """
        if self.training:
            # The mask loss is only evaluated on positive proposals.
            all_proposals = proposals
            (proposals, positive_inds) = keep_only_positive_boxes(proposals)
        if (self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR):
            # Reuse the box head's pooled features, keeping positives only.
            x = features
            x = x[torch.cat(positive_inds, dim=0)]
        else:
            x = self.feature_extractor(features, proposals)
        mask_logits = self.predictor(x)
        if (not self.training):
            result = self.post_processor(mask_logits, proposals)
            return (x, result, {})
        loss_mask = self.loss_evaluator(proposals, mask_logits, targets)
        return (x, all_proposals, dict(loss_mask=loss_mask))
class FeatureFusion3dce(nn.Module):
    """3DCE feature fusion: concatenates the backbone features of
    neighboring images along the channel axis and projects them back to
    ``OUT_CHANNELS`` with a 1x1 convolution."""

    def __init__(self):
        super(FeatureFusion3dce, self).__init__()
        self.num_slice = cfg.INPUT.NUM_SLICES
        self.num_image = cfg.INPUT.NUM_IMAGES_3DCE
        self.out_dim = cfg.MODEL.BACKBONE.OUT_CHANNELS
        self.in_dim = cfg.runtime_info.backbone_ft_dim
        # 1x1 conv that fuses num_image concatenated feature maps.
        self.conv = nn.Conv2d((self.num_image * self.in_dim), self.out_dim, 1)
        nn.init.kaiming_uniform_(self.conv.weight, a=1)
        nn.init.constant_(self.conv.bias, 0)

    def forward(self, x, images=None):
        # Fold each group of num_image feature maps into the channel dim.
        # Assumes x[0] is (batch * num_image, C, H, W) — TODO confirm.
        x = x[0].reshape((- 1), (self.num_image * x[0].shape[1]), x[0].shape[2], x[0].shape[3])
        x = self.conv(x)
        if (images is not None):
            # Keep only the central image of each num_image group.
            images = images[int((self.num_image / 2))::self.num_image]
            return ([x], images)
        return [x]
def pesq_nb(predicted, target, sampling_frequency=8000):
    """Narrow-band PESQ score of *predicted* speech against *target*."""
    # Fixed seed so the metric is reproducible run to run.
    torch.manual_seed(1)
    metric = PerceptualEvaluationSpeechQuality(sampling_frequency, 'nb')
    return metric(predicted, target)
def draw_image_embedding_with_batch_to_tensor(batch):
    """Return the embedded image from *batch*, or a tensor of -1s shaped
    like ``batch['image']`` when no embedding image is available."""
    embedding = batch.get('image_embedding')
    if embedding is not None and 'image' in embedding:
        return embedding['image']
    # Sentinel: all -1, matching the raw image's shape and dtype.
    return -torch.ones_like(batch['image'])
class LockedValue(object):
    """A value guarded by a lock; access it through the ``value`` property."""

    def __init__(self, value):
        self.lock = threading.Lock()
        self._value = value

    def _get_value(self):
        # Context manager guarantees the lock is released on any exit path.
        with self.lock:
            return self._value

    def _set_value(self, value):
        with self.lock:
            self._value = value

    value = property(_get_value, _set_value)
class ClassifierTeacherLoss(object):
    """Callable returning (cross-entropy loss, logits) of a teacher model."""

    def __init__(self, teacher_model):
        self.teacher = teacher_model

    def __call__(self, inputs, targets):
        teacher_logits = self.teacher(inputs)
        loss = F.cross_entropy(teacher_logits, targets)
        return loss, teacher_logits
def get_model(framework, text_type, text_rep, arch='transformer', frontend='cnn', mix_type='cf', audio_rep='mel'):
    """Load a pretrained music-text model plus its tokenizer and config from
    the experiment directory derived from the arguments.

    Returns:
        (model, tokenizer, config)

    NOTE(review): for a ``framework`` other than the three handled below,
    ``model`` is never bound and load_state_dict fails with NameError; the
    same holds for ``text_encoder`` when text_type is neither 'bert' nor
    'glove' but the framework needs one.
    """
    save_dir = f'../mtr/{framework}/exp/{arch}_{frontend}_{mix_type}_{audio_rep}/{text_type}_{text_rep}'
    config = OmegaConf.load(os.path.join(save_dir, 'hparams.yaml'))
    audio_preprocessr = TFRep(sample_rate=config.sr, f_min=0, f_max=int((config.sr / 2)), n_fft=config.n_fft, win_length=config.win_length, hop_length=int((0.01 * config.sr)), n_mels=config.mel_dim)
    frontend = ResFrontEnd(input_size=(config.mel_dim, (int((100 * config.duration)) + 1)), conv_ndim=128, attention_ndim=config.attention_ndim, mix_type=config.mix_type)
    audio_encoder = MusicTransformer(audio_representation=audio_preprocessr, frontend=frontend, audio_rep=config.audio_rep, attention_nlayers=config.attention_nlayers, attention_ndim=config.attention_ndim)
    if (config.text_type == 'bert'):
        text_encoder = AutoModel.from_pretrained(config.text_backbone)
        tokenizer = AutoTokenizer.from_pretrained(config.text_backbone)
        config.text_dim = 768
    elif (config.text_type == 'glove'):
        text_encoder = nn.Identity()
        tokenizer = torch.load(os.path.join(config.data_dir, 'ecals_annotation', 'glove_tag_embs.pt'))
        # NOTE(review): 'args' is not defined in this function — this branch
        # relies on a module-level 'args'; confirm or pass it in explicitly.
        add_tokenizer = torch.load(os.path.join(args.msu_dir, 'pretrain', 'glove_tag_embs.pt'))
        tokenizer.update(add_tokenizer)
        config.text_dim = 300
    else:
        tokenizer = None
    config.audio_dim = config.attention_ndim
    if (framework == 'contrastive'):
        model = ContrastiveModel(audio_encoder=audio_encoder, text_encoder=text_encoder, text_type=config.text_type, audio_dim=config.audio_dim, text_dim=config.text_dim, mlp_dim=config.mlp_dim, temperature=config.temperature)
    elif (framework == 'triplet'):
        model = TripletModel(audio_encoder=audio_encoder, text_encoder=text_encoder, text_type=config.text_type, audio_dim=config.audio_dim, text_dim=config.text_dim, mlp_dim=config.mlp_dim, margin=config.margin)
    elif (framework == 'classification'):
        model = ClassificationModel(audio_encoder=audio_encoder, audio_dim=config.attention_ndim, mlp_dim=config.mlp_dim, num_classes=1054)
    pretrained_object = torch.load(f'{save_dir}/best.pth', map_location='cpu')
    state_dict = pretrained_object['state_dict']
    # Strip DataParallel's 'module.' prefix from checkpoint keys.
    for k in list(state_dict.keys()):
        if k.startswith('module.'):
            state_dict[k[len('module.'):]] = state_dict[k]
            del state_dict[k]
    model.load_state_dict(state_dict)
    return (model, tokenizer, config)
def move_pre_birth(patient: RawPatient) -> Optional[RawPatient]:
    """Clamp events occurring before the patient's birth date.

    Events more than 30 days before birth are dropped; closer ones have
    their start (and end, if needed) moved up to the birth date.
    Returns None when the patient has no birth event.
    """
    birth_date = None
    for event in patient.events:
        if event.concept_id == OMOP_BIRTH:
            birth_date = event.start
    if birth_date is None:
        return None

    kept_events = []
    for event in patient.events:
        if event.start < birth_date:
            # Far pre-birth events are assumed bogus and discarded.
            if birth_date - event.start > datetime.timedelta(days=30):
                continue
            event.start = birth_date
        if event.end is not None and event.end < birth_date:
            event.end = birth_date
        kept_events.append(event)

    patient.events = kept_events
    patient.resort()
    return patient
class HPUXFCompiler(FCompiler):
    """FCompiler configuration for the HP-UX Fortran 90 compiler."""

    compiler_type = 'hpux'
    description = 'HP Fortran 90 Compiler'
    version_pattern = 'HP F90 (?P<version>[^\\s*,]*)'

    executables = {
        'version_cmd': ['f90', '+version'],
        'compiler_f77': ['f90'],
        'compiler_fix': ['f90'],
        'compiler_f90': ['f90'],
        'linker_so': ['ld', '-b'],
        'archiver': ['ar', '-cr'],
        'ranlib': ['ranlib'],
    }

    module_dir_switch = None
    module_include_switch = None
    pic_flags = ['+Z']

    def get_flags(self):
        # Position-independent code plus 64-bit portability flags.
        return self.pic_flags + ['+ppu', '+DD64']

    def get_flags_opt(self):
        return ['-O3']

    def get_libraries(self):
        return ['m']

    def get_library_dirs(self):
        return ['/usr/lib/hpux64']

    def get_version(self, force=0, ok_status=[256, 0, 1]):
        # HP f90 can exit non-zero on success, hence the extended ok_status.
        return FCompiler.get_version(self, force, ok_status)
class MapTilingTuner(cutout_tuner.CutoutTuner):
    """Auto-tuner that tries map-tiling configurations on the outermost maps
    of an SDFG, measuring each cutout with the configured instrumentation."""

    def __init__(self, sdfg: dace.SDFG, measurement: dtypes.InstrumentationType=dtypes.InstrumentationType.Timer) -> None:
        super().__init__(task='MapTiling', sdfg=sdfg)
        self.instrument = measurement

    def cutouts(self) -> Generator[(Tuple[(dace.SDFG, str)], None, None)]:
        """Yield (cutout SDFG, "state_id.node_id.label") per outermost map."""
        for (node, state) in self._sdfg.all_nodes_recursive():
            if isinstance(node, dace.nodes.MapEntry):
                # Only tune outermost maps; skip nested ones.
                if (xfh.get_parent_map(state, node) is not None):
                    continue
                node_id = state.node_id(node)
                state_id = self._sdfg.node_id(state)
                subgraph_nodes = state.scope_subgraph(node).nodes()
                cutout = SDFGCutout.singlestate_cutout(state, *subgraph_nodes)
                (yield (cutout, f'{state_id}.{node_id}.{node.label}'))

    def space(self, map_entry: dace.nodes.MapEntry) -> Generator[(Tuple[int], None, None)]:
        # None means "leave untiled". Returns a list despite the Generator
        # annotation — callers presumably just iterate it; confirm.
        choices = [None, (64, 8, 1)]
        return choices

    def config_from_key(self, key: str, **kwargs) -> List[int]:
        """Parse a serialized point ('None' or dot-joined ints) back to a config."""
        if (key == 'None'):
            return None
        return list(map((lambda k: int(k)), key.split('.')))

    def apply(self, config: List[int], label: str, **kwargs) -> None:
        """Apply the chosen tile sizes to the map identified by *label*."""
        if (config is None):
            return
        (state_id, node_id, _) = label.split('.')
        map_entry = self._sdfg.node(int(state_id)).node(int(node_id))
        df.MapTiling.apply_to(self._sdfg, map_entry=map_entry, options={'tile_sizes': config})

    def pre_evaluate(self, cutout: dace.SDFG, measurements: int, **kwargs) -> Dict:
        """Instrument the cutout and build the kwargs passed to evaluate()."""
        cutout.start_state.instrument = self.instrument
        # Find the single outermost map entry of the cutout.
        map_entry = None
        for node in cutout.start_state.nodes():
            if (isinstance(node, dace.nodes.MapEntry) and (xfh.get_parent_map(cutout.start_state, node) is None)):
                map_entry = node
                break
        assert (map_entry is not None)
        # 'key' serializes a search-space point for reporting/deserialization.
        new_kwargs = {'space_kwargs': {'map_entry': map_entry}, 'cutout': cutout.to_json(), 'map_entry_id': cutout.start_state.node_id(map_entry), 'measurements': measurements, 'key': (lambda point: ('None' if (point is None) else '.'.join(map((lambda p: str(p)), point))))}
        return new_kwargs

    def evaluate(self, config, cutout, map_entry_id: int, measurements: int, **kwargs) -> float:
        cutout_ = dace.SDFG.from_json(cutout)
        map_ = cutout_.start_state.node(map_entry_id)
        # NOTE(review): tiling is applied exactly when config == 'None' (with
        # the string itself as tile_sizes) — this looks inverted; confirm
        # against the upstream tuner before relying on this behavior.
        if (config == 'None'):
            df.MapTiling.apply_to(cutout_, map_entry=map_, options={'tile_sizes': config})
        return self.measure(cutout_, measurements)
@pytest.mark.parametrize('directed', [True, False])
@pytest.mark.parametrize('tree_func', [breadth_first_tree, depth_first_tree])
def test_int64_indices(tree_func, directed):
g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2))
assert (g.indices.dtype == np.int64)
tree = tree_func(g, 0, directed=directed)
assert_array_almost_equal(csgraph_to_dense(tree), [[0, 1], [0, 0]]) |
@register_lr_scheduler('polynomial_decay')
class PolynomialDecaySchedule(FairseqLRScheduler):
    """Polynomial (power) learning-rate decay with optional linear warmup.

    The LR ramps linearly over ``--warmup-updates`` steps, then decays from
    the base LR towards ``--end-learning-rate`` following
    ``(1 - progress) ** power``, reaching the end LR at
    ``--total-num-update`` steps and staying there.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # Normalize a missing/None warmup_updates to 0.
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
        self.lr = args.lr[0]
        if args.warmup_updates > 0:
            # Start at 1/warmup_updates of the base LR and ramp up linearly.
            self.warmup_factor = 1.0 / args.warmup_updates
        else:
            self.warmup_factor = 1
        self.end_learning_rate = args.end_learning_rate
        self.total_num_update = args.total_num_update
        self.power = args.power
        self.optimizer.set_lr(self.warmup_factor * self.lr)

    @staticmethod
    def add_args(parser):
        """Add scheduler arguments to the parser.

        Fix: decorated @staticmethod — the original omitted it, so calling
        add_args through an instance would have bound it as ``self``.
        """
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', help='force annealing at specified epoch')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--end-learning-rate', default=0.0, type=float)
        parser.add_argument('--power', default=1.0, type=float)
        parser.add_argument('--total-num-update', default=1000000, type=int)

    def get_next_lr(self, epoch):
        """Base LR for *epoch*: from args.lr until force_anneal, then frozen."""
        lrs = self.args.lr
        if self.args.force_anneal is None or epoch < self.args.force_anneal:
            # Use the last configured LR once the list is exhausted.
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # Annealing forced: keep the optimizer's current LR.
            next_lr = self.optimizer.get_lr()
        return next_lr

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each optimization step."""
        if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
            # Linear warmup phase.
            self.warmup_factor = num_updates / float(self.args.warmup_updates)
            lr = self.warmup_factor * self.lr
        elif num_updates >= self.total_num_update:
            lr = self.end_learning_rate
        else:
            # Polynomial decay between end of warmup and total_num_update.
            warmup = self.args.warmup_updates
            lr_range = self.lr - self.end_learning_rate
            pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)
            lr = lr_range * pct_remaining ** self.power + self.end_learning_rate
        self.optimizer.set_lr(lr)
        return self.optimizer.get_lr()
class _coo_base(_data_matrix, _minmax_mixin):
_format = 'coo'
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a COO array/matrix from one of:
        a shape tuple (empty array), a (data, indices) pair,
        another sparse array/matrix, or a dense array-like.
        """
        _data_matrix.__init__(self)
        is_array = isinstance(self, sparray)
        if isinstance(arg1, tuple):
            if isshape(arg1, allow_1d=is_array):
                # Case 1: shape tuple -> empty array of that shape.
                self._shape = check_shape(arg1, allow_1d=is_array)
                idx_dtype = self._get_index_dtype(maxval=max(self._shape))
                data_dtype = getdtype(dtype, default=float)
                self.indices = tuple((np.array([], dtype=idx_dtype) for _ in range(len(self._shape))))
                self.data = np.array([], dtype=data_dtype)
                self.has_canonical_format = True
            else:
                # Case 2: (data, indices) pair.
                try:
                    (obj, indices) = arg1
                except (TypeError, ValueError) as e:
                    raise TypeError('invalid input format') from e
                if (shape is None):
                    # Infer shape from the largest index along each axis.
                    if any(((len(idx) == 0) for idx in indices)):
                        raise ValueError('cannot infer dimensions from zero sized index arrays')
                    shape = tuple(((operator.index(np.max(idx)) + 1) for idx in indices))
                self._shape = check_shape(shape, allow_1d=is_array)
                idx_dtype = self._get_index_dtype(indices, maxval=max(self.shape), check_contents=True)
                self.indices = tuple((np.array(idx, copy=copy, dtype=idx_dtype) for idx in indices))
                self.data = getdata(obj, copy=copy, dtype=dtype)
                self.has_canonical_format = False
        elif issparse(arg1):
            if ((arg1.format == self.format) and copy):
                # Case 3a: same format -> deep copy of indices and data.
                self.indices = tuple((idx.copy() for idx in arg1.indices))
                self.data = arg1.data.copy()
                self._shape = check_shape(arg1.shape, allow_1d=is_array)
                self.has_canonical_format = arg1.has_canonical_format
            else:
                # Case 3b: convert via tocoo().
                coo = arg1.tocoo()
                self.indices = tuple(coo.indices)
                self.data = coo.data
                self._shape = check_shape(coo.shape, allow_1d=is_array)
                self.has_canonical_format = False
        else:
            # Case 4: dense input.
            M = np.asarray(arg1)
            if (not is_array):
                # spmatrix path is strictly 2-D.
                M = np.atleast_2d(M)
                if (M.ndim != 2):
                    raise TypeError('expected dimension <= 2 array or matrix')
            self._shape = check_shape(M.shape, allow_1d=is_array)
            if (shape is not None):
                if (check_shape(shape, allow_1d=is_array) != self._shape):
                    message = f'inconsistent shapes: {shape} != {self._shape}'
                    raise ValueError(message)
            index_dtype = self._get_index_dtype(maxval=max(self._shape))
            indices = M.nonzero()
            self.indices = tuple((idx.astype(index_dtype, copy=False) for idx in indices))
            self.data = M[indices]
            self.has_canonical_format = True
        if (dtype is not None):
            self.data = self.data.astype(dtype, copy=False)
        self._check()
def row(self):
return (self.indices[(- 2)] if (self.ndim > 1) else np.zeros_like(self.col))
def row(self, new_row):
if (self.ndim < 2):
raise ValueError('cannot set row attribute of a 1-dimensional sparse array')
new_row = np.asarray(new_row, dtype=self.indices[(- 2)].dtype)
self.indices = ((self.indices[:(- 2)] + (new_row,)) + self.indices[(- 1):])
def col(self):
return self.indices[(- 1)]
def col(self, new_col):
new_col = np.asarray(new_col, dtype=self.indices[(- 1)].dtype)
self.indices = (self.indices[:(- 1)] + (new_col,))
    def reshape(self, *args, **kwargs):
        # Accepts both reshape((m, n)) and reshape(m, n); validated vs size.
        is_array = isinstance(self, sparray)
        shape = check_shape(args, self.shape, allow_1d=is_array)
        (order, copy) = check_reshape_kwargs(kwargs)
        if (shape == self.shape):
            if copy:
                return self.copy()
            else:
                return self
        # Flatten the indices in the requested order, then re-split them
        # according to the new shape.
        flat_indices = _ravel_indices(self.indices, self.shape, order=order)
        if (len(shape) == 2):
            if (order == 'C'):
                # Row-major: row index changes slowest.
                new_indices = divmod(flat_indices, shape[1])
            else:
                # Column-major: same divmod, result order swapped.
                new_indices = divmod(flat_indices, shape[0])[::(- 1)]
        else:
            new_indices = np.unravel_index(flat_indices, shape, order=order)
        # Copy the data here (not in the constructor) so copy semantics of
        # the contained objects are respected.
        if copy:
            new_data = self.data.copy()
        else:
            new_data = self.data
        return self.__class__((new_data, new_indices), shape=shape, copy=False)
    reshape.__doc__ = _spbase.reshape.__doc__
    def _getnnz(self, axis=None):
        # Whole-array nnz (also axis=0 on a 1-D array); validates lengths too.
        if ((axis is None) or ((axis == 0) and (self.ndim == 1))):
            nnz = len(self.data)
            if any(((len(idx) != nnz) for idx in self.indices)):
                raise ValueError('all index and data arrays must have the same length')
            if ((self.data.ndim != 1) or any(((idx.ndim != 1) for idx in self.indices))):
                raise ValueError('row, column, and data arrays must be 1-D')
            return int(nnz)
        if (axis < 0):
            axis += self.ndim
        if (axis >= self.ndim):
            raise ValueError('axis out of bounds')
        if (self.ndim > 2):
            raise NotImplementedError('per-axis nnz for COO arrays with >2 dimensions is not supported')
        # Per-axis count: tally entries by their index along the other axis.
        return np.bincount(downcast_intp_index(self.indices[(1 - axis)]), minlength=self.shape[(1 - axis)])
    _getnnz.__doc__ = _spbase._getnnz.__doc__
    def _check(self):
        """Sanity-check the index arrays (count, dtype, bounds) and
        canonicalize their dtype; native-order the data."""
        if (self.ndim != len(self.indices)):
            raise ValueError(f'mismatching number of index arrays for shape; got {len(self.indices)}, expected {self.ndim}')
        for (i, idx) in enumerate(self.indices):
            if (idx.dtype.kind != 'i'):
                warn(f'index array {i} has non-integer dtype ({idx.dtype.name})', stacklevel=3)
        # Unify all index arrays to one suitable integer dtype.
        idx_dtype = self._get_index_dtype(self.indices, maxval=max(self.shape))
        self.indices = tuple((np.asarray(idx, dtype=idx_dtype) for idx in self.indices))
        self.data = to_native(self.data)
        if (self.nnz > 0):
            # Bounds checks only make sense when there are entries.
            for (i, idx) in enumerate(self.indices):
                if (idx.max() >= self.shape[i]):
                    raise ValueError(f'axis {i} index {idx.max()} exceeds matrix dimension {self.shape[i]}')
                if (idx.min() < 0):
                    raise ValueError(f'negative axis {i} index: {idx.min()}')
    def transpose(self, axes=None, copy=False):
        if (axes is None):
            # Default: reverse all axes.
            axes = range(self.ndim)[::(- 1)]
        elif isinstance(self, sparray):
            if (len(axes) != self.ndim):
                raise ValueError("axes don't match matrix dimensions")
            if (len(set(axes)) != self.ndim):
                raise ValueError('repeated axis in transpose')
        elif (axes != (1, 0)):
            # spmatrix path: only the 2-D swap is meaningful.
            raise ValueError("Sparse matrices do not support an 'axes' parameter because swapping dimensions is the only logical permutation.")
        # Transposing COO is just permuting the shape and index arrays.
        permuted_shape = tuple((self._shape[i] for i in axes))
        permuted_indices = tuple((self.indices[i] for i in axes))
        return self.__class__((self.data, permuted_indices), shape=permuted_shape, copy=copy)
    transpose.__doc__ = _spbase.transpose.__doc__
    def resize(self, *shape) -> None:
        is_array = isinstance(self, sparray)
        shape = check_shape(shape, allow_1d=is_array)
        if (len(shape) > self.ndim):
            # Growing dimensionality: flatten, then unravel into the new shape.
            flat_indices = _ravel_indices(self.indices, self.shape)
            max_size = math.prod(shape)
            self.indices = np.unravel_index(flat_indices[:max_size], shape)
            self.data = self.data[:max_size]
            self._shape = shape
            return
        if (len(shape) < self.ndim):
            # Shrinking dimensionality: collapse trailing axes via reshape.
            tmp_shape = ((self._shape[:(len(shape) - 1)] + ((- 1),)) + ((1,) * (self.ndim - len(shape))))
            tmp = self.reshape(tmp_shape)
            self.indices = tmp.indices[:len(shape)]
            self._shape = tmp.shape[:len(shape)]
        # Drop entries falling outside any (possibly smaller) new extent.
        is_truncating = any(((old > new) for (old, new) in zip(self.shape, shape)))
        if is_truncating:
            mask = np.logical_and.reduce([(idx < size) for (idx, size) in zip(self.indices, shape)])
            if (not mask.all()):
                self.indices = tuple((idx[mask] for idx in self.indices))
                self.data = self.data[mask]
        self._shape = shape
    resize.__doc__ = _spbase.resize.__doc__
    def toarray(self, order=None, out=None):
        B = self._process_toarray_args(order, out)
        fortran = int(B.flags.f_contiguous)
        if ((not fortran) and (not B.flags.c_contiguous)):
            raise ValueError('Output array must be C or F contiguous')
        if (self.ndim > 2):
            raise ValueError('Cannot densify higher-rank sparse array')
        # Densify via the 2-D C routine; 1-D arrays are viewed as 1xN.
        (M, N) = self._shape_as_2d
        coo_todense(M, N, self.nnz, self.row, self.col, self.data, B.ravel('A'), fortran)
        return B.reshape(self.shape)
    toarray.__doc__ = _spbase.toarray.__doc__
    def tocsc(self, copy=False):
        if (self.ndim != 2):
            raise ValueError('Cannot convert a 1d sparse array to csc format')
        if (self.nnz == 0):
            return self._csc_container(self.shape, dtype=self.dtype)
        else:
            (M, N) = self.shape
            idx_dtype = self._get_index_dtype((self.col, self.row), maxval=max(self.nnz, M))
            row = self.row.astype(idx_dtype, copy=False)
            col = self.col.astype(idx_dtype, copy=False)
            indptr = np.empty((N + 1), dtype=idx_dtype)
            indices = np.empty_like(row, dtype=idx_dtype)
            data = np.empty_like(self.data, dtype=upcast(self.dtype))
            # CSC is CSR of the transpose: call coo_tocsr with axes swapped.
            coo_tocsr(N, M, self.nnz, col, row, self.data, indptr, indices, data)
            x = self._csc_container((data, indices, indptr), shape=self.shape)
            if (not self.has_canonical_format):
                # coo_tocsr leaves duplicates in place; merge them now.
                x.sum_duplicates()
            return x
    def tocsr(self, copy=False):
        if (self.ndim != 2):
            raise ValueError('Cannot convert a 1d sparse array to csr format')
        if (self.nnz == 0):
            return self._csr_container(self.shape, dtype=self.dtype)
        else:
            (M, N) = self.shape
            idx_dtype = self._get_index_dtype((self.row, self.col), maxval=max(self.nnz, N))
            row = self.row.astype(idx_dtype, copy=False)
            col = self.col.astype(idx_dtype, copy=False)
            indptr = np.empty((M + 1), dtype=idx_dtype)
            indices = np.empty_like(col, dtype=idx_dtype)
            data = np.empty_like(self.data, dtype=upcast(self.dtype))
            # C routine fills indptr/indices/data in place.
            coo_tocsr(M, N, self.nnz, row, col, self.data, indptr, indices, data)
            x = self._csr_container((data, indices, indptr), shape=self.shape)
            if (not self.has_canonical_format):
                # coo_tocsr leaves duplicates in place; merge them now.
                x.sum_duplicates()
            return x
def tocoo(self, copy=False):
if copy:
return self.copy()
else:
return self
tocoo.__doc__ = _spbase.tocoo.__doc__
    def todia(self, copy=False):
        if (self.ndim != 2):
            raise ValueError('Cannot convert a 1d sparse array to dia format')
        self.sum_duplicates()
        # Diagonal offset of each entry: k = col - row.
        ks = (self.col - self.row)
        (diags, diag_idx) = np.unique(ks, return_inverse=True)
        if (len(diags) > 100):
            warn(('Constructing a DIA matrix with %d diagonals is inefficient' % len(diags)), SparseEfficiencyWarning, stacklevel=2)
        if (self.data.size == 0):
            data = np.zeros((0, 0), dtype=self.dtype)
        else:
            # DIA stores each diagonal padded out to max column index + 1.
            data = np.zeros((len(diags), (self.col.max() + 1)), dtype=self.dtype)
            data[(diag_idx, self.col)] = self.data
        return self._dia_container((data, diags), shape=self.shape)
    todia.__doc__ = _spbase.todia.__doc__
    def todok(self, copy=False):
        if (self.ndim != 2):
            raise ValueError('Cannot convert a 1d sparse array to dok format')
        self.sum_duplicates()
        dok = self._dok_container(self.shape, dtype=self.dtype)
        # Bulk-populate the dict-of-keys: {(i, j): value}.
        dok._update(zip(zip(self.row, self.col), self.data))
        return dok
    todok.__doc__ = _spbase.todok.__doc__
    def diagonal(self, k=0):
        if (self.ndim != 2):
            raise ValueError('diagonal requires two dimensions')
        (rows, cols) = self.shape
        if ((k <= (- rows)) or (k >= cols)):
            # Requested diagonal lies entirely outside the matrix.
            return np.empty(0, dtype=self.data.dtype)
        diag = np.zeros(min((rows + min(k, 0)), (cols - max(k, 0))), dtype=self.dtype)
        # Entries on diagonal k satisfy row + k == col.
        diag_mask = ((self.row + k) == self.col)
        if self.has_canonical_format:
            row = self.row[diag_mask]
            data = self.data[diag_mask]
        else:
            # Duplicates may exist: merge the selected entries first.
            inds = tuple((idx[diag_mask] for idx in self.indices))
            ((row, _), data) = self._sum_duplicates(inds, self.data[diag_mask])
        diag[(row + min(k, 0))] = data
        return diag
    diagonal.__doc__ = _data_matrix.diagonal.__doc__
# Overwrite diagonal k with `values` (scalar or 1-d array), in place.
def _setdiag(self, values, k):
    if (self.ndim != 2):
        raise ValueError('setting a diagonal requires two dimensions')
    (M, N) = self.shape
    # An empty values array means nothing to write.
    if (values.ndim and (not len(values))):
        return
    idx_dtype = self.row.dtype
    # Entries NOT on diagonal k are always kept.
    full_keep = ((self.col - self.row) != k)
    if (k < 0):
        max_index = min((M + k), N)
        if values.ndim:
            # A short values array overwrites only a prefix of the diagonal.
            max_index = min(max_index, len(values))
        # Also keep on-diagonal entries beyond the overwritten span.
        keep = np.logical_or(full_keep, (self.col >= max_index))
        new_row = np.arange((- k), ((- k) + max_index), dtype=idx_dtype)
        new_col = np.arange(max_index, dtype=idx_dtype)
    else:
        max_index = min(M, (N - k))
        if values.ndim:
            max_index = min(max_index, len(values))
        keep = np.logical_or(full_keep, (self.row >= max_index))
        new_row = np.arange(max_index, dtype=idx_dtype)
        new_col = np.arange(k, (k + max_index), dtype=idx_dtype)
    if values.ndim:
        new_data = values[:max_index]
    else:
        # Scalar value: broadcast it over the whole overwritten span.
        new_data = np.empty(max_index, dtype=self.dtype)
        new_data[:] = values
    self.indices = (np.concatenate((self.row[keep], new_row)), np.concatenate((self.col[keep], new_col)))
    self.data = np.concatenate((self.data[keep], new_data))
    # Appending breaks sorted order, so the format is no longer canonical.
    self.has_canonical_format = False
def _with_data(self, data, copy=True):
if copy:
indices = tuple((idx.copy() for idx in self.indices))
else:
indices = self.indices
return self.__class__((data, indices), shape=self.shape, dtype=data.dtype)
def sum_duplicates(self) -> None:
    """Merge duplicate coordinates in place; no-op when already canonical."""
    if self.has_canonical_format:
        return
    self.indices, self.data = self._sum_duplicates(self.indices, self.data)
    self.has_canonical_format = True
def _sum_duplicates(self, indices, data):
if (len(data) == 0):
return (indices, data)
order = np.lexsort(indices[::(- 1)])
indices = tuple((idx[order] for idx in indices))
data = data[order]
unique_mask = np.logical_or.reduce([(idx[1:] != idx[:(- 1)]) for idx in indices])
unique_mask = np.append(True, unique_mask)
indices = tuple((idx[unique_mask] for idx in indices))
(unique_inds,) = np.nonzero(unique_mask)
data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
return (indices, data)
def eliminate_zeros(self):
    """Drop explicitly stored zero entries, modifying the arrays in place."""
    keep = self.data != 0
    self.data = self.data[keep]
    self.indices = tuple(idx[keep] for idx in self.indices)
# Add this sparse matrix to a same-shaped dense array, returning dense.
def _add_dense(self, other):
    if (other.shape != self.shape):
        raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})')
    dtype = upcast_char(self.dtype.char, other.dtype.char)
    # Copy so the caller's array is untouched; entries are scattered into it.
    result = np.array(other, dtype=dtype, copy=True)
    # Tell the C routine whether the flattened buffer is Fortran-ordered.
    fortran = int(result.flags.f_contiguous)
    (M, N) = self._shape_as_2d
    coo_todense(M, N, self.nnz, self.row, self.col, self.data, result.ravel('A'), fortran)
    return self._container(result, copy=False)
# Sparse @ dense-1-d-vector product via the C kernel coo_matvec.
def _mul_vector(self, other):
    result_shape = (self.shape[0] if (self.ndim > 1) else 1)
    result = np.zeros(result_shape, dtype=upcast_char(self.dtype.char, other.dtype.char))
    if (self.ndim == 2):
        col = self.col
        row = self.row
    elif (self.ndim == 1):
        # Treat a 1-d array as a single-row matrix for the matvec kernel.
        col = self.indices[0]
        row = np.zeros_like(col)
    else:
        raise NotImplementedError(f'coo_matvec not implemented for ndim={self.ndim}')
    coo_matvec(self.nnz, row, col, self.data, other, result)
    # For 1-d sparse arrays the dot product is a scalar, not a length-1 array.
    if (isinstance(self, sparray) and (result_shape == 1)):
        return result[0]
    return result
# Sparse @ dense-2-d product: apply coo_matvec column by column.
def _mul_multivector(self, other):
    result_dtype = upcast_char(self.dtype.char, other.dtype.char)
    if (self.ndim == 2):
        # Result built transposed (one row per column of `other`), untransposed on return.
        result_shape = (other.shape[1], self.shape[0])
        col = self.col
        row = self.row
    elif (self.ndim == 1):
        result_shape = (other.shape[1],)
        col = self.indices[0]
        row = np.zeros_like(col)
    else:
        raise NotImplementedError(f'coo_matvec not implemented for ndim={self.ndim}')
    result = np.zeros(result_shape, dtype=result_dtype)
    for (i, other_col) in enumerate(other.T):
        # result[i:(i + 1)] is a view: the kernel writes in place.
        coo_matvec(self.nnz, row, col, self.data, other_col, result[i:(i + 1)])
    # Return with the caller's array subclass (e.g. np.matrix) preserved.
    return result.T.view(type=type(other))
def add_graph_arguments(parser):
    """Register knowledge-graph hyperparameter flags on an argparse parser."""
    graph_options = (
        ('--num-items', 10, 'Maximum number of items in each KB'),
        ('--entity-hist-len', 2, 'Number of most recent utterances to consider when updating entity node embeddings'),
        ('--max-num-entities', 30, 'Estimate of maximum number of entities in a dialogue'),
        ('--max-degree', 10, 'Maximum degree of a node in the graph'),
    )
    for flag, default, help_text in graph_options:
        parser.add_argument(flag, type=int, default=default, help=help_text)
class VitAttention(SequenceModule):
    """Multi-head self-attention (ViT style) packaged as a SequenceModule.

    NOTE(review): ``d_output`` reads like it is meant to be a ``@property``
    (SequenceModule subclasses typically expose it as one); the decorator may
    have been lost upstream — confirm before relying on attribute access.
    """

    def d_output(self):
        return self.dim

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0.0, packed_linear=True, linear_cfg=None, **kwargs):
        """dim: model width; num_heads: attention heads; qk_scale overrides the
        default 1/sqrt(head_dim); packed_linear fuses q/k/v into one Linear;
        linear_cfg (hydra config) forces separate q/k/v projections."""
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** (-0.5)
        # A custom linear layer config is incompatible with the packed qkv layer.
        if linear_cfg is not None:
            packed_linear = False
        self.packed_linear = packed_linear
        if packed_linear:
            self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        else:
            if linear_cfg is None:
                linear_cfg = {'_target_': 'torch.nn.Linear'}
            self.q_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias, _recursive_=False)
            self.k_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias, _recursive_=False)
            self.v_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias, _recursive_=False)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x, state=None):
        """x: (batch, seq, dim). Returns (output, None) — no recurrent state."""
        B, N, C = x.shape
        if self.packed_linear:
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
            q, k, v = [rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads) for t in (q, k, v)]
        bsz, num_heads, q_seq_len, dk = q.size()
        _, _, k_seq_len, _ = k.size()
        q = rearrange(q, 'b h t d -> (b h) t d')
        k = rearrange(k, 'b h s d -> (b h) d s')
        # beta=0 means the uninitialized `attn` buffer contents are ignored.
        attn = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype, device=q.device)
        attn = rearrange(torch.baddbmm(attn, q, k, beta=0, alpha=self.scale),
                         '(b h) t s -> b h t s', h=self.num_heads)
        attn = F.softmax(attn, dim=-1, dtype=v.dtype)
        attn = self.attn_drop(attn)
        # BUG FIX: original read `(attn v)` (missing operator, a syntax error);
        # the intended operation is the attention-weighted sum, i.e. attn @ v.
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        return x, None
class TransitiveGroup(PermutationGroup_unique):
    """The n-th transitive permutation group of degree d, taken from GAP's
    transitive groups library."""

    def __init__(self, d, n):
        # Coerce both arguments to Sage Integers up front.
        self._d = d = Integer(d)
        self._n = n = Integer(n)
        if (d < 0):
            raise ValueError('degree d must not be negative')
        # Valid indices run 1..(number of transitive groups of degree d).
        max_n = TransitiveGroups(d).cardinality()
        if ((n > max_n) or (n <= 0)):
            raise ValueError(('index n must be in {1,..,%s}' % max_n))
        if (d <= 1):
            # Degrees 0 and 1 only have the trivial group; build it directly
            # instead of querying GAP's library.
            PermutationGroup_generic.__init__(self, gens=[()], domain=list(range(1, (d + 1))))
        else:
            gap_group = libgap.TransitiveGroup(d, n)
            PermutationGroup_generic.__init__(self, gap_group=gap_group)

    def transitive_number(self):
        # Index of this group within the degree-d transitive groups library.
        return self._n

    def degree(self):
        # Number of points the group acts on.
        return self._d

    def _repr_(self):
        return ('Transitive group number %s of degree %s' % (self._n, self._d))
def register_Ns3QueueDisc_methods(root_module, cls):
    """Register Python bindings for ns3::QueueDisc on the pybindgen class
    wrapper ``cls`` (mechanically generated binding declarations)."""
    cls.add_constructor([])
    # Public queue-disc composition and packet-path API.
    cls.add_method('AddInternalQueue', 'void', [param('ns3::Ptr< ns3::Queue< ns3::QueueDiscItem > >', 'queue')])
    cls.add_method('AddPacketFilter', 'void', [param('ns3::Ptr< ns3::PacketFilter >', 'filter')])
    cls.add_method('AddQueueDiscClass', 'void', [param('ns3::Ptr< ns3::QueueDiscClass >', 'qdClass')])
    cls.add_method('Classify', 'int32_t', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')])
    cls.add_method('Dequeue', 'ns3::Ptr< ns3::QueueDiscItem >', [])
    cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')])
    cls.add_method('GetInternalQueue', 'ns3::Ptr< ns3::Queue< ns3::QueueDiscItem > >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetNBytes', 'uint32_t', [], is_const=True)
    cls.add_method('GetNInternalQueues', 'uint32_t', [], is_const=True)
    cls.add_method('GetNPacketFilters', 'uint32_t', [], is_const=True)
    cls.add_method('GetNPackets', 'uint32_t', [], is_const=True)
    cls.add_method('GetNQueueDiscClasses', 'uint32_t', [], is_const=True)
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    cls.add_method('GetPacketFilter', 'ns3::Ptr< ns3::PacketFilter >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetQueueDiscClass', 'ns3::Ptr< ns3::QueueDiscClass >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetQuota', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetStats', 'ns3::QueueDisc::Stats const &', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetWakeMode', 'ns3::QueueDisc::WakeMode', [], is_const=True, is_virtual=True)
    cls.add_method('Peek', 'ns3::Ptr< ns3::QueueDiscItem const >', [], is_const=True)
    cls.add_method('Run', 'void', [])
    cls.add_method('SetNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    cls.add_method('SetQuota', 'void', [param('uint32_t const', 'quota')], is_virtual=True)
    # Static drop-reason string constants.
    cls.add_static_attribute('CHILD_QUEUE_DISC_DROP', 'char const * const', is_const=True)
    cls.add_static_attribute('INTERNAL_QUEUE_DROP', 'char const * const', is_const=True)
    # Protected lifecycle and drop/mark hooks.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DropAfterDequeue', 'void', [param('ns3::Ptr< ns3::QueueDiscItem const >', 'item'), param('char const *', 'reason')], visibility='protected')
    cls.add_method('DropBeforeEnqueue', 'void', [param('ns3::Ptr< ns3::QueueDiscItem const >', 'item'), param('char const *', 'reason')], visibility='protected')
    cls.add_method('Mark', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item'), param('char const *', 'reason')], visibility='protected')
    # Pure-virtual private hooks subclasses must implement.
    cls.add_method('CheckConfig', 'bool', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::QueueDiscItem >', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::QueueDiscItem const >', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('InitializeParams', 'void', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    return
@pytest.fixture(scope='function')
def ray_session_fixture():
    """Function-scoped Ray session: initialize before the test, shut down after.

    NOTE(review): this block was corrupted in the source — the fixture
    decorator name and the numeric values for ``memory=`` and
    ``object_store_memory=`` were lost (they were empty keyword arguments,
    a syntax error). The decorator is restored here and the two memory
    limits are omitted; restore the intended byte limits before relying on
    this fixture for memory-constrained tests.
    """
    if not ray.is_initialized():
        ray.init(ignore_reinit_error=True, log_to_driver=False, include_webui=False)
    yield
    if ray.is_initialized():
        ray.shutdown()
def generate_plot_points(f, xrange, plot_points=5, adaptive_tolerance=0.01, adaptive_recursion=5, randomize=True, initial_points=None, *, excluded=False, imaginary_tolerance=1e-08):
    """Sample ``f`` over ``xrange`` and return a list of (x, f(x)) pairs,
    adaptively refined where the function varies quickly.

    When ``excluded`` is true, also return the x-values where evaluation
    failed. Interior sample points are jittered when ``randomize`` is true;
    ``initial_points`` (a list) adds extra mandatory sample locations.
    """
    from sage.plot.misc import setup_for_eval_on_grid
    (f, ranges) = setup_for_eval_on_grid(f, [xrange], plot_points, imaginary_tolerance=imaginary_tolerance)
    (xmin, xmax, delta) = ranges[0]
    x_values = srange(*ranges[0], include_endpoint=True)
    random = current_randstate().python_random().random
    # Jitter interior points by up to half a step to avoid aliasing artifacts;
    # endpoints stay fixed.
    for i in range(len(x_values)):
        xi = x_values[i]
        if (randomize and (i > 0) and (i < (plot_points - 1))):
            xi += (delta * (random() - 0.5))
            x_values[i] = xi
    if isinstance(initial_points, list):
        x_values = sorted((x_values + initial_points))
    data = ([None] * len(x_values))
    exceptions = 0
    exception_indices = []
    # First pass: evaluate f at every sample point, tracking failures.
    for i in range(len(x_values)):
        xi = x_values[i]
        try:
            data[i] = (float(xi), float(f(xi)))
            # Treat NaN/inf results as evaluation failures too.
            if (str(data[i][1]) in ['nan', 'NaN', 'inf', '-inf']):
                msg = ('Unable to compute f(%s)' % xi)
                sage.misc.verbose.verbose(msg, 1)
                exceptions += 1
                exception_indices.append(i)
        except (ArithmeticError, TypeError, ValueError) as m:
            sage.misc.verbose.verbose(('%s\nUnable to compute f(%s)' % (m, xi)), 1)
            if (i == 0):
                # Left endpoint failed: probe slightly inward (up to ~1 step)
                # for a usable substitute point.
                for j in range(1, 99):
                    xj = (xi + ((delta * j) / 100.0))
                    try:
                        data[i] = (float(xj), float(f(xj)))
                        # NaN != NaN: keep probing past NaN results.
                        if (data[i][1] != data[i][1]):
                            continue
                        break
                    except (ArithmeticError, TypeError, ValueError):
                        pass
                else:
                    # for-else: no substitute found — record the failure.
                    msg = m
                    exceptions += 1
                    exception_indices.append(i)
            elif (i == (plot_points - 1)):
                # Right endpoint failed: probe slightly inward the other way.
                for j in range(1, 99):
                    xj = (xi - ((delta * j) / 100.0))
                    try:
                        data[i] = (float(xj), float(f(xj)))
                        if (data[i][1] != data[i][1]):
                            continue
                        break
                    except (ArithmeticError, TypeError, ValueError):
                        pass
                else:
                    msg = m
                    exceptions += 1
                    exception_indices.append(i)
            else:
                # Interior failures are simply excluded.
                msg = m
                exceptions += 1
                exception_indices.append(i)
    data = [data[i] for i in range(len(data)) if (i not in exception_indices)]
    excluded_points = [x_values[i] for i in exception_indices]
    (i, j) = (0, 0)
    # Scale the tolerance by the step size so it is resolution-independent.
    adaptive_tolerance = (delta * float(adaptive_tolerance))
    adaptive_recursion = int(adaptive_recursion)
    # Second pass: insert refinement points between adjacent samples where
    # the function changes too fast; the list grows as we walk it.
    while (i < (len(data) - 1)):
        for p in adaptive_refinement(f, data[i], data[(i + 1)], adaptive_tolerance=adaptive_tolerance, adaptive_recursion=adaptive_recursion, excluded=True):
            if (p[1] == 'NaN'):
                excluded_points.append(p[0])
            else:
                data.insert((i + 1), p)
                i += 1
        i += 1
    if (((len(data) == 0) and (exceptions > 0)) or (exceptions > 10)):
        sage.misc.verbose.verbose(('WARNING: When plotting, failed to evaluate function at %s points.' % exceptions), level=0)
        sage.misc.verbose.verbose(("Last error message: '%s'" % msg), level=0)
    if excluded:
        return (data, excluded_points)
    return data
def _final_estimator_has(attr):
def check(self):
getattr(self._final_estimator, attr)
return True
return check |
def sample_neighs(G, nodes, sample_num=None, self_loop=False, shuffle=True):
    """Sample neighbor lists for ``nodes`` from graph ``G``.

    With ``sample_num`` set, each list is sampled to that size (with
    replacement when the neighborhood is smaller); ``self_loop`` appends the
    node itself (reserving one slot); ``shuffle`` permutes each sampled list.
    Returns (float32 array of neighbor ids, array of list lengths).
    """
    choice = np.random.choice
    neighs = [list(G[int(node)]) for node in nodes]
    if sample_num:
        if self_loop:
            # Reserve one slot for the node itself, appended below.
            sample_num -= 1
        samp_neighs = []
        for neigh in neighs:
            with_replacement = len(neigh) < sample_num
            samp_neighs.append(list(choice(neigh, sample_num, replace=with_replacement)))
        if self_loop:
            samp_neighs = [samp + [nodes[i]] for i, samp in enumerate(samp_neighs)]
        if shuffle:
            samp_neighs = [list(np.random.permutation(s)) for s in samp_neighs]
    else:
        samp_neighs = neighs
    return (np.asarray(samp_neighs, dtype=np.float32), np.asarray(list(map(len, samp_neighs))))
def head_forward(inputs, in_index, embed_layers, fuse_layer, align_corners):
    """Embed the selected feature maps, upsample each to the first map's
    spatial size, concatenate on channels, and run the fuse layer."""
    feats = inputs
    batch = feats[-1].shape[0]
    target_size = feats[0].size()[2:]
    embedded = {}
    for idx in in_index:
        out = embed_layers[str(idx)](feats[idx])
        if out.dim() == 3:
            # Token layout (n, hw, c) -> image layout (n, c, h, w) using the
            # source feature map's spatial dimensions.
            out = out.permute(0, 2, 1).contiguous().reshape(batch, -1, feats[idx].shape[2], feats[idx].shape[3])
        if out.size()[2:] != target_size:
            out = resize(out, size=target_size, mode='bilinear', align_corners=align_corners)
        embedded[idx] = out
    return fuse_layer(torch.cat(list(embedded.values()), dim=1))
def spec_to_float32(spec):
    """Downcast a numba jitclass spec from float64 to float32.

    Scalar float64 entries become float32; numba array types with a float64
    element type are rebuilt with float32; other array types pass through.
    Raises ValueError for any other entry.
    """
    converted = []
    for name, dtype in spec:
        if dtype == float64:
            narrowed = float32
        elif isinstance(dtype, numba.core.types.npytypes.Array):
            narrowed = dtype.copy(dtype=float32) if dtype.dtype == float64 else dtype
        else:
            raise ValueError(f'Unknown spec type {dtype}')
        converted.append((name, narrowed))
    return converted
class Precision(object):
    """Accumulates error values and reports a precision-vs-threshold curve.

    Thresholds are ``n`` evenly spaced points on [0, max_accuracy].
    """

    def __init__(self, n=21, max_accuracy=2):
        self.max_accuracy = max_accuracy
        self.Xaxis = np.linspace(0, self.max_accuracy, n)  # thresholds
        self.reset()

    def reset(self):
        """Discard all accumulated values."""
        self.accuracies = []

    def add_accuracy(self, val, index=None):
        """Record one value. ``index`` is unused; kept for interface compatibility."""
        self.accuracies.append(val)

    def get_main(self):
        """Mean of the accumulated values, as a one-element list."""
        # BUG FIX: the original divided by the bound method `self.count`
        # (TypeError at runtime); `count` is now a property, so this works.
        main_avg = [np.sum(self.accuracies).astype(float) / self.count]
        return main_avg

    @property
    def count(self):
        """Number of recorded values."""
        # BUG FIX: promoted to a property — get_main/value read it as an attribute.
        return len(self.accuracies)

    @property
    def value(self):
        """Fraction of recorded values <= each threshold in Xaxis."""
        # BUG FIX: promoted to a property — average() passes it to np.trapz
        # as data, which requires an array, not a bound method.
        prec = [np.sum([i <= thres for i in self.accuracies]).astype(float) / self.count
                for thres in self.Xaxis]
        return np.array(prec)

    def average(self):
        """Area under the precision curve as a percentage of max_accuracy;
        0 when no values have been recorded."""
        if (len(self.accuracies) == 0):
            return 0
        return ((np.trapz(self.value, x=self.Xaxis) * 100) / self.max_accuracy)
# Placeholder emitted when Flax is not installed: any attempt to instantiate
# raises a helpful ImportError via `requires_backends` instead of failing with
# an obscure import error at use time.
class FlaxVisionEncoderDecoderModel(metaclass=DummyObject):
    _backends = ['flax']  # backends that must be installed to use the real class

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def test_RegularArray_RecordArray_NumpyArray():
    """Typetracer forms of RegularArray-of-RecordArray layouts must match the
    concrete forms, including zeros_length handling and slicing behavior."""
    # Regular array of records over a concrete NumPy buffer.
    a = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), 3)
    assert (a.to_typetracer().form == a.to_typetracer(forget_length=True).form)
    assert is_unknown_length(a.to_typetracer(forget_length=True).length)
    # size-0 regular array with an explicit zeros_length of 10.
    b = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.emptyarray.EmptyArray()], ['nest']), 0, zeros_length=10)
    assert (b.to_typetracer().form == b.form)
    assert (b.to_typetracer().form.type == b.form.type)
    assert (len(b['nest']) == 10)
    assert (b.to_typetracer()['nest'].form == b['nest'].form)
    assert isinstance(b['nest'][5], ak.contents.emptyarray.EmptyArray)
    assert (b.to_typetracer()['nest'][5].form == b['nest'][5].form)
    assert (len(b['nest'][5]) == 0)
    # Slices clamp to the available length (7: and 7:100 both give 3 items).
    assert isinstance(b['nest'][7:], ak.contents.regulararray.RegularArray)
    assert (b.to_typetracer()['nest'][7:].form == b['nest'][7:].form)
    assert (len(b['nest'][7:]) == 3)
    assert (len(b['nest'][7:100]) == 3)
    # Unknown field names must raise.
    with pytest.raises(IndexError):
        b['nest']['bad']
    assert (b.to_typetracer().form == b.to_typetracer(forget_length=True).form)
    assert is_unknown_length(b.to_typetracer(forget_length=True).length)
def GetPseudoAAC1(ProteinSequence, lamda=30, weight=0.05, AAP=None):
    """Compute the amino-acid-composition part of type-1 pseudo amino acid
    composition (PAAC1..PAAC20), normalized by the sequence-order factor.

    ProteinSequence: amino acid string; lamda: number of correlation tiers;
    weight: weighting of the sequence-order term; AAP: list of amino acid
    property dicts (defaults to hydrophobicity + hydrophilicity).
    """
    # BUG FIX: the default was a mutable module-level list shared across all
    # calls; build the default per-call instead.
    if AAP is None:
        AAP = [_Hydrophobicity, _hydrophilicity]
    rightpart = 0.0
    for i in range(lamda):
        rightpart = (rightpart + GetSequenceOrderCorrelationFactor(ProteinSequence, (i + 1), AAP))
    AAC = GetAAComposition(ProteinSequence)
    result = {}
    # Shared normalization: 1 + weight * sum of correlation factors.
    temp = (1 + (weight * rightpart))
    for (index, i) in enumerate(AALetter):
        result[('PAAC' + str((index + 1)))] = round((AAC[i] / temp), 3)
    return result
# Shared fixture logic for RemoteModule tests over RPC.
class CommonRemoteModuleTest(RpcAgentTestFixture):
    # NOTE(review): RPC test fixtures usually declare world_size as a
    # @property; the decorator may have been lost in extraction — confirm.
    def world_size(self):
        return 2

    # NOTE(review): written without `self`; confirm whether this was meant to
    # be a @staticmethod or lost its `self` parameter upstream.
    def _create_remote_module_iter(remote_device, modes=None):
        # Yield one remote module per requested creation mode.
        if (modes is None):
            modes = ModuleCreationMode.__members__.values()
        args = (1,)
        kwargs = dict(first_kwarg=2)
        if (ModuleCreationMode.MODULE_CTOR in modes):
            remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
            (yield remote_module)
        if (ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes):
            # Interface-based construction additionally TorchScript-compiles the module.
            remote_module = _RemoteModule(remote_device, create_scripted_module, args, kwargs, _module_interface_cls=MyModuleInterface)
            scripted_remote_module = torch.jit.script(remote_module)
            (yield scripted_remote_module)
def _check_decreasing_hecke_factorization(t):
    """Validate raw input for a decreasing Hecke factorization; instances of
    DecreasingHeckeFactorization are accepted as-is (already validated)."""
    if (not isinstance(t, DecreasingHeckeFactorization)):
        if (not isinstance(t, (tuple, list))):
            raise ValueError('t should be a list or tuple')
        for factor in t:
            if (not isinstance(factor, (tuple, list))):
                raise ValueError('each factor in t should be a list or tuple')
            if (not all((isinstance(x, (int, Integer)) for x in factor))):
                raise ValueError('each nonempty factor should contain integers')
            for i in range((len(factor) - 1)):
                # Entries within a factor must strictly decrease.
                if (factor[i] <= factor[(i + 1)]):
                    raise ValueError('each nonempty factor should be a strictly decreasing sequence')
class GenerativeDecoder(nn.Module):
    """Generative answer decoder for visual dialog: trains with teacher
    forcing over ground-truth answers and, at eval time, scores the answer
    options by their log-likelihood under the LSTM language model."""

    def __init__(self, config, vocabulary):
        super().__init__()
        self.config = config
        self.word_embed = nn.Embedding(len(vocabulary), config['word_embedding_size'], padding_idx=vocabulary.PAD_INDEX)
        self.answer_rnn = nn.LSTM(config['word_embedding_size'], config['lstm_hidden_size'], config['lstm_num_layers'], batch_first=True, dropout=config['dropout'])
        self.lstm_to_words = nn.Linear(self.config['lstm_hidden_size'], len(vocabulary))
        self.dropout = nn.Dropout(p=config['dropout'])
        self.logsoftmax = nn.LogSoftmax(dim=(- 1))

    def forward(self, encoder_output, batch):
        # Training: per-token vocabulary scores; eval: per-option log-likelihoods.
        if self.training:
            ans_in = batch['ans_in']
            (batch_size, num_rounds, max_sequence_length) = ans_in.size()
            # Flatten (batch, rounds) into one batch axis for the LSTM.
            ans_in = ans_in.view((batch_size * num_rounds), max_sequence_length)
            ans_in_embed = self.word_embed(ans_in)
            # Encoder output initializes the hidden state of every LSTM layer.
            init_hidden = encoder_output.view(1, (batch_size * num_rounds), (- 1))
            init_hidden = init_hidden.repeat(self.config['lstm_num_layers'], 1, 1)
            init_cell = torch.zeros_like(init_hidden)
            (ans_out, (hidden, cell)) = self.answer_rnn(ans_in_embed, (init_hidden, init_cell))
            ans_out = self.dropout(ans_out)
            ans_word_scores = self.lstm_to_words(ans_out)
            return ans_word_scores
        else:
            ans_in = batch['opt_in']
            (batch_size, num_rounds, num_options, max_sequence_length) = ans_in.size()
            ans_in = ans_in.view(((batch_size * num_rounds) * num_options), max_sequence_length)
            ans_in_embed = self.word_embed(ans_in)
            # Replicate the encoder state once per answer option.
            init_hidden = encoder_output.view(batch_size, num_rounds, 1, (- 1))
            init_hidden = init_hidden.repeat(1, 1, num_options, 1)
            init_hidden = init_hidden.view(1, ((batch_size * num_rounds) * num_options), (- 1))
            init_hidden = init_hidden.repeat(self.config['lstm_num_layers'], 1, 1)
            init_cell = torch.zeros_like(init_hidden)
            (ans_out, (hidden, cell)) = self.answer_rnn(ans_in_embed, (init_hidden, init_cell))
            ans_word_scores = self.logsoftmax(self.lstm_to_words(ans_out))
            # Gather the log-prob of each target token, mask out padding (id 0),
            # and sum per sequence to score each option.
            target_ans_out = batch['opt_out'].view(((batch_size * num_rounds) * num_options), (- 1))
            ans_word_scores = torch.gather(ans_word_scores, (- 1), target_ans_out.unsqueeze((- 1))).squeeze()
            # NOTE(review): hard-coded .cuda() assumes GPU inputs — confirm.
            ans_word_scores = (ans_word_scores * (target_ans_out > 0).float().cuda())
            ans_scores = torch.sum(ans_word_scores, (- 1))
            ans_scores = ans_scores.view(batch_size, num_rounds, num_options)
            return ans_scores
class BuildModelJob(GenericJob):
    """Toy 'buildmodel' job: burns a little CPU on an arithmetic series and
    returns its (randomized) integer sum."""

    def __init__(self, problem):
        self.type = 'buildmodel'
        GenericJob.__init__(self, problem)
        # NOTE: `add_call_Back` spelling follows the GenericJob API.
        self.add_call_Back(self.print_result)

    def run(self):
        print(('Process [%s]: buildmodel running %s' % (os.getpid(), self.problem_name)), file=sys.stderr)
        total = 0
        increment = random() / 10000
        # Accumulate term by term (order preserved for float reproducibility).
        for term in range(10000):
            total += increment * term
        return int(total)
def log_pytorch_version_info():
    """Log the installed PyTorch version via the module-level logger.

    Imports torch lazily so modules that never call this don't pay for it.
    """
    import torch
    logger.info('Pytorch version: %s', torch.__version__)
def test_resplit_no_keep_tokens(pipeline):
    """With keep_tokens=False the tokenizer may re-split the input: the
    pre-joined "I can't" separates, and "can't" expands into the MWT word
    pair "ca" + "n't" in both sentences."""
    tokens = [['I', "can't", 'believe', 'it'], ["I can't", 'sleep']]
    doc = resplit_mwt(tokens, pipeline, keep_tokens=False)
    assert (len(doc.sentences) == 2)
    # Sentence 1: token boundaries preserved, "can't" -> ca + n't.
    assert (len(doc.sentences[0].tokens) == 4)
    assert (len(doc.sentences[0].tokens[1].words) == 2)
    assert (doc.sentences[0].tokens[1].words[0].text == 'ca')
    assert (doc.sentences[0].tokens[1].words[1].text == "n't")
    # Sentence 2: "I can't" re-split into I + can't + sleep (3 tokens).
    assert (len(doc.sentences[1].tokens) == 3)
    assert (len(doc.sentences[1].tokens[1].words) == 2)
    assert (doc.sentences[1].tokens[1].words[0].text == 'ca')
    assert (doc.sentences[1].tokens[1].words[1].text == "n't")
def inconsistent_user_full_pandas_dataset():
    """Fixture data whose interactions reference user 3, absent from the
    users table (and the users table has user 2 with no interactions)."""
    interactions = pd.DataFrame(
        {
            'user_id': [0, 0, 1, 1, 1, 3],
            'item_id': [0, 1, 0, 2, 3, 1],
            'timestamp': [0, 1, 2, 3, 4, 5],
            'rating': [1.1, 1.2, 1.3, 2, 3, 4],
        }
    )
    user_table = pd.DataFrame({'user_id': [0, 1, 2], 'gender': [0, 1, 0]})
    item_table = pd.DataFrame(
        {
            'item_id': [0, 1, 2, 3],
            'category_id': [0, 0, 1, 2],
            'feature1': [1.1, 1.2, 1.3, 1.4],
        }
    )
    return {
        'interactions': interactions,
        'users': user_table,
        'items': item_table,
        'user_col': 'user_id',
        'item_col': 'item_id',
        'timestamp_col': 'timestamp',
        'ratings_col': 'rating',
        'users_cardinality': 3,
        'items_cardinality': 4,
    }
def create_dict_dataloader(X, Y, split, **kwargs):
    """Wrap features/labels in a DictDataset and return a DictDataLoader.

    X: array-like features (cast to FloatTensor); Y: integer labels (cast to
    LongTensor); split: split tag passed through to the dataset; kwargs are
    forwarded to the DictDataLoader constructor (batch size, shuffling, ...).
    """
    ds = DictDataset.from_tensors(torch.FloatTensor(X), torch.LongTensor(Y), split)
    return DictDataLoader(ds, **kwargs)
class EncodingBytes(bytes):
    """A lowercased byte string with an internal cursor, used while scanning
    a document prologue for its declared character encoding.

    Iterating yields one-byte slices; ``position`` exposes the cursor and
    raises StopIteration once the cursor has run past the end of the data.
    """

    def __new__(self, value):
        assert isinstance(value, bytes)
        # Matching is case-insensitive: store everything lowercased once.
        return bytes.__new__(self, value.lower())

    def __init__(self, value):
        # Cursor sits before the first byte; the first next() lands on index 0.
        self._position = (- 1)

    def __iter__(self):
        return self

    def __next__(self):
        # Advance and return the byte at the new position as a 1-byte slice.
        p = self._position = (self._position + 1)
        if (p >= len(self)):
            raise StopIteration
        elif (p < 0):
            raise TypeError
        return self[p:(p + 1)]

    def next(self):
        # Python 2 iterator-protocol alias.
        return self.__next__()

    def previous(self):
        # Return the current byte, then step the cursor backwards.
        p = self._position
        if (p >= len(self)):
            raise StopIteration
        elif (p < 0):
            raise TypeError
        self._position = p = (p - 1)
        return self[p:(p + 1)]

    def setPosition(self, position):
        # Setting past-the-end is an error signalled as exhaustion.
        if (self._position >= len(self)):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if (self._position >= len(self)):
            raise StopIteration
        if (self._position >= 0):
            return self._position
        else:
            # Before the first byte: no valid position yet.
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position:(self.position + 1)]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Advance past any bytes in ``chars``; return the first byte not in
        the set, or None when the data is exhausted."""
        p = self.position
        while (p < len(self)):
            c = self[p:(p + 1)]
            if (c not in chars):
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        """Advance until a byte in ``chars`` is found; return it, or None
        when the data is exhausted."""
        p = self.position
        while (p < len(self)):
            c = self[p:(p + 1)]
            if (c in chars):
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """If the data at the cursor starts with ``bytes``, consume it and
        return True; otherwise leave the cursor and return False."""
        rv = self.startswith(bytes, self.position)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Move the cursor onto the last byte of the next occurrence of
        ``bytes``; raise StopIteration when it does not occur."""
        try:
            self._position = ((self.index(bytes, self.position) + len(bytes)) - 1)
        except ValueError:
            raise StopIteration
        return True
def test_gcn_lstm_model_input_output():
    """The GCN_LSTM input/output tensors must match the fixture's node count
    and sequence length (output predicts one step for every node)."""
    (fx, fy, a) = get_timeseries_graph_data()
    gcn_lstm_model = GCN_LSTM(seq_len=fx.shape[(- 1)], adj=a, gc_layer_sizes=[8, 8, 16], gc_activations=['relu', 'relu', 'relu'], lstm_layer_sizes=[8, 16, 32], lstm_activations=['tanh'])
    (x_input, x_output) = gcn_lstm_model.in_out_tensors()
    # Input keeps (nodes, timesteps); output has one value per node.
    assert (x_input.shape[1] == fx.shape[1])
    assert (x_input.shape[2] == fx.shape[2])
    assert (x_output.shape[1] == fx.shape[(- 2)])
class Metrics():
    """Depth-estimation error metrics."""

    def calculate_metrics_mm(self, output, gt_item):
        """Return (RMSE, MAE) in millimetres over pixels whose ground truth
        exceeds 0.1 (values are assumed to be in metres)."""
        valid_mask = gt_item > 0.1
        pred_mm = output[valid_mask] * 1000.0
        truth_mm = gt_item[valid_mask] * 1000.0
        abs_err = np.abs(pred_mm - truth_mm)
        rmse = np.sqrt(np.mean(abs_err ** 2))
        mae = np.mean(abs_err)
        return (rmse, mae)
def griffin_lim(magnitudes, stft_fn, n_iters=30):
    """Griffin-Lim phase reconstruction: start from random phases and
    alternately re-synthesize and re-analyze, keeping the given magnitudes.

    magnitudes: spectrogram tensor; stft_fn: object with inverse(mag, phase)
    and transform(signal); n_iters: refinement iterations.
    """
    init_phase = np.angle(np.exp((2j * np.pi) * np.random.rand(*magnitudes.size())))
    phase = torch.autograd.Variable(torch.from_numpy(init_phase.astype(np.float32)))
    signal = stft_fn.inverse(magnitudes, phase).squeeze(1)
    for _ in range(n_iters):
        (_, phase) = stft_fn.transform(signal)
        signal = stft_fn.inverse(magnitudes, phase).squeeze(1)
    return signal
def test_add_constructor(provide_callables_from_fixtures_modules, default_test_case):
    """Adding a constructor for Basket(foo: int) at position 0 must create a
    statement of the Basket type plus one statement for the int argument."""
    generic_constructor = gao.GenericConstructor(owner=default_test_case.test_cluster.type_system.to_type_info(provide_callables_from_fixtures_modules['Basket']), inferred_signature=InferredSignature(signature=Signature(parameters=[Parameter(name='foo', kind=Parameter.POSITIONAL_OR_KEYWORD, annotation=int)]), original_return_type=default_test_case.test_cluster.type_system.convert_type_hint(None), original_parameters={'foo': default_test_case.test_cluster.type_system.convert_type_hint(int)}, type_system=default_test_case.test_cluster.type_system))
    factory = tf.TestFactory(default_test_case.test_cluster)
    result = factory.add_constructor(default_test_case, generic_constructor, position=0)
    assert (result.type == default_test_case.test_cluster.type_system.convert_type_hint(provide_callables_from_fixtures_modules['Basket']))
    # Two statements: the int argument plus the constructor call itself.
    assert (default_test_case.size() == 2)
class LabelCooccurrenceGraphBuilder(GraphBuilderBase):
    """Builds a label co-occurrence graph from a multi-label matrix: an edge
    (a, b) exists when labels a and b are assigned to the same sample.

    All three flags must be passed explicitly as booleans; defaults of None
    are deliberately rejected so callers state their intent.
    """

    def __init__(self, weighted=None, include_self_edges=None, normalize_self_edges=None):
        super(LabelCooccurrenceGraphBuilder, self).__init__()
        if (weighted not in [True, False]):
            raise ValueError('Weighted needs to be a boolean')
        if (include_self_edges not in [True, False]):
            raise ValueError('Decision whether to include self edges needs to be a boolean')
        # normalize_self_edges only needs to be boolean when self edges exist.
        if (include_self_edges and (normalize_self_edges not in [True, False])):
            raise ValueError('Decision whether to normalize self edges needs to be a boolean')
        if (normalize_self_edges and (not include_self_edges)):
            raise ValueError('Include self edges must be set to true if normalization is true')
        if (normalize_self_edges and (not weighted)):
            raise ValueError('Normalizing self-edge weights_ does not make sense in an unweighted graph')
        self.is_weighted = weighted
        self.include_self_edges = include_self_edges
        self.normalize_self_edges = normalize_self_edges

    def transform(self, y):
        """Return {(label_a, label_b): weight} for the label matrix ``y``;
        keys are ordered pairs with a <= b (or a < b without self edges)."""
        label_data = get_matrix_in_format(y, 'lil')
        label_count = label_data.shape[1]
        edge_map = {}
        # Each LIL row lists the label indices present in one sample.
        for row in label_data.rows:
            if self.include_self_edges:
                pairs = [(a, b) for b in row for a in row if (a <= b)]
            else:
                pairs = [(a, b) for b in row for a in row if (a < b)]
            for p in pairs:
                if (p not in edge_map):
                    edge_map[p] = 1.0
                elif self.is_weighted:
                    # Unweighted graphs keep edges at 1.0; weighted ones count.
                    edge_map[p] += 1.0
        if self.normalize_self_edges:
            # Self pairs (i, i) are produced once per occurrence but represent
            # half an edge each; halve them to match graph conventions.
            for i in range(label_count):
                if ((i, i) in edge_map):
                    edge_map[(i, i)] = (edge_map[(i, i)] / 2.0)
        return edge_map
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """COCO detection dataset producing BoxList targets with optional mask,
    keypoint, human-parsing, and DensePose-UV annotations.

    ann_types selects which annotation kinds ('segm', 'keypoints', 'parsing',
    'uv') are attached to each target.
    """

    def __init__(self, ann_file, root, remove_images_without_annotations, ann_types, transforms=None):
        super(COCODataset, self).__init__(root, ann_file)
        # Sort for deterministic ordering across runs.
        self.ids = sorted(self.ids)
        if remove_images_without_annotations:
            # Keep only images with at least one usable annotation of the
            # requested kinds.
            ids = []
            for img_id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno, ann_types):
                    ids.append(img_id)
            self.ids = ids
        # COCO category ids are sparse; remap to contiguous 1..K (0 = background).
        self.json_category_id_to_contiguous_id = {v: (i + 1) for (i, v) in enumerate(self.coco.getCatIds())}
        self.contiguous_category_id_to_json_id = {v: k for (k, v) in self.json_category_id_to_contiguous_id.items()}
        self.id_to_img_map = {k: v for (k, v) in enumerate(self.ids)}
        category_ids = self.coco.getCatIds()
        categories = [c['name'] for c in self.coco.loadCats(category_ids)]
        self.classes = (['__background__'] + categories)
        self.ann_types = ann_types
        if ('parsing' in self.ann_types):
            # Parsing annotations need the flip lookup tables initialized.
            set_flip(self.root)
        self._transforms = transforms

    def __getitem__(self, idx):
        """Return (image, BoxList target, idx) for the idx-th kept image."""
        (img, anno) = super(COCODataset, self).__getitem__(idx)
        # Drop crowd regions; they are not used as training instances.
        if (len(anno) > 0):
            if ('iscrowd' in anno[0]):
                anno = [obj for obj in anno if (obj['iscrowd'] == 0)]
        boxes = [obj['bbox'] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape((- 1), 4)
        # COCO boxes are xywh; convert to xyxy for downstream code.
        target = BoxList(boxes, img.size, mode='xywh').convert('xyxy')
        classes = [obj['category_id'] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field('labels', classes)
        if ('segm' in self.ann_types):
            masks = [obj['segmentation'] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field('masks', masks)
        if ('keypoints' in self.ann_types):
            if (anno and ('keypoints' in anno[0])):
                keypoints = [obj['keypoints'] for obj in anno]
                keypoints = PersonKeypoints(keypoints, img.size)
                target.add_field('keypoints', keypoints)
        if ('parsing' in self.ann_types):
            parsing = [get_parsing(self.root, obj['parsing']) for obj in anno]
            parsing = Parsing(parsing, img.size)
            target.add_field('parsing', parsing)
        if ('uv' in self.ann_types):
            # DensePose UV fields are optional per instance; keep alignment
            # with an empty entry when absent.
            uv_ann = []
            for anno_uv in anno:
                if ('dp_x' in anno_uv):
                    uv_ann.append([anno_uv['dp_x'], anno_uv['dp_y'], anno_uv['dp_I'], anno_uv['dp_U'], anno_uv['dp_V'], anno_uv['dp_masks']])
                else:
                    uv_ann.append([])
            uv = DenseposeUVs(uv_ann, img.size)
            target.add_field('uv', uv)
        target = target.clip_to_image(remove_empty=True)
        if (self._transforms is not None):
            (img, target) = self._transforms(img, target)
        return (img, target, idx)

    def get_img_info(self, index):
        # Raw COCO image record (width, height, file_name, ...).
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data

    def pull_image(self, index):
        """Load the index-th image as a BGR numpy array via OpenCV."""
        img_id = self.id_to_img_map[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return cv2.imread(os.path.join(self.root, path), cv2.IMREAD_COLOR)
def get_version() -> str:
    """Read the package version from the repository's pyproject.toml
    (two directories above this file).

    Uses Path.read_text so the file handle is closed promptly — the original
    left the handle from open() unclosed.
    """
    path = (Path(__file__).resolve().parents[2] / 'pyproject.toml')
    pyproject = toml.loads(path.read_text())
    return cast(str, pyproject['tool']['poetry']['version'])
class ImageDataset(Dataset):
    """Dataset of raw image sequences with optional temporal context frames.

    Frames are grouped by session directory; a frame is usable only when all
    of its backward/forward context frames exist in the same session.
    """

    def __init__(self, root_dir, split, data_transform=None, forward_context=0, back_context=0, strides=(1,), depth_type=None, **kwargs):
        super().__init__()
        assert ((depth_type is None) or (depth_type == '')), 'ImageDataset currently does not support depth types'
        assert ((len(strides) == 1) and (strides[0] == 1)), 'ImageDataset currently only supports stride of 1.'
        self.root_dir = root_dir
        # Filename template used to map a frame index back to a filename.
        self.split = split
        self.backward_context = back_context
        self.forward_context = forward_context
        self.has_context = ((self.backward_context + self.forward_context) > 0)
        # Only stride 1 is supported (enforced by the assert above).
        self.strides = 1
        self.files = []
        file_tree = read_files(root_dir)
        for (k, v) in file_tree.items():
            # Membership tests against a set keep the context check O(1).
            file_set = set(file_tree[k])
            files = [fname for fname in sorted(v) if self._has_context(fname, file_set)]
            self.files.extend([[k, fname] for fname in files])
        self.data_transform = data_transform

    def __len__(self):
        return len(self.files)

    def _change_idx(self, idx, filename):
        # Rebuild a sibling frame's filename from its index, keeping the extension.
        (_, ext) = os.path.splitext(os.path.basename(filename))
        return (self.split.format(idx) + ext)

    def _has_context(self, filename, file_set):
        # A frame is usable only if every context frame exists in the session.
        context_paths = self._get_context_file_paths(filename)
        return all([(f in file_set) for f in context_paths])

    def _get_context_file_paths(self, filename):
        # Index offsets: [-backward*stride .. -1] and [+stride .. +forward*stride].
        fidx = get_idx(filename)
        idxs = (list(np.arange(((- self.backward_context) * self.strides), 0, self.strides)) + list((np.arange(0, (self.forward_context * self.strides), self.strides) + self.strides)))
        return [self._change_idx((fidx + i), filename) for i in idxs]

    def _read_rgb_context_files(self, session, filename):
        # NOTE(review): the loop variable shadows `filename`, so every context
        # image loads from the context path as intended — confirm upstream.
        context_paths = self._get_context_file_paths(filename)
        return [load_image(os.path.join(self.root_dir, session, filename)) for filename in context_paths]

    def _read_rgb_file(self, session, filename):
        return load_image(os.path.join(self.root_dir, session, filename))

    def __getitem__(self, idx):
        """Return a sample dict: idx, filename, rgb image, dummy intrinsics,
        plus rgb_context when context frames are configured."""
        (session, filename) = self.files[idx]
        image = self._read_rgb_file(session, filename)
        sample = {'idx': idx, 'filename': ('%s_%s' % (session, os.path.splitext(filename)[0])), 'rgb': image, 'intrinsics': dummy_calibration(image)}
        if self.has_context:
            sample['rgb_context'] = self._read_rgb_context_files(session, filename)
        if self.data_transform:
            sample = self.data_transform(sample)
        return sample
class Lexicon():
    """Parser for a Bliss/RASR-style pronunciation lexicon XML file.

    Reads the phoneme inventory and all lemmas (orthography plus scored
    pronunciations) from ``filename`` (optionally gzip-compressed) using
    incremental XML parsing, so large lexica never need a full DOM tree.

    Attributes:
      phoneme_list: phoneme symbols in file order.
      phonemes: dict symbol -> {'index', 'symbol', 'variation'}.
      lemmas: dict orthography -> {'orth', 'phons'}.
    """

    def __init__(self, filename):
        """:param str filename: path to the lexicon XML file (``.gz`` supported)."""
        print('Loading lexicon', filename, file=log.v4)
        raw_file = open(filename, 'rb')
        lex_file = raw_file
        if filename.endswith('.gz'):
            lex_file = gzip.GzipFile(fileobj=raw_file)
        # Fix: `unicode` only exists on Python 2; referencing it unguarded
        # raised NameError under Python 3.
        try:
            string_types = (str, unicode)  # noqa: F821  (Python 2)
        except NameError:
            string_types = (str,)  # Python 3
        self.phoneme_list = []
        self.phonemes = {}
        self.lemmas = {}
        context = iter(ElementTree.iterparse(lex_file, events=('start', 'end')))
        (_, root) = next(context)
        tree = [root]
        for (event, elem) in context:
            if (event == 'start'):
                tree += [elem]
            elif (event == 'end'):
                assert (tree[(- 1)] is elem)
                tree = tree[:(- 1)]
                if (elem.tag == 'phoneme'):
                    symbol = elem.find('symbol').text.strip()
                    assert isinstance(symbol, string_types)
                    if (elem.find('variation') is not None):
                        variation = elem.find('variation').text.strip()
                    else:
                        variation = 'context'
                    assert (symbol not in self.phonemes)
                    assert (variation in ['context', 'none'])
                    self.phoneme_list.append(symbol)
                    self.phonemes[symbol] = {'index': len(self.phonemes), 'symbol': symbol, 'variation': variation}
                    # Free already-processed elements to bound memory usage.
                    root.clear()
                elif (elem.tag == 'phoneme-inventory'):
                    print(('Finished phoneme inventory, %i phonemes' % len(self.phonemes)), file=log.v4)
                    root.clear()
                elif (elem.tag == 'lemma'):
                    for orth_elem in elem.findall('orth'):
                        orth = (orth_elem.text or '').strip()
                        phons = [{'phon': e.text.strip(), 'score': float(e.attrib.get('score', 0))} for e in elem.findall('phon')]
                        assert (orth not in self.lemmas)
                        self.lemmas[orth] = {'orth': orth, 'phons': phons}
                    root.clear()
        # Fix: the file handle(s) were previously leaked.
        lex_file.close()
        if lex_file is not raw_file:
            raw_file.close()
        print(('Finished whole lexicon, %i lemmas' % len(self.lemmas)), file=log.v4)
def create_pipeline_configuration(DEBUG=False, batch_size=32):
    """Return the static 4-stage pipeline-parallel partition configuration for
    a T5ForConditionalGeneration model.

    The per-tensor shapes in the literal below were recorded with batch size
    32; the loops at the end rewrite the batch dimension of every batched
    tensor to ``batch_size``. With ``DEBUG=True`` every stage is placed on CPU
    instead of ``cuda:0`` .. ``cuda:3``.

    :param DEBUG: run all partitions on CPU when True.
    :param batch_size: batch size patched into every batched shape.
    :return: the (mutated) configuration dict.
    """
    # Auto-traced partition description — do not hand-edit the shapes.
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (CrossEntropyLoss, T5Block, T5LayerNorm, StatelessEmbedding, Linear, Dropout), 'model_inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2]}, 'decoder_attention_mask': {'shape': torch.Size([32, 1, 32, 32]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [3]}, 'decoder_input_ids': {'shape': torch.Size([32, 32]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([32, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [3]}, 'lm_labels': {'shape': torch.Size([32, 32]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [3]}}, 'model_outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 3}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([32, 32]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([32, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22': {'shape': torch.Size([32, 8, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1, 2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[1]': {'shape': torch.Size([32, 384, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([32, 32, 512]), 'dtype': 
    torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')]}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22': {'shape': torch.Size([32, 8, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[1]': {'shape': torch.Size([32, 384, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[3]': {'shape': torch.Size([32, 384, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')]}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22': {'shape': torch.Size([32, 8, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[3]': {'shape': torch.Size([32, 384, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]': {'shape': torch.Size([32, 384, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')]}, 3: {'stage_cls': Partition3, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([32, 1, 32, 32]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 
    'is_batched': True, 'created_by': (- 1)}, 'lm_labels': {'shape': torch.Size([32, 32]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]': {'shape': torch.Size([32, 384, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([32, 32, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')]}}}
    batch_dim = config['batch_dim']
    # Patch the batch dimension of the top-level model inputs/outputs.
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # Patch the batch dimension of every per-stage input/output tensor.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
def test_timeout_non_int_fails():
    """Passing a non-integer --timeout value must make argument parsing exit."""
    parser = _get_command_line_parser(['valid-detector'], [], [])
    bad_args = ['run', 'ex1', 'valid-detector', '--timeout', 'string']
    assert_raises(SystemExit, parser.parse_args, bad_args)
def _read_mat_binary(fd):
header = fd.read(3).decode()
if header.startswith('CM'):
return _read_compressed_mat(fd, header)
elif (header == 'FM '):
sample_size = 4
elif (header == 'DM '):
sample_size = 8
else:
raise UnknownMatrixHeader(("The header contained '%s'" % header))
assert (sample_size > 0)
(s1, rows, s2, cols) = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
buf = fd.read(((rows * cols) * sample_size))
if (sample_size == 4):
vec = np.frombuffer(buf, dtype='float32')
elif (sample_size == 8):
vec = np.frombuffer(buf, dtype='float64')
else:
raise BadSampleSize
mat = np.reshape(vec, (rows, cols))
return mat |
# NOTE(review): the bare `_optimizer('nag')` call below looks like a decorator
# whose '@' prefix (likely `@register_optimizer('nag')`) was lost during code
# extraction — confirm against the original fairseq source.
_optimizer('nag')
class FairseqNAG(FairseqOptimizer):
    """Nesterov accelerated gradient (NAG) optimizer wrapper for fairseq."""
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = NAG(params, **self.optimizer_config)
    # NOTE(review): presumably a `@staticmethod` whose decorator was stripped
    # (no `self` parameter) — confirm before calling on an instance.
    def add_args(parser):
        """Register --momentum and --weight-decay command-line options."""
        parser.add_argument('--momentum', default=0.99, type=float, metavar='M', help='momentum factor')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
    # NOTE(review): `__init__` reads `self.optimizer_config` without calling it,
    # so this was presumably a `@property` whose decorator was stripped.
    def optimizer_config(self):
        """Keyword arguments forwarded to the NAG constructor."""
        return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay}
class FuncDefNode(StatNode, BlockNode):
    """Abstract base node for function definitions (C and Python functions)
    in the Cython AST.

    Provides the shared analysis passes (default values, annotations,
    argument-type alignment) and, most importantly, the emission of the full
    C function definition: scope/closure setup, GIL handling, buffer
    acquisition/release, profiling/tracing hooks, and the error/return
    cleanup paths.
    """
    # Class-level defaults; instances/subclasses override as needed.
    py_func = None
    needs_closure = False
    needs_outer_scope = False
    pymethdef_required = False
    is_generator = False
    is_generator_body = False
    is_async_def = False
    # NOTE(review): mutable class-level default; the code below only ever
    # rebinds it (`self.modifiers = self.modifiers + [...]`), never mutates
    # it in place — keep it that way.
    modifiers = []
    has_fused_arguments = False
    star_arg = None
    starstar_arg = None
    is_cyfunction = False
    code_object = None
    def analyse_default_values(self, env):
        """Type-analyse argument default values; flag misplaced defaults."""
        default_seen = 0
        for arg in self.args:
            if arg.default:
                default_seen = 1
                if arg.is_generic:
                    arg.default = arg.default.analyse_types(env)
                    arg.default = arg.default.coerce_to(arg.type, env)
                else:
                    error(arg.pos, 'This argument cannot have a default value')
                    arg.default = None
            elif arg.kw_only:
                # Keyword-only args may follow defaults without triggering the error.
                default_seen = 1
            elif default_seen:
                error(arg.pos, 'Non-default argument following default argument')
    def analyse_annotation(self, env, annotation):
        """Analyse one annotation expression unless it denotes a type under annotation typing."""
        if (annotation is None):
            return None
        if ((not env.directives['annotation_typing']) or (annotation.analyse_as_type(env) is None)):
            annotation = annotation.analyse_types(env)
        return annotation
    def analyse_annotations(self, env):
        """Analyse every argument annotation in place."""
        for arg in self.args:
            if arg.annotation:
                arg.annotation = self.analyse_annotation(env, arg.annotation)
    def align_argument_type(self, env, arg):
        """Reconcile an argument's declared type with `cython.locals` directives
        or type annotations, reporting conflicting declarations."""
        directive_locals = self.directive_locals
        orig_type = arg.type
        if (arg.name in directive_locals):
            type_node = directive_locals[arg.name]
            other_type = type_node.analyse_as_type(env)
        elif (isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']):
            type_node = arg.annotation
            other_type = arg.inject_type_from_annotations(env)
            if (other_type is None):
                return arg
        else:
            return arg
        if (other_type is None):
            error(type_node.pos, 'Not a type')
        elif ((orig_type is not py_object_type) and (not orig_type.same_as(other_type))):
            error(arg.base_type.pos, 'Signature does not agree with previous declaration')
            error(type_node.pos, 'Previous declaration here')
        else:
            arg.type = other_type
        return arg
    def need_gil_acquisition(self, lenv):
        """Whether the function must acquire the GIL on entry (overridden by subclasses)."""
        return 0
    def create_local_scope(self, env):
        """Create and return the local (or closure) scope for this function body."""
        genv = env
        # Skip class scopes: function locals live under the enclosing module/function.
        while (genv.is_py_class_scope or genv.is_c_class_scope):
            genv = genv.outer_scope
        if self.needs_closure:
            lenv = ClosureScope(name=self.entry.name, outer_scope=genv, parent_scope=env, scope_name=self.entry.cname)
        else:
            lenv = LocalScope(name=self.entry.name, outer_scope=genv, parent_scope=env)
        lenv.return_type = self.return_type
        type = self.entry.type
        if type.is_cfunction:
            lenv.nogil = (type.nogil and (not type.with_gil))
        self.local_scope = lenv
        lenv.directives = env.directives
        return lenv
    def generate_function_body(self, env, code):
        """Emit the C code for the function body statements."""
        self.body.generate_execution_code(code)
    def generate_function_definitions(self, env, code):
        """Emit the complete C definition of this function: header, scope and
        closure setup, argument handling, body, and error/return cleanup."""
        from . import Buffer
        if self.return_type.is_memoryviewslice:
            from . import MemoryView
        lenv = self.local_scope
        if (lenv.is_closure_scope and (not lenv.is_passthrough)):
            outer_scope_cname = ('%s->%s' % (Naming.cur_scope_cname, Naming.outer_scope_cname))
        else:
            outer_scope_cname = Naming.outer_scope_cname
        lenv.mangle_closure_cnames(outer_scope_cname)
        # --- Emit nested functions and lambdas first ---
        self.body.generate_function_definitions(lenv, code)
        self.generate_lambda_definitions(lenv, code)
        # --- Special buffer-protocol slots need extra handling below ---
        is_getbuffer_slot = ((self.entry.name == '__getbuffer__') and self.entry.scope.is_c_class_scope)
        is_releasebuffer_slot = ((self.entry.name == '__releasebuffer__') and self.entry.scope.is_c_class_scope)
        is_buffer_slot = (is_getbuffer_slot or is_releasebuffer_slot)
        if is_buffer_slot:
            if ('cython_unused' not in self.modifiers):
                self.modifiers = (self.modifiers + ['cython_unused'])
        preprocessor_guard = self.get_preprocessor_guard()
        profile = code.globalstate.directives['profile']
        linetrace = code.globalstate.directives['linetrace']
        if (profile or linetrace):
            code.globalstate.use_utility_code(UtilityCode.load_cached('Profile', 'Profile.c'))
        # --- Function header and local declarations ---
        code.enter_cfunc_scope(lenv)
        code.return_from_error_cleanup_label = code.new_label()
        code.funcstate.gil_owned = (not lenv.nogil)
        code.mark_pos(self.pos)
        self.generate_cached_builtins_decls(lenv, code)
        code.putln('')
        if preprocessor_guard:
            code.putln(preprocessor_guard)
        with_pymethdef = (self.needs_assignment_synthesis(env, code) or self.pymethdef_required)
        if self.py_func:
            self.py_func.generate_function_header(code, with_pymethdef=with_pymethdef, proto_only=True)
        self.generate_function_header(code, with_pymethdef=with_pymethdef)
        cenv = env
        while (cenv.is_py_class_scope or cenv.is_c_class_scope):
            cenv = cenv.outer_scope
        if self.needs_closure:
            code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
            code.putln(';')
        elif self.needs_outer_scope:
            if lenv.is_passthrough:
                code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
                code.putln(';')
            code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
            code.putln(';')
        self.generate_argument_declarations(lenv, code)
        for entry in lenv.var_entries:
            if (not (entry.in_closure or entry.is_arg)):
                code.put_var_declaration(entry)
        # Declare and initialise the return value variable.
        init = ''
        if (not self.return_type.is_void):
            if self.return_type.is_pyobject:
                init = ' = NULL'
            elif self.return_type.is_memoryviewslice:
                init = (' = ' + MemoryView.memslice_entry_init)
            code.putln(('%s%s;' % (self.return_type.declaration_code(Naming.retval_cname), init)))
        # Temp declarations are inserted here once their number is known.
        tempvardecl_code = code.insertion_point()
        self.generate_keyword_list(code)
        # --- GIL / refnanny setup ---
        acquire_gil = self.acquire_gil
        have_object_args = (self.needs_closure or self.needs_outer_scope)
        for arg in lenv.arg_entries:
            if arg.type.is_pyobject:
                have_object_args = True
                break
        used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used]
        acquire_gil_for_var_decls_only = (lenv.nogil and lenv.has_with_gil_block and (have_object_args or used_buffer_entries))
        acquire_gil_for_refnanny_only = (lenv.nogil and lenv.has_with_gil_block and (not acquire_gil_for_var_decls_only))
        use_refnanny = ((not lenv.nogil) or lenv.has_with_gil_block)
        if (acquire_gil or acquire_gil_for_var_decls_only):
            code.put_ensure_gil()
            code.funcstate.gil_owned = True
        elif (lenv.nogil and lenv.has_with_gil_block):
            code.declare_gilstate()
        if (profile or linetrace):
            if (not self.is_generator):
                tempvardecl_code.put_trace_declarations()
                code_object = (self.code_object.calculate_result_code(code) if self.code_object else None)
                code.put_trace_frame_init(code_object)
        if is_getbuffer_slot:
            self.getbuffer_check(code)
        if use_refnanny:
            tempvardecl_code.put_declare_refcount_context()
            code.put_setup_refcount_context(self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)
        if is_getbuffer_slot:
            self.getbuffer_init(code)
        # --- Closure allocation / outer-scope binding ---
        if self.needs_closure:
            tp_slot = TypeSlots.ConstructorSlot('tp_new', '__new__')
            slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
            if (not slot_func_cname):
                slot_func_cname = ('%s->tp_new' % lenv.scope_class.type.typeptr_cname)
            code.putln(('%s = (%s)%s(%s, %s, NULL);' % (Naming.cur_scope_cname, lenv.scope_class.type.empty_declaration_code(), slot_func_cname, lenv.scope_class.type.typeptr_cname, Naming.empty_tuple)))
            code.putln(('if (unlikely(!%s)) {' % Naming.cur_scope_cname))
            # On allocation failure, park Py_None in the scope var so cleanup is safe.
            code.putln(('%s = %s;' % (Naming.cur_scope_cname, lenv.scope_class.type.cast_code('Py_None'))))
            code.put_incref('Py_None', py_object_type)
            code.putln(code.error_goto(self.pos))
            code.putln('} else {')
            code.put_gotref(Naming.cur_scope_cname)
            code.putln('}')
        if self.needs_outer_scope:
            if self.is_cyfunction:
                code.putln(('%s = (%s) __Pyx_CyFunction_GetClosure(%s);' % (outer_scope_cname, cenv.scope_class.type.empty_declaration_code(), Naming.self_cname)))
            else:
                code.putln(('%s = (%s) %s;' % (outer_scope_cname, cenv.scope_class.type.empty_declaration_code(), Naming.self_cname)))
            if lenv.is_passthrough:
                code.putln(('%s = %s;' % (Naming.cur_scope_cname, outer_scope_cname)))
            elif self.needs_closure:
                code.put_incref(outer_scope_cname, cenv.scope_class.type)
                code.put_giveref(outer_scope_cname)
        if (profile or linetrace):
            if (not self.is_generator):
                if self.is_wrapper:
                    trace_name = (self.entry.name + ' (wrapper)')
                else:
                    trace_name = self.entry.name
                code.put_trace_call(trace_name, self.pos, nogil=(not code.funcstate.gil_owned))
            code.funcstate.can_trace = True
        # --- Argument setup: parsing, refcounting, buffers, type checks ---
        self.generate_argument_parsing_code(env, code)
        is_cdef = isinstance(self, CFuncDefNode)
        for entry in lenv.arg_entries:
            if entry.type.is_pyobject:
                if ((acquire_gil or (len(entry.cf_assignments) > 1)) and (not entry.in_closure)):
                    code.put_var_incref(entry)
            elif (is_cdef and entry.type.is_memoryviewslice and (len(entry.cf_assignments) > 1)):
                code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned)
        for entry in lenv.var_entries:
            if (entry.is_arg and (len(entry.cf_assignments) > 1) and (not entry.in_closure)):
                if entry.xdecref_cleanup:
                    code.put_var_xincref(entry)
                else:
                    code.put_var_incref(entry)
        for entry in (lenv.var_entries + lenv.arg_entries):
            if (entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used):
                Buffer.put_init_vars(entry, code)
        self.generate_argument_type_tests(code)
        for entry in lenv.arg_entries:
            if entry.type.is_buffer:
                Buffer.put_acquire_arg_buffer(entry, code, self.pos)
        if acquire_gil_for_var_decls_only:
            code.put_release_ensured_gil()
            code.funcstate.gil_owned = False
        # --- Function body ---
        self.generate_function_body(env, code)
        code.mark_pos(self.pos, trace=False)
        code.putln('')
        code.putln('/* function exit code */')
        # Default return value when the body falls off the end.
        if (not self.body.is_terminator):
            if self.return_type.is_pyobject:
                lhs = Naming.retval_cname
                code.put_init_to_py_none(lhs, self.return_type)
            else:
                val = self.return_type.default_value
                if val:
                    code.putln(('%s = %s;' % (Naming.retval_cname, val)))
                elif (not self.return_type.is_void):
                    code.putln(('__Pyx_pretend_to_initialize(&%s);' % Naming.retval_cname))
        # --- Error cleanup path ---
        if (code.error_label in code.labels_used):
            if (not self.body.is_terminator):
                code.put_goto(code.return_label)
            code.put_label(code.error_label)
            for (cname, type) in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type, have_gil=(not lenv.nogil))
            buffers_present = (len(used_buffer_entries) > 0)
            if buffers_present:
                # Release buffers while preserving the pending exception.
                code.globalstate.use_utility_code(restore_exception_utility_code)
                code.putln('{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;')
                code.putln('__Pyx_PyThreadState_declare')
                code.putln('__Pyx_PyThreadState_assign')
                code.putln('__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);')
                for entry in used_buffer_entries:
                    Buffer.put_release_buffer_code(code, entry)
                code.putln('__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}')
            if self.return_type.is_memoryviewslice:
                MemoryView.put_init_entry(Naming.retval_cname, code)
                err_val = Naming.retval_cname
            else:
                err_val = self.error_value()
            exc_check = self.caller_will_check_exceptions()
            if ((err_val is not None) or exc_check):
                if (lenv.nogil and (not lenv.has_with_gil_block)):
                    code.putln('{')
                    code.put_ensure_gil()
                code.put_add_traceback(self.entry.qualified_name)
                if (lenv.nogil and (not lenv.has_with_gil_block)):
                    code.put_release_ensured_gil()
                    code.putln('}')
            else:
                # Nobody can see the exception: report it as unraisable.
                warning(self.entry.pos, ("Unraisable exception in function '%s'." % self.entry.qualified_name), 0)
                code.put_unraisable(self.entry.qualified_name, lenv.nogil)
            default_retval = self.return_type.default_value
            if ((err_val is None) and default_retval):
                err_val = default_retval
            if (err_val is not None):
                if (err_val != Naming.retval_cname):
                    code.putln(('%s = %s;' % (Naming.retval_cname, err_val)))
            elif (not self.return_type.is_void):
                code.putln(('__Pyx_pretend_to_initialize(&%s);' % Naming.retval_cname))
            if is_getbuffer_slot:
                self.getbuffer_error_cleanup(code)
            if (buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice):
                code.put_goto(code.return_from_error_cleanup_label)
        # --- Normal return path and shared cleanup ---
        code.put_label(code.return_label)
        for entry in used_buffer_entries:
            Buffer.put_release_buffer_code(code, entry)
        if is_getbuffer_slot:
            self.getbuffer_normal_cleanup(code)
        if self.return_type.is_memoryviewslice:
            cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname))
            code.putln(('if (%s) {' % cond))
            if env.nogil:
                code.put_ensure_gil()
            code.putln('PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
            if env.nogil:
                code.put_release_ensured_gil()
            code.putln('}')
        code.put_label(code.return_from_error_cleanup_label)
        for entry in lenv.var_entries:
            if ((not entry.used) or entry.in_closure):
                continue
            if entry.type.is_memoryviewslice:
                code.put_xdecref_memoryviewslice(entry.cname, have_gil=(not lenv.nogil))
            elif entry.type.is_pyobject:
                if ((not entry.is_arg) or (len(entry.cf_assignments) > 1)):
                    if entry.xdecref_cleanup:
                        code.put_var_xdecref(entry)
                    else:
                        code.put_var_decref(entry)
        for entry in lenv.arg_entries:
            if entry.type.is_pyobject:
                if ((acquire_gil or (len(entry.cf_assignments) > 1)) and (not entry.in_closure)):
                    code.put_var_decref(entry)
            elif (entry.type.is_memoryviewslice and ((not is_cdef) or (len(entry.cf_assignments) > 1))):
                code.put_xdecref_memoryviewslice(entry.cname, have_gil=(not lenv.nogil))
        if self.needs_closure:
            code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
        if (not lenv.nogil):
            default_retval = self.return_type.default_value
            err_val = self.error_value()
            if ((err_val is None) and default_retval):
                err_val = default_retval
            if self.return_type.is_pyobject:
                code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
        if (self.entry.is_special and (self.entry.name == '__hash__')):
            # __hash__ must never return -1 unless an exception is set.
            code.putln(('if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;' % (Naming.retval_cname, Naming.retval_cname)))
        if (profile or linetrace):
            code.funcstate.can_trace = False
            if (not self.is_generator):
                if self.return_type.is_pyobject:
                    code.put_trace_return(Naming.retval_cname, nogil=(not code.funcstate.gil_owned))
                else:
                    code.put_trace_return('Py_None', nogil=(not code.funcstate.gil_owned))
        if (not lenv.nogil):
            code.put_finish_refcount_context()
        if (acquire_gil or (lenv.nogil and lenv.has_with_gil_block)):
            code.put_release_ensured_gil()
            code.funcstate.gil_owned = False
        if (not self.return_type.is_void):
            code.putln(('return %s;' % Naming.retval_cname))
        code.putln('}')
        if preprocessor_guard:
            code.putln(('#endif /*!(%s)*/' % preprocessor_guard))
        tempvardecl_code.put_temp_declarations(code.funcstate)
        code.exit_cfunc_scope()
        if self.py_func:
            self.py_func.generate_function_definitions(env, code)
        self.generate_wrapper_functions(code)
    def declare_argument(self, env, arg):
        """Declare one argument in the function scope, validating its type."""
        if arg.type.is_void:
            error(arg.pos, "Invalid use of 'void'")
        elif ((not arg.type.is_complete()) and (not (arg.type.is_array or arg.type.is_memoryviewslice))):
            error(arg.pos, ("Argument type '%s' is incomplete" % arg.type))
        entry = env.declare_arg(arg.name, arg.type, arg.pos)
        if arg.annotation:
            entry.annotation = arg.annotation
        return entry
    def generate_arg_type_test(self, arg, code):
        """Emit a runtime extension-type check for a typed Python argument."""
        if arg.type.typeobj_is_available():
            code.globalstate.use_utility_code(UtilityCode.load_cached('ArgTypeTest', 'FunctionArguments.c'))
            typeptr_cname = arg.type.typeptr_cname
            arg_code = ('((PyObject *)%s)' % arg.entry.cname)
            code.putln(('if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (arg_code, typeptr_cname, arg.accept_none, arg.name, (arg.type.is_builtin_type and arg.type.require_exact), code.error_goto(arg.pos))))
        else:
            error(arg.pos, 'Cannot test type of extern C class without type object name specification')
    def generate_arg_none_check(self, arg, code):
        """Emit a runtime check rejecting None for a not-None argument."""
        if arg.type.is_memoryviewslice:
            cname = ('%s.memview' % arg.entry.cname)
        else:
            cname = arg.entry.cname
        code.putln(('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname))
        code.putln(('PyErr_Format(PyExc_TypeError, "Argument \'%%.%ds\' must not be None", "%s"); %s' % (max(200, len(arg.name)), arg.name, code.error_goto(arg.pos))))
        code.putln('}')
    def generate_wrapper_functions(self, code):
        """Hook for subclasses to emit wrapper functions; no-op here."""
        pass
    def generate_execution_code(self, code):
        """Emit assignment code for non-dynamic argument defaults."""
        code.mark_pos(self.pos)
        for arg in self.args:
            if (not arg.is_dynamic):
                arg.generate_assignment_code(code)
    def _get_py_buffer_info(self):
        """Return (Py_buffer argument entry, type of its 'obj' field or None)."""
        py_buffer = self.local_scope.arg_entries[1]
        try:
            obj_type = py_buffer.type.base_type.scope.entries['obj'].type
        except (AttributeError, KeyError):
            obj_type = None
        return (py_buffer, obj_type)
    def getbuffer_check(self, code):
        """Emit the view!=NULL guard required at the top of __getbuffer__."""
        (py_buffer, _) = self._get_py_buffer_info()
        view = py_buffer.cname
        code.putln(('if (%s == NULL) {' % view))
        code.putln('PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");')
        code.putln('return -1;')
        code.putln('}')
    def getbuffer_init(self, code):
        """Initialise view->obj at __getbuffer__ entry."""
        (py_buffer, obj_type) = self._get_py_buffer_info()
        view = py_buffer.cname
        if (obj_type and obj_type.is_pyobject):
            code.put_init_to_py_none(('%s->obj' % view), obj_type)
            code.put_giveref(('%s->obj' % view))
        else:
            code.putln(('%s->obj = NULL;' % view))
    def getbuffer_error_cleanup(self, code):
        """Clear view->obj on the __getbuffer__ error path."""
        (py_buffer, obj_type) = self._get_py_buffer_info()
        view = py_buffer.cname
        if (obj_type and obj_type.is_pyobject):
            code.putln(('if (%s->obj != NULL) {' % view))
            code.put_gotref(('%s->obj' % view))
            code.put_decref_clear(('%s->obj' % view), obj_type)
            code.putln('}')
        else:
            code.putln(('Py_CLEAR(%s->obj);' % view))
    def getbuffer_normal_cleanup(self, code):
        """Drop the Py_None placeholder in view->obj on the success path."""
        (py_buffer, obj_type) = self._get_py_buffer_info()
        view = py_buffer.cname
        if (obj_type and obj_type.is_pyobject):
            code.putln(('if (%s->obj == Py_None) {' % view))
            code.put_gotref(('%s->obj' % view))
            code.put_decref_clear(('%s->obj' % view), obj_type)
            code.putln('}')
    def get_preprocessor_guard(self):
        """Return the #if guard string for special-method slots, or None."""
        if (not self.entry.is_special):
            return None
        name = self.entry.name
        slot = TypeSlots.method_name_to_slot.get(name)
        if (not slot):
            return None
        if ((name == '__long__') and (not self.entry.scope.lookup_here('__int__'))):
            return None
        if ((name in ('__getbuffer__', '__releasebuffer__')) and self.entry.scope.is_c_class_scope):
            return None
        return slot.preprocessor_guard_code()
# NOTE(review): the bare `_module()` call below looks like a registry decorator
# whose '@' prefix and registry object (e.g. `@DATASETS.register_module()`)
# were lost during code extraction — confirm against the original source.
_module()
class DeepFashionDataset(CocoDataset):
    """COCO-format dataset for DeepFashion clothing detection/segmentation."""
    # Category names; their order defines the label indices.
    CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face')
def isend(tensor, dst):
    """Start a non-blocking send of ``tensor`` to rank ``dst``.

    Only valid in (deprecated) process-group mode; returns a
    ``_DistributedRequest`` wrapping the in-flight C-level send handle.
    """
    assert (torch.distributed.deprecated._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    return _DistributedRequest(torch._C._dist_isend(tensor, dst))
class SkipSubset(data.Dataset):
    """Dataset wrapper that keeps every n-th sample of the wrapped dataset.

    Fix: ``parser`` and ``transform`` were plain methods and ``transform``
    was defined twice (the setter-style ``transform(self, t)`` shadowed the
    getter, making it unreachable) — clearly ``@property``/``@setter``
    decorators stripped during extraction. They are restored here.
    """

    def __init__(self, dataset, n=2):
        """
        :param dataset: the dataset to subsample.
        :param n: keep every n-th sample (n >= 1).
        """
        self.dataset = dataset
        assert (n >= 1)
        # Indices of the retained samples: 0, n, 2n, ...
        self.indices = np.arange(len(dataset))[::n]

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)

    @property
    def parser(self):
        """Parser of the wrapped dataset."""
        return self.dataset.parser

    @property
    def transform(self):
        """Transform of the wrapped dataset."""
        return self.dataset.transform

    @transform.setter
    def transform(self, t):
        self.dataset.transform = t
def main():
    """Interactive cvui demo: toggle Canny edge detection on an image via a
    draggable settings window with trackbars for the thresholds (ESC quits)."""
    fruits = cv2.imread('fruits.jpg', cv2.IMREAD_COLOR)
    frame = np.zeros(fruits.shape, np.uint8)
    # cvui widgets mutate their arguments; single-element lists act as
    # pass-by-reference cells.
    low_threshold = [50]
    high_threshold = [150]
    use_canny = [False]
    settings = EnhancedWindow(10, 50, 270, 180, 'Settings')
    cvui.init(WINDOW_NAME)
    while True:
        if use_canny[0]:
            # Canny works on grayscale; convert back to BGR for display.
            frame = cv2.cvtColor(fruits, cv2.COLOR_BGR2GRAY)
            frame = cv2.Canny(frame, low_threshold[0], high_threshold[0], 3)
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        else:
            frame[:] = fruits[:]
        settings.begin(frame)
        if (settings.isMinimized() == False):
            cvui.checkbox('Use Canny Edge', use_canny)
            cvui.trackbar((settings.width() - 20), low_threshold, 5, 150)
            cvui.trackbar((settings.width() - 20), high_threshold, 80, 300)
            cvui.space(20)
            # NOTE(review): the trailing comma suggests an argument (font
            # scale/color?) was dropped during extraction — confirm against
            # the cvui.text API.
            cvui.text('Drag and minimize this settings window', 0.4, )
        settings.end()
        cvui.update()
        cv2.imshow(WINDOW_NAME, frame)
        if (cv2.waitKey(20) == 27):
            break
def test_unknown_data(testdir):
    """A parameter with an unknown `in` location should not abort the run.

    NOTE(review): the generated test-source template passed to make_test
    looks garbled by extraction (the leading ``\\()\\(`` was probably
    ``@schema.parametrize()`` / ``@settings(max_examples=1)`` decorators) —
    confirm against the original test suite before relying on it.
    """
    testdir.make_test('\()\(max_examples=1)\ndef test_(case):\n pass\n ', **as_param({'name': 'status', 'in': 'unknown', 'required': True, 'type': 'string'}), validate_schema=False)
    testdir.run_and_assert(passed=1)
def get_gold_standard_arc_seq(history_fn_list, model_space, metric_name_dict, with_skip_connection, with_input_blocks, num_input_blocks):
    """Build the gold-standard table (median loss/knowledge per architecture,
    plus their ranks) and a mapping from architecture string to index.

    :return: tuple (gs, arch2id) where ``gs`` is the aggregated/ranked
        history DataFrame and ``arch2id`` maps comma-joined architecture
        token strings to their position in the model-space enumeration.
    """
    model_gen = get_model_space_generator(model_space, with_skip_connection=with_skip_connection, with_input_blocks=with_input_blocks, num_input_blocks=num_input_blocks)
    history = read_history(history_fn_list, metric_name_dict)
    # Median-aggregate repeated runs of the same architecture ID.
    gs = history.groupby(by='ID', as_index=False).agg(np.median)
    gs['loss_rank'] = ss.rankdata(gs.loss)
    gs['knowledge_rank'] = ss.rankdata(gs.knowledge)
    archs = list(model_gen)
    arch2id = {','.join(str(token) for token in arch): pos for (pos, arch) in enumerate(archs)}
    return (gs, arch2id)
def dual_quaternion_mul(A, B, input):
    """Multiply the dual quaternion (A + eps*B) onto `input` split as (C, D).

    Implements (A + eps*B)(C + eps*D) = A*C + eps*(A*D + B*C) using the
    Hamilton-product matrices of A and B; halves of `input` along dim 1 are
    the real (C) and dual (D) parts.
    """
    half = input.size(1) // 2
    real_part, dual_part = torch.split(input, [half, half], dim=1)
    A_ham = make_quaternion_mul(A)
    B_ham = make_quaternion_mul(B)
    real_out = torch.mm(real_part, A_ham)
    dual_out = torch.mm(dual_part, A_ham) + torch.mm(real_part, B_ham)
    return torch.cat([real_out, dual_out], dim=1)
def diracnet18(pretrained=False):
    """Construct a DiracNet-18 model, optionally loading pretrained weights."""
    model = DiracNet(18)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['diracnet18']))
    return model
class RewardMLP(MLP):
    """MLP head used as a learned reward/score model (TF1 + Lasagne-style layers)."""
    def compute_reward(self, X):
        """Reward = -log(1 - p) for the network's sigmoid output p, evaluated on X."""
        predits = (- tf.log((1.0 - self.output)))
        Y_p = self._predict(predits, X)
        return Y_p
    def compute_score(self, X):
        """Raw (pre-sigmoid) logits of the output layer, evaluated on X."""
        logits = self.output_layer.get_logits_for(L.get_output(self.layers[(- 2)]))
        Y_p = self._predict(logits, X)
        return Y_p
    def likelihood_loss(self):
        """Summed sigmoid cross-entropy between output logits and targets."""
        logits = self.output_layer.get_logits_for(L.get_output(self.layers[(- 2)]))
        # NOTE(review): positional (logits, labels) arguments only work on very
        # old TF versions; modern TF requires keyword arguments — confirm the
        # pinned TF version.
        loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, self.target_var)
        return tf.reduce_sum(loss)
    def complexity_loss(self, reg, cmx):
        """No complexity/regularization penalty for this model (constant 0)."""
        return tf.constant(0.0)
    def loss(self, reg=0.0, cmx=0.0):
        """Total training loss; `reg`/`cmx` are accepted but unused here."""
        loss = self.likelihood_loss()
        return loss
    def clip_ops(self):
        """No gradient-clipping ops for this model."""
        return []
def find_all_linear_names(peft_model, int4=False, int8=False):
    """Collect the leaf module names of all linear layers in ``peft_model``.

    Used to build LoRA target-module lists. With ``int4``/``int8`` the
    bitsandbytes quantized linear classes are matched instead of
    ``torch.nn.Linear``. Modules whose name contains ``lm_head`` or
    ``output_layer`` are excluded.

    :return: sorted list of unique leaf names.
    """
    cls = torch.nn.Linear
    if int4 or int8:
        import bitsandbytes as bnb
        if int4:
            cls = bnb.nn.Linear4bit
        elif int8:
            cls = bnb.nn.Linear8bitLt
    found = set()
    for name, module in peft_model.named_modules():
        if not isinstance(module, cls):
            continue
        if 'lm_head' in name or 'output_layer' in name:
            continue
        parts = name.split('.')
        # Keep only the leaf component (or the whole name if it has no dots).
        found.add(parts[-1] if len(parts) > 1 else parts[0])
    return sorted(found)
def main(hdf_file):
    """Run a dummy patch-wise 3D inference pipeline over a pymia HDF dataset
    and reassemble per-subject predictions.

    :param hdf_file: path to the pymia-formatted HDF5 dataset.
    """
    # Patch extractor with padding (2, 2, 2) — presumably 2 voxels per side;
    # confirm against the pymia PadDataExtractor docs.
    extractor = extr.PadDataExtractor((2, 2, 2), extr.DataExtractor(categories=(defs.KEY_IMAGES,)))
    # Move channels first: (x, y, z, c) -> (c, x, y, z) for the conv network.
    transform = tfm.Permute(permutation=(3, 0, 1, 2), entries=(defs.KEY_IMAGES,))
    indexing_strategy = extr.PatchWiseIndexing(patch_shape=(32, 32, 32))
    dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, transform)
    # Separate extractor for fetching full labels/images per finished subject.
    direct_extractor = extr.ComposeExtractor([extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS, defs.KEY_IMAGES))])
    assembler = assm.SubjectAssembler(dataset)
    pytorch_dataset = pymia_torch.PytorchDatasetAdapter(dataset)
    loader = torch_data.dataloader.DataLoader(pytorch_dataset, batch_size=2, shuffle=False)
    # Stand-in network; two 3x3x3 convs with no padding shrink each patch by 4.
    dummy_network = nn.Sequential(nn.Conv3d(in_channels=2, out_channels=8, kernel_size=3, padding=0), nn.Conv3d(in_channels=8, out_channels=1, kernel_size=3, padding=0), nn.Sigmoid())
    torch.set_grad_enabled(False)  # inference only
    nb_batches = len(loader)
    for (i, batch) in enumerate(loader):
        (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
        prediction = dummy_network(x)
        # Back to channels-last layout for the assembler.
        numpy_prediction = prediction.numpy().transpose((0, 2, 3, 4, 1))
        is_last = (i == (nb_batches - 1))
        assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
        for subject_index in assembler.subjects_ready:
            subject_prediction = assembler.get_assembled_subject(subject_index)
            # Fetch ground truth and image properties for the finished subject.
            direct_sample = dataset.direct_extract(direct_extractor, subject_index)
            (target, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
(Output('forecasting-select-file', 'options'), Output('forecasting-select-target', 'value'), Output('forecasting-select-features', 'value'), Output('forecasting-select-exog', 'value'), Input('forecasting-select-file-parent', 'n_clicks'), Input('forecasting-select-file', 'value'), [State('forecasting-select-target', 'value'), State('forecasting-select-features', 'value'), State('forecasting-select-exog', 'value')])
def update_select_file_dropdown(n_clicks, filename, target, features, exog):
    """Dash callback body: refresh the file-dropdown options and reset the
    target/feature/exog column selections when a different file is chosen.
    """
    options = []
    ctx = dash.callback_context
    if ctx.triggered:
        triggered = {item['prop_id'].split('.')[0]: item['value'] for item in ctx.triggered}
        # Clicking the dropdown's parent refreshes the uploaded-file list.
        if 'forecasting-select-file-parent' in triggered:
            options = [{'label': name, 'value': name} for name in file_manager.uploaded_files()]
        # Choosing a new file invalidates the previous column choices.
        if 'forecasting-select-file' in triggered:
            target = features = exog = None
    return (options, target, features, exog)
.parametrize('metric', [['minkowski', 0.], ['mahalanobis', 0.]])
def test_deskl(metric):
    """Smoke-test DESKL for a given (knn metric name, expected score) pair."""
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    deskl = DESKL(pool_classifiers, knn_metric=metric[0])
    deskl.fit(X_dsel, y_dsel)
    score = deskl.score(X_test, y_test)
    assert np.isclose(score, metric[1])
def test_learn_nse_different_proba_sizes():
    """Learn++.NSE must cope with base estimators whose predict_proba covers
    fewer classes than declared: labels are remapped mid-stream so new class
    ids appear after the first partial_fit.
    """
    batch_size = 250
    stream = RandomTreeGenerator(tree_random_state=7, sample_random_state=8, n_classes=2)
    # --- decision-tree base learner ---
    learner = LearnPPNSEClassifier(base_estimator=DecisionTreeClassifier(random_state=7), window_size=250)
    X, y = stream.next_sample(batch_size)
    learner.partial_fit(X, y, classes=np.array([0, 1, 2, 3]))
    X, y = stream.next_sample(batch_size)
    # Remap labels so later batches use classes unseen in the first batch.
    y[y == 0] = 3
    y[y == 1] = 2
    learner.partial_fit(X, y)
    X, y = stream.next_sample(batch_size)
    y[y == 0] = 3
    pred = learner.predict(X)
    learner.partial_fit(X, y)
    if pred is not None:
        assert np.sum(y == pred) == 115
    stream.reset()
    # --- Hoeffding-tree base learner ---
    learner = LearnPPNSEClassifier(base_estimator=HoeffdingTreeClassifier(leaf_prediction='mc'), window_size=250)
    X, y = stream.next_sample(batch_size)
    # Without an explicit class list the first call must fail.
    with pytest.raises(RuntimeError):
        learner.partial_fit(X, y, classes=None)
    learner.reset()
    learner.partial_fit(X, y, classes=np.array([0, 1, 2, 3]))
    X, y = stream.next_sample(batch_size)
    y[y == 0] = 3
    y[y == 1] = 2
    learner.partial_fit(X, y)
    X, y = stream.next_sample(batch_size)
    y[y == 0] = 3
    pred = learner.predict(X)
    if pred is not None:
        assert np.sum(y == pred) == 109
class TransformTwice:
    """Callable wrapper applying the same transform to an input twice and
    returning both results (e.g. two augmented views for consistency training)."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, inp):
        return (self.transform(inp), self.transform(inp))
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
    """Instantiate the indexed-dataset implementation named by ``impl`` for ``path``.

    Returns None when the requested implementation's files do not exist at ``path``.
    """
    if impl == 'raw' and IndexedRawTextDataset.exists(path):
        # Raw text needs a dictionary to encode lines on the fly.
        assert dictionary is not None
        return IndexedRawTextDataset(path, dictionary)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path)
    if impl == 'fasta' and FastaDataset.exists(path):
        # Imported lazily so fasta dependencies load only when needed.
        from fairseq.data.fasta_dataset import EncodedFastaDataset
        return EncodedFastaDataset(path, dictionary)
    return None
class TrainingRunViewer(gtd.ml.training_run_viewer.TrainingRunViewer):
    # Tabular viewer over MiniWoB training runs: each `add` call registers one column.
    def __init__(self):
        runs = MiniWoBTrainingRuns(check_commit=False)
        super(TrainingRunViewer, self).__init__(runs)
        # Helper building a column selector that reads `keys` out of a run's metadata.txt.
        metadata = (lambda keys: JSONSelector('metadata.txt', keys))
        self.add('name', run_name)
        self.add('commit', Commit(), (lambda s: s[:8]))  # abbreviated commit hash
        self.add('dataset', metadata(['config', 'dataset', 'path']))
        self.add('steps', NumSteps())
        self.add('host', metadata(['host']), (lambda s: s[:10]))  # truncated hostname
        self.add('last seen', metadata(['last_seen']))
        # NOTE(review): `two_decimal` is assigned but never used here — presumably a
        # formatter meant for an additional column; confirm before removing.
        two_decimal = (lambda f: '{:.2f}'.format(f))
def add_del_statements(statements: List[str]) -> Iterator[str]:
    """Insert ``del`` statements freeing temporaries (``t_<n>`` / ``x<n>``)
    immediately after their last use, scanning the program bottom-up.

    Variables appearing in the final statement stay alive; lines containing
    ``#`` pass through untouched. In-place updates (e.g. ``+=``) keep the
    assignment target alive. Returns the augmented statements in original
    (top-down) order.
    """
    var_pattern = re.compile('t_[0-9]+|x[0-9]+')
    inplace_pattern = re.compile('\\d \\S=')
    last = statements[-1]
    result = [last]
    live = set(var_pattern.findall(last))
    for stmt in statements[-2::-1]:
        if '#' in stmt:
            result.append(stmt)
            continue
        names = var_pattern.findall(stmt)
        if not names:
            result.append(stmt)
            continue
        # Right-hand-side names first encountered here (walking upward) die
        # after this statement: schedule a `del` just below it.
        for name in names[1:]:
            if name not in live:
                result.append(f'del {name}')
                live.add(name)
        # A plain assignment kills the target unless the statement also reads it.
        if (not inplace_pattern.findall(stmt)) and (names[0] not in names[1:]):
            live.discard(names[0])
        result.append(stmt)
    return reversed(result)
class Restormer(nn.Module):
    """Restormer: a 4-level U-shaped transformer for image restoration.

    Channel width doubles per level (dim * 2**level). Each decoder level fuses
    the matching encoder output by concatenation followed by a 1x1 channel
    reduction (except level 1, which keeps 2*dim channels), then a refinement
    stage. With ``dual_pixel_task`` the embedded input is injected via a 1x1
    skip convolution; otherwise the network predicts a residual added to the
    raw input image.
    """
    def __init__(self, inp_channels=3, out_channels=3, dim=48, num_blocks=[4, 6, 6, 8], num_refinement_blocks=4, heads=[1, 2, 4, 8], ffn_expansion_factor=2.66, bias=False, LayerNorm_type='WithBias', dual_pixel_task=False):
        super(Restormer, self).__init__()
        self.patch_embed = OverlapPatchEmbed(inp_channels, dim)
        # --- encoder (downsampling path) ---
        self.encoder_level1 = nn.Sequential(*[TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
        self.down1_2 = Downsample(dim)
        self.encoder_level2 = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 1))), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])
        self.down2_3 = Downsample(int((dim * (2 ** 1))))
        self.encoder_level3 = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 2))), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])
        self.down3_4 = Downsample(int((dim * (2 ** 2))))
        # --- bottleneck ---
        self.latent = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 3))), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])])
        # --- decoder (upsampling path with encoder skips) ---
        self.up4_3 = Upsample(int((dim * (2 ** 3))))
        self.reduce_chan_level3 = nn.Conv2d(int((dim * (2 ** 3))), int((dim * (2 ** 2))), kernel_size=1, bias=bias)
        self.decoder_level3 = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 2))), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])
        self.up3_2 = Upsample(int((dim * (2 ** 2))))
        self.reduce_chan_level2 = nn.Conv2d(int((dim * (2 ** 2))), int((dim * (2 ** 1))), kernel_size=1, bias=bias)
        self.decoder_level2 = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 1))), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])
        self.up2_1 = Upsample(int((dim * (2 ** 1))))
        # Level 1 keeps 2*dim channels: no channel reduction after the skip concat.
        self.decoder_level1 = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 1))), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
        self.refinement = nn.Sequential(*[TransformerBlock(dim=int((dim * (2 ** 1))), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_refinement_blocks)])
        self.dual_pixel_task = dual_pixel_task
        if self.dual_pixel_task:
            # 1x1 conv lifting the dim-channel patch embedding to 2*dim for the skip add.
            self.skip_conv = nn.Conv2d(dim, int((dim * (2 ** 1))), kernel_size=1, bias=bias)
        self.output = nn.Conv2d(int((dim * (2 ** 1))), out_channels, kernel_size=3, stride=1, padding=1, bias=bias)
    def forward(self, inp_img):
        """Restore ``inp_img``; output has spatial size equal to the input."""
        inp_enc_level1 = self.patch_embed(inp_img)
        out_enc_level1 = self.encoder_level1(inp_enc_level1)
        inp_enc_level2 = self.down1_2(out_enc_level1)
        out_enc_level2 = self.encoder_level2(inp_enc_level2)
        inp_enc_level3 = self.down2_3(out_enc_level2)
        out_enc_level3 = self.encoder_level3(inp_enc_level3)
        inp_enc_level4 = self.down3_4(out_enc_level3)
        latent = self.latent(inp_enc_level4)
        # Decoder: upsample, concat the encoder skip, reduce channels, transform.
        inp_dec_level3 = self.up4_3(latent)
        inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)
        inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)
        out_dec_level3 = self.decoder_level3(inp_dec_level3)
        inp_dec_level2 = self.up3_2(out_dec_level3)
        inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)
        inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)
        out_dec_level2 = self.decoder_level2(inp_dec_level2)
        inp_dec_level1 = self.up2_1(out_dec_level2)
        inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)
        out_dec_level1 = self.decoder_level1(inp_dec_level1)
        out_dec_level1 = self.refinement(out_dec_level1)
        if self.dual_pixel_task:
            # Dual-pixel defocus deblurring: add the lifted patch embedding, no residual.
            out_dec_level1 = (out_dec_level1 + self.skip_conv(inp_enc_level1))
            out_dec_level1 = self.output(out_dec_level1)
        else:
            # Standard restoration: predict a residual on top of the input image.
            out_dec_level1 = (self.output(out_dec_level1) + inp_img)
        return out_dec_level1
class ImageNetDataset(Dataset):
    """ImageFolder-backed dataset exposing images only (class labels dropped).

    Args:
        imagenet_dir: root directory in torchvision ``ImageFolder`` layout.
        transform: optional transform applied to each image.
        num_samples: number of samples the dataset reports via ``len``.
            Defaults to 1000, matching the previously hard-coded length,
            so existing callers see identical behavior.
    """

    def __init__(self, imagenet_dir, transform=None, num_samples=1000):
        super().__init__()
        self.imagenet_dir = imagenet_dir
        self.transform = transform
        # Generalized: the formerly hard-coded length (1000) is now a parameter.
        self.num_samples = num_samples
        self.dataset = ImageFolder(self.imagenet_dir, transform=self.transform)

    def __len__(self):
        # NOTE(review): this intentionally reports a fixed count rather than
        # len(self.dataset) — presumably to cap evaluation at a subset; confirm.
        return self.num_samples

    def __getitem__(self, idx):
        # Drop the class label, return the image only.
        return self.dataset[idx][0]
def random_bivariate_plateau_kernel(kernel_size, sigma_x_range, sigma_y_range, rotation_range, beta_range, noise_range=None, is_isotropic=True):
    """Sample a random (optionally anisotropic, rotated) plateau-shaped blur kernel.

    Sigma/rotation/beta are drawn uniformly from their ranges; beta is drawn
    from [beta_range[0], 1) or [1, beta_range[1]) with equal probability.
    Optional multiplicative uniform noise is applied before the kernel is
    renormalized to sum to 1.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if is_isotropic is False:
        assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        # Isotropic kernels share one sigma and need no rotation.
        sigma_y = sigma_x
        rotation = 0
    # Split beta sampling around 1 so flat-top (beta < 1) and super-Gaussian
    # (beta > 1) shapes are drawn equally often.
    if np.random.uniform() <= 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])
    kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, is_isotropic=is_isotropic)
    if noise_range is not None:
        assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
        kernel = kernel * np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
    return kernel / np.sum(kernel)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (optionally distributed) semi-supervised ImageNet training.

    Sets up the process group, builds a ResNet model (optionally initialized
    from a MoCo-style pre-trained checkpoint), prepares labeled/unlabeled data
    splits and loaders, then runs the train/validate loop, saving a checkpoint
    every epoch.
    """
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    # Silence printing on all but the local-rank-0 process.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if ((args.dist_url == 'env://') and (args.rank == (- 1))):
        args.rank = int(os.environ['RANK'])
    if args.multiprocessing_distributed:
        # Global rank = node rank * gpus-per-node + local gpu index.
        args.rank = ((args.rank * ngpus_per_node) + gpu)
    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}'".format(args.arch))
    if (args.arch == 'resnet50'):
        model = Model(resnet50, args, width=1)
    elif (args.arch == 'resnet50x2'):
        model = Model(resnet50, args, width=2)
    elif (args.arch == 'resnet50x4'):
        model = Model(resnet50, args, width=4)
    else:
        raise NotImplementedError('model not supported {}'.format(args.arch))
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location='cpu')
            state_dict = checkpoint['state_dict']
            # Rename MoCo query-encoder keys: module.encoder_q.* -> encoder.*
            for k in list(state_dict.keys()):
                if k.startswith('module.encoder_q'):
                    state_dict[k.replace('module.encoder_q', 'encoder')] = state_dict[k]
                    del state_dict[k]
            # Rename projection-head keys: fc.0 -> fc1, fc.2 -> fc2.
            for k in list(state_dict.keys()):
                if ('fc.0' in k):
                    state_dict[k.replace('fc.0', 'fc1')] = state_dict[k]
                if ('fc.2' in k):
                    state_dict[k.replace('fc.2', 'fc2')] = state_dict[k]
                    del state_dict[k]
            args.start_epoch = 0
            # strict=False: classifier/momentum branches are not in the checkpoint.
            msg = model.load_state_dict(state_dict, strict=False)
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
            # Initialize the momentum encoder as a frozen copy of the encoder.
            for (param, param_m) in zip(model.encoder.parameters(), model.m_encoder.parameters()):
                param_m.data.copy_(param.data)
                param_m.requires_grad = False
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    if (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)
        # Split per-node batch sizes and workers across the node's GPUs.
        args.batch_size = int((args.batch_size / ngpus_per_node))
        args.batch_size_u = int((args.batch_size_u / ngpus_per_node))
        args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    criteria_x = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    # Optionally resume a full training state (model + optimizer + epoch).
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    print('=> preparing dataset')
    # Weak augmentation for labeled data; three crops (weak + 2x strong) for unlabeled.
    transform_strong = transforms.Compose([transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    transform_weak = transforms.Compose([transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    transform_eval = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    three_crops_transform = loader.ThreeCropsTransform(transform_weak, transform_strong, transform_strong)
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    labeled_dataset = datasets.ImageFolder(traindir, transform_weak)
    unlabeled_dataset = datasets.ImageFolder(traindir, three_crops_transform)
    # Build the labeled/unlabeled split once (rank 0) and persist it as JSON.
    if (not os.path.exists(args.annotation)):
        label_per_class = (13 if (args.percent == 1) else 128)
        if (args.gpu == 0):
            random.shuffle(labeled_dataset.samples)
            labeled_samples = []
            unlabeled_samples = []
            num_img = torch.zeros(args.num_class)
            for (i, (img, label)) in enumerate(labeled_dataset.samples):
                if (num_img[label] < label_per_class):
                    labeled_samples.append((img, label))
                    num_img[label] += 1
                else:
                    unlabeled_samples.append((img, label))
            annotation = {'labeled_samples': labeled_samples, 'unlabeled_samples': unlabeled_samples}
            with open(args.annotation, 'w') as f:
                json.dump(annotation, f)
            print(('save annotation to %s' % args.annotation))
        # All ranks wait until rank 0 has written the annotation file.
        dist.barrier()
    print(('load annotation from %s' % args.annotation))
    annotation = json.load(open(args.annotation, 'r'))
    if (args.percent == 1):
        # 1% labels: replicate the tiny labeled set so epochs are long enough.
        labeled_dataset.samples = (annotation['labeled_samples'] * 10)
    else:
        labeled_dataset.samples = annotation['labeled_samples']
    unlabeled_dataset.samples = annotation['unlabeled_samples']
    labeled_sampler = torch.utils.data.distributed.DistributedSampler(labeled_dataset)
    labeled_loader = torch.utils.data.DataLoader(labeled_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=labeled_sampler)
    unlabeled_sampler = torch.utils.data.distributed.DistributedSampler(unlabeled_dataset)
    unlabeled_loader = torch.utils.data.DataLoader(unlabeled_dataset, batch_size=int(args.batch_size_u), shuffle=False, num_workers=args.workers, pin_memory=True, sampler=unlabeled_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transform_eval), batch_size=64, shuffle=False, num_workers=args.workers, pin_memory=True)
    # Logging only on the local-rank-0 process.
    if (args.gpu == 0):
        tb_logger = tensorboard_logger.Logger(logdir=os.path.join(args.exp_dir, 'tensorboard'), flush_secs=2)
        logger = setup_default_logging(args)
        logger.info(dict(args._get_kwargs()))
    else:
        tb_logger = None
        logger = None
    for epoch in range(args.start_epoch, args.epochs):
        # Warm up the momentum coefficient in the first epoch.
        if (epoch == 0):
            args.m = 0.99
        else:
            args.m = args.moco_m
        adjust_learning_rate(optimizer, epoch, args)
        train(labeled_loader, unlabeled_loader, model, criteria_x, optimizer, epoch, args, logger, tb_logger)
        acc1 = validate(val_loader, model, args, logger, tb_logger, epoch)
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, filename='{}/checkpoint_{:04d}.pth.tar'.format(args.exp_dir, epoch))
    # Final validation pass (epoch index -1 marks it in the logs).
    acc1 = validate(val_loader, model, args, logger, tb_logger, (- 1))
def inverse_laplace(ex, s, t, algorithm='maxima'):
    """Inverse Laplace transform of ``ex`` from variable ``s`` to ``t``.

    ``algorithm`` is one of ``'maxima'`` (default), ``'sympy'`` or ``'giac'``.
    When the chosen backend cannot evaluate the transform, a formal
    ``dummy_inverse_laplace`` expression is returned instead of raising.
    """
    # Coerce non-symbolic input (strings, numbers) into the symbolic ring.
    if (not isinstance(ex, Expression)):
        ex = SR(ex)
    if (algorithm == 'maxima'):
        return ex.parent()(ex._maxima_().ilt(var(s), var(t)))
    elif (algorithm == 'sympy'):
        # Convert the expression and both variables to their SymPy counterparts.
        (ex_sy, s, t) = (expr._sympy_() for expr in (ex, s, t))
        from sympy import inverse_laplace_transform
        from sage.interfaces.sympy import sympy_init
        sympy_init()
        result = inverse_laplace_transform(ex_sy, s, t)
        try:
            return result._sage_()
        except AttributeError:
            # SymPy returned an unevaluated transform object: keep it formal.
            if ('InverseLaplaceTransform' in format(result)):
                return dummy_inverse_laplace(ex, t, s)
            else:
                raise AttributeError('Unable to convert SymPy result (={}) into Sage'.format(result))
    elif (algorithm == 'giac'):
        from sage.interfaces.giac import giac
        try:
            result = giac.invlaplace(ex, s, t)
        except TypeError:
            raise ValueError(('Giac cannot make sense of: %s' % ex))
        # Giac echoes an unevaluated 'ilaplace' call when it cannot solve.
        if ('ilaplace' in format(result)):
            return dummy_inverse_laplace(ex, t, s)
        else:
            return result.sage()
    else:
        raise ValueError(('Unknown algorithm: %s' % algorithm))
_builder('coco_caption')
class COCOCapBuilder(BaseDatasetBuilder):
    """Dataset builder for COCO image captioning."""
    # Dataset classes the base builder instantiates for train and eval splits.
    train_dataset_cls = COCOCapDataset
    eval_dataset_cls = COCOCapEvalDataset
    # Maps a config name to its default dataset-card YAML path.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/coco/defaults_cap.yaml'}
def gen_model_input_sdm(train_set, user_profile, seq_short_max_len, seq_prefer_max_len):
    """Build the SDM model input dict and label array from training tuples.

    Each row of ``train_set`` is (uid, iid, label, short_seq, prefer_seq,
    short_len, prefer_len, short_genres_seq, prefer_genres_seq). Sequences are
    zero-padded/truncated ('post') to the given max lengths; static profile
    features are joined from ``user_profile`` by user id.
    """
    def column(idx):
        # Extract one positional field from every training tuple.
        return [row[idx] for row in train_set]

    train_uid = np.array(column(0))
    train_iid = np.array(column(1))
    train_label = np.array(column(2))
    short_seqs = column(3)
    prefer_seqs = column(4)
    train_short_len = np.array(column(5))
    train_prefer_len = np.array(column(6))
    short_genre_seqs = np.array(column(7))
    prefer_genre_seqs = np.array(column(8))

    def pad(seqs, max_len):
        return pad_sequences(seqs, maxlen=max_len, padding='post', truncating='post', value=0)

    train_model_input = {
        'user_id': train_uid,
        'movie_id': train_iid,
        'short_movie_id': pad(short_seqs, seq_short_max_len),
        'prefer_movie_id': pad(prefer_seqs, seq_prefer_max_len),
        'prefer_sess_length': train_prefer_len,
        'short_sess_length': train_short_len,
        'short_genres': pad(short_genre_seqs, seq_short_max_len),
        'prefer_genres': pad(prefer_genre_seqs, seq_prefer_max_len),
    }
    # Join static profile features by user id.
    for key in ['gender', 'age', 'occupation', 'zip']:
        train_model_input[key] = user_profile.loc[train_model_input['user_id']][key].values
    return (train_model_input, train_label)
def run_experiment_lite(stub_method_call=None, batch_tasks=None, exp_prefix='experiment', exp_name=None, log_dir=None, script='scripts/run_experiment_lite.py', python_command='python', mode='local', dry=False, docker_image=None, aws_config=None, env=None, variant=None, use_gpu=False, sync_s3_pkl=False, sync_log_on_termination=True, confirm_remote=True, terminate_machine=True, periodic_sync=True, periodic_sync_interval=15, sync_all_data_node_to_s3=True, use_cloudpickle=False, pre_commands=None, **kwargs):
    """Serialize stubbed experiment calls and launch them in the chosen mode.

    Each task's stubbed method call is pickled (optionally with cloudpickle) and
    base64-encoded into the launcher script's arguments. ``mode`` selects the
    backend: 'local' (subprocess), 'local_docker', 'ec2', or 'lab_kube'
    (Kubernetes pod per task). With ``dry`` the commands are printed, not run.
    """
    assert ((stub_method_call is not None) or (batch_tasks is not None)), 'Must provide at least either stub_method_call or batch_tasks'
    # Single-call convenience: wrap the one stub call as a one-task batch.
    if (batch_tasks is None):
        batch_tasks = [dict(kwargs, stub_method_call=stub_method_call, exp_name=exp_name, log_dir=log_dir, env=env, variant=variant, use_cloudpickle=use_cloudpickle)]
    global exp_count
    global remote_confirmed
    config.USE_GPU = use_gpu
    # Serialize each task and fill in default names / log directories.
    for task in batch_tasks:
        call = task.pop('stub_method_call')
        if use_cloudpickle:
            import cloudpickle
            data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
        else:
            data = base64.b64encode(pickle.dumps(call)).decode('utf-8')
        task['args_data'] = data
        exp_count += 1
        params = dict(kwargs)
        if (task.get('exp_name', None) is None):
            task['exp_name'] = ('%s_%s_%04d' % (exp_prefix, timestamp, exp_count))
        if (task.get('log_dir', None) is None):
            task['log_dir'] = ((((config.LOG_DIR + '/local/') + exp_prefix.replace('_', '-')) + '/') + task['exp_name'])
        if (task.get('variant', None) is not None):
            variant = task.pop('variant')
            if ('exp_name' not in variant):
                variant['exp_name'] = task['exp_name']
            task['variant_data'] = base64.b64encode(pickle.dumps(variant)).decode('utf-8')
        elif ('variant' in task):
            del task['variant']
        task['remote_log_dir'] = osp.join(config.AWS_S3_PATH, exp_prefix.replace('_', '-'), task['exp_name'])
    # Ask once before launching anything on a remote backend.
    if ((mode not in ['local', 'local_docker']) and (not remote_confirmed) and (not dry) and confirm_remote):
        remote_confirmed = query_yes_no(('Running in (non-dry) mode %s. Confirm?' % mode))
        if (not remote_confirmed):
            sys.exit(1)
    if (mode == 'local'):
        for task in batch_tasks:
            del task['remote_log_dir']
            env = task.pop('env', None)
            command = to_local_command(task, python_command=python_command, script=osp.join(config.PROJECT_PATH, script), use_gpu=use_gpu)
            print(command)
            if dry:
                return
            try:
                if (env is None):
                    env = dict()
                subprocess.call(command, shell=True, env=dict(os.environ, **env))
            except Exception as e:
                print(e)
                # Let Ctrl-C propagate so the whole batch stops.
                if isinstance(e, KeyboardInterrupt):
                    raise
    elif (mode == 'local_docker'):
        if (docker_image is None):
            docker_image = config.DOCKER_IMAGE
        for task in batch_tasks:
            del task['remote_log_dir']
            env = task.pop('env', None)
            command = to_docker_command(task, docker_image=docker_image, pre_commands=pre_commands, script=script, env=env, use_gpu=use_gpu, use_tty=True)
            print(command)
            if dry:
                return
            p = subprocess.Popen(command, shell=True)
            try:
                p.wait()
            except KeyboardInterrupt:
                # Forward the interrupt to the container process, then reap it.
                try:
                    print('terminating')
                    p.terminate()
                except OSError:
                    print('os error!')
                    pass
                p.wait()
    elif (mode == 'ec2'):
        if (docker_image is None):
            docker_image = config.DOCKER_IMAGE
        s3_code_path = s3_sync_code(config, dry=dry)
        launch_ec2(batch_tasks, exp_prefix=exp_prefix, docker_image=docker_image, python_command=python_command, script=script, aws_config=aws_config, dry=dry, terminate_machine=terminate_machine, use_gpu=use_gpu, code_full_path=s3_code_path, sync_s3_pkl=sync_s3_pkl, sync_log_on_termination=sync_log_on_termination, periodic_sync=periodic_sync, periodic_sync_interval=periodic_sync_interval, pre_commands=pre_commands)
    elif (mode == 'lab_kube'):
        s3_code_path = s3_sync_code(config, dry=dry)
        if (docker_image is None):
            docker_image = config.DOCKER_IMAGE
        for task in batch_tasks:
            # NOTE(review): `params` here is left over from the serialization loop
            # above (last iteration's dict(kwargs)) — confirm this is intended.
            task['resources'] = params.pop('resources', config.KUBE_DEFAULT_RESOURCES)
            task['node_selector'] = params.pop('node_selector', config.KUBE_DEFAULT_NODE_SELECTOR)
            task['exp_prefix'] = exp_prefix
            pod_dict = to_lab_kube_pod(task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu, python_command=python_command, sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync, periodic_sync_interval=periodic_sync_interval, sync_all_data_node_to_s3=sync_all_data_node_to_s3, terminate_machine=terminate_machine)
            pod_str = json.dumps(pod_dict, indent=1)
            if dry:
                print(pod_str)
            # Write the pod spec to disk and submit it with kubectl.
            dir = '{pod_dir}/{exp_prefix}'.format(pod_dir=config.POD_DIR, exp_prefix=exp_prefix)
            ensure_dir(dir)
            fname = '{dir}/{exp_name}.json'.format(dir=dir, exp_name=task['exp_name'])
            with open(fname, 'w') as fh:
                fh.write(pod_str)
            kubecmd = ('kubectl create -f %s' % fname)
            print(kubecmd)
            if dry:
                return
            # Retry kubectl a few times to ride out transient API failures.
            retry_count = 0
            wait_interval = 1
            while (retry_count <= 5):
                try:
                    return_code = subprocess.call(kubecmd, shell=True)
                    if (return_code == 0):
                        break
                    retry_count += 1
                    print('trying again...')
                    time.sleep(wait_interval)
                except Exception as e:
                    if isinstance(e, KeyboardInterrupt):
                        raise
                    print(e)
    else:
        raise NotImplementedError
def _augment_gain(audio, low=0.75, high=1.25):
g = random.uniform(low, high)
return (audio * g) |
class RandomCrop(object):
    """Randomly crop an (image, mask) pair to ``size``, with optional zero padding.

    Args:
        size: target (height, width) pair; a single int now also works and
            means a square crop (generalization — pair behavior unchanged).
        padding: border width to zero-pad both image and mask before cropping.
    """

    def __init__(self, size, padding=0):
        # Generalized: accept a single int for square crops. Previously
        # `tuple(size)` raised TypeError for ints.
        if isinstance(size, int):
            self.size = (size, size)
        else:
            self.size = tuple(size)
        self.padding = padding

    def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            # Already the target size.
            return (img, mask)
        if w < tw or h < th:
            # Too small to crop: upscale instead — bilinear for the image,
            # nearest for the mask so label ids stay intact.
            return (img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST))
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        box = (x1, y1, x1 + tw, y1 + th)
        return (img.crop(box), mask.crop(box))
def plot_gp(x: torch.Tensor, model: gpytorch.models.GP, num_samples: int, ax: mpl.axes.Axes) -> None:
    """Plot a GP posterior on ``ax``: the mean with a +/-2 std band, the true
    objective, the training points, and ``num_samples`` posterior draws."""
    with torch.no_grad(), gpytorch.settings.fast_pred_var():
        posterior = model(x)
        mean = posterior.mean.numpy()
        band = 2 * posterior.stddev.numpy()
        truth = objective(None, x, None)[0].numpy()
    # +/- 2 sigma uncertainty band around the posterior mean.
    ax.fill_between(x, mean - band, mean + band, lw=0, alpha=0.4, color='C0')
    ax.plot(x, mean, color='C0')
    ax.plot(x, truth, '--', color='k')
    # Training observations.
    ax.plot(model.train_inputs[0].numpy(), model.train_targets.numpy(), 'x', markeredgewidth=2, markersize=5, color='C1')
    for _ in range(num_samples):
        ax.plot(x.numpy(), posterior.sample().numpy())
    ax.set_xlim(x[0], x[-1])
    ax.set_ylim(-2.1, 2.1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlabel('Inputs $\\theta$')
    ax.set_ylabel('$J(\\theta)$')
def RunAndExtractTestList(args=None):
    """Run the gtest binary with ``args`` and parse which tests it reports running.

    Returns (list of 'TestCase.TestName' strings in output order, exit code).
    """
    process = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
    tests_run = []
    current_case = ''
    current_test = ''
    for line in process.output.split('\n'):
        case_match = TEST_CASE_REGEX.match(line)
        if case_match is not None:
            # New test-case header: subsequent test lines belong to it.
            current_case = case_match.group(1)
            continue
        test_match = TEST_REGEX.match(line)
        if test_match is not None:
            current_test = test_match.group(1)
            tests_run.append(current_case + '.' + current_test)
    return (tests_run, process.exit_code)
def inference_main(meta_files, ckpt, config, id, **kwargs):
    """Generate composer-conditioned MIDI for every sample in ``meta_files``.

    Loads a TransformerWrapper from ``ckpt``/``config`` onto the GPU and, for
    each sample and composer token, writes a generated MIDI file tagged with
    ``id``. Samples whose outputs all exist on disk already are skipped.
    """
    import warnings
    sweetdebug(use_telegram_if_cache_exists=False)
    warnings.filterwarnings(action='ignore')
    config = OmegaConf.load(config)
    wrapper = TransformerWrapper(config)
    wrapper = wrapper.load_from_checkpoint(ckpt, config=config).cuda()
    wrapper.eval()
    with torch.no_grad():
        for meta_file in tqdm(meta_files):
            sample = MidiAudioPair(meta_file)
            # First pass: check whether every composer's output already exists.
            some_not_generated = False
            for (composer, value) in wrapper.composer_to_feature_token.items():
                midi_path = sample.generated(composer=composer, generated=id)
                os.makedirs(os.path.dirname(midi_path), exist_ok=True)
                if (not os.path.exists(midi_path)):
                    some_not_generated = True
            all_generated = (not some_not_generated)
            if all_generated:
                continue
            beatstep = np.load(sample.beatstep)
            # Load either raw audio (mel-spectrogram path) or pre-computed VQ-VAE tokens.
            if wrapper.use_mel:
                (y, sr) = librosa.load(sample.song, sr=config.dataset.sample_rate)
                vqvae_token = None
            else:
                vqvae_token = torch.load(sample.vqvae, map_location='cuda')
                y = None
                sr = None
            # Second pass: generate only the composer renditions still missing.
            for (composer, value) in wrapper.composer_to_feature_token.items():
                midi_path = sample.generated(composer=composer, generated=id)
                os.makedirs(os.path.dirname(midi_path), exist_ok=True)
                if os.path.exists(midi_path):
                    continue
                # Beatsteps are shifted so the first beat starts at time 0.
                wrapper.generate(audio_path=None, composer=composer, model=id, save_midi=True, save_mix=False, show_plot=False, midi_path=midi_path, vqvae_token=vqvae_token, beatsteps=(beatstep - beatstep[0]), audio_y=y, audio_sr=sr)
def get_device_details(devices_type):
    """Populate the global ``devices`` dict with PCI devices of ``devices_type``.

    Parses `lspci -Dvmmnnk` output (blank-line-separated records), keeps devices
    matching ``devices_type``, augments each entry with extra PCI details, marks
    network interfaces that carry a default route as active (so they are not
    rebound), and normalizes the kernel Driver/Module fields against the known
    DPDK drivers.
    """
    global devices
    global dpdk_drivers
    dev = {}
    dev_lines = subprocess.check_output(['lspci', '-Dvmmnnk']).splitlines()
    for dev_line in dev_lines:
        if (not dev_line):
            # Blank line terminates one lspci record: commit it if it matches.
            if device_type_match(dev, devices_type):
                if ('Driver' in dev.keys()):
                    dev['Driver_str'] = dev.pop('Driver')
                if ('Module' in dev.keys()):
                    dev['Module_str'] = dev.pop('Module')
                devices[dev['Slot']] = dict(dev)
            dev = {}
        else:
            (name, value) = dev_line.decode('utf8').split('\t', 1)
            # Store both the human-readable part and the bracketed numeric id,
            # e.g. "Ethernet controller [0200]" -> *_str and bare value.
            value_list = value.rsplit(' ', 1)
            if value_list:
                dev[(name.rstrip(':') + '_str')] = value_list[0]
            dev[name.rstrip(':')] = value_list[(len(value_list) - 1)].rstrip(']').lstrip('[')
    if (devices_type == network_devices):
        # Collect interfaces carrying routes (ignoring link-local 169.254.*):
        # these are likely the SSH path and must not be unbound.
        ssh_if = []
        route = subprocess.check_output(['ip', '-o', 'route'])
        route = '\n'.join(filter((lambda ln: (not ln.startswith('169.254'))), route.decode().splitlines()))
        rt_info = route.split()
        for i in range((len(rt_info) - 1)):
            if (rt_info[i] == 'dev'):
                ssh_if.append(rt_info[(i + 1)])
    for d in devices.keys():
        if (not device_type_match(devices[d], devices_type)):
            continue
        # Merge in sysfs-derived details for this slot.
        devices[d] = devices[d].copy()
        devices[d].update(get_pci_device_details(d, False).items())
        if (devices_type == network_devices):
            for _if in ssh_if:
                if (_if in devices[d]['Interface'].split(',')):
                    devices[d]['Ssh_if'] = True
                    devices[d]['Active'] = '*Active*'
                    break
        # Ensure every known DPDK driver appears in the Module_str candidates.
        if ('Module_str' in devices[d]):
            for driver in dpdk_drivers:
                if (driver not in devices[d]['Module_str']):
                    devices[d]['Module_str'] = (devices[d]['Module_str'] + (',%s' % driver))
        else:
            devices[d]['Module_str'] = ','.join(dpdk_drivers)
        # The currently-bound driver is not an alternative module candidate.
        if has_driver(d):
            modules = devices[d]['Module_str'].split(',')
            if (devices[d]['Driver_str'] in modules):
                modules.remove(devices[d]['Driver_str'])
                devices[d]['Module_str'] = ','.join(modules)
class Discovery(BaseTest):
    """Discovery (null-hypothesis rejection) significance test.

    Computes the one-sided p-value of the background-only hypothesis at the
    null POI and converts it into a Gaussian significance.
    """
    def __init__(self, calculator: BaseCalculator, poinull: POI):
        super().__init__(calculator, poinull)
    def result(self, printlevel: int=1) -> tuple[(float, float)]:
        """Return (pnull, significance); print them when printlevel > 0."""
        # One-sided discovery p-value evaluated at the null POI.
        (pnull, _) = self.calculator.pvalue(self.poinull, onesideddiscovery=True)
        pnull = pnull[0]
        # Convert the p-value into a one-sided Gaussian significance (in sigma).
        significance = norm.ppf((1.0 - pnull))
        if (printlevel > 0):
            print(f'''
            p_value for the Null hypothesis = {pnull}''')
            print(f'Significance (in units of sigma) = {significance}')
        return (pnull, significance)
class GroupNorm(nn.Module):
    """Group normalization over the trailing channel axis, followed by a
    learned per-channel scale and bias.

    Channels are split into ``ngroups`` groups and each group is standardized
    jointly across the spatial axes and its within-group channels.
    """
    ngroups: int = 32

    def __call__(self, x):
        orig_shape = x.shape
        channels = orig_shape[-1]
        # Split the channel axis into (ngroups, channels_per_group).
        grouped = x.reshape(orig_shape[:-1] + (self.ngroups, channels // self.ngroups))
        normed = standardize(grouped, axis=[1, 2, 4], eps=1e-05)
        out = normed.reshape(orig_shape)
        # Broadcastable per-channel affine parameters.
        affine_shape = tuple([1, 1, 1] + [channels])
        out = out * self.param('scale', nn.initializers.ones, affine_shape)
        out = out + self.param('bias', nn.initializers.zeros, affine_shape)
        return out
class AcuteKidneyInjuryLabValueLabeler(InpatientLabValueLabeler):
    """Labeler for acute kidney injury driven by lab-value measurements.

    ``original_expanded_omop_concept_ids`` lists the OMOP concept ids whose
    measurements this labeler consumes.
    """
    # Fixed: the list literal began with a stray comma (`[, 3020564, ...]`),
    # which is a syntax error. NOTE(review): a leading concept id may have been
    # lost in transit — recover it from upstream history if one existed.
    original_expanded_omop_concept_ids = [3020564, 3035090, 3022243, 3019397, 3040495, 3016723]
class ToTensor(object):
    """Convert every numpy array in a sample dict to a torch tensor.

    The 'image' entry is additionally moved from HWC to CHW layout before
    conversion; non-array values pass through unchanged.
    """

    def __call__(self, sample):
        converted = {}
        for key, value in sample.items():
            if not isinstance(value, np.ndarray):
                converted[key] = value
            elif key == 'image':
                # HWC -> CHW, as torch conv layers expect.
                converted[key] = torch.from_numpy(value.transpose((2, 0, 1)))
            else:
                converted[key] = torch.from_numpy(value)
        return converted
def _check_fp_args(a, b):
    """In Z3 debug mode, assert that at least one of ``a``/``b`` is a
    floating-point expression; a no-op otherwise."""
    if not z3_debug():
        return
    _z3_assert(is_fp(a) or is_fp(b), 'First or second argument must be a Z3 floating-point expression')
def container_construct_op_name(container_cls):
    """Return the TorchScript prim op name that constructs ``container_cls``,
    e.g. ``dict`` -> ``'prim::DictConstruct'``.

    Raises KeyError for unsupported container classes.
    """
    names = {
        dict: 'Dict',
        list: 'List',
        tuple: 'Tuple',
        set: 'Set',
        slice: 'Slice',
    }
    return 'prim::' + names[container_cls] + 'Construct'
class SchellingAgent(Agent):
    """Schelling segregation agent: relocates to an empty cell when too few
    neighbors share its type, otherwise counts itself as happy."""

    def __init__(self, pos, model, agent_type, homophily):
        super().__init__(pos, model)
        self.pos = pos
        self.type = agent_type
        self.homophily = homophily

    def step(self):
        # Count neighbors of the same type.
        similar = sum(1 for neighbor in self.model.grid.neighbor_iter(self.pos) if neighbor.type == self.type)
        if similar < self.homophily:
            # Unhappy: move to a random empty cell.
            self.model.grid.move_to_empty(self)
        else:
            self.model.happy += 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.