code stringlengths 281 23.7M |
|---|
def train(train_loader, model, criterion, optimizer, epoch, args, tensor_writer=None):
    """Run one training epoch with learned per-sample loss re-weighting.

    Per-sample losses are combined through ``weight1`` — learned by
    ``weight_learner`` once ``epoch >= args.epochp``, uniform before that —
    so the backward pass optimizes a weighted sum rather than a plain mean.

    Args:
        train_loader: iterable yielding (images, target) batches.
        model: network returning ``(logits, features)`` and exposing the
            ``pre_features`` / ``pre_weight1`` buffers read/written here.
        criterion: per-sample loss (must not reduce, so it can be weighted).
        optimizer: optimizer stepping the model parameters.
        epoch: current epoch index (logging and weighting schedule).
        args: namespace with ``gpu``, ``epochp``, ``print_freq``, ``log_path``.
        tensor_writer: optional TensorBoard SummaryWriter; skipped when None.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    # BUG FIX: both accuracy meters were named '' which made the progress
    # display and log lines unreadable; give them distinct labels.
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output, cfeatures = model(images)
        pre_features = model.pre_features
        pre_weight1 = model.pre_weight1
        if epoch >= args.epochp:
            weight1, pre_features, pre_weight1 = weight_learner(cfeatures, pre_features, pre_weight1, args, epoch, i)
        else:
            # Warm-up phase: every sample gets weight 1.
            weight1 = Variable(torch.ones(cfeatures.size()[0], 1).cuda())
        model.pre_features.data.copy_(pre_features)
        model.pre_weight1.data.copy_(pre_weight1)
        # Weighted sum of the per-sample losses: (1, B) @ (B, 1) -> scalar.
        loss = criterion(output, target).view(1, -1).mm(weight1).view(1)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        end = time.time()
        method_name = args.log_path.split('/')[-2]
        if i % args.print_freq == 0:
            progress.display(i, method_name)
            progress.write_log(i, args.log_path)
    # BUG FIX: top-1 and top-5 were both written to the same tag '/train',
    # so the second scalar silently overwrote the first; also guard against
    # the default tensor_writer=None which previously crashed.
    if tensor_writer is not None:
        tensor_writer.add_scalar('loss/train', losses.avg, epoch)
        tensor_writer.add_scalar('ACC@1/train', top1.avg, epoch)
        tensor_writer.add_scalar('ACC@5/train', top5.avg, epoch)
class Image(SensorData):
    """A sensor image frame: raw BGRA bytes plus capture metadata."""

    def __init__(self, frame_number, width, height, image_type, fov, raw_data):
        super(Image, self).__init__(frame_number=frame_number)
        # 4 bytes per pixel (BGRA).
        assert len(raw_data) == 4 * width * height
        self.width = width
        self.height = height
        self.type = image_type
        self.fov = fov
        self.raw_data = raw_data
        # Lazily-filled cache for the converted numpy representation.
        self._converted_data = None

    def data(self):
        """Return the image as a numpy array, converting once and caching.

        'Depth' frames decode to a depth map, 'SemanticSegmentation' frames
        to a label array, everything else to an RGB array.
        """
        if self._converted_data is None:
            from . import image_converter
            if self.type == 'Depth':
                self._converted_data = image_converter.depth_to_array(self)
            elif self.type == 'SemanticSegmentation':
                self._converted_data = image_converter.labels_to_array(self)
            else:
                self._converted_data = image_converter.to_rgb_array(self)
        return self._converted_data

    def save_to_disk(self, filename):
        """Save the image as a PNG at *filename*, creating parent folders."""
        filename = _append_extension(filename, '.png')
        try:
            from PIL import Image as PImage
        except ImportError:
            raise RuntimeError('cannot import PIL, make sure pillow package is installed')
        image = PImage.frombytes(mode='RGBA', size=(self.width, self.height), data=self.raw_data, decoder_name='raw')
        # Raw data is BGRA: reverse the first three channels to obtain RGB.
        color = image.split()
        image = PImage.merge('RGB', color[2::-1])
        # BUG FIX: for a bare filename os.path.dirname() returns '' and
        # os.makedirs('') raises; guard the empty dirname and use exist_ok
        # to avoid a race between the isdir check and the creation.
        folder = os.path.dirname(filename)
        if folder:
            os.makedirs(folder, exist_ok=True)
        image.save(filename)
def test_single_param_not_dotted_list_values():
    """A non-dotted param's values are enumerated as '<param>.<1-based index>'."""
    name = 'SomethingOrOther'
    numbers = (123, 765, 3512, 756437, 3125)
    expected = {'%s.%d' % (name, idx): num for idx, num in enumerate(numbers, start=1)}
    assert enumerate_param(name, numbers) == expected
def add_artifacts(resource_database: ResourceDatabase, mode: LayoutArtifactMode, artifact_minimum_progression: int) -> PoolResults:
    """Build the artifact pool.

    The first ``mode.value`` artifacts are placed in the world; the remaining
    ones (up to 12 total) are granted as starting pickups.
    """
    placed_count = mode.value
    to_place = [create_artifact(index, artifact_minimum_progression, resource_database)
                for index in range(placed_count)]
    # Everything past the placed ones is handed out automatically at start.
    starting_items = [create_artifact(index, artifact_minimum_progression, resource_database)
                      for index in range(placed_count, 12)]
    return PoolResults(to_place, {}, starting_items)
class Collate(nn.Module):
    """Collate step: pin a batch and move it to ``device``, optionally
    applying ``transform`` to the images afterwards."""

    def __init__(self, transform=None, device=None):
        super().__init__()
        self.transform = transform
        self.device = device
        # NOTE(review): bare _mode() call preserved verbatim — presumably a
        # module-level mode switch defined elsewhere in the file; confirm.
        _mode()

    def __call__(self, x: ImageNetData):
        batch = x.apply(lambda t: t.as_tensor()).pin_memory().to(self.device)
        if self.transform:
            batch.images = self.transform(batch.images)
        return batch
class Application(object):
    """Base class for command-line tools.

    Wires logging to a timestamped file and wraps subprocess execution with
    logged command lines / output and exit-code checking.

    Attributes:
        conf: configuration object; ``conf.directory`` is where logs go.
        options: parsed command-line options (kept for subclasses).
        logger: root logger used for all output.
        log_filename: path of the log file once setup_log() has run.
    """

    def __init__(self, conf, options):
        self.conf = conf
        self.options = options
        logging.basicConfig(format=LOG_FORMAT)
        self.logger = logging.getLogger()
        self.log_filename = None

    def setup_log(self, prefix):
        """Create '<prefix>-<timestamp>.log' in conf.directory and attach it
        to the logger; exits the program if the file already exists."""
        # Sanitize the prefix so it is safe to embed in a filename.
        prefix = re.sub('[^A-Za-z0-9_-]+', '_', prefix)
        prefix = re.sub('_+', '_', prefix)
        date = datetime.datetime.now()
        date = date.strftime('%Y-%m-%d_%H-%M-%S.log')
        filename = ('%s-%s' % (prefix, date))
        self.log_filename = os.path.join(self.conf.directory, filename)
        log = self.log_filename
        if os.path.exists(log):
            # Refuse to clobber an existing log (same prefix within a second).
            self.logger.error(('ERROR: Log file %s already exists' % log))
            sys.exit(1)
        self.safe_makedirs(os.path.dirname(log))
        handler = logging.FileHandler(log)
        formatter = logging.Formatter(LOG_FORMAT)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

    def create_subprocess(self, cmd, **kwargs):
        """Log the (shell-quoted) command line and spawn it with Popen."""
        # NOTE: logged at error level so the command always appears in the
        # log regardless of the configured verbosity.
        self.logger.error(('+ %s' % ' '.join(map(shlex.quote, cmd))))
        return subprocess.Popen(cmd, **kwargs)

    def run_nocheck(self, *cmd, stdin_filename=None, **kwargs):
        """Run *cmd*, optionally feeding stdin from a file and logging every
        stdout line; return the exit code (failure is logged, not raised)."""
        if stdin_filename:
            # Unbuffered handle; pass the raw fd so the child inherits it.
            stdin_file = open(stdin_filename, 'rb', 0)
            kwargs['stdin'] = stdin_file.fileno()
        else:
            stdin_file = None
        log_stdout = kwargs.pop('log_stdout', True)
        if log_stdout:
            # Merge stderr into stdout so everything lands in one stream.
            kwargs['stdout'] = subprocess.PIPE
            kwargs['stderr'] = subprocess.STDOUT
            kwargs['universal_newlines'] = True
        try:
            proc = self.create_subprocess(cmd, **kwargs)
            with proc:
                if log_stdout:
                    for line in proc.stdout:
                        line = line.rstrip()
                        self.logger.error(line)
                exitcode = proc.wait()
        finally:
            # Close the stdin file even if spawning/reading failed.
            if (stdin_file is not None):
                stdin_file.close()
        if exitcode:
            cmd_str = ' '.join(map(shlex.quote, cmd))
            self.logger.error(('Command %s failed with exit code %s' % (cmd_str, exitcode)))
        return exitcode

    def run(self, *cmd, **kw):
        """Run *cmd* and exit the whole program with its code on failure."""
        exitcode = self.run_nocheck(*cmd, **kw)
        if exitcode:
            sys.exit(exitcode)

    def get_output_nocheck(self, *cmd, **kwargs):
        """Run *cmd* capturing stdout; return (exitcode, stripped_stdout)."""
        proc = self.create_subprocess(cmd, stdout=subprocess.PIPE, universal_newlines=True, **kwargs)
        with proc:
            stdout = proc.communicate()[0]
        stdout = stdout.rstrip()
        exitcode = proc.wait()
        if exitcode:
            cmd_str = ' '.join(map(shlex.quote, cmd))
            self.logger.error(('Command %s failed with exit code %s' % (cmd_str, exitcode)))
        return (exitcode, stdout)

    def get_output(self, *cmd, **kwargs):
        """Run *cmd*; on failure log its output and exit, else return stdout."""
        (exitcode, stdout) = self.get_output_nocheck(*cmd, **kwargs)
        if exitcode:
            for line in stdout.splitlines():
                self.logger.error(line)
            sys.exit(exitcode)
        return stdout

    def safe_makedirs(self, directory):
        """Create *directory* (and parents), tolerating 'already exists'."""
        try:
            os.makedirs(directory)
        except OSError as exc:
            if (exc.errno != errno.EEXIST):
                raise
class AoAModel3_d1_24heads(AttModel):
    """Attention-on-Attention captioning model variant (depth 1, 24 heads).

    Wires an optional AoA refiner over the packed attention features and an
    AoA decoder core on top of the AttModel base class.
    """

    def __init__(self, opt):
        super(AoAModel3_d1_24heads, self).__init__(opt)
        self.num_layers = 2
        # Use the (masked) mean of attention features as the global feature
        # instead of the fc-feature embedding.
        self.use_mean_feats = getattr(opt, 'mean_feats', 1)
        if (opt.use_multi_head == 2):
            # Multi-head attention projects keys itself; ctx2att becomes identity.
            del self.ctx2att
            self.ctx2att = (lambda x: x)
        if self.use_mean_feats:
            # fc features are unused in mean-feature mode; free the embed.
            del self.fc_embed
        if opt.refine:
            self.refiner = AoA_Refiner_Core(opt)
        else:
            # No refinement: pass the attention features through unchanged.
            self.refiner = (lambda x, y: x)
        self.core = AoA_Decoder_Core(opt)

    def _prepare_feature(self, fc_feats, att_feats, flag_feats, att_masks):
        """Embed/refine the attention features and derive the global feature.

        Returns (mean_feats, att_feats, p_att_feats, att_masks).
        """
        (att_feats, att_masks) = self.clip_att(att_feats, att_masks)
        att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        att_feats = self.refiner(att_feats, flag_feats, att_masks)
        if self.use_mean_feats:
            if (att_masks is None):
                mean_feats = torch.mean(att_feats, dim=1)
            else:
                # Masked mean over the valid attention positions only.
                mean_feats = (torch.sum((att_feats * att_masks.unsqueeze((- 1))), 1) / torch.sum(att_masks.unsqueeze((- 1)), 1))
        else:
            mean_feats = self.fc_embed(fc_feats)
        p_att_feats = self.ctx2att(att_feats)
        return (mean_feats, att_feats, p_att_feats, att_masks)
class DictTransactionManager(ModbusTransactionManager):
    """Transaction manager backed by a plain dict keyed by transaction id."""

    def __init__(self, client, **kwargs):
        self.transactions = {}
        super().__init__(client, **kwargs)

    def __iter__(self):
        """Iterate over the known transaction ids."""
        return iter(self.transactions.keys())

    def addTransaction(self, request, tid=None):
        """Store *request* under *tid* (defaults to request.transaction_id)."""
        if tid is None:
            tid = request.transaction_id
        Log.debug('Adding transaction {}', tid)
        self.transactions[tid] = request

    def getTransaction(self, tid):
        """Pop and return the transaction for *tid*.

        A falsy *tid* pops an arbitrary pending transaction (or None when
        empty); an unknown id yields None.
        """
        Log.debug('Getting transaction {}', tid)
        if not tid:
            return self.transactions.popitem()[1] if self.transactions else None
        return self.transactions.pop(tid, None)

    def delTransaction(self, tid):
        """Remove the transaction for *tid*, ignoring unknown ids."""
        Log.debug('deleting transaction {}', tid)
        self.transactions.pop(tid, None)
def plan_and_preprocess(task_string, processes_lowres=default_num_threads, processes_fullres=3, no_preprocessing=False):
    """Plan the 3D and 2D experiments for *task_string*, optionally run the
    preprocessing, then collect per-slice class info for every stage.

    Args:
        task_string: task folder name (e.g. 'Task001_...').
        processes_lowres: worker count for the low-resolution 3D stage.
        processes_fullres: worker count for the full-resolution stages.
        no_preprocessing: when True, only planning is performed.
    """
    import os
    from d_lka_former.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D
    from d_lka_former.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
    preprocessing_output_dir_this_task_train = join(preprocessing_output_dir, task_string)
    cropped_out_dir = join(nnFormer_cropped_data, task_string)
    maybe_mkdir_p(preprocessing_output_dir_this_task_train)
    # Make the dataset metadata available next to the preprocessed output.
    shutil.copy(join(cropped_out_dir, 'dataset_properties.pkl'), preprocessing_output_dir_this_task_train)
    shutil.copy(join(nnFormer_raw_data, task_string, 'dataset.json'), preprocessing_output_dir_this_task_train)
    # 3D planning / preprocessing.
    exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if not no_preprocessing:
        exp_planner.run_preprocessing((processes_lowres, processes_fullres))
    # 2D planning / preprocessing.
    exp_planner = ExperimentPlanner2D(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if not no_preprocessing:
        exp_planner.run_preprocessing(processes_fullres)
    if not no_preprocessing:
        # BUG FIX: use os.path.basename instead of splitting on '/' (breaks
        # on Windows paths).
        stages = [i for i in subdirs(preprocessing_output_dir_this_task_train, join=True, sort=True)
                  if 'stage' in os.path.basename(i)]
        p = Pool(default_num_threads)
        try:
            for s in stages:
                print(os.path.basename(s))
                list_of_npz_files = subfiles(s, True, None, '.npz', True)
                list_of_pkl_files = [i[:-4] + '.pkl' for i in list_of_npz_files]
                all_classes = []
                for pk in list_of_pkl_files:
                    with open(pk, 'rb') as f:
                        props = pickle.load(f)
                    all_classes_tmp = np.array(props['classes'])
                    # Negative labels mark 'ignore' regions; keep real classes only.
                    all_classes.append(all_classes_tmp[all_classes_tmp >= 0])
                p.map(add_classes_in_slice_info, zip(list_of_npz_files, list_of_pkl_files, all_classes))
        finally:
            # BUG FIX: the pool was previously leaked if a worker raised.
            p.close()
            p.join()
# NOTE(review): the original file contained the bare names `_required` and
# `_cache` on the two lines above this view — almost certainly the tails of
# decorators (e.g. `@login_required` / `@never_cache`) whose prefixes were
# lost in processing. As bare expression statements they would raise
# NameError at import time; restore the real decorators from upstream.
def version_feedback(request, package_name, version):
    """Review-feedback page for a plugin version.

    Only plugin editors or users with approval rights may view or post.
    A valid POST creates one PluginVersionFeedback per selected task and
    notifies about the new feedback; the page then renders the feedback
    list together with a (fresh or error-bearing) form.
    """
    plugin = get_object_or_404(Plugin, package_name=package_name)
    version = get_object_or_404(PluginVersion, plugin=plugin, version=version)
    is_user_plugin_owner: bool = request.user in plugin.editors
    is_user_has_approval_rights: bool = check_plugin_version_approval_rights(request.user, plugin)
    if (not is_user_plugin_owner) and (not is_user_has_approval_rights):
        # Neither an editor nor a reviewer: deny access.
        return render(request, template_name='plugins/version_permission_deny.html', context={}, status=403)
    if request.method == 'POST':
        form = VersionFeedbackForm(request.POST)
        if form.is_valid():
            tasks = form.cleaned_data['tasks']
            for task in tasks:
                PluginVersionFeedback.objects.create(version=version, reviewer=request.user, task=task)
            version_feedback_notify(version, request.user)
            # Reset the form after a successful submission.
            form = VersionFeedbackForm()
    else:
        # BUG FIX: on GET the original left `form` unbound (NameError when
        # rendering); always provide a blank form for non-POST requests.
        form = VersionFeedbackForm()
    feedbacks = PluginVersionFeedback.objects.filter(version=version)
    return render(request, 'plugins/plugin_feedback.html', {'feedbacks': feedbacks, 'form': form, 'version': version, 'is_user_has_approval_rights': is_user_has_approval_rights, 'is_user_plugin_owner': is_user_plugin_owner})
class CommentMixin(LoginRequiredMixin, SuccessMessageMixin):
    """Shared configuration for comment edit views: surfaces 'content' form
    errors through the notification framework."""

    model = Comment
    fields = ('content',)
    template_name = 'dictionary/edit/comment_form.html'

    def form_invalid(self, form):
        """Notify the user about every 'content' error, then defer to the
        default invalid-form handling."""
        messages = form.errors['content']
        for message in messages:
            notifications.error(self.request, message)
        return super().form_invalid(form)
class CondConv(nn.Module):
    """Conditionally-parameterized convolution (CondConv).

    Keeps K expert kernels; a per-sample attention over the experts mixes
    them into one kernel per sample, applied in a single grouped conv by
    folding the batch into the channel dimension.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0, dilation=1, grounps=1, bias=True, K=4, init_weight=True):
        # NOTE: the misspelled keyword 'grounps' is kept for interface
        # compatibility with existing callers; it is stored as self.groups.
        super().__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = grounps
        self.K = K
        self.init_weight = init_weight
        self.attention = Attention(in_planes=in_planes, K=K, init_weight=init_weight)
        # One weight tensor per expert: (K, out, in/groups, k, k).
        self.weight = nn.Parameter(torch.randn(K, out_planes, in_planes // grounps, kernel_size, kernel_size), requires_grad=True)
        # FIX: the original first stored the bool flag in self.bias and then
        # immediately overwrote it with the Parameter/None, so the flag was
        # never observable; assign the final value once.
        if bias:
            self.bias = nn.Parameter(torch.randn(K, out_planes), requires_grad=True)
        else:
            self.bias = None
        if self.init_weight:
            self._initialize_weights()

    def _initialize_weights(self):
        # Initialize each expert kernel independently.
        for i in range(self.K):
            nn.init.kaiming_uniform_(self.weight[i])

    def forward(self, x):
        bs, in_planels, h, w = x.shape
        softmax_att = self.attention(x)  # per-sample mixing weights over the K experts
        # Fold the batch into the channel dimension so a single grouped conv
        # applies a different aggregated kernel to each sample.
        x = x.view(1, -1, h, w)
        weight = self.weight.view(self.K, -1)
        aggregate_weight = torch.mm(softmax_att, weight).view(bs * self.out_planes, self.in_planes // self.groups, self.kernel_size, self.kernel_size)
        if self.bias is not None:
            bias = self.bias.view(self.K, -1)
            aggregate_bias = torch.mm(softmax_att, bias).view(-1)
            output = F.conv2d(x, weight=aggregate_weight, bias=aggregate_bias, stride=self.stride, padding=self.padding, groups=self.groups * bs, dilation=self.dilation)
        else:
            output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding, groups=self.groups * bs, dilation=self.dilation)
        # BUG FIX: reshape using the conv's actual spatial size instead of the
        # input's (h, w); the original broke for stride != 1 or whenever the
        # padding/kernel combination changes the spatial dimensions.
        output = output.view(bs, self.out_planes, output.size(-2), output.size(-1))
        return output
def test_solver_can_resolve_sdist_dependencies_with_extras(solver: Solver, repo: Repository, package: ProjectPackage, fixture_dir: FixtureDirGetter) -> None:
    """The solver resolves an sdist path dependency with its 'foo' extra."""
    pendulum = get_package('pendulum', '2.0.3')
    cleo = get_package('cleo', '1.0.0')
    for available in (pendulum, cleo):
        repo.add_package(available)
    sdist_path = (fixture_dir('distributions') / 'demo-0.1.0.tar.gz').as_posix()
    package.add_dependency(Factory.create_dependency('demo', {'path': sdist_path, 'extras': ['foo']}))
    transaction = solver.solve()
    demo = Package('demo', '0.1.0', source_type='file', source_url=sdist_path)
    expected_jobs = [
        {'job': 'install', 'package': cleo},
        {'job': 'install', 'package': pendulum},
        {'job': 'install', 'package': demo},
    ]
    ops = check_solver_result(transaction, expected_jobs)
    demo_op = ops[2]
    assert demo_op.package.name == 'demo'
    assert demo_op.package.version.text == '0.1.0'
    assert demo_op.package.source_type == 'file'
    assert demo_op.package.source_url == sdist_path
def perturb_iterative(xvar, yvar, predict, nb_iter, eps, eps_iter, loss_fn, delta_init=None, minimize=False, ord=np.inf, clip_min=0.0, clip_max=1.0):
    """Iteratively maximize (or minimize) the loss within an eps-ball (PGD-style).

    Args:
        xvar: clean input tensor.
        yvar: targets passed to loss_fn.
        predict: forward function mapping inputs to model outputs.
        nb_iter: number of attack iterations.
        eps: maximum perturbation magnitude (L-inf or L2, per ``ord``).
        eps_iter: per-iteration step size.
        loss_fn: loss to ascend (descend when ``minimize`` is True).
        delta_init: optional starting perturbation (e.g. random init).
        minimize: minimize instead of maximize the loss.
        ord: norm of the ball; only np.inf and 2 are implemented.
        clip_min / clip_max: valid data range of the inputs.

    Returns:
        (x_adv, r_adv): adversarial examples and the final perturbation.
    """
    if (delta_init is not None):
        delta = delta_init
    else:
        delta = torch.zeros_like(xvar)
    delta.requires_grad_()
    for ii in range(nb_iter):
        outputs = predict((xvar + delta))
        loss = loss_fn(outputs, yvar)
        if minimize:
            # Flip the sign so the gradient step descends the loss.
            loss = (- loss)
        loss.backward()
        if (ord == np.inf):
            # L-inf: signed-gradient step, clamp into the eps box, then keep
            # x + delta inside the valid data range.
            grad_sign = delta.grad.data.sign()
            delta.data = (delta.data + batch_multiply(eps_iter, grad_sign))
            delta.data = batch_clamp(eps, delta.data)
            delta.data = (clamp((xvar.data + delta.data), clip_min, clip_max) - xvar.data)
        elif (ord == 2):
            # L2: step along the normalized gradient, clip to the data range,
            # then project back onto the eps ball.
            grad = delta.grad.data
            grad = normalize_by_pnorm(grad)
            delta.data = (delta.data + batch_multiply(eps_iter, grad))
            delta.data = (clamp((xvar.data + delta.data), clip_min, clip_max) - xvar.data)
            if (eps is not None):
                delta.data = clamp_by_pnorm(delta.data, ord, eps)
        else:
            error = 'Only ord=inf and ord=2 have been implemented'
            raise NotImplementedError(error)
        # Reset the accumulated gradient before the next iteration.
        delta.grad.data.zero_()
    x_adv = clamp((xvar + delta), clip_min, clip_max)
    r_adv = (x_adv - xvar)
    return (x_adv, r_adv)
def save_tf_session_single_gpu(sess: tf.compat.v1.Session, path: str, input_tensor: str, output_tensor: str):
    """Export a TF1 session as a SavedModel exposing a single 'train' signature.

    Args:
        sess: live TF1 session whose graph holds the tensors to export.
        path: output directory (removed first if it already exists).
        input_tensor: graph name of the input tensor (e.g. 'x:0').
        output_tensor: graph name of the output tensor.
    """
    # (Annotation fix only: the original annotated `sess` with a Session
    # *call* and used quoted 'str' literals.)
    with sess.graph.as_default():
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
    inputs = sess.graph.get_tensor_by_name(input_tensor)
    train_out = sess.graph.get_tensor_by_name(output_tensor)
    with sess.graph.as_default():
        # Wrap the tensors in a predict signature named 'train'.
        train_signature = tf.compat.v1.saved_model.predict_signature_def(inputs={'x': inputs}, outputs={'out': train_out})
        # Start from a clean directory; the builder refuses to overwrite.
        shutil.rmtree(path, ignore_errors=True)
        builder = tf.compat.v1.saved_model.Builder(path)
        builder.add_meta_graph_and_variables(sess, ['serve'], signature_def_map={'train': train_signature})
        builder.save()
def load_model(model, model_path, opt, optimizer=None):
    """Load weights (and optionally optimizer state) from a checkpoint.

    Strips a leading 'module.' prefix (DataParallel), reconciles mismatched
    tensor shapes against the current model, and — when resuming — restores
    the epoch and decays the learning rate per opt.lr_step.

    Args:
        model: network to load the weights into.
        model_path: path to a checkpoint containing 'state_dict' and 'epoch'.
        opt: options namespace (reset_hm, reuse_hm, resume, lr, lr_step).
        optimizer: optional optimizer to restore.

    Returns:
        (model, optimizer, start_epoch) when an optimizer is given, else model.
    """
    start_epoch = 0
    # map_location keeps everything on CPU so loading works without a GPU.
    checkpoint = torch.load(model_path, map_location=(lambda storage, loc: storage))
    print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
    state_dict_ = checkpoint['state_dict']
    state_dict = {}
    for k in state_dict_:
        # Drop the 'module.' prefix added by nn.DataParallel, but keep
        # genuine 'module_list...' keys intact.
        if k.startswith('module') and (not k.startswith('module_list')):
            state_dict[k[7:]] = state_dict_[k]
        else:
            state_dict[k] = state_dict_[k]
    model_state_dict = model.state_dict()
    for k in state_dict:
        if k in model_state_dict:
            if (state_dict[k].shape != model_state_dict[k].shape) or (opt.reset_hm and k.startswith('hm') and (state_dict[k].shape[0] in [80, 1])):
                if opt.reuse_hm:
                    print('Reusing parameter {}, required shape{}, loaded shape{}.'.format(k, model_state_dict[k].shape, state_dict[k].shape))
                    # BUG FIX: the original compared state_dict[k].shape[0]
                    # with itself (always False), so the 'loaded smaller
                    # than required' copy path could never execute.
                    if state_dict[k].shape[0] < model_state_dict[k].shape[0]:
                        model_state_dict[k][:state_dict[k].shape[0]] = state_dict[k]
                    else:
                        model_state_dict[k] = state_dict[k][:model_state_dict[k].shape[0]]
                    state_dict[k] = model_state_dict[k]
                else:
                    print('Skip loading parameter {}, required shape{}, loaded shape{}.'.format(k, model_state_dict[k].shape, state_dict[k].shape))
                    state_dict[k] = model_state_dict[k]
        else:
            print('Drop parameter {}.'.format(k))
    for k in model_state_dict:
        if not (k in state_dict):
            # Parameter exists in the model but not the checkpoint: keep the
            # model's current (initialized) value.
            print('No param {}.'.format(k))
            state_dict[k] = model_state_dict[k]
    model.load_state_dict(state_dict, strict=False)
    if (optimizer is not None) and opt.resume:
        if 'optimizer' in checkpoint:
            start_epoch = checkpoint['epoch']
            # Recompute the lr as if training had run up to start_epoch.
            start_lr = opt.lr
            for step in opt.lr_step:
                if start_epoch >= step:
                    start_lr *= 0.1
            for param_group in optimizer.param_groups:
                param_group['lr'] = start_lr
            print('Resumed optimizer with start lr', start_lr)
        else:
            print('No optimizer parameters in checkpoint.')
    if optimizer is not None:
        return (model, optimizer, start_epoch)
    return model
class ArgumentCheckedCallable():
    """Wraps a callable so its arguments are validated against declared
    'arg_checks' before every invocation.

    Attributes:
        target: the wrapped callable (function, method, class, or object
            with __call__).
        explanation: optional message used to wrap check failures in an
            IncorrectArgumentError.
    """

    def __init__(self, target, explanation=None):
        self.target = target
        self.explanation = explanation

    def __call__(self, *args, **kwargs):
        """Validate the arguments, then delegate to the wrapped target."""
        self.checkargs(*args, **kwargs)
        return self.target(*args, **kwargs)

    def checkargs(self, *args, **kwargs):
        """Run the declared per-argument checks against the bound call args.

        Returns silently when runtime checking is disabled in the current
        execution context; wraps failures in IncorrectArgumentError when an
        explanation was provided.
        """
        try:
            config = ExecutionContext.get_context().config
            if (not config.reahlsystem.runtime_checking_enabled):
                return
        except (NoContextFound, AttributeError):
            # No context / config available: fall through and check anyway.
            pass
        # Determine which function object's signature should be inspected.
        if isinstance(self.target, PartialCallableObjectProxy):
            to_check = self.target.__call__
        elif inspect.ismethod(self.target):
            to_check = self.target
        elif inspect.isfunction(self.target):
            to_check = self.target
        elif inspect.isclass(self.target):
            to_check = self.target.__init__
            # __init__ expects self, which does not exist before construction.
            args = ((NotYetAvailable('self'),) + args)
        elif isinstance(self.target, Callable):
            to_check = self.target.__call__
        else:
            raise ProgrammerError(('%s was expected to be a callable object' % self.target))
        try:
            try:
                bound_args = inspect.getcallargs(to_check, *args, **kwargs)
            except TypeError as ex:
                # Prefix signature errors with the target for easier debugging.
                ex.args = (((('%s: ' % self.target) + ex.args[0]),) + ex.args[1:])
                raise
            args_to_check = getattr(to_check, 'arg_checks', {})
            for (arg_name, arg_check) in args_to_check.items():
                if (arg_name in bound_args.keys()):
                    arg_check.check(self.target, arg_name, bound_args[arg_name])
        except (TypeError, ArgumentCheck) as ex:
            if self.explanation:
                # Re-raise under the user-supplied explanation while keeping
                # the original traceback.
                (_, _, tb) = sys.exc_info()
                new_ex = IncorrectArgumentError(self.explanation, ex)
                raise new_ex.with_traceback(tb)
            else:
                raise
class CLUEProcessor(CLSProcessor):
    """Data processor for CLUE classification tasks.

    Dispatches to the task-specific processor selected by the user-defined
    'data_name' parameter, optionally mixes in self-training pseudo-labelled
    dev/test rows, and implements prediction decoding, metric computation,
    and submission-file writing for the benchmark.
    """

    def __init__(self, data_args, training_args, model_args, tokenizer=None, post_tokenizer=False, keep_raw_data=True):
        super().__init__(data_args, training_args, model_args, tokenizer, post_tokenizer=post_tokenizer, keep_raw_data=keep_raw_data)
        # user_defined is a space-separated list of key=value pairs.
        param = {p.split('=')[0]: p.split('=')[1] for p in data_args.user_defined.split(' ')}
        assert ('data_name' in param), "You must add one defined param 'data_name=xxx' in the user_defined parameter."
        self.data_name = param['data_name']
        # Pseudo-labelling (self-training) is off unless requested.
        self.is_pseudo = False
        self.pseudo_threshold = 1.0
        if ('is_pseudo' in param.keys()):
            # NOTE(review): bool('False') is True — any non-empty string
            # enables pseudo labelling; confirm intended semantics.
            self.is_pseudo = bool(param['is_pseudo'])
            self.pseudo_threshold = float(param['pseudo_threshold'])
        self.data_dir = data_args.data_dir
        assert (self.data_name in clue_processors.keys()), 'Unknown task name {}'.format(self.data_name)
        self.processor = clue_processors[self.data_name]()
        self.output_modes = clue_output_modes[self.data_name]
        self.train_file = os.path.join(data_args.data_dir, 'train.json')
        self.dev_file = os.path.join(data_args.data_dir, 'dev.json')
        self.test_file = os.path.join(data_args.data_dir, 'test.json')
        self.max_len = data_args.max_seq_length
        self.doc_stride = data_args.doc_stride
        self.sentence1_key = None
        self.labels = self.processor.get_labels()

    def get_data_collator(self):
        """Return a DataCollator matching the padding / fp16 settings."""
        pad_to_multiple_of_8 = (self.training_args.fp16 and (not self.data_args.pad_to_max_length))
        return DataCollator(self.tokenizer, max_length=self.data_args.max_seq_length, pad_to_multiple_of=(8 if pad_to_multiple_of_8 else None), pad_to_max_length=self.data_args.pad_to_max_length)

    def get_examples(self, set_type):
        """Build examples for 'train'/'dev'/'test'.

        For 'train' with pseudo-labelling enabled, dev/test pseudo rows
        whose confidence clears self.pseudo_threshold are appended.
        """

        def read_pseudo(input_file):
            # Keep only pseudo rows whose confidence clears the threshold.
            with open(input_file, 'r', encoding='utf-8') as f:
                reader = f.readlines()
            lines = []
            for line in reader:
                pseudo_data = json.loads(line.strip())
                if (float(pseudo_data['pseudo_proba']) >= self.pseudo_threshold):
                    lines.append(pseudo_data)
            return lines
        examples = list()
        if (set_type == 'train'):
            if self.is_pseudo:
                train_lines = self._read_json2(os.path.join(self.data_dir, 'train.json'))
                (dev_pseudo_num, test_pseudo_num) = (0, 0)
                if os.path.exists(os.path.join(self.data_dir, 'dev_pseudo.json')):
                    dev_pseudo_lines = read_pseudo(os.path.join(self.data_dir, 'dev_pseudo.json'))
                    train_lines.extend(dev_pseudo_lines)
                    dev_pseudo_num += len(dev_pseudo_lines)
                if os.path.exists(os.path.join(self.data_dir, 'test_pseudo.json')):
                    test_pseudo_lines = read_pseudo(os.path.join(self.data_dir, 'test_pseudo.json'))
                    train_lines.extend(test_pseudo_lines)
                    test_pseudo_num += len(test_pseudo_lines)
                examples = self._create_examples(train_lines, 'train')
                print('add pseudo dev num={}'.format(str(dev_pseudo_num)))
                print('add pseudo test num={}'.format(str(test_pseudo_num)))
                print('add pseudo all num={}'.format(str((dev_pseudo_num + test_pseudo_num))))
                self.train_examples = examples
            else:
                examples = self._create_examples(self._read_json2(self.train_file), 'train')
                self.train_examples = examples
        elif (set_type == 'dev'):
            examples = self._create_examples(self._read_json2(self.dev_file), 'dev')
            examples = examples[:self.data_args.max_eval_samples]
            self.dev_examples = examples
        elif (set_type == 'test'):
            examples = self._create_examples(self._read_json2(self.test_file), 'test')
            examples = examples[:self.data_args.max_predict_samples]
            self.test_examples = examples
        return examples

    def _create_examples(self, lines, set_type):
        """Delegate example construction to the task-specific processor."""
        examples = self.processor.create_examples(lines, set_type)
        return examples

    def build_preprocess_function(self):
        """Return a tokenization closure suitable for datasets.map."""
        tokenizer = self.tokenizer
        max_seq_length = self.data_args.max_seq_length

        def func(examples):
            # Single-sentence tasks carry text_b == None for every row.
            # NOTE(review): `== None` should be `is None`; kept verbatim.
            if (examples['text_b'][0] == None):
                text_pair = None
            else:
                text_pair = examples['text_b']
            tokenized_examples = tokenizer(examples['text_a'], text_pair=text_pair, truncation=True, max_length=max_seq_length, padding=('max_length' if self.data_args.pad_to_max_length else False), return_offsets_mapping=True)
            return tokenized_examples
        return func

    def get_predict_result(self, logits, examples, stage='dev'):
        """Decode logits into per-example predictions.

        Returns (predictions, topk_result) keyed by the numeric example id;
        additionally writes '<stage>_pseudo.json' with confidence-annotated
        rows for later self-training.
        """
        predictions = dict()
        topk_result = dict()
        pseudo_data = list()
        preds = logits
        if (self.output_modes == 'classification'):
            preds = np.argmax(preds, axis=1)
        elif (self.output_modes == 'regression'):
            preds = np.squeeze(preds)
        for (pred, example, logit) in zip(preds, examples, logits):
            # guid format is '<split>-<numeric id>'.
            id_ = example['guid']
            id_ = int(id_.split('-')[1])
            predictions[id_] = pred
            proba = softmax(logit)
            # Labels sorted by descending probability; keep the top 20.
            indices = np.argsort((- proba))
            out = list()
            for index in indices[:20]:
                prob = proba[index].tolist()
                index = index.tolist()
                out.append({'prob': prob, 'answer': index})
            topk_result[id_] = out
            pseudo_proba = proba[pred]
            pseudo_data.append({'guid': str(id_), 'text_a': example['text_a'], 'text_b': example['text_b'], 'label': str(pred), 'pseudo_proba': str(pseudo_proba)})
        with open(os.path.join(self.data_dir, '{}_pseudo.json'.format(stage)), 'w') as writer:
            for (i, pred) in enumerate(pseudo_data):
                json_d = pred
                writer.write((json.dumps(json_d, ensure_ascii=False) + '\n'))
        return (predictions, topk_result)

    def compute_metrics(self, eval_predictions):
        """Compute macro/micro F1 and accuracy over the validation split."""
        examples = self.raw_datasets['validation']
        labels = examples['label']
        (golden, dataname_map, dataname_type) = ({}, defaultdict(list), {})
        (predictions, _) = self.get_predict_result(eval_predictions[0], examples, stage='dev')
        for example in examples:
            data_type = self.output_modes
            data_name = self.data_name
            if (data_name not in dataname_type):
                dataname_type[data_name] = data_type
            id_ = example['guid']
            id_ = int(id_.split('-')[1])
            dataname_map[data_name].append(id_)
            golden[id_] = example['label']
        all_metrics = {'eval_macro_f1': 0.0, 'eval_micro_f1': 0.0, 'eval_num': 0, 'eval_acc': 0.0}
        for (dataname, data_ids) in dataname_map.items():
            metric = datatype2metrics[dataname_type[dataname]]()
            gold = {k: v for (k, v) in golden.items() if (k in data_ids)}
            pred = {k: v for (k, v) in predictions.items() if (k in data_ids)}
            score = metric.calc_metric(golden=gold, predictions=pred)
            (acc, f1) = (score['acc'], score['f1'])
            if ((len(gold) != len(pred)) or (len(gold) < 20)):
                # Mismatched / tiny splits usually indicate an id problem.
                print('len(gold)=', len(gold))
                print('len(pred)=', len(pred))
            all_metrics['eval_macro_f1'] += f1
            all_metrics['eval_micro_f1'] += (f1 * len(data_ids))
            all_metrics['eval_num'] += len(data_ids)
            all_metrics['eval_acc'] += acc
            all_metrics[dataname] = round(f1, 4)
        all_metrics['eval_macro_f1'] = round((all_metrics['eval_macro_f1'] / len(dataname_map)), 4)
        all_metrics['eval_micro_f1'] = round((all_metrics['eval_micro_f1'] / all_metrics['eval_num']), 4)
        all_metrics['eval_macro_acc'] = round((all_metrics['eval_acc'] / len(dataname_map)), 4)
        return all_metrics

    def save_result(self, logits, label_ids):
        """Write the competition submission ('answer.json') and the top-20
        probability dump ('top20_predict.json') for the test split."""
        examples = self.raw_datasets['test']
        (predicts, topk_predictions) = self.get_predict_result(logits, examples, stage='test')
        clue_processor = clue_processors[self.data_name]()
        label_list = clue_processor.get_labels()
        id2label = {i: label for (i, label) in enumerate(label_list)}
        answer = list()
        for (k, v) in predicts.items():
            if (v not in id2label.keys()):
                # Prediction outside the known label set: emit an empty label.
                res = ''
                print('unknown')
            else:
                res = id2label[v]
            answer.append({'id': k, 'label': res})
        output_submit_file = os.path.join(self.training_args.output_dir, 'answer.json')
        with open(output_submit_file, 'w') as writer:
            for (i, pred) in enumerate(answer):
                json_d = {}
                json_d['id'] = i
                json_d['label'] = pred['label']
                writer.write((json.dumps(json_d) + '\n'))
        topfile = os.path.join(self.training_args.output_dir, 'top20_predict.json')
        with open(topfile, 'w', encoding='utf-8') as f2:
            json.dump(topk_predictions, f2, ensure_ascii=False, indent=4)
def format_to_lines(args):
    """Shard raw corpus files into JSON files of roughly args.shard_size docs.

    Every file under <raw_path>/{train,valid,test} is converted with
    _format_to_lines in a worker pool; results are written incrementally to
    <save_path>/<split>/<shard_index>.json.
    """
    corpora = {'train': [], 'valid': [], 'test': []}
    read_root_path = Path(args.raw_path)
    for corpus_type in ['valid', 'test', 'train']:
        read_path = read_root_path / corpus_type
        for fp in read_path.iterdir():
            corpora[corpus_type].append(fp)
    save_root_path = Path(args.save_path)
    for corpus_type in ['train', 'valid', 'test']:
        save_path = save_root_path / corpus_type
        save_path.mkdir(parents=True, exist_ok=True)
        a_lst = [(f, args) for f in corpora[corpus_type]]
        pool = Pool(args.n_cpus)
        dataset = []
        p_ct = 0
        try:
            for d in tqdm(pool.imap_unordered(_format_to_lines, a_lst)):
                dataset.append(d)
                if len(dataset) > args.shard_size:
                    _write_shard(save_path, p_ct, dataset)
                    p_ct += 1
                    dataset = []
        finally:
            # BUG FIX: the pool was previously leaked when a worker raised;
            # always close and join it.
            pool.close()
            pool.join()
        # Flush the final partial shard, if any.
        if len(dataset) > 0:
            _write_shard(save_path, p_ct, dataset)
            p_ct += 1


def _write_shard(save_path, shard_index, dataset):
    """Write one shard of converted documents as a JSON array."""
    with (save_path / f'{shard_index}.json').open('w', encoding='utf-8') as s_f:
        s_f.write(json.dumps(dataset))
class _CppLintState(object):
def __init__(self):
self.verbose_level = 1
self.error_count = 0
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total'
self.errors_by_category = {}
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
self.output_format = output_format
def SetVerboseLevel(self, level):
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
self.counting = counting_style
def SetFilters(self, filters):
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if (not (filt.startswith('+') or filt.startswith('-'))):
raise ValueError(('Every filter in --filters must start with + or - (%s does not)' % filt))
def ResetErrorCounts(self):
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
self.error_count += 1
if (self.counting in ('toplevel', 'detailed')):
if (self.counting != 'detailed'):
category = category.split('/')[0]
if (category not in self.errors_by_category):
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
for (category, count) in iteritems(self.errors_by_category):
sys.stderr.write(("Category '%s' errors found: %d\n" % (category, count)))
sys.stderr.write(('Total errors found: %d\n' % self.error_count)) |
class GroupViTVisionConfig(PretrainedConfig):
    """Configuration for the GroupViT vision encoder.

    Stores the vision-tower hyper-parameters (stage depths, group tokens and
    output groups per stage, attention sizes, initializer scales, ...).
    """

    model_type = 'groupvit_vision_model'

    def __init__(self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.depths = depths
        if num_hidden_layers != sum(depths):
            # The total layer count is normally implied by the stage depths.
            logger.warning(f'Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers = sum(depth) = {sum(depths)}')
        self.num_hidden_layers = num_hidden_layers
        self.num_group_tokens = num_group_tokens
        self.num_output_groups = num_output_groups
        self.num_attention_heads = num_attention_heads
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.assign_eps = assign_eps
        self.assign_mlp_ratio = assign_mlp_ratio

    # BUG FIX: from_pretrained takes `cls` and is meant to be called on the
    # class; without @classmethod the first positional argument was bound to
    # the checkpoint path and `cls.get_config_dict(...)` failed.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        """Load this config from a checkpoint, unwrapping the vision section
        of a combined 'groupvit' config when necessary."""
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == 'groupvit':
            config_dict = config_dict['vision_config']
        if ('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def runMssqlInfoModule(args):
    """Run the MSSQL information module.

    Queries the remote server version over the TDS protocol, then asks the
    SQL Server Browser service for instance details, reporting good/bad news
    for each probe.
    """
    if checkOptionsGivenByTheUser(args, ['get-max-info'], checkAccount=False) == False:
        return EXIT_MISS_ARGUMENT
    if args['get-max-info'] == True:
        printer = args['print']
        info_probe = MssqlInfo(args)
        # Probe 1: server version via the TDS pre-login response.
        tds_version = info_probe.__getRemoteVersionThroughTDSResponse__()
        printer.title('Try to get the remote database version thanks to the TDS protocol:')
        has_details = ('Version' in tds_version) and ('ProductName' in tds_version)
        if has_details:
            printer.goodNews('The SQL server version of {0}:{1}: {2} i.e. {3}'.format(args['host'], args['port'], tds_version['Version'], tds_version['ProductName']))
        else:
            printer.badNews('Impossible to get the remote database version thanks to the TDS protocol')
        # Probe 2: instance details via the SQL Server Browser service.
        printer.title('Try to get information about the remote database thanks to SQL browser Server:')
        browser_info = info_probe.__getRemoteVersionThroughSQLServerBrowser__()
        if browser_info == {}:
            printer.badNews('SQL Server Browser is not enabled on the server {0}:{1}'.format(args['host'], args['port']))
        else:
            printer.goodNews('SQL Server Browser is enabled on the server {0}:{1}:\n{2}'.format(args['host'], args['port'], info_probe.returnPrintableStringFromDict(browser_info)))
def parse_frame(data, count, mask, extensions):
    """Feed *data* into a fresh frame parser *count* times.

    Verifies that the parser yields (asking for more input) rather than
    completing a frame, and that after EOF the reader has consumed
    every byte it was fed.
    """
    reader = StreamReader()
    remaining = count
    while remaining > 0:
        reader.feed_data(data)
        remaining -= 1
    frame_gen = Frame.parse(reader.read_exact, mask=mask, extensions=extensions)
    try:
        next(frame_gen)
    except StopIteration:
        # Generator returned: it produced a complete frame.
        pass
    else:
        assert False, 'parser should return frame'
    reader.feed_eof()
    assert reader.at_eof(), 'parser should consume all data'
class Counter(dict):
    """A dict-backed multiset.

    Missing keys read as 0 (without being inserted), ``update`` adds
    counts instead of replacing values, and any entry whose count drops
    to zero or below is removed from the mapping.
    """

    def __missing__(self, key):
        # Absent keys count as zero; __missing__ does not insert the key.
        return 0

    def update(self, other):
        """Add the counts from *other* into this counter."""
        for key, count in other.items():
            self[key] = self[key] + count

    def subtract(self, other):
        """Remove the counts in *other*, dropping non-positive entries."""
        for key, count in other.items():
            self[key] = self[key] - count
            if self[key] <= 0:
                del self[key]

    def subtract1(self, key):
        """Remove a single occurrence of *key* (dropping it at zero)."""
        self[key] = self[key] - 1
        if self[key] <= 0:
            del self[key]
def _lcs(a, b):
    """Reconstruct one longest common subsequence of *a* and *b*.

    Backtracks through the DP table built by ``_lcs_dp`` from the
    bottom-right corner, prepending matched elements so the result
    comes out in original order.  Returns a deque of elements.
    """
    table = _lcs_dp(a, b)
    result = deque()
    i, j = len(a), len(b)
    while i and j:
        if a[i - 1] == b[j - 1]:
            # Elements match: they belong to the LCS; step diagonally.
            result.appendleft(a[i - 1])
            i, j = i - 1, j - 1
        elif table[i - 1][j] >= table[i][j - 1]:
            i -= 1
        else:
            j -= 1
    # Sanity check: reconstruction length must equal the DP optimum.
    assert len(result) == table[-1][-1]
    return result
class QdrantClient(QdrantFastembedMixin):
    """Entry point to the Qdrant service.

    A facade that, based on the constructor arguments, instantiates either a
    local backend (in-memory or on-disk) or a remote backend, and delegates
    every public operation to it unchanged.  Most methods simply forward
    their arguments to ``self._client`` after rejecting unknown kwargs.

    Fixes over the previous revision:
    - the ``https`` constructor parameter had lost its name (a syntax
      error) and was not forwarded to the remote backend;
    - the ``http`` accessor had lost its name in ``def -> SyncApis[...]``.
    """

    def __init__(self, location: Optional[str]=None, url: Optional[str]=None, port: Optional[int]=6333, grpc_port: int=6334, prefer_grpc: bool=False, https: Optional[bool]=None, api_key: Optional[str]=None, prefix: Optional[str]=None, timeout: Optional[float]=None, host: Optional[str]=None, path: Optional[str]=None, force_disable_check_same_thread: bool=False, **kwargs: Any):
        """Create the client.

        At most one of ``location``, ``url``, ``host`` or ``path`` may be
        given.  ``location=':memory:'`` selects an ephemeral local instance,
        ``path`` an on-disk local instance; anything else connects to a
        remote server (``location`` doubling as ``url`` when ``url`` is not
        set).  Extra ``kwargs`` are passed to both the mixin and the remote
        backend.
        """
        super().__init__(**kwargs)
        self._client: QdrantBase
        if sum([(param is not None) for param in (location, url, host, path)]) > 1:
            raise ValueError('Only one of <location>, <url>, <host> or <path> should be specified.')
        if location == ':memory:':
            self._client = QdrantLocal(location=location, force_disable_check_same_thread=force_disable_check_same_thread)
        elif path is not None:
            self._client = QdrantLocal(location=path, force_disable_check_same_thread=force_disable_check_same_thread)
        else:
            if (location is not None) and (url is None):
                url = location
            # ``https`` is forwarded here; previously it was unnamed and lost.
            self._client = QdrantRemote(url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, **kwargs)
        # Probe once whether the optional fastembed extra is importable.
        # (The previous ``if ... is None`` guard right after assigning None
        # was always true and has been removed.)
        try:
            from fastembed.embedding import DefaultEmbedding  # noqa: F401
            self._is_fastembed_installed: Optional[bool] = True
        except ImportError:
            self._is_fastembed_installed = False

    def __del__(self) -> None:
        self.close()

    def close(self, **kwargs: Any) -> None:
        """Close the underlying backend, if it was ever created."""
        if hasattr(self, '_client'):
            self._client.close(**kwargs)

    # --- low-level transport accessors (remote backend only) ---

    def grpc_collections(self) -> grpc.CollectionsStub:
        if isinstance(self._client, QdrantRemote):
            return self._client.grpc_collections
        raise NotImplementedError(f'gRPC client is not supported for {type(self._client)}')

    def grpc_points(self) -> grpc.PointsStub:
        if isinstance(self._client, QdrantRemote):
            return self._client.grpc_points
        raise NotImplementedError(f'gRPC client is not supported for {type(self._client)}')

    def async_grpc_points(self) -> grpc.PointsStub:
        if isinstance(self._client, QdrantRemote):
            return self._client.async_grpc_points
        raise NotImplementedError(f'gRPC client is not supported for {type(self._client)}')

    def async_grpc_collections(self) -> grpc.CollectionsStub:
        if isinstance(self._client, QdrantRemote):
            return self._client.async_grpc_collections
        raise NotImplementedError(f'gRPC client is not supported for {type(self._client)}')

    def rest(self) -> SyncApis[ApiClient]:
        if isinstance(self._client, QdrantRemote):
            return self._client.rest
        raise NotImplementedError(f'REST client is not supported for {type(self._client)}')

    def http(self) -> SyncApis[ApiClient]:
        # Restored name: this accessor previously read ``def -> SyncApis[...]``.
        if isinstance(self._client, QdrantRemote):
            return self._client.http
        raise NotImplementedError(f'REST client is not supported for {type(self._client)}')

    # --- search / recommend / discover ---

    def search_batch(self, collection_name: str, requests: Sequence[types.SearchRequest], timeout: Optional[int]=None, consistency: Optional[types.ReadConsistency]=None, **kwargs: Any) -> List[List[types.ScoredPoint]]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.search_batch(collection_name=collection_name, requests=requests, consistency=consistency, timeout=timeout, **kwargs)

    def search(self, collection_name: str, query_vector: Union[types.NumpyArray, Sequence[float], Tuple[str, List[float]], types.NamedVector, types.NamedSparseVector], query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, offset: Optional[int]=None, with_payload: Union[bool, Sequence[str], types.PayloadSelector]=True, with_vectors: Union[bool, Sequence[str]]=False, score_threshold: Optional[float]=None, append_payload: bool=True, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[types.ScoredPoint]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.search(collection_name=collection_name, query_vector=query_vector, query_filter=query_filter, search_params=search_params, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, append_payload=append_payload, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs)

    def search_groups(self, collection_name: str, query_vector: Union[types.NumpyArray, Sequence[float], Tuple[str, List[float]], types.NamedVector, types.NamedSparseVector], group_by: str, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, group_size: int=1, with_payload: Union[bool, Sequence[str], types.PayloadSelector]=True, with_vectors: Union[bool, Sequence[str]]=False, score_threshold: Optional[float]=None, with_lookup: Optional[types.WithLookupInterface]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> types.GroupsResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.search_groups(collection_name=collection_name, query_vector=query_vector, group_by=group_by, query_filter=query_filter, search_params=search_params, limit=limit, group_size=group_size, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, with_lookup=with_lookup, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs)

    def recommend_batch(self, collection_name: str, requests: Sequence[types.RecommendRequest], consistency: Optional[types.ReadConsistency]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[List[types.ScoredPoint]]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.recommend_batch(collection_name=collection_name, requests=requests, consistency=consistency, timeout=timeout, **kwargs)

    def recommend(self, collection_name: str, positive: Optional[Sequence[types.RecommendExample]]=None, negative: Optional[Sequence[types.RecommendExample]]=None, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, offset: int=0, with_payload: Union[bool, List[str], types.PayloadSelector]=True, with_vectors: Union[bool, List[str]]=False, score_threshold: Optional[float]=None, using: Optional[str]=None, lookup_from: Optional[types.LookupLocation]=None, strategy: Optional[types.RecommendStrategy]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[types.ScoredPoint]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.recommend(collection_name=collection_name, positive=positive, negative=negative, query_filter=query_filter, search_params=search_params, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, using=using, lookup_from=lookup_from, consistency=consistency, shard_key_selector=shard_key_selector, strategy=strategy, timeout=timeout, **kwargs)

    def recommend_groups(self, collection_name: str, group_by: str, positive: Optional[Sequence[types.RecommendExample]]=None, negative: Optional[Sequence[types.RecommendExample]]=None, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, group_size: int=1, score_threshold: Optional[float]=None, with_payload: Union[bool, Sequence[str], types.PayloadSelector]=True, with_vectors: Union[bool, Sequence[str]]=False, using: Optional[str]=None, lookup_from: Optional[types.LookupLocation]=None, with_lookup: Optional[types.WithLookupInterface]=None, strategy: Optional[types.RecommendStrategy]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> types.GroupsResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.recommend_groups(collection_name=collection_name, group_by=group_by, positive=positive, negative=negative, query_filter=query_filter, search_params=search_params, limit=limit, group_size=group_size, score_threshold=score_threshold, with_payload=with_payload, with_vectors=with_vectors, using=using, lookup_from=lookup_from, with_lookup=with_lookup, strategy=strategy, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs)

    def discover(self, collection_name: str, target: Optional[types.TargetVector]=None, context: Optional[Sequence[types.ContextExamplePair]]=None, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, offset: int=0, with_payload: Union[bool, List[str], types.PayloadSelector]=True, with_vectors: Union[bool, List[str]]=False, using: Optional[str]=None, lookup_from: Optional[types.LookupLocation]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[types.ScoredPoint]:
        return self._client.discover(collection_name=collection_name, target=target, context=context, query_filter=query_filter, search_params=search_params, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, using=using, lookup_from=lookup_from, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs)

    def discover_batch(self, collection_name: str, requests: Sequence[types.DiscoverRequest], consistency: Optional[types.ReadConsistency]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[List[types.ScoredPoint]]:
        return self._client.discover_batch(collection_name=collection_name, requests=requests, consistency=consistency, timeout=timeout, **kwargs)

    # --- point read / write ---

    def scroll(self, collection_name: str, scroll_filter: Optional[types.Filter]=None, limit: int=10, offset: Optional[types.PointId]=None, with_payload: Union[bool, Sequence[str], types.PayloadSelector]=True, with_vectors: Union[bool, Sequence[str]]=False, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> Tuple[List[types.Record], Optional[types.PointId]]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.scroll(collection_name=collection_name, scroll_filter=scroll_filter, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, consistency=consistency, shard_key_selector=shard_key_selector, **kwargs)

    def count(self, collection_name: str, count_filter: Optional[types.Filter]=None, exact: bool=True, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.CountResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.count(collection_name=collection_name, count_filter=count_filter, exact=exact, shard_key_selector=shard_key_selector, **kwargs)

    def upsert(self, collection_name: str, points: types.Points, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.upsert(collection_name=collection_name, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs)

    def update_vectors(self, collection_name: str, points: Sequence[types.PointVectors], wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.update_vectors(collection_name=collection_name, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector)

    def delete_vectors(self, collection_name: str, vectors: Sequence[str], points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_vectors(collection_name=collection_name, vectors=vectors, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector)

    def retrieve(self, collection_name: str, ids: Sequence[types.PointId], with_payload: Union[bool, Sequence[str], types.PayloadSelector]=True, with_vectors: Union[bool, Sequence[str]]=False, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> List[types.Record]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.retrieve(collection_name=collection_name, ids=ids, with_payload=with_payload, with_vectors=with_vectors, consistency=consistency, shard_key_selector=shard_key_selector, **kwargs)

    def delete(self, collection_name: str, points_selector: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete(collection_name=collection_name, points_selector=points_selector, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs)

    # --- payload operations ---

    def set_payload(self, collection_name: str, payload: types.Payload, points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.set_payload(collection_name=collection_name, payload=payload, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs)

    def overwrite_payload(self, collection_name: str, payload: types.Payload, points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.overwrite_payload(collection_name=collection_name, payload=payload, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs)

    def delete_payload(self, collection_name: str, keys: Sequence[str], points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_payload(collection_name=collection_name, keys=keys, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs)

    def clear_payload(self, collection_name: str, points_selector: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.clear_payload(collection_name=collection_name, points_selector=points_selector, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs)

    def batch_update_points(self, collection_name: str, update_operations: Sequence[types.UpdateOperation], wait: bool=True, ordering: Optional[types.WriteOrdering]=None, **kwargs: Any) -> List[types.UpdateResult]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.batch_update_points(collection_name=collection_name, update_operations=update_operations, wait=wait, ordering=ordering, **kwargs)

    # --- collection management ---

    def update_collection_aliases(self, change_aliases_operations: Sequence[types.AliasOperations], timeout: Optional[int]=None, **kwargs: Any) -> bool:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.update_collection_aliases(change_aliases_operations=change_aliases_operations, timeout=timeout, **kwargs)

    def get_collection_aliases(self, collection_name: str, **kwargs: Any) -> types.CollectionsAliasesResponse:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.get_collection_aliases(collection_name=collection_name, **kwargs)

    def get_aliases(self, **kwargs: Any) -> types.CollectionsAliasesResponse:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.get_aliases(**kwargs)

    def get_collections(self, **kwargs: Any) -> types.CollectionsResponse:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.get_collections(**kwargs)

    def get_collection(self, collection_name: str, **kwargs: Any) -> types.CollectionInfo:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.get_collection(collection_name=collection_name, **kwargs)

    def update_collection(self, collection_name: str, optimizers_config: Optional[types.OptimizersConfigDiff]=None, collection_params: Optional[types.CollectionParamsDiff]=None, vectors_config: Optional[types.VectorsConfigDiff]=None, hnsw_config: Optional[types.HnswConfigDiff]=None, quantization_config: Optional[types.QuantizationConfigDiff]=None, timeout: Optional[int]=None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]]=None, **kwargs: Any) -> bool:
        # 'optimizer_config' is a deprecated alias for 'optimizers_config'.
        if ('optimizer_config' in kwargs) and (optimizers_config is not None):
            raise ValueError('Only one of optimizer_config and optimizers_config should be specified')
        if 'optimizer_config' in kwargs:
            optimizers_config = kwargs.pop('optimizer_config')
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.update_collection(collection_name=collection_name, optimizers_config=optimizers_config, collection_params=collection_params, vectors_config=vectors_config, hnsw_config=hnsw_config, quantization_config=quantization_config, timeout=timeout, sparse_vectors_config=sparse_vectors_config, **kwargs)

    def delete_collection(self, collection_name: str, timeout: Optional[int]=None, **kwargs: Any) -> bool:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_collection(collection_name=collection_name, timeout=timeout, **kwargs)

    def create_collection(self, collection_name: str, vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]], sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]]=None, shard_number: Optional[int]=None, sharding_method: Optional[types.ShardingMethod]=None, replication_factor: Optional[int]=None, write_consistency_factor: Optional[int]=None, on_disk_payload: Optional[bool]=None, hnsw_config: Optional[types.HnswConfigDiff]=None, optimizers_config: Optional[types.OptimizersConfigDiff]=None, wal_config: Optional[types.WalConfigDiff]=None, quantization_config: Optional[types.QuantizationConfig]=None, init_from: Optional[types.InitFrom]=None, timeout: Optional[int]=None, **kwargs: Any) -> bool:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.create_collection(collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, sharding_method=sharding_method, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, **kwargs)

    def recreate_collection(self, collection_name: str, vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]], sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]]=None, shard_number: Optional[int]=None, sharding_method: Optional[types.ShardingMethod]=None, replication_factor: Optional[int]=None, write_consistency_factor: Optional[int]=None, on_disk_payload: Optional[bool]=None, hnsw_config: Optional[types.HnswConfigDiff]=None, optimizers_config: Optional[types.OptimizersConfigDiff]=None, wal_config: Optional[types.WalConfigDiff]=None, quantization_config: Optional[types.QuantizationConfig]=None, init_from: Optional[types.InitFrom]=None, timeout: Optional[int]=None, **kwargs: Any) -> bool:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.recreate_collection(collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, sharding_method=sharding_method, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, **kwargs)

    def upload_records(self, collection_name: str, records: Iterable[types.Record], batch_size: int=64, parallel: int=1, method: Optional[str]=None, max_retries: int=3, wait: bool=False, **kwargs: Any) -> None:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.upload_records(collection_name=collection_name, records=records, batch_size=batch_size, parallel=parallel, method=method, max_retries=max_retries, wait=wait, **kwargs)

    def upload_collection(self, collection_name: str, vectors: Union[Dict[str, types.NumpyArray], types.NumpyArray, Iterable[types.VectorStruct]], payload: Optional[Iterable[Dict[Any, Any]]]=None, ids: Optional[Iterable[types.PointId]]=None, batch_size: int=64, parallel: int=1, method: Optional[str]=None, max_retries: int=3, wait: bool=False, **kwargs: Any) -> None:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.upload_collection(collection_name=collection_name, vectors=vectors, payload=payload, ids=ids, batch_size=batch_size, parallel=parallel, method=method, max_retries=max_retries, wait=wait, **kwargs)

    def create_payload_index(self, collection_name: str, field_name: str, field_schema: Optional[types.PayloadSchemaType]=None, field_type: Optional[types.PayloadSchemaType]=None, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.create_payload_index(collection_name=collection_name, field_name=field_name, field_schema=field_schema, field_type=field_type, wait=wait, ordering=ordering, **kwargs)

    def delete_payload_index(self, collection_name: str, field_name: str, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, **kwargs: Any) -> types.UpdateResult:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_payload_index(collection_name=collection_name, field_name=field_name, wait=wait, ordering=ordering, **kwargs)

    # --- snapshots ---

    def list_snapshots(self, collection_name: str, **kwargs: Any) -> List[types.SnapshotDescription]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.list_snapshots(collection_name=collection_name, **kwargs)

    def create_snapshot(self, collection_name: str, wait: bool=True, **kwargs: Any) -> Optional[types.SnapshotDescription]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.create_snapshot(collection_name=collection_name, wait=wait, **kwargs)

    def delete_snapshot(self, collection_name: str, snapshot_name: str, wait: bool=True, **kwargs: Any) -> Optional[bool]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_snapshot(collection_name=collection_name, snapshot_name=snapshot_name, wait=wait, **kwargs)

    def list_full_snapshots(self, **kwargs: Any) -> List[types.SnapshotDescription]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.list_full_snapshots(**kwargs)

    def create_full_snapshot(self, wait: bool=True, **kwargs: Any) -> Optional[types.SnapshotDescription]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.create_full_snapshot(wait=wait, **kwargs)

    def delete_full_snapshot(self, snapshot_name: str, wait: bool=True, **kwargs: Any) -> Optional[bool]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_full_snapshot(snapshot_name=snapshot_name, wait=wait, **kwargs)

    def recover_snapshot(self, collection_name: str, location: str, priority: Optional[types.SnapshotPriority]=None, wait: bool=True, **kwargs: Any) -> Optional[bool]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.recover_snapshot(collection_name=collection_name, location=location, priority=priority, wait=wait, **kwargs)

    def list_shard_snapshots(self, collection_name: str, shard_id: int, **kwargs: Any) -> List[types.SnapshotDescription]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.list_shard_snapshots(collection_name=collection_name, shard_id=shard_id, **kwargs)

    def create_shard_snapshot(self, collection_name: str, shard_id: int, wait: bool=True, **kwargs: Any) -> Optional[types.SnapshotDescription]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.create_shard_snapshot(collection_name=collection_name, shard_id=shard_id, wait=wait, **kwargs)

    def delete_shard_snapshot(self, collection_name: str, shard_id: int, snapshot_name: str, wait: bool=True, **kwargs: Any) -> Optional[bool]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.delete_shard_snapshot(collection_name=collection_name, shard_id=shard_id, snapshot_name=snapshot_name, wait=wait, **kwargs)

    def recover_shard_snapshot(self, collection_name: str, shard_id: int, location: str, priority: Optional[types.SnapshotPriority]=None, wait: bool=True, **kwargs: Any) -> Optional[bool]:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.recover_shard_snapshot(collection_name=collection_name, shard_id=shard_id, location=location, priority=priority, wait=wait, **kwargs)

    # --- storage locks, migration, shard keys ---

    def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.lock_storage(reason=reason, **kwargs)

    def unlock_storage(self, **kwargs: Any) -> types.LocksOption:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.unlock_storage(**kwargs)

    def get_locks(self, **kwargs: Any) -> types.LocksOption:
        assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
        return self._client.get_locks(**kwargs)

    def migrate(self, dest_client: QdrantBase, collection_names: Optional[List[str]]=None, batch_size: int=100, recreate_on_collision: bool=False) -> None:
        """Copy collections from this instance to *dest_client*."""
        migrate(self, dest_client, collection_names=collection_names, batch_size=batch_size, recreate_on_collision=recreate_on_collision)

    def create_shard_key(self, collection_name: str, shard_key: types.ShardKey, shards_number: Optional[int]=None, replication_factor: Optional[int]=None, placement: Optional[List[int]]=None, **kwargs: Any) -> bool:
        return self._client.create_shard_key(collection_name=collection_name, shard_key=shard_key, shards_number=shards_number, replication_factor=replication_factor, placement=placement, **kwargs)

    def delete_shard_key(self, collection_name: str, shard_key: types.ShardKey, **kwargs: Any) -> bool:
        return self._client.delete_shard_key(collection_name=collection_name, shard_key=shard_key, **kwargs)
class TestKeyedTensor(unittest.TestCase):
    """Unit tests for KeyedTensor: key lookup, dict conversion, regrouping,
    TorchScript/FX compatibility, and string representation.

    NOTE(review): left byte-identical aside from comments — the scripting/
    tracing tests depend on the exact form of the inner modules, and the
    string tests pin exact repr output.
    """

    def test_key_lookup(self) -> None:
        # Tensors concatenated along dim 0, keyed along dim 0; each key
        # must index back to its original slice.
        tensor_list = [torch.Tensor([[1.0, 1.0]]), torch.Tensor([[2.0, 2.0], [3.0, 3.0]])]
        keys = ['dense_0', 'dense_1']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, cat_dim=0, key_dim=0)
        self.assertEqual(kt.key_dim(), 0)
        self.assertTrue(torch.equal(kt['dense_0'], tensor_list[0]))
        self.assertTrue(torch.equal(kt['dense_1'], tensor_list[1]))

    def test_key_lookup_dim_1(self) -> None:
        # Same as above but keyed along dim 1 (column-wise concatenation).
        tensor_list = [torch.tensor([[1.0, 1.0]]).T, torch.tensor([[2.0, 2.0], [3.0, 3.0]]).T]
        keys = ['dense_0', 'dense_1']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim=1)
        self.assertEqual(kt.key_dim(), 1)
        self.assertTrue(torch.equal(kt['dense_0'], tensor_list[0]))
        self.assertTrue(torch.equal(kt['dense_1'], tensor_list[1]))

    def test_to_dict(self) -> None:
        # to_dict() must agree with per-key __getitem__ for every key.
        tensor_list = [torch.Tensor([[1.0, 1.0]]), torch.Tensor([[2.0, 2.0], [3.0, 3.0]])]
        keys = ['dense_0', 'dense_1']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, cat_dim=0, key_dim=0)
        self.assertEqual(kt.key_dim(), 0)
        d = kt.to_dict()
        for key in keys:
            self.assertTrue(torch.equal(kt[key], d[key]))

    def test_to_dict_dim_1(self) -> None:
        # to_dict() consistency check with key_dim=1.
        tensor_list = [torch.tensor([[1.0, 1.0]]).T, torch.tensor([[2.0, 2.0], [3.0, 3.0]]).T]
        keys = ['dense_0', 'dense_1']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim=1)
        self.assertEqual(kt.key_dim(), 1)
        d = kt.to_dict()
        for key in keys:
            self.assertTrue(torch.equal(kt[key], d[key]))

    def test_regroup_single_kt(self) -> None:
        # regroup() over one KeyedTensor: each output group is the
        # concatenation of its member tensors along the key dim.
        tensor_list = [torch.randn(2, 3) for i in range(5)]
        key_dim = 1
        keys = ['dense_0', 'dense_1', 'dense_2', 'dense_3', 'dense_4']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim)
        grouped_tensors = KeyedTensor.regroup([kt], [['dense_0', 'dense_4'], ['dense_1', 'dense_3'], ['dense_2']])
        self.assertTrue(torch.equal(grouped_tensors[0], torch.cat([tensor_list[0], tensor_list[4]], key_dim)))
        self.assertTrue(torch.equal(grouped_tensors[1], torch.cat([tensor_list[1], tensor_list[3]], key_dim)))
        self.assertTrue(torch.equal(grouped_tensors[2], tensor_list[2]))

    def test_regroup_multiple_kt(self) -> None:
        # regroup() may mix keys from different KeyedTensors in one group.
        key_dim = 1
        tensor_list_1 = [torch.randn(2, 3) for i in range(3)]
        keys_1 = ['dense_0', 'dense_1', 'dense_2']
        kt_1 = KeyedTensor.from_tensor_list(keys_1, tensor_list_1, key_dim)
        tensor_list_2 = [torch.randn(2, 3) for i in range(2)]
        keys_2 = ['sparse_0', 'sparse_1']
        kt_2 = KeyedTensor.from_tensor_list(keys_2, tensor_list_2, key_dim)
        grouped_tensors = KeyedTensor.regroup([kt_1, kt_2], [['dense_0', 'sparse_1', 'dense_2'], ['dense_1', 'sparse_0']])
        self.assertTrue(torch.equal(grouped_tensors[0], torch.cat([tensor_list_1[0], tensor_list_2[1], tensor_list_1[2]], key_dim)))
        self.assertTrue(torch.equal(grouped_tensors[1], torch.cat([tensor_list_1[1], tensor_list_2[0]], key_dim)))

    def test_regroup_scriptable(self) -> None:
        # regroup() must be compilable by TorchScript.
        class MyModule(torch.nn.Module):
            def forward(self, inputs: List[KeyedTensor], groups: List[List[str]]) -> List[torch.Tensor]:
                return KeyedTensor.regroup(inputs, groups)
        m = MyModule()
        torch.jit.script(m)

    def test_regroup_fxable(self) -> None:
        # regroup() must survive torch.fx symbolic tracing and produce
        # the same outputs as eager execution.
        class MyModule(torch.nn.Module):
            def forward(self, inputs: List[KeyedTensor], groups: List[List[str]]) -> List[torch.Tensor]:
                return KeyedTensor.regroup(inputs, groups)
        m = MyModule()
        key_dim = 1
        tensor_list_1 = [torch.randn(2, 3) for i in range(3)]
        keys_1 = ['dense_0', 'dense_1', 'dense_2']
        kt_1 = KeyedTensor.from_tensor_list(keys_1, tensor_list_1, key_dim)
        tensor_list_2 = [torch.randn(2, 3) for i in range(2)]
        keys_2 = ['sparse_0', 'sparse_1']
        kt_2 = KeyedTensor.from_tensor_list(keys_2, tensor_list_2, key_dim)
        inputs = [kt_1, kt_2]
        groups = [['dense_0', 'sparse_1', 'dense_2'], ['dense_1', 'sparse_0']]
        gm = torch.fx.symbolic_trace(m)
        results = m(inputs, groups)
        traced_results = gm(inputs, groups)
        self.assertEqual(len(results), len(traced_results))
        for (result, traced_result) in zip(results, traced_results):
            self.assertTrue(torch.equal(result, traced_result))

    def test_scriptable(self) -> None:
        # Per-key indexing + values() must be scriptable.
        class MyModule(torch.nn.Module):
            def forward(self, input: KeyedTensor) -> torch.Tensor:
                values = input['any'].values()
                return values
        m = MyModule()
        torch.jit.script(m)

    def test_string_none(self) -> None:
        # Empty KeyedTensor repr.
        jag_tensor = KeyedTensor([], [], torch.Tensor())
        self.assertEqual(str(jag_tensor), 'KeyedTensor()\n')

    def test_string_basic(self) -> None:
        # Single-key repr pins the exact formatting.
        tensor_list = [torch.tensor([[1.0]])]
        keys = ['key']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim=0)
        self.assertEqual(str(kt), 'KeyedTensor({\n    "key": [[1.0]]\n})\n')

    def test_string_values(self) -> None:
        # Multi-key repr with default key_dim.
        tensor_list = [torch.tensor([[1.0, 1.0]]).T, torch.tensor([[2.0, 2.0], [3.0, 3.0]]).T]
        keys = ['dense_0', 'dense_1']
        kt = KeyedTensor.from_tensor_list(keys, tensor_list)
        self.assertEqual(str(kt), 'KeyedTensor({\n    "dense_0": [[1.0], [1.0]],\n    "dense_1": [[2.0, 3.0], [2.0, 3.0]]\n})\n')
class TurnOnBehavior(BaseModel):
    """Pydantic model describing how a device should turn on.

    ``preset`` is populated from the wire field ``index``; ``mode`` is
    derived from whether a preset was supplied (see ``_mode_based_on_preset``).
    """
    # Optional preset slot number; serialized/deserialized under the alias 'index'.
    preset: Optional[int] = Field(alias='index', default=None)
    mode: BehaviorMode
    _validator  # NOTE(review): looks like a truncated decorator (presumably pydantic's @root_validator) — confirm against upstream
    def _mode_based_on_preset(cls, values):
        # Derive the mode: an explicit preset wins, otherwise resume the last state.
        if (values['preset'] is not None):
            values['mode'] = BehaviorMode.Preset
        else:
            values['mode'] = BehaviorMode.Last
        return values
    class Config():
        # Re-run validation whenever a field is assigned after construction.
        validate_assignment = True
.parametrize(('filename', 'info'), WHEEL_INFO_TESTS, ids=[t[0] for t in WHEEL_INFO_TESTS])  # NOTE(review): decorator prefix appears truncated (presumably @pytest.mark) — confirm
def test_wheel_info(filename, info):
    """Check Wheel(filename) parses into the expected attributes, or raises.

    When ``info`` is an exception class the filename is expected to be
    rejected; otherwise ``info`` maps attribute names to expected values.
    """
    if inspect.isclass(info):
        # The test table encodes invalid filenames as exception classes.
        with pytest.raises(info):
            Wheel(filename)
        return
    w = Wheel(filename)
    # Compare only the attributes the test table specifies.
    assert ({k: getattr(w, k) for k in info.keys()} == info)
def main():
    """Generate a batch of seeds for one game preset, archive the results,
    and post a summary report to a Discord webhook.

    Requires the WEBHOOK_URL environment variable. Raises CalledProcessError
    if any randovania subprocess fails (check=True throughout).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', required=True)
    parser.add_argument('--preset', default='Starter Preset')
    parser.add_argument('--target-seed-count', type=int, default=100)
    parser.add_argument('--process-count', type=int, default=6)
    args = parser.parse_args()
    this_dir = Path(__file__).parent
    target_game = args.game
    target_preset = args.preset
    target_seed_count = args.target_seed_count
    process_count = args.process_count
    output_path = this_dir.joinpath('bulk')
    rdvgame_path = (output_path / 'rdvgame')
    report_path = (output_path / 'report.json')
    webhook_url = os.environ['WEBHOOK_URL']
    # Resolve the preset into a permalink once; the seed number is arbitrary.
    permalink = subprocess.run([sys.executable, '-m', 'randovania', 'layout', 'permalink', '--game', target_game, '--preset-name', target_preset, '--seed-number', '1000', '--development'], check=True, capture_output=True, text=True).stdout.strip()
    # Start from a clean output directory.
    shutil.rmtree(output_path, ignore_errors=True)
    output_path.mkdir(parents=True)
    before = datetime.datetime.now()
    generate_log = subprocess.run([sys.executable, '-m', 'randovania', 'layout', 'batch-distribute', '--process-count', str(process_count), permalink, str(target_seed_count), os.fspath(rdvgame_path)], check=True, capture_output=True, text=True).stdout
    duration = (datetime.datetime.now() - before)
    print(generate_log)
    generated_count = sum((1 for _ in rdvgame_path.rglob('*')))
    failed_count = (target_seed_count - generated_count)
    timed_out_count = generate_log.count('Timeout reached when validating possibility')
    subprocess.run([sys.executable, os.fspath(this_dir.joinpath('log_analyzer.py')), rdvgame_path, report_path], check=True)
    # BUG FIX: tarfile.TarFile(..., 'w') ignores the .gz suffix and writes an
    # uncompressed tar; tarfile.open with mode 'w:gz' actually gzip-compresses.
    with tarfile.open('games.tar.gz', 'w:gz') as games_tar:
        games_tar.add(rdvgame_path, arcname=rdvgame_path.relative_to(output_path))
        games_tar.add(report_path, arcname=report_path.relative_to(output_path))
        # BUG FIX: TarInfo.size defaults to 0, so addfile() previously stored an
        # empty generation.log; the size must be set to the payload length.
        log_bytes = generate_log.encode('utf-8')
        log_info = tarfile.TarInfo('generation.log')
        log_info.size = len(log_bytes)
        games_tar.addfile(log_info, io.BytesIO(log_bytes))
    real_time = str(duration)
    subprocess.run([sys.executable, os.fspath(this_dir.joinpath('send_report_to_discord.py')), '--title', f'Batch report for {target_game}', '--field', f'Generated:{generated_count} out of {target_seed_count}', '--field', f'Timed out:{timed_out_count} out of {failed_count} failures', '--field', f'Preset:{target_preset}', '--field', f'Elapsed real time:{real_time}', '--attach', 'games.tar.gz', '--webhook', webhook_url], check=True)
class GameResult(Object):
    """Result entry for a single game, hydrated from a raw API payload."""

    def from_dict(self):
        """Populate fields from the parsed payload held in ``self._data``."""
        super().from_dict()
        data = self._data
        self.rank = data.get('rank')
        self.game_result = data.get('gameResult')
        self.team_id = data.get('teamId')
        # Nested stats payload; defaults to an empty mapping when absent.
        self.stats = Stats(data.get('stats', {}))
        self.account_id = data.get('accountId')
.parametrize('mtime_minus_now,needs_upgrade', [(((- shared_libs.SHARED_LIBS_MAX_AGE_SEC) - (5 * 60)), True), (((- shared_libs.SHARED_LIBS_MAX_AGE_SEC) + (5 * 60)), False)])  # NOTE(review): decorator prefix appears truncated (presumably @pytest.mark) — confirm
def test_auto_update_shared_libs(capsys, pipx_ultra_temp_env, mtime_minus_now, needs_upgrade):
    """Shared libs need an upgrade iff pip's mtime is older than SHARED_LIBS_MAX_AGE_SEC."""
    now = time.time()
    shared_libs.shared_libs.create(verbose=True)
    # Force the "already updated" flag off so needs_upgrade depends only on mtime.
    shared_libs.shared_libs.has_been_updated_this_run = False
    access_time = now
    # Backdate (or forward-date) pip's mtime relative to now to simulate age.
    os.utime(shared_libs.shared_libs.pip_path, (access_time, (mtime_minus_now + now)))
    assert (shared_libs.shared_libs.needs_upgrade is needs_upgrade)
def set_graph_random_seed(datapipe: DataPipe, seed_generator: SeedGenerator) -> DataPipe:
    """Seed every DataPipe in the graph of ``datapipe``.

    DataPipes at or upstream of any ShardingFilter receive seeds that are
    shared across distributed workers; everything downstream receives
    per-worker (non-shared) seeds. Returns the (mutated) input datapipe.
    """
    graph = traverse_dps(datapipe)
    sharding_filter_dps = find_dps(graph, ShardingFilter)
    # Collect every DataPipe reachable upstream of a ShardingFilter, once each.
    seen_ids = set()
    upstream_dps = []
    for sharding_dp in sharding_filter_dps:
        for candidate in list_dps(traverse_dps(sharding_dp)):
            if id(candidate) in seen_ids:
                continue
            seen_ids.add(id(candidate))
            upstream_dps.append(candidate)
    set_datapipes_seed(upstream_dps, seed_generator, distributed_shared=True)
    # Everything not feeding a ShardingFilter gets an independent seed.
    downstream_dps = list_dps(graph, exclude_dps=sharding_filter_dps)
    set_datapipes_seed(downstream_dps, seed_generator, distributed_shared=False)
    return datapipe
.requires_user_action  # NOTE(review): decorator prefix appears truncated (presumably a test-suite mark) — confirm
class EVENT_MOUSEMOTION(InteractiveTestCase):
    """Interactive check that mouse-motion events are dispatched with coordinates."""
    def on_mouse_motion(self, x, y, dx, dy):
        # Handler invoked by the window's event loop; prints absolute and relative motion.
        print(('Mouse at (%f, %f); relative (%f, %f).' % (x, y, dx, dy)))
    def test_motion(self):
        w = Window(200, 200)
        try:
            w.push_handlers(self)
            # Pump events until the user closes the window.
            while (not w.has_exit):
                w.dispatch_events()
        finally:
            # Always release the window, even if event dispatch raises.
            w.close()
        self.user_verify('Pass test?', take_screenshot=False)
def main():
    """Read per-prefix scATAC datasets in parallel, harmonize their bins,
    and write a single concatenated .h5ad file to args.output."""
    parser = build_parser()
    args = parser.parse_args()
    assert args.output.endswith('.h5ad'), 'Output file must be in .h5ad format'
    # Never spawn more workers than there are inputs.
    threads = min(args.threads, len(args.prefix))
    pool = multiprocessing.Pool(threads)
    adatas = list(pool.map(read_prefix, args.prefix))
    pool.close()
    pool.join()
    # Fold all var_names into one common interval set, pairwise.
    common_bins = adatas[0].var_names
    for adata in adatas[1:]:
        common_bins = sc_data_loaders.harmonize_atac_intervals(common_bins, adata.var_names)
    logging.info(f'Aggregated {len(args.prefix)} prefixes into {len(common_bins)} bins')
    # Re-pool each dataset onto the common bins, again in parallel.
    pfunc = functools.partial(sc_data_loaders.repool_atac_bins, target_bins=common_bins)
    pool = multiprocessing.Pool(threads)
    adatas = list(pool.map(pfunc, adatas))
    pool.close()
    pool.join()
    retval = adatas[0]
    if (len(adatas) > 1):
        retval = retval.concatenate(adatas[1:])
    logging.info(f'Concatenated {len(args.prefix)} prefixes into a single adata of {retval.shape}')
    logging.info(f'Writing to {args.output}')
    retval.write(args.output)
def get_parser(desc, default_task='translation'):
    """Build the common fairseq argument parser.

    ``--user-dir`` is pre-parsed with a throwaway parser so user modules can
    register tasks/registries before the real parser enumerates their choices.
    ``desc`` is currently unused by the parser itself.
    """
    # First pass: only extract --user-dir so custom extensions get imported
    # before registry/task choices are collected below.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument('--user-dir', default=None)
    (usr_args, _) = usr_parser.parse_known_args()
    utils.import_user_module(usr_args)
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
    parser.add_argument('--log-interval', type=int, default=1000, metavar='N', help='log progress every N batches (when progress bar is disabled)')
    parser.add_argument('--log-format', default=None, help='log format to use', choices=['json', 'none', 'simple', 'tqdm'])
    parser.add_argument('--tensorboard-logdir', metavar='DIR', default='', help='path to save logs for tensorboard, should match --logdir of running tensorboard (default: no tensorboard logging)')
    parser.add_argument('--seed', default=1, type=int, metavar='N', help='pseudo random number generator seed')
    parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
    parser.add_argument('--fp16', action='store_true', help='use FP16')
    parser.add_argument('--memory-efficient-fp16', action='store_true', help='use a memory-efficient version of FP16 training; implies --fp16')
    parser.add_argument('--fp16-init-scale', default=(2 ** 7), type=int, help='default FP16 loss scale')
    parser.add_argument('--fp16-scale-window', type=int, help='number of updates before increasing loss scale')
    parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float, help='pct of updates that can overflow before decreasing the loss scale')
    parser.add_argument('--min-loss-scale', default=0.0001, type=float, metavar='D', help='minimum FP16 loss scale, after which training is stopped')
    parser.add_argument('--threshold-loss-scale', type=float, help='threshold FP16 loss scale from below')
    parser.add_argument('--user-dir', default=None, help='path to a python module containing custom extensions (tasks and/or architectures)')
    parser.add_argument('--empty-cache-freq', default=0, type=int, help='how often to clear the PyTorch CUDA cache (0 to disable)')
    parser.add_argument('--all-gather-list-size', default=16384, type=int, help='number of bytes reserved for gathering stats from workers')
    parser.add_argument('--float-valid', default=False, action='store_true', help='if true, use float type for valid step (for DynamicConv)')
    # Expose one flag per registered component registry (optimizer, lr-scheduler, ...).
    from fairseq.registry import REGISTRIES
    for (registry_name, REGISTRY) in REGISTRIES.items():
        parser.add_argument(('--' + registry_name.replace('_', '-')), default=REGISTRY['default'], choices=REGISTRY['registry'].keys())
    # Task choices must be collected after import_user_module so custom tasks appear.
    from fairseq.tasks import TASK_REGISTRY
    parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task')
    parser.add_argument('--num-ref', default=None, nargs='+', action=StoreDictKeyPair, metavar='NUMREFSPLIT', help='dict for number of references for valid and test')
    return parser
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style feature extractor from a layer spec.

    Each entry of ``cfg`` is either 'M' (insert a 2x2 stride-2 max pool) or a
    channel count for a 3x3 same-padding conv followed by (optional BatchNorm
    and) an in-place ReLU. Input is assumed to have 3 channels.

    Returns an nn.Sequential of the assembled modules.
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        out_channels = int(spec)
        conv = nn.Conv2d(channels, out_channels, kernel_size=3, padding=1)
        if batch_norm:
            modules.extend([conv, nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True)])
        else:
            modules.extend([conv, nn.ReLU(inplace=True)])
        channels = out_channels
    return nn.Sequential(*modules)
class OpenWithInvalidFlagsTest(FakeFileOpenTestBase):
    """Every unsupported mode string passed to open() must raise ValueError."""

    def _assert_mode_rejected(self, mode):
        # Shared helper: the fake filesystem's open() rejects this mode string.
        with self.assertRaises(ValueError):
            self.open('some_file', mode)

    def test_capital_r(self):
        self._assert_mode_rejected('R')

    def test_capital_w(self):
        self._assert_mode_rejected('W')

    def test_capital_a(self):
        self._assert_mode_rejected('A')

    def test_lower_u(self):
        self._assert_mode_rejected('u')

    def test_lower_rw(self):
        self._assert_mode_rejected('rw')
def check_render_rest(data_root, verbose=False):
    """Validate the ReST in the 'description' and 'summary' fields of every
    video JSON file under ``data_root``.

    Prints each validation problem; exits with status 1 if any error's level
    meets or exceeds INVALID_ERROR_LEVEL.
    """
    (_, video_paths) = get_json_files(data_root)
    fields = ('description', 'summary')
    error_by_path = {}  # NOTE(review): never populated or read below — appears unused
    valid = True
    for file_path in video_paths:
        with open(file_path, encoding='UTF-8') as fp:
            blob = json.load(fp)
        for field in fields:
            # Missing/None fields validate as empty strings.
            text = (blob.get(field) or '')
            (error, level) = validate_rest(text)
            if (error and (level >= INVALID_ERROR_LEVEL)):
                # Only sufficiently severe errors fail the whole run.
                valid = False
            if error:
                msg = 'ReST validation error (level {level}):\n\tFile: {fp}\n\tKey: {key}\n\tError:\n{error}'
                print(msg.format(fp=file_path, key=field, level=level, error=textwrap.indent(error, '\t\t')), flush=True)
                if verbose:
                    print('\t', error, sep='', flush=True)
    if (not valid):
        sys.exit(1)
class TransfoXLConfig(PretrainedConfig):
    """Configuration for a Transformer-XL model.

    Stores vocabulary/architecture sizes, adaptive softmax cutoffs,
    memory/context lengths, and initialization hyper-parameters.
    """
    pretrained_config_archive_map = TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, tgt_len=128, ext_len=0, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=(- 1), adaptive=True, tie_weight=True, dropout=0.1, dropatt=0.0, untie_r=True, init='normal', init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-05, **kwargs):
        super(TransfoXLConfig, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        # Copy rather than alias the (mutable, shared) default list.
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        self.tie_weight = tie_weight
        if proj_share_all_but_first:
            # Share all adaptive-embedding projections except the first cluster.
            self.tie_projs = ([False] + ([True] * len(self.cutoffs)))
        else:
            self.tie_projs = ([False] + ([False] * len(self.cutoffs)))
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.tgt_len = tgt_len
        self.ext_len = ext_len
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
    # NOTE(review): decorator appears truncated (presumably @property) — confirm against upstream transformers.
    def max_position_embeddings(self):
        # Effective context: target + extended + memory lengths.
        return ((self.tgt_len + self.ext_len) + self.mem_len)
    # NOTE(review): decorator appears truncated (presumably @property) — confirm.
    def n_token(self):
        # Alias for vocab_size kept for backward compatibility.
        return self.vocab_size
    _token.setter  # NOTE(review): appears to be a truncated decorator (presumably @n_token.setter) — confirm.
    def n_token(self, value):
        self.vocab_size = value
    # NOTE(review): decorator appears truncated (presumably @property) — confirm.
    def hidden_size(self):
        return self.d_model
    # NOTE(review): decorator appears truncated (presumably @property) — confirm.
    def num_attention_heads(self):
        return self.n_head
    # NOTE(review): decorator appears truncated (presumably @property) — confirm.
    def num_hidden_layers(self):
        return self.n_layer
def _modify_tensor_quantizers(input_output_tensor_quantizers: TensorQuantizersTupleType, setting_name: str, quantizer_setting: Union[(dict, bool)], modified_tensor_quantizers: Dict[(TensorQuantizer, Set)]):
    """Apply one config setting to all affected tensor quantizers.

    If a quantizer was already modified for the same setting type by an
    earlier config entry, the new value must agree with the existing one
    (a conflict is logged and asserted). ``modified_tensor_quantizers`` is
    updated in place to record which setting types touched each quantizer.
    """
    setting_type = get_setting_type(setting_name)
    tensor_quantizers_to_modify = _get_tensor_quantizers_to_modify(input_output_tensor_quantizers, setting_name, quantizer_setting)
    for tensor_quantizer in tensor_quantizers_to_modify:
        if ((tensor_quantizer in modified_tensor_quantizers) and (setting_type in modified_tensor_quantizers[tensor_quantizer])):
            # Already configured for this setting type: verify consistency
            # instead of overwriting.
            if (setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]):
                current_setting = tensor_quantizer.enabled
            elif (setting_name == ConfigDictKeys.IS_SYMMETRIC):
                current_setting = tensor_quantizer.use_symmetric_encodings
            else:
                # Encoding-constraint settings compare as a {MIN, MAX} dict.
                current_setting = {ConfigDictKeys.MIN: tensor_quantizer.encoding_min_max_fixed_vals[0], ConfigDictKeys.MAX: tensor_quantizer.encoding_min_max_fixed_vals[1]}
            log_with_error_and_assert_if_false((current_setting == quantizer_setting), logger, f'Conflicting tensor quantizer settings for {setting_name}')
        else:
            # First time this setting type touches the quantizer: apply it.
            if (setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]):
                tensor_quantizer.enabled = quantizer_setting
            elif (setting_name == ConfigDictKeys.IS_SYMMETRIC):
                tensor_quantizer.use_symmetric_encodings = quantizer_setting
            elif (setting_name == ConfigDictKeys.ENCODING_CONSTRAINTS):
                tensor_quantizer.encoding_min_max_fixed_vals = (quantizer_setting[ConfigDictKeys.MIN], quantizer_setting[ConfigDictKeys.MAX])
            # Record the modification so later config entries can detect conflicts.
            if (tensor_quantizer not in modified_tensor_quantizers):
                modified_tensor_quantizers[tensor_quantizer] = {setting_type}
            else:
                modified_tensor_quantizers[tensor_quantizer].add(setting_type)
class SysModulesSnapshot():
    """Capture ``sys.modules`` at construction time and restore it on demand."""

    def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None:
        # Optional predicate naming modules whose *current* entry should
        # survive a restore() instead of being rolled back.
        self.__preserve = preserve
        self.__saved = dict(sys.modules)

    def restore(self) -> None:
        """Reset sys.modules to the snapshot, keeping preserved entries current."""
        snapshot = self.__saved
        if self.__preserve:
            preserved = {name: module for name, module in sys.modules.items() if self.__preserve(name)}
            snapshot.update(preserved)
        sys.modules.clear()
        sys.modules.update(snapshot)
def _check_and_occupation(video_path, result_path):
    """Claim the work item identified by ``result_path``.

    Returns True when the result file already exists (work already claimed
    or done). Otherwise ensures ``video_path`` exists, writes an 'Occ'
    marker to ``result_path`` to claim the slot, and returns False.
    """
    if os.path.isfile(result_path):
        return True
    try:
        # exist_ok avoids the TOCTOU race where another worker creates the
        # directory between an isdir() check and makedirs() (the original
        # isdir-then-makedirs sequence could raise FileExistsError).
        os.makedirs(video_path, exist_ok=True)
    except OSError as err:
        # Best effort: report but still try to claim the slot, matching the
        # original behavior of continuing after a directory error.
        print(err)
    with open(result_path, 'w') as f:
        f.write('Occ')
    return False
class FairseqBMUF(FairseqOptimizer):
    """Blockwise Model-Update Filtering (BMUF) wrapper around a base optimizer.

    Each worker runs the wrapped optimizer locally; every ``sync_iter``
    updates (after a ``warmup_iteration``-step warmup) the workers
    synchronize parameters across the process group, optionally applying
    block momentum with a Nesterov-style correction.
    """
    def __init__(self, args, optimizer):
        super().__init__(args)
        self._optimizer = optimizer
        self._num_updates = 0
        # Number of local steps between cross-worker syncs.
        self.sync_iter = self.args.global_sync_iter
        self.block_momentum = self.args.block_momentum
        self.block_lr = self.args.block_lr
        self._reset_local_data()
        # Steps run before the first (broadcast) sync.
        self.warmup_iteration = self.args.warmup_iterations
        self.use_nbm = self.args.use_nbm
        # Kept so optimizer state can be reset at the end of warmup.
        self.initial_state = self._optimizer.state_dict()
        self.average_sync = self.args.average_sync
        self.world_size = self.args.distributed_world_size
    # NOTE(review): decorator appears truncated (presumably @staticmethod) — confirm against upstream fairseq.
    def add_args(parser):
        gen_parser_from_dataclass(parser, FairseqBMUFConfig())
    # NOTE(review): decorator appears truncated (presumably @property) — confirm.
    def optimizer(self):
        return self._optimizer.optimizer
    # NOTE(review): decorator appears truncated (presumably @property) — confirm.
    def optimizer_config(self):
        return self._optimizer.optimizer_config
    def get_lr(self):
        return self._optimizer.get_lr()
    def set_lr(self, lr):
        self._optimizer.set_lr(lr)
    def state_dict(self):
        return self._optimizer.state_dict()
    def load_state_dict(self, state_dict, optimizer_overrides=None):
        self._optimizer.load_state_dict(state_dict, optimizer_overrides)
        # The loaded state becomes the state re-applied on warmup sync.
        self.initial_state = self._optimizer.state_dict()
    def multiply_grads(self, c):
        self._optimizer.multiply_grads(c)
    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
    def average_params(self):
        self._optimizer.average_params()
    def _block_sync(self):
        # Periodic BMUF sync: average parameter deltas (or raw parameters when
        # momentum is off) across workers, then apply the block-momentum update.
        if (self.world_size <= 1):
            return
        if (self.block_momentum != 0):
            self._calc_grad()
        self._avg_grad_from_all_gpus()
        if (self.block_momentum != 0):
            self._update_global_model()
        if self.average_sync:
            self.average_params()
    def _is_warmup_end(self):
        # True exactly once: on the step that finishes warmup.
        if (self.get_num_updates() == self.warmup_iteration):
            return True
        return False
    def _is_bmuf_iter(self):
        # True on every sync_iter-th step after warmup.
        if ((self.get_num_updates() > self.warmup_iteration) and ((self.get_num_updates() % self.sync_iter) == 0)):
            return True
        return False
    def _warmup_sync(self, root_rank=0):
        # End-of-warmup sync: broadcast rank-0 parameters to all workers and
        # either average or reset the optimizer state.
        if (self.world_size <= 1):
            return
        for param in self.params:
            dist.broadcast(param.data, src=root_rank)
        if self.average_sync:
            self._optimizer.average_params()
        else:
            self._optimizer.load_state_dict(self.initial_state)
        self._reset_local_data()
    def step(self, closure=None):
        """Take a local optimizer step, then run any due warmup/BMUF sync."""
        self._optimizer.step(closure)
        self.set_num_updates((self.get_num_updates() + 1))
        if self._is_warmup_end():
            self._warmup_sync()
        elif self._is_bmuf_iter():
            self._block_sync()
    def zero_grad(self):
        self._optimizer.zero_grad()
    def get_num_updates(self):
        return self._num_updates
    def set_num_updates(self, num_updates):
        self._num_updates = num_updates
    # NOTE(review): line appears to be a truncated decorator (presumably @torch.no_grad()) — confirm.
    _grad()
    def _reset_local_data(self):
        # Snapshot the current params as the "global" model and zero the buffers.
        self.global_params = [torch.zeros_like(p.data) for p in self.params]
        self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        for (param, global_param) in zip(self.params, self.global_params):
            global_param.copy_(param.data)
    # NOTE(review): line appears to be a truncated decorator (presumably @torch.no_grad()) — confirm.
    _grad()
    def _calc_grad(self):
        # The BMUF "gradient" is the drift of the local model from the last
        # synced global model.
        for (index, (param, global_param)) in enumerate(zip(self.params, self.global_params)):
            self.grads[index] = (global_param - param.data)
    def _avg_grad_from_all_gpus(self):
        for (index, param) in enumerate(self.params):
            # With momentum off we average raw params; otherwise the deltas.
            sync_para = (param.data if (self.block_momentum == 0) else self.grads[index])
            sync_para /= float(dist.get_world_size())
            dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)
    # NOTE(review): line appears to be a truncated decorator (presumably @torch.no_grad()) — confirm.
    _grad()
    def _update_global_model(self):
        for (index, (param, global_param, smoothed_grad, grad)) in enumerate(zip(self.params, self.global_params, self.smoothed_grads, self.grads)):
            # smoothed_grad = momentum * previous_smoothed + block_lr * averaged delta
            smoothed_grad = ((self.block_momentum * smoothed_grad) + (self.block_lr * grad))
            param.data.copy_((global_param - smoothed_grad))
            if self.use_nbm:
                # Nesterov-style block momentum correction.
                param.data.copy_((param.data - (self.block_momentum * smoothed_grad)))
            self.smoothed_grads[index] = smoothed_grad
            global_param.copy_(param.data)
class TestComplexObject():
    """Tests for the repr machinery of pystiche.ComplexObject."""
    def test_repr_smoke(self):
        """repr() must work for a subclass that defines nothing extra."""
        class TestObject(pystiche.ComplexObject):
            pass
        test_object = TestObject()
        assert isinstance(repr(test_object), str)
    def test_repr(self):
        """repr() must merge regular and extra properties/children in order."""
        _properties = OrderedDict((('a', 1),))
        extra_properties = OrderedDict((('b', 2),))
        _named_children = (('c', 3),)
        extra_named_children = (('d', 4),)
        class TestObject(pystiche.ComplexObject):
            # Override the hooks ComplexObject uses to assemble its repr.
            def _properties(self):
                return _properties
            def extra_properties(self):
                return extra_properties
            def _named_children(self):
                return iter(_named_children)
            def extra_named_children(self):
                return iter(extra_named_children)
        test_object = TestObject()
        # Expected: regular entries first, then the extras.
        properties = OrderedDict([property for property in itertools.chain(_properties.items(), extra_properties.items())])
        named_children = tuple(itertools.chain(_named_children, extra_named_children))
        actual = repr(test_object)
        desired = build_complex_obj_repr(name='TestObject', properties=properties, named_children=named_children)
        assert (actual == desired)
class _Linux(_Platform):
    """Linux-specific paths and locale-derived settings for pytrainer."""
    def _get_data_path(self):
        """Return the data directory: the source checkout if we are running
        from one, otherwise the installed share/pytrainer/ prefix."""
        base_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
        def _checkpath(fname):
            # True when fname exists relative to the presumed repository root.
            return os.path.exists(os.path.join(base_path, fname))
        # Detect a source checkout by the presence of well-known repo files.
        if all(map(_checkpath, ('INSTALL', 'setup.py', 'pytrainer/main.py', 'locale'))):
            return (base_path + '/')
        else:
            return os.path.join(sys.prefix, 'share/pytrainer/')
    def __init__(self):
        self._home_dir = os.environ['HOME']
        self._conf_dir_name = '.pytrainer'
        self._data_path = self._get_data_path()
        try:
            # Ask the C library for the locale's first weekday:
            # 'first_weekday' is a 1-based offset into the week that starts
            # on 'week-1stday' (a YYYYMMDD reference date).
            results = subprocess.check_output(('locale', 'first_weekday', 'week-1stday'), universal_newlines=True).splitlines()
            day_delta = datetime.timedelta(days=(int(results[0]) - 1))
            base_date = datetime.datetime.strptime(results[1], '%Y%m%d')
            first_day = (base_date + day_delta)
            # %w yields 0=Sunday .. 6=Saturday.
            self._first_day_of_week = int(first_day.strftime('%w'))
        except subprocess.CalledProcessError:
            # Fall back to Sunday when the locale tool is unavailable.
            self._first_day_of_week = 0
def test_initiator_lock_expired():
    """A Block past the sender expiration threshold must expire the transfer's lock.

    First part: expiring the only pending transfer emits SendLockExpired and
    the route/unlock/payment failure events, and deletes the payment task.
    Second part: with two pending transfers on the same channel, expiring one
    lock must leave the other transfer's lock in place.
    """
    amount = (UNIT_TRANSFER_AMOUNT * 2)
    pseudo_random_generator = random.Random()
    channels = factories.make_channel_set_from_amounts([amount, 0])
    block_number = 10
    transfer_description = factories.create(factories.TransferDescriptionProperties(secret=UNIT_SECRET, token_network_registry_address=channels[0].token_network_registry_address))
    current_state = make_initiator_manager_state(channels=channels, transfer_description=transfer_description, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
    initiator_state = get_transfer_at_index(current_state, 0)
    transfer = initiator_state.transfer
    # The lock is registered while the transfer is pending.
    assert (transfer.lock.secrethash in channels[0].our_state.secrethashes_to_lockedlocks)
    # Advance the chain to the sender-side expiration threshold of the lock.
    state_change = Block(block_number=channel.get_sender_expiration_threshold(transfer.lock.expiration), gas_limit=1, block_hash=factories.make_transaction_hash())
    iteration = initiator_manager.state_transition(payment_state=current_state, state_change=state_change, channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=pseudo_random_generator, block_number=block_number)
    # The partner must be informed of the expired lock via a balance proof
    # that zeroes out the transferred/locked amounts.
    lock_expired = search_for_item(iteration.events, SendLockExpired, {'balance_proof': {'nonce': 2, 'transferred_amount': 0, 'locked_amount': 0}, 'secrethash': transfer.lock.secrethash, 'recipient': channels[0].partner_state.address})
    assert (lock_expired is not None)
    assert (search_for_item(iteration.events, EventRouteFailed, {}) is not None)
    assert search_for_item(iteration.events, EventUnlockFailed, {})
    payment_failed = search_for_item(iteration.events, EventPaymentSentFailed, {'token_network_registry_address': channels[0].token_network_registry_address, 'token_network_address': channels[0].token_network_address, 'identifier': UNIT_TRANSFER_IDENTIFIER, 'target': transfer.target, 'reason': 'lock expired'})
    assert (payment_failed is not None)
    # The expired lock must be removed from the pending locks.
    assert (transfer.lock.secrethash not in channels[0].our_state.secrethashes_to_lockedlocks)
    msg = 'the initiator payment task must be deleted at block of the lock expiration'
    assert (not iteration.new_state), msg
    # Second part: two more transfers with different start blocks, hence
    # different lock expirations, on the same channel set.
    transfer2_state = make_initiator_manager_state(channels=channels, transfer_description=factories.create(factories.TransferDescriptionProperties(payment_identifier='transfer2')), pseudo_random_generator=pseudo_random_generator, block_number=30)
    initiator2_state = get_transfer_at_index(transfer2_state, 0)
    transfer2_lock = initiator2_state.transfer.lock
    transfer3_state = make_initiator_manager_state(channels=channels, transfer_description=factories.create(factories.TransferDescriptionProperties(payment_identifier='transfer3')), pseudo_random_generator=pseudo_random_generator, block_number=32)
    initiator3_state = get_transfer_at_index(transfer3_state, 0)
    transfer3_lock = initiator3_state.transfer.lock
    assert (len(channels[0].our_state.secrethashes_to_lockedlocks) == 2)
    assert (transfer2_lock.secrethash in channels[0].our_state.secrethashes_to_lockedlocks)
    expiration_block_number = channel.get_sender_expiration_threshold(transfer2_lock.expiration)
    block = Block(block_number=expiration_block_number, gas_limit=1, block_hash=factories.make_transaction_hash())
    iteration = initiator_manager.state_transition(payment_state=transfer2_state, state_change=block, channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=pseudo_random_generator, block_number=expiration_block_number)
    # Only transfer2's lock expires; transfer3's lock must survive.
    assert (transfer2_lock.secrethash not in channels[0].our_state.secrethashes_to_lockedlocks)
    assert (transfer3_lock.secrethash in channels[0].our_state.secrethashes_to_lockedlocks)
class TestMakeOrder():
    """Ordering semantics of attrs-generated comparison methods."""
    def test_subclasses_cannot_be_compared(self):
        """Ordering comparisons between different attrs classes raise TypeError."""
        # NOTE(review): the attrs class decorators (presumably @attr.s or
        # similar with ordering enabled) appear to have been stripped — confirm upstream.
        class A():
            a = attr.ib()
        class B(A):
            pass
        a = A(42)
        b = B(42)
        # Same-class comparisons are reflexively consistent.
        assert (a <= a)
        assert (a >= a)
        assert (not (a < a))
        assert (not (a > a))
        # The dunder methods return NotImplemented for a different class ...
        assert (NotImplemented == a.__lt__(b) == a.__le__(b) == a.__gt__(b) == a.__ge__(b))
        # ... so the operators fall through to TypeError.
        with pytest.raises(TypeError):
            (a <= b)
        with pytest.raises(TypeError):
            (a >= b)
        with pytest.raises(TypeError):
            (a < b)
        with pytest.raises(TypeError):
            (a > b)
.parametrize('obj,expected', [(block('provider', 'aws', {}), 'aws'), (block('provider', 'aws', {}).alias, 'aws'), (block('provider', 'aws', {'region': 'eu-west-1'}), 'aws'), (block('provider', 'aws', {'region': 'eu-west-1'}).alias, 'aws'), (block('provider', 'aws', {'alias': 'nonprod'}), 'aws.nonprod'), (block('provider', 'aws', {'alias': 'nonprod'}).alias, 'aws.nonprod'), (block('variable', 'one', {}), '${var.one}'), (block('resource', 'one', 'two', {}), '${one.two}'), (block('resource', 'aws_instance', 'www', {}).ipv6_addresses[0], '${aws_instance.www.ipv6_addresses[0]}'), (block('resource', 'one', 'two', {}).list[0].another_list[1], '${one.two.list[0].another_list[1]}')])  # NOTE(review): decorator prefix appears truncated (presumably @pytest.mark) — confirm
def test_block(obj, expected):
    """str() of a terraform block (or attribute reference) yields its interpolation string."""
    assert (str(obj) == expected)
_start_docstrings('Bert Based model to embed queries or document for document retrieval.', RETRIBERT_START_DOCSTRING)  # NOTE(review): decorator prefix appears truncated (presumably @add_start_docstrings) — confirm
class RetriBertModel(RetriBertPreTrainedModel):
    """Dual-encoder retrieval model: a query BERT and an (optionally shared)
    document BERT project into a common space; forward returns a symmetric
    in-batch cross-entropy retrieval loss."""
    def __init__(self, config: RetriBertConfig) -> None:
        super().__init__(config)
        self.projection_dim = config.projection_dim
        self.bert_query = BertModel(config)
        # When encoders are shared, the query encoder is reused for documents.
        self.bert_doc = (None if config.share_encoders else BertModel(config))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.ce_loss = nn.CrossEntropyLoss(reduction='mean')
        self.post_init()
    def embed_sentences_checkpointed(self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=(- 1)):
        """Encode sentences, optionally gradient-checkpointing in sub-batches
        of ``checkpoint_batch_size`` to bound activation memory (-1 disables)."""
        if ((checkpoint_batch_size < 0) or (input_ids.shape[0] < checkpoint_batch_size)):
            # Small enough: run the encoder normally and take the pooled output.
            return sent_encoder(input_ids, attention_mask=attention_mask)[1]
        else:
            device = input_ids.device
            input_shape = input_ids.size()
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            head_mask = ([None] * sent_encoder.config.num_hidden_layers)
            extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(attention_mask, input_shape)
            def partial_encode(*inputs):
                # Encoder + pooler only; embeddings are computed once outside
                # so the checkpoint re-runs just the transformer stack.
                encoder_outputs = sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask)
                sequence_output = encoder_outputs[0]
                pooled_output = sent_encoder.pooler(sequence_output)
                return pooled_output
            embedding_output = sent_encoder.embeddings(input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None)
            pooled_output_list = []
            # Process the batch in checkpointed slices and concatenate results.
            for b in range(math.ceil((input_ids.shape[0] / checkpoint_batch_size))):
                b_embedding_output = embedding_output[(b * checkpoint_batch_size):((b + 1) * checkpoint_batch_size)]
                b_attention_mask = extended_attention_mask[(b * checkpoint_batch_size):((b + 1) * checkpoint_batch_size)]
                pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
                pooled_output_list.append(pooled_output)
            return torch.cat(pooled_output_list, dim=0)
    def embed_questions(self, input_ids, attention_mask=None, checkpoint_batch_size=(- 1)):
        """Embed queries with the query encoder and project them."""
        q_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query, checkpoint_batch_size)
        return self.project_query(q_reps)
    def embed_answers(self, input_ids, attention_mask=None, checkpoint_batch_size=(- 1)):
        """Embed documents with the doc encoder (or the query encoder if shared)."""
        a_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, (self.bert_query if (self.bert_doc is None) else self.bert_doc), checkpoint_batch_size)
        return self.project_doc(a_reps)
    def forward(self, input_ids_query: torch.LongTensor, attention_mask_query: Optional[torch.FloatTensor], input_ids_doc: torch.LongTensor, attention_mask_doc: Optional[torch.FloatTensor], checkpoint_batch_size: int=(- 1)) -> torch.FloatTensor:
        """Return the mean of query->doc and doc->query in-batch CE losses,
        where the i-th query is paired with the i-th document."""
        device = input_ids_query.device
        q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
        a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
        # Similarity matrix: queries x documents.
        compare_scores = torch.mm(q_reps, a_reps.t())
        loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
        loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
        loss = ((loss_qa + loss_aq) / 2)
        return loss
_vision
_torch
class Pix2StructProcessorTest(unittest.TestCase):
    def setUp(self):
        """Persist a processor (image processor + T5 tokenizer) to a temp dir
        so tests can reload its components via from_pretrained."""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = Pix2StructImageProcessor()
        tokenizer = T5Tokenizer.from_pretrained('t5-small')
        processor = Pix2StructProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        """Reload just the tokenizer from the saved processor directory."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        """Reload just the image processor from the saved processor directory."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        # Remove the temp directory created in setUp.
        shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_additional_features(self):
        """Kwargs given to from_pretrained must override saved component settings."""
        processor = Pix2StructProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        # Expected components configured with the same overrides, built directly.
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = Pix2StructProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, Pix2StructImageProcessor)
    def test_image_processor(self):
        """processor(images=...) must match calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            # Compare aggregate sums to tolerate minor float noise.
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)
def test_tokenizer(self):
    """Text-only processing must match the underlying tokenizer output."""
    tokenizer = self.get_tokenizer()
    processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=self.get_image_processor())
    text = 'lower newer'
    via_processor = processor(text=text)
    via_tokenizer = tokenizer(text, return_token_type_ids=False, add_special_tokens=False)
    for key in via_tokenizer.keys():
        self.assertListEqual(via_tokenizer[key], via_processor[key])
def test_processor(self):
    """Joint text+image call returns the expected keys; an empty call raises."""
    processor = Pix2StructProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    encoded = processor(text='lower newer', images=self.prepare_image_inputs())
    self.assertListEqual(list(encoded.keys()), ['flattened_patches', 'attention_mask', 'decoder_attention_mask', 'decoder_input_ids'])
    # Calling with neither text nor images is an error.
    with pytest.raises(ValueError):
        processor()
def test_processor_max_patches(self):
    """Varying ``max_patches`` changes the patch count but not the patch width."""
    processor = Pix2StructProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    text = 'lower newer'
    images = self.prepare_image_inputs()
    inputs = processor(text=text, images=images)
    # Flattened-patch feature width; the same for every max_patches setting.
    expected_width = 770
    for max_patch in [512, 1024, 2048, 4096]:
        # Text and images together.
        inputs = processor(text=text, images=images, max_patches=max_patch)
        self.assertEqual(inputs['flattened_patches'][0].shape[0], max_patch)
        self.assertEqual(inputs['flattened_patches'][0].shape[1], expected_width)
    for max_patch in [512, 1024, 2048, 4096]:
        # Images only.
        inputs = processor(images=images, max_patches=max_patch)
        self.assertEqual(inputs['flattened_patches'][0].shape[0], max_patch)
        self.assertEqual(inputs['flattened_patches'][0].shape[1], expected_width)
def test_tokenizer_decode(self):
    """``batch_decode`` must simply forward to the tokenizer."""
    tokenizer = self.get_tokenizer()
    processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=self.get_image_processor())
    token_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    self.assertListEqual(tokenizer.batch_decode(token_ids), processor.batch_decode(token_ids))
def test_model_input_names(self):
    """Output keys depend on whether images are supplied alongside text."""
    processor = Pix2StructProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    text = 'lower newer'
    # Text + image: encoder patches plus decoder ids.
    multimodal = processor(text=text, images=self.prepare_image_inputs())
    self.assertListEqual(list(multimodal.keys()), ['flattened_patches', 'attention_mask', 'decoder_attention_mask', 'decoder_input_ids'])
    # Text only: plain tokenizer-style outputs.
    text_only = processor(text=text)
    self.assertListEqual(list(text_only.keys()), ['input_ids', 'attention_mask'])
def _normalize_dates(data):
    """Return *data* (rows of cells) with date-like cells coerced to tz-aware datetimes.

    Timestamps/datetimes keep their tzinfo (defaulting to UTC); plain dates
    become midnight UTC; every other cell passes through unchanged.

    Raises:
        ValueError: if a cell is NaT (not-a-time).
    """
    def normalize_date(x):
        if isinstance(x, pa.tslib.NaTType):
            # BUG FIX: previously *returned* a ValueError instance (which ended
            # up inside the output rows) instead of raising it.
            raise ValueError('NaT (not-a-time) values cannot be normalized')
        elif isinstance(x, (pa.tslib.Timestamp, dt.datetime)):
            return dt.datetime(*x.timetuple()[:6], tzinfo=(x.tzinfo or pytz.utc))
        elif isinstance(x, dt.date):
            return dt.datetime(*x.timetuple()[:3], tzinfo=pytz.utc)
        return x
    return [[normalize_date(c) for c in r] for r in data]
def get_pipes(maxage=timedelta(seconds=0), targetID=None, use_volatile=False, cmd_options=None):
    """Run the ``netconnections`` command (pipes only) through the project cache.

    Args:
        maxage: maximum acceptable age of a cached result.
        targetID: optional target to query.
        use_volatile: accepted for interface compatibility; not used here.
        cmd_options: extra keyword options forwarded to getDszCommand.
            FIX: previously defaulted to a shared mutable ``dict()``.
    """
    if cmd_options is None:
        cmd_options = {}
    pipes_cmd = ops.cmd.getDszCommand('netconnections', complexity='PipesOnly', **cmd_options)
    return ops.project.generic_cache_get(pipes_cmd, maxage=maxage, cache_tag=NETSTAT_PIPES_LIST_TAG, targetID=targetID)
_images
def test_process(host, docker_image):
    """PID 1 inside the container is root's init process (systemd)."""
    pid1 = host.process.get(pid=1)
    assert pid1.ppid == 0
    assert pid1.euid == 0
    assert pid1.user == 'root'
    # Expected argv/comm differ per base image.
    expected = {'rockylinux9': ('/usr/sbin/init', 'systemd'), 'debian_bookworm': ('/sbin/init', 'systemd')}
    args, comm = expected[docker_image]
    assert pid1.args == args
    assert pid1.comm == comm
class Ljpeg(Codec):
    """Lossless JPEG codec backed by :mod:`imagecodecs`."""

    codec_id = 'imagecodecs_ljpeg'

    def __init__(self, bitspersample=None):
        # None lets the encoder derive the bit depth from the data.
        self.bitspersample = bitspersample

    def encode(self, buf):
        """Encode array-like *buf* as lossless JPEG bytes."""
        arr = protective_squeeze(numpy.asarray(buf))
        return imagecodecs.ljpeg_encode(arr, bitspersample=self.bitspersample)

    def decode(self, buf, out=None):
        """Decode lossless JPEG bytes, optionally into *out*."""
        return imagecodecs.ljpeg_decode(buf, out=out)
def add_constant(s, cst, unit=None, var=None, inplace=False):
    """Add scalar *cst* to quantity *var* of spectrum *s* (copy unless *inplace*)."""
    var = _get_unique_var(s, var, inplace)
    # Convert the constant into the spectrum's own unit when a unit is given.
    if unit is not None:
        spectrum_unit = s.units[var]
        if unit != spectrum_unit:
            from radis.phys.convert import conv2
            cst = conv2(cst, unit, spectrum_unit)
    if not inplace:
        s = s.copy(quantity=var)
    # copy=False yields a view, so the in-place += mutates the spectrum itself.
    w, I = s.get(var, wunit=s.get_waveunit(), Iunit=s.units[var], copy=False)
    I += cst
    return s
class Effect1361(BaseEffect):
    """Passive effect: weapon-disruption modules receive a capacitor-need bonus."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # The skill level scales the bonus only in a 'skill' context.
        level = container.level if 'skill' in context else 1
        bonus = container.getModifiedItemAttr('capNeedBonus') * level
        fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill('Weapon Disruption'), 'capacitorNeed', bonus, **kwargs)
def get_quantity(name):
    """Build a Quantity for *name* from the module-level registries.

    Raises:
        ValueError: when the quantity itself is unknown.
        RuntimeError: when the quantity exists but its unit is missing.
    """
    try:
        spec = list(quantities[name])
    except KeyError:
        raise ValueError('Unknown quantity. Quantity is not yet specified.')
    try:
        unit_spec = units[name]
    except KeyError:
        raise RuntimeError('Unknown unit. Quantity has been specified but unit has not.')
    # Replace the raw unit spec with a constructed Unit instance.
    spec[1] = Unit(*unit_spec)
    return Quantity(*spec)
def _timedelta_offset_str(tdelta: timedelta) -> str:
offset_s = tdelta.total_seconds()
offset_h = int((offset_s / 3600))
offset_m = int(((offset_s / 60) % 60))
offset_t = time(abs(offset_h), abs(offset_m))
operator = ('+' if (offset_s > 0) else '-')
offset = offset_t.strftime('{}%H:%M'.format(operator))
return offset |
class Tracker():
    """Anonymous usage tracker: persists a per-user UUID cookie and reports events."""

    def __init__(self) -> None:
        if not self.has_cookie():
            self.set_cookie()
        self.user_id = self.get_cookie()['id']
        self.env = self.get_environment()

    @property
    def cookie_path(self):
        # FIX: restored @property — callers use `self.cookie_path` as a plain
        # string (e.g. open(self.cookie_path)), which fails on a bound method.
        return os.path.join(self.cookie_dir, '.user.yml')

    @property
    def cookie_dir(self):
        # FIX: restored @property for the same reason as cookie_path.
        return flags.RE_DATA_CONFIG_DIR

    def get_cookie(self):
        """Read the stored user cookie (a dict with an 'id' key)."""
        with open(self.cookie_path, 'r') as fh:
            user = yaml.load(fh, Loader=SafeLoader)
        return user

    def has_cookie(self):
        """Return True when the cookie file already exists on disk."""
        return os.path.exists(self.cookie_path)

    def set_cookie(self):
        """Create a fresh anonymous id, persist it, and register it with analytics."""
        user = {'id': str(uuid.uuid4())}
        if not os.path.exists(self.cookie_dir):
            os.makedirs(self.cookie_dir)
        if not os.path.exists(self.cookie_path):
            with open(self.cookie_path, 'w') as fh:
                yaml.dump(user, fh)
        analytics.identify(user['id'], {})

    def get_environment(self):
        """Collect the version/platform information sent with every event."""
        return {'dbt_version': metadata.version('dbt-core'), 're_data_version': metadata.version('re-data'), 'python_version': platform.python_version(), 'os_system': platform.system()}

    def track(self, event, properties):
        """Report *event* with *properties* for this user."""
        analytics.track(self.user_id, event, properties)
def bose_hubbard(x_dimension, y_dimension, tunneling, interaction, chemical_potential=0.0, dipole=0.0, periodic=True):
    """Return the Bose-Hubbard Hamiltonian on an x_dimension * y_dimension lattice.

    Accumulates hopping, nearest-neighbour (dipole) interaction, on-site
    interaction and chemical-potential terms into one BosonOperator.

    Args:
        x_dimension: lattice width (sites per row).
        y_dimension: lattice height (number of rows).
        tunneling: hopping amplitude t (enters each hopping term as -t).
        interaction: on-site interaction strength U.
        chemical_potential: chemical potential mu (default 0.0).
        dipole: nearest-neighbour interaction strength (default 0.0).
        periodic: whether neighbours wrap around lattice edges (default True).
    """
    n_sites = (x_dimension * y_dimension)
    hubbard_model = BosonOperator()
    for site in range(n_sites):
        # Helper functions handle the periodic wrap-around for neighbour lookup.
        right_neighbor = _right_neighbor(site, x_dimension, y_dimension, periodic)
        bottom_neighbor = _bottom_neighbor(site, x_dimension, y_dimension, periodic)
        # In a periodic dimension of size 2 the wrapped neighbour coincides with
        # the direct one; drop it so the bond is not counted twice.
        if ((x_dimension == 2) and periodic and ((site % 2) == 1)):
            right_neighbor = None
        if ((y_dimension == 2) and periodic and (site >= x_dimension)):
            bottom_neighbor = None
        if (right_neighbor is not None):
            # Hopping and neighbour interaction along the row.
            hubbard_model += _hopping_term(site, right_neighbor, (- tunneling), bosonic=True)
            hubbard_model += _coulomb_interaction_term(n_sites, site, right_neighbor, dipole, particle_hole_symmetry=False, bosonic=True)
        if (bottom_neighbor is not None):
            # Hopping and neighbour interaction along the column.
            hubbard_model += _hopping_term(site, bottom_neighbor, (- tunneling), bosonic=True)
            hubbard_model += _coulomb_interaction_term(n_sites, site, bottom_neighbor, dipole, particle_hole_symmetry=False, bosonic=True)
        # On-site interaction (U/2) n (n - 1) and chemical potential -mu n.
        hubbard_model += (number_operator(n_sites, site, (0.5 * interaction), parity=1) * (number_operator(n_sites, site, parity=1) - BosonOperator(())))
        hubbard_model += number_operator(n_sites, site, (- chemical_potential), parity=1)
    return hubbard_model
@pytest.mark.parametrize('api', ['cufile', 'posix', 'cufile-mfma', 'cufile-mf', 'cufile-ma', 'zarr'])
def test_single_node_io(run_cmd, tmp_path, api):
    """Run the single-node-io benchmark once per I/O api and expect a clean exit.

    FIX: restored the ``@pytest.mark.parametrize`` decorator — a bare
    ``.parametrize(...)`` line is a syntax error.
    """
    if 'zarr' in api:
        # Zarr support is optional; skip when kvikio.zarr is missing/unsupported.
        kz = pytest.importorskip('kvikio.zarr')
        if not kz.supported:
            pytest.skip(f'requires Zarr >={kz.MINIMUM_ZARR_VERSION}')
    retcode = run_cmd(cmd=[(sys.executable or 'python'), 'single-node-io.py', '-n', '1MiB', '-d', str(tmp_path), '--api', api], cwd=benchmarks_path)
    assert retcode == 0
class ClassFeatures():
    """Maintains per-class prototype feature vectors via running mean / EMA.

    Used in self-training style segmentation adaptation: mean class features are
    extracted from predictions and folded into ``objective_vectors``.
    NOTE(review): the 256 feature dimension is hard-coded — confirm it matches
    the feature extractor.
    """
    def __init__(self, numbers=19, proto_momentum=0.9999, dev=torch.device('cpu')):
        self.class_numbers = numbers
        self.class_features = [[] for _ in range(self.class_numbers)]
        self.dev = dev
        self.num = np.zeros(numbers)
        self.proto_momentum = proto_momentum
        # Per-class update counts and prototype vectors.
        self.objective_vectors_num = torch.zeros([self.class_numbers]).to(self.dev)
        self.objective_vectors = torch.zeros([self.class_numbers, 256]).to(self.dev)
    def calculate_mean_vector(self, feat_cls, outputs):
        """Compute per-class mean feature vectors from predicted segmentation.

        Args:
            feat_cls: per-sample feature maps.
            outputs: raw segmentation logits (N, C, H, W).
        Returns:
            (vectors, ids): one masked-average feature vector per (sample,
            predicted class) pair, with the matching class ids.
        """
        outputs_softmax = F.softmax(outputs, dim=1)
        outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
        # One-hot encode the per-pixel argmax prediction.
        outputs_argmax = self.process_label(outputs_argmax.float())
        outputs_pred = outputs_argmax
        # Fraction of pixels assigned to each class, used to renormalize sums.
        scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
        vectors = []
        ids = []
        for n in range(feat_cls.size()[0]):
            for t in range(self.class_numbers):
                if (scale_factor[n][t].item() == 0):
                    continue
                # Skip classes with fewer than 10 predicted pixels (too noisy).
                if ((outputs_pred[n][t] > 0).sum() < 10):
                    continue
                s = (feat_cls[n] * outputs_pred[n][t])
                # Masked average = pooled mean / class pixel fraction.
                s = (F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t])
                vectors.append(s)
                ids.append(t)
        return (vectors, ids)
    def process_label(self, label):
        """One-hot encode *label* into class_numbers+1 channels; the extra
        channel absorbs labels >= class_numbers (e.g. an ignore index)."""
        (batch, _, w, h) = label.size()
        pred1 = torch.zeros(batch, (self.class_numbers + 1), w, h).to(self.dev)
        idx = torch.where((label < self.class_numbers), label, torch.Tensor([self.class_numbers]).to(self.dev))
        pred1 = pred1.scatter_(1, idx.long(), 1)
        return pred1
    def update_objective_SingleVector(self, idx, vector, name='moving_average', start_mean=True):
        """Fold *vector* into class *idx*'s prototype.

        A plain running mean is used for the first 100 updates (when
        ``start_mean``), then the strategy given by *name*; unknown strategies
        raise NotImplementedError.
        """
        # Ignore empty (all-zero) vectors.
        if (vector.sum().item() == 0):
            return
        if (start_mean and (self.objective_vectors_num[idx].item() < 100)):
            name = 'mean'
        if (name == 'moving_average'):
            # Exponential moving average with momentum proto_momentum.
            self.objective_vectors[idx] = ((self.objective_vectors[idx] * self.proto_momentum) + ((1 - self.proto_momentum) * vector.squeeze()))
            self.objective_vectors_num[idx] += 1
            # Cap the count to keep it bounded.
            self.objective_vectors_num[idx] = min(self.objective_vectors_num[idx], 3000)
        elif (name == 'mean'):
            # Running mean: re-expand by the old count, add, renormalize.
            self.objective_vectors[idx] = ((self.objective_vectors[idx] * self.objective_vectors_num[idx]) + vector.squeeze())
            self.objective_vectors_num[idx] += 1
            self.objective_vectors[idx] = (self.objective_vectors[idx] / self.objective_vectors_num[idx])
            self.objective_vectors_num[idx] = min(self.objective_vectors_num[idx], 3000)
            pass
        else:
            raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
def main():
    """Fine-tune and/or evaluate a question-answering model on SQuAD with the HF Trainer."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # A single .json argument is parsed as a config file instead of CLI flags.
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # Only ranks -1 (no distributed) and 0 log at INFO level.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (training_args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool((training_args.local_rank != (- 1))), training_args.fp16)
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
    # Config/tokenizer may be loaded from names different from the model's.
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=False)
    model = AutoModelForQuestionAnswering.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir)
    # A 'lang2id' attribute on the config signals a language-sensitive model.
    is_language_sensitive = hasattr(model.config, 'lang2id')
    train_dataset = (SquadDataset(data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir) if training_args.do_train else None)
    eval_dataset = (SquadDataset(data_args, tokenizer=tokenizer, mode='dev', is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir) if training_args.do_eval else None)
    # Pad to multiples of 8 only under fp16 (tensor-core friendly shapes).
    data_collator = (DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None)
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator)
    if training_args.do_train:
        trainer.train(model_path=(model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None))
        trainer.save_model()
        # Only the world-master process writes the tokenizer files.
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
class Passaro(Ator):
    """Bird actor (stub): remembers its launch parameters; physics not implemented."""

    # Scalar launch speed.
    velocidade_escalar = 10

    def __init__(self, x=0, y=0):
        """Create the bird at (x, y) and remember that initial position."""
        super().__init__(x, y)
        self._x_inicial = x
        self._y_inicial = y
        self._tempo_de_lancamento = None
        self._angulo_de_lancamento = None

    def foi_lancado(self):
        """Return whether the bird has been launched.  Stub: always True."""
        return True

    def colidir_com_chao(self):
        """Handle hitting the ground.  Stub: does nothing."""
        pass

    def calcular_posicao(self, tempo):
        """Return the (x, y) position at time *tempo*.  Stub: fixed (1, 1)."""
        return (1, 1)

    def lancar(self, angulo, tempo_de_lancamento):
        """Launch at *angulo* at time *tempo_de_lancamento*.  Stub: no-op."""
        pass
class ResNet_b(nn.Module):
    """ResNet with a shared stem (conv1..layer2) and ``number_net`` parallel
    branches (layer3/layer4/classifier), for ensemble-style training.

    ``forward`` returns (list of per-branch logits, list of per-branch
    pooled embeddings).
    """
    def __init__(self, block, layers, num_classes=1000, number_net=4, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet_b, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.number_net = number_net
        self.num_classes = num_classes
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per strided stage: trade stride for dilation when True.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        # _make_layer mutates self.inplanes; remember the value here so that
        # every branch starts from the same channel count.
        fix_planes = self.inplanes
        for i in range(self.number_net):
            self.inplanes = fix_planes
            setattr(self, ('layer3_' + str(i)), self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]))
            setattr(self, ('layer4_' + str(i)), self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]))
            setattr(self, ('classifier_' + str(i)), nn.Linear((512 * block.expansion), self.num_classes))
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN in each residual block so blocks start as identity.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack *blocks* residual blocks; the first one may stride/downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 projection so the shortcut matches the block output shape.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x):
        # Shared stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        logits = []
        embedding = []
        input = x
        for i in range(self.number_net):
            # Every branch consumes the same stem output.
            x = getattr(self, ('layer3_' + str(i)))(input)
            x = getattr(self, ('layer4_' + str(i)))(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), (- 1))
            embedding.append(x)
            x = getattr(self, ('classifier_' + str(i)))(x)
            logits.append(x)
        return (logits, embedding)
    def forward(self, x):
        """Return (per-branch logits list, per-branch embedding list)."""
        return self._forward_impl(x)
def upgrade(saveddata_engine):
    """Recreate the built-in 'Uniform' damage pattern with a fixed ID of 1."""
    # Remove any pre-existing rows named 'Uniform' or carrying ID 1 ...
    saveddata_engine.execute('DELETE FROM damagePatterns WHERE name LIKE ? OR ID LIKE ?', ('Uniform', '1'))
    # ... then insert the canonical uniform 25/25/25/25 pattern owned by nobody.
    saveddata_engine.execute('INSERT INTO damagePatterns (ID, name, emAmount, thermalAmount, kineticAmount, explosiveAmount, ownerID) VALUES (?, ?, ?, ?, ?, ?, ?)', (1, 'Uniform', 25, 25, 25, 25, None))
def _run_command(args: List[str], *, stdin: BinaryIO, timeout: int) -> 'subprocess.CompletedProcess[bytes]':
    """Run *args* with captured stdout/stderr, logging the command and its duration."""
    logging.debug('$ %s', ' '.join(args))
    started = time.monotonic()
    try:
        return subprocess.run(args, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=IS_WINDOWS, timeout=timeout, check=True)
    finally:
        # Log the elapsed time even when subprocess.run raises (timeout/check).
        elapsed_ms = (time.monotonic() - started) * 1000
        logging.debug('took %dms', elapsed_ms)
class Adaptor(a_base.Base):
    """Adaptor registering the shell-file command-shell endpoint."""

    def __init__(self):
        a_base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

    def sanity_check(self):
        """No runtime prerequisites to verify for this adaptor."""
        pass

    def get_lease_target(self, tgt):
        """Map *tgt* onto the shared lease URL used to pool command shells."""
        lease_target = rsurl.Url(tgt)
        # All shells share a single lease path.
        lease_target.path = '/shell_file_adaptor_command_shell/'
        return lease_target
def _token_data(access=None, context=None, audience=TEST_AUDIENCE, user=TEST_USER, iat=None, exp=None, nbf=None, iss=None, subject=None):
    """Build a JWT claims dict for tests.

    FIX: ``access`` previously defaulted to a shared mutable list (``access=[]``);
    it now defaults to None and a fresh list is created per call.

    Timestamp claims default to "now" (nbf/iat) and now + token lifetime (exp).
    """
    if access is None:
        access = []
    if subject is None:
        (_, subject) = build_context_and_subject(ValidatedAuthContext(user=user))
    return {
        'iss': iss or instance_keys.service_name,
        'aud': audience,
        'nbf': nbf if nbf is not None else int(time.time()),
        'iat': iat if iat is not None else int(time.time()),
        'exp': exp if exp is not None else int(time.time() + TOKEN_VALIDITY_LIFETIME_S),
        'sub': subject,
        'access': access,
        'context': context,
    }
def _generate_deprecation_message(since, message='', name='', alternative='', pending=False, obj_type='attribute', addendum='', removal=''):
    """Compose a standard deprecation warning message.

    Args:
        since: version in which the deprecation started.
        message: explicit message; when empty, one is assembled from the parts.
        name: name of the deprecated object.
        alternative: replacement API to suggest, if any.
        pending: True for a not-yet-active deprecation.
        obj_type: kind of object ('attribute', 'function', ...).
        addendum: extra text appended to the message.
        removal: removal version; '' means 'soon', any other falsy value
            suppresses the removal clause entirely.

    Raises:
        ValueError: when a pending deprecation also names a removal version.
    """
    if (removal == ''):
        # Default removal wording when no version was given.
        removal = 'soon'
    elif removal:
        if pending:
            raise ValueError('A pending deprecation cannot have a scheduled removal')
        removal = 'in {}'.format(removal)
    if (not message):
        # Assemble "The <name> <type> was deprecated in <project> <since> and
        # will be removed <removal>. Use <alternative> instead. <addendum>",
        # with each clause included conditionally.
        message = (((('The %(name)s %(obj_type)s' + (' will be deprecated in a future version' if pending else (' was deprecated in %(projectName)s %(since)s' + (' and will be removed %(removal)s' if removal else '')))) + '.') + (' Use %(alternative)s instead.' if alternative else '')) + (' %(addendum)s' if addendum else ''))
    return (message % dict(func=name, name=name, obj_type=obj_type, since=since, removal=removal, alternative=alternative, addendum=addendum, projectName=_projectName))
def test_scalar_array_types_store(i8: wp.array(dtype=wp.int8), u8: wp.array(dtype=wp.uint8), i16: wp.array(dtype=wp.int16), u16: wp.array(dtype=wp.uint16), i32: wp.array(dtype=wp.int32), u32: wp.array(dtype=wp.uint32), i64: wp.array(dtype=wp.int64), u64: wp.array(dtype=wp.uint64), f32: wp.array(dtype=wp.float32), f64: wp.array(dtype=wp.float64)):
    """Store each thread's id into arrays of every scalar dtype and read it back,
    verifying the round-trip through each type.

    NOTE(review): this is written as a Warp kernel body (wp.tid / wp.expect_eq);
    confirm the ``@wp.kernel`` decorator upstream of this chunk.
    """
    tid = wp.tid()
    # Store the thread index, cast to each scalar type.
    i8[tid] = wp.int8(tid)
    u8[tid] = wp.uint8(tid)
    i16[tid] = wp.int16(tid)
    u16[tid] = wp.uint16(tid)
    i32[tid] = wp.int32(tid)
    u32[tid] = wp.uint32(tid)
    i64[tid] = wp.int64(tid)
    u64[tid] = wp.uint64(tid)
    f32[tid] = wp.float32(tid)
    f64[tid] = wp.float64(tid)
    # Read back and compare against the thread index.
    wp.expect_eq(int(i8[tid]), tid)
    wp.expect_eq(int(u8[tid]), tid)
    wp.expect_eq(int(i16[tid]), tid)
    wp.expect_eq(int(u16[tid]), tid)
    wp.expect_eq(int(i32[tid]), tid)
    wp.expect_eq(int(u32[tid]), tid)
    wp.expect_eq(int(i64[tid]), tid)
    wp.expect_eq(int(u64[tid]), tid)
    wp.expect_eq(float(f32[tid]), float(tid))
    wp.expect_eq(float(f64[tid]), float(tid))
def remap_log_file(input_log_file, remap_dict_file, output_log_file, item_feat_dict_file):
    """Rewrite a raw behaviour log with remapped ids and derived time features.

    Also collects a per-item feature dict (category id, shop id) and pickles it.
    """
    # The remap file holds four pickled dicts, in this exact order.
    with open(remap_dict_file, 'rb') as f:
        uid_remap_dict = pkl.load(f)
        iid_remap_dict = pkl.load(f)
        cid_remap_dict = pkl.load(f)
        sid_remap_dict = pkl.load(f)
    item_feat_dict = {}
    newlines = []
    with open(input_log_file, 'r') as f:
        for line in f:
            uid, sid, iid, cid, btype, date = line[:-1].split(',')
            # Keep only behaviour type '0'.  NOTE(review): confirm its semantics.
            if btype != '0':
                continue
            uid = uid_remap_dict[uid]
            iid = iid_remap_dict[iid]
            cid = cid_remap_dict[cid]
            sid = sid_remap_dict[sid]
            # Unix timestamp at day resolution for the YYYYMMDD date string.
            ts = str(int(time.mktime(datetime.datetime.strptime(date, '%Y%m%d').timetuple())))
            month = int(date[4:6])
            day = int(date[6:])
            # Season and day-of-month bucket ids are offset past the original
            # feature space (seasons take 4 slots before the day buckets).
            sea_id = str(get_season(month) + ORI_FEAT_SIZE)
            ud_id = str(get_ud(day) + ORI_FEAT_SIZE + 4)
            item_feat_dict.setdefault(iid, [cid, sid])
            newlines.append(','.join([uid, iid, cid, sid, sea_id, ud_id, ts]) + '\n')
    with open(output_log_file, 'w') as f:
        f.writelines(newlines)
    with open(item_feat_dict_file, 'wb') as f:
        pkl.dump(item_feat_dict, f)
class RandomRotation(object):
    """Rotate a PIL image by an angle sampled uniformly from ``degrees``.

    Args:
        degrees: a single non-negative number (range becomes (-degrees, degrees))
            or a 2-sequence (min, max).
        resample: PIL resampling filter forwarded to ``F.rotate``.
        expand: expand the output image to hold the whole rotated image.
        center: optional rotation center (defaults to the image center).
    """

    def __init__(self, degrees, resample=False, expand=False, center=None):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number, it must be positive.')
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, it must be of len 2.')
            self.degrees = degrees
        self.resample = resample
        self.expand = expand
        self.center = center

    @staticmethod
    def get_params(degrees):
        """Sample a rotation angle uniformly from the (min, max) *degrees* range.

        FIX: restored @staticmethod — ``__call__`` invokes
        ``self.get_params(self.degrees)``, which would otherwise pass the
        instance as *degrees* and raise a TypeError.
        """
        angle = random.uniform(degrees[0], degrees[1])
        return angle

    def __call__(self, img):
        """Rotate *img* by a freshly sampled angle."""
        angle = self.get_params(self.degrees)
        return F.rotate(img, angle, self.resample, self.expand, self.center)

    def __repr__(self):
        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
        format_string += ', resample={0}'.format(self.resample)
        format_string += ', expand={0}'.format(self.expand)
        if self.center is not None:
            format_string += ', center={0}'.format(self.center)
        format_string += ')'
        return format_string
class MoleculeDataLoader(DataLoader):
    """DataLoader over a MoleculeDataset with optional class balancing/shuffling.

    When constructed off the main thread with workers enabled, a 'forkserver'
    multiprocessing context and a long timeout are used so worker processes can
    be spawned safely.
    """
    def __init__(self, dataset: MoleculeDataset, batch_size: int=50, num_workers: int=8, class_balance: bool=False, shuffle: bool=False, seed: int=0, pin_memory: bool=False):
        # Stash the configuration; the sampler below consumes most of it.
        self._dataset = dataset
        self._batch_size = batch_size
        self._num_workers = num_workers
        self._class_balance = class_balance
        self._shuffle = shuffle
        self._seed = seed
        self._context = None
        self._timeout = 0
        is_main_thread = (threading.current_thread() is threading.main_thread())
        if ((not is_main_thread) and (self._num_workers > 0)):
            # Non-main threads cannot fork safely; use a forkserver context.
            self._context = 'forkserver'
            self._timeout = 3600
        self._sampler = MoleculeSampler(dataset=self._dataset, class_balance=self._class_balance, shuffle=self._shuffle, seed=self._seed)
        super().__init__(dataset=self._dataset, batch_size=self._batch_size, sampler=self._sampler, num_workers=self._num_workers, collate_fn=construct_molecule_batch, multiprocessing_context=self._context, timeout=self._timeout, pin_memory=pin_memory)
    def targets(self) -> List[List[Optional[float]]]:
        """Targets of the dataset samples, in sampler iteration order.

        NOTE(review): possibly a stripped @property — confirm whether call
        sites use ``loader.targets`` or ``loader.targets()``.
        """
        # With balancing/shuffling the sampler order is not reproducible here.
        if (self._class_balance or self._shuffle):
            raise ValueError('Cannot safely extract targets when class balance or shuffle are enabled.')
        return [self._dataset[index].targets for index in self._sampler]
    def iter_size(self) -> int:
        """Number of samples yielded per epoch (length of the sampler)."""
        return len(self._sampler)
def get_paths(args):
    """Return the path list from *args*, stripping any ``file://`` prefixes.

    Falls back to $PWD (or the home directory) when no paths were given.
    """
    if args.paths:
        prefix = 'file://'
        cut = len(prefix)
        return [p[cut:] if p.startswith(prefix) else p for p in args.paths]
    # No explicit paths: start from the current working directory when usable,
    # otherwise from the user's home directory.
    start_directory = os.environ.get('PWD')
    if not (start_directory and os.path.exists(start_directory)):
        start_directory = __get_home_directory()
    return [start_directory]
class MarkImportsUnreachableVisitor(TraverserVisitor):
    """AST visitor that flags every import statement as unreachable.

    Each override simply sets ``is_unreachable`` on the visited node;
    presumably consumed by later passes that skip unreachable imports —
    confirm with the caller.
    """

    def visit_import(self, node: Import) -> None:
        # `import x` statements.
        node.is_unreachable = True

    def visit_import_from(self, node: ImportFrom) -> None:
        # `from x import y` statements.
        node.is_unreachable = True

    def visit_import_all(self, node: ImportAll) -> None:
        # `from x import *` statements.
        node.is_unreachable = True
def value_from_ast(ast_node: ast.AST, ctx: Context, *, error_on_unrecognized: bool=True) -> Value:
    """Convert an annotation AST node into a Value, reporting unknown forms.

    Returns AnyValue(error) for nodes the visitor does not recognize, emitting
    an error through *ctx* unless suppressed.
    """
    result = _Visitor(ctx).visit(ast_node)
    if result is not None:
        return result
    # The visitor yields None for unrecognized annotation shapes.
    if error_on_unrecognized:
        ctx.show_error('Invalid type annotation', node=ast_node)
    return AnyValue(AnySource.error)
class SyntheticImageDataset(Dataset):
    """Dataset serving identical synthetic (mean-value) images, e.g. for benchmarking."""

    DEFAULT_SIZE = 50000  # minimum number of samples served

    def __init__(self, cfg, path: str, split: str, dataset_name: str, data_source='synthetic'):
        super(SyntheticImageDataset, self).__init__()
        self.cfg = cfg
        self.split = split
        self.data_source = data_source
        # At least DEFAULT_SIZE samples; more when the split's DATA_LIMIT asks.
        self._num_samples = max(self.DEFAULT_SIZE, cfg.DATA[split].DATA_LIMIT)

    def num_samples(self):
        """Total number of synthetic samples served."""
        return self._num_samples

    def __len__(self):
        return self.num_samples()

    def __getitem__(self, idx):
        # Every index yields the same mean image; loading always succeeds.
        mean_img = get_mean_image(self.cfg['DATA'][self.split].DEFAULT_GRAY_IMG_SIZE)
        return (mean_img, True)
def compare_dicom_cli(command, original, expected):
    """Write *original* to disk, run *command*, and compare the adjusted file to *expected*."""
    pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)
    try:
        subprocess.check_call(command)
        adjusted = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True)
        # Comparing the string renderings covers the full dataset contents.
        assert str(adjusted) == str(expected)
    finally:
        # Always clean up both files, even when the CLI or the assert fails.
        remove_file(ORIGINAL_DICOM_FILENAME)
        remove_file(ADJUSTED_DICOM_FILENAME)
def _test_tensor_list_sync_state(dst_rank: Optional[int]=None) -> None:
    """Distributed helper: sync metric states holding *lists* of tensors and
    verify the gathered result.

    Args:
        dst_rank: rank that receives the synced states; None means every rank.
    """
    device = init_from_env()
    # Rank 0 holds two 'seen' tensors; every other rank holds one.
    if (dist.get_rank() == 0):
        state_data = {_METRIC_NAME: {'seen': [torch.tensor(1, device=device), torch.tensor(3, device=device)], 'total': [torch.tensor(1, device=device)]}}
    elif (dist.get_rank() == 1):
        state_data = {_METRIC_NAME: {'seen': [torch.tensor(1, device=device)], 'total': [torch.tensor(1, device=device)]}}
    else:
        state_data = {_METRIC_NAME: {'seen': [torch.tensor(1, device=device)], 'total': [torch.tensor(1, device=device)]}}
    dict_items = metrics_traversal_order(state_data)
    synced_states = sync_states(state_data, {_METRIC_NAME: device}, dict_items, rank=dst_rank)
    tc = unittest.TestCase()
    # Only the destination rank (or every rank when dst_rank is None) gets data.
    if ((dst_rank is None) or (dist.get_rank() == dst_rank)):
        tc.assertIsNotNone(synced_states)
        # One entry per participating rank (3 ranks in this test setup).
        tc.assertEqual(len(synced_states), 3)
        tc.assertTrue(all(((len(synced_states[i]) == 1) for i in range(3))))
        tc.assertTrue(all(((_METRIC_NAME in synced_states[i]) for i in range(3))))
        tc.assertTrue(all(((len(synced_states[i][_METRIC_NAME]) == 2) for i in range(3))))
        tc.assertTrue(all((('seen' in synced_states[i][_METRIC_NAME]) for i in range(3))))
        tc.assertTrue(all((('total' in synced_states[i][_METRIC_NAME]) for i in range(3))))
        # Each rank's original tensor lists must come back unchanged.
        torch.testing.assert_close(synced_states[0][_METRIC_NAME]['seen'], [torch.tensor(1, device=device), torch.tensor(3, device=device)])
        torch.testing.assert_close(synced_states[0][_METRIC_NAME]['total'], [torch.tensor(1, device=device)])
        torch.testing.assert_close(synced_states[1][_METRIC_NAME]['seen'], [torch.tensor(1, device=device)])
        torch.testing.assert_close(synced_states[1][_METRIC_NAME]['total'], [torch.tensor(1, device=device)])
        torch.testing.assert_close(synced_states[2][_METRIC_NAME]['seen'], [torch.tensor(1, device=device)])
        torch.testing.assert_close(synced_states[2][_METRIC_NAME]['total'], [torch.tensor(1, device=device)])
    else:
        tc.assertIsNone(synced_states)
class PQStatCat():
    """Per-category panoptic-quality accumulator: IoU sum plus TP/FP/FN counts."""

    def __init__(self):
        self.iou = 0.0
        self.tp = 0
        self.fp = 0
        self.fn = 0

    def __iadd__(self, pq_stat_cat):
        """Accumulate another category's statistics into this one."""
        for field in ('iou', 'tp', 'fp', 'fn'):
            setattr(self, field, getattr(self, field) + getattr(pq_stat_cat, field))
        return self
class TestUDPCollector(CollectorTestCase):
    """Tests for UDPCollector against fixture copies of /proc/net/snmp."""

    def setUp(self, allowed_names=None):
        if not allowed_names:
            allowed_names = []
        config = get_collector_config('UDPCollector', {'allowed_names': allowed_names, 'interval': 1})
        self.collector = UDPCollector(config, None)

    def test_import(self):
        self.assertTrue(UDPCollector)

    # FIX: restored the mock decorators — without them these lines were inert
    # tuple expressions and the test methods received no mock arguments.
    @patch('os.access', Mock(return_value=True))
    @patch('__builtin__.open')
    @patch.object(Collector, 'publish')
    def test_should_open_proc_net_snmp(self, publish_mock, open_mock):
        UDPCollector.PROC = ['/proc/net/snmp']
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/net/snmp')

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        self.setUp([])
        # First fixture establishes the baseline; no deltas yet.
        UDPCollector.PROC = [self.getFixturePath('proc_net_snmp_1')]
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})
        # Second fixture yields the expected per-interval deltas.
        UDPCollector.PROC = [self.getFixturePath('proc_net_snmp_2')]
        self.collector.collect()
        metrics = {'InDatagrams': 0.0, 'InErrors': 5.0, 'NoPorts': 449.0, 'OutDatagrams': 0.0}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
def _check_dsa_parameters(parameters: DSAParameterNumbers) -> None:
    """Validate DSA domain parameters (p, q, g); raise ValueError when invalid."""
    valid_p_bits = (1024, 2048, 3072, 4096)
    if parameters.p.bit_length() not in valid_p_bits:
        raise ValueError('p must be exactly 1024, 2048, 3072, or 4096 bits long')
    valid_q_bits = (160, 224, 256)
    if parameters.q.bit_length() not in valid_q_bits:
        raise ValueError('q must be exactly 160, 224, or 256 bits long')
    # The generator must lie strictly between 1 and p.
    if not (1 < parameters.g < parameters.p):
        raise ValueError("g, p don't satisfy 1 < g < p.")
class _ResultProxy():
    """Async wrapper around an execution context: awaits coroutine parameters,
    executes the statement on the async cursor and post-processes rows."""
    # Marker consumed by the surrounding result machinery.
    _metadata = True
    def __init__(self, context):
        self._context = context
    def context(self):
        # Accessor for the execution context.
        # NOTE(review): used accessor-style; possibly a stripped @property.
        return self._context
    async def execute(self, one=False, return_model=True, status=False, return_context=False):
        """Execute the context's statement.

        Args:
            one: fetch at most one row and unwrap it (None when absent).
            return_model: map rows to model objects via context.process_rows.
            status: also return the cursor status message (as a 2-tuple).
            return_context: also return the execution context (as a 2-tuple).
        """
        context = self._context
        # Await any coroutine-valued parameters before executing.
        param_groups = []
        for params in context.parameters:
            replace_params = []
            for val in params:
                if asyncio.iscoroutine(val):
                    val = (await val)
                replace_params.append(val)
            param_groups.append(replace_params)
        cursor = context.cursor
        if context.executemany:
            # executemany returns the raw driver result without row processing.
            return (await cursor.async_execute(context.statement, context.timeout, param_groups, many=True))
        args = param_groups[0]
        if context.baked_query:
            rows = (await cursor.execute_baked(context.baked_query, context.timeout, args, one))
        else:
            # Limit to a single row when only one result was requested.
            rows = (await cursor.async_execute(context.statement, context.timeout, args, (1 if one else 0)))
        item = context.process_rows(rows, return_model=return_model)
        if one:
            # Unwrap the single row, or None when nothing matched.
            if item:
                item = item[0]
            else:
                item = None
        if status:
            return (cursor.get_statusmsg(), item)
        if return_context:
            return (context, item)
        return item
    def iterate(self):
        """Return an async-iterable cursor; multi-param execution cannot iterate."""
        if self._context.executemany:
            raise ValueError('too many multiparams')
        return _IterableCursor(self._context)
    async def prepare(self, clause):
        """Prepare *clause* on the underlying cursor."""
        return (await self._context.cursor.prepare(self._context, clause))
    def _soft_close(self):
        # No-op; presumably required by the caller's result interface — confirm.
        pass
class FatalError(RxHeader):
    """Parses a received fatal-error message from *sock*.

    Reads the header via RxHeader, maps the control code to an error code and
    pulls the textual error message from the payload.
    """
    def __init__(self, sock: socket.socket) -> None:
        super().__init__(sock, 'FatalError')
        # The control code indexes the fatal-error message table.
        self.error_code = FATALERRORMESSAGE[self.control_code]
        # Fatal-error headers carry no message parameter.
        assert (self.message_parameter == 0)
        # The payload holds the human-readable error text.
        self.error_message = receive_exact(sock, self.payload_length)
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_get(db, client, username, password, project_id):
    """GET project_update: 200 with change permission, 403 for other logged-in
    users, 302 (login redirect) for anonymous users.

    FIX: restored the ``@pytest.mark.parametrize`` decorators — bare
    ``.parametrize(...)`` lines are syntax errors and the test was never
    parametrized.
    """
    client.login(username=username, password=password)
    url = reverse('project_update', args=[project_id])
    response = client.get(url)
    if project_id in change_project_permission_map.get(username, []):
        assert response.status_code == 200
    elif password:
        # Authenticated but lacking permission.
        assert response.status_code == 403
    else:
        # Anonymous users are redirected to login.
        assert response.status_code == 302
def bottleneck(x, out_channels, stride=1, expansion=4, name=''):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with residual skip."""
    shortcut = x
    # 1x1 reduce.
    out = Conv2D(out_channels, 1, 1, use_bias=False, name=name + '/conv1')(x)
    out = BatchNormalization(name=name + '/bn1')(out)
    out = ReLU(name=name + '/relu1')(out)
    # 3x3 spatial conv; carries the block's stride.
    out = Conv2D(out_channels, 3, stride, 'same', use_bias=False, name=name + '/conv2')(out)
    out = BatchNormalization(name=name + '/bn2')(out)
    out = ReLU(name=name + '/relu2')(out)
    # 1x1 expand to out_channels * expansion.
    out = Conv2D(out_channels * expansion, 1, 1, use_bias=False, name=name + '/conv3')(out)
    out = BatchNormalization(name=name + '/bn3')(out)
    # Project the shortcut when the stride or channel count changes.
    if stride != 1 or shortcut.shape[-1] != out.shape[-1]:
        shortcut = Conv2D(out_channels * expansion, 1, stride, use_bias=False, name=name + '/skip_conv')(shortcut)
        shortcut = BatchNormalization(name=name + '/skip_bn')(shortcut)
    out = Add(name=name + '/add')([out, shortcut])
    out = ReLU(name=name + '/relu3')(out)
    return out
class RtorrentSweep(ScriptBaseWithConfig):
    """Sweep disk space according to rulesets, or show the active rules."""

    ARGS_HELP = '<space requirement>|SHOW'

    def add_options(self):
        """Register command line options for this script."""
        super(RtorrentSweep, self).add_options()
        self.add_bool_option('-n', '--dry-run', help='do not remove anything, just tell what would happen')
        self.add_value_option('-p', '--path', 'PATH', help='path into the filesystem to sweep (else the default download location)')
        self.add_value_option('-r', '--rules', 'RULESET [-r ...]', action='append', default=[], help='name the ruleset(s) to use, instead of the default ones')

    def mainloop(self):
        """Handle the command: show the rule table, or sweep (not implemented)."""
        if (len(self.args) < 1):
            self.parser.error('No space requirement provided!')
        sweeper = broom.DiskSpaceManager(rulesets=self.options.rules)
        # BUG FIX: ARGS_HELP advertises "SHOW" (upper case) but the old code
        # only matched the lower-case literal; accept any casing.
        if (self.args[0].lower() == 'show'):
            fmt = '{:4.4s} {:10.10s} {:15.15s} {:15.15s} {:60.60s}'
            print(fmt.format('PRIO', 'RULESET', 'NAME', 'ORDER', 'FILTER'))
            # Underline row: one '=' run per header column.
            print(fmt.format(*((('=' * 66),) * len(fmt.split()))))
            for rule in sweeper.rules:
                print(fmt.format(str(rule.prio).zfill(4), rule.ruleset, rule.name, rule.order, rule.filter))
            self.LOG.info('Protected items: {}'.format(sweeper.protected))
        else:
            self.fatal('Not implemented')
def read_csv(csv_file, class_whitelist=None, capacity=0):
    """Load boxes, labels and scores keyed by image from an AVA-style CSV.

    Args:
        csv_file: open file-like object whose rows are
            [video_id, timestamp, x1, y1, x2, y2, action_id(, score)].
        class_whitelist: optional collection of action ids to keep;
            rows with other ids are skipped.
        capacity: keep at most this many highest-scoring entries per image
            (values < 1 mean unlimited).

    Returns:
        Tuple ``(boxes, labels, scores)`` of dicts keyed by image key;
        boxes are ``[y1, x1, y2, x2]`` and entries are sorted by
        descending score.
    """
    start = time.time()
    entries = defaultdict(list)
    boxes = defaultdict(list)
    labels = defaultdict(list)
    scores = defaultdict(list)
    reader = csv.reader(csv_file)
    for row in reader:
        # BUG FIX: the old message concatenated a str with a list, which
        # raised TypeError instead of the intended AssertionError message.
        assert len(row) in [7, 8], f'Wrong number of columns: {row}'
        image_key = make_image_key(row[0], row[1])
        (x1, y1, x2, y2) = [float(n) for n in row[2:6]]
        action_id = int(row[6])
        if class_whitelist and (action_id not in class_whitelist):
            continue
        # Ground-truth rows carry no score column; default to 1.0.
        score = 1.0
        if len(row) == 8:
            score = float(row[7])
        # Min-heap keeps only the `capacity` highest-scoring detections.
        if (capacity < 1) or (len(entries[image_key]) < capacity):
            heapq.heappush(entries[image_key], (score, action_id, y1, x1, y2, x2))
        elif score > entries[image_key][0][0]:
            heapq.heapreplace(entries[image_key], (score, action_id, y1, x1, y2, x2))
    for image_key in entries:
        # Sort by descending score before splitting into parallel lists.
        entry = sorted(entries[image_key], key=lambda tup: -tup[0])
        for (score, action_id, y1, x1, y2, x2) in entry:
            boxes[image_key].append([y1, x1, y2, x2])
            labels[image_key].append(action_id)
            scores[image_key].append(score)
    print_time('read file ' + csv_file.name, start)
    return (boxes, labels, scores)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* through the async subprocess streamer and return its output.

    Raises:
        RuntimeError: when the process exits with a non-zero code (the
            combined worker stderr is attached) or produces no output at all.
    """
    loop = asyncio.get_event_loop()
    coro = _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    result = loop.run_until_complete(coro)
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        combined_stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n"
            f"The combined stderr from workers follows:\n{combined_stderr}"
        )
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
class BMPImageDecoder(ImageDecoder):
    """Decode Windows .bmp files.

    Handles 1/4/8-bit palettized and 16/24/32-bit BI_RGB / BI_BITFIELDS
    bitmaps; actual pixel unpacking is delegated to the per-format
    ``decode_*`` helpers defined elsewhere in this module.
    """
    def get_file_extensions(self):
        # Extensions this decoder registers for.
        return ['.bmp']
    def decode(self, filename, file):
        """Parse the BMP headers and dispatch to the right pixel decoder.

        Raises:
            ImageDecodeException: on wrong magic bytes, corrupt parameters,
                or an unsupported bit depth / compression combination.
        """
        if (not file):
            file = open(filename, 'rb')
        # NOTE(review): `bytes` and `buffer` shadow builtins; kept as-is here.
        bytes = file.read()
        buffer = ctypes.c_buffer(bytes)
        # Every BMP file starts with the two magic bytes 'BM'.
        if (bytes[:2] != b'BM'):
            raise ImageDecodeException(('Not a Windows bitmap file: %r' % (filename or file)))
        file_header = to_ctypes(buffer, 0, BITMAPFILEHEADER)
        # Absolute offset of the pixel data within the file.
        bits_offset = file_header.bfOffBits
        info_header_offset = ctypes.sizeof(BITMAPFILEHEADER)
        info_header = to_ctypes(buffer, info_header_offset, BITMAPINFOHEADER)
        # The colour palette (if present) directly follows the info header.
        palette_offset = (info_header_offset + info_header.biSize)
        if (info_header.biSize < ctypes.sizeof(BITMAPINFOHEADER)):
            raise ImageDecodeException(('Unsupported BMP type: %r' % (filename or file)))
        width = info_header.biWidth
        height = info_header.biHeight
        if ((width <= 0) or (info_header.biPlanes != 1)):
            raise ImageDecodeException(('BMP file has corrupt parameters: %r' % (filename or file)))
        # A negative height means a top-down bitmap; encode that in the pitch sign.
        pitch_sign = (((height < 0) and (- 1)) or 1)
        height = abs(height)
        compression = info_header.biCompression
        if (compression not in (BI_RGB, BI_BITFIELDS)):
            raise ImageDecodeException(('Unsupported compression: %r' % (filename or file)))
        clr_used = 0
        bitcount = info_header.biBitCount
        # Choose row pitch (bytes per row before alignment), the ctypes element
        # type, and the unpacking routine for each supported bit depth.
        if (bitcount == 1):
            pitch = ((width + 7) // 8)
            bits_type = ctypes.c_ubyte
            decoder = decode_1bit
        elif (bitcount == 4):
            pitch = ((width + 1) // 2)
            bits_type = ctypes.c_ubyte
            decoder = decode_4bit
        elif (bitcount == 8):
            bits_type = ctypes.c_ubyte
            pitch = width
            decoder = decode_8bit
        elif (bitcount == 16):
            pitch = (width * 2)
            bits_type = ctypes.c_uint16
            decoder = decode_bitfields
        elif (bitcount == 24):
            pitch = (width * 3)
            bits_type = ctypes.c_ubyte
            decoder = decode_24bit
        elif (bitcount == 32):
            pitch = (width * 4)
            if (compression == BI_RGB):
                decoder = decode_32bit_rgb
                bits_type = ctypes.c_ubyte
            elif (compression == BI_BITFIELDS):
                decoder = decode_bitfields
                bits_type = ctypes.c_uint32
            else:
                raise ImageDecodeException(('Unsupported compression: %r' % (filename or file)))
        else:
            raise ImageDecodeException(('Unsupported bit count %d: %r' % (bitcount, (filename or file))))
        # BMP rows are padded to a 4-byte boundary.
        pitch = ((pitch + 3) & (~ 3))
        packed_width = (pitch // ctypes.sizeof(bits_type))
        if ((bitcount < 16) and (compression == BI_RGB)):
            # Palettized image: biClrUsed == 0 means "full palette for depth".
            clr_used = (info_header.biClrUsed or (1 << bitcount))
            palette = to_ctypes(buffer, palette_offset, (RGBQUAD * clr_used))
            bits = to_ctypes(buffer, bits_offset, ((bits_type * packed_width) * height))
            return decoder(bits, palette, width, height, pitch, pitch_sign)
        elif ((bitcount >= 16) and (compression == BI_RGB)):
            # Direct-colour image without channel masks; no palette needed.
            bits = to_ctypes(buffer, bits_offset, (bits_type * (packed_width * height)))
            return decoder(bits, None, width, height, pitch, pitch_sign)
        elif (compression == BI_BITFIELDS):
            # Channel masks come either from a V4 header or from an extra
            # RGBFields block that follows the basic info header.
            if (info_header.biSize >= ctypes.sizeof(BITMAPV4HEADER)):
                info_header = to_ctypes(buffer, info_header_offset, BITMAPV4HEADER)
                r_mask = info_header.bV4RedMask
                g_mask = info_header.bV4GreenMask
                b_mask = info_header.bV4BlueMask
            else:
                fields_offset = (info_header_offset + ctypes.sizeof(BITMAPINFOHEADER))
                fields = to_ctypes(buffer, fields_offset, RGBFields)
                r_mask = fields.red
                g_mask = fields.green
                b_mask = fields.blue
            # Local struct so the 2-D bits array carries no extra padding.
            class _BitsArray(ctypes.LittleEndianStructure):
                _pack_ = 1
                _fields_ = [('data', ((bits_type * packed_width) * height))]
            bits = to_ctypes(buffer, bits_offset, _BitsArray).data
            return decoder(bits, r_mask, g_mask, b_mask, width, height, pitch, pitch_sign)
def is_qmk_firmware(qmk_firmware):
    """Return True when *qmk_firmware* looks like a qmk_firmware checkout.

    Checks for the directory itself plus a handful of landmark files and
    directories that every checkout must contain.
    """
    required = [
        qmk_firmware,
        qmk_firmware / 'quantum',
        qmk_firmware / 'requirements.txt',
        qmk_firmware / 'requirements-dev.txt',
        qmk_firmware / 'lib/python/qmk/cli/__init__.py',
    ]
    return all(entry.exists() for entry in required)
class DatabaseSchema():
    """In-memory description of a SQL database: tables, columns and keys.

    Can be built either from a Spider-style ``tables.json`` entry
    (``table_json``) or from explicit components, and can lazily load the
    actual table content from the matching sqlite file.

    BUG FIX: ``parse_sql_database``, ``get_db_folder``,
    ``load_data_from_sqlite`` and ``check_column_is_key`` take no ``self``
    but were called as ``self.method(...)``; without ``@staticmethod`` the
    instance was passed as the first argument (TypeError / wrong data).
    The decorators are restored below.
    """

    def __init__(self, table_json=None, db_id=None, table_names=None, column_names=None, primary_keys=None, foreign_keys=None, type_for_column_for_table=None, table_data=None, column_key_in_table=None, column_used_with_keys=None):
        if (table_json is not None):
            # Parse all schema components from the Spider tables.json entry,
            # overriding the explicit arguments.
            (db_id, table_names, column_names, primary_keys, foreign_keys, type_for_column_for_table) = self.parse_sql_database(table_json)
        self.db_id = db_id
        self.table_names = table_names
        self.column_names = column_names
        self.primary_keys = primary_keys
        self.type_for_column_for_table = type_for_column_for_table
        # Foreign keys are deduplicated and indexed in both directions.
        self.foreign_keys = []
        self.foreign_key_tgt_for_src = {}
        self.foreign_keys_src_for_tgt = {}
        self.add_foreign_keys(foreign_keys)
        self.data_loaded_tests = False
        if (table_data is None):
            self.data_loaded = False
        else:
            self.data_loaded = True
            self.update_data_to_self(table_data, table_names, column_names, type_for_column_for_table)
        self.column_key_in_table = column_key_in_table
        self.column_used_with_keys = column_used_with_keys
        self.table_data = table_data

    def add_foreign_keys(self, foreign_keys):
        """Register foreign keys, skipping duplicates, and index them."""
        for for_key in foreign_keys:
            if (for_key in self.foreign_keys):
                continue
            self.foreign_keys.append(for_key)
            src_key = GroundingKey.make_column_grounding(for_key['table_src'], for_key['col_src'])
            tgt_key = GroundingKey.make_column_grounding(for_key['table_tgt'], for_key['col_tgt'])
            self.foreign_key_tgt_for_src[src_key] = tgt_key
            if (tgt_key in self.foreign_keys_src_for_tgt):
                self.foreign_keys_src_for_tgt[tgt_key].append(src_key)
            else:
                self.foreign_keys_src_for_tgt[tgt_key] = [src_key]

    @staticmethod
    def parse_sql_database(table_json):
        """Extract schema components from one Spider ``tables.json`` entry.

        Returns:
            (db_id, table_names, column_names, primary_keys, foreign_keys,
            type_for_column_for_table)
        """
        db_id = table_json['db_id']
        table_names = table_json['table_names_original']
        spider_column_names = table_json['column_names_original']
        spider_column_types = table_json['column_types']
        assert (len(spider_column_types) == len(spider_column_names))
        # Spider's first column entry is always the '*' pseudo-column.
        assert (tuple(spider_column_names[0]) == ((- 1), '*'))
        column_names = {tbl: [] for tbl in table_names}
        type_for_column_for_table = {tbl: {} for tbl in table_names}
        for ((table_indx, col_name), col_type) in zip(spider_column_names[1:], spider_column_types[1:]):
            tbl_name = table_names[table_indx]
            column_names[tbl_name].append(col_name)
            type_for_column_for_table[tbl_name][col_name] = col_type
        primary_keys = {}
        for i_col in table_json['primary_keys']:
            (i_table, col_name) = spider_column_names[i_col]
            tbl_name = table_names[i_table]
            primary_keys[tbl_name] = col_name
        foreign_keys = []
        for (src_key, tgt_key) in table_json['foreign_keys']:
            (i_table_src, col_name_src) = spider_column_names[src_key]
            (i_table_tgt, col_name_tgt) = spider_column_names[tgt_key]
            key_data = {}
            table_src = table_names[i_table_src]
            key_data['table_src'] = table_src
            key_data['col_src'] = col_name_src
            table_tgt = table_names[i_table_tgt]
            key_data['table_tgt'] = table_tgt
            key_data['col_tgt'] = col_name_tgt
            foreign_keys.append(key_data)
        return (db_id, table_names, column_names, primary_keys, foreign_keys, type_for_column_for_table)

    @staticmethod
    def get_db_folder(db_path, db_id):
        """Return the folder that contains the sqlite files for *db_id*."""
        return os.path.join(db_path, db_id)

    def load_table_data(self, db_path):
        """Load table content from the main sqlite file (idempotent)."""
        if self.data_loaded:
            return
        db_id = self.db_id
        self.db_path = db_path
        print(f'Loading {db_id} from {db_path}')
        self.sqlite_file = os.path.join(self.get_db_folder(db_path, db_id), f'{db_id}.sqlite')
        (tbl_data, table_names, col_names, type_for_column_for_table, foreign_keys_from_data) = self.load_data_from_sqlite(self.sqlite_file)
        self.add_foreign_keys(foreign_keys_from_data)
        self.type_for_column_for_table = type_for_column_for_table
        self.table_data = tbl_data
        self.update_self_to_loaded_data(tbl_data, table_names, col_names, type_for_column_for_table, foreign_keys_from_data)
        self.data_loaded = True

    @staticmethod
    def load_data_from_sqlite(sqlite_file):
        """Read tables, column types, foreign keys and rows from a sqlite file.

        SQLite type names are normalized to TEXT / INTEGER / FLOAT.
        """
        conn = sqlite3.connect(sqlite_file)
        conn.text_factory = (lambda b: b.decode(errors='ignore'))
        c = conn.cursor()
        query_get_all_tables = "SELECT name FROM sqlite_master WHERE type='table'"
        c.execute(query_get_all_tables)
        table_names = c.fetchall()
        table_names = [t[0] for t in table_names]
        tbl_data = {}
        col_names = {}
        foreign_keys_from_data = []
        type_for_column_for_table = {}
        for tbl_name in table_names:
            c.execute(f'PRAGMA table_info({tbl_name})')
            col_data = c.fetchall()
            col_names[tbl_name] = [t[1] for t in col_data]
            type_for_column_for_table[tbl_name] = {}
            for (col_name, t) in zip(col_names[tbl_name], col_data):
                type_for_column_for_table[tbl_name][col_name] = t[2]
                # Normalize the declared sqlite type to a canonical name.
                if ('char' in type_for_column_for_table[tbl_name][col_name].lower()):
                    type_for_column_for_table[tbl_name][col_name] = 'TEXT'
                if ('text' == type_for_column_for_table[tbl_name][col_name].lower()):
                    type_for_column_for_table[tbl_name][col_name] = 'TEXT'
                if any(((t in type_for_column_for_table[tbl_name][col_name].lower()) for t in ['number', 'int', 'bit'])):
                    type_for_column_for_table[tbl_name][col_name] = 'INTEGER'
                if any(((t in type_for_column_for_table[tbl_name][col_name].lower()) for t in ['float', 'real', 'numeric', 'decimal'])):
                    type_for_column_for_table[tbl_name][col_name] = 'FLOAT'
        for tbl_name in table_names:
            c.execute(f'PRAGMA foreign_key_list({tbl_name});')
            foreign_keys = c.fetchall()
            for key in foreign_keys:
                col_src = key[3]
                col_tgt = key[4]
                table_src = tbl_name
                table_tgt = key[2]
                # Skip dangling foreign keys referencing unknown columns.
                if ((col_src not in col_names[table_src]) or (col_tgt not in col_names[table_tgt])):
                    continue
                foreign_keys_from_data.append({'col_src': col_src, 'col_tgt': col_tgt, 'table_src': table_src, 'table_tgt': table_tgt})
        for tbl_name in table_names:
            c.execute(f'SELECT * FROM {tbl_name}')
            tbl_data[tbl_name] = c.fetchall()
        conn.close()
        return (tbl_data, table_names, col_names, type_for_column_for_table, foreign_keys_from_data)

    def load_test_table_data(self, db_path):
        """Load every extra *.sqlite in the db folder as a test schema."""
        if self.data_loaded_tests:
            return
        self.load_table_data(db_path)
        paths = sorted(glob.glob(os.path.join(self.get_db_folder(db_path, self.db_id), '*.sqlite')))
        self.test_schemas = []
        for p in paths:
            if (os.path.abspath(p) == os.path.abspath(self.sqlite_file)):
                continue
            print(f'Loading test database {p}')
            (tbl_data, table_names, col_names, type_for_column_for_table, foreign_keys_from_data) = self.load_data_from_sqlite(p)
            self.update_data_to_self(tbl_data, table_names, col_names, type_for_column_for_table)
            test_schema = DatabaseSchema(None, self.db_id, table_names, col_names, self.primary_keys, self.foreign_keys, type_for_column_for_table, tbl_data, self.column_key_in_table, self.column_used_with_keys)
            test_schema.sqlite_file = p
            self.test_schemas.append(test_schema)
        self.data_loaded_tests = True

    def execute_sql_query(self, sql_query):
        """Run *sql_query* against the loaded sqlite file and fetch all rows."""
        assert self.data_loaded, 'Data should be loaded into schema to enable SQL execution'
        conn = sqlite3.connect(self.sqlite_file)
        conn.text_factory = (lambda b: b.decode(errors='ignore'))
        c = conn.cursor()
        c.execute(sql_query)
        result = c.fetchall()
        conn.close()
        return result

    def update_self_to_loaded_data(self, tbl_data, table_names, col_names, type_for_column_for_table, foreign_keys_from_data):
        """Reconcile the schema with freshly loaded data.

        Drops primary keys whose column values are not unique, synthesizes a
        ``<table>_id`` primary key for tables lacking one, and rebuilds the
        key-usage indices.
        """
        assert (sorted(self.table_names) == sorted(table_names)), f'Table names loaded from database file do not match the ones in the schema file: {table_names} and {self.table_names}'
        for t in table_names:
            assert (self.column_names[t] == col_names[t]), f'Column names of {t} from the database are not consistent with the ones from the schema: {col_names[t]} and {self.column_names[t]}'
        for tbl_name in table_names:
            if (tbl_name not in self.primary_keys):
                continue
            col_name = self.primary_keys[tbl_name]
            # A declared primary key whose values are not unique is dropped.
            if (not self.check_column_is_key(tbl_name, col_name, self.column_names[tbl_name], self.table_data[tbl_name])):
                del self.primary_keys[tbl_name]
        for tbl_name in table_names:
            if (tbl_name not in self.primary_keys):
                # Synthesize an integer key column, avoiding name collisions.
                cols = self.column_names[tbl_name]
                key_name = f'{tbl_name}_id'
                if (key_name in cols):
                    key_name_base = key_name
                    i = 0
                    while (key_name in cols):
                        key_name = f'{key_name_base}_{i}'
                        i += 1
                self.column_names[tbl_name] = ([key_name] + self.column_names[tbl_name])
                self.primary_keys[tbl_name] = key_name
                self.type_for_column_for_table[tbl_name][key_name] = 'INTEGER'
                old_content = self.table_data[tbl_name]
                self.table_data[tbl_name] = []
                for (i, row) in enumerate(old_content):
                    self.table_data[tbl_name].append(tuple(([i] + list(row))))
                # FIX: typo in message ('now' -> 'not').
                assert (tbl_name in self.primary_keys), f'Could not find a primary key in database {self.db_id} table {tbl_name}, cols: {cols}'
        self.column_key_in_table = {}
        self.column_used_with_keys = {}
        for (tbl_name, col_name) in self.primary_keys.items():
            self.column_key_in_table[tbl_name] = [col_name]
            self.column_used_with_keys[tbl_name] = [col_name]
        for key_data in self.foreign_keys:
            table_src = key_data['table_src']
            col_name_src = key_data['col_src']
            table_tgt = key_data['table_tgt']
            col_name_tgt = key_data['col_tgt']
            if (col_name_tgt not in self.column_used_with_keys[table_tgt]):
                self.column_used_with_keys[table_tgt].append(col_name_tgt)
                if self.check_column_is_key(table_tgt, col_name_tgt, self.column_names[table_tgt], self.table_data[table_tgt]):
                    self.column_key_in_table[table_tgt].append(col_name_tgt)
            if (col_name_src not in self.column_used_with_keys[table_src]):
                self.column_used_with_keys[table_src].append(col_name_src)

    @staticmethod
    def check_column_is_key(tbl_name, col_name, column_names, table_content):
        """Return True when every value of the column is distinct (a key)."""
        i_col = column_names.index(col_name)
        col_data = [row[i_col] for row in table_content]
        return (len(set(col_data)) == len(col_data))

    def update_data_to_self(self, tbl_data, table_names, col_names, type_for_column_for_table):
        """Adapt externally loaded data to this schema (mirror of the above).

        When the schema carries a synthesized key column that the data lacks,
        the data gets the extra enumerated key column prepended in place.
        """
        assert (sorted(self.table_names) == sorted(table_names)), f'Table names loaded from database file do not match the ones in the schema file: {table_names} and {self.table_names}'
        for tbl_name in table_names:
            if (len(self.column_names[tbl_name]) == len(col_names[tbl_name])):
                assert (self.column_names[tbl_name] == col_names[tbl_name]), f'Column names of {tbl_name} from the database are not consistent with the ones from the schema: {col_names[tbl_name]} and {self.column_names[tbl_name]}'
                assert (self.type_for_column_for_table[tbl_name] == type_for_column_for_table[tbl_name]), f'Column types of {tbl_name} from the database are not consistent with the ones from the schema: {type_for_column_for_table[tbl_name]} and {self.type_for_column_for_table[tbl_name]}'
                assert self.check_column_is_key(tbl_name, self.primary_keys[tbl_name], col_names[tbl_name], tbl_data[tbl_name]), f'Primary key {self.primary_keys[tbl_name]} of {tbl_name} is not a key'
            else:
                assert (self.column_names[tbl_name][1:] == col_names[tbl_name]), f'Column names of {tbl_name} from the database are not consistent with the ones from the schema: {col_names[tbl_name]} and {self.column_names[tbl_name]}'
                assert (tbl_name in self.primary_keys), f'Do not have primary key for table {tbl_name}'
                key_name = self.primary_keys[tbl_name]
                assert (key_name == self.column_names[tbl_name][0]), f'Primary key mismatch in table {tbl_name}'
                col_names[tbl_name] = ([key_name] + col_names[tbl_name])
                type_for_column_for_table[tbl_name][key_name] = self.type_for_column_for_table[tbl_name][key_name]
                assert (self.type_for_column_for_table[tbl_name] == type_for_column_for_table[tbl_name]), f'Column types of {tbl_name} from the database are not consistent with the ones from the schema: {type_for_column_for_table[tbl_name]} and {self.type_for_column_for_table[tbl_name]}'
                old_content = tbl_data[tbl_name]
                tbl_data[tbl_name] = []
                for (i, row) in enumerate(old_content):
                    tbl_data[tbl_name].append(tuple(([i] + list(row))))
        for key_data in self.foreign_keys:
            table_src = key_data['table_src']
            col_name_src = key_data['col_src']
            table_tgt = key_data['table_tgt']
            col_name_tgt = key_data['col_tgt']
            assert self.check_column_is_key(table_tgt, col_name_tgt, col_names[table_tgt], tbl_data[table_tgt]), f'Target of a foreign key {table_src}{col_name_src}->{table_tgt}:{col_name_tgt} is not a key'
class CargoInfo():
    """Serializable snapshot of a cargo entry: item type id and amount."""

    def __init__(self, itemID, amount):
        self.itemID = itemID
        self.amount = amount

    # BUG FIX: this alternate constructor takes `cls` but was missing
    # @classmethod, so calls received the wrong object as the first argument.
    @classmethod
    def fromCargo(cls, cargo):
        """Build a CargoInfo from a Cargo object; None passes through."""
        if cargo is None:
            return None
        return cls(itemID=cargo.itemID, amount=cargo.amount)

    def toCargo(self):
        """Re-create a full Cargo object via the Market item registry."""
        item = Market.getInstance().getItem(self.itemID)
        cargo = Cargo(item)
        cargo.amount = self.amount
        return cargo

    def __repr__(self):
        return makeReprStr(self, ['itemID', 'amount'])
class CountingIterator(object):
    # Wraps a sized iterable and counts consumed elements; supports bounded
    # iteration (take), fast-forward (skip) and non-consuming has_next().
    # All methods share one generator (`self.itr`), so state is order-sensitive.
    def __init__(self, iterable, start=0):
        self.iterable = iterable
        self.count = start  # number of elements consumed so far
        self.itr = iter(self)  # the single shared generator driven by __next__/skip
        self.len = (start + len(iterable))  # upper bound on `count`; may shrink via take()
    def __len__(self):
        return self.len
    def __iter__(self):
        for x in self.iterable:
            # Stop early once the (possibly truncated) bound has been reached.
            if (self.count >= self.len):
                return
            self.count += 1
            (yield x)
    def __next__(self):
        return next(self.itr)
    def has_next(self):
        # More elements remain iff fewer than `len` have been consumed.
        return (self.count < len(self))
    def skip(self, num_to_skip):
        # islice with start == stop consumes `num_to_skip` items but yields
        # nothing; the default arg to next() absorbs exhaustion. Returns self
        # for chaining.
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
        return self
    def take(self, n):
        # Truncate iteration to at most `n` total elements (including `start`).
        self.len = min(self.len, n)
def _get_quadratic_model(xs: List[np.ndarray], ys: List[float], xopt: np.ndarray) -> Pipeline:
    """Fit a quadratic surrogate of *ys* over inputs centered at *xopt*.

    The pipeline expands inputs to degree-2 polynomial features and fits a
    linear regression without an intercept (the polynomial expansion already
    supplies the constant term).
    """
    pipeline = Pipeline([
        ('poly', PolynomialFeatures(degree=2)),
        ('linear_model', LinearRegression(fit_intercept=False)),
    ])
    centered_xs = [(x - xopt) for x in xs]
    return pipeline.fit(centered_xs, ys)
# FIX: restored the decorator '@' (a bare `_MODELS.register_module()` call
# would discard the returned decorator and leave the class unregistered).
@_MODELS.register_module()
class SMPL(nn.Module):
    """SMPL body-model wrapper holding neutral, male and female sub-models.

    Vertices come from the underlying SMPL model; joints are regressed from
    the vertices with a fixed regressor matrix loaded from *joints_regressor*.
    """

    def __init__(self, smpl_path, joints_regressor):
        super().__init__()
        assert has_smpl, 'Please install smplx to use SMPL.'
        self.smpl_neutral = SMPL_(model_path=smpl_path, create_global_orient=False, create_body_pose=False, create_transl=False, gender='neutral')
        self.smpl_male = SMPL_(model_path=smpl_path, create_betas=False, create_global_orient=False, create_body_pose=False, create_transl=False, gender='male')
        self.smpl_female = SMPL_(model_path=smpl_path, create_betas=False, create_global_orient=False, create_body_pose=False, create_transl=False, gender='female')
        # [None, ...] adds the leading broadcast dim for batched matmul.
        joints_regressor = torch.tensor(np.load(joints_regressor), dtype=torch.float)[(None, ...)]
        self.register_buffer('joints_regressor', joints_regressor)
        self.num_verts = self.smpl_neutral.get_num_verts()
        self.num_joints = self.joints_regressor.shape[1]

    def smpl_forward(self, model, **kwargs):
        """Run one SMPL sub-model and regress joints from its vertices.

        Handles the empty-batch case explicitly (SMPL models cannot run on
        zero-sized batches).
        """
        betas = kwargs['betas']
        batch_size = betas.shape[0]
        device = betas.device
        output = {}
        if (batch_size == 0):
            output['vertices'] = betas.new_zeros([0, self.num_verts, 3])
            output['joints'] = betas.new_zeros([0, self.num_joints, 3])
        else:
            smpl_out = model(**kwargs)
            output['vertices'] = smpl_out.vertices
            output['joints'] = torch.matmul(self.joints_regressor.to(device), output['vertices'])
        return output

    def get_faces(self):
        """Return the triangle faces of the (shared) SMPL topology."""
        return self.smpl_neutral.faces

    def forward(self, betas, body_pose, global_orient, transl=None, gender=None):
        """Compute vertices and joints; dispatch per-sample by gender.

        gender: None -> neutral model for all samples; otherwise per-sample
        codes (< 0 neutral, == 0 male, == 1 female).
        """
        batch_size = betas.shape[0]
        pose2rot = (True if (body_pose.dim() == 2) else False)
        if ((batch_size > 0) and (gender is not None)):
            output = {'vertices': betas.new_zeros([batch_size, self.num_verts, 3]), 'joints': betas.new_zeros([batch_size, self.num_joints, 3])}
            mask = (gender < 0)
            _out = self.smpl_forward(self.smpl_neutral, betas=betas[mask], body_pose=body_pose[mask], global_orient=global_orient[mask], transl=(transl[mask] if (transl is not None) else None), pose2rot=pose2rot)
            output['vertices'][mask] = _out['vertices']
            output['joints'][mask] = _out['joints']
            mask = (gender == 0)
            _out = self.smpl_forward(self.smpl_male, betas=betas[mask], body_pose=body_pose[mask], global_orient=global_orient[mask], transl=(transl[mask] if (transl is not None) else None), pose2rot=pose2rot)
            output['vertices'][mask] = _out['vertices']
            output['joints'][mask] = _out['joints']
            mask = (gender == 1)
            # BUG FIX: this branch previously reused self.smpl_male, so the
            # female model was never applied to gender == 1 samples.
            _out = self.smpl_forward(self.smpl_female, betas=betas[mask], body_pose=body_pose[mask], global_orient=global_orient[mask], transl=(transl[mask] if (transl is not None) else None), pose2rot=pose2rot)
            output['vertices'][mask] = _out['vertices']
            output['joints'][mask] = _out['joints']
        else:
            return self.smpl_forward(self.smpl_neutral, betas=betas, body_pose=body_pose, global_orient=global_orient, transl=transl, pose2rot=pose2rot)
        return output
class CNN(nn.Module):
    """Multi-kernel 1D CNN over token embeddings (sentence-transformers style).

    Each kernel size contributes `out_channels` channels; outputs are
    concatenated, so the resulting embedding dimension is
    ``out_channels * len(kernel_sizes)``.
    """

    # NOTE: the mutable default for kernel_sizes is kept for interface
    # compatibility; it is only read, never mutated.
    def __init__(self, in_word_embedding_dimension: int, out_channels: int = 256, kernel_sizes: List[int] = [1, 3, 5]):
        nn.Module.__init__(self)
        self.config_keys = ['in_word_embedding_dimension', 'out_channels', 'kernel_sizes']
        self.in_word_embedding_dimension = in_word_embedding_dimension
        self.out_channels = out_channels
        self.kernel_sizes = kernel_sizes
        self.embeddings_dimension = (out_channels * len(kernel_sizes))
        self.convs = nn.ModuleList()
        in_channels = in_word_embedding_dimension
        for kernel_size in kernel_sizes:
            # 'same'-style padding so the sequence length is preserved
            # (kernel sizes are expected to be odd).
            padding_size = int(((kernel_size - 1) / 2))
            conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding_size)
            self.convs.append(conv)

    def forward(self, features):
        """Replace features['token_embeddings'] with the CNN output."""
        token_embeddings = features['token_embeddings']
        # Conv1d expects (batch, channels, seq): move the embedding dim.
        token_embeddings = token_embeddings.transpose(1, (- 1))
        vectors = [conv(token_embeddings) for conv in self.convs]
        out = torch.cat(vectors, 1).transpose(1, (- 1))
        features.update({'token_embeddings': out})
        return features

    def get_word_embedding_dimension(self) -> int:
        """Dimension of the produced token embeddings."""
        return self.embeddings_dimension

    def tokenize(self, text: str) -> List[int]:
        raise NotImplementedError()

    def save(self, output_path: str):
        """Persist config (JSON) and weights (pytorch_model.bin)."""
        with open(os.path.join(output_path, 'cnn_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    def get_config_dict(self):
        """Return the constructor arguments needed to rebuild this module."""
        return {key: self.__dict__[key] for key in self.config_keys}

    # BUG FIX: `load` takes no self and is invoked as `CNN.load(path)`;
    # it was missing the @staticmethod decorator.
    @staticmethod
    def load(input_path: str):
        """Rebuild a CNN module from a directory written by save()."""
        with open(os.path.join(input_path, 'cnn_config.json'), 'r') as fIn:
            config = json.load(fIn)
        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
        model = CNN(**config)
        model.load_state_dict(weights)
        return model
def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load'):
    """Load a model/optimizer/scheduler checkpoint and return its iteration.

    Reads the tracker file in the load directory to find the latest iteration
    (or a 'release' checkpoint), then restores model weights and — unless
    release/finetune/--no-load-* say otherwise — optimizer, scheduler and RNG
    state. Returns 0 when nothing could be (or should be) loaded.
    """
    args = get_args()
    load_dir = getattr(args, load_arg)
    # Unwrap DDP so we load into the bare module.
    if isinstance(model, torchDDP):
        model = model.module
    tracker_filename = get_checkpoint_tracker_filename(load_dir)
    if (not os.path.isfile(tracker_filename)):
        print_rank_0('WARNING: could not find the metadata file {} '.format(tracker_filename))
        print_rank_0('    will not load any checkpoints and will start from random')
        return 0
    # The tracker file holds either an iteration number or the word 'release'.
    iteration = 0
    release = False
    with open(tracker_filename, 'r') as f:
        metastring = f.read().strip()
        try:
            iteration = int(metastring)
        except ValueError:
            release = (metastring == 'release')
            if (not release):
                print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(tracker_filename))
                sys.exit()
    assert ((iteration > 0) or release), 'error parsing metadata file {}'.format(tracker_filename)
    checkpoint_name = get_checkpoint_name(load_dir, iteration, release)
    if (mpu.get_data_parallel_rank() == 0):
        print('global rank {} is loading checkpoint {}'.format(torch.distributed.get_rank(), checkpoint_name))
    try:
        state_dict = torch.load(checkpoint_name, map_location='cpu')
    except ModuleNotFoundError:
        # Checkpoints from the old code layout pickled 'fp16.loss_scaler';
        # alias it temporarily so unpickling succeeds.
        print_rank_0(' > deserializing using the old code structure ...')
        sys.modules['fp16.loss_scaler'] = sys.modules['megatron.fp16.loss_scaler']
        state_dict = torch.load(checkpoint_name, map_location='cpu')
        sys.modules.pop('fp16.loss_scaler', None)
    except BaseException:
        print_rank_0('could not load the checkpoint')
        sys.exit()
    if (args.finetune or release):
        iteration = 0
    else:
        try:
            iteration = state_dict['iteration']
        except KeyError:
            try:
                # Backward compatible with older checkpoints.
                iteration = state_dict['total_iters']
            except KeyError:
                print_rank_0('A metadata file exists but unable to load iteration from checkpoint {}, exiting'.format(checkpoint_name))
                sys.exit()
    if ('args' in state_dict):
        checkpoint_args = state_dict['args']
        check_checkpoint_args(checkpoint_args)
    else:
        print_rank_0('could not find arguments in the checkpoint ...')
    model.load_state_dict(state_dict['model'])
    if ((not release) and (not args.finetune) and (not args.no_load_optim)):
        try:
            if (optimizer is not None):
                optimizer.load_state_dict(state_dict['optimizer'])
            if (lr_scheduler is not None):
                lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
        except KeyError:
            print_rank_0('Unable to load optimizer from checkpoint {}. Specify --no-load-optim or --finetune to prevent attempting to load the optimizer state, exiting ...'.format(checkpoint_name))
            sys.exit()
    if ((not release) and (not args.finetune) and (not args.no_load_rng)):
        try:
            random.setstate(state_dict['random_rng_state'])
            np.random.set_state(state_dict['np_rng_state'])
            torch.set_rng_state(state_dict['torch_rng_state'])
            torch.cuda.set_rng_state(state_dict['cuda_rng_state'])
            mpu.get_cuda_rng_tracker().set_states(state_dict['rng_tracker_states'])
        except KeyError:
            # BUG FIX: this message was copy-pasted from the optimizer branch
            # and wrongly talked about the optimizer state.
            print_rank_0('Unable to load rng state from checkpoint {}. Specify --no-load-rng or --finetune to prevent attempting to load the rng state, exiting ...'.format(checkpoint_name))
            sys.exit()
    torch.distributed.barrier()
    if (mpu.get_data_parallel_rank() == 0):
        print('  successfully loaded {}'.format(checkpoint_name))
    return iteration
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.