class OutputFilesPage(QWizardPage):
def __init__(self, parent=None):
super(OutputFilesPage, self).__init__(parent)
self.setTitle('Output Files')
self.setSubTitle('Specify where you want the wizard to put the generated skeleton code.')
self.setPixmap(QWizard.LogoPixmap, QPixmap(':/images/logo3.png'))
outputDirLabel = QLabel('&Output directory:')
self.outputDirLineEdit = QLineEdit()
outputDirLabel.setBuddy(self.outputDirLineEdit)
headerLabel = QLabel('&Header file name:')
self.headerLineEdit = QLineEdit()
headerLabel.setBuddy(self.headerLineEdit)
implementationLabel = QLabel('&Implementation file name:')
self.implementationLineEdit = QLineEdit()
implementationLabel.setBuddy(self.implementationLineEdit)
self.registerField('outputDir*', self.outputDirLineEdit)
self.registerField('header*', self.headerLineEdit)
self.registerField('implementation*', self.implementationLineEdit)
layout = QGridLayout()
layout.addWidget(outputDirLabel, 0, 0)
layout.addWidget(self.outputDirLineEdit, 0, 1)
layout.addWidget(headerLabel, 1, 0)
layout.addWidget(self.headerLineEdit, 1, 1)
layout.addWidget(implementationLabel, 2, 0)
layout.addWidget(self.implementationLineEdit, 2, 1)
self.setLayout(layout)
def initializePage(self):
className = self.field('className')
self.headerLineEdit.setText((className.lower() + '.h'))
self.implementationLineEdit.setText((className.lower() + '.cpp'))
self.outputDirLineEdit.setText(QDir.toNativeSeparators(QDir.tempPath()))

class Solution(object):
def isValidBST(self, root):
return self.isValid_helper(root, float('-inf'), float('inf'))
def isValid_helper(self, root, minVal, maxVal):
if (root is None):
return True
if ((root.val >= maxVal) or (root.val <= minVal)):
return False
return (self.isValid_helper(root.left, minVal, root.val) and self.isValid_helper(root.right, root.val, maxVal))

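# Hypothetical usage sketch (not part of the original snippet): assumes a
# minimal TreeNode with val/left/right attributes, as in typical BST problems.
class TreeNode(object):
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(2, TreeNode(1), TreeNode(3))
assert Solution().isValidBST(root) is True
# A left child larger than its parent violates the (minVal, maxVal) window.
assert Solution().isValidBST(TreeNode(1, TreeNode(2))) is False
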
def selfies_to_hot(selfie, largest_selfie_len, alphabet):
symbol_to_int = dict(((c, i) for (i, c) in enumerate(alphabet)))
selfie += ('[nop]' * (largest_selfie_len - sf.len_selfies(selfie)))
symbol_list = sf.split_selfies(selfie)
integer_encoded = [symbol_to_int[symbol] for symbol in symbol_list]
onehot_encoded = list()
for index in integer_encoded:
letter = ([0] * len(alphabet))
letter[index] = 1
onehot_encoded.append(letter)
return (integer_encoded, np.array(onehot_encoded))

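# Hypothetical usage sketch (not part of the original snippet): assumes the
# `selfies` package imported as `sf` and numpy as `np`, and that the alphabet
# covers every symbol in the input, including the '[nop]' padding token.
import numpy as np
import selfies as sf

alphabet = ['[nop]', '[C]', '[O]']
ints, onehot = selfies_to_hot('[C][O]', largest_selfie_len=4, alphabet=alphabet)
print(ints)          # [1, 2, 0, 0] after '[nop]' padding
print(onehot.shape)  # (4, 3): one row per symbol, one column per alphabet entry
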
def process(input_json: str, output_file: str, output_json: str=None, threshold: int=1, keep_punctuation: bool=False, character_level: bool=False, retokenize: bool=False, host_address: str='', zh: bool=True):
logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info('Build Vocab')
vocabulary = build_vocab(input_json=input_json, output_json=output_json, threshold=threshold, keep_punctuation=keep_punctuation, host_address=host_address, character_level=character_level, retokenize=retokenize, zh=zh)
pickle.dump(vocabulary, open(output_file, 'wb'))
logging.info('Total vocabulary size: {}'.format(len(vocabulary)))
logging.info("Saved vocab to '{}'".format(output_file)) |
def test_kafka_batch_npartitions():
j1 = random.randint(0, 10000)
ARGS1 = {'bootstrap.servers': 'localhost:9092', 'group.id': ('streamz-test%i' % j1), 'enable.auto.commit': False, 'auto.offset.reset': 'earliest'}
j2 = (j1 + 1)
ARGS2 = {'bootstrap.servers': 'localhost:9092', 'group.id': ('streamz-test%i' % j2), 'enable.auto.commit': False, 'auto.offset.reset': 'earliest'}
with kafka_service() as kafka:
(kafka, TOPIC) = kafka
TOPIC = 'test-partitions'
subprocess.call(shlex.split('docker exec streamz-kafka /opt/kafka_2.11-0.10.1.0/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 2 --topic test-partitions'))
for i in range(10):
if ((i % 2) == 0):
kafka.produce(TOPIC, (b'value-%d' % i), partition=0)
else:
kafka.produce(TOPIC, (b'value-%d' % i), partition=1)
kafka.flush()
with pytest.raises(ValueError):
stream1 = Stream.from_kafka_batched(TOPIC, ARGS1, asynchronous=True, npartitions=0)
stream1.gather().sink_to_list()
stream1.start()
stream2 = Stream.from_kafka_batched(TOPIC, ARGS1, asynchronous=True, npartitions=1)
out2 = stream2.gather().sink_to_list()
stream2.start()
wait_for((lambda : stream2.upstream.started), 10, 0.1)
wait_for((lambda : ((len(out2) == 1) and (len(out2[0]) == 5))), 10, 0.1)
stream2.upstream.stopped = True
stream3 = Stream.from_kafka_batched(TOPIC, ARGS2, asynchronous=True, npartitions=4)
out3 = stream3.gather().sink_to_list()
stream3.start()
wait_for((lambda : stream3.upstream.started), 10, 0.1)
wait_for((lambda : ((len(out3) == 2) and ((len(out3[0]) + len(out3[1])) == 10))), 10, 0.1)
stream3.upstream.stopped = True

def configure_logging(config: Configuration, debug: bool) -> None:
logging.captureWarnings(capture=True)
if debug:
logging_level = logging.DEBUG
warnings.simplefilter('always')
else:
logging_level = logging.INFO
formatter: logging.Formatter
if (not sys.stdin.isatty()):
formatter = CustomJsonFormatter('%(levelname)s %(message)s %(funcName)s %(lineno)d %(module)s %(name)s %(pathname)s %(process)d %(processName)s %(thread)d %(threadName)s')
else:
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.setLevel(logging_level)
root_logger.addHandler(handler)
if (_is_containerized() and (not _has_PID1_parent())):
file_handler = logging.FileHandler('/proc/1/fd/1', mode='w')
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
if config.has_logging_options:
logging.config.fileConfig(config.filename)

def test_access_token(initialized_db):
user = model.user.get_user('devtable')
token = create_token(user, 'Some token')
assert (token.last_accessed is None)
token = access_valid_token(get_full_token_string(token))
assert (token.last_accessed is not None)
revoke_token(token)
assert (access_valid_token(get_full_token_string(token)) is None)

class TagListWrapper(abc.Mapping):
def __init__(self, taglist, merge=False):
self._list = taglist
self._merge = merge
def __len__(self):
return self._list.n_tags()
def __iter__(self):
for i in range(len(self)):
(yield self._list.nth_tag_name(i))
def __getitem__(self, key):
if (not Gst.tag_exists(key)):
raise KeyError
values = []
index = 0
while True:
value = self._list.get_value_index(key, index)
if (value is None):
break
values.append(value)
index += 1
if (not values):
raise KeyError
if self._merge:
try:
return ' - '.join(values)
except TypeError:
return values[0]
return values

class TestPetPhotoEndpoint(BaseTestPetstore):
def test_get_valid(self, client, data_gif):
client.cookies.set('user', '1')
headers = {'Authorization': 'Basic testuser', 'Api-Key': self.api_key_encoded}
response = client.get('/v1/pets/1/photo', headers=headers)
assert (response.content == data_gif)
assert (response.status_code == 200)
def test_post_valid(self, client, data_gif):
client.cookies.set('user', '1')
content_type = 'image/gif'
headers = {'Authorization': 'Basic testuser', 'Api-Key': self.api_key_encoded, 'Content-Type': content_type}
response = client.post('/v1/pets/1/photo', headers=headers, content=data_gif)
assert (not response.text)
assert (response.status_code == 201)

def _stringify_obj(obj: Any) -> str:
if ((inspect.isbuiltin(obj) and (obj.__self__ is not None)) or isinstance(obj, types.MethodType)):
return f'{_stringify_obj(obj.__self__)}.{obj.__name__}'
elif (hasattr(obj, 'decorator') and hasattr(obj, 'instance')):
if hasattr(obj.instance, '__name__'):
cls = obj.instance
else:
cls = type(obj.instance)
return f'{_stringify_obj(cls)}.{obj.decorator.fn.__name__}'
elif isinstance(obj, super):
return ('super(%s, self)' % _stringify_obj(obj.__self_class__))
else:
return f'{obj.__module__}.{obj.__name__}'

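# Hypothetical illustration (not part of the original snippet): how the
# branches resolve for a plain function and for a builtin bound to a class.
import json

assert _stringify_obj(json.dumps) == 'json.dumps'          # fallback branch
assert _stringify_obj(dict.fromkeys) == 'builtins.dict.fromkeys'  # __self__ recursion
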
class AMPTrainer(SimpleTrainer):
def run_step(self):
assert self.model.training, '[AMPTrainer] model was changed to eval mode!'
assert torch.cuda.is_available(), '[AMPTrainer] CUDA is required for AMP training!'
start = time.perf_counter()
data = next(self._data_loader_iter)
data1 = next(self._data_loader_iter1)
data_time = (time.perf_counter() - start)
loss_dict = self.model(data, data1)
losses = sum(loss_dict.values())
self.optimizer.zero_grad()
with amp.scale_loss(losses, self.optimizer) as scaled_loss:
scaled_loss.backward()
self._write_metrics(loss_dict, data_time)
self.optimizer.step()

def find_all_conv_bn_with_activation(model: torch.nn.Module, input_shape: Tuple) -> Dict:
device = utils.get_device(model)
inp_tensor_list = utils.create_rand_tensors_given_shapes(input_shape, device)
connected_graph = ConnectedGraph(model, inp_tensor_list)
return find_all_conv_bn_with_activation_in_graph(connected_graph)

def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if (args.work_dir is not None):
cfg.work_dir = args.work_dir
elif (cfg.get('work_dir', None) is None):
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
loader_cfg = {**dict(seed=cfg.get('seed'), drop_last=False, dist=distributed), **({} if (torch.__version__ != 'parrots') else dict(prefetch_num=2, pin_memory=False)), **dict(((k, cfg.data[k]) for k in ['seed', 'prefetch_num', 'pin_memory', 'persistent_workers'] if (k in cfg.data)))}
test_loader_cfg = {**loader_cfg, **dict(shuffle=False, drop_last=False), **dict(workers_per_gpu=cfg.data.get('workers_per_gpu', 1)), **dict(samples_per_gpu=cfg.data.get('samples_per_gpu', 1)), **cfg.data.get('test_dataloader', {})}
data_loader = build_dataloader(dataset, **test_loader_cfg)
model = build_posenet(cfg.model)
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if (not distributed):
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
(rank, _) = get_dist_info()
eval_config = cfg.get('evaluation', {})
eval_config = merge_configs(eval_config, dict(metric=args.eval))
if (rank == 0):
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
results = dataset.evaluate(outputs, cfg.work_dir, **eval_config)
for (k, v) in sorted(results.items()):
print(f'{k}: {v}')

class DeformNet(nn.Module):
def __init__(self, n_cat=6, nv_prior=1024):
super(DeformNet, self).__init__()
self.n_cat = n_cat
self.instance_geometry = nn.Sequential(nn.Conv1d(3, 64, 1), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.ReLU())
self.instance_global = nn.Sequential(nn.Conv1d(64, 128, 1), nn.ReLU(), nn.Conv1d(128, 1024, 1), nn.ReLU(), nn.AdaptiveAvgPool1d(1))
self.category_local = nn.Sequential(nn.Conv1d(3, 64, 1), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.ReLU())
self.category_global = nn.Sequential(nn.Conv1d(64, 128, 1), nn.ReLU(), nn.Conv1d(128, 1024, 1), nn.ReLU(), nn.AdaptiveAvgPool1d(1))
self.assignment = nn.Sequential(nn.Conv1d(2112, 512, 1), nn.ReLU(), nn.Conv1d(512, 256, 1), nn.ReLU(), nn.Conv1d(256, nv_prior, 1))
self.deformation = nn.Sequential(nn.Conv1d(2112, 512, 1), nn.ReLU(), nn.Conv1d(512, 256, 1), nn.ReLU(), nn.Conv1d(256, 3, 1))
self.deformation[4].weight.data.normal_(0, 0.0001)
def forward(self, points, prior):
(bs, n_pts) = points.size()[:2]
nv = prior.size()[1]
points = points.permute(0, 2, 1)
inst_local = self.instance_geometry(points)
inst_global = self.instance_global(inst_local)
cat_prior = prior.permute(0, 2, 1)
cat_local = self.category_local(cat_prior)
cat_global = self.category_global(cat_local)
assign_feat = torch.cat((inst_local, inst_global.repeat(1, 1, n_pts), cat_global.repeat(1, 1, n_pts)), dim=1)
assign_mat = self.assignment(assign_feat)
assign_mat = assign_mat.permute(0, 2, 1).contiguous()
deform_feat = torch.cat((cat_local, cat_global.repeat(1, 1, nv), inst_global.repeat(1, 1, nv)), dim=1)
deltas = self.deformation(deform_feat)
deltas = deltas.permute(0, 2, 1).contiguous()
return (assign_mat, deltas)

def test_switch_step_calls_cof():
with pytest.raises(Call) as err:
run_step(Context({'switch': [{'case': False, 'call': 'sg1'}, {'case': True, 'call': 'sg2'}]}))
cof = err.value
assert isinstance(cof, Call)
assert (cof.groups == ['sg2'])
assert (cof.success_group is None)
assert (cof.failure_group is None)
assert (cof.original_config == ('switch', [{'case': False, 'call': 'sg1'}, {'case': True, 'call': 'sg2'}]))

class Iterator():
def __init__(self, data):
self.data = data
self.idx = 0
def hasNext(self) -> bool:
return (len(self.data) > self.idx)
def next(self):
if (len(self.data) > self.idx):
temp = self.data[self.idx]
self.idx += 1
return temp
else:
return None

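# Hypothetical usage sketch (not part of the original snippet): hasNext/next
# walk the underlying sequence once, returning None when exhausted.
it = Iterator([1, 2, 3])
collected = []
while it.hasNext():
    collected.append(it.next())
assert collected == [1, 2, 3]
assert it.next() is None
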
class Recorder():
def __init__(self, metrics):
self.metrics = metrics
self.metric2sum = {}
self.n_records = 0
self.reset()
def reset(self):
self.n_records = 0
for metric in self.metrics:
self.metric2sum[metric] = 0.0
def record(self, n_records, values):
self.n_records += n_records
for (k, v) in zip(self.metrics, values):
self.metric2sum[k] += (v * n_records)
def report_avg(self):
metric2avg = {}
for metric in self.metrics:
summ = self.metric2sum[metric]
avg = (summ / self.n_records)
metric2avg[metric] = avg
return metric2avg

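# Hypothetical usage sketch (not part of the original snippet): record() keeps
# running sums weighted by batch size, so report_avg() is sum(v * n) / total_n.
rec = Recorder(['loss', 'acc'])
rec.record(2, [1.0, 0.5])  # batch of 2
rec.record(1, [4.0, 1.0])  # batch of 1
avg = rec.report_avg()
assert avg['loss'] == (1.0 * 2 + 4.0 * 1) / 3  # 2.0
assert avg['acc'] == (0.5 * 2 + 1.0 * 1) / 3
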
class TTrueAudioFile(TestCase):
def setUp(self):
self.song = TrueAudioFile(get_data_path('silence-44-s.tta'))
def test_length(self):
assert (self.song('~#length') == pytest.approx(3.684, abs=0.001))
def test_audio_props(self):
assert (self.song('~#samplerate') == 44100)
def test_format_codec(self):
assert (self.song('~format') == 'True Audio')
assert (self.song('~codec') == 'True Audio')
assert (self.song('~encoding') == '')

def load_pretrained_weights(p, model):
print('Loading pre-trained weights from {}'.format(p['pretraining']))
state_dict = torch.load(p['pretraining'], map_location='cpu')['model']
new_state = {}
for (k, v) in state_dict.items():
if k.startswith('module.model_q.'):
new_state[k.rsplit('module.model_q.')[1]] = v
msg = model.load_state_dict(new_state, strict=False)
print('Loading state dict from checkpoint')
print('Warning: This piece of code was only tested for linear classification')
print('Warning: Assertions should probably depend on model type (Segm/ContrastiveSegm)')
assert (set(msg[0]) == set(['decoder.4.weight', 'decoder.4.bias']))
assert (set(msg[1]) == set(['head.weight', 'head.bias', 'classification_head.weight']))
if ('deeplab' in p['head']):
model.decoder[4].weight.data.normal_(mean=0.0, std=0.01)
model.decoder[4].bias.data.zero_()

class DescribeCharacterStyle():
def it_knows_which_style_it_is_based_on(self, base_get_fixture):
(style, StyleFactory_, StyleFactory_calls, base_style_) = base_get_fixture
base_style = style.base_style
assert (StyleFactory_.call_args_list == StyleFactory_calls)
assert (base_style == base_style_)
def it_can_change_its_base_style(self, base_set_fixture):
(style, value, expected_xml) = base_set_fixture
style.base_style = value
assert (style._element.xml == expected_xml)
def it_provides_access_to_its_font(self, font_fixture):
(style, Font_, font_) = font_fixture
font = style.font
Font_.assert_called_once_with(style._element)
assert (font is font_)
@pytest.fixture(params=[('w:styles/(w:style{w:styleId=Foo},w:style/w:basedOn{w:val=Foo})', 1, 0), ('w:styles/(w:style{w:styleId=Foo},w:style/w:basedOn{w:val=Bar})', 1, (- 1)), ('w:styles/w:style', 0, (- 1))])
def base_get_fixture(self, request, StyleFactory_):
(styles_cxml, style_idx, base_style_idx) = request.param
styles = element(styles_cxml)
style = CharacterStyle(styles[style_idx])
if (base_style_idx >= 0):
base_style = styles[base_style_idx]
StyleFactory_calls = [call(base_style)]
expected_value = StyleFactory_.return_value
else:
StyleFactory_calls = []
expected_value = None
return (style, StyleFactory_, StyleFactory_calls, expected_value)
@pytest.fixture(params=[('w:style', 'Foo', 'w:style/w:basedOn{w:val=Foo}'), ('w:style/w:basedOn{w:val=Foo}', 'Bar', 'w:style/w:basedOn{w:val=Bar}'), ('w:style/w:basedOn{w:val=Bar}', None, 'w:style')])
def base_set_fixture(self, request, style_):
(style_cxml, base_style_id, expected_style_cxml) = request.param
style = CharacterStyle(element(style_cxml))
style_.style_id = base_style_id
base_style = (style_ if (base_style_id is not None) else None)
expected_xml = xml(expected_style_cxml)
return (style, base_style, expected_xml)
@pytest.fixture
def font_fixture(self, Font_, font_):
style = CharacterStyle(element('w:style'))
return (style, Font_, font_)
@pytest.fixture
def Font_(self, request, font_):
return class_mock(request, 'docx.styles.style.Font', return_value=font_)
@pytest.fixture
def font_(self, request):
return instance_mock(request, Font)
@pytest.fixture
def style_(self, request):
return instance_mock(request, BaseStyle)
@pytest.fixture
def StyleFactory_(self, request):
return function_mock(request, 'docx.styles.style.StyleFactory')

class TestOptimizer(unittest.TestCase):
def test_init(self):
params = [torch.nn.Parameter(torch.randn(2, 3, 4))]
try:
optimizer = Optimizer(torch.optim.Adam(params))
except Exception:
self.fail('__init__ failed.')
self.assertEqual(optimizer.max_grad_norm, 0)
def test_update(self):
params = [torch.nn.Parameter(torch.randn(2, 3, 4))]
optimizer = Optimizer(torch.optim.Adam(params, lr=1), max_grad_norm=5)
scheduler = StepLR(optimizer.optimizer, 1, gamma=0.1)
optimizer.set_scheduler(scheduler)
optimizer.update(10, 0)
optimizer.update(10, 1)
self.assertEqual(optimizer.optimizer.param_groups[0]['lr'], 0.1)
@mock.patch('torch.nn.utils.clip_grad_norm_')
def test_step(self, mock_clip_grad_norm):
params = [torch.nn.Parameter(torch.randn(2, 3, 4))]
optim = Optimizer(torch.optim.Adam(params), max_grad_norm=5)
optim.step()
mock_clip_grad_norm.assert_called_once()

class InverseEvalresp(FrequencyResponse):
respfile = String.T()
nslc_id = Tuple.T(4, String.T())
target = String.T(default='dis')
instant = Float.T()
def __init__(self, respfile, trace, target='dis', **kwargs):
FrequencyResponse.__init__(self, respfile=respfile, nslc_id=trace.nslc_id, instant=((trace.tmin + trace.tmax) / 2.0), target=target, **kwargs)
def evaluate(self, freqs):
(network, station, location, channel) = self.nslc_id
x = evalresp.evalresp(sta_list=station, cha_list=channel, net_code=network, locid=location, instant=self.instant, freqs=freqs, units=self.target.upper(), file=self.respfile, rtype='CS')
transfer = x[0][4]
return (1.0 / transfer)
def summary(self):
return 'inv_eresp'

@wp.kernel
def compute_grid_bound(model: MPMModelStruct, state: MPMStateStruct):
tid = wp.tid()
x = state.particle_q[tid]
fx = ((x[0] - (model.dx * 4.0)) * model.inv_dx)
fy = ((x[1] - (model.dx * 4.0)) * model.inv_dx)
fz = ((x[2] - (model.dx * 4.0)) * model.inv_dx)
ix = int(wp.floor(fx))
iy = int(wp.floor(fy))
iz = int(wp.floor(fz))
wp.atomic_min(state.grid_lower, 0, ix)
wp.atomic_min(state.grid_lower, 1, iy)
wp.atomic_min(state.grid_lower, 2, iz)
fx = ((x[0] + (model.dx * 4.0)) * model.inv_dx)
fy = ((x[1] + (model.dx * 4.0)) * model.inv_dx)
fz = ((x[2] + (model.dx * 4.0)) * model.inv_dx)
ix = int(wp.ceil(fx))
iy = int(wp.ceil(fy))
iz = int(wp.ceil(fz))
wp.atomic_max(state.grid_upper, 0, ix)
wp.atomic_max(state.grid_upper, 1, iy)
wp.atomic_max(state.grid_upper, 2, iz)

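# Hypothetical launch sketch (not part of the original snippet): warp kernels
# are dispatched with wp.launch, one thread per particle; the model/state
# objects and n_particles come from the surrounding simulation code.
# wp.launch(
#     kernel=compute_grid_bound,
#     dim=n_particles,
#     inputs=[model, state],
#     device='cuda:0',
# )
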
@dataclass
class ModelArguments:
model_name_or_path: str = field(default='google/vit-base-patch16-224-in21k', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'})

class PythonConsole(Gtk.ScrolledWindow):
def __init__(self, namespace=None, destroy_cb=None):
Gtk.ScrolledWindow.__init__(self)
self.destroy_cb = destroy_cb
self.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.set_shadow_type(Gtk.ShadowType.NONE)
self.view = Gtk.TextView()
add_css(self, 'scrolledwindow { padding: 6px; background-color: white; background-color: _view_bg;}')
self.view.modify_font(Pango.font_description_from_string('Monospace'))
self.view.set_editable(True)
self.view.set_wrap_mode(Gtk.WrapMode.CHAR)
self.add(self.view)
self.view.show()
buffer = self.view.get_buffer()
self.normal = buffer.create_tag('normal')
self.error = buffer.create_tag('error')
self.error.set_property('foreground', 'red')
self.command = buffer.create_tag('command')
self.command.set_property('foreground', 'blue')
self.__spaces_pattern = re.compile('^\\s+')
self.namespace = (namespace or {})
self.block_command = False
buffer.create_mark('input-line', buffer.get_end_iter(), True)
buffer.insert(buffer.get_end_iter(), '>>> ')
buffer.create_mark('input', buffer.get_end_iter(), True)
self.history = ['']
self.history_pos = 0
self.current_command = ''
self.namespace['__history__'] = self.history
self.stdout = OutFile(self, self.normal)
self.stderr = OutFile(self, self.error)
self.view.connect('key-press-event', self.__key_press_event_cb)
buffer.connect('mark-set', self.__mark_set_cb)
def __key_press_event_cb(self, view, event):
modifier_mask = Gtk.accelerator_get_default_mod_mask()
event_state = (event.state & modifier_mask)
if ((event.keyval == Gdk.KEY_d) and (event_state == Gdk.ModifierType.CONTROL_MASK)):
self.destroy()
elif ((event.keyval == Gdk.KEY_Return) and (event_state & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.SHIFT_MASK))):
buffer = view.get_buffer()
inp_mark = buffer.get_mark('input')
inp = buffer.get_iter_at_mark(inp_mark)
cur = buffer.get_end_iter()
line = buffer.get_text(inp, cur, True)
self.current_command = ((self.current_command + line) + '\n')
self.history_add(line)
cur = buffer.get_end_iter()
buffer.insert(cur, '\n... ')
cur = buffer.get_end_iter()
buffer.move_mark(inp_mark, cur)
spaces = re.match(self.__spaces_pattern, line)
if (spaces is not None):
buffer.insert(cur, line[spaces.start():spaces.end()])
else:
buffer.insert(cur, ' ')
cur = buffer.get_end_iter()
buffer.place_cursor(cur)
GLib.idle_add(self.scroll_to_end)
return True
elif (event.keyval == Gdk.KEY_Return):
buffer = view.get_buffer()
lin_mark = buffer.get_mark('input-line')
inp_mark = buffer.get_mark('input')
inp = buffer.get_iter_at_mark(inp_mark)
cur = buffer.get_end_iter()
line = buffer.get_text(inp, cur, True)
self.current_command = ((self.current_command + line) + '\n')
lin = buffer.get_iter_at_mark(lin_mark)
buffer.apply_tag(self.command, lin, cur)
buffer.insert(cur, '\n')
cur_strip = self.current_command.rstrip()
if (cur_strip.endswith(':') or ((self.current_command[(- 2):].strip() != '') and self.block_command)):
self.block_command = True
com_mark = '... '
elif cur_strip.endswith('\\'):
com_mark = '... '
else:
self.history_add(cur_strip)
self.__run(self.current_command)
self.current_command = ''
self.block_command = False
com_mark = '>>> '
cur = buffer.get_end_iter()
buffer.move_mark(lin_mark, cur)
buffer.insert(cur, com_mark)
cur = buffer.get_end_iter()
buffer.move_mark(inp_mark, cur)
if (com_mark == '... '):
spaces = re.match(self.__spaces_pattern, line)
if (spaces is not None):
buffer.insert(cur, line[spaces.start():spaces.end()])
if cur_strip.endswith(':'):
buffer.insert(cur, ' ')
buffer.place_cursor(cur)
GLib.idle_add(self.scroll_to_end)
return True
elif ((event.keyval == Gdk.KEY_c) and (event_state == Gdk.ModifierType.CONTROL_MASK)):
buffer = view.get_buffer()
lin_mark = buffer.get_mark('input-line')
inp_mark = buffer.get_mark('input')
cur = buffer.get_end_iter()
buffer.insert(cur, '\n')
com_mark = '>>> '
cur = buffer.get_end_iter()
buffer.move_mark(lin_mark, cur)
buffer.insert(cur, com_mark)
cur = buffer.get_end_iter()
buffer.move_mark(inp_mark, cur)
buffer.place_cursor(cur)
GLib.idle_add(self.scroll_to_end)
return True
elif ((event.keyval == Gdk.KEY_KP_Down) or (event.keyval == Gdk.KEY_Down)):
view.emit_stop_by_name('key_press_event')
self.history_down()
GLib.idle_add(self.scroll_to_end)
return True
elif ((event.keyval == Gdk.KEY_KP_Up) or (event.keyval == Gdk.KEY_Up)):
view.emit_stop_by_name('key_press_event')
self.history_up()
GLib.idle_add(self.scroll_to_end)
return True
elif ((event.keyval == Gdk.KEY_KP_Left) or (event.keyval == Gdk.KEY_Left) or (event.keyval == Gdk.KEY_BackSpace)):
buffer = view.get_buffer()
inp = buffer.get_iter_at_mark(buffer.get_mark('input'))
cur = buffer.get_iter_at_mark(buffer.get_insert())
return (inp.compare(cur) == 0)
elif (event.keyval == Gdk.KEY_Home):
buffer = view.get_buffer()
inp = buffer.get_iter_at_mark(buffer.get_mark('input'))
if (event_state == Gdk.ModifierType.SHIFT_MASK):
buffer.move_mark_by_name('insert', inp)
else:
buffer.place_cursor(inp)
return True
elif (((event.keyval == Gdk.KEY_Tab) or (event.keyval == Gdk.KEY_ISO_Left_Tab)) or ((event.keyval == Gdk.KEY_space) and ((event_state == (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.SHIFT_MASK)) or (event_state == Gdk.ModifierType.CONTROL_MASK)))):
buffer = view.get_buffer()
_inp_mark = buffer.get_mark('input')
_ins_mark = buffer.get_mark('insert')
_inp = buffer.get_iter_at_mark(_inp_mark)
ins = buffer.get_iter_at_mark(_ins_mark)
cmd_start = buffer.get_text(_inp, ins, True)
_identifiers_chars = takewhile((lambda x: (x.isalnum() or (x in {'_', '.'}))), reversed(cmd_start))
_idcs_len = len(list(_identifiers_chars))
ids_str = cmd_start[(- _idcs_len):]
is_shift = bool((event_state & Gdk.ModifierType.SHIFT_MASK))
comp_items = self.get_completion_items(ids_str, include_private=is_shift)
choice = None
if (len(comp_items) > 1):
comp_items.sort(key=(lambda x: (x[0].startswith('_'), x[0].lower())))
dialog = ListChoiceDialog(self.get_parent(), comp_items)
choice = dialog.run()
dialog.destroy()
elif (len(comp_items) == 1):
last = ids_str.split('.')[(- 1)]
if (last == comp_items[0][0]):
next_comp_items = self.get_completion_items((ids_str + '.'), include_private=is_shift)
if next_comp_items:
buffer.insert(ins, '.')
else:
choice = 0
elif ('\n' in self.current_command):
buffer.insert(ins, ' ')
if (isinstance(choice, int) and (choice >= 0)):
last = ids_str.split('.')[(- 1)]
insert_text = comp_items[choice][0]
if last:
insert_text = insert_text[len(last):]
buffer.insert(ins, insert_text)
return True
def __mark_set_cb(self, buffer, iter, name):
input = buffer.get_iter_at_mark(buffer.get_mark('input'))
pos = buffer.get_iter_at_mark(buffer.get_insert())
self.view.set_editable((pos.compare(input) != (- 1)))
def get_command_line(self):
buffer = self.view.get_buffer()
inp = buffer.get_iter_at_mark(buffer.get_mark('input'))
cur = buffer.get_end_iter()
return buffer.get_text(inp, cur, True)
def set_command_line(self, command):
buffer = self.view.get_buffer()
mark = buffer.get_mark('input')
inp = buffer.get_iter_at_mark(mark)
cur = buffer.get_end_iter()
buffer.delete(inp, cur)
buffer.insert(inp, command)
buffer.select_range(buffer.get_iter_at_mark(mark), buffer.get_end_iter())
self.view.grab_focus()
def history_add(self, line):
if (line.strip() != ''):
self.history_pos = len(self.history)
self.history[(self.history_pos - 1)] = line
self.history.append('')
def history_up(self):
if (self.history_pos > 0):
self.history[self.history_pos] = self.get_command_line()
self.history_pos -= 1
self.set_command_line(self.history[self.history_pos])
def history_down(self):
if (self.history_pos < (len(self.history) - 1)):
self.history[self.history_pos] = self.get_command_line()
self.history_pos += 1
self.set_command_line(self.history[self.history_pos])
def scroll_to_end(self):
iter = self.view.get_buffer().get_end_iter()
self.view.scroll_to_iter(iter, 0.0, False, 0.5, 0.5)
return False
def write(self, text, tag=None):
buf = self.view.get_buffer()
if (tag is None):
buf.insert(buf.get_end_iter(), text)
else:
buf.insert_with_tags(buf.get_end_iter(), text, tag)
GLib.idle_add(self.scroll_to_end)
def eval(self, command, display_command=False):
buffer = self.view.get_buffer()
lin = buffer.get_mark('input-line')
buffer.delete(buffer.get_iter_at_mark(lin), buffer.get_end_iter())
if isinstance(command, (list, tuple)):
for c in command:
if display_command:
self.write((('>>> ' + c) + '\n'), self.command)
self.__run(c)
else:
if display_command:
self.write((('>>> ' + c) + '\n'), self.command)
self.__run(command)
cur = buffer.get_end_iter()
buffer.move_mark_by_name('input-line', cur)
buffer.insert(cur, '>>> ')
cur = buffer.get_end_iter()
buffer.move_mark_by_name('input', cur)
self.view.scroll_to_iter(buffer.get_end_iter(), 0.0, False, 0.5, 0.5)
def __run(self, command):
(sys.stdout, self.stdout) = (self.stdout, sys.stdout)
(sys.stderr, self.stderr) = (self.stderr, sys.stderr)
try:
try:
r = eval(command, self.namespace, self.namespace)
if (r is not None):
print_(repr(r))
except SyntaxError:
exec(command, self.namespace)
except Exception:
if (hasattr(sys, 'last_type') and (sys.last_type == SystemExit)):
self.destroy()
else:
traceback.print_exc()
(sys.stdout, self.stdout) = (self.stdout, sys.stdout)
(sys.stderr, self.stderr) = (self.stderr, sys.stderr)
def get_completion_items(self, ids_str, include_private=False):
import inspect
def get_comp(obj, pre):
dir_result = (dir(obj) if (obj is not None) else self.namespace)
comp = []
for name in dir_result:
if ((pre and (not name.startswith(pre))) or ((not include_private) and name.startswith('__'))):
continue
if (obj is not None):
try:
f = getattr(obj, name)
except Exception:
continue
else:
f = self.namespace.get(name)
if (not callable(f)):
comp.append((name, ''))
else:
try:
spec = inspect.getfullargspec(f)
except TypeError:
spec = None
if spec:
sargs = []
arglen = (len(spec.args) if spec.args else 0)
deflen = (len(spec.defaults) if spec.defaults else 0)
noargs = (arglen - deflen)
for i in range(arglen):
if ((i == 0) and (spec.args[i] == 'self')):
continue
if (i < noargs):
sargs.append(spec.args[i])
else:
default_arg = repr(spec.defaults[(i - noargs)])
sargs.append(((spec.args[i] + '=') + default_arg))
details = ' ({})'.format(', '.join(sargs))
comp.append((name, details))
else:
comp.append((name, ' ()'))
return comp
comp = None
if ('.' in ids_str):
spl = ids_str.split('.')
var = None
for fname in spl[:(- 1)]:
if (var is None):
var = self.namespace.get(fname)
else:
try:
var = getattr(var, fname, None)
except Exception:
pass
if (var is None):
break
if (var is not None):
comp = get_comp(obj=var, pre=spl[(- 1)])
else:
comp = get_comp(obj=None, pre=ids_str)
return (comp or [])

class FigurePlot():
def __init__(self, x_axis_label, y_axis_label, title):
self.x_axis_label = x_axis_label
self.y_axis_label = y_axis_label
self.title = title
self.source = ColumnDataSource(data=dict(x=[], y=[]))
self.title_object = Title()
self.title_object.text = self.title
def update(self, new_x_coordinate, new_y_coordinate):
new_data = {'x': [new_x_coordinate], 'y': [new_y_coordinate]}
self.source.stream(new_data)
def update_title(self, new_title):
self.title_object.text = new_title
@staticmethod
def style(p):
p.title.align = 'center'
p.title.text_font_size = '14pt'
p.title.text_font = 'serif'
p.xaxis.axis_label_text_font_size = '12pt'
p.yaxis.axis_label_text_font_size = '12pt'
p.xaxis.major_label_text_font_size = '10pt'
p.yaxis.major_label_text_font_size = '10pt'
return p

class TestActivate():
expected_msg = "Humanize cannot determinate the default location of the 'locale' folder. You need to pass the path explicitly."
def test_default_locale_path_null__file__(self) -> None:
i18n = importlib.import_module('humanize.i18n')
i18n.__file__ = None
with pytest.raises(Exception) as excinfo:
i18n.activate('ru_RU')
assert (str(excinfo.value) == self.expected_msg)
def test_default_locale_path_undefined__file__(self) -> None:
i18n = importlib.import_module('humanize.i18n')
del i18n.__file__
with pytest.raises(Exception) as excinfo:
i18n.activate('ru_RU')
assert (str(excinfo.value) == self.expected_msg)

class FileFolderNavigator(GridBox):
@property
@editor_attribute_decorator('WidgetSpecific', 'Defines whether it is possible to select multiple items.', bool, {})
def multiple_selection(self):
return self._multiple_selection
@multiple_selection.setter
def multiple_selection(self, value):
self._multiple_selection = value
@property
@editor_attribute_decorator('WidgetSpecific', 'Defines the actual navigator location.', str, {})
def selection_folder(self):
return self._selection_folder
@selection_folder.setter
def selection_folder(self, value):
self.chdir(value)
@property
@editor_attribute_decorator('WidgetSpecific', 'Defines if files are selectable.', bool, {})
def allow_file_selection(self):
return self._allow_file_selection
@allow_file_selection.setter
def allow_file_selection(self, value):
self._allow_file_selection = value
@property
@editor_attribute_decorator('WidgetSpecific', 'Defines if folders are selectable.', bool, {})
def allow_folder_selection(self):
return self._allow_folder_selection
@allow_folder_selection.setter
def allow_folder_selection(self, value):
self._allow_folder_selection = value
def __init__(self, multiple_selection=False, selection_folder='.', allow_file_selection=True, allow_folder_selection=False, **kwargs):
super(FileFolderNavigator, self).__init__(**kwargs)
self.css_grid_template_columns = '30px auto 30px'
self.css_grid_template_rows = '30px auto'
self.define_grid([('button_back', 'url_editor', 'button_go'), ('items', 'items', 'items')])
self.multiple_selection = multiple_selection
self.allow_file_selection = allow_file_selection
self.allow_folder_selection = allow_folder_selection
self.selectionlist = []
self.currDir = ''
self.controlBack = Button('Up')
self.controlBack.onclick.connect(self.dir_go_back)
self.controlGo = Button('Go >>')
self.controlGo.onclick.connect(self.dir_go)
self.pathEditor = TextInput()
self.pathEditor.style['resize'] = 'none'
self.pathEditor.attributes['rows'] = '1'
self.append(self.controlBack, 'button_back')
self.append(self.pathEditor, 'url_editor')
self.append(self.controlGo, 'button_go')
self.itemContainer = Container(width='100%', height='100%')
self.append(self.itemContainer, key='items')
self.folderItems = list()
self.selection_folder = selection_folder
def get_selection_list(self):
if (self.allow_folder_selection and (not self.selectionlist)):
self.selectionlist.append(self.currDir)
return self.selectionlist
def populate_folder_items(self, directory):
def _sort_files(a, b):
if (os.path.isfile(a) and os.path.isdir(b)):
return 1
elif (os.path.isfile(b) and os.path.isdir(a)):
return (- 1)
else:
try:
if (a[0] == '.'):
a = a[1:]
if (b[0] == '.'):
b = b[1:]
return (1 if (a.lower() > b.lower()) else (- 1))
except (IndexError, ValueError):
return (1 if (a > b) else (- 1))
log.debug('FileFolderNavigator - populate_folder_items')
if pyLessThan3:
directory = directory.decode('utf-8')
l = os.listdir(directory)
l.sort(key=functools.cmp_to_key(_sort_files))
self._last_valid_path = directory
self.remove_child(self.itemContainer)
self.itemContainer = Container(width='100%', height='100%')
self.itemContainer.style.update({'overflow-y': 'scroll', 'overflow-x': 'hidden'})
for i in l:
full_path = os.path.join(directory, i)
is_folder = (not os.path.isfile(full_path))
if ((not is_folder) and (not self.allow_file_selection)):
continue
fi = FileFolderItem(full_path, i, is_folder)
fi.onclick.connect(self.on_folder_item_click)
fi.onselection.connect(self.on_folder_item_selected)
self.folderItems.append(fi)
self.itemContainer.append(fi)
self.append(self.itemContainer, key='items')
def dir_go_back(self, widget):
curpath = os.getcwd()
try:
os.chdir(self.pathEditor.get_text())
os.chdir('..')
self.chdir(os.getcwd())
except Exception as e:
self.pathEditor.set_text(self._last_valid_path)
log.error('error changing directory', exc_info=True)
os.chdir(curpath)
def dir_go(self, widget):
curpath = os.getcwd()
try:
os.chdir(self.pathEditor.get_text())
self.chdir(os.getcwd())
except Exception as e:
log.error('error going to directory', exc_info=True)
self.pathEditor.set_text(self._last_valid_path)
os.chdir(curpath)
def chdir(self, directory):
self._selection_folder = directory
curpath = os.getcwd()
log.debug(('FileFolderNavigator - chdir: %s' % directory))
for c in self.folderItems:
self.itemContainer.remove_child(c)
self.folderItems = []
self.selectionlist = []
os.chdir(directory)
directory = os.getcwd()
self.disable_refresh()
self.populate_folder_items(directory)
self.enable_refresh()
self.pathEditor.set_text(directory)
self.currDir = directory
os.chdir(curpath)
@decorate_set_on_listener('(self, emitter, selected_item, selection_list)')
@decorate_event
def on_folder_item_selected(self, folderitem):
if (folderitem.isFolder and (not self.allow_folder_selection)):
folderitem.set_selected(False)
self.on_folder_item_click(folderitem)
return (None, self.selectionlist)
if (not self.multiple_selection):
self.selectionlist = []
for c in self.folderItems:
c.set_selected(False)
folderitem.set_selected(True)
log.debug('FileFolderNavigator - on_folder_item_click')
f = os.path.join(self.pathEditor.get_text(), folderitem.get_text())
if (f in self.selectionlist):
self.selectionlist.remove(f)
else:
self.selectionlist.append(f)
return (folderitem, self.selectionlist)
@decorate_set_on_listener('(self, emitter, clicked_item)')
@decorate_event
def on_folder_item_click(self, folderitem):
log.debug('FileFolderNavigator - on_folder_item_dblclick')
f = os.path.join(self.pathEditor.get_text(), folderitem.get_text())
if (not os.path.isfile(f)):
self.chdir(f)
return (folderitem,)
def get_selected_filefolders(self):
return self.selectionlist

def test_export_data_access_groups(simple_project):
records = simple_project.export_records(export_data_access_groups=True)
for record in records:
assert ('redcap_data_access_group' in record)
records = simple_project.export_records()
for record in records:
assert (not ('redcap_data_access_group' in record))

class Config(object):
def __init__(self, dataset, embedding):
self.model_name = 'TextRCNN'
self.train_path = (dataset + '/data/train.txt')
self.dev_path = (dataset + '/data/dev.txt')
self.test_path = (dataset + '/data/test.txt')
self.class_list = [x.strip() for x in open((dataset + '/data/class.txt'), encoding='utf-8').readlines()]
self.vocab_path = (dataset + '/data/vocab.pkl')
self.save_path = (((dataset + '/saved_dict/') + self.model_name) + '.ckpt')
self.log_path = ((dataset + '/log/') + self.model_name)
self.embedding_pretrained = (torch.tensor(np.load(((dataset + '/data/') + embedding))['embeddings'].astype('float32')) if (embedding != 'random') else None)
self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
self.dropout = 1.0
self.require_improvement = 1000
self.num_classes = len(self.class_list)
self.n_vocab = 0
self.num_epochs = 10
self.batch_size = 128
self.pad_size = 32
self.learning_rate = 0.001
self.embed = (self.embedding_pretrained.size(1) if (self.embedding_pretrained is not None) else 300)
self.hidden_size = 256
self.num_layers = 1

class AutoModelForTokenClassification():
def __init__(self):
raise EnvironmentError('AutoModelForTokenClassification is designed to be instantiated using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or `AutoModelForTokenClassification.from_config(config)` methods.')
@classmethod
def from_config(cls, config):
if isinstance(config, CamembertConfig):
return CamembertForTokenClassification(config)
elif isinstance(config, DistilBertConfig):
return DistilBertForTokenClassification(config)
elif isinstance(config, BertConfig):
return BertForTokenClassification(config)
elif isinstance(config, XLNetConfig):
return XLNetForTokenClassification(config)
elif isinstance(config, RobertaConfig):
return RobertaForTokenClassification(config)
elif isinstance(config, XLMRobertaConfig):
return XLMRobertaForTokenClassification(config)
raise ValueError('Unrecognized configuration class {}'.format(config))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
if ('camembert' in pretrained_model_name_or_path):
return CamembertForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif ('distilbert' in pretrained_model_name_or_path):
return DistilBertForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif ('xlm-roberta' in pretrained_model_name_or_path):
return XLMRobertaForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif ('roberta' in pretrained_model_name_or_path):
return RobertaForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif ('bert' in pretrained_model_name_or_path):
return BertForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif ('xlnet' in pretrained_model_name_or_path):
return XLNetForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'xlnet', 'camembert', 'distilbert', 'xlm-roberta', 'roberta'".format(pretrained_model_name_or_path)) |
def read_tokenizer(lang_id, g2p_model='latest', device=None, use_lexicon=True):
lang_id = normalize_lang_id(lang_id)
if (lang_id in lang2tokenizer):
return lang2tokenizer[lang_id](lang_id=lang_id, g2p_model=g2p_model, device=device, use_lexicon=use_lexicon)
else:
return read_g2p_tokenizer(lang_id=lang_id, g2p_model=g2p_model, device=device)

class BaseFragmentBlender():
passes = []
def __init__(self):
self.device = get_shared().device
self.size = (0, 0)
self._combine_pass_pipeline = None
self._combine_pass_bind_group = None
self._texture_info = {}
usg = wgpu.TextureUsage
self._texture_info['color'] = (wgpu.TextureFormat.rgba8unorm_srgb, ((usg.RENDER_ATTACHMENT | usg.COPY_SRC) | usg.TEXTURE_BINDING))
self._texture_info['depth'] = (wgpu.TextureFormat.depth32float, (usg.RENDER_ATTACHMENT | usg.COPY_SRC))
self._texture_info['pick'] = (wgpu.TextureFormat.rgba16uint, (usg.RENDER_ATTACHMENT | usg.COPY_SRC))
def clear(self):
for key in self._texture_info.keys():
setattr(self, (key + '_clear'), True)
def clear_depth(self):
self.depth_clear = True
def ensure_target_size(self, size):
assert (len(size) == 2)
size = (size[0], size[1])
if (size == self.size):
return
self.size = size
tex_size = (size + (1,))
self._combine_pass_bind_group = None
for (name, (format, usage)) in self._texture_info.items():
wgpu_texture = self.device.create_texture(size=tex_size, usage=usage, dimension='2d', format=format)
setattr(self, (name + '_format'), format)
setattr(self, (name + '_tex'), wgpu_texture)
setattr(self, (name + '_view'), wgpu_texture.create_view())
setattr(self, (name + '_clear'), True)
def get_color_descriptors(self, pass_index):
return self.passes[pass_index].get_color_descriptors(self)
def get_color_attachments(self, pass_index):
return self.passes[pass_index].get_color_attachments(self)
def get_depth_descriptor(self, pass_index, depth_test=True):
des = self.passes[pass_index].get_depth_descriptor(self)
if (not depth_test):
des['depth_compare'] = wgpu.CompareFunction.always
des['depth_write_enabled'] = False
return {**des, 'stencil_read_mask': 0, 'stencil_write_mask': 0, 'stencil_front': {}, 'stencil_back': {}}
def get_depth_attachment(self, pass_index):
return {**self.passes[pass_index].get_depth_attachment(self), 'stencil_read_only': True, 'stencil_load_op': wgpu.LoadOp.clear, 'stencil_store_op': wgpu.StoreOp.discard}
def get_shader_kwargs(self, pass_index):
return {'blending_code': self.passes[pass_index].get_shader_code(self), 'write_pick': self.passes[pass_index].write_pick}
def get_pass_count(self):
return len(self.passes)
def perform_combine_pass(self):
if (not self._combine_pass_pipeline):
self._combine_pass_pipeline = self._create_combination_pipeline()
if (not self._combine_pass_pipeline):
return []
if (not self._combine_pass_bind_group):
self._combine_pass_bind_group = self._create_combination_bind_group(self._combine_pass_pipeline.get_bind_group_layout(0))
command_encoder = self.device.create_command_encoder()
render_pass = command_encoder.begin_render_pass(color_attachments=[{'view': self.color_view, 'resolve_target': None, 'load_op': wgpu.LoadOp.load, 'store_op': wgpu.StoreOp.store}], depth_stencil_attachment=None, occlusion_query_set=None)
render_pass.set_pipeline(self._combine_pass_pipeline)
render_pass.set_bind_group(0, self._combine_pass_bind_group, [], 0, 99)
render_pass.draw(4, 1)
render_pass.end()
return [command_encoder.finish()]
def _create_combination_pipeline(self):
return None

def preprocess(data_dir, hparams: Hyperparameter, temp_dir='temp', device='cuda:0', max_workers=4):
data_dir = os.path.abspath(data_dir)
temp_dir = os.path.abspath(temp_dir)
mel_dir = os.path.join(temp_dir, 'mels')
os.makedirs(mel_dir, exist_ok=True)
mel_config = {'sampling_rate': hparams.sample_rate, 'win_length': hparams.win_length, 'hop_length': hparams.hop_length, 'filter_length': hparams.n_fft, 'mel_fmin': hparams.mel_fmin, 'mel_fmax': hparams.mel_fmax, 'n_mel_channels': hparams.n_mels}
min_wav_length = (hparams.batch_mel_length * hparams.hop_length)
wav_files = glob.glob(f'{data_dir}/**/*.wav', recursive=True)
print('num of wavs:', len(wav_files))
batch_size = 100
batch_num = int(np.ceil((len(wav_files) / batch_size)))
batches = [wav_files[(i * batch_size):((i + 1) * batch_size)] for i in range(batch_num)]
results = []
with ProcessPoolExecutor(max_workers=max_workers) as executor:
futures = [executor.submit(mel_transform, batch, mel_dir, mel_config, device, min_wav_length) for batch in batches]
for f in tqdm.tqdm(futures, desc='Preprocessing', total=batch_num):
results.extend(f.result())
save_metadata(results, os.path.join(temp_dir, 'metadata.txt'))
random.shuffle(results)
save_metadata(results[:hparams.eval_sample_num], hparams.eval_metadata_file)
save_metadata(results[(- hparams.test_sample_num):], hparams.test_metadata_file)
save_metadata(results[hparams.eval_sample_num:(- hparams.test_sample_num)], hparams.train_metadata_file)

class OptimizableInterface(with_metaclass(ABCMeta, object)):
def problem_size(self):
pass
def get_current_point(self):
pass
def set_current_point(self, current_point):
pass
current_point = abstractproperty(get_current_point, set_current_point)
def compute_objective_function(self, **kwargs):
pass
def compute_grad_objective_function(self, **kwargs):
pass
def compute_hessian_objective_function(self, **kwargs):
pass

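# Hypothetical implementer sketch (not part of the original snippet): a 1-D
# quadratic objective f(x) = -(x - 2)^2 showing how the interface is filled in.
class QuadraticObjective(OptimizableInterface):
    def __init__(self):
        self._point = 0.0
    def problem_size(self):
        return 1
    def get_current_point(self):
        return self._point
    def set_current_point(self, current_point):
        self._point = current_point
    current_point = property(get_current_point, set_current_point)
    def compute_objective_function(self, **kwargs):
        return -(self._point - 2.0) ** 2
    def compute_grad_objective_function(self, **kwargs):
        return -2.0 * (self._point - 2.0)
    def compute_hessian_objective_function(self, **kwargs):
        return -2.0
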
def shareable_word_hash(hash_bytes: bytes, all_games: list[RandovaniaGame]):
rng = Random(sum(((hash_byte * ((2 ** 8) ** i)) for (i, hash_byte) in enumerate(hash_bytes))))
games_left = []
selected_words = []
for _ in range(3):
if (not games_left):
games_left = list(all_games)
selected_game = rng.choice(games_left)
games_left = [game for game in games_left if (game != selected_game)]
game_word_list = _all_hash_words()
if (selected_game.hash_words is not None):
game_word_list = selected_game.hash_words
selected_words.append(rng.choice(game_word_list))
return ' '.join(selected_words)

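# Equivalence note (not part of the original snippet): the seed expression above
# is a little-endian byte sum, i.e. for any hash_bytes,
#   sum(b * (2 ** 8) ** i for i, b in enumerate(hash_bytes))
#       == int.from_bytes(hash_bytes, byteorder='little')
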
@pytest.mark.parametrize('text', ('`test identifier`', 'simple_identifier', "query''", '_internal_value', 'get_pubkeys&signatures', 'dict::udict_set_builder', '2+2=2*2', '-alsovalidname', '{hehehe}'))
def test_func_identifier(lexer_func, text):
assert (list(lexer_func.get_tokens(text))[0] == (Name.Variable, text))

def convert_example_to_feature(data, args):
(sums, contexts) = ([], [])
for sample in data:
if (args.sum_mode == 'final'):
sum = sample['FinalSumm']
elif (args.sum_mode == 'user'):
sum = sample['UserSumm']
elif (args.sum_mode == 'agent'):
sum = sample['AgentSumm']
if (args.split_mode == 'comma'):
split_sum = []
for s in sum:
last_index = 0
for i in range(len(s)):
if s[i] in (',', '，', '、'):
split_sum.append(s[last_index:(i + 1)])
last_index = (i + 1)
split_sum = [' '.join(jieba.lcut(s)) for s in split_sum]
sums.append(split_sum)
elif (args.split_mode == 'period'):
split_sum = [' '.join(jieba.lcut(s)) for s in sum]
tmp_sums = []
for sum in split_sum:
if (sum.strip() != ''):
tmp_sums.append(sum)
sums.append(tmp_sums)
context = []
if (args.turn_mode == 'single'):
for turn in sample['Dialogue']:
tmp_utt = []
if (turn['speaker'] == 'Q'):
tmp_utt += [sample['QRole'], ':']
else:
tmp_utt += ['客服', ':']
if args.complete:
sent = jieba.lcut(''.join(turn['new_utterance'].split()))
else:
sent = turn['utterance'].split()
for word in sent:
if ((len(word) > 2) and (word[0] == '[') and (word[(- 1)] == ']')):
tmp_utt += ['[', word[1:(- 1)], ']']
else:
tmp_utt.append(word)
tmp_utt = ' '.join(tmp_utt)
if (args.context_mode == 'both'):
context.append(tmp_utt)
elif ((args.context_mode == 'user') and (turn['speaker'] == 'Q')):
context.append(tmp_utt)
elif ((args.context_mode == 'agent') and (turn['speaker'] == 'A')):
context.append(tmp_utt)
elif (args.turn_mode == 'multi'):
(last_speaker, tmp_utt) = ('', [])
for turn in sample['Dialogue']:
turn['utterance'] = add_period(turn['utterance'])
if (last_speaker != turn['speaker']):
if (tmp_utt != []):
if (args.context_mode == 'both'):
context.append(' '.join(tmp_utt))
elif ((args.context_mode == 'user') and (last_speaker == 'Q')):
context.append(' '.join(tmp_utt))
elif ((args.context_mode == 'agent') and (last_speaker == 'A')):
context.append(' '.join(tmp_utt))
tmp_utt = []
if (turn['speaker'] == 'Q'):
tmp_utt += [sample['QRole'], ':']
else:
tmp_utt += ['客服', ':']
for word in turn['utterance'].split():
if ((len(word) > 2) and (word[0] == '[') and (word[(- 1)] == ']')):
tmp_utt += ['[', word[1:(- 1)], ']']
else:
tmp_utt.append(word)
last_speaker = turn['speaker']
else:
for word in turn['utterance'].split():
if ((len(word) > 2) and (word[0] == '[') and (word[(- 1)] == ']')):
tmp_utt += ['[', word[1:(- 1)], ']']
else:
tmp_utt.append(word)
if (args.context_mode == 'both'):
context.append(' '.join(tmp_utt))
elif ((args.context_mode == 'user') and (last_speaker == 'Q')):
context.append(' '.join(tmp_utt))
elif ((args.context_mode == 'agent') and (last_speaker == 'A')):
context.append(' '.join(tmp_utt))
contexts.append(context)
return (sums, contexts)

def _get_plugin_config():
if config.has_option('plugins', 'trayicon_window_hide'):
value = config.getboolean('plugins', 'trayicon_window_hide')
config.remove_option('plugins', 'trayicon_window_hide')
config.set('plugins', 'icon_window_hide', value)
pconfig = PluginConfig('icon')
pconfig.defaults.set('window_hide', True)
pconfig.defaults.set('tooltip', DEFAULT_PATTERN)
pconfig.defaults.set('modifier_swap', False)
return pconfig

def ddp(script: str, nnodes: int=1, name: str='ddp_app', role: str='worker', env: Optional[Dict[(str, str)]]=None, *script_args: str) -> specs.AppDef:
app_env: Dict[(str, str)] = {}
if env:
app_env.update(env)
entrypoint = os.path.join(specs.macros.img_root, script)
ddp_role = specs.Role(name=role, image='dummy_image', entrypoint=entrypoint, args=list(script_args), env=app_env, num_replicas=nnodes, resource=specs.Resource(cpu=1, gpu=0, memMB=1))
return specs.AppDef(name, roles=[ddp_role])

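# Hypothetical usage sketch (not part of the original snippet): builds an
# AppDef with a single 'worker' role replicated nnodes times; the script path
# and env values below are made-up examples.
app = ddp('train/train.py', 2, 'my_ddp_app', 'worker', {'LOGLEVEL': 'INFO'}, '--epochs', '10')
assert app.roles[0].num_replicas == 2
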
def _configure_stderr_logging(*, verbosity=None, verbosity_shortcuts=None):
global console_stderr_handler
if (console_stderr_handler is not None):
_logger.warning('stderr handler already exists')
return
console_stderr_handler = logging.StreamHandler(sys.stderr)
console_stderr_handler.setFormatter(console_formatter)
if ((not verbosity) and (not verbosity_shortcuts)):
console_stderr_handler.setLevel(logging.WARNING)
root_logger.addHandler(console_stderr_handler)
else:
console_stderr_handler.setLevel(logging.DEBUG)
root_logger.addHandler(console_stderr_handler)
_process_verbosity_log_levels(verbosity)
_process_verbosity_filter_shortcuts(verbosity_shortcuts, handler=console_stderr_handler)
if _inmemory_startup_logs:
_inmemory_startup_logs.dump_to_target(console_stderr_handler)

def train():
tf.reset_default_graph()
policy_nn = SupervisedPolicy()
f = open(relationPath)
train_data = f.readlines()
f.close()
num_samples = len(train_data)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_samples = min(num_samples, 500)
for episode in range(num_samples):
print(('Episode %d' % episode))
print('Training Sample:', train_data[(episode % num_samples)][:(- 1)])
env = Env(dataPath, train_data[(episode % num_samples)])
sample = train_data[(episode % num_samples)].split()
try:
good_episodes = teacher(sample[0], sample[1], 5, env, graphpath)
except Exception as e:
print('Cannot find a path')
continue
for item in good_episodes:
state_batch = []
action_batch = []
for (t, transition) in enumerate(item):
state_batch.append(transition.state)
action_batch.append(transition.action)
state_batch = np.squeeze(state_batch)
state_batch = np.reshape(state_batch, [(- 1), state_dim])
policy_nn.update(state_batch, action_batch)
saver.save(sess, ('models/policy_supervised_' + relation))
print('Model saved')

def test_main_failure(caplog: pytest.LogCaptureFixture, tmp_path: Path) -> None:
requirements_file = (tmp_path / 'requirements.txt')
requirements_file.touch()
source_dir = (tmp_path / 'source')
source_dir.mkdir()
source_file = (source_dir / 'source.py')
source_file.write_text('import pytest')
caplog.set_level(logging.WARN)
with pytest.raises(SystemExit) as excinfo:
find_missing_reqs.main(arguments=['--requirements', str(requirements_file), str(source_dir)])
assert (excinfo.value.code == 1)
assert (caplog.records[0].message == 'Missing requirements:')
assert (caplog.records[1].message == f'{source_file}:1 dist=pytest module=pytest')

def check_match(condition: models.Match, value: Any) -> bool:
if isinstance(condition, models.MatchValue):
return (value == condition.value)
if isinstance(condition, models.MatchText):
return ((value is not None) and (condition.text in value))
if isinstance(condition, models.MatchAny):
return (value in condition.any)
if isinstance(condition, models.MatchExcept):
return (value not in condition.except_)
raise ValueError(f'Unknown match condition: {condition}')

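# Hypothetical usage sketch (not part of the original snippet): assumes the
# qdrant_client models module, where these Match variants are defined.
from qdrant_client.http import models

assert check_match(models.MatchValue(value='red'), 'red')
assert check_match(models.MatchText(text='ed'), 'red')
assert check_match(models.MatchAny(any=['red', 'blue']), 'blue')
assert not check_match(models.MatchExcept(**{'except': ['red']}), 'red')
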
class LatentBrownianBridgeModel(BrownianBridgeModel):
def __init__(self, model_config):
super().__init__(model_config)
self.vqgan = VQModel(**vars(model_config.VQGAN.params)).eval()
self.vqgan.train = disabled_train
for param in self.vqgan.parameters():
param.requires_grad = False
print(f'load vqgan from {model_config.VQGAN.params.ckpt_path}')
if (self.condition_key == 'nocond'):
self.cond_stage_model = None
elif (self.condition_key == 'first_stage'):
self.cond_stage_model = self.vqgan
elif (self.condition_key == 'SpatialRescaler'):
self.cond_stage_model = SpatialRescaler(**vars(model_config.CondStageParams))
else:
raise NotImplementedError
def get_ema_net(self):
return self
def get_parameters(self):
if (self.condition_key == 'SpatialRescaler'):
print('get parameters to optimize: SpatialRescaler, UNet')
params = itertools.chain(self.denoise_fn.parameters(), self.cond_stage_model.parameters())
else:
print('get parameters to optimize: UNet')
params = self.denoise_fn.parameters()
return params
def apply(self, weights_init):
super().apply(weights_init)
if (self.cond_stage_model is not None):
self.cond_stage_model.apply(weights_init)
return self
def forward(self, x, x_cond, context=None):
with torch.no_grad():
x_latent = self.encode(x, cond=False)
x_cond_latent = self.encode(x_cond, cond=True)
context = self.get_cond_stage_context(x_cond)
return super().forward(x_latent.detach(), x_cond_latent.detach(), context)
def get_cond_stage_context(self, x_cond):
if (self.cond_stage_model is not None):
context = self.cond_stage_model(x_cond)
if (self.condition_key == 'first_stage'):
context = context.detach()
else:
context = None
return context
@torch.no_grad()
def encode(self, x, cond=True, normalize=None):
normalize = (self.model_config.normalize_latent if (normalize is None) else normalize)
model = self.vqgan
x_latent = model.encoder(x)
if (not self.model_config.latent_before_quant_conv):
x_latent = model.quant_conv(x_latent)
if normalize:
if cond:
x_latent = ((x_latent - self.cond_latent_mean) / self.cond_latent_std)
else:
x_latent = ((x_latent - self.ori_latent_mean) / self.ori_latent_std)
return x_latent
@torch.no_grad()
def decode(self, x_latent, cond=True, normalize=None):
normalize = (self.model_config.normalize_latent if (normalize is None) else normalize)
if normalize:
if cond:
x_latent = ((x_latent * self.cond_latent_std) + self.cond_latent_mean)
else:
x_latent = ((x_latent * self.ori_latent_std) + self.ori_latent_mean)
model = self.vqgan
if self.model_config.latent_before_quant_conv:
x_latent = model.quant_conv(x_latent)
(x_latent_quant, loss, _) = model.quantize(x_latent)
out = model.decode(x_latent_quant)
return out
@torch.no_grad()
def sample(self, x_cond, clip_denoised=False, sample_mid_step=False):
x_cond_latent = self.encode(x_cond, cond=True)
if sample_mid_step:
(temp, one_step_temp) = self.p_sample_loop(y=x_cond_latent, context=self.get_cond_stage_context(x_cond), clip_denoised=clip_denoised, sample_mid_step=sample_mid_step)
out_samples = []
for i in tqdm(range(len(temp)), initial=0, desc='save output sample mid steps', dynamic_ncols=True, smoothing=0.01):
with torch.no_grad():
out = self.decode(temp[i].detach(), cond=False)
out_samples.append(out.to('cpu'))
one_step_samples = []
for i in tqdm(range(len(one_step_temp)), initial=0, desc='save one step sample mid steps', dynamic_ncols=True, smoothing=0.01):
with torch.no_grad():
out = self.decode(one_step_temp[i].detach(), cond=False)
one_step_samples.append(out.to('cpu'))
return (out_samples, one_step_samples)
else:
temp = self.p_sample_loop(y=x_cond_latent, context=self.get_cond_stage_context(x_cond), clip_denoised=clip_denoised, sample_mid_step=sample_mid_step)
x_latent = temp
out = self.decode(x_latent, cond=False)
return out
@torch.no_grad()
def sample_vqgan(self, x):
(x_rec, _) = self.vqgan(x)
return x_rec |
def test_dataid_elements_picklable():
import pickle
from satpy.tests.utils import make_dataid
did = make_dataid(name='hi', wavelength=(10, 11, 12), resolution=1000, calibration='radiance')
for value in did.values():
pickled_value = pickle.loads(pickle.dumps(value))
assert (value == pickled_value) |
class Incidents(Cog):
message_link_embeds_cache = RedisCache()
def __init__(self, bot: Bot) -> None:
self.bot = bot
self.incidents_webhook = None
scheduling.create_task(self.fetch_webhook())
self.event_lock = asyncio.Lock()
self.crawl_task = scheduling.create_task(self.crawl_incidents())
async def fetch_webhook(self) -> None:
(await self.bot.wait_until_guild_available())
try:
self.incidents_webhook = (await self.bot.fetch_webhook(Webhooks.incidents.id))
except discord.HTTPException:
log.error(f'Failed to fetch incidents webhook with id `{Webhooks.incidents.id}`.')
async def crawl_incidents(self) -> None:
(await self.bot.wait_until_guild_available())
incidents: discord.TextChannel = self.bot.get_channel(Channels.incidents)
log.debug(f'Crawling messages in #incidents: CRAWL_LIMIT={CRAWL_LIMIT!r}, CRAWL_SLEEP={CRAWL_SLEEP!r}')
async for message in incidents.history(limit=CRAWL_LIMIT):
if (not is_incident(message)):
log.trace(f'Skipping message {message.id}: not an incident')
continue
if has_signals(message):
log.trace(f'Skipping message {message.id}: already has all signals')
continue
(await add_signals(message))
(await asyncio.sleep(CRAWL_SLEEP))
log.debug('Crawl task finished!')
async def archive(self, incident: discord.Message, outcome: Signal, actioned_by: discord.Member) -> bool:
log.info(f'Archiving incident: {incident.id} (outcome: {outcome}, actioned by: {actioned_by})')
(embed, attachment_file) = (await make_embed(incident, outcome, actioned_by))
try:
webhook = (await self.bot.fetch_webhook(Webhooks.incidents_archive.id))
(await webhook.send(embed=embed, username=sub_clyde(incident.author.display_name), avatar_url=incident.author.display_avatar.url, file=attachment_file))
except Exception:
log.exception(f'Failed to archive incident {incident.id} to #incidents-archive')
return False
else:
log.trace('Message archived successfully!')
return True
def make_confirmation_task(self, incident: discord.Message, timeout: int=5) -> asyncio.Task:
log.trace(f'Confirmation task will wait timeout={timeout!r} seconds for incident.id={incident.id!r} to be deleted')
def check(payload: discord.RawReactionActionEvent) -> bool:
return (payload.message_id == incident.id)
coroutine = self.bot.wait_for('raw_message_delete', check=check, timeout=timeout)
return scheduling.create_task(coroutine)
async def process_event(self, reaction: str, incident: discord.Message, member: discord.Member) -> None:
members_roles: set[int] = {role.id for role in member.roles}
if (not (members_roles & ALLOWED_ROLES)):
log.debug(f'Removing invalid reaction: user {member} is not permitted to send signals')
try:
(await incident.remove_reaction(reaction, member))
except discord.NotFound:
log.trace("Couldn't remove reaction because the reaction or its message was deleted")
return
try:
signal = Signal(reaction)
except ValueError:
log.debug(f'Removing invalid reaction: emoji {reaction} is not a valid signal')
try:
(await incident.remove_reaction(reaction, member))
except discord.NotFound:
log.trace("Couldn't remove reaction because the reaction or its message was deleted")
return
log.trace(f'Received signal: {signal}')
if (signal not in (Signal.ACTIONED, Signal.NOT_ACTIONED)):
log.debug('Reaction was valid, but no action is currently defined for it')
return
relay_successful = (await self.archive(incident, signal, actioned_by=member))
if (not relay_successful):
log.trace('Original message will not be deleted as we failed to relay it to the archive')
return
timeout = 5
confirmation_task = self.make_confirmation_task(incident, timeout)
log.trace('Deleting original message')
try:
(await incident.delete())
except discord.NotFound:
log.trace("Couldn't delete message because it was already deleted")
log.trace(f'Awaiting deletion confirmation: timeout={timeout!r} seconds')
try:
(await confirmation_task)
except asyncio.TimeoutError:
log.info(f'Did not receive incident deletion confirmation within {timeout} seconds!')
else:
log.trace('Deletion was confirmed')
if self.incidents_webhook:
(await self.delete_msg_link_embed(incident.id))
async def resolve_message(self, message_id: int) -> (discord.Message | None):
(await self.bot.wait_until_guild_available())
log.trace(f'Resolving message for: message_id={message_id!r}')
message: (discord.Message | None) = self.bot._connection._get_message(message_id)
if (message is not None):
log.trace('Message was found in cache')
return message
log.trace('Message not found, attempting to fetch')
try:
message = (await self.bot.get_channel(Channels.incidents).fetch_message(message_id))
except discord.NotFound:
log.trace("Message doesn't exist, it was likely already relayed")
except Exception:
log.exception(f'Failed to fetch message {message_id}!')
else:
log.trace('Message fetched successfully!')
return message
@Cog.listener()
async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent) -> None:
if ((payload.channel_id != Channels.incidents) or payload.member.bot):
return
log.trace(f'Received reaction add event in #incidents, waiting for crawler: self.crawl_task.done()={self.crawl_task.done()!r}')
(await self.crawl_task)
log.trace(f'Acquiring event lock: self.event_lock.locked()={self.event_lock.locked()!r}')
async with self.event_lock:
message = (await self.resolve_message(payload.message_id))
if (message is None):
log.debug('Listener will abort as related message does not exist!')
return
if (not is_incident(message)):
log.debug('Ignoring event for a non-incident message')
return
(await self.process_event(str(payload.emoji), message, payload.member))
log.trace('Releasing event lock')
@Cog.listener()
async def on_message(self, message: discord.Message) -> None:
if (not is_incident(message)):
return
(await add_signals(message))
if self.incidents_webhook:
if (embed_list := (await self.extract_message_links(message))):
(await self.send_message_link_embeds(embed_list, message, self.incidents_webhook))
@Cog.listener()
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
if self.incidents_webhook:
(await self.delete_msg_link_embed(payload.message_id))
async def extract_message_links(self, message: discord.Message) -> (list[discord.Embed] | None):
message_links = DISCORD_MESSAGE_LINK_RE.findall(message.content)
if (not message_links):
log.trace(f'No message links detected on incident message with id {message.id}.')
return None
embeds = []
for message_link in message_links[:10]:
ctx = (await self.bot.get_context(message))
embed = (await make_message_link_embed(ctx, message_link[0]))
if embed:
embeds.append(embed)
return embeds
async def send_message_link_embeds(self, webhook_embed_list: list, message: discord.Message, webhook: discord.Webhook) -> (int | None):
try:
webhook_msg = (await webhook.send(embeds=[embed for embed in webhook_embed_list if embed], username=sub_clyde(message.author.name), avatar_url=message.author.display_avatar.url, wait=True))
except discord.DiscordException:
log.exception(f'Failed to send message link embed {message.id} to #incidents.')
else:
(await self.message_link_embeds_cache.set(message.id, webhook_msg.id))
log.trace('Message link embeds sent successfully to #incidents!')
return webhook_msg.id
async def delete_msg_link_embed(self, message_id: int) -> None:
log.trace("Deleting Discord message link's webhook message.")
webhook_msg_id = (await self.message_link_embeds_cache.get(int(message_id)))
if webhook_msg_id:
try:
(await self.incidents_webhook.delete_message(webhook_msg_id))
except discord.errors.NotFound:
log.trace(f'Incidents message link embed (`{webhook_msg_id}`) has already been deleted, skipping.')
(await self.message_link_embeds_cache.delete(message_id))
log.trace('Successfully deleted discord links webhook message.') |
class SubQuery():
def __init__(self, subquery: Any, subquery_raw: str, alias: Optional[str]):
self.query = subquery
self.query_raw = subquery_raw
self.alias = (escape_identifier_name(alias) if (alias is not None) else f'subquery_{hash(self)}')
def __str__(self):
return self.alias
def __repr__(self):
return ('SubQuery: ' + str(self))
def __eq__(self, other):
return (isinstance(other, SubQuery) and (self.query_raw == other.query_raw))
def __hash__(self):
return hash(self.query_raw)
@staticmethod
def of(subquery: Any, alias: Optional[str]) -> 'SubQuery':
raise NotImplementedError |
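# Identity-semantics sketch for SubQuery above (no external deps when alias
# is None): equality and hashing key off the raw query text, so two wrappers
# around the same SQL dedupe in a set.
a = SubQuery(object(), 'SELECT 1', None)
b = SubQuery(object(), 'SELECT 1', None)
assert (a == b) and (len({a, b}) == 1) |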
def run(*cmd: str, capture: bool=False, raise_on_err: bool=True, check_code: t.Callable[([int], bool)]=(lambda c: (c == 0)), **popen_kwargs: t.Any) -> RunReturn:
stdout = (subprocess.PIPE if capture else None)
stderr = (subprocess.PIPE if capture else None)
proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **popen_kwargs)
(out, err) = proc.communicate()
result = RunReturn(proc.returncode, ('' if (out is None) else out.decode()), ('' if (err is None) else err.decode()))
if (raise_on_err and (not check_code(result.returncode))):
raise RuntimeError(result)
return result |
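# Usage sketch for run() above, assuming RunReturn is a namedtuple-like
# record with returncode/stdout/stderr fields (it is constructed that way)
# and a POSIX system providing `echo` and `false`.
result = run('echo', 'hello', capture=True)
print(result.stdout)  # 'hello\n'
try:
    run('false')
except RuntimeError as exc:
    print('command failed:', exc) |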
class ThreadLocal(Generic[_StateType]):
def __init__(self, default: Callable[([], _StateType)]):
self._default = default
self._state: WeakKeyDictionary[(Thread, _StateType)] = WeakKeyDictionary()
def get(self) -> _StateType:
thread = current_thread()
if (thread not in self._state):
state = self._state[thread] = self._default()
else:
state = self._state[thread]
return state |
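# Usage sketch for ThreadLocal above (stdlib only): each thread lazily gets
# its own state from the default factory.
import threading

state = ThreadLocal(default=list)
state.get().append('main')

def worker():
    state.get().append('worker')
    assert state.get() == ['worker']  # isolated from the main thread

t = threading.Thread(target=worker)
t.start()
t.join()
assert state.get() == ['main'] |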
@register_edge_encoder('VOCEdge')
class VOCEdgeEncoder(torch.nn.Module):
def __init__(self, emb_dim):
super().__init__()
VOC_edge_input_dim = (2 if (cfg.dataset.name == 'edge_wt_region_boundary') else 1)
self.encoder = torch.nn.Linear(VOC_edge_input_dim, emb_dim)
def forward(self, batch):
batch.edge_attr = self.encoder(batch.edge_attr)
return batch |
def createProgram(shaderList):
program = glCreateProgram()
for shader in shaderList:
glAttachShader(program, shader)
glLinkProgram(program)
status = glGetProgramiv(program, GL_LINK_STATUS)
if (status == GL_FALSE):
strInfoLog = glGetProgramInfoLog(program)
print(('Linker failure: \n' + str(strInfoLog)))
for shader in shaderList:
glDetachShader(program, shader)
return program |
def process_mnemonics(X_protoset_cumuls, Y_protoset_cumuls, mnemonics_raw, mnemonics_label, order_list, nb_cl_fg, nb_cl, iteration, start_iter):
mnemonics = mnemonics_raw[0]
mnemonics_array_new = np.zeros((len(mnemonics), len(mnemonics[0]), 32, 32, 3))
mnemonics_list = []
mnemonics_label_list = []
for idx in range(len(mnemonics)):
this_mnemonics = []
for sub_idx in range(len(mnemonics[idx])):
processed_img = tensor2im(mnemonics[idx][sub_idx])
mnemonics_array_new[idx][sub_idx] = processed_img
diff = (len(X_protoset_cumuls) - len(mnemonics_array_new))
for idx in range(len(mnemonics_array_new)):
X_protoset_cumuls[(idx + diff)] = mnemonics_array_new[idx]
return X_protoset_cumuls |
class FileHandler(Configurable):
path = Option(filesystem_path, required=True, positional=True, __doc__='Path to use within the provided filesystem.')
eol = Option(str, default='\n', __doc__='Character to use as line separator.')
mode = Option(str, __doc__='What mode to use for open() call.')
encoding = Option(str, default='utf-8', __doc__='Encoding.')
fs = Service('fs', __doc__='The filesystem instance to use.')
@ContextProcessor
def file(self, context, *, fs):
with self.open(fs) as file:
(yield file)
def open(self, fs):
return fs.open(self.path, self.mode, encoding=self.encoding) |
def get_all_terminals(tree, is_l_value, insideQuery):
if (not isinstance(tree, Node)):
return [tree]
if isPathExpression(tree):
return get_path_expression_terminals(tree, insideQuery)
elif isTryExceptExpression(tree):
return get_try_except_expression_terminals(tree, insideQuery)
elif (isTupleConstructor(tree, is_l_value) and insideQuery):
return get_tuple_constructor_terminals(tree)
elif isQuery(tree):
return get_query_terminals(tree)
else:
children = []
if (tree.label == 'expr_stmt'):
child_0 = get_all_terminals(tree.children[0], True, insideQuery)
rest = [get_all_terminals(c, False, insideQuery) for c in tree.children[1:]]
return (child_0 + reduce((lambda x, y: (x + y)), rest, []))
elif tree.children:
children = reduce((lambda x, y: (x + y)), [get_all_terminals(c, is_l_value, insideQuery) for c in tree.children])
return children |
class TestContingency(TestCase):
def test_chi2_independence(self):
np.random.seed(42)
(mean, cov) = ([0.5, 0.5], [(1, 0.6), (0.6, 1)])
(x, y) = np.random.multivariate_normal(mean, cov, 30).T
data = pd.DataFrame({'x': x, 'y': y})
mask_class_1 = (data > 0.5)
data[mask_class_1] = 1
data[(~ mask_class_1)] = 0
(_, _, stats) = pg.chi2_independence(data, x='x', y='y')
contingency_table = pd.crosstab(data['x'], data['y'])
for i in stats.index:
lambda_ = stats.at[(i, 'lambda')]
dof = stats.at[(i, 'dof')]
chi2 = stats.at[(i, 'chi2')]
p = round(stats.at[(i, 'pval')], 6)
(sp_chi2, sp_p, sp_dof, _) = chi2_contingency(contingency_table, lambda_=lambda_)
np.testing.assert_allclose([chi2, p, dof], [sp_chi2, sp_p, sp_dof], rtol=0.0001)
mask_nan = (np.random.random(data.shape) > 0.8)
data[mask_nan] = np.nan
pg.chi2_independence(data, x='x', y='y')
def expect_assertion_error(*params):
with pytest.raises(AssertionError):
pg.chi2_independence(*params)
expect_assertion_error(1, 'x', 'y')
expect_assertion_error(data, x, 'y')
expect_assertion_error(data, 'x', y)
expect_assertion_error(data, 'x', 'z')
data['x'] = np.nan
with pytest.raises(ValueError):
pg.chi2_independence(data, x='x', y='y')
data['x'] = 1
data['y'] = 1
(expected, observed, stats) = pg.chi2_independence(data, 'x', 'y')
assert (expected.iloc[(0, 0)] == observed.iloc[(0, 0)])
assert (stats.at[(0, 'dof')] == 0)
for i in stats.index:
chi2 = stats.at[(i, 'chi2')]
p = stats.at[(i, 'pval')]
assert ((chi2, p) == (0.0, 1.0))
data.iloc[(0, 0)] = 0
with pytest.warns(UserWarning):
pg.chi2_independence(data, 'x', 'y')
(_, _, stats) = pg.chi2_independence(df_ind, 'sex', 'target')
assert (round(stats.at[(0, 'chi2')], 3) == 22.717)
assert (stats.at[(0, 'dof')] == 1)
assert np.isclose(stats.at[(0, 'pval')], 1.877e-06)
assert (round(stats.at[(0, 'cramer')], 2) == 0.27)
(_, _, stats) = pg.chi2_independence(df_ind, 'cp', 'target')
assert (round(stats.at[(0, 'chi2')], 3) == 81.686)
assert (stats.at[(0, 'dof')] == 3.0)
assert (stats.at[(0, 'pval')] < 2.2e-16)
assert (round(stats.at[(0, 'cramer')], 3) == 0.519)
assert np.isclose(stats.at[(0, 'power')], 1.0)
def test_chi2_mcnemar(self):
np.random.seed(42)
(mean, cov) = ([0.5, 0.5], [(1, 0.6), (0.6, 1)])
(x, y) = np.random.multivariate_normal(mean, cov, 30).T
data = pd.DataFrame({'x': x, 'y': y})
mask_class_1 = (data > 0.5)
data[mask_class_1] = 1
data[(~ mask_class_1)] = 0
def expect_assertion_error(*params):
with pytest.raises(AssertionError):
pg.chi2_mcnemar(*params)
expect_assertion_error(1, 'x', 'y')
expect_assertion_error(data, x, 'y')
expect_assertion_error(data, 'x', y)
expect_assertion_error(data, 'x', 'z')
pg.chi2_mcnemar(data, 'x', 'y')
data.iloc[(0, 0)] = np.nan
with pytest.raises(ValueError):
pg.chi2_mcnemar(data, 'x', 'y')
data.iloc[(0, 0)] = 3
with pytest.raises(ValueError):
pg.chi2_mcnemar(data, 'x', 'y')
data = pd.DataFrame({'x': [0, 0, 0, 1, 1, 1], 'y': [0, 0, 0, 1, 1, 1]})
with pytest.raises(ValueError):
pg.chi2_mcnemar(data, 'x', 'y')
(_, stats) = pg.chi2_mcnemar(df_mcnemar, 'treatment_X', 'treatment_Y')
assert (round(stats.at[('mcnemar', 'chi2')], 3) == 20.021)
assert (stats.at[('mcnemar', 'dof')] == 1)
assert np.isclose(stats.at[('mcnemar', 'p-approx')], 7.66e-06)
assert np.isclose(stats.at[('mcnemar', 'p-exact')], 3.305e-06)
def test_dichotomize_series(self):
a = pg.contingency._dichotomize_series(data_ct, 'A').to_numpy()
b = pg.contingency._dichotomize_series(data_ct, 'B').to_numpy()
d = pg.contingency._dichotomize_series(data_ct, 'D').to_numpy()
np.testing.assert_array_equal(a, b)
np.testing.assert_array_equal(b, d)
with pytest.raises(ValueError):
pg.contingency._dichotomize_series(data_ct, 'C')
def test_dichotomous_crosstab(self):
d1 = pg.dichotomous_crosstab(data_ct, 'A', 'B')
d2 = pg.dichotomous_crosstab(data_ct, 'A', 'D')
assert d1.equals(d2)
pg.dichotomous_crosstab(data_ct, 'A', 'E')
pg.dichotomous_crosstab(data_ct, 'E', 'A')
with pytest.raises(ValueError):
pg.dichotomous_crosstab(data_ct, 'E', 'E') |
def test_voltage():
with expected_protocol(Keithley2200, [(b'INST:SEL CH1;VOLT 1.456', None), (b'INST:SEL CH1;VOLT?', 1.456), (b'INST:SEL CH1;MEAS:VOLT?', 1.456), (b'INST:SEL CH3;VOLT 1.456', None)]) as instr:
instr.ch_1.voltage_setpoint = 1.456
assert (instr.ch_1.voltage_setpoint == 1.456)
assert (instr.ch_1.voltage == 1.456)
instr.ch_3.voltage_setpoint = 1.456 |
def test_write_calibration_data():
invalid_cal_data = VALID_CAL_DATA.copy()
invalid_cal_data[1] = 1
invalid_cal_write_xfers = convert_cal_data_to_cal_write_xfers(invalid_cal_data)
with expected_protocol(HP3478A, invalid_cal_write_xfers) as instr:
instr.write_calibration_data(invalid_cal_data, verify_calibration_data=False) |
class MVArray(np.ndarray):
def __new__(cls, input_array):
(input_shape, layout, dtype) = _interrogate_nested_mvs(input_array)
obj = np.empty(input_shape, dtype=object)
for index in np.ndindex(input_shape):
obj[index] = _index_nested_iterable(input_array, index)
self = obj.view(cls)
return self
def __array_finalize__(self, obj):
if (obj is None):
return
def _get_first_element(self):
return self[((0,) * self.ndim)]
@property
def value(self):
value_dtype = self._get_first_element().value.dtype
return _get_vectorized_value_func(value_dtype)(self)
@staticmethod
def from_value_array(layout, value_array):
v_new_mv = np.vectorize((lambda v: layout.MultiVector(v)), otypes=[object], signature='(n)->()')
return MVArray(v_new_mv(value_array))
def save(self, filename, compression=True, transpose=False, sparse=False, support=False, compression_opts=1):
first_element = self._get_first_element()
write_ga_file(filename, self.value, first_element.layout.metric, first_element.layout.basis_names, compression=compression, transpose=transpose, sparse=sparse, support=support, compression_opts=compression_opts)
def sum(self):
out = self[0]
for k in self[1:]:
out += k
return out
def gp(self):
out = self[0]
for k in self[1:]:
out *= k
return out
def op(self):
out = self[0]
for k in self[1:]:
out = (out ^ k)
return out
def normal(self):
return normal_array(self)
def dual(self):
return dual_array(self)
def __call__(self, A):
return call_array(self, A) |
class BIP32_KeyStore(Xpub, Deterministic_KeyStore):
type = 'bip32'
def __init__(self, d):
Xpub.__init__(self, derivation_prefix=d.get('derivation'), root_fingerprint=d.get('root_fingerprint'))
Deterministic_KeyStore.__init__(self, d)
self.xpub = d.get('xpub')
self.xprv = d.get('xprv')
def format_seed(self, seed):
return ' '.join(seed.split())
def dump(self):
d = Deterministic_KeyStore.dump(self)
d['xpub'] = self.xpub
d['xprv'] = self.xprv
d['derivation'] = self.get_derivation_prefix()
d['root_fingerprint'] = self.get_root_fingerprint()
return d
def get_master_private_key(self, password):
return pw_decode(self.xprv, password, version=self.pw_hash_version)
def check_password(self, password):
xprv = pw_decode(self.xprv, password, version=self.pw_hash_version)
try:
bip32node = BIP32Node.from_xkey(xprv)
except BaseDecodeError as e:
raise InvalidPassword() from e
if (bip32node.chaincode != self.get_bip32_node_for_xpub().chaincode):
raise InvalidPassword()
def update_password(self, old_password, new_password):
self.check_password(old_password)
if (new_password == ''):
new_password = None
if self.has_seed():
decoded = self.get_seed(old_password)
self.seed = pw_encode(decoded, new_password, version=PW_HASH_VERSION_LATEST)
if self.passphrase:
decoded = self.get_passphrase(old_password)
self.passphrase = pw_encode(decoded, new_password, version=PW_HASH_VERSION_LATEST)
if (self.xprv is not None):
b = pw_decode(self.xprv, old_password, version=self.pw_hash_version)
self.xprv = pw_encode(b, new_password, version=PW_HASH_VERSION_LATEST)
self.pw_hash_version = PW_HASH_VERSION_LATEST
def is_watching_only(self):
return (self.xprv is None)
def add_xpub(self, xpub):
assert is_xpub(xpub)
self.xpub = xpub
(root_fingerprint, derivation_prefix) = bip32.root_fp_and_der_prefix_from_xkey(xpub)
self.add_key_origin(derivation_prefix=derivation_prefix, root_fingerprint=root_fingerprint)
def add_xprv(self, xprv):
assert is_xprv(xprv)
self.xprv = xprv
self.add_xpub(bip32.xpub_from_xprv(xprv))
def add_xprv_from_seed(self, bip32_seed, xtype, derivation):
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
node = rootnode.subkey_at_private_derivation(derivation)
self.add_xprv(node.to_xprv())
self.add_key_origin_from_root_node(derivation_prefix=derivation, root_node=rootnode)
def get_private_key(self, sequence: Sequence[int], password):
xprv = self.get_master_private_key(password)
node = BIP32Node.from_xkey(xprv).subkey_at_private_derivation(sequence)
pk = node.eckey.get_secret_bytes()
return (pk, True)
def get_keypair(self, sequence, password):
(k, _) = self.get_private_key(sequence, password)
cK = ecc.ECPrivkey(k).get_public_key_bytes()
return (cK, k) |
class SpecParser(Parser[ConfigNamespace]):
def __init__(self, spec: ConfigSpec):
self.spec = spec
def parse(self, key_path: str, raw_config: RawConfig) -> ConfigNamespace:
parsed = ConfigNamespace()
for (key, spec) in self.spec.items():
assert ('.' not in key), 'dots are not allowed in keys'
if key_path:
sub_key_path = f'{key_path}.{key}'
else:
sub_key_path = key
parser = Parser.from_spec(spec)
parsed[key] = parser.parse(sub_key_path, raw_config)
return parsed |
def load_collection_(path, retain_titles):
with open(path) as f:
collection = []
for line in file_tqdm(f):
(_, passage, title) = line.strip().split('\t')
if retain_titles:
passage = ((title + ' | ') + passage)
collection.append(passage)
return collection |
def test_marker_prefix_does_not_interfere_with_order_marks(test_path):
test_path.makepyfile(test_marker='\n import pytest\n\n @pytest.mark.order(3)\n def test_a():\n     pass\n\n @pytest.mark.order(1)\n def test_b():\n     pass\n\n @pytest.mark.order(2)\n def test_c():\n     pass\n ')
result = test_path.runpytest('-v', '--order-marker-prefix=m')
result.assert_outcomes(passed=3, skipped=0)
result.stdout.fnmatch_lines(['test_marker.py::test_b PASSED', 'test_marker.py::test_c PASSED', 'test_marker.py::test_a PASSED']) |
class GenTensorVariable(TensorVariable):
def __init__(self, op, type, name=None):
super().__init__(type=type, owner=None, name=name)
self.op = op
def set_gen(self, gen):
self.op.set_gen(gen)
def set_default(self, value):
self.op.set_default(value)
def clone(self):
cp = self.__class__(self.op, self.type, self.name)
cp.tag = copy(self.tag)
return cp |
@document_request_params(*docs._get_observations, docs._pagination)
def get_observation_species_counts(**params) -> JsonResponse:
if (params.get('page') == 'all'):
return paginate_all(get, f'{API_V1}/observations/species_counts', **params)
else:
return get(f'{API_V1}/observations/species_counts', **params).json() |
def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None:
deselect_prefixes = tuple((config.getoption('deselect') or []))
if (not deselect_prefixes):
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining |
def main(args: Optional[Sequence[str]]=None) -> None:
parser = ArgumentParser()
parser.add_argument('file', nargs='+', help='Validate specified file(s).')
parser.add_argument('--errors', choices=('best-match', 'all'), default='best-match', help='Control error reporting. Defaults to "best-match", use "all" to get all subschema errors.')
parser.add_argument('--schema', type=str, choices=['detect', '2.0', '3.0', '3.1', '3.0.0', '3.1.0'], default='detect', metavar='{detect,2.0,3.0,3.1}', help='OpenAPI schema version (default: detect).')
args_parsed = parser.parse_args(args)
for filename in args_parsed.file:
reader = read_from_filename
if (filename in ['-', '/-']):
filename = 'stdin'
reader = read_from_stdin
try:
(spec, base_uri) = reader(filename)
except Exception as exc:
print(exc)
sys.exit(1)
validators = {'detect': None, '2.0': OpenAPIV2SpecValidator, '3.0': OpenAPIV30SpecValidator, '3.1': OpenAPIV31SpecValidator, '3.0.0': OpenAPIV30SpecValidator, '3.1.0': OpenAPIV31SpecValidator}
validator_cls = validators[args_parsed.schema]
try:
validate(spec, base_uri=base_uri, cls=validator_cls)
except ValidationError as exc:
print_validationerror(filename, exc, args_parsed.errors)
sys.exit(1)
except Exception as exc:
print_error(filename, exc)
sys.exit(2)
else:
print_ok(filename) |
@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/activate')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@path_param('trigger_uuid', 'The UUID of the build trigger')
class BuildTriggerActivate(RepositoryParamResource):
schemas = {'BuildTriggerActivateRequest': {'type': 'object', 'required': ['config'], 'properties': {'config': {'type': 'object', 'description': 'Arbitrary json.'}, 'pull_robot': {'type': 'string', 'description': 'The name of the robot that will be used to pull images.'}}}}
@require_repo_admin(allow_for_superuser=True)
@disallow_for_app_repositories
@disallow_for_non_normal_repositories
@disallow_for_user_namespace
@nickname('activateBuildTrigger')
@validate_json_request('BuildTriggerActivateRequest')
def post(self, namespace_name, repo_name, trigger_uuid):
trigger = get_trigger(trigger_uuid)
handler = BuildTriggerHandler.get_handler(trigger)
if handler.is_active():
raise InvalidRequest('Trigger config is not sufficient for activation.')
user_permission = UserAdminPermission(trigger.connected_user.username)
if user_permission.can():
pull_robot_name = request.get_json().get('pull_robot', None)
if pull_robot_name:
try:
pull_robot = model.user.lookup_robot(pull_robot_name)
except model.InvalidRobotException:
raise NotFound()
(robot_namespace, _) = parse_robot_username(pull_robot_name)
if (not AdministerOrganizationPermission(robot_namespace).can()):
raise Unauthorized()
if (robot_namespace != namespace_name):
raise Unauthorized()
trigger.pull_robot = pull_robot
new_config_dict = request.get_json()['config']
write_token_name = ('Build Trigger: %s' % trigger.service.name)
write_token = model.token.create_delegate_token(namespace_name, repo_name, write_token_name, 'write')
try:
path = url_for('webhooks.build_trigger_webhook', trigger_uuid=trigger.uuid)
authed_url = _prepare_webhook_url(app.config['PREFERRED_URL_SCHEME'], '$token', write_token.get_code(), app.config['SERVER_HOSTNAME'], path)
handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
(final_config, private_config) = handler.activate(authed_url)
if ('private_key' in private_config):
trigger.secure_private_key = DecryptedValue(private_config['private_key'])
except TriggerException as exc:
write_token.delete_instance()
raise request_error(message=str(exc))
update_build_trigger(trigger, final_config, write_token=write_token)
repo = model.repository.get_repository(namespace_name, repo_name)
log_action('setup_repo_trigger', namespace_name, {'repo': repo_name, 'namespace': namespace_name, 'trigger_id': trigger.uuid, 'service': trigger.service.name, 'pull_robot': (trigger.pull_robot.username if trigger.pull_robot else None), 'config': final_config}, repo=repo)
return trigger_view(trigger, can_admin=True)
else:
raise Unauthorized() |
def prep_utt2label(utt2labelid_path, label_id_map_path, utt2label_paths):
print('generating utt2labelid(%s) and label_id_map(%s) from utt2labels(%s)' % (utt2labelid_path, label_id_map_path, utt2label_paths))
utt_list = []
label_list = []
for utt2label_path in utt2label_paths:
with open(utt2label_path) as f:
(_utt_list, _label_list) = zip(*[line.rstrip().split() for line in f])
utt_list += _utt_list
label_list += _label_list
if (len(utt_list) != len(np.unique(utt_list))):
raise ValueError(('duplicated utt detected! check %s first' % (utt2label_paths,)))
utt2label = OrderedDict(zip(utt_list, label_list))
(_, idx) = np.unique(list(utt2label.values()), return_index=True)
unique_labels = np.array(list(utt2label.values()))[np.sort(idx)]
label_id_map = dict(zip(unique_labels, (np.arange(len(unique_labels)) + 1)))
with open(label_id_map_path, 'w') as f:
for k in unique_labels:
f.write(('%s %s\n' % (k, label_id_map[k])))
with open(utt2labelid_path, 'w') as f:
for (utt, label) in utt2label.items():
f.write(('%s %s\n' % (utt, label_id_map[label]))) |
class InputFeatures(object):
def __init__(self, input_ids_spc, input_mask, segment_ids, label_id, polarities=None, valid_ids=None, label_mask=None):
self.input_ids_spc = input_ids_spc
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
self.polarities = polarities |
class TestRegister(TestScript):
handlers = RAPIDSMS_HANDLERS
def testRegister(self):
self.assertInteraction('\n > register as someuser\n < Thank you for registering, as someuser!\n ')
def testLang(self):
self.assertInteraction(('\n > lang english\n < %s\n > register as someuser\n < Thank you for registering, as someuser!\n > lang english\n < I will speak to you in English.\n > lang klingon\n < Sorry, I don\'t speak "klingon".\n ' % ('You must JOIN or REGISTER yourself before you can set ' + 'your language preference.')))
def testHelp(self):
self.assertInteraction('\n > lang\n < To set your language, send LANGUAGE <CODE>\n > register\n < To register, send JOIN <NAME>\n ') |
class ModelSpec_Modified(object):
def __init__(self, matrix, ops, data_format='channels_last'):
if (not isinstance(matrix, np.ndarray)):
matrix = np.array(matrix)
shape = np.shape(matrix)
if ((len(shape) != 2) or (shape[0] != shape[1])):
raise ValueError('matrix must be square')
if (shape[0] != len(ops)):
raise ValueError('length of ops must match matrix dimensions')
if (not is_upper_triangular(matrix)):
raise ValueError('matrix must be upper triangular')
self.original_matrix = copy.deepcopy(matrix)
self.original_ops = copy.deepcopy(ops)
self.matrix = copy.deepcopy(matrix)
self.ops = copy.deepcopy(ops)
self.valid_spec = True
self.data_format = data_format
def hash_spec(self, canonical_ops):
labeling = (([(- 1)] + [canonical_ops.index(op) for op in self.ops[1:(- 1)]]) + [(- 2)])
return graph_util.hash_module(self.matrix, labeling) |
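# Hedged construction sketch for ModelSpec_Modified above: assumes the
# NAS-Bench-101 style helpers it relies on (is_upper_triangular, graph_util)
# are importable, and uses a tiny 3-node cell: input -> conv3x3 -> output.
matrix = [[0, 1, 0],
          [0, 0, 1],
          [0, 0, 0]]
ops = ['input', 'conv3x3-bn-relu', 'output']
spec = ModelSpec_Modified(matrix, ops)
print(spec.hash_spec(['conv3x3-bn-relu', 'conv1x1-bn-relu', 'maxpool3x3'])) |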
def TVRegDiffPoint(u, dx, index=None):
u = u.flatten()
n = len(u)
if (index is None):
index = int(((n - 1) / 2))
ux = TVRegDiff(u, 1, 0.1, dx=dx, plotflag=False, diffkernel='sq')
uxx = TVRegDiff(ux, 1, 0.1, dx=dx, plotflag=False, diffkernel='sq')
return (ux[index], uxx[index]) |
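# Hedged usage sketch for TVRegDiffPoint above: assumes the TVRegDiff helper
# it calls (tvregdiff-style total-variation differentiation) is importable.
# On noisy samples of x**2 the midpoint estimates should land near (0, 2).
import numpy as np

dx = 0.01
x = np.arange(-1, 1, dx)
u = x ** 2 + np.random.normal(scale=0.001, size=x.shape)
(ux0, uxx0) = TVRegDiffPoint(u, dx)  # evaluated at the middle sample, x ~ 0
print(ux0, uxx0)  # expect roughly 0 and 2 |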
def register_onchain_secret_endstate(end_state: NettingChannelEndState, secret: Secret, secrethash: SecretHash, secret_reveal_block_number: BlockNumber, delete_lock: bool=True) -> None:
pending_lock: Optional[HashTimeLockState] = None
if is_lock_locked(end_state, secrethash):
pending_lock = end_state.secrethashes_to_lockedlocks[secrethash]
if (secrethash in end_state.secrethashes_to_unlockedlocks):
pending_lock = end_state.secrethashes_to_unlockedlocks[secrethash].lock
if pending_lock:
if (pending_lock.expiration < secret_reveal_block_number):
return
if delete_lock:
_del_lock(end_state, secrethash)
end_state.secrethashes_to_onchain_unlockedlocks[secrethash] = UnlockPartialProofState(pending_lock, secret) |
@pytest.mark.supported(only_if=(lambda backend: backend.cipher_supported(algorithms._CAST5Internal((b'\x00' * 16)), modes.ECB())), skip_message='Does not support CAST5 ECB')
class TestCAST5ModeECB():
test_ecb = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', 'CAST5'), ['cast5-ecb.txt'], (lambda key, **kwargs: algorithms._CAST5Internal(binascii.unhexlify(key))), (lambda **kwargs: modes.ECB())) |
def test_smartdevice_examples(mocker):
p = asyncio.run(get_device_for_file('HS110(EU)_1.0_1.2.5.json', 'IOT'))
mocker.patch('kasa.smartdevice.SmartDevice', return_value=p)
mocker.patch('kasa.smartdevice.SmartDevice.update')
res = xdoctest.doctest_module('kasa.smartdevice', 'all')
assert (not res['failed']) |
class ChatAction(StringEnum):
__slots__ = ()
CHOOSE_STICKER = 'choose_sticker'
FIND_LOCATION = 'find_location'
RECORD_VOICE = 'record_voice'
RECORD_VIDEO = 'record_video'
RECORD_VIDEO_NOTE = 'record_video_note'
TYPING = 'typing'
UPLOAD_VOICE = 'upload_voice'
UPLOAD_DOCUMENT = 'upload_document'
UPLOAD_PHOTO = 'upload_photo'
UPLOAD_VIDEO = 'upload_video'
UPLOAD_VIDEO_NOTE = 'upload_video_note' |
def dependency_pyserial(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if (not is_usable()):
raise RuntimeError('Printing with Serial requires the pyserial library to be installed. Please refer to the documentation on what to install and install the dependencies for pyserial.')
return func(*args, **kwargs)
return wrapper |
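# Usage sketch for the dependency_pyserial guard above: is_usable() is the
# module's own availability probe (assumed to be defined alongside it).
@dependency_pyserial
def print_over_serial(data):
    ...  # would talk to the serial port here

print_over_serial(b'x')  # raises RuntimeError when pyserial is unavailable |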
class LEDTokenizer(BartTokenizer):
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def _pad(self, encoded_inputs: Union[(Dict[(str, EncodedInput)], BatchEncoding)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
encoded_inputs = super()._pad(encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
if (return_attention_mask is None):
return_attention_mask = ('attention_mask' in self.model_input_names)
if (return_attention_mask and ('global_attention_mask' in encoded_inputs)):
required_input = encoded_inputs[self.model_input_names[0]]
needs_to_be_padded = (len(encoded_inputs['global_attention_mask']) != len(required_input))
if needs_to_be_padded:
difference = (len(required_input) - len(encoded_inputs['global_attention_mask']))
if (self.padding_side == 'right'):
encoded_inputs['global_attention_mask'] = (encoded_inputs['global_attention_mask'] + ([(- 1)] * difference))
elif (self.padding_side == 'left'):
encoded_inputs['global_attention_mask'] = (([(- 1)] * difference) + encoded_inputs['global_attention_mask'])
else:
raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
return encoded_inputs |
class Discriminator(nn.Module):
def __init__(self, args):
super(Discriminator, self).__init__()
in_channels = args.n_colors
out_channels = 64
depth = 7
def _block(_in_channels, _out_channels, stride=1):
return nn.Sequential(nn.Conv2d(_in_channels, _out_channels, 3, padding=1, stride=stride, bias=False), nn.BatchNorm2d(_out_channels), nn.LeakyReLU(negative_slope=0.2, inplace=True))
m_features = [_block(in_channels, out_channels)]
for i in range(depth):
in_channels = out_channels
if ((i % 2) == 1):
stride = 1
out_channels *= 2
else:
stride = 2
m_features.append(_block(in_channels, out_channels, stride=stride))
patch_size = (args.patch_size // (2 ** ((depth + 1) // 2)))
m_classifier = [nn.Linear((out_channels * (patch_size ** 2)), 1024), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Linear(1024, 1)]
self.features = nn.Sequential(*m_features)
self.classifier = nn.Sequential(*m_classifier)
def forward(self, x):
features = self.features(x)
output = self.classifier(features.view(features.size(0), (- 1)))
return output |
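# Hedged shape check for the Discriminator above: assumes torch is installed
# and that args only needs n_colors and patch_size here; patch_size must be
# divisible by 16 because the four stride-2 blocks downsample by 2**4.
from types import SimpleNamespace
import torch

args = SimpleNamespace(n_colors=3, patch_size=96)
d = Discriminator(args)
scores = d(torch.randn(2, 3, 96, 96))
print(scores.shape)  # torch.Size([2, 1]) |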
class AdaptedMethod():
def __init__(self, declared_method, arg_names=[], kwarg_name_map={}):
self.declared_method = declared_method
self.arg_names = arg_names
self.kwarg_name_map = kwarg_name_map
self.kwarg_name_map_reversed = dict([(sent_name, adapted_to_name) for (adapted_to_name, sent_name) in kwarg_name_map.items()])
self.full_arg_names = arg_names
self.full_kwarg_names = kwarg_name_map.values()
def set_full_arg_names(self, full_arg_names, full_kwarg_names=[]):
self.full_arg_names = full_arg_names
self.full_kwarg_names = full_kwarg_names
def __call__(self, *args, **kwargs):
args_to_send = self.get_args_to_send(args)
kwargs_to_send = self.get_kwargs_to_send(kwargs)
return self.declared_method(*args_to_send, **kwargs_to_send)
def get_args_to_send(self, args_received):
args_to_send = []
for name in self.arg_names:
i = self.full_arg_names.index(name)
args_to_send.append(args_received[i])
return args_to_send
def get_kwargs_to_send(self, kwargs_received):
kwargs_to_send = {}
for (name, value) in kwargs_received.items():
if (name in self.kwarg_name_map_reversed):
kwargs_to_send[self.kwarg_name_map_reversed[name]] = value
return kwargs_to_send |
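# Usage sketch for AdaptedMethod above (pure Python): adapt a call site that
# sends (y, x, scale=...) onto a method declared as (a, b, factor=...).
def scale_sum(a, b, factor=1):
    return (a + b) * factor

adapted = AdaptedMethod(scale_sum, arg_names=['x', 'y'], kwarg_name_map={'factor': 'scale'})
adapted.set_full_arg_names(['y', 'x'])  # the caller sends args in (y, x) order
assert adapted(3, 2, scale=10) == 50    # reordered to (2, 3); scale -> factor |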
class Organization(models.Model):
name = models.CharField(max_length=200, verbose_name=gettext_lazy('Name'), unique=True, null=False, blank=False)
default_template = models.ForeignKey('PetitionTemplate', blank=True, null=True, related_name='+', verbose_name=gettext_lazy('Default petition template'), to_field='id', on_delete=models.SET_NULL)
slugname = models.SlugField(max_length=200, unique=True)
members = models.ManyToManyField(PytitionUser, through='Permission')
def is_last_admin(self, user):
perms = Permission.objects.filter(can_modify_permissions=True, organization=self)
if (perms.count() > 1):
return False
elif (perms.count() == 1):
if (perms.first().user == user):
return True
else:
return False
else:
return True
def is_allowed_to(self, user, right):
try:
perm = Permission.objects.get(organization=self, user=user)
except Permission.DoesNotExist:
return False
else:
return getattr(perm, right)
def __str__(self):
return self.name
def __repr__(self):
return '< {} >'.format(self.name)
def owners(self):
return self.members.filter(permission__can_modify_permissions=True)
def kind(self):
return 'org'
def fullname(self):
return self.name
def save(self, *args, **kwargs):
if (not self.slugname):
self.slugname = slugify(self.name)
super(Organization, self).save(*args, **kwargs) |
class GetChatHistory():
async def get_chat_history(self: 'pyrogram.Client', chat_id: Union[(int, str)], limit: int=0, offset: int=0, offset_id: int=0, offset_date: datetime=utils.zero_datetime()) -> Optional[AsyncGenerator[('types.Message', None)]]:
current = 0
total = (limit or ((1 << 31) - 1))
limit = min(100, total)
while True:
messages = (await get_chunk(client=self, chat_id=chat_id, limit=limit, offset=offset, from_message_id=offset_id, from_date=offset_date))
if (not messages):
return
offset_id = messages[(- 1)].id
for message in messages:
(yield message)
current += 1
if (current >= total):
return |
class HSTISR(IntEnum):
DCONNI = (1 << 0)
DDISCI = (1 << 1)
RSTI = (1 << 2)
RSMEDI = (1 << 3)
RXRSMI = (1 << 4)
HSOFI = (1 << 5)
HWUPI = (1 << 6)
PEP_0 = (1 << 8)
PEP_1 = (1 << 9)
PEP_2 = (1 << 10)
PEP_3 = (1 << 11)
PEP_4 = (1 << 12)
PEP_5 = (1 << 13)
PEP_6 = (1 << 14)
PEP_7 = (1 << 15)
PEP_8 = (1 << 16)
PEP_9 = (1 << 17)
DMA_1 = (1 << 25)
DMA_2 = (1 << 26)
DMA_3 = (1 << 27)
DMA_4 = (1 << 28)
DMA_5 = (1 << 29)
DMA_6 = (1 << 30) |
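# Usage sketch for the HSTISR flags above: an interrupt-status word is
# decoded by masking with the enum members.
status = HSTISR.DCONNI | HSTISR.PEP_0 | HSTISR.DMA_1
assert status & HSTISR.PEP_0
assert not (status & HSTISR.RSTI)
print([flag.name for flag in HSTISR if status & flag])  # ['DCONNI', 'PEP_0', 'DMA_1'] |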
class ReportType(str, Enum):
INVENTORY = '_GET_FLAT_FILE_OPEN_LISTINGS_DATA_'
ALL_LISTINGS = '_GET_MERCHANT_LISTINGS_ALL_DATA_'
ACTIVE_LISTINGS = '_GET_MERCHANT_LISTINGS_DATA_'
INACTIVE_LISTINGS = '_GET_MERCHANT_LISTINGS_INACTIVE_DATA_'
OPEN_LISTINGS = '_GET_MERCHANT_LISTINGS_DATA_BACK_COMPAT_'
OPEN_LISTINGS_LITE = '_GET_MERCHANT_LISTINGS_DATA_LITE_'
OPEN_LISTINGS_LITER = '_GET_MERCHANT_LISTINGS_DATA_LITER_'
CANCELED_LISTINGS = '_GET_MERCHANT_CANCELLED_LISTINGS_DATA_'
SOLD_LISTINGS = '_GET_CONVERGED_FLAT_FILE_SOLD_LISTINGS_DATA_'
LISTING_QUALITY_AND_SUPPRESSED = '_GET_MERCHANT_LISTINGS_DEFECT_DATA_'
PAN_EUROPEAN_ELIGIBILITY_FBA_ASINS = '_GET_PAN_EU_OFFER_STATUS_'
PAN_EUROPEAN_ELIGIBILITY_SELF_FULFILLED_ASINS = '_GET_MFN_PAN_EU_OFFER_STATUS_'
GLOBAL_EXPANSION_OPPORTUNITIES = '_GET_FLAT_FILE_GEO_OPPORTUNITIES_'
REFERRAL_FEE_PREVIEW = '_GET_REFERRAL_FEE_PREVIEW_REPORT_'
ORDERS_UNSHIPPED = '_GET_FLAT_FILE_ACTIONABLE_ORDER_DATA_'
ORDERS_SCHEDULED_XML = '_GET_ORDERS_DATA_'
ORDERS_REQUESTED_OR_SCHEDULED = '_GET_FLAT_FILE_ORDERS_DATA_'
ORDERS_CONVERGED = '_GET_CONVERGED_FLAT_FILE_ORDER_REPORT_DATA_'
TRACKING_BY_LAST_UPDATE = '_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_LAST_UPDATE_'
TRACKING_BY_ORDER_DATE = '_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_'
TRACKING_ARCHIVED_ORDERS_FLATFILE = '_GET_FLAT_FILE_ARCHIVED_ORDERS_DATA_BY_ORDER_DATE_'
TRACKING_BY_LAST_UPDATE_XML = '_GET_XML_ALL_ORDERS_DATA_BY_LAST_UPDATE_'
TRACKING_BY_ORDER_DATE_XML = '_GET_XML_ALL_ORDERS_DATA_BY_ORDER_DATE_'
PENDING_ORDERS_FLAT_FILE = '_GET_FLAT_FILE_PENDING_ORDERS_DATA_'
PENDING_ORDERS_XML = '_GET_PENDING_ORDERS_DATA_'
PENDING_ORDERS_CONVERGED_FLAT_FILE = '_GET_CONVERGED_FLAT_FILE_PENDING_ORDERS_DATA_'
RETURNS_XML_DATA_BY_RETURN_DATE = '_GET_XML_RETURNS_DATA_BY_RETURN_DATE_'
RETURNS_FLAT_FILE_RETURNS_DATA_BY_RETURN_DATE = '_GET_FLAT_FILE_RETURNS_DATA_BY_RETURN_DATE_'
RETURNS_XML_MFN_PRIME_RETURNS_REPORT = '_GET_XML_MFN_PRIME_RETURNS_REPORT_'
RETURNS_CSV_MFN_PRIME_RETURNS_REPORT = '_GET_CSV_MFN_PRIME_RETURNS_REPORT_'
RETURNS_XML_MFN_SKU_RETURN_ATTRIBUTES_REPORT = '_GET_XML_MFN_SKU_RETURN_ATTRIBUTES_REPORT_'
RETURNS_FLAT_FILE_MFN_SKU_RETURN_ATTRIBUTES_REPORT = '_GET_FLAT_FILE_MFN_SKU_RETURN_ATTRIBUTES_REPORT_'
PERFORMANCE_FEEDBACK = '_GET_SELLER_FEEDBACK_DATA_'
PERFORMANCE_CUSTOMER_METRICS_XML = '_GET_V1_SELLER_PERFORMANCE_REPORT_'
SETTLEMENT_FLATFILE = '_GET_V2_SETTLEMENT_REPORT_DATA_FLAT_FILE_'
SETTLEMENT_V2_XML = '_GET_V2_SETTLEMENT_REPORT_DATA_XML_'
SETTLEMENT_V2_FLATFILE = '_GET_V2_SETTLEMENT_REPORT_DATA_FLAT_FILE_V2_'
FBA_SALES_AMAZON_FULFILLED = '_GET_AMAZON_FULFILLED_SHIPMENTS_DATA_'
FBA_SALES_ALL_LAST_UPDATE = '_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_LAST_UPDATE_'
FBA_SALES_ALL_BY_ORDER_DATE = '_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_'
FBA_SALES_ALL_BY_LAST_UPDATE_XML = '_GET_XML_ALL_ORDERS_DATA_BY_LAST_UPDATE_'
FBA_SALES_ALL_BY_ORDER_DATE_XML = '_GET_XML_ALL_ORDERS_DATA_BY_ORDER_DATE_'
FBA_SALES_CUSTOMER_SHIPMENT = '_GET_FBA_FULFILLMENT_CUSTOMER_SHIPMENT_SALES_DATA_'
FBA_SALES_PROMOTIONS = '_GET_FBA_FULFILLMENT_CUSTOMER_SHIPMENT_PROMOTION_DATA_'
FBA_SALES_CUSTOMER_TAXES = '_GET_FBA_FULFILLMENT_CUSTOMER_TAXES_DATA_'
FBA_SALES_REMOTE_FULFILLMENT_ELIGIBILITY = '_GET_REMOTE_FULFILLMENT_ELIGIBILITY_'
FBA_INVENTORY_AFN = '_GET_AFN_INVENTORY_DATA_'
FBA_INVENTORY_AFN_BY_COUNTRY = '_GET_AFN_INVENTORY_DATA_BY_COUNTRY_'
FBA_INVENTORY_HISTORY_DAILY = '_GET_FBA_FULFILLMENT_CURRENT_INVENTORY_DATA_'
FBA_INVENTORY_HISTORY_MONTHLY = '_GET_FBA_FULFILLMENT_MONTHLY_INVENTORY_DATA_'
FBA_INVENTORY_RECEIVED = '_GET_FBA_FULFILLMENT_INVENTORY_RECEIPTS_DATA_'
FBA_INVENTORY_RESERVED = '_GET_RESERVED_INVENTORY_DATA_'
FBA_INVENTORY_EVENT_DETAIL = '_GET_FBA_FULFILLMENT_INVENTORY_SUMMARY_DATA_'
FBA_INVENTORY_ADJUSTMENTS = '_GET_FBA_FULFILLMENT_INVENTORY_ADJUSTMENTS_DATA_'
FBA_INVENTORY_HEALTH = '_GET_FBA_FULFILLMENT_INVENTORY_HEALTH_DATA_'
FBA_INVENTORY_MANAGE_ACTIVE = '_GET_FBA_MYI_UNSUPPRESSED_INVENTORY_DATA_'
FBA_INVENTORY_MANAGE_ALL = '_GET_FBA_MYI_ALL_INVENTORY_DATA_'
FBA_INVENTORY_RESTOCK_INVENTORY = '_GET_RESTOCK_INVENTORY_RECOMMENDATIONS_REPORT_'
FBA_INVENTORY_CROSS_BORDER_MOVEMENT = '_GET_FBA_FULFILLMENT_CROSS_BORDER_INVENTORY_MOVEMENT_DATA_'
FBA_INVENTORY_INBOUND_PERFORMANCE = '_GET_FBA_FULFILLMENT_INBOUND_NONCOMPLIANCE_DATA_'
FBA_INVENTORY_STRANDED = '_GET_STRANDED_INVENTORY_UI_DATA_'
FBA_INVENTORY_BULK_FIX_STRANDED = '_GET_STRANDED_INVENTORY_LOADER_DATA_'
FBA_INVENTORY_AGE = '_GET_FBA_INVENTORY_AGED_DATA_'
FBA_INVENTORY_EXCESS = '_GET_EXCESS_INVENTORY_DATA_'
FBA_INVENTORY_STORAGE_FEE_CHARGES = '_GET_FBA_STORAGE_FEE_CHARGES_DATA_'
FBA_INVENTORY_PRODUCT_EXCHANGE = '_GET_PRODUCT_EXCHANGE_DATA_'
FBA_PAYMENTS_FEE_PREVIEW = '_GET_FBA_ESTIMATED_FBA_FEES_TXT_DATA_'
FBA_PAYMENTS_REIMBURSEMENTS = '_GET_FBA_REIMBURSEMENTS_DATA_'
FBA_PAYMENTS_LONGTERM_STORAGE_FEE_CHARGES = '_GET_FBA_FULFILLMENT_LONGTERM_STORAGE_FEE_CHARGES_DATA_'
FBA_CONCESSION_RETURNS = '_GET_FBA_FULFILLMENT_CUSTOMER_RETURNS_DATA_'
FBA_CONCESSION_SHIPMENT_REPLACEMENT = '_GET_FBA_FULFILLMENT_CUSTOMER_SHIPMENT_REPLACEMENT_DATA_'
FBA_REMOVAL_RECOMMENDED = '_GET_FBA_RECOMMENDED_REMOVAL_DATA_'
FBA_REMOVAL_ORDER_DETAIL = '_GET_FBA_FULFILLMENT_REMOVAL_ORDER_DETAIL_DATA_'
FBA_REMOVAL_SHIPMENT_DETAIL = '_GET_FBA_FULFILLMENT_REMOVAL_SHIPMENT_DETAIL_DATA_'
FBA_SMALL_LIGHT_INVENTORY = '_GET_FBA_UNO_INVENTORY_DATA_'
SALES_TAX = '_GET_FLAT_FILE_SALES_TAX_DATA_'
VAT_CALCULATION = '_SC_VAT_TAX_REPORT_'
VAT_TRANSACTIONS = '_GET_VAT_TRANSACTION_DATA_'
TAX_GST_MERCHANT_B2B = '_GET_GST_MTR_B2B_CUSTOM_'
TAX_GST_MERCHANT_B2C = '_GET_GST_MTR_B2C_CUSTOM_'
BROWSE_TREE = '_GET_XML_BROWSE_TREE_DATA_'
EASYSHIP_DOCUMENTS = '_GET_EASYSHIP_DOCUMENTS_'
EASYSHIP_PICKED_UP = '_GET_EASYSHIP_PICKEDUP_'
EASYSHIP_WAITING_FOR_PICKUP = '_GET_EASYSHIP_WAITING_FOR_PICKUP_'
AMZN_BUSINESS_FEE_DISCOUNTS_REPORT = '_FEE_DISCOUNTS_REPORT_'
AMZN_BUSINESS_RFQD_BULK_DOWNLOAD = '_RFQD_BULK_DOWNLOAD_'
AMAZONPAY_SANDBOX_SETTLEMENT = '_GET_FLAT_FILE_OFFAMAZONPAYMENTS_SANDBOX_SETTLEMENT_DATA_' |
class Import(ImportBase):
__slots__ = ('ids',)
__match_args__ = ('ids',)
ids: list[tuple[(str, (str | None))]]
def __init__(self, ids: list[tuple[(str, (str | None))]]) -> None:
super().__init__()
self.ids = ids
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import(self) |
class BaseResponseUnmarshaller(BaseResponseValidator, BaseUnmarshaller):
def _unmarshal(self, response: Response, operation: SchemaPath) -> ResponseUnmarshalResult:
try:
operation_response = self._find_operation_response(response.status_code, operation)
except ResponseFinderError as exc:
return ResponseUnmarshalResult(errors=[exc])
try:
validated_data = self._get_data(response.data, response.content_type, operation_response)
except DataValidationError as exc:
validated_data = None
data_errors = [exc]
else:
data_errors = []
try:
validated_headers = self._get_headers(response.headers, operation_response)
except HeadersError as exc:
validated_headers = exc.headers
headers_errors = exc.context
else:
headers_errors = []
errors = list(chainiters(data_errors, headers_errors))
return ResponseUnmarshalResult(errors=errors, data=validated_data, headers=validated_headers)
def _unmarshal_data(self, response: Response, operation: SchemaPath) -> ResponseUnmarshalResult:
try:
operation_response = self._find_operation_response(response.status_code, operation)
except ResponseFinderError as exc:
return ResponseUnmarshalResult(errors=[exc])
try:
validated = self._get_data(response.data, response.content_type, operation_response)
except DataValidationError as exc:
validated = None
data_errors = [exc]
else:
data_errors = []
return ResponseUnmarshalResult(errors=data_errors, data=validated)
def _unmarshal_headers(self, response: Response, operation: SchemaPath) -> ResponseUnmarshalResult:
try:
operation_response = self._find_operation_response(response.status_code, operation)
except ResponseFinderError as exc:
return ResponseUnmarshalResult(errors=[exc])
try:
validated = self._get_headers(response.headers, operation_response)
except HeadersError as exc:
validated = exc.headers
headers_errors = exc.context
else:
headers_errors = []
return ResponseUnmarshalResult(errors=headers_errors, headers=validated) |
def test_logq_globals(three_var_approx):
if (not three_var_approx.has_logq):
pytest.skip(('%s does not implement logq' % three_var_approx))
approx = three_var_approx
(logq, symbolic_logq) = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 1, 0)
e = logq.eval()
es = symbolic_logq.eval()
assert (e.shape == ())
assert (es.shape == (1,))
(logq, symbolic_logq) = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 2, 0)
e = logq.eval()
es = symbolic_logq.eval()
assert (e.shape == ())
assert (es.shape == (2,)) |
def test_stringify_file_id():
file_id = 'BQACAgIAAx0CAAGgr9AAAgmPX7b4UxbjNoFEO_L0I4s6wrXNJA8AAgQAA4GkuUm9FFvIaOhXWR4E'
string = "{'major': 4, 'minor': 30, 'file_type': <FileType.DOCUMENT: 5>, 'dc_id': 2, 'file_reference': b'\\x02\\x00\\xa0\\xaf\\xd0\\x00\\x00\\t\\x8f_\\xb6\\xf8S\\x16\\xe36\\x81D;\\xf2\\xf4#\\x8b:\\xc2\\xb5\\xcd$\\x0f', 'media_id': , 'access_hash': , 'thumbnail_size': ''}"
assert (str(FileId.decode(file_id)) == string) |
class SeqInfo():
def __init__(self, seq_path):
self.info = self.get_seq_info_data(seq_path)
def get_obj_name(self, convert=False):
if convert:
if ('chair' in self.info['cat']):
return 'chair'
if ('ball' in self.info['cat']):
return 'sports ball'
return self.info['cat']
def get_gender(self):
return self.info['gender']
def get_config(self):
return self.info['config']
def get_intrinsic(self):
return self.info['intrinsic']
def get_empty_dir(self):
return self.info['empty']
def beta_init(self):
return self.info['beta']
def kinect_count(self):
if ('kinects' in self.info):
return len(self.info['kinects'])
else:
return 3
def kids(self):
count = self.kinect_count()
return [i for i in range(count)]
def get_seq_info_data(self, seq):
info_file = join(seq, 'info.json')
data = json.load(open(info_file))
path_names = ['config', 'empty', 'intrinsic']
for name in path_names:
if (data[name] is not None):
data[name] = join(seq, data[name])
return data |
class RegistrationPendingForm(Form):
def __init__(self, view):
super().__init__(view, 'register_pending')
if self.exception:
self.add_child(Alert(view, self.exception.as_user_message(), 'warning'))
actions = self.add_child(ActionButtonGroup(view, legend_text=_('Re-send registration email')))
actions.add_child(Button(self, self.user_interface.account_management_interface.events.resend_event, style='primary')) |
class ResNeXt_with_features(nn.Module):
def __init__(self, block, num_blocks, cardinality, bottleneck_width, strides):
super().__init__()
self.cardinality = cardinality
self.bottleneck_width = bottleneck_width
self.in_channels = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self.make_layer(block, num_blocks[0], strides[0])
self.layer2 = self.make_layer(block, num_blocks[1], strides[1])
self.layer3 = self.make_layer(block, num_blocks[2], strides[2])
self.layer4 = self.make_layer(block, num_blocks[3], strides[3])
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
feature1 = self.layer1(out)
feature2 = self.layer2(feature1)
feature3 = self.layer3(feature2)
feature4 = self.layer4(feature3)
return (feature2, feature3, feature4)
def make_layer(self, block, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_channels, self.cardinality, self.bottleneck_width, stride))
self.in_channels = ((block.expansion * self.cardinality) * self.bottleneck_width)
self.bottleneck_width *= 2
return nn.Sequential(*layers) |
class GetCategoriesTests(MockPagesTestCase):
def test_get_root_categories(self):
result = utils.get_categories(BASE_PATH)
info = PARSED_CATEGORY_INFO
categories = {'category': info, 'tmp': info, 'not_a_page.md': info}
self.assertEqual(result, categories)
def test_get_categories_with_subcategories(self):
result = utils.get_categories(Path(BASE_PATH, 'category'))
self.assertEqual(result, {'subcategory': PARSED_CATEGORY_INFO})
def test_get_categories_without_subcategories(self):
result = utils.get_categories(Path(BASE_PATH, 'category/subcategory'))
self.assertEqual(result, {}) |
def _conversion_checks(item, keys, box_config, check_only=False, pre_check=False):
if (box_config['box_duplicates'] != 'ignore'):
if pre_check:
keys = (list(keys) + [item])
key_list = [(k, _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix'])) for k in keys]
if (len(key_list) > len(set((x[1] for x in key_list)))):
seen = set()
dups = set()
for x in key_list:
if (x[1] in seen):
dups.add('{0}({1})'.format(x[0], x[1]))
seen.add(x[1])
if box_config['box_duplicates'].startswith('warn'):
warnings.warn('Duplicate conversion attributes exist: {0}'.format(dups))
else:
raise BoxError('Duplicate conversion attributes exist: {0}'.format(dups))
if check_only:
return
for k in keys:
if (item == _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix'])):
return k |
def _del_unclaimed_lock(end_state: NettingChannelEndState, secrethash: SecretHash) -> None:
if (secrethash in end_state.secrethashes_to_lockedlocks):
del end_state.secrethashes_to_lockedlocks[secrethash]
if (secrethash in end_state.secrethashes_to_unlockedlocks):
del end_state.secrethashes_to_unlockedlocks[secrethash] |
def upgrade(op, tables, tester):
op.create_table('quotaregistrysize', sa.Column('id', sa.Integer(), nullable=False), sa.Column('size_bytes', sa.BigInteger(), nullable=False, server_default='0'), sa.Column('running', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()), sa.Column('queued', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()), sa.Column('completed_ms', sa.BigInteger(), nullable=True), sa.PrimaryKeyConstraint('id', name=op.f('pk_quotaregistrysizeid'))) |
@pytest.fixture
def completer_obj(qtbot, status_command_stub, config_stub, monkeypatch, stubs, completion_widget_stub):
monkeypatch.setattr(completer, 'QTimer', stubs.InstaTimer)
config_stub.val.completion.show = 'auto'
return completer.Completer(cmd=status_command_stub, win_id=0, parent=completion_widget_stub) |
def crf(train_image, final_probabilities, train_annotation, number_class):
for index_image in range(1):
image = train_image
softmax = final_probabilities[0].squeeze()
softmax = softmax.transpose((2, 0, 1))
unary = unary_from_softmax(softmax)
unary = np.ascontiguousarray(unary)
d = dcrf.DenseCRF((image.shape[0] * image.shape[1]), number_class)
d.setUnaryEnergy(unary)
feats = create_pairwise_gaussian(sdims=(10, 10), shape=image.shape[:2])
d.addPairwiseEnergy(feats, compat=3, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)
feats = create_pairwise_bilateral(sdims=(50, 50), schan=(20, 20, 20), img=image, chdim=2)
d.addPairwiseEnergy(feats, compat=10, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)
Q = d.inference(5)
res = np.argmax(Q, axis=0).reshape((image.shape[0], image.shape[1]))
cmap = plt.get_cmap('bwr')
(f, (ax1, ax2)) = plt.subplots(1, 2, sharey=True)
ax1.imshow(res, vmax=1.5, vmin=(- 0.4), cmap=cmap)
ax1.set_title('Segmentation with CRF post-processing')
probability_graph = ax2.imshow((np.dstack(((train_annotation,) * 3)) * 100))
ax2.set_title('Ground-Truth Annotation')
plt.savefig(('annotation_%d.png' % index_image), bbox_inches='tight', pad_inches=0)
plt.gcf().clear()
plt.show() |
def test_guard_against_oversized_packets():
zc = Zeroconf(interfaces=['127.0.0.1'])
generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
for i in range(5000):
generated.add_answer_at_time(r.DNSText(f'packet{i}.local.', const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), 500, b'path=/~paulsm/'), 0)
try:
with patch.object(outgoing, '_MAX_MSG_ABSOLUTE', 100000), patch.object(outgoing, '_MAX_MSG_TYPICAL', 100000):
over_sized_packet = generated.packets()[0]
assert (len(over_sized_packet) > const._MAX_MSG_ABSOLUTE)
except AttributeError:
zc.close()
return
generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
okpacket_record = r.DNSText('okpacket.local.', const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), 500, b'path=/~paulsm/')
generated.add_answer_at_time(okpacket_record, 0)
ok_packet = generated.packets()[0]
listener = _listener.AsyncListener(zc)
listener.transport = unittest.mock.MagicMock()
listener.datagram_received(ok_packet, ('127.0.0.1', const._MDNS_PORT))
assert (zc.cache.async_get_unique(okpacket_record) is not None)
listener.datagram_received(over_sized_packet, ('127.0.0.1', const._MDNS_PORT))
assert (zc.cache.async_get_unique(r.DNSText('packet0.local.', const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), 500, b'path=/~paulsm/')) is None)
logging.getLogger('zeroconf').setLevel(logging.INFO)
listener.datagram_received(over_sized_packet, ('::1', const._MDNS_PORT, 1, 1))
assert (zc.cache.async_get_unique(r.DNSText('packet0.local.', const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), 500, b'path=/~paulsm/')) is None)
zc.close() |
def parse_args_and_arch(parser: argparse.ArgumentParser, input_args: List[str]=None, parse_known: bool=False, suppress_defaults: bool=False, modify_parser: Optional[Callable[([argparse.ArgumentParser], None)]]=None):
    if suppress_defaults:
        # Parse twice: once normally, then with every default suppressed, keeping
        # only the values that were explicitly provided on the command line.
        args = parse_args_and_arch(parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False)
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for (k, v) in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        return argparse.Namespace(**{k: v for (k, v) in vars(args).items() if (v is not None)})
    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
    # Pre-parse --user-dir so user-defined modules are imported before the
    # registries below are consulted.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument('--user-dir', default=None)
    (usr_args, _) = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)
    if (modify_parser is not None):
        modify_parser(parser)
    # First pass: discover the chosen architecture, then add its model-specific args.
    (args, _) = parser.parse_known_args(input_args)
    if hasattr(args, 'arch'):
        model_specific_group = parser.add_argument_group('Model-specific configuration', argument_default=argparse.SUPPRESS)
        if (args.arch in ARCH_MODEL_REGISTRY):
            ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        elif (args.arch in MODEL_REGISTRY):
            MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        else:
            raise RuntimeError()
    # Add args for any other selected registry components (criterions, optimizers, ...).
    from fairseq.registry import REGISTRIES
    for (registry_name, REGISTRY) in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if (choice is not None):
            cls = REGISTRY['registry'][choice]
            if hasattr(cls, 'add_args'):
                cls.add_args(parser)
    if hasattr(args, 'task'):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, 'use_bmuf', False):
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)
    if (modify_parser is not None):
        modify_parser(parser)
    # Second pass: parse for real, now that every relevant argument is registered.
    if parse_known:
        (args, extra) = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    # Post-processing: fill in validation defaults and precision/TPU flags.
    if ((hasattr(args, 'batch_size_valid') and (args.batch_size_valid is None)) or (not hasattr(args, 'batch_size_valid'))):
        args.batch_size_valid = args.batch_size
    if (hasattr(args, 'max_tokens_valid') and (args.max_tokens_valid is None)):
        args.max_tokens_valid = 1024
    if getattr(args, 'memory_efficient_fp16', False):
        args.fp16 = True
    if getattr(args, 'memory_efficient_bf16', False):
        args.bf16 = True
    args.tpu = getattr(args, 'tpu', False)
    args.bf16 = getattr(args, 'bf16', False)
    if args.bf16:
        args.tpu = True
    if (args.tpu and args.fp16):
        raise ValueError('Cannot combine --fp16 and --tpu, use --bf16 on TPUs')
    if (getattr(args, 'seed', None) is None):
        args.seed = 1
        args.no_seed_provided = True
    else:
        args.no_seed_provided = False
    # Apply architecture-specific default configuration last.
    if (hasattr(args, 'arch') and (args.arch in ARCH_CONFIG_REGISTRY)):
        ARCH_CONFIG_REGISTRY[args.arch](args)
    if parse_known:
        return (args, extra)
    else:
        return args |
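# Hedged usage sketch (assumes fairseq is installed and that this function lives
# in fairseq.options, where get_training_parser() is its usual companion; the
# data directory and flag values below are placeholders, not real paths).
from fairseq import options

parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, input_args=['/path/to/data-bin', '--arch', 'transformer', '--task', 'translation'])
print(args.arch, args.seed, args.no_seed_provided) |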