code stringlengths 281 23.7M |
|---|
def setup_model_and_optimizer(model_provider_func):
    """Build the model, optimizer and LR scheduler, restoring a checkpoint if requested."""
    args = get_args()

    # Assemble the training triple from the provider callback.
    model = get_model(model_provider_func)
    optimizer = get_optimizer(model)
    lr_scheduler = get_learning_rate_scheduler(optimizer)

    # Resume from a checkpoint when a load path was supplied, otherwise start fresh.
    if args.load is not None:
        args.iteration = load_checkpoint(model, optimizer, lr_scheduler)
    else:
        args.iteration = 0

    # Peel off any wrapper layers (DDP/FP16-style `.module` chains).
    unwrapped_model = model
    while hasattr(unwrapped_model, 'module'):
        unwrapped_model = unwrapped_model.module

    # A fresh ICT model can bootstrap its weights from a pretrained BERT.
    if args.iteration == 0 and hasattr(unwrapped_model, 'init_state_dict_from_bert'):
        print('Initializing ICT from pretrained BERT model', flush=True)
        unwrapped_model.init_state_dict_from_bert()

    return model, optimizer, lr_scheduler
('view')
def view() -> None:
    """List all freshenv container environments with a running/stopped marker."""
    try:
        container_list = get_list_environments()
        if not container_list:
            print(':computer: No freshenv environments found.')
        for container in container_list:
            # Stopped containers carry 'Exited' in their Docker status string.
            marker = ':arrow_down: ' if 'Exited' in container.get('Status') else ':arrow_forward: '
            name_part = ('Name: [bold blue]' + container.get('Names')[0]) + '[/bold blue]'
            flavour_part = ('| Flavour: [bold blue]' + container.get('Image').split('/')[-1]) + '[/bold blue]'
            state_part = ('| State: [bold blue]' + container.get('Status')) + '[/bold blue]'
            print(marker, name_part, flavour_part, state_part)
    except errors.DockerException:
        print(':cross_mark_button: Docker not installed or running. ')
    except Exception as e:
        print(f'Unknown exception: {e}')
_small_list(immutable=True, unbox_num=True)
class W_ImpVectorStar(W_InterposeVector):
    """Impersonated vector whose reads/writes are filtered through handlers.

    NOTE(review): `self_arg` returning True suggests the vector* handler
    style (handlers receive the impersonator itself as an extra argument)
    — confirm against the shared interposition machinery.
    """
    import_from_mixin(ImpersonatorMixin)

    # Name used when reporting errors for this kind of impersonator.
    errorname = 'impersonate-vector'

    def self_arg(self):
        # Signal that handlers should receive the impersonator as an argument.
        return True

    def post_set_cont(self, new, i, env, cont, app=None):
        # After the set handler ran, perform the store on the wrapped vector.
        return imp_vec_set_cont(self.inner, i, app, env, cont)

    def post_ref_cont(self, i, env, cont, app=None):
        # After reading from the wrapped vector, filter the result through
        # the reference handler (self.refh).
        return impersonate_vector_reference_cont(self.refh, self, self.inner, i, app, env, cont)
class ConstantType(click.ParamType):
    """Click parameter type that validates and canonicalizes a constant SMILES."""

    name = 'SMILES'

    def convert(self, value, param, ctx):
        # Already-converted values pass through untouched.
        if not isinstance(value, str):
            return value
        try:
            mol, frags = parse_smiles_then_fragment(value)
            n_frags = len(frags)
            if n_frags < 1 or n_frags > 3:
                raise MolProcessingError('Constant SMILES must contain only 1, 2, or 3 fragments')
            sanitize_atom_properties(mol, frags, what=IS_CONSTANT)
        except MolProcessingError as err:
            # click's fail() raises, aborting the conversion here.
            self.fail(err.error_message, param, ctx)
        return ProcessedSmiles(Chem.MolToSmiles(mol))
(frozen=True)
class FakeCommand():
    """Stub command object used in tests.

    Mirrors the attributes of the real command class with benign defaults,
    so tests only override the fields they assert on.
    """
    name: str = ''
    desc: str = ''
    hide: bool = False
    debug: bool = False
    deprecated: bool = False
    completion: Any = None
    # NOTE(review): default is None, so the type is really Optional[int];
    # annotation left untouched to avoid assuming `Optional` is imported.
    maxsplit: int = None
    # Zero-argument predicate; defaults to "does not take a count".
    takes_count: Callable[([], bool)] = (lambda : False)
    # Variable-length tuple of key modes the command is available in.
    modes: Tuple[usertypes.KeyMode, ...] = (usertypes.KeyMode.normal,)
# NOTE(review): both decorators had lost their '@pytest.mark' prefix during
# extraction (bare '.parametrize(...)' lines are a syntax error); restored.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('issue_id', issues)
def test_update(db, client, username, password, issue_id):
    """PUT on the detail endpoint is rejected: 405 if authenticated, 401 if not."""
    client.login(username=username, password=password)
    url = reverse(urlnames['detail'], args=[issue_id])
    data = {}
    response = client.put(url, data, content_type='application/json')
    if password:
        # Authenticated users reach the view but the method is not allowed.
        assert (response.status_code == 405)
    else:
        # Rows with an empty password model anonymous users.
        assert (response.status_code == 401)
def my_syscall_write(ql: Qiling, fd: int, buf: int, count: int):
    """Custom write(2) syscall hook.

    Reads `count` bytes of emulated memory at `buf` and forwards them to
    the file object registered for `fd`.  Returns `count` on success and
    -1 on any failure (bad fd, unreadable memory, write error), mirroring
    the errno-style convention of the real syscall.
    """
    try:
        data = ql.mem.read(buf, count)
        fobj = ql.os.fd[fd]
        # Some fd slots hold objects without a write() (e.g. None); skip those.
        if hasattr(fobj, 'write'):
            fobj.write(data)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed so those propagate.
        ret = -1
    else:
        ret = count
    ql.log.info(f'my_syscall_write({fd}, {buf:#x}, {count}) = {ret}')
    return ret
class SeqEntityScore(object):
    """Accumulates gold/predicted entity spans and reports P/R/F1 per type and overall."""

    def __init__(self, id2label, markup='bio'):
        self.id2label = id2label
        self.markup = markup
        self.reset()

    def reset(self):
        """Drop all accumulated spans."""
        self.origins = []   # gold entities
        self.founds = []    # predicted entities
        self.rights = []    # predictions that exactly match a gold entity

    def compute(self, origin, found, right):
        """Return (recall, precision, f1) for the given counts, guarding divide-by-zero."""
        recall = right / origin if origin else 0
        precision = right / found if found else 0
        f1 = (2 * precision * recall) / (precision + recall) if (precision + recall) else 0.0
        return recall, precision, f1

    def result(self):
        """Return (overall_metrics, per_type_metrics) over everything accumulated."""
        class_info = {}
        origin_counter = Counter(entity[0] for entity in self.origins)
        found_counter = Counter(entity[0] for entity in self.founds)
        right_counter = Counter(entity[0] for entity in self.rights)
        for type_, origin in origin_counter.items():
            found = found_counter.get(type_, 0)
            right = right_counter.get(type_, 0)
            recall, precision, f1 = self.compute(origin, found, right)
            class_info[type_] = {
                'acc': round(precision, 4),
                'recall': round(recall, 4),
                'f1': round(f1, 4),
            }
        recall, precision, f1 = self.compute(len(self.origins), len(self.founds), len(self.rights))
        return {'acc': precision, 'recall': recall, 'f1': f1}, class_info

    def update(self, label_paths, pred_paths):
        """Decode entities from gold and predicted tag sequences and accumulate them."""
        for label_path, pred_path in zip(label_paths, pred_paths):
            label_entities = get_entities(label_path, self.id2label, self.markup)
            pred_entities = get_entities(pred_path, self.id2label, self.markup)
            self.origins.extend(label_entities)
            self.founds.extend(pred_entities)
            self.rights.extend(entity for entity in pred_entities if entity in label_entities)
_REGISTRY.register()
class bjzBlack(bjzStation, ImageDataset):
    """bjzStation variant evaluated on the 'black' general re-ID benchmark split.

    The train list passed to ImageDataset is empty — this dataset is
    evaluation-only.
    """
    dataset_name = 'bjzblack'

    def __init__(self, root='datasets'):
        self.root = root
        # NOTE(review): reads `self.dataset_dir` before assigning it, so a
        # parent class must provide the initial value — confirm bjzStation does.
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.query_dir = osp.join(self.dataset_dir, 'benchmark/black_general_reid/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'benchmark/black_general_reid/gallery')
        # process_test is presumably inherited from bjzStation — TODO confirm.
        (query, gallery) = self.process_test(self.query_dir, self.gallery_dir)
        ImageDataset.__init__(self, [], query, gallery)
class ArchX86(Arch):
    """Architecture description for 32-bit x86."""

    def __init__(self):
        super().__init__()

    def arch_insn_size(self):
        """Maximum encoded length of a single x86 instruction, in bytes."""
        return 15

    def regs(self):
        """Register names exposed by this architecture."""
        return ('eax', 'ebx', 'ecx', 'edx', 'esp', 'ebp', 'esi', 'edi', 'eip',
                'ss', 'cs', 'ds', 'es', 'fs', 'gs', 'eflags')

    def read_insn(self, address: int) -> bytes:
        """Read the maximal instruction window starting at `address`.

        Bug fix: the original passed the bound method `self.arch_insn_size`
        itself (not its result) as the length argument.
        """
        return self.read_mem(address, self.arch_insn_size())

    @staticmethod
    def get_flags(bits: int) -> Mapping[(str, bool)]:
        """Decode x86 EFLAGS bits into a flag-name -> bool mapping.

        Bug fix: this was declared without `self`, so any instance call
        raised TypeError; made an explicit @staticmethod so it works on
        both the class and instances.
        """
        return {'CF': ((bits & 1) != 0), 'PF': ((bits & 4) != 0), 'AF': ((bits & 16) != 0), 'ZF': ((bits & 64) != 0), 'SF': ((bits & 128) != 0), 'OF': ((bits & 2048) != 0)}
class FakeNetCDF4FileHandlerMimicLow(FakeNetCDF4FileHandler):
    """Fake NetCDF4 file handler producing canned low-resolution MIMIC TPW content."""

    def get_test_content(self, filename, filename_info, filetype_info):
        """Build the fake content dict for the 'mimicTPW2_comp' file type.

        Array variables are wrapped in xarray DataArrays (2-D arrays get
        ('y', 'x') dims plus per-variable attrs).  Raises AssertionError
        for any other file type.
        """
        dt_s = filename_info.get('start_time', DEFAULT_DATE)
        dt_e = filename_info.get('end_time', DEFAULT_DATE)
        if filetype_info['file_type'] == 'mimicTPW2_comp':
            file_content = {
                '/attr/start_time': dt_s.strftime('%Y%m%d.%H%M%S'),
                '/attr/end_time': dt_e.strftime('%Y%m%d.%H%M%S'),
                '/attr/platform_shortname': 'aggregated microwave',
                '/attr/sensor': 'mimic',
            }
            # Bug fix: `file_content_attr` was used below without ever being
            # initialized, raising NameError on the first per-variable
            # attribute assignment.
            file_content_attr = {}
            file_content['latArr'] = DEFAULT_LAT
            file_content['latArr/shape'] = (DEFAULT_FILE_SHAPE[0],)
            file_content['latArr/attr/units'] = 'degress_north'
            file_content['lonArr'] = DEFAULT_LON
            file_content['lonArr/shape'] = (DEFAULT_FILE_SHAPE[1],)
            file_content['lonArr/attr/units'] = 'degrees_east'
            file_content['/dimension/lat'] = DEFAULT_FILE_SHAPE[0]
            file_content['/dimension/lon'] = DEFAULT_FILE_SHAPE[1]
            for float_var in float_variables:
                file_content[float_var] = DEFAULT_FILE_FLOAT_DATA.reshape(DEFAULT_FILE_SHAPE)
                file_content['{}/shape'.format(float_var)] = DEFAULT_FILE_SHAPE
                file_content_attr[float_var] = {'units': 'mm'}
            for date_var in date_variables:
                file_content[date_var] = DEFAULT_FILE_DATE_DATA.reshape(DEFAULT_FILE_SHAPE)
                file_content['{}/shape'.format(date_var)] = DEFAULT_FILE_SHAPE
                file_content_attr[date_var] = {'units': 'minutes'}
            for ubyte_var in ubyte_variables:
                file_content[ubyte_var] = DEFAULT_FILE_UBYTE_DATA.reshape(DEFAULT_FILE_SHAPE)
                file_content['{}/shape'.format(ubyte_var)] = DEFAULT_FILE_SHAPE
                file_content_attr[ubyte_var] = {'source_key': 'Key: 0: None, 1: NOAA-N, 2: NOAA-P, 3: Metop-A, 4: Metop-B, 5: SNPP, 6: SSMI-17, 7: SSMI-18'}
            # Wrap arrays in DataArrays the way the real handler exposes them.
            for (key, val) in file_content.items():
                if key == 'lonArr' or key == 'latArr':
                    file_content[key] = xr.DataArray(val)
                elif isinstance(val, np.ndarray):
                    if val.ndim > 1:
                        file_content[key] = xr.DataArray(val, dims=('y', 'x'), attrs=file_content_attr[key])
                    else:
                        file_content[key] = xr.DataArray(val)
            # Metadata the reader expects on the physical variables.
            for key in itertools.chain(float_variables, ubyte_variables):
                file_content[key].attrs['_FillValue'] = -999.0
                file_content[key].attrs['name'] = key
                file_content[key].attrs['file_key'] = key
                file_content[key].attrs['file_type'] = self.filetype_info['file_type']
        else:
            msg = 'Wrong Test Reader for file_type {}'.format(filetype_info['file_type'])
            raise AssertionError(msg)
        return file_content
def bit_mask_of_modes_acted_on_by_fermionic_terms(fermion_term_list, n_qubits=None):
    """Build a boolean (n_qubits x n_terms) mask of which modes each term acts on.

    Entry [mode][term_number] is True iff that term contains an operator on
    that mode.  When n_qubits is omitted it defaults to the largest qubit
    count over all terms.  Raises ValueError when a supplied n_qubits is
    too small for some term's highest mode.
    """
    if n_qubits is None:
        n_qubits = max((count_qubits(term) for term in fermion_term_list), default=0)
    mask = numpy.zeros((n_qubits, len(fermion_term_list)), dtype=bool)
    for term_number, term in enumerate(fermion_term_list):
        # term.terms maps operator tuples to coefficients; each key is a
        # sequence of (mode, raise/lower) pairs.
        for action in term.terms:
            for single_operator in action:
                mode = single_operator[0]
                try:
                    mask[mode][term_number] = True
                except IndexError:
                    raise ValueError('Bad n_qubits: must be greater than highest mode in any FermionOperator.')
    return mask
class TestFactoryMethods():
    """Tests for the CSR factory functions: empty, zeros, identity, diags, one_element."""

    def test_empty(self, shape, density):
        """empty() allocates room for nnz elements without initializing them."""
        nnz = (int(((shape[0] * shape[1]) * density)) or 1)
        base = csr.empty(shape[0], shape[1], nnz)
        sci = base.as_scipy(full=True)
        assert isinstance(base, data.CSR)
        assert isinstance(sci, scipy.sparse.csr_matrix)
        assert (base.shape == shape)
        assert (sci.data.shape == (nnz,))
        assert (sci.indices.shape == (nnz,))
        assert (sci.indptr.shape == ((shape[0] + 1),))

    def test_zeros(self, shape):
        """zeros() yields an all-zero matrix with no stored elements."""
        base = csr.zeros(shape[0], shape[1])
        sci = base.as_scipy()
        assert isinstance(base, data.CSR)
        assert (base.shape == shape)
        assert (sci.nnz == 0)
        assert (sci.indptr.shape == ((shape[0] + 1),))

    # NOTE(review): the four decorators below had lost their '@pytest.mark'
    # prefix during extraction (bare '.parametrize(...)' lines); restored.
    @pytest.mark.parametrize('dimension', [1, 5, 100])
    @pytest.mark.parametrize('scale', [None, 2, (- 0.1), 1.5, (1.5 + 1j)], ids=['none', 'int', 'negative', 'float', 'complex'])
    def test_identity(self, dimension, scale):
        """identity() matches scipy's eye, optionally scaled."""
        base = (csr.identity(dimension) if (scale is None) else csr.identity(dimension, scale))
        sci = base.as_scipy()
        scipy_test = scipy.sparse.eye(dimension, dtype=np.complex128, format='csr')
        if (scale is not None):
            scipy_test *= scale
        assert isinstance(base, data.CSR)
        assert (base.shape == (dimension, dimension))
        assert (sci.nnz == dimension)
        assert ((sci - scipy_test).nnz == 0)

    @pytest.mark.parametrize(['diagonals', 'offsets', 'shape'], [pytest.param([2j, 3, 5, 9], None, None, id='main diagonal'), pytest.param([1], None, None, id='1x1'), pytest.param([[0.2j, 0.3]], None, None, id='main diagonal list'), pytest.param([0.2j, 0.3], 2, None, id='superdiagonal'), pytest.param([0.2j, 0.3], (- 2), None, id='subdiagonal'), pytest.param([[0.2, 0.3, 0.4], [0.1, 0.9]], [(- 2), 3], None, id='two diagonals'), pytest.param([1, 2, 3], 0, (3, 5), id='main wide'), pytest.param([1, 2, 3], 0, (5, 3), id='main tall'), pytest.param([[1, 2, 3], [4, 5]], [(- 1), (- 2)], (4, 8), id='two wide sub'), pytest.param([[1, 2, 3, 4], [4, 5, 4j, 1j]], [1, 2], (4, 8), id='two wide super'), pytest.param([[1, 2, 3], [4, 5]], [1, 2], (8, 4), id='two tall super'), pytest.param([[1, 2, 3, 4], [4, 5, 4j, 1j]], [(- 1), (- 2)], (8, 4), id='two tall sub'), pytest.param([[1, 2, 3], [4, 5, 6], [1, 2]], [1, (- 1), (- 2)], (4, 4), id='out of order'), pytest.param([[1, 2, 3], [4, 5, 6], [1, 2]], [1, 1, (- 2)], (4, 4), id='sum duplicates')])
    def test_diags(self, diagonals, offsets, shape):
        """diags() agrees with a dense matrix built by placing each diagonal."""
        base = csr.diags(diagonals, offsets, shape)
        # Normalize the flexible argument forms to lists of diagonals/offsets.
        if (not isinstance(diagonals[0], list)):
            diagonals = [diagonals]
        offsets = np.atleast_1d((offsets if (offsets is not None) else [0]))
        if (shape is None):
            size = (len(diagonals[0]) + abs(offsets[0]))
            shape = (size, size)
        test = np.zeros(shape, dtype=np.complex128)
        # += so duplicate offsets are summed, matching the factory's behavior.
        for (diagonal, offset) in zip(diagonals, offsets):
            test[np.where((np.eye(*shape, k=offset) == 1))] += diagonal
        assert isinstance(base, data.CSR)
        assert (base.shape == shape)
        np.testing.assert_allclose(base.to_array(), test, rtol=1e-10)

    @pytest.mark.parametrize(['shape', 'position', 'value'], [pytest.param((1, 1), (0, 0), None, id='minimal'), pytest.param((10, 10), (5, 5), 1j, id='on diagonal'), pytest.param((10, 10), (1, 5), 1.0, id='upper'), pytest.param((10, 10), (5, 1), 2.0, id='lower'), pytest.param((10, 1), (5, 0), None, id='column'), pytest.param((1, 10), (0, 5), (- 5j), id='row'), pytest.param((10, 2), (5, 1), (1 + 2j), id='tall'), pytest.param((2, 10), (1, 5), 10, id='wide')])
    def test_one_element(self, shape, position, value):
        """one_element_csr places a single value (default 1+0j) at `position`."""
        test = np.zeros(shape, dtype=np.complex128)
        if (value is None):
            base = data.one_element_csr(shape, position)
            test[position] = (1.0 + 0j)
        else:
            base = data.one_element_csr(shape, position, value)
            test[position] = value
        assert isinstance(base, data.CSR)
        assert (base.shape == shape)
        np.testing.assert_allclose(base.to_array(), test, atol=1e-10)

    @pytest.mark.parametrize(['shape', 'position', 'value'], [pytest.param((0, 0), (0, 0), None, id='zero shape'), pytest.param((10, (- 2)), (5, 0), 1j, id='neg shape'), pytest.param((10, 10), (10, 5), 1.0, id='outside'), pytest.param((10, 10), (5, (- 1)), 2.0, id='outside neg')])
    def test_one_element_error(self, shape, position, value):
        """Invalid shapes/positions raise ValueError with a specific message."""
        with pytest.raises(ValueError) as exc:
            base = data.one_element_csr(shape, position, value)
        assert str(exc.value).startswith('Position of the elements out of bound: ')
def join_user_profile(user_profile_file, behavior_file, joined_file):
    """Join behavior rows with user-profile columns on the leading uid field.

    Profile rows look like 'uid,aid,gid'; each behavior row gets ',aid,gid'
    appended and the result is written to `joined_file`.  Raises KeyError
    when a behavior uid has no profile entry.
    """
    # First pass: uid -> "aid,gid".
    profiles = {}
    with open(user_profile_file, 'r') as fin:
        for row in fin:
            uid, aid, gid = row[:-1].split(',')
            profiles[uid] = aid + ',' + gid
    # Second pass: append the profile columns to every behavior row.
    joined_rows = []
    with open(behavior_file, 'r') as fin:
        for row in fin:
            stripped = row[:-1]
            joined_rows.append(stripped + ',' + profiles[stripped.split(',')[0]] + '\n')
    with open(joined_file, 'w') as fout:
        fout.writelines(joined_rows)
_module()
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone built from inverted-residual stages.

    Outputs the feature maps of the stages listed in `out_indices`
    (a single tensor when only one index is requested, otherwise a tuple).
    """

    # Per stage: (expand_ratio, output channels before widening, num blocks).
    arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], [6, 96, 3], [6, 160, 3], [6, 320, 1]]

    def __init__(self, widen_factor=1.0, strides=(1, 2, 2, 2, 1, 2, 1), dilations=(1, 1, 1, 1, 1, 1, 1), out_indices=(1, 2, 4, 6), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False):
        super(MobileNetV2, self).__init__()
        self.widen_factor = widen_factor
        self.strides = strides
        self.dilations = dilations
        # One stride/dilation per architecture stage.
        assert (len(strides) == len(dilations) == len(self.arch_settings))
        self.out_indices = out_indices
        for index in out_indices:
            if (index not in range(0, 7)):
                raise ValueError(f'the item in out_indices must in range(0, 7). But received {index}')
        if (frozen_stages not in range((- 1), 7)):
            raise ValueError(f'frozen_stages must be in range(-1, 7). But received {frozen_stages}')
        # NOTE(review): duplicate of the assignment a few lines above; harmless.
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # Stem: 3 -> widened 32 channels, stride-2 3x3 conv.
        self.in_channels = make_divisible((32 * widen_factor), 8)
        self.conv1 = ConvModule(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Build and register each inverted-residual stage as 'layer{i+1}'.
        self.layers = []
        for (i, layer_cfg) in enumerate(self.arch_settings):
            (expand_ratio, channel, num_blocks) = layer_cfg
            stride = self.strides[i]
            dilation = self.dilations[i]
            out_channels = make_divisible((channel * widen_factor), 8)
            inverted_res_layer = self.make_layer(out_channels=out_channels, num_blocks=num_blocks, stride=stride, dilation=dilation, expand_ratio=expand_ratio)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

    def make_layer(self, out_channels, num_blocks, stride, dilation, expand_ratio):
        """Stack num_blocks InvertedResiduals; stride/dilation apply to the first block only."""
        layers = []
        for i in range(num_blocks):
            layers.append(InvertedResidual(self.in_channels, out_channels, (stride if (i == 0) else 1), expand_ratio=expand_ratio, dilation=(dilation if (i == 0) else 1), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self, pretrained=None):
        """Load pretrained weights from a path, or Kaiming/constant-init from scratch."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the stem and stages, collecting the requested stage outputs."""
        x = self.conv1(x)
        outs = []
        for (i, layer_name) in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def _freeze_stages(self):
        """Freeze the stem and the first `frozen_stages` stages (no grads, eval mode)."""
        if (self.frozen_stages >= 0):
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, (self.frozen_stages + 1)):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval; keep frozen stages frozen and optionally BN in eval."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
class C(nn.Module):
    """Plain 2D convolution (no norm/activation) padded to preserve spatial size at stride 1."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """nIn/nOut: channel counts; kSize: square kernel edge; stride: conv stride."""
        super().__init__()
        pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)

    def forward(self, input):
        """Apply the convolution to `input`."""
        return self.conv(input)
def attack_one_batch(sess, ops, attacked_data):
    """Run a C&W-style targeted attack (with binary search over the distance
    weight) on one batch of point clouds.

    Returns ([best_total_dists, best_farthest_dists, best_chamfer_dists],
    best_adversarial_pointclouds) for the batch.
    """
    is_training = False
    # Every example in the batch is pushed toward the same target class.
    attacked_label = (np.ones(shape=len(attacked_data), dtype=int) * TARGET)
    attacked_label = np.squeeze(attacked_label)
    # Per-example binary-search bounds on the distance-loss weight.
    lower_bound = np.zeros(BATCH_SIZE)
    WEIGHT = (np.ones(BATCH_SIZE) * INITIAL_WEIGHT)
    upper_bound = (np.ones(BATCH_SIZE) * UPPER_BOUND_WEIGHT)
    # NOTE(review): best distances start at 0.0, so `dist < o_bestdist[e]`
    # below can never hold for positive distances — confirm whether these
    # were meant to start at a large sentinel (e.g. 1e10).
    o_bestdist = ([.0] * BATCH_SIZE)
    o_bestdist_h = ([.0] * BATCH_SIZE)
    o_bestdist_f = ([.0] * BATCH_SIZE)
    o_bestscore = ([(- 1)] * BATCH_SIZE)
    o_bestattack = np.ones(shape=(BATCH_SIZE, ((NUM_ADD * NUM_CLUSTER) + NUM_POINT), 3))
    feed_dict = {ops['pointclouds_pl']: attacked_data, ops['is_training_pl']: is_training, ops['lr_attack']: LR_ATTACK}
    # Seed each cluster's added points with its initial positions.
    for j in range(NUM_CLUSTER):
        feed_dict[ops['initial_point_pl_list'][j]] = init_points_list[j]
    for out_step in range(BINARY_SEARCH_STEP):
        feed_dict[ops['dist_weight']] = WEIGHT
        # Re-randomize the perturbations at the start of every search step.
        for j in range(NUM_CLUSTER):
            sess.run(tf.assign(ops['pert_list'][j], tf.truncated_normal([BATCH_SIZE, NUM_ADD, 3], mean=0, stddev=1e-07)))
        bestdist = ([.0] * BATCH_SIZE)
        bestscore = ([(- 1)] * BATCH_SIZE)
        prev = 1000000.0
        for iteration in range(NUM_ITERATIONS):
            # One optimizer step per cluster.
            for j in range(NUM_CLUSTER):
                _ = sess.run([ops['attack_op_list'][j]], feed_dict=feed_dict)
            (adv_loss_val, pred_val, input_val) = sess.run([ops['adv_loss'], ops['pred'], ops['pointclouds_input']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            # Average the per-cluster distance losses.
            farthest_loss_list_val = sess.run(ops['farthest_loss_list'], feed_dict)
            farthest_loss_list_val = np.stack(farthest_loss_list_val)
            farthest_loss_list_val = np.average(farthest_loss_list_val, axis=0)
            nndistance_loss_list_val = sess.run(ops['nndistance_loss_list'], feed_dict)
            nndistance_loss_list_val = np.stack(nndistance_loss_list_val)
            nndistance_loss_list_val = np.average(nndistance_loss_list_val, axis=0)
            loss = (adv_loss_val + np.average(((farthest_loss_list_val + (MU * nndistance_loss_list_val)) * WEIGHT)))
            # Progress report roughly 10 times over the run.
            if ((iteration % ((NUM_ITERATIONS // 10) or 1)) == 0):
                print((' Iteration {} of {}: loss={} adv_loss:{} ' + 'distance={},{}').format(iteration, NUM_ITERATIONS, loss, adv_loss_val, np.average(farthest_loss_list_val), np.average(nndistance_loss_list_val)))
            # Disabled early-abort logic kept as a string literal in the original.
            '\n if ABORT_EARLY and iteration % ((MAX_ITERATIONS // 10) or 1) == 0:\n \n if loss > prev * .9999999:\n msg = " Failed to make progress; stop early"\n print(msg)\n break\n prev = loss\n '
            # Track the best (lowest-distance, successfully targeted) example.
            for (e, (dist_h, dist_f, pred, ii)) in enumerate(zip(nndistance_loss_list_val, farthest_loss_list_val, pred_val, input_val)):
                dist = ((dist_h * MU) + dist_f)
                if ((dist < bestdist[e]) and (pred == TARGET)):
                    bestdist[e] = dist
                    bestscore[e] = pred
                if ((dist < o_bestdist[e]) and (pred == TARGET)):
                    o_bestdist[e] = dist
                    o_bestdist_h[e] = dist_h
                    o_bestdist_f[e] = dist_f
                    o_bestscore[e] = pred
                    o_bestattack[e] = ii
        # Binary-search update: raise the weight on success, lower on failure.
        for e in range(BATCH_SIZE):
            if ((bestscore[e] == TARGET) and (bestscore[e] != (- 1)) and (bestdist[e] <= o_bestdist[e])):
                lower_bound[e] = max(lower_bound[e], WEIGHT[e])
                WEIGHT[e] = ((lower_bound[e] + upper_bound[e]) / 2)
            else:
                upper_bound[e] = min(upper_bound[e], WEIGHT[e])
                WEIGHT[e] = ((lower_bound[e] + upper_bound[e]) / 2)
    print(' Successfully generated adversarial exampleson {} of {} instances.'.format(sum((lower_bound > 0)), BATCH_SIZE))
    return ([o_bestdist, o_bestdist_f, o_bestdist_h], o_bestattack)
def mpl_time_axis(axes, approx_ticks=5.0):
    """Install time-aware major tick locator/formatter on `axes`' x axis.

    Tick positions and labels come from nice_time_tick_inc/time_tick_labels;
    each tick value is a float subclass carrying its preformatted label,
    which the formatter prefers over the generic date format.
    """
    from matplotlib.ticker import Locator, Formatter

    class labeled_float(float):
        # float that can carry an attached label via the _mpl_label attribute.
        pass

    class TimeLocator(Locator):
        """Locator producing roughly `approx_ticks` nicely spaced time ticks."""

        def __init__(self, approx_ticks=5.0):
            self._approx_ticks = approx_ticks
            Locator.__init__(self)

        def __call__(self):
            (vmin, vmax) = self.axis.get_view_interval()
            return self.tick_values(vmin, vmax)

        def tick_values(self, vmin, vmax):
            if (vmax < vmin):
                (vmin, vmax) = (vmax, vmin)
            if (vmin == vmax):
                # Degenerate interval: no ticks.
                return []
            # Snap the naive spacing to a "nice" time increment.
            tinc_approx = ((vmax - vmin) / self._approx_ticks)
            (tinc, tinc_unit) = nice_time_tick_inc(tinc_approx)
            (times, labels) = time_tick_labels(vmin, vmax, tinc, tinc_unit)
            # Attach each label to its tick value so the formatter can use it.
            ftimes = []
            for (t, label) in zip(times, labels):
                ftime = labeled_float(t)
                ftime._mpl_label = label
                ftimes.append(ftime)
            return self.raise_if_exceeds(ftimes)

    class TimeFormatter(Formatter):
        """Use the locator-attached label when present, else a full date string."""

        def __call__(self, x, pos=None):
            if isinstance(x, labeled_float):
                return x._mpl_label
            else:
                return time_to_str(x, format='%Y-%m-%d %H:%M:%S.6FRAC')

    axes.xaxis.set_major_locator(TimeLocator(approx_ticks=approx_ticks))
    axes.xaxis.set_major_formatter(TimeFormatter())
def eval_adv_test_blackbox(model_target, model_source, device, test_loader):
    """Evaluate black-box robustness: adversarial examples are crafted on
    `model_source` and evaluated on `model_target`.  Prints the total
    natural and robust error counts over the loader.
    """
    model_target.eval()
    model_source.eval()
    natural_err_total = 0
    robust_err_total = 0
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        X = Variable(data, requires_grad=True)
        y = Variable(target)
        err_natural, err_robust = _pgd_blackbox(model_target, model_source, X, y)
        natural_err_total += err_natural
        robust_err_total += err_robust
    print('natural_err_total: ', natural_err_total)
    print('robust_err_total: ', robust_err_total)
def test_abi_label_convertor():
    """ABIConvertor.str2tensor appends EOS to targets and pads/truncates the
    BOS-prefixed padded targets to max_seq_len."""
    tmp_dir = tempfile.TemporaryDirectory()
    dict_file = osp.join(tmp_dir.name, 'fake_dict.txt')
    _create_dummy_dict_file(dict_file)
    label_convertor = ABIConvertor(dict_file=dict_file, max_seq_len=10)
    label_convertor.end_idx  # no-op attribute access preserved from the original
    # Short string: fits, EOS (8) appended; padded form adds BOS and pad (9).
    targets_dict = label_convertor.str2tensor(['hell'])
    assert torch.allclose(targets_dict['targets'][0], torch.LongTensor([0, 1, 2, 2, 8]))
    assert torch.allclose(targets_dict['padded_targets'][0], torch.LongTensor([8, 0, 1, 2, 2, 8, 9, 9, 9, 9]))
    # Long string: truncated to max_seq_len.
    targets_dict = label_convertor.str2tensor(['hellhellhell'])
    assert torch.allclose(targets_dict['targets'][0], torch.LongTensor([0, 1, 2, 2, 0, 1, 2, 2, 0, 8]))
    assert torch.allclose(targets_dict['padded_targets'][0], torch.LongTensor([8, 0, 1, 2, 2, 0, 1, 2, 2, 0]))
    tmp_dir.cleanup()
def test_calculate_ssim():
    """calculate_ssim input validation plus a smoke run on valid images."""
    # Mismatched image shapes must be rejected.
    with pytest.raises(AssertionError):
        calculate_ssim(np.ones((16, 16)), np.ones((10, 10)), crop_border=0)
    # An unknown input_order must be rejected.
    with pytest.raises(ValueError):
        calculate_ssim(np.ones((16, 16)), np.ones((16, 16)), crop_border=1, input_order='WRONG')
    result = calculate_ssim(np.ones((10, 10, 3)), np.ones((10, 10, 3)) * 2, crop_border=1, test_y_channel=True)
    assert isinstance(result, float)
def test_trustme_cli_expires_on(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    """--expires-on accepts an ISO date and the three cert files are still written."""
    monkeypatch.chdir(tmp_path)
    main(argv=['--expires-on', '2035-03-01'])
    for generated in ('server.key', 'server.pem', 'client.pem'):
        assert tmp_path.joinpath(generated).exists()
# NOTE(review): decorator had lost its '@pytest.mark' prefix (bare
# '.skipif(...)' is a syntax error); restored.
@pytest.mark.skipif((not modeltest.HAS_QT_TESTER), reason='No Qt modeltester available')
def test_qt_tester_invalid(testdir):
    """A model violating Qt's modeltester contract fails with captured Qt messages."""
    testdir.makeini('\n    [pytest]\n    qt_log_level_fail = NO\n    ')
    testdir.makepyfile('\n    from pytestqt.qt_compat import qt_api\n    from pytestqt import modeltest\n\n    assert modeltest.HAS_QT_TESTER\n\n\n    class Model(qt_api.QtCore.QAbstractItemModel):\n        def data(self, index, role=qt_api.QtCore.Qt.ItemDataRole.DisplayRole):\n            return None\n\n        def rowCount(self, parent=qt_api.QtCore.QModelIndex()):\n            return 0\n\n        def columnCount(self, parent=qt_api.QtCore.QModelIndex()):\n            return -1\n\n        def index(self, row, column, parent=qt_api.QtCore.QModelIndex()):\n            return qt_api.QtCore.QModelIndex()\n\n        def parent(self, index):\n            return qt_api.QtCore.QModelIndex()\n\n\n    def test_ok(qtmodeltester):\n        model = Model()\n        qtmodeltester.check(model)\n    ')
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(['*__ test_ok __*', 'test_qt_tester_invalid.py:*: Qt modeltester errors', '*-- Captured Qt messages --*', '*QtWarningMsg: FAIL! model->columnCount(QModelIndex()) >= 0 () returned FALSE (*qabstractitemmodeltester.cpp:*)', '*-- Captured stdout call --*', 'modeltest: Using Qt C++ tester', '*== 1 failed in * ==*'])
def read_validation_evaluation_run(save_dir: str) -> Optional[EvaluationRun]:
    """Load the pickled validation EvaluationRun under `save_dir`, or None if absent."""
    pickle_path = os.path.join(save_dir, VALIDATION_EVALUATION_DIR, EVALUATION_RUN_PICKLE_FILE_NAME)
    if not os.path.exists(pickle_path):
        return None
    # NOTE: pickle.load executes arbitrary code — only use on trusted files.
    with open(pickle_path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
def _normalize(x, params, forward=True):
assert ('scale' in params)
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
scale = params['scale']
offset = params['offset']
x = x.to(device=scale.device, dtype=scale.dtype)
src_shape = x.shape
x = x.reshape((- 1), scale.shape[0])
if forward:
x = ((x * scale) + offset)
else:
x = ((x - offset) / scale)
x = x.reshape(src_shape)
return x |
.supported(only_if=(lambda backend: backend.ed25519_supported()), skip_message='Requires OpenSSL with Ed25519 support')
_tests('eddsa_test.json')
def test_ed25519_signature(backend, wycheproof):
    """Run Wycheproof eddsa test vectors against the Ed25519 verify API."""
    assert (wycheproof.testgroup['key']['curve'] == 'edwards25519')
    key = Ed25519PublicKey.from_public_bytes(binascii.unhexlify(wycheproof.testgroup['key']['pk']))
    if (wycheproof.valid or wycheproof.acceptable):
        # Valid/acceptable vectors must verify without raising.
        key.verify(binascii.unhexlify(wycheproof.testcase['sig']), binascii.unhexlify(wycheproof.testcase['msg']))
    else:
        # Invalid vectors must be rejected with InvalidSignature.
        with pytest.raises(InvalidSignature):
            key.verify(binascii.unhexlify(wycheproof.testcase['sig']), binascii.unhexlify(wycheproof.testcase['msg']))
class TestConvModuleMeta():
    """meta.conv_module_meta extracts conv hyper-parameters; kwargs filter them."""

    def test_main(self):
        """All four hyper-parameters round-trip through conv_module_meta."""
        expected = {'kernel_size': (2,), 'stride': (3,), 'padding': (4,), 'dilation': (5,)}
        module = make_conv_module(nn.Conv1d, **expected)
        assert meta.conv_module_meta(module) == expected

    def test_kwargs(self):
        """Passing a kwarg restricts/echoes the corresponding entry."""
        stride = (2,)
        module = make_conv_module(nn.Conv2d, stride=stride)
        assert meta.conv_module_meta(module, stride=stride)['stride'] == stride
class DynamicImportPatchTest(TestPyfakefsUnittestBase):
    """Imports performed inside test methods still resolve to the patched fake filesystem."""

    def __init__(self, methodName='runTest'):
        super(DynamicImportPatchTest, self).__init__(methodName)

    def test_os_patch(self):
        """A plain `import os` inside the test is patched."""
        import os
        os.mkdir('test')
        self.assertTrue(self.fs.exists('test'))
        self.assertTrue(os.path.exists('test'))

    def test_os_import_as_patch(self):
        """An aliased import (`import os as _os`) is patched too."""
        import os as _os
        _os.mkdir('test')
        self.assertTrue(self.fs.exists('test'))
        self.assertTrue(_os.path.exists('test'))

    def test_os_path_patch(self):
        """A submodule import (`import os.path`) is patched too."""
        import os.path
        os.mkdir('test')
        self.assertTrue(self.fs.exists('test'))
        self.assertTrue(os.path.exists('test'))

    def test_shutil_patch(self):
        """shutil reports the fake filesystem's configured disk size."""
        import shutil
        self.fs.set_disk_usage(100)
        self.assertEqual(100, shutil.disk_usage('/').total)

    def test_pathlib_path_patch(self):
        """pathlib.Path writes land in the fake filesystem."""
        file_path = 'test.txt'
        with pathlib.Path(file_path).open('w') as handle:
            handle.write('test')
        self.assertTrue(self.fs.exists(file_path))
        self.assertEqual('test', self.fs.get_object(file_path).contents)
def transcribe(audio, speech2text=None, config=None):
    """Yield Caption segments for `audio` (a file path or a waveform array).

    Long audio is processed in chunks of at most config.window samples;
    each chunk is cut at a point chosen by _find_blank (presumably a
    silence/blank region — TODO confirm) so words are not split, run
    through the ASR model, and subdivided by _split_text.
    """
    if (config is None):
        config = TranscribeConfig()
    if (speech2text is None):
        speech2text = load_default_model()
    if isinstance(audio, str):
        # Load from file at the configured sample rate; librosa returns (y, sr).
        audio = librosa.load(audio, sr=config.samplerate)[0]
    nsamples = len(audio)
    pos = 0
    while (pos < nsamples):
        segment = audio[pos:]
        if (len(segment) > config.window):
            # Trim the chunk to end at the detected blank position.
            blank = _find_blank(segment[:config.window], speech2text, config.blank_threshold)
            segment = segment[:blank.end]
        asr = speech2text(np.pad(segment, config.padding, mode='constant'))[0][0]
        for (start, end, text) in _split_text(asr, segment, speech2text):
            # Convert segment-relative sample offsets to absolute seconds.
            (yield Caption(start_seconds=((pos + start) / config.samplerate), end_seconds=((pos + end) / config.samplerate), text=text))
        pos += len(segment)
class FSDPStrategy(Strategy):
    """Configuration holder for fully-sharded data-parallel training.

    Field names mirror the constructor arguments of PyTorch FSDP —
    presumably forwarded to torch.distributed.fsdp.FullyShardedDataParallel
    at wrap time (confirm at the call site).
    """
    # Process group / sharding configuration.
    process_group: Optional[ProcessGroup] = None
    sharding_strategy: Optional[ShardingStrategy] = None
    cpu_offload: Optional[CPUOffload] = None
    # Policy deciding which submodules get wrapped.
    auto_wrap_policy: Optional[Callable[([torch.nn.Module, bool, int], bool)]] = None
    backward_prefetch: Optional[BackwardPrefetch] = BackwardPrefetch.BACKWARD_PRE
    mixed_precision: Optional[MixedPrecision] = None
    ignored_modules: Optional[Iterable[torch.nn.Module]] = None
    param_init_fn: Optional[Callable[([torch.nn.Module], None)]] = None
    sync_module_states: bool = False
    forward_prefetch: bool = False
    limit_all_gathers: bool = True
    use_orig_params: bool = False
    # State-dict handling configuration.
    state_dict_type: Optional[StateDictType] = None
    state_dict_config: Optional[StateDictConfig] = None
    optim_state_dict_config: Optional[OptimStateDictConfig] = None
class ReconnectLDAPObject(SimpleLDAPObject):
    """SimpleLDAPObject variant that transparently reconnects after a server
    outage, replaying the recorded options, TLS state and last bind; also
    survives pickling by dropping connection-local state.
    """
    # Attributes that must not be pickled (locks, the C connection handle,
    # the trace file, and the unpicklable bound-method bind record).
    __transient_attrs__ = {'_l', '_ldap_object_lock', '_trace_file', '_reconnect_lock', '_last_bind'}
def __init__(self, uri, trace_level=0, trace_file=None, trace_stack_limit=5, bytes_mode=None, bytes_strictness=None, retry_max=1, retry_delay=60.0, fileno=None):
    """Open the connection; retry_max/retry_delay control later reconnects."""
    # Remember the URI and every set_option() call so both can be replayed
    # on a fresh connection after a reconnect.
    self._uri = uri
    self._options = []
    self._last_bind = None
    SimpleLDAPObject.__init__(self, uri, trace_level, trace_file, trace_stack_limit, bytes_mode, bytes_strictness=bytes_strictness, fileno=fileno)
    self._reconnect_lock = ldap.LDAPLock(desc=('reconnect lock within %s' % repr(self)))
    self._retry_max = retry_max
    self._retry_delay = retry_delay
    # Set to 1 once start_tls_s() succeeds; replayed by reconnect().
    self._start_tls = 0
    self._reconnects_done = 0
def __getstate__(self):
    """Pickle support: drop transient attrs and store the last bind as a
    (method name, args, kwargs) triple so it can be re-resolved on load."""
    state = {k: v for (k, v) in self.__dict__.items() if (k not in self.__transient_attrs__)}
    # NOTE(review): raises TypeError when no bind has happened yet
    # (self._last_bind is None) — confirm whether that is intended.
    state['_last_bind'] = (self._last_bind[0].__name__, self._last_bind[1], self._last_bind[2])
    return state
def __setstate__(self, d):
    """Unpickle support: restore saved state, then re-open the connection."""
    # Map the legacy 'bytes_mode_hardfail' pickle field onto bytes_strictness.
    hardfail = d.get('bytes_mode_hardfail')
    if hardfail:
        d.setdefault('bytes_strictness', 'error')
    else:
        d.setdefault('bytes_strictness', 'warn')
    self.__dict__.update(d)
    # _last_bind was pickled as a method *name*; resolve it back to the
    # SimpleLDAPObject method object.
    self._last_bind = (getattr(SimpleLDAPObject, self._last_bind[0]), self._last_bind[1], self._last_bind[2])
    self._ldap_object_lock = self._ldap_lock()
    self._reconnect_lock = ldap.LDAPLock(desc=('reconnect lock within %s' % repr(self)))
    self._trace_file = ldap._trace_file
    self.reconnect(self._uri, force=True)
def _store_last_bind(self, _method, *args, **kwargs):
    # Remember (method, args, kwargs) of the most recent bind so
    # reconnect() can replay it on a new connection.
    self._last_bind = (_method, args, kwargs)
def _apply_last_bind(self):
    """Replay the most recent bind on a fresh connection, or do an
    anonymous simple bind if no bind has happened yet."""
    # Idiom fix: was `self._last_bind != None`; identity comparison is the
    # correct test against the None singleton.
    if self._last_bind is not None:
        (func, args, kwargs) = self._last_bind
        func(self, *args, **kwargs)
    else:
        SimpleLDAPObject.simple_bind_s(self, None, None)
def _restore_options(self):
    """Replay every recorded set_option() call on the fresh connection."""
    for (k, v) in self._options:
        SimpleLDAPObject.set_option(self, k, v)
def passwd_s(self, *args, **kwargs):
    # Reconnect-aware wrapper around SimpleLDAPObject.passwd_s.
    return self._apply_method_s(SimpleLDAPObject.passwd_s, *args, **kwargs)
def reconnect(self, uri, retry_max=1, retry_delay=60.0, force=True):
    """(Re)open the underlying connection to `uri`, restoring recorded
    options, TLS state and the last bind.

    Tries up to retry_max times, sleeping retry_delay seconds between
    attempts, and re-raises SERVER_DOWN/TIMEOUT once they are exhausted.
    With force=False an already-existing connection is kept as-is.
    """
    # Serialize reconnect attempts across threads.
    self._reconnect_lock.acquire()
    try:
        if hasattr(self, '_l'):
            if force:
                SimpleLDAPObject.unbind_s(self)
            else:
                # A connection already exists and the caller did not force.
                return
        reconnect_counter = retry_max
        while reconnect_counter:
            counter_text = ('%d. (of %d)' % (((retry_max - reconnect_counter) + 1), retry_max))
            if (__debug__ and (self._trace_level >= 1)):
                self._trace_file.write('*** Trying {} reconnect to {}...\n'.format(counter_text, uri))
            try:
                try:
                    # Open a fresh libldap handle for this URI.
                    self._l = ldap.functions._ldap_function_call(ldap._ldap_module_lock, _ldap.initialize, uri)
                    self._restore_options()
                    # Replay STARTTLS and the last bind on the new connection.
                    if self._start_tls:
                        SimpleLDAPObject.start_tls_s(self)
                    self._apply_last_bind()
                except ldap.LDAPError:
                    SimpleLDAPObject.unbind_s(self)
                    raise
            except (ldap.SERVER_DOWN, ldap.TIMEOUT):
                if (__debug__ and (self._trace_level >= 1)):
                    self._trace_file.write('*** {} reconnect to {} failed\n'.format(counter_text, uri))
                reconnect_counter = (reconnect_counter - 1)
                if (not reconnect_counter):
                    # Attempts exhausted — propagate the failure.
                    raise
                if (__debug__ and (self._trace_level >= 1)):
                    self._trace_file.write(('=> delay %s...\n' % retry_delay))
                time.sleep(retry_delay)
            else:
                if (__debug__ and (self._trace_level >= 1)):
                    self._trace_file.write('*** {} reconnect to {} successful => repeat last operation\n'.format(counter_text, uri))
                self._reconnects_done = (self._reconnects_done + 1)
                break
    finally:
        self._reconnect_lock.release()
    return
def _apply_method_s(self, func, *args, **kwargs):
self.reconnect(self._uri, retry_max=self._retry_max, retry_delay=self._retry_delay, force=False)
try:
return func(self, *args, **kwargs)
except ldap.SERVER_DOWN:
self.reconnect(self._uri, retry_max=self._retry_max, retry_delay=self._retry_delay, force=True)
return func(self, *args, **kwargs)
    def set_option(self, option, invalue):
        """Set an LDAP option, recording it so it can be restored after reconnects."""
        self._options.append((option, invalue))
        return SimpleLDAPObject.set_option(self, option, invalue)
def bind_s(self, *args, **kwargs):
res = self._apply_method_s(SimpleLDAPObject.bind_s, *args, **kwargs)
self._store_last_bind(SimpleLDAPObject.bind_s, *args, **kwargs)
return res
def simple_bind_s(self, *args, **kwargs):
res = self._apply_method_s(SimpleLDAPObject.simple_bind_s, *args, **kwargs)
self._store_last_bind(SimpleLDAPObject.simple_bind_s, *args, **kwargs)
return res
def start_tls_s(self, *args, **kwargs):
res = self._apply_method_s(SimpleLDAPObject.start_tls_s, *args, **kwargs)
self._start_tls = 1
return res
def sasl_interactive_bind_s(self, *args, **kwargs):
res = self._apply_method_s(SimpleLDAPObject.sasl_interactive_bind_s, *args, **kwargs)
self._store_last_bind(SimpleLDAPObject.sasl_interactive_bind_s, *args, **kwargs)
return res
def sasl_bind_s(self, *args, **kwargs):
res = self._apply_method_s(SimpleLDAPObject.sasl_bind_s, *args, **kwargs)
self._store_last_bind(SimpleLDAPObject.sasl_bind_s, *args, **kwargs)
return res
    def add_ext_s(self, *args, **kwargs):
        """Synchronous extended add; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.add_ext_s, *args, **kwargs)
    def cancel_s(self, *args, **kwargs):
        """Synchronous cancel; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.cancel_s, *args, **kwargs)
    def compare_ext_s(self, *args, **kwargs):
        """Synchronous extended compare; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.compare_ext_s, *args, **kwargs)
    def delete_ext_s(self, *args, **kwargs):
        """Synchronous extended delete; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.delete_ext_s, *args, **kwargs)
    def extop_s(self, *args, **kwargs):
        """Synchronous extended operation; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.extop_s, *args, **kwargs)
    def modify_ext_s(self, *args, **kwargs):
        """Synchronous extended modify; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.modify_ext_s, *args, **kwargs)
    def rename_s(self, *args, **kwargs):
        """Synchronous rename; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.rename_s, *args, **kwargs)
    def search_ext_s(self, *args, **kwargs):
        """Synchronous extended search; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.search_ext_s, *args, **kwargs)
    def whoami_s(self, *args, **kwargs):
        """Synchronous Who Am I?; retried once via reconnect on SERVER_DOWN."""
        return self._apply_method_s(SimpleLDAPObject.whoami_s, *args, **kwargs)
class ResUnit(nn.Module):
    """ResNet-style residual unit.

    Passes the input through a residual body (bottleneck or basic block)
    and adds a shortcut; the shortcut is projected with a 1x1 conv block
    whenever the channel count or spatial stride changes.
    """

    def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bottleneck=True, conv1_stride=False):
        super(ResUnit, self).__init__()
        # A projection is needed when the identity path cannot be a plain copy.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = ResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(in_channels=in_channels, out_channels=out_channels, stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        # Shortcut branch: projected copy or plain copy of the input.
        shortcut = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x)
        out = out + shortcut
        return self.activ(out)
def main():
    """Convert NAF annotations into per-split COCO-style instance JSON files."""
    args = parse_args()
    root_path = args.root_path
    split_info = mmcv.load(osp.join(root_path, 'annotations', 'train_valid_test_split.json'))
    # Rename split keys to the names used for the output files.
    split_info['training'] = split_info.pop('train')
    split_info['val'] = split_info.pop('valid')
    for split in ('training', 'val', 'test'):
        print(f'Processing {split} set...')
        with mmcv.Timer(print_tmpl='It takes {}s to convert NAF annotation'):
            img_dir = osp.join(root_path, 'imgs')
            ann_dir = osp.join(root_path, 'annotations')
            files = collect_files(img_dir, ann_dir, split_info[split])
            image_infos = collect_annotations(files, nproc=args.nproc)
            convert_annotations(image_infos, osp.join(root_path, 'instances_' + split + '.json'))
def _read_spotting_detections_and_labels(args: Dict, num_classes: int) -> Tuple[(List[np.ndarray], List[np.ndarray])]:
    """Load per-half spotting detections and one-hot labels for the test split.

    Returns two parallel lists (detections, labels), with two entries per
    game (one per half).
    """
    splits_dir = Path(args[ARGS_SPLITS_DIR])
    results_dir = Path(args[ARGS_RESULTS_DIR])
    labels_dir = Path(args[ARGS_LABELS_DIR])
    features_dir = Path(args[ARGS_FEATURES_DIR])
    label_reader = GameOneHotSpottingLabelReader(SOCCERNET_TYPE, EVALUATION_FRAME_RATE, num_classes)
    predictions_reader = GameSpottingPredictionsReader(SOCCERNET_TYPE, EVALUATION_FRAME_RATE, num_classes)
    paths_reader = GamePathsReader(SOCCERNET_TYPE, EVALUATION_FEATURE_NAME, features_dir, labels_dir, splits_dir)
    detections = []
    labels = []
    for paths in paths_reader.read_valid(SPLIT_KEY_TEST):
        frames_one = read_num_frames(paths.features_one)
        frames_two = read_num_frames(paths.features_two)
        label_frames_one = frames_one
        label_frames_two = frames_two
        if REPRODUCE_SOCCERNET_EVALUATION:
            # Pad the label window so events slightly past the video end still count.
            slack = int(EVALUATION_FRAME_RATE * SLACK_SECONDS)
            label_frames_one += slack
            label_frames_two += slack
        half_one, half_two = label_reader.read(paths.labels.get(Task.SPOTTING), label_frames_one, label_frames_two)
        assert half_one[INDEX_VALID]
        assert half_two[INDEX_VALID]
        labels.append(half_one[INDEX_LABELS])
        labels.append(half_two[INDEX_LABELS])
        detections_path = _spotting_detections_path(results_dir / paths.relative)
        det_one, det_two = predictions_reader.read(detections_path, frames_one, frames_two)
        detections.append(det_one)
        detections.append(det_two)
    return detections, labels
def _list_paths_with_resource(game, print_only_area: bool, resource: ResourceInfo, needed_quantity: (int | None)):
    """Print every connection requirement that involves *resource*.

    When *print_only_area* is true only area names are printed; otherwise
    the full requirement alternatives are shown. *needed_quantity* filters
    to requirements of exactly that amount (None matches any amount).
    """
    from randovania.game_description.game_description import GameDescription
    game = typing.cast(GameDescription, game)
    total = 0
    for area in game.region_list.all_areas:
        found_in_area = False
        for source, connection in area.connections.items():
            for target, requirement in connection.items():
                for alternative in requirement.as_set(game.resource_database).alternatives:
                    individual = alternative.get(resource)
                    if individual is None:
                        continue
                    if needed_quantity is not None and needed_quantity != individual.amount:
                        continue
                    found_in_area = True
                    if not print_only_area:
                        others = sorted(ind for ind in alternative.values() if ind.resource != resource)
                        print('At {}, from {} to {}:\n{}\n'.format(game.region_list.area_name(area), source.name, target.name, others))
                    total += 1
        if found_in_area and print_only_area:
            print(game.region_list.area_name(area))
    print(f'Total routes: {total}')
class Selector(Layer):
    """Keras layer emitting 1.0 where the input equals a fixed value, else 0.0."""

    def __init__(self, select, **kwargs):
        super(Selector, self).__init__(**kwargs)
        self.select = select
        # Constant tensor compared element-wise against the layer input.
        self.select_neuron = K.constant(value=self.select)

    def build(self, input_shape):
        super(Selector, self).build(input_shape)

    def call(self, x):
        mask = K.equal(x, self.select_neuron)
        return K.cast(mask, dtype='float64')

    def get_config(self):
        # Merge the base config with this layer's own parameter.
        merged = dict(super(Selector, self).get_config())
        merged.update({'select': self.select})
        return merged

    def compute_output_shape(self, input_shape):
        # Element-wise operation: shape is unchanged.
        return input_shape
# NOTE(review): the bare "(nodes.TypeAlias)" below looks like a stripped
# "@verify.register(nodes.TypeAlias)" decorator — confirm against the original file.
(nodes.TypeAlias)
def verify_typealias(stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: list[str]) -> Iterator[Error]:
    """Verify a stub type alias against its runtime counterpart, yielding Errors."""
    stub_target = mypy.types.get_proper_type(stub.target)
    stub_desc = f'Type alias for {stub_target}'
    if isinstance(runtime, Missing):
        (yield Error(object_path, 'is not present at runtime', stub, runtime, stub_desc=stub_desc))
        return
    # For subscripted generics at runtime, compare against the unsubscripted origin.
    runtime_origin = (get_origin(runtime) or runtime)
    if isinstance(stub_target, mypy.types.Instance):
        if (not isinstance(runtime_origin, type)):
            (yield Error(object_path, 'is inconsistent, runtime is not a type', stub, runtime, stub_desc=stub_desc))
            return
        stub_origin = stub_target.type
        runtime_name: object
        try:
            runtime_name = runtime_origin.__qualname__
        except AttributeError:
            runtime_name = getattr(runtime_origin, '__name__', MISSING)
        if isinstance(runtime_name, str):
            runtime_module: object = getattr(runtime_origin, '__module__', MISSING)
            if isinstance(runtime_module, str):
                # Normalize modules whose members are re-exported via typing.
                if ((runtime_module == 'collections.abc') or ((runtime_module == 're') and (runtime_name in {'Match', 'Pattern'}))):
                    runtime_module = 'typing'
                runtime_fullname = f'{runtime_module}.{runtime_name}'
                # An optional leading underscore accounts for private runtime names.
                if re.fullmatch(f'_?{re.escape(stub_origin.fullname)}', runtime_fullname):
                    return
        (yield from verify(stub_origin, runtime_origin, object_path))
        return
    if isinstance(stub_target, mypy.types.UnionType):
        # types.UnionType (X | Y) only exists on Python 3.10+.
        if ((runtime_origin is not Union) and (not ((sys.version_info >= (3, 10)) and isinstance(runtime, types.UnionType)))):
            (yield Error(object_path, 'is not a Union', stub, runtime, stub_desc=str(stub_target)))
        return
    if isinstance(stub_target, mypy.types.TupleType):
        if (tuple not in getattr(runtime_origin, '__mro__', ())):
            (yield Error(object_path, 'is not a subclass of tuple', stub, runtime, stub_desc=stub_desc))
        return
    if isinstance(stub_target, mypy.types.CallableType):
        if (runtime_origin is not collections.abc.Callable):
            (yield Error(object_path, 'is not a type alias for Callable', stub, runtime, stub_desc=stub_desc))
        return
    if isinstance(stub_target, mypy.types.AnyType):
        # Any matches everything: nothing to check.
        return
    (yield Error(object_path, 'is not a recognised type alias', stub, runtime, stub_desc=stub_desc))
class PFContextMenuPref(PreferenceView):
    """Preference pane for enabling/disabling individual context-menu features."""

    def populatePanel(self, panel):
        # Build the panel: title, explanation text, then one Disabled/Enabled
        # radio box per context-menu feature, laid out in three rows.
        self.title = _t('Context Menus')
        self.settings = ContextMenuSettings.getInstance()
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        self.dirtySettings = False
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.stTitle = wx.StaticText(panel, wx.ID_ANY, self.title, wx.DefaultPosition, wx.DefaultSize, 0)
        self.stTitle.Wrap((- 1))
        self.stTitle.SetFont(wx.Font(12, 70, 90, 90, False, wx.EmptyString))
        mainSizer.Add(self.stTitle, 0, (wx.EXPAND | wx.ALL), 5)
        self.stSubTitle = wx.StaticText(panel, wx.ID_ANY, _t('Disabling context menus can improve responsiveness.\nYou can hold {} key + right-click to show all menu items regardless of these settings.').format(('Command' if ('wxMac' in wx.PlatformInfo) else 'Control')), wx.DefaultPosition, wx.DefaultSize, 0)
        self.stSubTitle.Wrap((- 1))
        mainSizer.Add(self.stSubTitle, 0, wx.ALL, 5)
        self.m_staticline1 = wx.StaticLine(panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
        mainSizer.Add(self.m_staticline1, 0, (wx.EXPAND | wx.ALL), 5)
        # Row 1: ammo pattern / skill change / variations.
        rbSizerRow1 = wx.BoxSizer(wx.HORIZONTAL)
        self.rbBox1 = wx.RadioBox(panel, (- 1), _t('Set as Damage Pattern'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox1.SetSelection(self.settings.get('ammoPattern'))
        rbSizerRow1.Add(self.rbBox1, 1, wx.ALL, 5)
        self.rbBox1.Bind(wx.EVT_RADIOBOX, self.OnSetting1Change)
        self.rbBox2 = wx.RadioBox(panel, (- 1), _t('Change Skills'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox2.SetSelection(self.settings.get('changeAffectingSkills'))
        rbSizerRow1.Add(self.rbBox2, 1, wx.ALL, 5)
        self.rbBox2.Bind(wx.EVT_RADIOBOX, self.OnSetting2Change)
        self.rbBox3 = wx.RadioBox(panel, (- 1), _t('Variations'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox3.SetSelection(self.settings.get('metaSwap'))
        rbSizerRow1.Add(self.rbBox3, 1, wx.ALL, 5)
        self.rbBox3.Bind(wx.EVT_RADIOBOX, self.OnSetting3Change)
        mainSizer.Add(rbSizerRow1, 1, (wx.ALL | wx.EXPAND), 0)
        # Row 2: project onto fit / fill with module.
        rbSizerRow2 = wx.BoxSizer(wx.HORIZONTAL)
        self.rbBox4 = wx.RadioBox(panel, (- 1), _t('Project onto Fit'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox4.SetSelection(self.settings.get('project'))
        rbSizerRow2.Add(self.rbBox4, 1, wx.ALL, 5)
        self.rbBox4.Bind(wx.EVT_RADIOBOX, self.OnSetting4Change)
        self.rbBox5 = wx.RadioBox(panel, (- 1), _t('Fill with module'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox5.SetSelection(self.settings.get('moduleFill'))
        rbSizerRow2.Add(self.rbBox5, 1, wx.ALL, 5)
        self.rbBox5.Bind(wx.EVT_RADIOBOX, self.OnSetting5Change)
        mainSizer.Add(rbSizerRow2, 1, (wx.ALL | wx.EXPAND), 0)
        # Row 3: spoolup / additions copy-paste.
        rbSizerRow3 = wx.BoxSizer(wx.HORIZONTAL)
        self.rbBox6 = wx.RadioBox(panel, (- 1), _t('Spoolup'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox6.SetSelection(self.settings.get('spoolup'))
        rbSizerRow3.Add(self.rbBox6, 1, wx.ALL, 5)
        self.rbBox6.Bind(wx.EVT_RADIOBOX, self.OnSetting6Change)
        self.rbBox7 = wx.RadioBox(panel, (- 1), _t('Additions Panel Copy/Paste'), wx.DefaultPosition, wx.DefaultSize, [_t('Disabled'), _t('Enabled')], 1, wx.RA_SPECIFY_COLS)
        self.rbBox7.SetSelection(self.settings.get('additionsCopyPaste'))
        rbSizerRow3.Add(self.rbBox7, 1, wx.ALL, 5)
        self.rbBox7.Bind(wx.EVT_RADIOBOX, self.OnSetting7Change)
        mainSizer.Add(rbSizerRow3, 1, (wx.ALL | wx.EXPAND), 0)
        panel.SetSizer(mainSizer)
        panel.Layout()

    # Radio-box handlers: persist the 0/1 selection under the matching settings key.
    def OnSetting1Change(self, event):
        self.settings.set('ammoPattern', event.GetInt())

    def OnSetting2Change(self, event):
        self.settings.set('changeAffectingSkills', event.GetInt())

    def OnSetting3Change(self, event):
        self.settings.set('metaSwap', event.GetInt())

    def OnSetting4Change(self, event):
        self.settings.set('project', event.GetInt())

    def OnSetting5Change(self, event):
        self.settings.set('moduleFill', event.GetInt())

    def OnSetting6Change(self, event):
        self.settings.set('spoolup', event.GetInt())

    def OnSetting7Change(self, event):
        self.settings.set('additionsCopyPaste', event.GetInt())

    def getImage(self):
        """Return the bitmap shown for this pane in the preferences dialog."""
        return BitmapLoader.getBitmap('settings_menu', 'gui')
def test_lowest_common_ancestor(graph_nodes, test_instance, root=None):
    """Check get_lowest_common_ancestor on 10000 random pairs of distinct leaves."""
    leaves = imagenet_spec.get_leaves(graph_nodes)
    num_leaves = len(leaves)
    for _ in range(10000):
        idx_a = np.random.randint(num_leaves)
        idx_b = np.random.randint(num_leaves)
        # Re-draw until the two leaves differ.
        while idx_a == idx_b:
            idx_b = np.random.randint(num_leaves)
        leaf_a, leaf_b = leaves[idx_a], leaves[idx_b]
        lca, height = imagenet_spec.get_lowest_common_ancestor(leaf_a, leaf_b)
        test_lowest_common_ancestor_(lca, height, leaf_a, leaf_b, test_instance, root=root)
class inconv(nn.Module):
    """Input block: a plain 3-D convolution followed by one residual block.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        kernel_size: int or 3-sequence of ints; default changed from the
            mutable list [3, 3, 3] to the equivalent immutable (3, 3, 3)
            (mutable default arguments are shared across calls).
        block: residual block class used for the second stage.
        norm: normalization layer class passed to the block.
    """

    def __init__(self, in_ch, out_ch, kernel_size=(3, 3, 3), block=BasicBlock, norm=nn.BatchNorm3d):
        super().__init__()
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size] * 3
        # "Same" padding for odd kernel sizes.
        pad_size = [k // 2 for k in kernel_size]
        self.conv1 = nn.Conv3d(in_ch, out_ch, kernel_size=kernel_size, padding=pad_size, bias=False)
        self.conv2 = block(out_ch, out_ch, kernel_size=kernel_size, norm=norm)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        return out
def search(word, exact=True, return_words=True, cache=True):
    """Search the dictionary site for *word*.

    Args:
        word: query string.
        exact: if True, keep only results whose title is a known variant of
            *word* (per get_search_link_variants).
        return_words: if True return fetched word objects, otherwise raw
            URL-name slugs.
        cache: forwarded to the request/result cache layer.

    Returns:
        List of word objects (default) or URL-name strings; empty list when
        nothing matches.
    """
    response_text = request_search(word, cache=cache)
    soup = bs4.BeautifulSoup(response_text, 'html.parser')
    definitions = soup.find_all('h2', class_='vignette__title')
    # find_all() returns a (possibly empty) list, never None — the previous
    # `is None` check was dead code; test emptiness instead.
    if not definitions:
        return []
    urlnames = []
    for definition in definitions:
        definition_title = definition.text
        if (not exact) or (word in get_search_link_variants(definition_title)):
            # The URL slug is the last path component of the result link.
            urlnames.append(definition.find('a')['href'].split('/')[-1])
    if not return_words:
        return urlnames
    return [get(urlname, cache=cache) for urlname in urlnames]
def test_parameterized_types(hive_client):
    """Check that Hive parameterized types convert to the expected recap schema.

    NOTE(review): three assertions below (col20 value bytes_, col21 field 'b'
    bytes_, col22 string bytes_) have an empty right-hand side — the source
    appears truncated; restore the expected values from the original file.
    """
    hms = HMS(hive_client)
    client = HiveMetastoreClient(hms)
    table = client.schema('test_db', 'test_table3')
    fields = table.fields
    assert (len(fields) == 7)
    # col16: nullable decimal(10, 2) modeled as bytes with a Decimal logical type.
    assert (fields[0].extra_attrs['name'] == 'col16')
    assert isinstance(fields[0], UnionType)
    assert isinstance(fields[0].types[0], NullType)
    assert isinstance(fields[0].types[1], BytesType)
    assert (fields[0].types[1].logical == 'build.recap.Decimal')
    assert (fields[0].types[1].bytes_ == 16)
    assert (fields[0].types[1].extra_attrs['precision'] == 10)
    assert (fields[0].types[1].extra_attrs['scale'] == 2)
    assert (fields[0].doc == 'c16')
    # col17 / col18: nullable bounded strings (length 10).
    assert (fields[1].extra_attrs['name'] == 'col17')
    assert isinstance(fields[1], UnionType)
    assert isinstance(fields[1].types[0], NullType)
    assert isinstance(fields[1].types[1], StringType)
    assert (fields[1].types[1].bytes_ == 10)
    assert (fields[1].doc == 'c17')
    assert (fields[2].extra_attrs['name'] == 'col18')
    assert isinstance(fields[2], UnionType)
    assert isinstance(fields[2].types[0], NullType)
    assert isinstance(fields[2].types[1], StringType)
    assert (fields[2].types[1].bytes_ == 10)
    assert (fields[2].doc == 'c18')
    # col19: nullable array<int>.
    assert (fields[3].extra_attrs['name'] == 'col19')
    assert isinstance(fields[3], UnionType)
    assert isinstance(fields[3].types[0], NullType)
    assert isinstance(fields[3].types[1], ListType)
    assert isinstance(fields[3].types[1].values, UnionType)
    assert isinstance(fields[3].types[1].values.types[0], NullType)
    assert isinstance(fields[3].types[1].values.types[1], IntType)
    assert (fields[3].types[1].values.types[1].bits == 32)
    assert fields[3].types[1].values.types[1].signed
    assert (fields[3].doc == 'c19')
    # col20: nullable map<int, string>.
    assert (fields[4].extra_attrs['name'] == 'col20')
    assert isinstance(fields[4], UnionType)
    assert isinstance(fields[4].types[0], NullType)
    assert isinstance(fields[4].types[1], MapType)
    assert isinstance(fields[4].types[1].keys, UnionType)
    assert isinstance(fields[4].types[1].keys.types[0], NullType)
    assert isinstance(fields[4].types[1].keys.types[1], IntType)
    assert (fields[4].types[1].keys.types[1].bits == 32)
    assert fields[4].types[1].keys.types[1].signed
    assert isinstance(fields[4].types[1].values, UnionType)
    assert isinstance(fields[4].types[1].values.types[0], NullType)
    assert isinstance(fields[4].types[1].values.types[1], StringType)
    assert (fields[4].types[1].values.types[1].bytes_ == )  # NOTE(review): RHS missing — source truncated
    assert (fields[4].doc == 'c20')
    # col21: nullable struct<a: int, b: string>.
    assert (fields[5].extra_attrs['name'] == 'col21')
    assert isinstance(fields[5], UnionType)
    assert isinstance(fields[5].types[0], NullType)
    assert isinstance(fields[5].types[1], StructType)
    assert (len(fields[5].types[1].fields) == 2)
    assert (fields[5].types[1].fields[0].extra_attrs['name'] == 'a')
    assert isinstance(fields[5].types[1].fields[0], UnionType)
    assert isinstance(fields[5].types[1].fields[0].types[0], NullType)
    assert isinstance(fields[5].types[1].fields[0].types[1], IntType)
    assert (fields[5].types[1].fields[0].types[1].bits == 32)
    assert fields[5].types[1].fields[0].types[1].signed
    assert (fields[5].types[1].fields[1].extra_attrs['name'] == 'b')
    assert isinstance(fields[5].types[1].fields[1], UnionType)
    assert isinstance(fields[5].types[1].fields[1].types[0], NullType)
    assert isinstance(fields[5].types[1].fields[1].types[1], StringType)
    assert (fields[5].types[1].fields[1].types[1].bytes_ == )  # NOTE(review): RHS missing — source truncated
    assert (fields[5].doc == 'c21')
    # col22: three-way union null | int | string.
    assert (fields[6].extra_attrs['name'] == 'col22')
    assert isinstance(fields[6], UnionType)
    assert (len(fields[6].types) == 3)
    assert isinstance(fields[6].types[0], NullType)
    assert isinstance(fields[6].types[1], IntType)
    assert (fields[6].types[1].bits == 32)
    assert fields[6].types[1].signed
    assert isinstance(fields[6].types[2], StringType)
    assert (fields[6].types[2].bytes_ == )  # NOTE(review): RHS missing — source truncated
    assert (fields[6].doc == 'c22')
def update_last_accessed(token_or_user):
    """Best-effort refresh of a token/user row's last_accessed timestamp.

    Skipped entirely when the feature flag is off, or when the row was
    refreshed within the configured threshold. DB errors are swallowed
    (logged) only when strict logging is disabled.
    """
    if not config.app_config.get('FEATURE_USER_LAST_ACCESSED'):
        return
    threshold_seconds = config.app_config.get('LAST_ACCESSED_UPDATE_THRESHOLD_S', 120)
    threshold = timedelta(seconds=threshold_seconds)
    previous = token_or_user.last_accessed
    if previous is not None and (datetime.utcnow() - previous) < threshold:
        # Refreshed recently enough; avoid the extra write.
        return
    model_class = token_or_user.__class__
    last_accessed = datetime.utcnow()
    try:
        query = model_class.update(last_accessed=last_accessed).where(model_class.id == token_or_user.id)
        query.execute()
        token_or_user.last_accessed = last_accessed
    except ReadOnlyModeException:
        # Database is read-only; silently skip the update.
        pass
    except PeeweeException as ex:
        if config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING'):
            data = {'exception': ex, 'token_or_user': token_or_user.id, 'class': str(model_class)}
            logger.exception('update last_accessed for token/user failed', extra=data)
        else:
            raise
def _create_walkable_graph(state: EnvironmentState):
    """Build an adjacency map of rooms connected by doors and doorjambs.

    Returns:
        dict mapping room id -> list of (neighbour_room_id, connector_node_id)
        tuples; doors are added before doorjambs, preserving the original order.
    """
    adj_lists = {}

    def _add_connections(connector_nodes):
        # Each connector links the two rooms it sits BETWEEN (if both exist).
        for node in connector_nodes:
            rooms = state.get_nodes_from(node, Relation.BETWEEN)
            if len(rooms) > 1:
                adj_lists.setdefault(rooms[0].id, []).append((rooms[1].id, node.id))
                adj_lists.setdefault(rooms[1].id, []).append((rooms[0].id, node.id))

    # Doors first, then doorjambs — same logic, previously duplicated inline.
    _add_connections(state.get_nodes_by_attr('class_name', 'door'))
    _add_connections(state.get_nodes_by_attr('class_name', 'doorjamb'))
    return adj_lists
# NOTE(review): the bare "()" below looks like a stripped "@task(...)"-style
# decorator — confirm against the original file.
()
def eggs_clean(context):
    """Remove egg build artifacts: the .eggs dir plus any *.egg / *.egg-info."""
    with context.cd(TASK_ROOT_STR):
        targets = {'.eggs'}
        for entry in os.listdir(os.curdir):
            if entry.endswith(('.egg-info', '.egg')):
                targets.add(entry)
        rmrf(targets)
def get_nuScenes_label_name(label_mapping):
    """Map learned class indices to nuScenes-16 label names from a YAML mapping file."""
    with open(label_mapping, 'r') as stream:
        mapping = yaml.safe_load(stream)
    label_names = dict()
    # Walk raw ids in descending order, so entries from smaller raw ids
    # overwrite earlier ones on collisions (same as the original ordering).
    for raw_id in reversed(sorted(mapping['learning_map'].keys())):
        learned_id = mapping['learning_map'][raw_id]
        label_names[learned_id] = mapping['labels_16'][learned_id]
    return label_names
# NOTE(review): the bare "_wraps(...)" call below looks like a stripped
# "@_wraps(...)" decorator — confirm against the original file.
_wraps(_stdlib_socket.socketpair, assigned=(), updated=())
def socketpair(family: FamilyT=FamilyDefault, type: TypeT=SocketKind.SOCK_STREAM, proto: int=0) -> tuple[(SocketType, SocketType)]:
    """Create a connected pair of sockets, wrapped from the stdlib equivalents."""
    raw_left, raw_right = _stdlib_socket.socketpair(family, type, proto)
    return from_stdlib_socket(raw_left), from_stdlib_socket(raw_right)
class ConfigOptionsHandler(ConfigHandler['Distribution']):
    """Parses the ``[options]`` section of ``setup.cfg`` onto a Distribution."""

    section_prefix = 'options'

    def __init__(self, target_obj: 'Distribution', options: AllCommandOptions, ignore_option_errors: bool, ensure_discovered: expand.EnsurePackagesDiscovered):
        super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
        # Project root; used to resolve file: directives and package discovery.
        self.root_dir = target_obj.src_root
        self.package_dir: Dict[(str, str)] = {}

    # NOTE(review): takes ``cls`` but carries no @classmethod decorator — this
    # looks like a stripped decorator; confirm against the original file.
    def _parse_list_semicolon(cls, value):
        """Parse a semicolon-separated list value."""
        return cls._parse_list(value, separator=';')

    def _parse_file_in_root(self, value):
        """Resolve a ``file:`` directive relative to the project root."""
        return self._parse_file(value, root_dir=self.root_dir)

    def _parse_requirements_list(self, label: str, value: str):
        """Parse a requirements list, warning on env-marker misconfig and dropping comments."""
        parsed = self._parse_list_semicolon(self._parse_file_in_root(value))
        _warn_accidental_env_marker_misconfig(label, value, parsed)
        # Filter out pure comment lines.
        return [line for line in parsed if (not line.startswith('#'))]

    def parsers(self):
        """Map option names to the parser applied to their raw string values."""
        parse_list = self._parse_list
        parse_bool = self._parse_bool
        parse_dict = self._parse_dict
        parse_cmdclass = self._parse_cmdclass
        return {'zip_safe': parse_bool, 'include_package_data': parse_bool, 'package_dir': parse_dict, 'scripts': parse_list, 'eager_resources': parse_list, 'dependency_links': parse_list, 'namespace_packages': self._deprecated_config_handler(parse_list, 'The namespace_packages parameter is deprecated, consider using implicit namespaces instead (PEP 420).'), 'install_requires': partial(self._parse_requirements_list, 'install_requires'), 'setup_requires': self._parse_list_semicolon, 'tests_require': self._parse_list_semicolon, 'packages': self._parse_packages, 'entry_points': self._parse_file_in_root, 'py_modules': parse_list, 'python_requires': SpecifierSet, 'cmdclass': parse_cmdclass}

    def _parse_cmdclass(self, value):
        """Resolve cmdclass entries to actual command classes."""
        package_dir = self.ensure_discovered.package_dir
        return expand.cmdclass(self._parse_dict(value), package_dir, self.root_dir)

    def _parse_packages(self, value):
        """Parse ``packages``, handling the ``find:``/``find_namespace:`` directives."""
        find_directives = ['find:', 'find_namespace:']
        trimmed_value = value.strip()
        if (trimmed_value not in find_directives):
            # Plain explicit list of package names.
            return self._parse_list(value)
        find_kwargs = self.parse_section_packages__find(self.sections.get('packages.find', {}))
        find_kwargs.update(namespaces=(trimmed_value == find_directives[1]), root_dir=self.root_dir, fill_package_dir=self.package_dir)
        return expand.find_packages(**find_kwargs)

    def parse_section_packages__find(self, section_options):
        """Extract valid keyword arguments for find_packages from [options.packages.find]."""
        section_data = self._parse_section_to_dict(section_options, self._parse_list)
        valid_keys = ['where', 'include', 'exclude']
        find_kwargs = dict([(k, v) for (k, v) in section_data.items() if ((k in valid_keys) and v)])
        where = find_kwargs.get('where')
        if (where is not None):
            # find_packages expects a single directory, not a list.
            find_kwargs['where'] = where[0]
        return find_kwargs

    def parse_section_entry_points(self, section_options):
        """Parse [options.entry_points] into the entry_points option."""
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self['entry_points'] = parsed

    def _parse_package_data(self, section_options):
        """Parse a package-data-style section into canonical form."""
        package_data = self._parse_section_to_dict(section_options, self._parse_list)
        return expand.canonic_package_data(package_data)

    def parse_section_package_data(self, section_options):
        """Parse [options.package_data]."""
        self['package_data'] = self._parse_package_data(section_options)

    def parse_section_exclude_package_data(self, section_options):
        """Parse [options.exclude_package_data]."""
        self['exclude_package_data'] = self._parse_package_data(section_options)

    def parse_section_extras_require(self, section_options):
        """Parse [options.extras_require] using per-extra requirements parsing."""
        parsed = self._parse_section_to_dict_with_key(section_options, (lambda k, v: self._parse_requirements_list(f'extras_require[{k}]', v)))
        self['extras_require'] = parsed

    def parse_section_data_files(self, section_options):
        """Parse [options.data_files], resolving globs relative to the root."""
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self['data_files'] = expand.canonic_data_files(parsed, self.root_dir)
class RegexTest(object):
    """Tests for inputs.regex validator behavior and schema.

    NOTE(review): the leading ".parametrize(...)" lines below look like
    "@pytest.mark.parametrize(...)" decorators with the "@pytest.mark"
    prefix stripped — confirm against the original file.
    """

    .parametrize('value', ['123', '', '00000'])
    def test_valid_input(self, value):
        # Matching input is returned unchanged.
        num_only = inputs.regex('^[0-9]+$')
        assert (num_only(value) == value)

    .parametrize('value', ['abc', '123abc', 'abc123', ''])
    def test_bad_input(self, value):
        # Non-matching input raises ValueError.
        num_only = inputs.regex('^[0-9]+$')
        with pytest.raises(ValueError):
            num_only(value)

    def test_bad_pattern(self):
        # An invalid regex pattern fails at construction time.
        with pytest.raises(re.error):
            inputs.regex('[')

    def test_schema(self):
        assert (inputs.regex('^[0-9]+$').__schema__ == {'type': 'string', 'pattern': '^[0-9]+$'})
def main():
    """Entry point: download TLE sets from each configured source and store them."""
    config = read_config(sys.argv[1])
    if 'logging' in config:
        logging.config.dictConfig(config['logging'])
    else:
        logging.basicConfig(level=logging.INFO)
    downloader = Downloader(config)
    db = SQLiteTLE(config['database']['path'], config['platforms'], config['text_writer'])
    logging.info('Start downloading TLEs')
    for downloader_name in config['downloaders']:
        fetcher = getattr(downloader, downloader_name)
        tles = fetcher()
        if isinstance(tles, dict):
            # Mapping of source name -> list of TLEs.
            for source, source_tles in tles.items():
                for tle in source_tles:
                    db.update_db(tle, source)
        else:
            # Flat list of TLEs; tag spacetrack downloads accordingly.
            source = 'spacetrack' if 'spacetrack' in downloader_name else 'file'
            for tle in tles:
                db.update_db(tle, source)
    db.write_tle_txt()
    db.close()
    logging.info('TLE downloading finished')
def rouge(hypotheses, references):
    """Compute per-pair ROUGE-1, ROUGE-2 and ROUGE-L scores.

    Pairs whose hypothesis is empty are dropped before scoring.
    """
    paired = [pair for pair in zip(hypotheses, references) if len(pair[0]) > 0]
    hypotheses, references = zip(*paired)
    rouge_1 = [rouge_n([hyp], [ref], 1) for hyp, ref in paired]
    rouge_2 = [rouge_n([hyp], [ref], 2) for hyp, ref in paired]
    rouge_l = [rouge_l_sentence_level([hyp], [ref]) for hyp, ref in paired]
    return rouge_1, rouge_2, rouge_l
class SvgDraggablePoint(gui.SvgRectangle, DraggableItem):
    """Draggable square handle that moves one (x, y) coordinate pair of a
    referenced SVG widget.

    The coordinate attribute names on the target widget are given by
    *name_coord_x* / *name_coord_y*; only widgets whose exact type is in
    *compatibility_iterable* may be attached.
    """

    def __init__(self, app_instance, name_coord_x, name_coord_y, compatibility_iterable, **kwargs):
        # Fixed handle size in pixels.
        self.w = 15
        self.h = 15
        super(SvgDraggablePoint, self).__init__(0, 0, self.w, self.h, **kwargs)
        DraggableItem.__init__(self, app_instance, **kwargs)
        self.attributes['stroke-dasharray'] = '2,2'
        # Attribute names on the referenced widget that hold the coordinates.
        self.name_coord_x = name_coord_x
        self.name_coord_y = name_coord_y
        self.set_stroke(1, 'black')
        self.set_fill('#ffcc00')
        self.compatibility_iterable = compatibility_iterable
        self.onmousedown.do(self.start_drag)

    def setup(self, refWidget, newParent):
        # Attach only to compatible (exact-type) widgets; None detaches.
        # Fixed: use `is None` instead of `== None` for the sentinel check.
        if (type(refWidget) in self.compatibility_iterable) or (refWidget is None):
            DraggableItem.setup(self, refWidget, newParent)

    def on_drag(self, emitter, x, y):
        """Translate the referenced widget's coordinates by the mouse delta."""
        if self.active:
            if (self.origin_x == (- 1)):
                # First event of a drag: record mouse and widget origins.
                self.origin_x = float(x)
                self.origin_y = float(y)
                self.refWidget_origin_x = float(self.refWidget.attributes[self.name_coord_x])
                self.refWidget_origin_y = float(self.refWidget.attributes[self.name_coord_y])
            else:
                # Apply the accumulated delta, snapped to the grid.
                self.refWidget.attributes[self.name_coord_x] = self.round_grid(((self.refWidget_origin_x + float(x)) - self.origin_x))
                self.refWidget.attributes[self.name_coord_y] = self.round_grid(((self.refWidget_origin_y + float(y)) - self.origin_y))
                self.update_position()

    def update_position(self):
        # Center the handle on the referenced coordinate.
        if self.refWidget:
            self.set_position((float(self.refWidget.attributes[self.name_coord_x]) - (self.w / 2)), (float(self.refWidget.attributes[self.name_coord_y]) - (self.h / 2)))
# NOTE(review): the leading ".parametrize(...)" lines look like stripped
# "@pytest.mark.parametrize(...)" decorators — confirm against the original file.
.parametrize('username,password', users)
.parametrize('project_id', projects)
def test_list(db, client, username, password, project_id):
    """List endpoint: permitted users get their expected ids; others get 404."""
    client.login(username=username, password=password)
    url = reverse(urlnames['list'], args=[project_id])
    response = client.get(url)
    if (project_id in view_integration_permission_map.get(username, [])):
        assert (response.status_code == 200)
        if (username == 'user'):
            # Plain users see no integrations at all.
            assert (sorted([item['id'] for item in response.json()]) == [])
        else:
            values_list = Integration.objects.filter(project_id=project_id).order_by('id').values_list('id', flat=True)
            assert (sorted([item['id'] for item in response.json()]) == list(values_list))
    else:
        assert (response.status_code == 404)
class SYSTEM_PROCESS_INFORMATION(Structure):
    """ctypes mirror of the NT SYSTEM_PROCESS_INFORMATION structure
    (as returned by NtQuerySystemInformation). Field order and types must
    match the native layout exactly."""

    _fields_ = [
        ('NextEntryOffset', ULONG),
        ('NumberOfThreads', ULONG),
        ('WorkingSetPrivate', LARGE_INTEGER),
        ('HardFaultCount', ULONG),
        ('NumberOfThreadsHighWatermark', ULONG),
        ('CycleTime', c_ulonglong),
        ('CreateTime', LARGE_INTEGER),
        ('UserTime', LARGE_INTEGER),
        ('KernelTime', LARGE_INTEGER),
        ('ImageName', UNICODE_STRING),
        ('BasePriority', LONG),
        ('UniqueProcessId', HANDLE),
        ('InheritedFromUniqueProcessId', HANDLE),
        ('HandleCount', ULONG),
        ('SessionId', ULONG),
        ('UniqueProcessKey', c_void_p),
        ('PeakVirtualSize', c_void_p),
        ('VirtualSize', c_void_p),
        ('PageFaultCount', ULONG),
        ('PeakWorkingSetSize', c_void_p),
        ('WorkingSetSize', c_void_p),
        ('QuotaPeakPagedPoolUsage', c_void_p),
        ('QuotaPagedPoolUsage', c_void_p),
        ('QuotaPeakNonPagedPoolUsage', c_void_p),
        ('QuotaNonPagedPoolUsage', c_void_p),
        ('PagefileUsage', c_void_p),
        ('PeakPagefileUsage', c_void_p),
        ('PrivatePageCount', c_size_t),
        ('ReadOperationCount', LARGE_INTEGER),
        ('WriteOperationCount', LARGE_INTEGER),
        ('OtherOperationCount', LARGE_INTEGER),
        ('ReadTransferCount', LARGE_INTEGER),
        ('WriteTransferCount', LARGE_INTEGER),
        ('OtherTransferCount', LARGE_INTEGER),
    ]

    def __str__(self):
        return f'Pid: {self.UniqueProcessId}, ImageName: {self.ImageName!r}'
class Describe_NumberingDefinitions():
    """Unit tests for _NumberingDefinitions length behavior."""

    def it_knows_how_many_numbering_definitions_it_contains(self, len_fixture):
        (numbering_definitions, numbering_definition_count) = len_fixture
        assert (len(numbering_definitions) == numbering_definition_count)

    # NOTE(review): the bare "(params=[0, 1, 2, 3])" below looks like a stripped
    # "@pytest.fixture(params=[0, 1, 2, 3])" decorator — confirm against the original.
    (params=[0, 1, 2, 3])
    def len_fixture(self, request):
        """Build a numbering element with request.param <w:num> children."""
        numbering_definition_count = request.param
        numbering_bldr = a_numbering().with_nsdecls()
        for idx in range(numbering_definition_count):
            numbering_bldr.with_child(a_num())
        numbering_elm = numbering_bldr.element
        numbering_definitions = _NumberingDefinitions(numbering_elm)
        return (numbering_definitions, numbering_definition_count)
def test_exit(manager_nospawn, minimal_conf_noscreen):
    """Clicking QuickExit should terminate qtile, breaking the IPC connection."""
    exit_widget = widget.QuickExit(timer_interval=0.001, countdown_start=1)
    config = minimal_conf_noscreen
    bar = libqtile.bar.Bar([exit_widget], 10)
    config.screens = [libqtile.config.Screen(top=bar)]
    manager_nospawn.start(config)
    topbar = manager_nospawn.c.bar['top']
    # One click arms the countdown; countdown_start=1 makes it fire almost at once.
    topbar.fake_button_press(0, 'top', 0, 0, button=1)
    with pytest.raises((IPCError, ConnectionResetError)):
        assert topbar.info()
def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):
    """Generate nested C loop code for a CAReduce-style elementwise reduction.

    Builds the loop nest from the outermost dimension inward, emitting per-input
    iterator pointer setup (preloops), per-dimension jump updates, and the
    per-element task code supplied in *loop_tasks*.
    """
    def loop_over(preloop, code, indices, i):
        # Wrap *code* in a C for-loop over dimension *i*, advancing each
        # input's iterator pointer by its per-dimension jump.
        iterv = f'ITER_{int(i)}'
        update = ''
        suitable_n = '1'
        for (j, index) in enumerate(indices):
            var = sub[f'lv{int(j)}']
            update += f'''{var}_iter += {var}_jump{index}_{i};
'''
            if (index != 'x'):
                # 'x' marks a broadcast dimension; any non-broadcast input
                # supplies the loop extent.
                suitable_n = f'{var}_n{index}'
        return f'''
        {preloop}
        for (int {iterv} = {suitable_n}; {iterv}; {iterv}--) {{
            {code}
            {update}
        }}
        '''
    # Attach each input's iterator initialization to its first non-broadcast
    # dimension (or dimension 0 when all dimensions are broadcast).
    preloops = {}
    for (i, (loop_order, dtype)) in enumerate(zip(loop_orders, dtypes)):
        for (j, index) in enumerate(loop_order):
            if (index != 'x'):
                preloops.setdefault(j, '')
                preloops[j] += (f'''%(lv{i})s_iter = ({dtype}*)(PyArray_DATA(%(lv{i})s));
''' % sub)
                break
        else:
            preloops.setdefault(0, '')
            preloops[0] += (f'''%(lv{i})s_iter = ({dtype}*)(PyArray_DATA(%(lv{i})s));
''' % sub)
    if (len(loop_tasks) == 1):
        # Zero-dimensional case: no loops, just the initialization.
        s = preloops.get(0, '')
    else:
        # Build the nest inside-out: innermost task first, wrapping outward.
        s = ''
        for (i, (pre_task, task), indices) in reversed(list(zip(range((len(loop_tasks) - 1)), loop_tasks, list(zip(*loop_orders))))):
            s = loop_over((preloops.get(i, '') + pre_task), (s + task), indices, i)
    # The last entry of loop_tasks is the finalization code.
    s += loop_tasks[(- 1)]
    return f'{{{s}}}'
class Venue(Object):
    """A venue: a location with a title, an address and optional
    Foursquare identification."""

    def __init__(self, *, client: 'pyrogram.Client'=None, location: 'types.Location', title: str, address: str, foursquare_id: str=None, foursquare_type: str=None):
        super().__init__(client)
        self.location = location
        self.title = title
        self.address = address
        self.foursquare_id = foursquare_id
        self.foursquare_type = foursquare_type

    # `_parse` never receives an instance -- its first parameter is the
    # client -- so it must be a staticmethod; without the decorator, calling
    # it on an instance would bind the instance as `client`.
    @staticmethod
    def _parse(client, venue: 'raw.types.MessageMediaVenue'):
        """Build a Venue from the raw Telegram media object."""
        return Venue(location=types.Location._parse(client, venue.geo), title=venue.title, address=venue.address, foursquare_id=(venue.venue_id or None), foursquare_type=venue.venue_type, client=client)
class VariationalEncoderDecoderGen(keras.utils.Sequence):
    """Keras Sequence yielding (per-modality feature, target) batches.

    Features and targets are loaded from ``.npy``/``.npz`` files under
    ``feature_root`` and row-filtered by the index file ``<phase>.txt``
    found under ``split_root``.
    """

    def __init__(self, feature_root, modalities, split_root, phase, batch_size, shuffle=True):
        assert (phase in ['train', 'val', 'test']), 'phase must be one of train, val, test!'
        index_path = os.path.join(split_root, '{}.txt'.format(phase))
        phase_idxes = pd.read_table(index_path, header=None).values.squeeze()
        self.modalities = modalities
        self.features = []
        for modality in modalities:
            feature_path = os.path.join(feature_root, '{}.npy'.format(modality))
            self.features.append(np.load(feature_path)[phase_idxes])
        target_path = os.path.join(feature_root, 'target.npz')
        self.target = np.load(target_path)['target'][phase_idxes]
        # (mean, std) of the target, kept for de-normalization downstream.
        self.target_stats = [np.load(target_path)['mean'], np.load(target_path)['std']]
        self.num_videos = len(phase_idxes)
        self.video_idxes = np.arange(self.num_videos)
        self.batch_size = batch_size
        self.shuffle = shuffle
        if self.shuffle:
            self.on_epoch_end()

    def on_epoch_end(self):
        # Reshuffle sample order between epochs when requested.
        if self.shuffle:
            np.random.shuffle(self.video_idxes)

    def __len__(self):
        # Number of batches = ceil(num_videos / batch_size).  The original
        # `num_videos // batch_size + 1` produced a trailing *empty* batch
        # whenever num_videos was an exact multiple of batch_size.
        self.batch_num = ((self.num_videos + self.batch_size - 1) // self.batch_size)
        return self.batch_num

    def __getitem__(self, i):
        """Return batch ``i`` as ``([features per modality], targets)``."""
        batch_idxes = self.video_idxes[(i * self.batch_size):((i + 1) * self.batch_size)]
        batch_size = len(batch_idxes)
        batch_features = [np.empty((batch_size, feature.shape[(- 1)]), dtype=np.float32) for feature in self.features]
        batch_target = np.empty((batch_size, 1), dtype=np.float32)
        for (j, idx) in enumerate(batch_idxes):
            for k in range(len(self.modalities)):
                batch_features[k][j] = self.features[k][idx]
            batch_target[j] = self.target[idx]
        return (batch_features, batch_target)

    def mod_shape_dict(self):
        """Map modality name -> feature dimensionality."""
        return {mod: feature.shape[(- 1)] for (mod, feature) in zip(self.modalities, self.features)}
def test_unicode_issue368(pytester: Pytester) -> None:
    """Regression test (#368): JUnit XML must serialize non-ASCII report text."""
    path = pytester.path.joinpath('test.xml')
    log = LogXML(str(path), None)
    ustr = '!'
    # Minimal stand-in for a pytest report carrying the problematic text.
    class Report(BaseReport):
        longrepr = ustr
        sections: List[Tuple[(str, str)]] = []
        nodeid = 'something'
        location = ('tests/filename.py', 42, 'TestClass.method')
        when = 'teardown'
    test_report = cast(TestReport, Report())
    log.pytest_sessionstart()
    node_reporter = log._opentestcase(test_report)
    # Exercise every append_* path; none may raise on the payload.
    node_reporter.append_failure(test_report)
    node_reporter.append_collect_error(test_report)
    node_reporter.append_collect_skipped(test_report)
    node_reporter.append_error(test_report)
    test_report.longrepr = ('filename', 1, ustr)
    node_reporter.append_skipped(test_report)
    test_report.longrepr = ('filename', 1, 'Skipped: ')
    node_reporter.append_skipped(test_report)
    test_report.wasxfail = ustr
    node_reporter.append_skipped(test_report)
    log.pytest_sessionfinish()
def test_load_adaptor_twice():
    """Loading the same adaptor twice must register only one instance."""
    # Copy, don't alias: with `old_sys_path = sys.path`, the append below
    # mutated the saved list too, so the final restore was a no-op.
    old_sys_path = list(sys.path)
    path = os.path.split(os.path.abspath(__file__))[0]
    sys.path.append(path)
    try:
        Engine()._load_adaptors(['mockadaptor_enabled', 'mockadaptor_enabled'])
        cpis = Engine().loaded_adaptors()
        mocks = cpis['radical.saga.job.Job']['mock']
        assert (len(mocks) == 1)
        cpis = Engine().loaded_adaptors()
        # Indexing asserts the adaptor instance actually exists.
        adaptor = cpis['radical.saga.job.Job']['mock'][0]['adaptor_instance']
    finally:
        # Restore sys.path even when an assertion above fails.
        sys.path = old_sys_path
def add_verbosity_args(parser, train=False):
    """Add a 'Verbosity' argument group to *parser* and return the group.

    Args:
        parser: an ``argparse.ArgumentParser`` (or compatible) to extend.
        train: unused here; kept for interface compatibility.
            # NOTE(review): confirm sibling ``add_*_args`` helpers share it.
    """
    verbosity_group = parser.add_argument_group('Verbosity')
    verbosity_group.add_argument('--log-verbose', action='store_true', help='Whether to output more verbose logs for debugging/profiling.')
    # The original help text ended with a duplicated fragment
    # ("...The default is 1.one per line)"); the stray repetition is removed.
    verbosity_group.add_argument('--args-verbosity', default=1, type=int, choices=[0, 1, 2], help="Level of verbosity when printing the arguments (0: don't print the arguments; 1: print the Namespace object; 2: print all the arguments, one per line). The default is 1.")
    return verbosity_group
def negative_sampling(pos_samples, num_entity, negative_rate):
size_of_batch = len(pos_samples)
num_to_generate = (size_of_batch * negative_rate)
neg_samples = np.tile(pos_samples, (negative_rate, 1))
labels = np.zeros((size_of_batch * (negative_rate + 1)), dtype=np.float32)
labels[:size_of_batch] = 1
values = np.random.randint(num_entity, size=num_to_generate)
choices = np.random.uniform(size=num_to_generate)
subj = (choices > 0.5)
obj = (choices <= 0.5)
neg_samples[(subj, 0)] = values[subj]
neg_samples[(obj, 2)] = values[obj]
return (np.concatenate((pos_samples, neg_samples)), labels) |
def main():
    """Entry point: set up logging and the portable dotnet runtime, then run the CLI."""
    multiprocessing.freeze_support()
    import randovania
    randovania.setup_logging('INFO', None, quiet=True)
    logging.debug('Starting Randovania...')
    runtime_dir = randovania.get_data_path().joinpath('dotnet_runtime')
    # Frozen builds ship their own dotnet runtime; point the environment at it.
    if randovania.is_frozen() and runtime_dir.exists():
        os.environ['PATH'] = f"{runtime_dir}{os.pathsep}{os.environ['PATH']}"
        os.environ['DOTNET_ROOT'] = f'{runtime_dir}'
        os.environ['DOTNET_MULTILEVEL_LOOKUP'] = '0'
        logging.debug('Portable dotnet path exists, added as DOTNET_ROOT.')
    from randovania import cli
    cli.run_cli(sys.argv)
class PreActivationBottleneck(nn.Module):
    """3D pre-activation bottleneck: three (BN -> ReLU -> Conv) stages with a
    skip connection (identity, or `downsample` when provided)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.bn1 = nn.BatchNorm3d(inplanes)
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, (planes * 4), kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip path first: identity unless a projection is required.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        out += shortcut
        return out
def make_layers(cfg, batch_norm=False, deconv=None):
    """Translate a VGG-style config list into an ``nn.Sequential``.

    ``cfg`` entries are either channel counts (conv layer) or ``'M'``
    (2x2 max-pool).  When ``deconv`` is given it replaces ``nn.Conv2d``;
    the very first (RGB) layer then gets the frozen-deconv settings.
    """
    modules = []
    channels = 3
    for v in cfg:
        if v == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        if not deconv:
            layer = nn.Conv2d(channels, v, kernel_size=3, padding=1)
        elif channels == 3:
            layer = deconv(channels, v, kernel_size=3, padding=1, freeze=True, n_iter=15, sampling_stride=3)
        else:
            layer = deconv(channels, v, kernel_size=3, padding=1)
        modules.append(layer)
        if batch_norm:
            modules.append(nn.BatchNorm2d(v))
        modules.append(nn.ReLU(inplace=True))
        channels = v
    return nn.Sequential(*modules)
def _get_ticklabels(band_type, kHz, separator):
    """Select the frequency tick-label table and apply the decimal separator.

    Falls back to the current locale's decimal point when *separator* is None.
    """
    if separator is None:
        import locale
        separator = locale.localeconv()['decimal_point']
    if band_type == 'octave':
        ticklabels = TICKS_OCTAVE_KHZ if kHz is True else TICKS_OCTAVE
    else:
        ticklabels = TICKS_THIRD_OCTAVE_KHZ if kHz is True else TICKS_THIRD_OCTAVE
    return _set_separator(ticklabels, separator)
class PassportElementErrorSelfie(PassportElementError):
    """Represents an issue with the selfie of a Passport document.

    Args:
        type: the section of the user's Passport that has the issue.
        file_hash: base64-encoded hash of the file with the selfie.
        message: human-readable error message.
    """

    __slots__ = ('file_hash',)

    def __init__(self, type: str, file_hash: str, message: str, *, api_kwargs: Optional[JSONDict]=None):
        # 'selfie' is the fixed error source for this element type.
        super().__init__('selfie', type, message, api_kwargs=api_kwargs)
        with self._unfrozen():
            self.file_hash: str = file_hash
        # Attributes that determine equality for this error type.
        self._id_attrs = (self.source, self.type, self.file_hash, self.message)
def get_service_ips_and_ports(component_name):
    """Query ``kubectl`` for a service's external IPs and named ports.

    Returns:
        tuple: ``(external_ips, ports)`` where ``ports`` maps each port name
        to its ``nodePort`` (preferred) or ``port``.  On failure returns
        ``([], {})`` so callers that unpack two values keep working.
    """
    try:
        get_service_cmd = ['kubectl', 'get', 'service', component_name, '-o', 'json']
        service_spec = subprocess.check_output(get_service_cmd).strip().decode('UTF-8')
        spec = json.loads(service_spec)
        external_ips = spec['spec'].get('externalIPs', [])
        ports = {}
        for port in spec['spec']['ports']:
            # Prefer the externally reachable nodePort when present.
            ports[port['name']] = port.get('nodePort', port.get('port'))
        return (external_ips, ports)
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Original returned a bare `()`, which broke two-value unpacking;
        # return an empty result of the normal shape instead.
        # FileNotFoundError covers kubectl not being installed at all.
        return ([], {})
class Conv2d_Atari(nn.Module):
    """Standard Atari/DQN convolutional torso: three conv layers followed by
    a fully-connected projection to ``feature_dim``."""

    def __init__(self, in_channels=4, feature_dim=512):
        super().__init__()
        self.conv1 = layer_init(nn.Conv2d(in_channels, 32, kernel_size=8, stride=4))
        self.conv2 = layer_init(nn.Conv2d(32, 64, kernel_size=4, stride=2))
        self.conv3 = layer_init(nn.Conv2d(64, 64, kernel_size=3, stride=1))
        # 7*7*64 flattened conv features -- assumes 84x84 input frames;
        # TODO(review): confirm the expected input resolution.
        self.fc4 = layer_init(nn.Linear(7 * 7 * 64, feature_dim))

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        x = x.view(x.size(0), -1)
        return F.relu(self.fc4(x))
class Win32Raw(Escpos):
    """Printer connection that sends raw ESC/POS bytes through the Win32
    print spooler (``win32print``)."""

    def is_usable() -> bool:
        # NOTE(review): defined without `self` -- presumably a stripped
        # `@staticmethod`; delegates to a module-level is_usable() helper.
        return is_usable()

    # NOTE(review): the bare `_win32print` line below looks like the remnant
    # of a stripped dependency-guard decorator -- confirm against upstream.
    _win32print
    def __init__(self, printer_name: str='', *args, **kwargs) -> None:
        """Remember the target printer; the handle is opened lazily in open()."""
        Escpos.__init__(self, *args, **kwargs)
        self.printer_name = printer_name
        self.job_name = ''
        # False = never opened, None = open failed, otherwise a printer handle.
        self._device: Union[(Literal[False], Literal[None], 'PyPrinterHANDLE')] = False

    def printers(self) -> dict:
        """Map printer name -> printer info for every visible printer."""
        # NOTE(review): used as a container in open() (`in self.printers`),
        # which suggests a stripped `@property` decorator.
        return {printer['pPrinterName']: printer for printer in win32print.EnumPrinters(win32print.PRINTER_ENUM_NAME, '', 4)}

    def open(self, job_name: str='python-escpos', raise_not_found: bool=True) -> None:
        """Open a RAW print job on the configured (or default) printer.

        Raises ``DeviceNotFoundError`` when the printer cannot be opened and
        ``raise_not_found`` is True; otherwise logs an error and returns.
        """
        # NOTE(review): this method sets `self.device` while close()/_raw()
        # also check `self._device` -- the two attributes look inconsistent;
        # confirm against the upstream implementation.
        if self._device:
            self.close()
        self.job_name = job_name
        try:
            self.printer_name = (self.printer_name or win32print.GetDefaultPrinter())
            assert (self.printer_name in self.printers), 'Incorrect printer name'
            self.device: Optional['PyPrinterHANDLE'] = win32print.OpenPrinter(self.printer_name)
            if self.device:
                self.current_job = win32print.StartDocPrinter(self.device, 1, (job_name, '', 'RAW'))
                win32print.StartPagePrinter(self.device)
        except (AssertionError, pywintypes.error) as e:
            self.device = None
            if raise_not_found:
                raise DeviceNotFoundError((f'Unable to start a print job for the printer {self.printer_name}:' + f'''
{e}'''))
            else:
                logging.error('Win32Raw printing %s not available', self.printer_name)
                return
        logging.info('Win32Raw printer enabled')

    def close(self) -> None:
        """End the page/document and release the printer handle."""
        if ((self._device is False) or (self._device is None)):
            return
        logging.info('Closing Win32Raw connection to printer %s', self.printer_name)
        win32print.EndPagePrinter(self._device)
        win32print.EndDocPrinter(self._device)
        win32print.ClosePrinter(self._device)
        self._device = False

    def _raw(self, msg: bytes) -> None:
        """Send raw bytes to the currently open print job."""
        if (self.printer_name is None):
            raise DeviceNotFoundError('Printer not found')
        if (not self.device):
            raise DeviceNotFoundError('Printer job not opened')
        win32print.WritePrinter(self.device, msg)
# NOTE(review): the original first line was the bare string
# ('PyQt6.QtWidgets.QGraphicsPixmapItem.keyPressEvent'), i.e. a stripped
# decorator; `key_mock` is injected by it, so it must have been `patch`.
@patch('PyQt6.QtWidgets.QGraphicsPixmapItem.keyPressEvent')
def test_key_press_event_other(key_mock, qapp, item):
    """Keys other than the crop keys are forwarded to the base implementation."""
    item.exit_crop_mode = MagicMock()
    event = MagicMock()
    event.key.return_value = Qt.Key.Key_Space
    item.keyPressEvent(event)
    item.exit_crop_mode.assert_not_called()
    key_mock.assert_called_once_with(event)
class BacktestPositionFactory():
    """Creates the BacktestPosition subclass matching a ticker's security type."""

    # `create_position` takes the ticker as its first parameter (no `self`),
    # so it must be a staticmethod to be callable on the class or an instance.
    @staticmethod
    def create_position(ticker: Ticker) -> BacktestPosition:
        """Return a new position for *ticker*.

        Raises:
            ValueError: when the ticker's security type is unsupported.
        """
        sec_type = ticker.security_type
        if (sec_type == SecurityType.STOCK):
            return BacktestEquityPosition(ticker)
        elif (sec_type == SecurityType.FUTURE):
            return BacktestFuturePosition(ticker)
        elif (sec_type == SecurityType.CRYPTO):
            return BacktestCryptoPosition(ticker)
        else:
            # The original message left the '{}' placeholder unfilled.
            raise ValueError("Ticker security type: '{}' is not currently supported.".format(sec_type))
def run_gat_target(args, device, data):
    """Train a GAT 'target' model with multi-layer neighbor sampling.

    ``data`` unpacks to (train_g, val_g, test_g, in_feats, labels, n_classes,
    g, num_heads).  Evaluation runs every ``args.eval_every`` epochs; average
    epoch time is reported excluding the first five warm-up epochs.
    Returns the trained model.
    """
    (train_g, val_g, test_g, in_feats, labels, n_classes, g, num_heads) = data
    train_nid = train_g.nodes()
    val_nid = val_g.nodes()
    test_nid = test_g.nodes()
    sampler = dgl.dataloading.MultiLayerNeighborSampler([int(fanout) for fanout in args.fan_out.split(',')])
    dataloader = dgl.dataloading.NodeDataLoader(train_g, train_nid, sampler, batch_size=args.batch_size, shuffle=True, drop_last=False, num_workers=args.num_workers)
    model = GAT(in_feats, args.num_hidden, n_classes, args.num_layers, num_heads, args.num_workers, F.relu, args.dropout)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
    loss_fcn = nn.CrossEntropyLoss()
    loss_fcn = loss_fcn.to(device)
    avg = 0
    iter_tput = []
    best_eval_acc = 0
    best_test_acc = 0
    for epoch in range(args.num_epochs):
        tic = time.time()
        for (step, (input_nodes, seeds, blocks)) in enumerate(dataloader):
            blocks = [block.int().to(device) for block in blocks]
            batch_inputs = blocks[0].srcdata['features']
            batch_labels = blocks[(- 1)].dstdata['labels']
            tic_step = time.time()
            # NOTE(review): blocks were already moved to `device` above; this
            # second transfer looks redundant -- confirm before removing.
            blocks = [blk.to(device) for blk in blocks]
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Throughput in seed (target) nodes per second.
            iter_tput.append((len(seeds) / (time.time() - tic_step)))
            if ((step % args.log_every) == 0):
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = ((th.cuda.max_memory_allocated() / 1000000) if th.cuda.is_available() else 0)
                print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format((toc - tic)))
        if (epoch >= 5):
            # Skip the first five epochs when averaging (warm-up).
            avg += (toc - tic)
        if ((((epoch + 1) % args.eval_every) == 0) and (epoch != 0)):
            (eval_acc, pred, embds) = evaluate_gat_target(model, val_g, val_g.ndata['features'], val_g.ndata['labels'], val_nid, args.val_batch_size, num_heads, device)
            (test_acc, pred, embds) = evaluate_gat_target(model, test_g, test_g.ndata['features'], test_g.ndata['labels'], test_nid, args.val_batch_size, num_heads, device)
            print('Eval Acc {:.4f}'.format(eval_acc))
            print('Test Acc {:.4f}'.format(test_acc))
    print('Avg epoch time: {}'.format((avg / (epoch - 4))))
    return model
def get_files(**kwargs):
    """Return the template files plus extra build artifacts.

    Adds a dummy shared library, an ``.hgignore`` and a ``PKG-INFO`` file
    under ``kwargs['relative_root']`` (defaults to '').
    """
    root = kwargs.get('relative_root', '')
    pkg_info = f'''Metadata-Version: {DEFAULT_METADATA_VERSION}
Name: {kwargs['project_name']}
Version: 0.0.1
License-File: LICENSE.txt
'''
    hgignore = 'syntax: glob\n*.pyc\n\nsyntax: foo\nREADME.md\n\nsyntax: glob\n*.so\n*.h\n'
    files = [File(Path(root, f.path), f.contents) for f in get_template_files(**kwargs)]
    files.append(File(Path(root, kwargs['package_name'], 'lib.so'), ''))
    files.append(File(Path(root, '.hgignore'), hgignore))
    files.append(File(Path(root, 'PKG-INFO'), pkg_info))
    return files
# NOTE(review): the original began with bare `.end_to_end()` / `.skipif` /
# `.parametrize` lines -- stripped `@pytest.mark.` prefixes, restored below.
@pytest.mark.end_to_end()
@pytest.mark.skipif((not _TEST_SHOULD_RUN), reason='pygraphviz is required')
@pytest.mark.parametrize('layout', _GRAPH_LAYOUTS)
@pytest.mark.parametrize('format_', _TEST_FORMATS)
@pytest.mark.parametrize('rankdir', ['LR'])
def test_create_graph_via_cli(tmp_path, runner, format_, layout, rankdir):
    """`pytask dag` renders the task graph in the requested format/layout."""
    if ((sys.platform == 'win32') and (format_ == 'pdf')):
        pytest.xfail('gvplugin_pango.dll might be missing on Github Actions.')
    # NOTE(review): the task decorator inside this snippet had also lost its
    # `@pytask.mark` prefix; restored so the sample task depends on input.txt.
    source = '\n import pytask\n\n @pytask.mark.depends_on("input.txt")\n def task_example(): pass\n '
    tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
    tmp_path.joinpath('input.txt').touch()
    result = runner.invoke(cli, ['dag', tmp_path.as_posix(), '-o', tmp_path.joinpath(f'dag.{format_}'), '-l', layout, '-r', rankdir])
    assert (result.exit_code == ExitCode.OK)
    assert tmp_path.joinpath(f'dag.{format_}').exists()
class SmtLib20Parser(SmtLibParser):
    """Parser restricted to SMT-LIB 2.0: later-standard commands are removed."""

    def __init__(self, environment=None, interactive=False):
        SmtLibParser.__init__(self, environment, interactive)
        # These commands were introduced after SMT-LIB 2.0 and must not be
        # recognized by a strict 2.0 parser.
        for cmd in ('check-sat-assuming', 'declare-const', 'define-fun-rec',
                    'define-funs-rec', 'echo', 'get-assignment',
                    'get-unsat-assumptions', 'reset', 'reset-assertions'):
            del self.commands[cmd]
def SelectPeakIndex(FFT_Data, endpoint=True):
    """Return the indices of local maxima in a 1-D array.

    An interior index ``i`` is a peak when ``FFT_Data[i]`` is strictly
    greater than both neighbours.  With ``endpoint=True`` the first/last
    element is also reported when it exceeds its single neighbour.
    """
    interior = FFT_Data[1:(- 1)]
    rising = ((interior - FFT_Data[0:(- 2)]) > 0)
    falling = ((interior - FFT_Data[2:]) > 0)
    # Idiom fix: `np.where(mask == True)` -> `np.where(mask)`.
    # +1 maps from sliced coordinates back to the original indices.
    sel_ind = (np.where(np.logical_and(rising, falling))[0] + 1)
    if endpoint:
        if ((FFT_Data[0] - FFT_Data[1]) > 0):
            sel_ind = np.concatenate([[0], sel_ind])
        if ((FFT_Data[(- 1)] - FFT_Data[(- 2)]) > 0):
            sel_ind = np.concatenate([sel_ind, [(len(FFT_Data) - 1)]])
    return sel_ind
class ConcatInternalConnectivity(InternalConnectivity):
    """Connectivity for a concatenation node: several inputs feed one output."""

    def __init__(self, input_mask_and_length_tuple: List[Tuple[(List, int)]], output_mask_and_length_tuple: List[Tuple[(List, int)]]):
        assert len(input_mask_and_length_tuple) > 1
        assert len(output_mask_and_length_tuple) == 1
        super().__init__(input_mask_and_length_tuple, output_mask_and_length_tuple)

    def forward_propagate_the_masks(self, input_mask_list: List[List[int]], output_mask_list: List[List[int]]):
        """The output mask is the concatenation of all input masks."""
        assert len(input_mask_list) > 1
        assert len(output_mask_list) == 1
        merged = []
        for mask in input_mask_list:
            merged.extend(mask)
        output_mask_list[0] = merged

    def backward_propagate_the_masks(self, output_mask_list: List[List[int]], input_mask_list: List[List[int]]):
        """Split the (possibly pruned) output mask back into per-input segments."""
        output_mask = output_mask_list[0]
        # Only re-segment when the output mask actually contains pruned
        # (zero) positions.
        if get_zero_positions_in_binary_mask(output_mask):
            offset = 0
            for seg_idx, mask in enumerate(input_mask_list):
                width = len(mask)
                segment = [output_mask[pos] for pos in range(offset, offset + width)]
                offset += width
                assert len(mask) == len(segment)
                input_mask_list[seg_idx] = segment
class VanillaOption(Instrument):
    """A vanilla (call/put) option instrument."""

    def __init__(self, option_type, expiry_type, strike, expiry_date, derivative_type):
        self.option_type = (option_type or VanillaOptionType.CALL.value)
        self.expiry_type = (expiry_type or ExpiryType.EUROPEAN.value)
        self.strike = strike
        self.expiry_date = datetime.strptime(expiry_date, '%Y%m%d')
        self.derivative_type = (derivative_type or DerivativeType.VANILLA_OPTION.value)

    def _option_type_flag(self):
        """Return +1 for a call, -1 for a put."""
        if (self.option_type == VanillaOptionType.CALL.value):
            return 1
        else:
            return (- 1)

    def payoff(self, spot0=None):
        """Intrinsic value at expiry for spot price *spot0*.

        Bug fix: the original multiplied the bound method object
        (``self._option_type_flag``) instead of calling it, raising
        TypeError on every invocation.
        """
        return max((self._option_type_flag() * (spot0 - self.strike)), 0.0)
def listen_cli(args):
    """Start a DICOM listener and block until SIGINT/SIGTERM is received."""
    import time  # local import: keeps the module import block untouched
    dicom_listener = DicomListener(host=args.host, port=args.port, ae_title=args.aetitle, storage_directory=args.storage_directory)
    logging.info('Starting DICOM listener')
    logging.info('IP: %s', args.host)
    logging.info('Port: %s', args.port)
    logging.info('AE Title: %s', args.aetitle)
    dicom_listener.start()
    logging.info('Listener Ready')

    def handler_stop_signals(*_):
        # Invoked on SIGINT/SIGTERM: stop the listener, then exit.
        logging.info('Shutting down listener')
        dicom_listener.stop()
        sys.exit()
    signal.signal(signal.SIGINT, handler_stop_signals)
    signal.signal(signal.SIGTERM, handler_stop_signals)
    # Sleep instead of the original `while True: pass`, which busy-spun a
    # CPU core at 100% while waiting for a signal.
    while True:
        time.sleep(1)
class TestInterpreterVersion():
    """Tests for ``tags.interpreter_version``."""

    def test_warn(self, monkeypatch):
        """The ``warn`` flag must be forwarded to ``_get_config_var``."""

        class MockConfigVar():
            def __init__(self, return_):
                self.warn = None
                self._return = return_

            def __call__(self, name, warn):
                self.warn = warn
                return self._return
        mock_config_var = MockConfigVar('38')
        monkeypatch.setattr(tags, '_get_config_var', mock_config_var)
        tags.interpreter_version(warn=True)
        assert mock_config_var.warn

    def test_python_version_nodot(self, monkeypatch):
        """A configured py_version_nodot value is returned verbatim."""
        monkeypatch.setattr(tags, '_get_config_var', (lambda var, warn: 'NN'))
        assert (tags.interpreter_version() == 'NN')

    # NOTE(review): restored the stripped `@pytest.mark.` prefix; the
    # original line began with a bare `.parametrize(...)`, invalid syntax.
    @pytest.mark.parametrize('version_info,version_str', [((1, 2, 3), '12'), ((1, 12, 3), '112'), ((11, 2, 3), '112'), ((11, 12, 3), '1112'), ((1, 2, 13), '12')])
    def test_sys_version_info(self, version_info, version_str, monkeypatch):
        """Without config, the version is derived from sys.version_info."""
        monkeypatch.setattr(tags, '_get_config_var', (lambda *args, **kwargs: None))
        monkeypatch.setattr(sys, 'version_info', version_info)
        assert (tags.interpreter_version() == version_str)
class CIFARPyramidNet(nn.Module):
    """PyramidNet model for CIFAR-style inputs.

    Stacks an init conv block, the pyramid stages described by ``channels``,
    a pre-res activation, an 8x8 average pool and a linear classifier.
    """

    def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10):
        super(CIFARPyramidNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels, activation=None))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample only at the first unit of every non-first stage.
                stride = 2 if (stage_idx != 0 and unit_idx == 0) else 1
                stage.add_module('unit{}'.format(unit_idx + 1), PyrUnit(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module('stage{}'.format(stage_idx + 1), stage)
        self.features.add_module('post_activ', PreResActivation(in_channels=in_channels))
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=8, stride=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for conv weights; zero for conv biases."""
        for _, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def parse_args(argv=None):
    """Parse command-line options for the data-collection tool.

    Args:
        argv: optional argument list; defaults to ``sys.argv[1:]``
            (backward compatible with the original zero-argument call).

    Returns:
        argparse.Namespace with ``plugins`` and ``commands`` booleans
        (mutually exclusive).
    """
    parser = argparse.ArgumentParser(description='Collect data to be submitted to the server')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--plugins', action='store_true', help='Only run plugins (no commands)')
    group.add_argument('--commands', action='store_true', help='Only run one-shot commands (no plugins)')
    args = parser.parse_args(argv)
    return args
# NOTE(review): decorators were stripped in the original ("_fixtures(WebFixture)"
# and a bare "(Widget)"); restored with reahl's customary `@with_fixtures` /
# `@stubclass` names -- confirm against the project imports.
@with_fixtures(WebFixture)
def test_activating_javascript(web_fixture):
    """Each distinct widget JS snippet is rendered exactly once on the page."""

    @stubclass(Widget)
    class WidgetWithJavaScript(Widget):
        def __init__(self, view, fake_js):
            super().__init__(view)
            self.fake_js = fake_js

        def get_js(self, context=None):
            return [self.fake_js]

    class MyPage(Widget):
        def __init__(self, view):
            super().__init__(view)
            # 'js1' is added twice on purpose: duplicates must be collapsed.
            self.add_child(WidgetWithJavaScript(view, 'js1'))
            self.add_child(WidgetWithJavaScript(view, 'js2'))
            self.add_child(WidgetWithJavaScript(view, 'js1'))
            self.add_child(Slot(view, 'reahl_footer'))

    class MainUI(UserInterface):
        def assemble(self):
            self.define_page(MyPage)
            self.define_view('/', title='Home')
    fixture = web_fixture
    wsgi_app = fixture.new_wsgi_app(site_root=MainUI)
    browser = Browser(wsgi_app)
    browser.open('/')
    # NOTE(review): the xpath predicate was garbled ('[="..."]'); restored
    # to an @id match, consistent with the referenced snippet id.
    rendered_js = [i.text for i in browser.lxml_html.xpath('//script[@id="reahl-jqueryready"]')][0]
    assert (rendered_js == "\njQuery(document).ready(function($){\n$('body').addClass('enhanced');\njs1\njs2\n\n});\n")
    number_of_duplicates = (rendered_js.count('js1') - 1)
    assert (number_of_duplicates == 0)
class MultiWozV22(datasets.GeneratorBasedBuilder):
    """Dataset builder for MultiWOZ 2.2 (task-oriented dialogues with
    dialogue states, span annotations and dialogue acts)."""

    VERSION = datasets.Version('2.2.0')

    def _info(self):
        """Declare the per-dialogue feature schema."""
        features = datasets.Features({'dialogue_id': datasets.Value('string'), 'db_root_path': datasets.Value('string'), 'services': datasets.Sequence(datasets.Value('string')), 'db_paths': datasets.Sequence(datasets.Value('string')), 'turns': datasets.Sequence({'turn_id': datasets.Value('string'), 'speaker': datasets.ClassLabel(names=['USER', 'SYSTEM']), 'utterance': datasets.Value('string'), 'frames': datasets.Sequence({'service': datasets.Value('string'), 'state': {'active_intent': datasets.Value('string'), 'requested_slots': datasets.Sequence(datasets.Value('string')), 'slots_values': datasets.Sequence({'slots_values_name': datasets.Value('string'), 'slots_values_list': datasets.Sequence(datasets.Value('string'))})}, 'slots': datasets.Sequence({'slot': datasets.Value('string'), 'value': datasets.Value('string'), 'start': datasets.Value('int32'), 'exclusive_end': datasets.Value('int32'), 'copy_from': datasets.Value('string'), 'copy_from_value': datasets.Sequence(datasets.Value('string'))})}), 'dialogue_acts': datasets.Features({'dialog_act': datasets.Sequence({'act_type': datasets.Value('string'), 'act_slots': datasets.Sequence(datasets.Features({'slot_name': datasets.Value('string'), 'slot_value': datasets.Value('string')}))}), 'span_info': datasets.Sequence({'act_type': datasets.Value('string'), 'act_slot_name': datasets.Value('string'), 'act_slot_value': datasets.Value('string'), 'span_start': datasets.Value('int32'), 'span_end': datasets.Value('int32')})})})})
        # NOTE(review): the homepage argument was an unterminated string
        # literal in the original (garbled URL); restored to the upstream
        # MultiWOZ repository -- confirm the intended URL.
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features, supervised_keys=None, homepage='https://github.com/budzianowski/multiwoz', license=_LICENSE, citation=_CITATION)

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map split names to shard files."""
        data_path = dl_manager.download_and_extract(URL)

        def __get_file_paths_dict(root_path):
            # One entry per shard plus the shared dialogue-acts file.
            file_paths = [('dialogue_acts', 'dialog_acts.json')]
            file_paths += [(f'train_{i:03d}', f'train/dialogues_{i:03d}.json') for i in range(1, 18)]
            file_paths += [(f'dev_{i:03d}', f'dev/dialogues_{i:03d}.json') for i in range(1, 3)]
            file_paths += [(f'test_{i:03d}', f'test/dialogues_{i:03d}.json') for i in range(1, 3)]
            file_paths = dict(file_paths)
            for (file_info, file_name) in file_paths.items():
                file_paths[file_info] = os.path.join(root_path, 'multiwoz-44f0f8479fc5591b839ad78827da197b', 'data', 'MultiWOZ_2.2', file_name)
            return dict(file_paths)
        data_files = __get_file_paths_dict(data_path)
        db_root_path = os.path.join(data_path, 'multiwoz-44f0f8479fc5591b839ad78827da197b', 'db')
        # NOTE(review): these are bare file names, not joined with
        # db_root_path -- confirm load_entities() expects relative names.
        db_paths = [path for path in os.listdir(db_root_path) if str(path).endswith('.json')]
        self.global_entities = load_entities(db_paths)
        self.stored_dialogue_acts = json.load(open(data_files['dialogue_acts']))
        return [datasets.SplitGenerator(name=spl_enum, gen_kwargs={'filepaths': data_files, 'split': spl, 'db_root_path': db_root_path}) for (spl, spl_enum) in [('train', datasets.Split.TRAIN), ('dev', datasets.Split.VALIDATION), ('test', datasets.Split.TEST)]]

    def _generate_examples(self, filepaths, split, db_root_path):
        """Yield (id, example) pairs for every dialogue in the split's shards."""
        id_ = (- 1)
        file_list = [fpath for (fname, fpath) in filepaths.items() if fname.startswith(split)]
        for filepath in file_list:
            dialogues = json.load(open(filepath))
            for dialogue in dialogues:
                id_ += 1
                mapped_acts = self.stored_dialogue_acts.get(dialogue['dialogue_id'], {})
                # Bug fix: the 'start'/'exclusive_end' span fields were read
                # from each other's source keys; they are now mapped straight
                # through from the annotation.
                res = {'dialogue_id': dialogue['dialogue_id'], 'db_root_path': db_root_path, 'services': dialogue['services'], 'db_paths': [os.path.join(db_root_path, '{}.json'.format(service)) for service in dialogue['services']], 'turns': [{'turn_id': turn['turn_id'], 'speaker': turn['speaker'], 'utterance': underscore_entities(turn['utterance'], self.global_entities), 'frames': [{'service': frame['service'], 'state': {'active_intent': (frame['state']['active_intent'] if ('state' in frame) else ''), 'requested_slots': (frame['state']['requested_slots'] if ('state' in frame) else []), 'slots_values': {'slots_values_name': ([sv_name for (sv_name, sv_list) in frame['state']['slot_values'].items()] if ('state' in frame) else []), 'slots_values_list': ([sv_list for (sv_name, sv_list) in frame['state']['slot_values'].items()] if ('state' in frame) else [])}}, 'slots': [{'slot': slot['slot'], 'value': ('' if ('copy_from' in slot) else slot['value']), 'start': slot.get('start', (- 1)), 'exclusive_end': slot.get('exclusive_end', (- 1)), 'copy_from': slot.get('copy_from', ''), 'copy_from_value': (slot['value'] if ('copy_from' in slot) else [])} for slot in frame['slots']]} for frame in turn['frames'] if (('active_only' not in self.config.name) or (frame.get('state', {}).get('active_intent', 'NONE') != 'NONE'))], 'dialogue_acts': {'dialog_act': [{'act_type': act_type, 'act_slots': {'slot_name': [sl_name for (sl_name, sl_val) in dialog_act], 'slot_value': [sl_val for (sl_name, sl_val) in dialog_act]}} for (act_type, dialog_act) in mapped_acts.get(turn['turn_id'], {}).get('dialog_act', {}).items()], 'span_info': [{'act_type': span_info[0], 'act_slot_name': span_info[1], 'act_slot_value': span_info[2], 'span_start': span_info[3], 'span_end': span_info[4]} for span_info in mapped_acts.get(turn['turn_id'], {}).get('span_info', [])]}} for turn in dialogue['turns']]}
                (yield (id_, res))
class HeisenbergModel(LatticeModel):
    """Heisenberg spin-1/2 model on a lattice with an optional external field."""

    def __init__(self, lattice: Lattice, coupling_constants: tuple=(1.0, 1.0, 1.0), ext_magnetic_field: tuple=(0.0, 0.0, 0.0)) -> None:
        super().__init__(lattice)
        self.coupling_constants = coupling_constants
        self.ext_magnetic_field = ext_magnetic_field

    def register_length(self) -> int:
        """One spin per lattice node."""
        return self._lattice.num_nodes

    def second_q_op(self) -> SpinOp:
        """Build the spin operator: XX/YY/ZZ couplings on edges, X/Y/Z
        external-field terms on self-loops; near-zero coefficients skipped."""
        terms = {}
        for node_a, node_b, _ in self.lattice.weighted_edge_list:
            if node_a == node_b:
                # Self-loop encodes a single-site external-field term.
                for axis, strength in zip('XYZ', self.ext_magnetic_field):
                    if not np.isclose(strength, 0.0):
                        terms[f'{axis}_{node_a}'] = strength
            else:
                # Two-site exchange term along each axis.
                for axis, strength in zip('XYZ', self.coupling_constants):
                    if not np.isclose(strength, 0.0):
                        terms[f'{axis}_{node_a} {axis}_{node_b}'] = strength
        return SpinOp(terms, spin=Fraction(1, 2), num_spins=self.lattice.num_nodes)
def any_causes_overload_ambiguity(items: list[CallableType], return_types: list[Type], arg_types: list[Type], arg_kinds: list[ArgKind], arg_names: (Sequence[(str | None)] | None)) -> bool:
    """Report whether an Any-typed argument could select overload items whose
    matched formal types differ AND whose return types differ (ambiguity)."""
    if all_same_types(return_types):
        return False
    actual_to_formal = [
        map_formals_to_actuals(arg_kinds, arg_names, item.arg_kinds, item.arg_names, (lambda i: arg_types[i]))
        for item in items
    ]
    for arg_idx, arg_type in enumerate(arg_types):
        if not has_any_type(arg_type, ignore_in_type_obj=True):
            continue
        # Overload items whose formals actually consume this argument.
        matching = [(item_idx, lookup[arg_idx]) for item_idx, lookup in enumerate(actual_to_formal) if lookup[arg_idx]]
        matching_returns = [items[item_idx].ret_type for item_idx, _ in matching]
        matching_formals = [items[item_idx].arg_types[formal] for item_idx, formals in matching for formal in formals]
        # Identical formals or identical returns cannot create ambiguity.
        if not all_same_types(matching_formals) and not all_same_types(matching_returns):
            return True
    return False
class Cache():
    """Key/value cache with a write-staging area in front of a file cache
    (JSON by default, SQLite otherwise)."""

    def __init__(self, cache_file_location: str='.pyspark_ai.json', file_format: str='json'):
        # Pending writes live here until commit() flushes them to the file.
        self._staging_updates: Dict[(str, str)] = {}
        if file_format == 'json':
            self._file_cache: FileCache = JsonCache(cache_file_location)
        else:
            self._file_cache = SQLiteCacheWrapper(cache_file_location)

    def lookup(self, key: str) -> Optional[str]:
        """Return the staged value when present, else the persisted one."""
        staged = self._staging_updates.get(key)
        return staged if staged is not None else self._file_cache.lookup(key)

    def update(self, key: str, val: str) -> None:
        """Stage a write; it is not persisted until commit()."""
        self._staging_updates[key] = val

    def clear(self) -> None:
        """Drop both the persisted cache and any staged writes."""
        self._file_cache.clear()
        self._staging_updates = {}

    def commit(self) -> None:
        """Persist all staged writes and empty the staging area."""
        self._file_cache.commit_staging_cache(self._staging_updates)
        self._staging_updates = {}
class TestExpandXarrayDims(object):
    """Unit tests for ``coords.expand_xarray_dims``."""

    def setup_method(self):
        """Create a test Instrument and empty result containers."""
        self.test_inst = pysat.Instrument(
            inst_module=pysat.instruments.pysat_ndtesting, use_header=True)
        self.start_time = pysat.instruments.pysat_ndtesting._test_dates['']['']
        self.data_list = []
        self.out = None
        self.meta = None
        return

    def teardown_method(self):
        """Clean up the attributes created in ``setup_method``."""
        del self.test_inst, self.start_time, self.data_list, self.meta, self.out
        return

    def set_data_meta(self, dims_equal):
        """Load two data sets whose dimensions may or may not match.

        Parameters
        ----------
        dims_equal : bool
            If True, reload the same instrument module so dimension names
            match (sizes still differ); otherwise load a model instrument
            with different dimension names.
        """
        self.test_inst.load(date=self.start_time)
        self.data_list.append(self.test_inst.data)
        self.meta = self.test_inst.meta

        # Second load uses half as many samples so dimension sizes differ.
        num_samples = int(self.test_inst.index.shape[0] / 2)
        if dims_equal:
            self.test_inst = pysat.Instrument(
                inst_module=self.test_inst.inst_module,
                num_samples=num_samples, use_header=True)
        else:
            self.test_inst = pysat.Instrument(
                inst_module=pysat.instruments.pysat_testmodel,
                num_samples=num_samples, use_header=True)

        self.test_inst.load(date=(self.start_time + dt.timedelta(days=1)))
        self.data_list.append(self.test_inst.data)
        return

    def eval_dims(self, dims_equal, exclude_dims=None, default_fill_val=None):
        """Evaluate the expanded dimensions stored in ``self.out``.

        Parameters
        ----------
        dims_equal : bool
            Whether all data sets share the same dimension names.
        exclude_dims : list or NoneType
            Dimensions excluded from expansion, or None for no exclusions.
        default_fill_val : any
            Fill value expected for variables missing from ``self.meta``.
        """
        if exclude_dims is None:
            exclude_dims = []

        # Use the first data set as the dimension reference.
        ref_dims = list(self.out[0].dims.keys())
        for i, xdata in enumerate(self.out[1:]):
            test_dims = list(xdata.dims.keys())
            if dims_equal:
                testing.assert_lists_equal(test_dims, ref_dims)
            else:
                # Only 'time' is shared between the differing instruments.
                for tdim in test_dims:
                    assert ((tdim == 'time') if (tdim in ref_dims)
                            else (tdim != 'time')), \
                        'unexpected dimension: {:}'.format(tdim)

            for tdim in test_dims:
                if tdim in ref_dims:
                    if tdim in exclude_dims:
                        # Excluded dims keep their original (differing) shape.
                        assert xdata[tdim].shape != self.out[0][tdim].shape
                    else:
                        assert xdata[tdim].shape == self.out[0][tdim].shape

                    if xdata[tdim].shape != self.data_list[i + 1][tdim].shape:
                        # The data set was expanded; padded entries should
                        # carry the appropriate fill value.
                        for dvar in xdata.data_vars.keys():
                            if tdim in xdata[dvar].dims:
                                if dvar in self.meta:
                                    fill_val = self.meta[
                                        dvar, self.meta.labels.fill_val]
                                else:
                                    fill_val = default_fill_val

                                try:
                                    if np.isnan(fill_val):
                                        assert np.isnan(xdata[dvar].values).any()
                                    else:
                                        assert np.any(
                                            xdata[dvar].values == fill_val)
                                except TypeError:
                                    # Fill value is not numeric (str/None).
                                    estr = ''.join([
                                        'Bad or missing fill values for ',
                                        dvar, ': ({:} not in {:})'.format(
                                            fill_val, xdata[dvar].values)])
                                    if fill_val is None:
                                        assert fill_val in xdata[dvar].values, estr
                                    else:
                                        assert np.any(
                                            xdata[dvar].values == fill_val), estr
        return

    @pytest.mark.parametrize('dims_equal', [True, False])
    @pytest.mark.parametrize('exclude_dims', [None, ['time']])
    def test_expand_xarray_dims(self, dims_equal, exclude_dims):
        """Test expansion for matched and mismatched dimension names."""
        self.set_data_meta(dims_equal)
        self.out = coords.expand_xarray_dims(
            self.data_list, self.meta, dims_equal=dims_equal,
            exclude_dims=exclude_dims)
        self.eval_dims(dims_equal, exclude_dims)
        return

    @pytest.mark.parametrize('new_data_type', [int, float, str, bool, None])
    def test_missing_meta(self, new_data_type):
        """Test that a variable absent from meta uses the default fill."""
        self.set_data_meta(True)
        self.data_list[1]['new_variable'] = self.data_list[1]['mlt'].astype(
            new_data_type)
        self.out = coords.expand_xarray_dims(
            self.data_list, self.meta, dims_equal=True)
        fill_val = self.meta.labels.default_values_from_type(
            self.meta.labels.label_type['fill_val'], new_data_type)
        self.eval_dims(True, default_fill_val=fill_val)
        return
def evaluate(instruction, input=None, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, max_new_tokens=256, **kwargs):
    """Generate a model response for ``instruction`` (plus optional ``input``).

    Builds the prompt, runs generation with the supplied decoding
    parameters, and returns the text following the '### Response:' marker.
    """
    prompt = generate_prompt(instruction, input)
    # Tokenize the prompt and move the ids to the model's device.
    encoded = tokenizer(prompt, return_tensors='pt')
    input_ids = encoded['input_ids'].to(device)
    gen_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        result = model.generate(
            input_ids=input_ids,
            generation_config=gen_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    decoded = tokenizer.decode(result.sequences[0])
    # Everything after the marker is the model's answer.
    return decoded.split('### Response:')[1].strip()
@pytest.mark.parametrize(
    ('t', 't2', 'result'),
    (
        (TClass[int, int], str, TClass(TClass(1, 2), 'a')),
        (List[TClass[int, int]], str, TClass([TClass(1, 2)], 'a')),
    ),
)
def test_structure_nested_generics(converter: BaseConverter, t, t2, result):
    """Round-trip a nested generic through the converter and compare."""
    res = converter.structure(asdict(result), TClass[t, t2])
    assert res == result
class FCNHead(nn.Sequential):
    """Fully-convolutional segmentation head.

    A 3x3 conv (padding=1, so spatial size is preserved) with BatchNorm,
    ReLU and dropout reduces the width to ``in_channels // 4``, followed
    by a 1x1 conv projecting to ``channels`` output maps.

    Args:
        in_channels (int): Number of input feature channels.
        channels (int): Number of output channels (e.g. classes).
    """

    def __init__(self, in_channels, channels):
        # Intermediate width: a quarter of the input channels.
        inter_channels = in_channels // 4
        layers = [
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(inter_channels),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, channels, 1),
        ]
        # Zero-arg super() is the modern idiom; behavior is unchanged.
        super().__init__(*layers)
class CallError(Error):
    """Error describing a failed (injected) method call.

    ``args`` is either a single message string or the 6-tuple
    ``(instance, method, args, kwargs, original_error, stack)``.
    """

    def __str__(self) -> str:
        # Single-argument form: the argument is the message itself.
        if len(self.args) == 1:
            return self.args[0]

        instance, method, args, kwargs, original_error, stack = self.args
        cls_name = '' if instance is None else instance.__class__.__name__
        # strip('.') handles the unbound case where cls_name is empty.
        full_method = '.'.join((cls_name, method.__name__)).strip('.')

        positional = (repr(arg) for arg in args)
        keyword = ('%s=%r' % (key, value) for key, value in kwargs.items())
        parameters = ', '.join(itertools.chain(positional, keyword))

        return ('Call to %s(%s) failed: %s (injection stack: %r)'
                % (full_method, parameters, original_error,
                   [level[0] for level in stack]))
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    Stores CLIP-style vision-transformer hyperparameters; any extra
    keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = 'git_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12,
                 num_attention_heads=12, num_channels=3, image_size=224, patch_size=16,
                 hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0,
                 initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    # Restored @classmethod: the method takes ``cls`` and calls
    # cls.get_config_dict / cls.from_dict, so it must be a classmethod.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        """Load the vision config, unwrapping it from a composite GIT config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # A full GIT checkpoint nests the vision settings under 'vision_config'.
        if config_dict.get('model_type') == 'git':
            config_dict = config_dict['vision_config']
        if ('model_type' in config_dict and hasattr(cls, 'model_type')
                and config_dict['model_type'] != cls.model_type):
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def test_one(args, wav_root, store_root, rescale, soundstream):
    """Round-trip one audio file through the SoundStream codec.

    Loads ``wav_root`` at ``args.sr``, encodes/decodes it with
    ``soundstream`` on the GPU, checks the result for clipping, and
    writes the reconstructed audio to ``store_root``.
    """
    wav, sr = librosa.load(wav_root, sr=args.sr)
    # Shape the mono signal as (batch=1, channel=1, samples) on the GPU.
    wav = torch.tensor(wav).unsqueeze(0).unsqueeze(1).cuda()

    compressed = soundstream.encode(wav, target_bw=args.target_bw)
    print('finish compressing')

    decoded = soundstream.decode(compressed)
    decoded = decoded.detach().cpu().squeeze(0)
    check_clipping(decoded, rescale)
    save_audio(wav=decoded, path=store_root, sample_rate=args.sr, rescale=rescale)
    print('finish decompressing')
# NOTE(review): '_module()' looks like the mangled remnant of a registry
# decorator (e.g. '@...register_module()'); as written it calls an undefined
# name at import time -- restore the decorator from upstream before use.
_module()
class BCELossWithLogits(BaseWeightedLoss):
    """Binary cross-entropy loss computed from raw logits.

    Delegates to ``F.binary_cross_entropy_with_logits`` so the sigmoid and
    the BCE are applied in a single call.

    Args:
        loss_weight (float): Weight forwarded to ``BaseWeightedLoss``.
        class_weight (list | None): Optional per-class weights passed to
            the underlying loss as ``weight``.
    """

    def __init__(self, loss_weight=1.0, class_weight=None):
        super().__init__(loss_weight=loss_weight)
        self.class_weight = None
        if (class_weight is not None):
            # Materialized once here; moved to the score's device per call.
            self.class_weight = torch.Tensor(class_weight)

    def _forward(self, cls_score, label, **kwargs):
        """Return the BCE-with-logits loss between ``cls_score`` and ``label``."""
        if (self.class_weight is not None):
            # Refuse to silently override a caller-provided 'weight'.
            assert ('weight' not in kwargs), "The key 'weight' already exists."
            kwargs['weight'] = self.class_weight.to(cls_score.device)
        loss_cls = F.binary_cross_entropy_with_logits(cls_score, label, **kwargs)
        return loss_cls
class TResNet(nn.Module):
    """TResNet: a GPU-optimized ResNet variant.

    The stem rearranges 4x4 pixel patches into channels (SpaceToDepth,
    hence ``in_chans * 16``) before a 3x3 conv; stages use inplace-ABN
    blocks with optional SE and anti-aliased (BlurPool) downsampling.
    The ``v2`` flavor rounds widths to multiples of 8 and uses Bottleneck
    blocks in the first two stages.
    """

    def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0,
                 v2=False, global_pool='fast', drop_rate=0.0):
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        super(TResNet, self).__init__()

        aa_layer = BlurPool2d

        # Stage widths scale with width_factor; v2 rounds to a multiple of 8.
        self.inplanes = int(64 * width_factor)
        self.planes = int(64 * width_factor)
        if v2:
            self.inplanes = (self.inplanes // 8) * 8
            self.planes = (self.planes // 8) * 8

        conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3)
        layer1 = self._make_layer(
            Bottleneck if v2 else BasicBlock, self.planes, layers[0],
            stride=1, use_se=True, aa_layer=aa_layer)
        layer2 = self._make_layer(
            Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1],
            stride=2, use_se=True, aa_layer=aa_layer)
        layer3 = self._make_layer(
            Bottleneck, self.planes * 4, layers[2],
            stride=2, use_se=True, aa_layer=aa_layer)
        # SE is disabled in the final stage.
        layer4 = self._make_layer(
            Bottleneck, self.planes * 8, layers[3],
            stride=2, use_se=False, aa_layer=aa_layer)

        self.body = nn.Sequential(OrderedDict([
            ('SpaceToDepth', SpaceToDepthModule()),
            ('conv1', conv1),
            ('layer1', layer1),
            ('layer2', layer2),
            ('layer3', layer3),
            ('layer4', layer4),
        ]))

        # Channel counts / reductions for feature-extraction consumers.
        self.feature_info = [
            dict(num_chs=self.planes, reduction=2, module=''),
            dict(num_chs=(self.planes * (Bottleneck.expansion if v2 else 1)),
                 reduction=4, module='body.layer1'),
            dict(num_chs=((self.planes * 2) * (Bottleneck.expansion if v2 else 1)),
                 reduction=8, module='body.layer2'),
            dict(num_chs=((self.planes * 4) * Bottleneck.expansion),
                 reduction=16, module='body.layer3'),
            dict(num_chs=((self.planes * 8) * Bottleneck.expansion),
                 reduction=32, module='body.layer4'),
        ]

        self.num_features = (self.planes * 8) * Bottleneck.expansion
        self.head = ClassifierHead(
            self.num_features, num_classes, pool_type=global_pool,
            drop_rate=drop_rate)

        # Weight init: Kaiming for convs, unit weight / zero bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-init the last norm weight of each residual branch;
        # small normal init for linear layers.
        for m in self.modules():
            if isinstance(m, BasicBlock):
                m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight))
            if isinstance(m, Bottleneck):
                m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight))
            if isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)

    def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None):
        """Build one stage of ``blocks`` residual blocks, with downsample if needed."""
        downsample = None
        if (stride != 1) or (self.inplanes != (planes * block.expansion)):
            layers = []
            if stride == 2:
                # Anti-aliased downsample: avg-pool before the 1x1 conv.
                layers.append(nn.AvgPool2d(
                    kernel_size=2, stride=2, ceil_mode=True,
                    count_include_pad=False))
            layers += [conv2d_iabn(
                self.inplanes, planes * block.expansion, kernel_size=1,
                stride=1, act_layer='identity')]
            downsample = nn.Sequential(*layers)

        layers = []
        layers.append(block(
            self.inplanes, planes, stride, downsample, use_se=use_se,
            aa_layer=aa_layer))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(
                self.inplanes, planes, use_se=use_se, aa_layer=aa_layer))
        return nn.Sequential(*layers)

    # Restored @torch.jit.ignore decorators (garbled to '.ignore' in source).
    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        """Return regex groupings used for parameter-group matching."""
        matcher = dict(
            stem='^body\\.conv1',
            blocks=('^body\\.layer(\\d+)' if coarse else '^body\\.layer(\\d+)\\.(\\d+)'))
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        assert (not enable), 'gradient checkpointing not supported'

    @torch.jit.ignore
    def get_classifier(self):
        return self.head.fc

    def reset_classifier(self, num_classes, global_pool='fast'):
        """Replace the classification head for a new number of classes."""
        self.head = ClassifierHead(
            self.num_features, num_classes, pool_type=global_pool,
            drop_rate=self.drop_rate)

    def forward_features(self, x):
        return self.body(x)

    def forward_head(self, x, pre_logits: bool = False):
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x
# NOTE(review): the GraphQL type decorator appears to have been stripped;
# without it the keyword construction in from_django_model cannot work.
@strawberry.type
class ScheduleInvitation():
    """GraphQL type describing a speaker's schedule-item invitation."""

    id: strawberry.ID
    option: ScheduleInvitationOption
    notes: str
    title: str
    submission: Submission
    dates: List[ScheduleInvitationDate]

    # Restored @classmethod: the method takes ``cls`` and instantiates it.
    @classmethod
    def from_django_model(cls, instance):
        """Build a ScheduleInvitation from a Django schedule-item instance."""
        return cls(
            id=instance.submission.hashid,
            title=instance.title,
            option=ScheduleInvitationOption.from_schedule_item_status(instance.status),
            notes=instance.speaker_invitation_notes,
            submission=instance.submission,
            dates=[ScheduleInvitationDate.from_django(instance)],
        )
class CMDefaults():
    """Shared default values for chat-member/permission objects.

    NOTE(review): the attribute names mirror Telegram Bot API ChatMember /
    ChatPermissions fields; presumably consumed as fixture defaults by the
    surrounding test module -- confirm against its users.
    """

    # Default user referenced by the chat-member objects.
    user = User(1, 'First name', False)
    custom_title: str = 'PTB'
    is_anonymous: bool = True
    # NOTE(review): annotated as datetime but assigned to_timestamp(...)'s
    # return value -- verify the intended type with the consumer.
    until_date: datetime.datetime = to_timestamp(datetime.datetime.utcnow())
    can_be_edited: bool = False
    # Administrator permission flags.
    can_change_info: bool = True
    can_post_messages: bool = True
    can_edit_messages: bool = True
    can_delete_messages: bool = True
    can_invite_users: bool = True
    can_restrict_members: bool = True
    can_pin_messages: bool = True
    can_promote_members: bool = True
    # Member send permissions.
    can_send_messages: bool = True
    can_send_media_messages: bool = True
    can_send_polls: bool = True
    can_send_other_messages: bool = True
    can_add_web_page_previews: bool = True
    is_member: bool = True
    can_manage_chat: bool = True
    can_manage_video_chats: bool = True
    can_manage_topics: bool = True
    # Per-media-type send permissions.
    can_send_audios: bool = True
    can_send_documents: bool = True
    can_send_photos: bool = True
    can_send_videos: bool = True
    can_send_video_notes: bool = True
    can_send_voice_notes: bool = True
    # Story permissions.
    can_post_stories: bool = True
    can_edit_stories: bool = True
    can_delete_stories: bool = True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.