code stringlengths 281 23.7M |
|---|
class Installation(pg_api.Installation):
    """A local PostgreSQL installation described by a pg_config-style info dict."""

    version = None            # full version string, e.g. "PostgreSQL 9.1.2"
    version_info = None       # normalized version tuple
    type = None               # product name portion of `version`
    configure_options = None  # dict of ./configure flags (value True for bare flags)
    info = None               # raw info dict handed to the constructor

    # Executable names expected to live under `bindir`.
    pg_executables = ('pg_config', 'psql', 'initdb', 'pg_resetxlog', 'pg_controldata', 'clusterdb', 'pg_ctl', 'pg_dump', 'pg_dumpall', 'postgres', 'postmaster', 'reindexdb', 'vacuumdb', 'ipcclean', 'createdb', 'ecpg', 'createuser', 'createlang', 'droplang', 'dropuser', 'pg_restore')
    pg_libraries = ('libpq', 'libecpg', 'libpgtypes', 'libecpg_compat')
    pg_directories = ('bindir', 'docdir', 'includedir', 'pkgincludedir', 'includedir_server', 'libdir', 'pkglibdir', 'localedir', 'mandir', 'sharedir', 'sysconfdir')

    def _e_metas(self):
        """Yield (key, rendered-value) pairs describing this installation."""
        yield ('version', self.version)
        opts = sorted(self.configure_options.items(), key=itemgetter(0))
        if opts:
            rendered = os.linesep.join(
                key if value is True else key + '=' + value
                for key, value in opts
            )
            yield ('configure_options', rendered)

    def __repr__(self, format='{mod}.{name}({info!r})'.format):
        cls = type(self)
        return format(mod=cls.__module__, name=cls.__name__, info=self.info)

    def __init__(self, info: dict):
        self.info = info
        self.version = info['version']
        # "PostgreSQL 9.1.2" -> type "PostgreSQL", vs "9.1.2"
        self.type, version_str = self.version.split()
        self.version_info = versionstring.normalize(versionstring.split(version_str))
        self.configure_options = dict(parse_configure_options(info.get('configure', '')))
        # Directory paths straight from the info dict (may be None).
        self.paths = {key: info.get(key) for key in self.pg_directories}
        bindir = info.get('bindir')
        if bindir is None:
            # Without a bindir nothing can be located.
            self.paths.update(dict.fromkeys(self.pg_executables))
        else:
            for exe in self.pg_executables:
                candidate = platform_exe(os.path.join(bindir, exe))
                self.paths[exe] = candidate if os.path.exists(candidate) else None
        # Expose every resolved path as an instance attribute as well.
        self.__dict__.update(self.paths)

    def ssl(self):
        """True when the build was configured with SSL support."""
        if 'with_openssl' in self.configure_options:
            return True
        return any('with_ssl' in opt for opt in self.configure_options)
class Class_Aes:
    """AES helper supporting several modes, padding schemes and output encodings.

    Public methods return a two-element list ``[status, payload]`` where status
    is ``'success'`` or ``'error'`` (``encrypt`` historically returns
    ``['', '!']`` for empty plaintext; preserved for compatibility).

    Parameter glossary (transliterated from the original Chinese names):
        aes_zifuji    -- codec name used for str<->bytes conversion (e.g. 'utf-8')
        aes_tianchong -- padding scheme name
        aes_buqi_     -- "pad to size" helper
    """

    def buqi_key(self, aes_type, aes_key, aes_zifuji):
        """Normalize a text key to the byte length implied by `aes_type`.

        Returns (key_bytes, length). Over-long keys are truncated by character
        count; short keys are zero-padded.
        """
        if aes_type == 'AES-192':
            length = 24
        elif aes_type == 'AES-256':
            length = 32
        else:
            # 'AES-128' and any unrecognized type fall back to 16 bytes.
            length = 16
        if len(aes_key) >= length:
            # NOTE: truncation is by character count, then encoded with the
            # default codec (original behavior, preserved).
            return (aes_key[:length].encode(), length)
        aes_key = self.aes_buqi_(aes_key.encode(aes_zifuji), length, 'ZeroPadding', 'utf-8')
        return (aes_key, length)

    def encrypt(self, aes_type, aes_mode, aes_zifuji, aes_tianchong, aes_iv, aes_encode, aes_key, aes_m_text):
        """Encrypt `aes_m_text`; returns ['success', ciphertext] or an error list."""
        if not aes_key:
            return ['error', 'AES!']
        aes_key, aes_length = self.buqi_key(aes_type, aes_key, aes_zifuji)
        if aes_mode not in ['ECB']:
            # Every non-ECB mode needs a 16-char IV (CTR uses it as the counter seed).
            if len(aes_iv) != 16:
                return ['error', 'AES16!']
            if not aes_iv:
                return ['error', '!']
            aes_iv = aes_iv.encode(aes_zifuji)
        if not aes_m_text:
            # NOTE(review): status '' (not 'error') is what callers historically
            # received for empty plaintext; preserved for compatibility.
            return ['', '!']
        aes_m_text = self.aes_buqi_(aes_m_text.encode(aes_zifuji), 16, aes_tianchong, aes_zifuji)
        if aes_mode == 'CBC':
            cryptor = AES.new(aes_key, AES.MODE_CBC, aes_iv)
        elif aes_mode == 'ECB':
            try:
                cryptor = AES.new(aes_key, AES.MODE_ECB)
            except Exception as e:
                # BUG FIX: the original only printed the error and fell through,
                # which raised NameError on the unbound `cryptor` below.
                print(str(e))
                return ['error', str(e)]
        elif aes_mode == 'CFB':
            cryptor = AES.new(aes_key, AES.MODE_CFB, aes_iv)
        elif aes_mode == 'CTR':
            ctr = Crypto.Util.Counter.new(128, initial_value=int(binascii.hexlify(aes_iv), 16))
            cryptor = AES.new(aes_key, AES.MODE_CTR, counter=ctr)
        elif aes_mode == 'OFB':
            cryptor = AES.new(aes_key, AES.MODE_OFB, aes_iv)
        else:
            return ['error', '!']
        return_text = cryptor.encrypt(aes_m_text)
        if aes_encode == 'Base64':
            return_text = str(base64.encodebytes(return_text), encoding=aes_zifuji).strip()
        elif aes_encode == 'Hex':
            return_text = str(binascii.b2a_hex(return_text), encoding=aes_zifuji).strip()
        return ['success', return_text]

    def decrypt(self, aes_type, aes_mode, aes_zifuji, aes_iv, aes_encode, aes_key, aes_m_text):
        """Decrypt `aes_m_text`; returns ['success', plaintext] or an error list.

        NOTE(review): unlike encrypt(), the key length here is derived from the
        actual key size rather than `aes_type` (original behavior, preserved).
        """
        if not aes_key:
            return ['error', 'AES!']
        key_len = len(aes_key)
        if key_len < 16:
            aes_key = self.aes_buqi_(aes_key.encode(aes_zifuji), 16, 'ZeroPadding', 'utf-8')
        elif 16 < key_len < 24:
            aes_key = self.aes_buqi_(aes_key.encode(aes_zifuji), 24, 'ZeroPadding', 'utf-8')
        elif 24 < key_len < 32:
            aes_key = self.aes_buqi_(aes_key.encode(aes_zifuji), 32, 'ZeroPadding', 'utf-8')
        elif key_len > 32:
            return ['error', 'AES32!']
        else:
            # Exactly 16/24/32 characters: use as-is.
            aes_key = aes_key.encode(aes_zifuji)
        if aes_mode not in ['ECB']:
            if len(aes_iv) != 16:
                return ['error', 'AES16!']
            if not aes_iv:
                return ['error', '!']
            aes_iv = aes_iv.encode(aes_zifuji)
        if not aes_m_text:
            return ['error', '!']
        elif aes_encode == 'Base64':
            aes_m_text = base64.b64decode(aes_m_text.encode(aes_zifuji))
        elif aes_encode == 'Hex':
            aes_m_text = bytes.fromhex(aes_m_text)
        if aes_mode == 'CBC':
            cryptor = AES.new(aes_key, AES.MODE_CBC, aes_iv)
        elif aes_mode == 'ECB':
            cryptor = AES.new(aes_key, AES.MODE_ECB)
        elif aes_mode == 'CFB':
            cryptor = AES.new(aes_key, AES.MODE_CFB, aes_iv)
        elif aes_mode == 'CTR':
            ctr = Crypto.Util.Counter.new(128, initial_value=int(binascii.hexlify(aes_iv), 16))
            cryptor = AES.new(aes_key, AES.MODE_CTR, counter=ctr)
        elif aes_mode == 'OFB':
            cryptor = AES.new(aes_key, AES.MODE_OFB, aes_iv)
        else:
            return ['error', '!']
        try:
            return_text = cryptor.decrypt(aes_m_text)
            stripped = self._unpad(return_text)
            return ['success', stripped.decode(aes_zifuji)]
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
            return ['error', '!']

    def _unpad(self, s):
        """Strip trailing padding: the last byte is read as the pad length.

        NOTE(review): this assumes PKCS7-style trailing-count padding; zero-padded
        data will lose trailing bytes (original behavior, preserved).
        """
        return s[:(- ord(s[(len(s) - 1):]))]

    def aes_buqi_(self, text, length, aes_tianchong, aes_zifuji):
        """Pad `text` (bytes) according to `aes_tianchong`; returns bytes."""
        if aes_tianchong == 'Pkcs7':
            padding = length - (len(text) % length)
            return text + (chr(padding) * padding).encode(aes_zifuji)
        if aes_tianchong == 'Iso10126':
            padding = length - (len(text) % length)
            # NOTE(review): true ISO 10126 uses random *bytes*; this reproduces
            # the original behavior of random code points 0..9.
            filler = ''.join(chr(random.randint(0, 9)) for _ in range(padding - 1))
            return text + (filler + chr(padding)).encode(aes_zifuji)
        if aes_tianchong == 'AnsiX923':
            padding = length - (len(text) % length)
            return text + (('\x00' * (padding - 1)) + chr(padding)).encode(aes_zifuji)
        if aes_tianchong == 'No Padding':
            # Despite the name, rounds up to a 16-byte multiple with NULs.
            while (len(text) % 16) != 0:
                text += '\x00'.encode(aes_zifuji)
            return text
        # 'ZeroPadding' and any unknown scheme: right-pad with NUL bytes.
        count = len(text)
        if count < length:
            text = text + ('\x00' * (length - count)).encode(aes_zifuji)
        elif count > length:
            # NOTE(review): when count is an exact multiple of length this still
            # appends a full block of NULs (original behavior, preserved).
            text = text + ('\x00' * (length - (count % length))).encode(aes_zifuji)
        return text
def register(manager: AstroidManager) -> None:
    """Register the dataclass brain transforms with an astroid manager."""
    field_call_tip = inference_tip(infer_dataclass_field_call, raise_on_overwrite=True)
    attribute_tip = inference_tip(infer_dataclass_attribute, raise_on_overwrite=True)
    # Class-level transform rewrites decorated dataclasses; the two inference
    # tips resolve field() calls and generated attributes respectively.
    manager.register_transform(nodes.ClassDef, dataclass_transform, is_decorated_with_dataclass)
    manager.register_transform(nodes.Call, field_call_tip, _looks_like_dataclass_field_call)
    manager.register_transform(nodes.Unknown, attribute_tip, _looks_like_dataclass_attribute)
class Migration(migrations.Migration):
    """Create the ReviewSession and AvailableVoteOption models."""

    initial = True

    dependencies = [
        ('conferences', '0031_fix_keynote_details_save'),
    ]

    operations = [
        migrations.CreateModel(
            name='ReviewSession',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('session_type', models.CharField(choices=[('proposals', 'Proposals'), ('grants', 'Grants')], max_length=100)),
                ('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='conferences.conference')),
            ],
            options={'abstract': False},
        ),
        migrations.CreateModel(
            name='AvailableVoteOption',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
                ('numeric_value', models.IntegerField()),
                ('label', models.CharField(max_length=100)),
                ('review_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.reviewsession')),
            ],
            # Each numeric value may appear only once per review session.
            options={'unique_together': {('review_session', 'numeric_value')}},
        ),
    ]
def _read_mat_binary(fd):
header = fd.read(3).decode()
if header.startswith('CM'):
return _read_compressed_mat(fd, header)
elif header.startswith('SM'):
return _read_sparse_mat(fd, header)
elif (header == 'FM '):
sample_size = 4
elif (header == 'DM '):
sample_size = 8
else:
raise UnknownMatrixHeader(("The header contained '%s'" % header))
assert (sample_size > 0)
(s1, rows, s2, cols) = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
buf = fd.read(((rows * cols) * sample_size))
if (sample_size == 4):
vec = np.frombuffer(buf, dtype='float32')
elif (sample_size == 8):
vec = np.frombuffer(buf, dtype='float64')
else:
raise BadSampleSize
mat = np.reshape(vec, (rows, cols))
return mat |
class TagFormSet(forms.BaseInlineFormSet):
    """Inline formset that routes saved/deleted tag rows to the matching tag handler."""

    def save(self, commit=True):
        """Apply deletions and additions through the object's tag handlers.

        NOTE: instances are obtained with super().save(commit=False); the
        actual persistence happens via the handlers' add/remove calls.
        """
        def handler_for(obj):
            # Pick the handler (aliases / permissions / tags) matching the
            # row's tag type; tag_type may live on the row or its tag.
            related = getattr(obj, self.related_field)
            try:
                tagtype = obj.tag_type
            except AttributeError:
                tagtype = obj.tag.db_tagtype
            handler_name = {'alias': 'aliases', 'permission': 'permissions'}.get(tagtype, 'tags')
            return getattr(related, handler_name)

        instances = super().save(commit=False)
        for deleted in self.deleted_objects:
            handler_for(deleted).remove(deleted.tag_key, category=deleted.tag_category)
        for instance in instances:
            handler_for(instance).add(instance.tag_key, category=instance.tag_category, data=instance.tag_data)
class PVTNetwork_4(nn.Module):
    """Segmentation network: PVTv2-b2 backbone with channel/spatial attention,
    per-scale attention fusion, and a transposed-conv decoder.

    With ``deep_supervision`` the forward pass returns the per-scale outputs
    finest-first; otherwise only the finest output.
    """

    #: Original hard-coded location of the pretrained PVTv2-b2 weights,
    #: kept as the default for backward compatibility.
    DEFAULT_PRETRAINED_PATH = '/afs/crc.nd.edu/user/y/ypeng4/Polyp-PVT_2/pvt_pth/pvt_v2_b2.pth'

    def __init__(self, channel=32, n_classes=1, deep_supervision=True, pretrained_path=None):
        """
        Args:
            channel: common width the four backbone stages are translated to.
            n_classes: number of output segmentation channels.
            deep_supervision: return all scale outputs (reversed) vs. only the finest.
            pretrained_path: path to PVTv2-b2 weights; None uses the historical
                default path (generalized from a hard-coded constant).
        """
        super().__init__()
        self.deep_supervision = deep_supervision
        print(f'use SpatialAtt + ChannelAtt and My attention layer'.center(80, '='))
        self.backbone = pvt_v2_b2()
        # Load pretrained weights, keeping only keys the backbone actually has.
        path = pretrained_path if pretrained_path is not None else self.DEFAULT_PRETRAINED_PATH
        save_model = torch.load(path)
        model_dict = self.backbone.state_dict()
        state_dict = {k: v for (k, v) in save_model.items() if (k in model_dict.keys())}
        model_dict.update(state_dict)
        self.backbone.load_state_dict(model_dict)
        # Channel + spatial attention per backbone stage (PVTv2-b2 stage widths).
        self.ca_1 = ChannelAttention(64)
        self.sa_1 = SpatialAttention()
        self.ca_2 = ChannelAttention(128)
        self.sa_2 = SpatialAttention()
        self.ca_3 = ChannelAttention(320)
        self.sa_3 = SpatialAttention()
        self.ca_4 = ChannelAttention(512)
        self.sa_4 = SpatialAttention()
        # 1x1 convolutions mapping each stage to the common `channel` width.
        self.Translayer_1 = BasicConv2d(64, channel, 1)
        self.Translayer_2 = BasicConv2d(128, channel, 1)
        self.Translayer_3 = BasicConv2d(320, channel, 1)
        self.Translayer_4 = BasicConv2d(512, channel, 1)
        self.attention_1 = AttentionLayer()
        self.attention_2 = AttentionLayer()
        self.attention_3 = AttentionLayer()
        self.attention_4 = AttentionLayer()
        # One segmentation head per decoder scale.  (Generalized: `channel`
        # replaces hard-coded 32 so non-default widths work; identical for the
        # default channel=32.)
        self.seg_outs = nn.ModuleList([nn.Conv2d(channel, n_classes, 1, 1) for _ in range(4)])
        # Upsampling (x2) + fusion convs for the top-down decoder path.
        self.deconv2 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv2_conv = BasicConv2d((channel * 2), channel, 3, 1, 1)
        self.deconv3 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv3_conv = BasicConv2d((channel * 2), channel, 3, 1, 1)
        self.deconv4 = nn.ConvTranspose2d(channel, channel, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv4_conv = BasicConv2d((channel * 2), channel, 3, 1, 1)

    def forward(self, x):
        """Run the encoder-decoder; see class docstring for the return shape."""
        seg_outs = []
        f1, f2, f3, f4 = self.backbone(x)
        # Refine each stage with channel then spatial attention, then translate.
        f1 = self.ca_1(f1) * f1
        f1 = self.sa_1(f1) * f1
        f1 = self.Translayer_1(f1)
        f2 = self.ca_2(f2) * f2
        f2 = self.sa_2(f2) * f2
        f2 = self.Translayer_2(f2)
        f3 = self.ca_3(f3) * f3
        f3 = self.sa_3(f3) * f3
        f3 = self.Translayer_3(f3)
        f4 = self.ca_4(f4) * f4
        f4 = self.sa_4(f4) * f4
        f4 = self.Translayer_4(f4)
        # Cross-scale attention: each scale attends over all four features.
        f41 = self.attention_4([f1, f2, f3, f4], f4)
        seg_outs.append(self.seg_outs[0](f41))
        f31 = self.attention_3([f1, f2, f3, f4], f3)
        f21 = self.attention_2([f1, f2, f3, f4], f2)
        f11 = self.attention_1([f1, f2, f3, f4], f1)
        # Top-down decoding: upsample, concatenate with the next-finer scale, fuse.
        y = self.deconv2_conv(torch.cat([self.deconv2(f41), f31], dim=1))
        seg_outs.append(self.seg_outs[1](y))
        y = self.deconv3_conv(torch.cat([self.deconv3(y), f21], dim=1))
        seg_outs.append(self.seg_outs[2](y))
        y = self.deconv4_conv(torch.cat([self.deconv4(y), f11], dim=1))
        seg_outs.append(self.seg_outs[3](y))
        # All outputs are upsampled x4 back to (roughly) input resolution.
        for i, o in enumerate(seg_outs):
            seg_outs[i] = F.interpolate(o, scale_factor=4, mode='bilinear')
        if self.deep_supervision:
            return seg_outs[::-1]
        else:
            return seg_outs[-1]
def step(base, data, hh):
    """Expand placeholder lines in `data` with file contents.

    Lines found as keys in `hh` are replaced by the contents of the file
    `base/hh[line]` (surrounded by blank lines); the file is deleted after
    use.  All other lines pass through unchanged.
    """
    def expanded():
        for line in data.split('\n'):
            if line in hh:
                path = os.path.join(base, hh[line])
                yield '\n\n' + load_file(path) + '\n\n'
                # Consume-once semantics: remove the file after inlining it.
                os.unlink(path)
            else:
                yield line
    return '\n'.join(expanded())
def evaluate(config, workdir, eval_folder='eval'):
    """Evaluate a score-based model on CT reconstruction test data.

    Loads the configured dataset's test split (sharded across JAX hosts),
    restores the model checkpoint, runs the configured compressed-sensing
    solver batch by batch, and writes reconstructions plus metrics to
    ``workdir/eval_folder/host_<i>``.  For metal-artifact-reduction ('mar')
    tasks the metrics are SSIM/PSNR/RMSE against clean ground truth with
    metal pixels masked out; otherwise PSNR/SSIM variants (plain, circular
    ROI mask, histogram-normalized).

    Args:
        config: experiment configuration (data/model/sampling/eval sections).
        workdir: working directory containing the 'checkpoints' folder.
        eval_folder: subdirectory name for evaluation outputs.
    """
    # One output directory per JAX host so multi-host runs don't collide.
    eval_dir = os.path.join(workdir, eval_folder, f'host_{jax.process_index()}')
    tf.io.gfile.makedirs(eval_dir)
    # Fold the host index into the RNG so hosts draw independent randomness.
    rng = jax.random.PRNGKey((config.seed + 1))
    rng = jax.random.fold_in(rng, jax.process_index())
    test_data_dir = {'ct2d_320': 'LIDC_320.npz', 'ldct_512': 'LDCT.npz', 'brats': 'BraTS.npz'}[config.data.dataset]
    test_data_dir = os.path.join('test_data', test_data_dir)
    test_imgs = np.load(test_data_dir)['all_imgs']
    # Shard the test images evenly across hosts; this host keeps its slice.
    test_imgs = test_imgs.reshape((jax.process_count(), (- 1), *test_imgs.shape[1:]))[jax.process_index()]
    if ('mar' in config.sampling.task):
        # Metal-artifact-reduction task: also load metal-corrupted inputs,
        # metal masks and clean ground truth, sharded the same way.
        mar = True
        mar_data_dir = {'ct2d_320': 'LIDC_320_MAR.npz', 'ldct_512': 'LDCT_MAR.npz'}[config.data.dataset]
        mar_data_dir = os.path.join('test_data', mar_data_dir)
        mar_data = np.load(mar_data_dir)
        ma_imgs = mar_data['ma_imgs']
        metal_imgs = mar_data['metal_masks']
        metal_imgs = metal_imgs.reshape((jax.process_count(), (- 1), *metal_imgs.shape[1:]))[jax.process_index()]
        ma_imgs = ma_imgs.reshape((jax.process_count(), (- 1), *ma_imgs.shape[1:]))[jax.process_index()]
        gt_imgs = mar_data['gt_imgs']
        gt_imgs = gt_imgs.reshape((jax.process_count(), (- 1), *gt_imgs.shape[1:]))[jax.process_index()]
    else:
        mar = False
    scaler = datasets.get_data_scaler(config)
    inverse_scaler = datasets.get_data_inverse_scaler(config)
    # Initialize model/optimizer state; real weights are restored further down.
    (rng, model_rng) = jax.random.split(rng)
    (score_model, init_model_state, initial_params) = mutils.init_model(model_rng, config)
    optimizer = losses.get_optimizer(config).create(initial_params)
    state = mutils.State(step=0, optimizer=optimizer, lr=config.optim.lr, model_state=init_model_state, ema_rate=config.model.ema_rate, params_ema=initial_params, rng=rng)
    checkpoint_dir = os.path.join(workdir, 'checkpoints')
    # Build the SDE matching the training configuration.
    if (config.training.sde.lower() == 'vpsde'):
        sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales)
        sampling_eps = 0.001
    elif (config.training.sde.lower() == 'subvpsde'):
        sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales)
        sampling_eps = 0.001
    elif (config.training.sde.lower() == 'vesde'):
        sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=config.model.num_scales)
        sampling_eps = 1e-05
    else:
        raise NotImplementedError(f'SDE {config.training.sde} unknown.')
    # Per-device batch shape for the sampler.
    sampling_shape = ((config.eval.batch_size // jax.device_count()), config.data.image_size, config.data.image_size, config.data.num_channels)
    cs_solver = cs.get_cs_solver(config, sde, score_model, sampling_shape, inverse_scaler, eps=sampling_eps)
    state = checkpoints.restore_checkpoint(checkpoint_dir, state, step=config.eval.ckpt_id)
    # Replicate restored state across local devices for pmapped sampling.
    pstate = flax.jax_utils.replicate(state)
    # Solver-specific hyperparameter pair, selected by solver name.
    hyper_params = {'projection': [config.sampling.coeff, config.sampling.snr], 'langevin_projection': [config.sampling.coeff, config.sampling.snr], 'langevin': [config.sampling.projection_sigma_rate, config.sampling.snr], 'baseline': [config.sampling.projection_sigma_rate, config.sampling.snr]}[config.sampling.cs_solver]
    per_host_batch_size = (config.eval.batch_size // jax.host_count())
    num_batches = int(np.ceil((len(test_imgs) / per_host_batch_size)))
    img_size = config.data.image_size
    # Circular region-of-interest mask used by the masked PSNR/SSIM variants.
    mask = Image.new('L', (img_size, img_size), 0)
    draw = ImageDraw.Draw(mask)
    draw.pieslice([0, 0, img_size, img_size], 0, 360, fill=255)
    toTensor = transforms.ToTensor()
    mask = toTensor(mask)[0]
    def get_metric(predictions, targets, mask_roi=False, hist_norm=False):
        """Return (psnr, ssim) for a prediction/target pair.

        mask_roi applies the circular ROI mask; hist_norm rescales both
        images between their two dominant histogram peaks (split at bin 75)
        before comparison, then normalizes by the target maximum.
        """
        with torch.no_grad():
            if hist_norm:
                pred_hist = torch.histc(predictions, bins=255)
                targ_hist = torch.histc(targets, bins=255)
                # Locate the low- and high-intensity histogram peaks.
                peak_pred1 = (torch.argmax(pred_hist[:75]) / 255.0)
                peak_pred2 = ((torch.argmax(pred_hist[75:]) + 75) / 255.0)
                peak_targ1 = (torch.argmax(targ_hist[:75]) / 255.0)
                peak_targ2 = ((torch.argmax(targ_hist[75:]) + 75) / 255.0)
                predictions = torch.clamp(((predictions - peak_pred1) / (peak_pred2 - peak_pred1)), min=0)
                targets = torch.clamp(((targets - peak_targ1) / (peak_targ2 - peak_targ1)), min=0)
                predictions = torch.clamp(predictions, max=torch.max(targets).item(), min=0)
                predictions /= torch.max(targets)
                targets /= torch.max(targets)
            if mask_roi:
                predictions = (predictions * mask)
                targets = (targets * mask)
            return (piq.psnr(predictions[(None, None, ...)], targets[(None, None, ...)], data_range=1.0).item(), piq.ssim(predictions[(None, None, ...)], targets[(None, None, ...)], data_range=1.0).item())
    def compute_mar_metrics(gt, pred, metal_mask):
        """Return (ssim, psnr, rmse) with metal pixels zeroed in both images."""
        gt[metal_mask] = 0.0
        pred[metal_mask] = 0.0
        gt = np.clip(gt, 0.0, 1.0)
        pred = np.clip(pred, 0.0, 1.0)
        ssim = structural_similarity(gt, pred)
        psnr = peak_signal_noise_ratio(gt, pred)
        rmse = np.sqrt(np.mean(((gt - pred) ** 2)))
        return (ssim, psnr, rmse)
    all_samples = []
    all_ssims = []
    all_psnrs = []
    all_ssims_mask = []
    all_psnrs_mask = []
    all_ssims_mask_hist = []
    all_psnrs_mask_hist = []
    all_mar_ssims = []
    all_mar_psnrs = []
    all_mar_rmses = []
    for batch in tqdm.tqdm(range(num_batches)):
        if (not mar):
            # uint8 test images are rescaled to [0, 1] here.
            current_batch = (jnp.asarray(test_imgs[(batch * per_host_batch_size):min(((batch + 1) * per_host_batch_size), len(test_imgs))], dtype=jnp.float32) / 255.0)
        else:
            current_batch = jnp.asarray(ma_imgs[(batch * per_host_batch_size):min(((batch + 1) * per_host_batch_size), len(ma_imgs))], dtype=jnp.float32)
            test_batch = jnp.asarray(gt_imgs[(batch * per_host_batch_size):min(((batch + 1) * per_host_batch_size), len(gt_imgs))], dtype=jnp.float32)
            metal_batch = jnp.asarray(metal_imgs[(batch * per_host_batch_size):min(((batch + 1) * per_host_batch_size), len(metal_imgs))], dtype=jnp.bool_)
        n_effective_samples = len(current_batch)
        if (n_effective_samples < per_host_batch_size):
            # Pad the final partial batch; the padding is dropped after sampling.
            pad_len = (per_host_batch_size - len(current_batch))
            current_batch = jnp.pad(current_batch, ((0, pad_len), (0, 0), (0, 0)), mode='constant', constant_values=0.0)
        current_batch = current_batch.reshape(((- 1), *sampling_shape))
        img = scaler(current_batch)
        # One RNG per local device for the pmapped solver.
        (rng, *step_rng) = jax.random.split(rng, (jax.local_device_count() + 1))
        step_rng = jnp.asarray(step_rng)
        samples = cs_solver(step_rng, pstate, img, *hyper_params)
        samples = np.clip(np.asarray(samples), 0.0, 1.0)
        samples = samples.reshape(((- 1), config.data.image_size, config.data.image_size, 1))[:n_effective_samples]
        all_samples.extend(samples)
        if (not mar):
            ground_truth = np.asarray(inverse_scaler(img)).reshape(((- 1), config.data.image_size, config.data.image_size, 1))
            ground_truth = np.clip(ground_truth, 0.0, 1.0)
            # NHWC -> NCHW for the torch-based metrics.
            ground_truth = torch.from_numpy(ground_truth).permute(0, 3, 1, 2)
            samples = torch.from_numpy(samples).permute(0, 3, 1, 2)
            for i in range(n_effective_samples):
                (p, s) = get_metric(samples[i].squeeze(), ground_truth[i].squeeze())
                all_psnrs.append(p)
                all_ssims.append(s)
                (p, s) = get_metric(samples[i].squeeze(), ground_truth[i].squeeze(), mask_roi=True)
                all_psnrs_mask.append(p)
                all_ssims_mask.append(s)
                (p, s) = get_metric(samples[i].squeeze(), ground_truth[i].squeeze(), mask_roi=True, hist_norm=True)
                all_psnrs_mask_hist.append(p)
                all_ssims_mask_hist.append(s)
            # Running averages after each batch.
            print(f'PSNR: {np.asarray(all_psnrs).mean():.4f}, SSIM: {np.asarray(all_ssims).mean():.4f}')
            print(f'with mask: PSNR: {np.asarray(all_psnrs_mask).mean():.4f}, SSIM: {np.asarray(all_ssims_mask).mean():.4f}')
            print(f'with mask & hist: PSNR: {np.asarray(all_psnrs_mask_hist).mean():.4f}, SSIM: {np.asarray(all_ssims_mask_hist).mean():.4f}')
        else:
            ground_truth = np.array(test_batch)
            samples = np.array(samples)[(..., 0)]
            masks = (np.array(metal_batch) > 0.0)
            for i in range(n_effective_samples):
                (ssim, psnr, rmse) = compute_mar_metrics(ground_truth[i], samples[i], masks[i])
                all_mar_ssims.append(ssim)
                all_mar_psnrs.append(psnr)
                all_mar_rmses.append(rmse)
            print(f'SSIM: {np.asarray(all_mar_ssims).mean():.4f}, PSNR: {np.asarray(all_mar_psnrs).mean():.4f}, RMSE: {np.asarray(all_mar_rmses).mean():.4f}')
    # Persist reconstructions (uint8) and metric arrays for this host.
    all_samples = (np.stack(all_samples, axis=0) * 255.0).astype(np.uint8)
    np.savez_compressed(os.path.join(eval_dir, 'reconstructions.npz'), recon=all_samples)
    if (not mar):
        all_psnrs = np.asarray(all_psnrs)
        all_ssims = np.asarray(all_ssims)
        all_psnrs_mask = np.asarray(all_psnrs_mask)
        all_ssims_mask = np.asarray(all_ssims_mask)
        all_psnrs_mask_hist = np.asarray(all_psnrs_mask_hist)
        all_ssims_mask_hist = np.asarray(all_ssims_mask_hist)
        np.savez_compressed(os.path.join(eval_dir, 'metrics.npz'), psnrs=all_psnrs, ssims=all_ssims, psnrs_mask=all_psnrs_mask, ssims_mask=all_ssims_mask, psnrs_mask_hist=all_psnrs_mask_hist, ssims_mask_hist=all_ssims_mask_hist)
    else:
        all_psnrs = np.asarray(all_mar_psnrs)
        all_ssims = np.asarray(all_mar_ssims)
        all_rmses = np.asarray(all_mar_rmses)
        np.savez_compressed(os.path.join(eval_dir, 'metrics.npz'), psnrs=all_psnrs, ssims=all_ssims, rmses=all_rmses)
def has_checkpoint(checkpoint_folder: str, skip_final: bool = False):
    """Return True if `checkpoint_folder` contains a ``.torch`` checkpoint file.

    With ``skip_final=True``, files whose name contains ``model_final`` are
    ignored.
    """
    for filename in PathManager.ls(checkpoint_folder):
        if not filename.endswith('.torch'):
            continue
        if skip_final and 'model_final' in filename:
            continue
        return True
    return False
# NOTE(review): the decorator on the next line lost its callable name during
# extraction -- presumably httmock's @urlmatch(...); confirm against the
# original test module before relying on it.
(netloc='fakegitlab', path='/api/v4/projects/4/hooks/1$', method='DELETE')
def delete_hook_handler(_, request):
    """Fake GitLab endpoint handling DELETE of project hook 1.

    Returns 401 unless the request carries the expected test bearer token,
    otherwise 200 with an empty JSON object body.
    """
    if (not (request.headers.get('Authorization') == 'Bearer foobar')):
        return {'status_code': 401}
    return {'status_code': 200, 'headers': {'Content-Type': 'application/json'}, 'content': json.dumps({})}
class TestPower:
    """Behavioral tests for the symbolic `power` op."""

    def test_numpy_compare(self):
        """power(A, 3) on a matrix matches np.power element-wise."""
        rng = np.random.default_rng(utt.fetch_seed())
        A = matrix('A', dtype=config.floatX)
        fn = function([A], [power(A, 3)])
        data = rng.random((4, 4)).astype(config.floatX)
        expected = np.power(data, 3)
        actual = fn(data)
        assert np.allclose(expected, actual)

    def test_multiple_power(self):
        """A list of exponents applies element-wise to a vector base."""
        base = vector()
        fn = function([base], power(base, [1, 2, 3]))
        assert np.allclose(fn([1, 2, 3]), [1, 4, 27])

    def test_wrong_shape(self):
        """Mismatched base/exponent lengths raise at execution time."""
        base = vector()
        fn = function([base], power(base, [1, 2, 3]))
        with pytest.raises(ValueError):
            fn([1, 2, 3, 4])
class CssGenshiLexer(DelegatingLexer):
    """Lexer for CSS templated with Genshi text directives.

    Delegates tokenization to CssLexer for the CSS portions and
    GenshiTextLexer for the template directives.
    """
    name = 'CSS+Genshi Text'
    aliases = ['css+genshitext', 'css+genshi']
    version_added = ''
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']
    # NOTE(review): this string literal was truncated during extraction
    # (presumably the Genshi project URL); restore it from the original source.
    url = '
    def __init__(self, **options):
        super().__init__(CssLexer, GenshiTextLexer, **options)
    def analyse_text(text):
        # Defer to GenshiLexer's heuristic, slightly penalized so the plain
        # Genshi lexer wins on ambiguous input.
        return (GenshiLexer.analyse_text(text) - 0.05)
def main(_):
    """Dump all trainable variables from a TF checkpoint into a .npy dict file.

    Reads the graph from ``FLAGS.input + '.meta'``, restores the checkpoint,
    and saves ``{variable_name: value}`` to ``FLAGS.output`` (default
    'output.npy', '.npy' suffix enforced).
    """
    if FLAGS.input == '':
        print('You must specify --input value (--output is optional)')
        return
    if not os.path.exists(FLAGS.input + '.meta'):
        print('Input %s.meta does not exist' % FLAGS.input)
        return
    meta = tf.train.import_meta_graph(FLAGS.input + '.meta', clear_devices=True)
    var_list = tf.get_collection('trainable_variables')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        meta.restore(sess, FLAGS.input)
        val_list = sess.run(var_list)
    # Map each variable's graph name to its restored value.
    data = {var.name: val for var, val in zip(var_list, val_list)}
    fname = FLAGS.output
    if fname == '':
        fname = 'output.npy'
    elif not fname.endswith('.npy'):
        fname += '.npy'
    np.save(fname, data)
    print('Saved %s' % fname)
def init_cli(cli_obj, reset=False):
    """Build and return the click CommandCollection for the manage shell.

    NOTE: despite the `cli_obj` parameter, this operates on the module-level
    `cli` group and `MANAGE_DICT` configuration (original behavior, preserved).
    """
    global MANAGE_DICT
    if reset:
        MANAGE_DICT = {}
    # Make the current directory importable for project-local commands.
    sys.path.insert(0, '.')
    load_manage_dict_from_sys_args()
    help_template = MANAGE_DICT.get('help_text', '{project_name} Interactive shell!')
    cli.help = help_template.format(**MANAGE_DICT)
    load_groups(cli, MANAGE_DICT)
    load_commands(cli, MANAGE_DICT)
    manager = click.CommandCollection(help=cli.help, no_args_is_help=False)
    manager.add_source(cli)
    load_command_sources(manager, MANAGE_DICT)
    # Drop any commands the project configuration disables.
    for disabled_name in MANAGE_DICT.get('disabled', []):
        cli.commands.pop(disabled_name, None)
    return manager
def execute_and_export_notebooks(output_nbs: bool, output_html: bool, only_out_of_date: bool = True):
    """Execute every notebook under ``qualtran/`` and export the results.

    Skips notebooks whose outputs are already up to date when
    `only_out_of_date` is set.  Exits with status 1 if any notebook fails.
    """
    reporoot = get_git_root()
    sourceroot = reporoot / 'qualtran'
    failures = []
    for nb_rel_path in get_nb_rel_paths(sourceroot=sourceroot):
        paths = _NBInOutPaths.from_nb_rel_path(nb_rel_path, reporoot, output_html=output_html, output_nbs=output_nbs)
        if only_out_of_date and not paths.needs_reexport():
            print(f'{nb_rel_path} up to date')
            continue
        print(f'Exporting {nb_rel_path}')
        if execute_and_export_notebook(paths) is not None:
            failures.append(paths.nb_in)
    if len(failures) > 0:
        print()
        print('Errors in notebooks:')
        for nb in failures:
            print(' ', nb)
        sys.exit(1)
class ProcessView(gui.Svg, FBD_model.Process):
    """SVG canvas view of an FBD process; wires block clicks and link drags.

    A link is created when a drag starts on one terminal and ends on the
    complementary one (one Input, one Output).
    """
    # Endpoints of an in-progress link selection, set on drag start/end.
    selected_input = None
    selected_output = None
    def __init__(self, *args, **kwargs):
        gui.Svg.__init__(self, *args, **kwargs)
        FBD_model.Process.__init__(self)
        self.css_border_color = 'black'
        self.css_border_width = '1'
        self.css_border_style = 'solid'
        self.style['background-color'] = 'lightyellow'
    def onselection_start(self, emitter, x, y):
        """Record which terminal (input or output) a link drag started from."""
        self.selected_input = self.selected_output = None
        print('selection start: ', type(emitter))
        if issubclass(type(emitter), FBD_model.Input):
            self.selected_input = emitter
        else:
            self.selected_output = emitter
    def onselection_end(self, emitter, x, y):
        """Record the drag's end terminal; link the pair when both ends are set."""
        print('selection end: ', type(emitter))
        if issubclass(type(emitter), FBD_model.Input):
            self.selected_input = emitter
        else:
            self.selected_output = emitter
        # NOTE(review): `!= None` works but `is not None` is the idiomatic test.
        if ((self.selected_input != None) and (self.selected_output != None)):
            # An input accepts at most one incoming link.
            if self.selected_input.is_linked():
                return
            self.selected_output.link(self.selected_input, self)
    def add_function_block(self, function_block):
        """Add a function block to both the SVG view and the process model."""
        function_block.onclick.do(self.onfunction_block_clicked)
        self.append(function_block, function_block.name)
        FBD_model.Process.add_function_block(self, function_block)
    def add_object_block(self, object_block):
        """Add an object block to both the SVG view and the process model."""
        object_block.onclick.do(self.onfunction_block_clicked)
        self.append(object_block, object_block.name)
        FBD_model.Process.add_object_block(self, object_block)
    # NOTE(review): the next line lost its decorator name during extraction --
    # presumably remi's @gui.decorate_event; confirm against the original source.
    _event
    def onfunction_block_clicked(self, function_block):
        # Event payload: the clicked block, forwarded to listeners.
        return (function_block,)
# NOTE(review): the decorator on the next line lost its prefix during
# extraction -- presumably @pytest.mark.parametrize; confirm upstream.
.parametrize('minimum_unit, seconds, expected', [('seconds', ONE_MICROSECOND, 'a moment'), ('seconds', FOUR_MICROSECONDS, 'a moment'), ('seconds', ONE_MILLISECOND, 'a moment'), ('seconds', FOUR_MILLISECONDS, 'a moment'), ('seconds', MICROSECONDS_101_943, 'a moment'), ('seconds', MILLISECONDS_1_337, 'a second'), ('seconds', 2, '2 seconds'), ('seconds', 4, '4 seconds'), ('seconds', (ONE_HOUR + FOUR_MILLISECONDS), 'an hour'), ('seconds', (ONE_DAY + FOUR_MILLISECONDS), 'a day'), ('seconds', (ONE_YEAR + FOUR_MICROSECONDS), 'a year'), ('milliseconds', FOUR_MICROSECONDS, '0 milliseconds'), ('milliseconds', ONE_MILLISECOND, '1 millisecond'), ('milliseconds', FOUR_MILLISECONDS, '4 milliseconds'), ('milliseconds', MICROSECONDS_101_943, '101 milliseconds'), ('milliseconds', MILLISECONDS_1_337, 'a second'), ('milliseconds', 2, '2 seconds'), ('milliseconds', 4, '4 seconds'), ('milliseconds', (ONE_HOUR + FOUR_MILLISECONDS), 'an hour'), ('milliseconds', (ONE_YEAR + FOUR_MICROSECONDS), 'a year'), ('microseconds', ONE_MICROSECOND, '1 microsecond'), ('microseconds', FOUR_MICROSECONDS, '4 microseconds'), ('microseconds', FOUR_MILLISECONDS, '4 milliseconds'), ('microseconds', MICROSECONDS_101_943, '101 milliseconds'), ('microseconds', MILLISECONDS_1_337, 'a second'), ('microseconds', 2, '2 seconds'), ('microseconds', 4, '4 seconds'), ('microseconds', (ONE_HOUR + FOUR_MILLISECONDS), 'an hour'), ('microseconds', (ONE_DAY + FOUR_MILLISECONDS), 'a day'), ('microseconds', (ONE_YEAR + FOUR_MICROSECONDS), 'a year')])
def test_naturaldelta_minimum_unit_explicit(minimum_unit: str, seconds: float, expected: str) -> None:
    """naturaldelta honors an explicit minimum_unit across many magnitudes."""
    delta = dt.timedelta(seconds=seconds)
    assert (humanize.naturaldelta(delta, minimum_unit=minimum_unit) == expected)
# NOTE(review): the decorator on the next line lost its name during
# extraction -- presumably @torch.no_grad(); confirm against the original.
_grad()
def evaluate_similarity(model, *, lowercase, batch_size=256, datasets=tuple(SIMILARITY_BENCHMARKS.keys()), **kwargs):
    """Evaluate the model on word-similarity benchmarks via Spearman correlation.

    For each benchmark, model distances between word pairs are negated into
    similarities and correlated with the human scores.  Two metrics per
    dataset: '<name>_infer_nan' (out-of-vocabulary pairs get -inf similarity)
    and '<name>_omit_nan' (OOV pairs dropped via nan_policy='omit').
    """
    metrics = {}
    for dataset_name in datasets:
        sim_data = SimilarityDataset(dataset_name, lowercase=lowercase)
        # Occurrence counts over all pair slots; used to pick the more
        # frequent word of each pair as the query ("lhs") side.
        word_occurences = Counter((word for word in sim_data.word_pairs.reshape((- 1))))
        lhs_words = set()
        for (w1, w2) in sim_data.word_pairs:
            w1_idx = model.token_to_ix.get(w1)
            w2_idx = model.token_to_ix.get(w2)
            if ((w1_idx is not None) and (w2_idx is not None)):
                lhs_words.add(max((w1, w2), key=word_occurences.get))
            elif (w1_idx is not None):
                lhs_words.add(w1)
            elif (w2_idx is not None):
                lhs_words.add(w2)
        lhs_words = sorted(lhs_words)
        # One batched distance computation from every query word to all vertices.
        with training_mode(model, is_train=False):
            dists = model.evaluate_distances_to_all_vertices(list(map(model.token_to_ix.get, lhs_words)), batch_size=batch_size, soft=False, **kwargs)
        words_to_lhs_idx = {word: i for (i, word) in enumerate(lhs_words)}
        similarities = []
        similarities_notna = []
        for (w1, w2) in sim_data.word_pairs:
            w1_idx = model.token_to_ix.get(w1)
            w2_idx = model.token_to_ix.get(w2)
            if ((w1_idx is not None) and (w2_idx is not None)):
                # Whichever word of the pair was chosen as query indexes the
                # distance row; the partner's vocabulary index selects the column.
                if (w1 in lhs_words):
                    distance = dists[words_to_lhs_idx[w1]][w2_idx]
                else:
                    distance = dists[words_to_lhs_idx[w2]][w1_idx]
            elif (w1_idx is not None):
                # OOV partner: fall back to the query word's mean distance.
                distance = dists[words_to_lhs_idx[w1]].mean()
            elif (w2_idx is not None):
                distance = dists[words_to_lhs_idx[w2]].mean()
            else:
                # Both words OOV: maximally dissimilar.
                distance = float('inf')
            similarity = (- distance)
            similarities.append(similarity)
            if ((w1_idx is not None) and (w2_idx is not None)):
                similarities_notna.append(similarity)
            else:
                similarities_notna.append(np.nan)
        metrics[(dataset_name + '_infer_nan')] = spearmanr(similarities, sim_data.scores, nan_policy='raise').correlation
        metrics[(dataset_name + '_omit_nan')] = spearmanr(similarities_notna, sim_data.scores, nan_policy='omit').correlation
    return metrics
class DescriptionWrapper(Dataset):
    """Dataset decorator that stamps a fixed 'description' field onto every item."""

    def __init__(self, dataset, description):
        self.dataset = dataset          # wrapped dataset
        self.description = description  # value injected under 'description'

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # NOTE: this writes into the item returned by the wrapped dataset.
        sample = self.dataset[index]
        sample['description'] = self.description
        return sample
class Encoder(nn.Module):
    """BigGAN-discriminator-style encoder built from DBlocks with optional attention.

    The layer table (channel widths, downsampling, attention placement) comes
    from ``D_arch(self.ch, self.attention)[resolution]``.  forward() returns a
    linear projection of globally pooled features, or (with E1_output_feat)
    a dict of intermediate feature maps keyed by spatial size.
    """
    def __init__(self, D_ch=64, D_wide=True, resolution=128, D_kernel_size=3, D_attn='64',
                 n_classes=1000, num_D_SVs=1, num_D_SV_itrs=1,
                 # NOTE(review): a module instance as a default argument is shared
                 # across Encoder instances; harmless for stateless ReLU but worth
                 # confirming upstream.
                 D_activation=nn.ReLU(inplace=False),
                 D_lr=0.0002, D_B1=0.0, D_B2=0.999, adam_eps=1e-08, SN_eps=1e-12,
                 output_dim=1, D_mixed_precision=False, D_fp16=False,
                 D_init='ortho', skip_init=False, D_param='SN', **kwargs):
        super(Encoder, self).__init__()
        self.ch = D_ch                    # base channel width
        self.D_wide = D_wide              # use the wide DBlock variant
        self.resolution = resolution      # input image resolution
        self.kernel_size = D_kernel_size
        self.attention = D_attn           # resolutions (string) at which to attend
        self.n_classes = n_classes
        self.activation = D_activation
        self.init = D_init                # weight init scheme
        self.D_param = D_param
        self.SN_eps = SN_eps
        self.fp16 = D_fp16
        # Architecture table for this resolution.
        self.arch = D_arch(self.ch, self.attention)[resolution]
        # NOTE(review): only D_param == 'SN' defines which_conv/linear/embedding;
        # any other value would fail below when the blocks are built.
        if (self.D_param == 'SN'):
            # Spectrally-normalized conv/linear/embedding factories.
            self.which_conv = functools.partial(layers.SNConv2d, kernel_size=3, padding=1, num_svs=num_D_SVs, num_itrs=num_D_SV_itrs, eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear, num_svs=num_D_SVs, num_itrs=num_D_SV_itrs, eps=self.SN_eps)
            self.which_embedding = functools.partial(layers.SNEmbedding, num_svs=num_D_SVs, num_itrs=num_D_SV_itrs, eps=self.SN_eps)
        # Build the DBlock stack; each entry is a list so attention can be
        # appended to the same stage.
        self.blocks = []
        for index in range(len(self.arch['out_channels'])):
            self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index], out_channels=self.arch['out_channels'][index], which_conv=self.which_conv, wide=self.D_wide, activation=self.activation, preactivation=(index > 0), downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
            if self.arch['attention'][self.arch['resolution'][index]]:
                print(('Adding attention layer in D at resolution %d' % self.arch['resolution'][index]))
                self.blocks[(- 1)] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
        # Output projection and (projection-discriminator style) class embedding.
        self.linear = self.which_linear(self.arch['out_channels'][(- 1)], output_dim)
        self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][(- 1)])
        if (not skip_init):
            self.init_weights()
        # Optimizer setup.
        (self.lr, self.B1, self.B2, self.adam_eps) = (D_lr, D_B1, D_B2, adam_eps)
        if D_mixed_precision:
            print('Using fp16 adam in D...')
            import utils
            self.optim = utils.Adam16(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        else:
            self.optim = optim.Adam(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    def init_weights(self):
        """Initialize conv/linear/embedding weights per self.init and count params."""
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Embedding)):
                if (self.init == 'ortho'):
                    init.orthogonal_(module.weight)
                elif (self.init == 'N02'):
                    init.normal_(module.weight, 0, 0.02)
                elif (self.init in ['glorot', 'xavier']):
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print(('Param count for Ds initialized parameters: %d' % self.param_count))
    def forward(self, x, y=None, E1_output_feat=False):
        """Run the block stack; `y` is accepted for API parity but unused here.

        Returns the pooled linear output, or (with E1_output_feat) a dict of
        intermediate features keyed by spatial width.
        """
        h = x
        L_feat = {}
        for (index, blocklist) in enumerate(self.blocks):
            for block in blocklist:
                h = block(h)
            # Keep intermediate maps whose spatial size lies in
            # [resolution/32, resolution/4].
            if ((h.shape[(- 1)] >= (self.resolution / 32)) and ((self.resolution / 4) >= h.shape[(- 1)])):
                L_feat[h.shape[(- 1)]] = h
        # Global sum-pool over spatial dims after the final activation.
        h = torch.sum(self.activation(h), [2, 3])
        out = self.linear(h)
        # NOTE(review): redundant self-assignment kept from the original.
        out = out
        return (out if (not E1_output_feat) else L_feat)
class CompileCatalog(CommandMixin):
    """Catalog compilation command: compiles PO message catalogs for one or
    more gettext domains into binary MO files.

    Either an explicit input/output file pair or a base directory laid out
    as ``<directory>/<locale>/LC_MESSAGES/<domain>.po`` must be supplied.
    """
    description = 'compile message catalogs to binary MO files'
    # (long-option, short-option, help) triples, distutils-style.
    user_options = [('domain=', 'D', "domains of PO files (space separated list, default 'messages')"), ('directory=', 'd', 'path to base directory containing the catalogs'), ('input-file=', 'i', 'name of the input file'), ('output-file=', 'o', "name of the output file (default '<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"), ('locale=', 'l', 'locale of the catalog to compile'), ('use-fuzzy', 'f', 'also include fuzzy translations'), ('statistics', None, 'print statistics about translations')]
    boolean_options = ['use-fuzzy', 'statistics']
    def initialize_options(self):
        """Set documented defaults for every supported option."""
        self.domain = 'messages'
        self.directory = None
        self.input_file = None
        self.output_file = None
        self.locale = None
        self.use_fuzzy = False
        self.statistics = False
    def finalize_options(self):
        """Normalize ``domain`` into a list and validate that either explicit
        files or a base directory were given."""
        self.domain = listify_value(self.domain)
        if ((not self.input_file) and (not self.directory)):
            raise OptionError('you must specify either the input file or the base directory')
        if ((not self.output_file) and (not self.directory)):
            raise OptionError('you must specify either the output file or the base directory')
    def run(self):
        """Compile all configured domains.

        Returns 1 if any catalog produced check errors, 0 otherwise.
        """
        n_errors = 0
        for domain in self.domain:
            for errors in self._run_domain(domain).values():
                n_errors += len(errors)
        if n_errors:
            self.log.error('%d errors encountered.', n_errors)
        return (1 if n_errors else 0)
    def _run_domain(self, domain):
        """Compile every catalog of a single *domain*.

        Returns a dict mapping each compiled catalog to the list of
        (message, errors) pairs reported by ``catalog.check()``.
        """
        po_files = []
        mo_files = []
        # Build parallel (locale, po_path) / mo_path lists, either from the
        # explicit input file or by scanning the base directory.
        if (not self.input_file):
            if self.locale:
                po_files.append((self.locale, os.path.join(self.directory, self.locale, 'LC_MESSAGES', f'{domain}.po')))
                mo_files.append(os.path.join(self.directory, self.locale, 'LC_MESSAGES', f'{domain}.mo'))
            else:
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale, 'LC_MESSAGES', f'{domain}.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(self.directory, locale, 'LC_MESSAGES', f'{domain}.mo'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                mo_files.append(self.output_file)
            else:
                mo_files.append(os.path.join(self.directory, self.locale, 'LC_MESSAGES', f'{domain}.mo'))
        if (not po_files):
            raise OptionError('no message catalogs found')
        catalogs_and_errors = {}
        for (idx, (locale, po_file)) in enumerate(po_files):
            mo_file = mo_files[idx]
            with open(po_file, 'rb') as infile:
                catalog = read_po(infile, locale)
            if self.statistics:
                translated = 0
                # Skip the header entry (index 0) when counting translations.
                for message in list(catalog)[1:]:
                    if message.string:
                        translated += 1
                percentage = 0
                if len(catalog):
                    percentage = ((translated * 100) // len(catalog))
                self.log.info('%d of %d messages (%d%%) translated in %s', translated, len(catalog), percentage, po_file)
            # Fuzzy catalogs are skipped unless --use-fuzzy was requested.
            if (catalog.fuzzy and (not self.use_fuzzy)):
                self.log.info('catalog %s is marked as fuzzy, skipping', po_file)
                continue
            catalogs_and_errors[catalog] = catalog_errors = list(catalog.check())
            for (message, errors) in catalog_errors:
                for error in errors:
                    self.log.error('error: %s:%d: %s', po_file, message.lineno, error)
            self.log.info('compiling catalog %s to %s', po_file, mo_file)
            with open(mo_file, 'wb') as outfile:
                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
        return catalogs_and_errors
def test_update_iou():
    """Updating an IOU must bump the amount, re-sign it, and reject tampering."""
    privkey = bytes([2] * 32)
    sender = Address(privatekey_to_address(privkey))
    receiver = Address(bytes([1] * 20))
    one_to_n_address = Address(bytes([2] * 20))
    iou = IOU(
        sender=sender,
        receiver=receiver,
        amount=10,
        expiration_block=1000,
        chain_id=4,
        one_to_n_address=one_to_n_address,
    )
    iou.sign(privkey)

    added_amount = 10
    new_iou = update_iou(iou=replace(iou), privkey=privkey, added_amount=added_amount)

    # Amount grows by exactly the added amount; the parties are unchanged,
    # but the signature must differ since the signed content changed.
    assert new_iou.amount == iou.amount + added_amount
    assert new_iou.sender == iou.sender
    assert new_iou.receiver == iou.receiver
    assert new_iou.signature != iou.signature

    # Manually altering the amount invalidates the signature.
    tampered_iou = replace(new_iou)
    tampered_iou.amount += 10
    with pytest.raises(ServiceRequestFailed):
        update_iou(iou=tampered_iou, privkey=privkey, added_amount=added_amount)
def _unquote(name):
assert isinstance(name, bytes), ('Input %s to function must be bytes not %s.' % (name, type(name)))
def unquoted_char(match):
if (not (len(match.group()) == 4)):
return match.group
try:
return bytes([int(match.group()[1:])])
except Exception:
return match.group
return re.sub(b';[0-9]{3}', unquoted_char, name, re.S) |
_module()
class ResNet3dSlowFast(nn.Module):
    """SlowFast backbone: a slow pathway fed at a low frame rate and a fast
    pathway fed at a high frame rate, fused through lateral connections from
    fast to slow.

    NOTE(review): the bare ``_module()`` expression above this class looks
    like a truncated registry decorator (presumably
    ``@BACKBONES.register_module()``); confirm against the original file.
    """
    def __init__(self, pretrained, resample_rate=8, speed_ratio=8, channel_ratio=8, slow_pathway=dict(type='resnet3d', depth=50, pretrained=None, lateral=True, conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1)), fast_pathway=dict(type='resnet3d', depth=50, pretrained=None, lateral=False, base_channels=8, conv1_kernel=(5, 7, 7), conv1_stride_t=1, pool1_stride_t=1)):
        """Args:
            pretrained: checkpoint path (str) or None.
            resample_rate: temporal subsampling factor applied to the input
                before the slow pathway.
            speed_ratio: frame-rate ratio between fast and slow pathways.
            channel_ratio: channel ratio used by the lateral connections.
            slow_pathway / fast_pathway: config dicts passed to
                ``build_pathway``.
        """
        super().__init__()
        self.pretrained = pretrained
        self.resample_rate = resample_rate
        self.speed_ratio = speed_ratio
        self.channel_ratio = channel_ratio
        # Lateral connections need the ratios to size their conv layers.
        if slow_pathway['lateral']:
            slow_pathway['speed_ratio'] = speed_ratio
            slow_pathway['channel_ratio'] = channel_ratio
        self.slow_path = build_pathway(slow_pathway)
        self.fast_path = build_pathway(fast_pathway)
    def init_weights(self, pretrained=None):
        """Initialize weights, loading a checkpoint when a path is set.

        Raises:
            TypeError: if ``self.pretrained`` is neither a str nor None.
        """
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            msg = f'load model from: {self.pretrained}'
            print_log(msg, logger=logger)
            load_checkpoint(self, self.pretrained, strict=True, logger=logger)
        elif (self.pretrained is None):
            # Delegate to each pathway's own initialization.
            self.fast_path.init_weights()
            self.slow_path.init_weights()
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Run both pathways over a clip.

        Args:
            x: 5-D input tensor (the temporal axis is subsampled via
                nearest-neighbor interpolation with a fractional scale).

        Returns:
            Tuple ``(x_slow, x_fast)`` of the two pathway feature maps.
        """
        # Slow pathway sees 1/resample_rate of the frames.
        x_slow = nn.functional.interpolate(x, mode='nearest', scale_factor=((1.0 / self.resample_rate), 1.0, 1.0))
        x_slow = self.slow_path.conv1(x_slow)
        x_slow = self.slow_path.maxpool(x_slow)
        # Fast pathway sees speed_ratio times more frames than the slow one.
        x_fast = nn.functional.interpolate(x, mode='nearest', scale_factor=((1.0 / (self.resample_rate // self.speed_ratio)), 1.0, 1.0))
        x_fast = self.fast_path.conv1(x_fast)
        x_fast = self.fast_path.maxpool(x_fast)
        # Fuse fast features into the slow pathway before the first stage.
        if self.slow_path.lateral:
            x_fast_lateral = self.slow_path.conv1_lateral(x_fast)
            x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
        for (i, layer_name) in enumerate(self.slow_path.res_layers):
            res_layer = getattr(self.slow_path, layer_name)
            x_slow = res_layer(x_slow)
            res_layer_fast = getattr(self.fast_path, layer_name)
            x_fast = res_layer_fast(x_fast)
            # Lateral fusion after every stage except the last.
            if ((i != (len(self.slow_path.res_layers) - 1)) and self.slow_path.lateral):
                lateral_name = self.slow_path.lateral_connections[i]
                conv_lateral = getattr(self.slow_path, lateral_name)
                x_fast_lateral = conv_lateral(x_fast)
                x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
        out = (x_slow, x_fast)
        return out
def test_download_one_point():
    """Download a single waveform point and check data plus preamble decoding.

    Fix: the expected ``sampling_rate`` was written as ``.0`` (which evaluates
    to 0.0), contradicting the mocked ``SARA?`` reply of ``1.00E+09``; the
    correct expected value is 1e9.
    """
    with expected_protocol(LeCroyT3DSO1204, [(b'CHDR OFF', None), (b'WFSU SP,1', None), (b'WFSU NP,1', None), (b'WFSU FP,0', None), (b'SANU? C1', b'7.00E+06'), (b'WFSU NP,1', None), (b'WFSU FP,0', None), (b'C1:WF? DAT2', ((b'DAT2,#' + b'\x01') + b'\n\n')), (b'WFSU?', b'SP,1,NP,2,FP,0'), (b'ACQW?', b'SAMPLING'), (b'SARA?', b'1.00E+09'), (b'SAST?', b'Stop'), (b'MSIZ?', b'7M'), (b'TDIV?', b'5.00E-04'), (b'TRDL?', b'-0.00E+00'), (b'SANU? C1', b'7.00E+06'), (b'C1:VDIV?', b'5.00E-02'), (b'C1:OFST?', b'-1.50E-01'), (b'C1:UNIT?', b'V')], connection_attributes={'chunk_size': 0}) as instr:
        (y, x, preamble) = instr.download_waveform(source='c1', requested_points=1, sparsing=1)
        assert (preamble == {'sparsing': 1, 'requested_points': 1, 'memory_size': 7000000.0, 'sampled_points': 7000000.0, 'transmitted_points': 1, 'first_point': 0, 'source': 'C1', 'type': 'normal', 'average': None, 'sampling_rate': 1e9, 'grid_number': 14, 'status': 'stopped', 'xdiv': 0.0005, 'xoffset': 0, 'ydiv': 0.05, 'yoffset': (- 0.15), 'unit': 'V'})
        assert (len(x) == 1)
        assert (len(y) == 1)
        # Raw byte 0x01 scaled by ydiv/25 and shifted by -yoffset.
        assert (y[0] == (((1 * 0.05) / 25.0) + 0.15))
def build_dataset(is_train, args):
    """Build the dataset selected by ``args.data_set``.

    Args:
        is_train: True for the training split, False for validation.
        args: namespace providing ``data_set``, ``data_path``,
            ``use_mcloader`` and ``inat_category``.

    Returns:
        Tuple ``(dataset, nb_classes)``.

    Raises:
        ValueError: if ``args.data_set`` names an unknown dataset (the old
            fall-through produced an opaque NameError on ``nb_classes``).
    """
    transform = build_transform(is_train, args)
    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        if not args.use_mcloader:
            root = os.path.join(args.data_path, 'train' if is_train else 'val')
            dataset = datasets.ImageFolder(root, transform=transform)
        else:
            # Memcached-backed loader used on clusters.
            dataset = ClassificationDataset('train' if is_train else 'val', pipeline=transform)
        nb_classes = 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        dataset = INatDataset(args.data_path, train=is_train, year=2019, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    else:
        raise ValueError(f'Unknown dataset: {args.data_set!r}')
    return (dataset, nb_classes)
class BadMsgNotification(Exception):
    """Raised for a server ``bad_msg_notification``; maps the numeric error
    code to a human-readable description in the exception message."""

    # Error-code -> description table, per the MTProto service messages spec.
    descriptions = {16: 'The msg_id is too low, the client time has to be synchronized.', 17: 'The msg_id is too high, the client time has to be synchronized.', 18: 'Incorrect two lower order of the msg_id bits, the server expects the client message msg_id to be divisible by 4.', 19: 'The container msg_id is the same as the msg_id of a previously received message.', 20: 'The message is too old, it cannot be verified by the server.', 32: 'The msg_seqno is too low.', 33: 'The msg_seqno is too high.', 34: 'An even msg_seqno was expected, but an odd one was received.', 35: 'An odd msg_seqno was expected, but an even one was received.', 48: 'Incorrect server salt.', 64: 'Invalid container.'}

    def __init__(self, code):
        # Unknown codes get a generic message rather than a KeyError.
        super().__init__(f'[{code}] {self.descriptions.get(code, "Unknown error code")}')
# Fix: the decorator had lost its '@pytest.mark' prefix (a bare leading
# '.parametrize(...)' is a SyntaxError); restored below.
@pytest.mark.parametrize('mu, beta, size', [(np.array(0, dtype=config.floatX), np.array(1, dtype=config.floatX), None), (np.array(0, dtype=config.floatX), np.array(1, dtype=config.floatX), []), (np.full((1, 2), 0, dtype=config.floatX), np.array(1, dtype=config.floatX), None)])
def test_gumbel_samples(mu, beta, size):
    """Gumbel draws must match scipy's ``gumbel_r`` for scalar, empty-shape
    and broadcast parameter cases."""
    compare_sample_values(gumbel, mu, beta, size=size, test_fn=fixed_scipy_rvs('gumbel_r'))
def convert_execution_result_to_train_instance(row):
    """Convert one execution-result record into a single training instance.

    Failed executions contribute language feedback (the traceback string when
    present, otherwise the failure reason); any ``additional_feedback`` on the
    row is appended on a new line (or used alone for successful runs).

    Returns a one-element list holding the instance dict.
    """
    execution_result = row['execution_result']
    success = execution_result['success']
    problem = row['prompt']
    generated_solution = row['generation']

    language_feedback = None
    if not success:
        if 'traceback' in execution_result:
            language_feedback = execution_result['traceback']['str']
        else:
            language_feedback = execution_result['reason']

    extra = row.get('additional_feedback')
    if extra:
        if language_feedback is None:
            language_feedback = extra
        else:
            language_feedback = language_feedback + '\n' + extra

    return [{
        'success': success,
        'problem': problem,
        'generated_solution': generated_solution,
        'language_feedback': language_feedback,
        'text_a': language_feedback,
        'text_b': problem + generated_solution,
    }]
class DataLoader_Target(object):
    """Batch iterator over a target interaction file.

    Each line of *target_file* is split on ',' into (uid, iid) after stripping
    the trailing newline — assumes exactly two comma-separated fields per line
    (TODO confirm against the data format). Per-sample feature vectors are
    assembled from pickled user/item/context feature dictionaries.
    """
    def __init__(self, batch_size, target_file, user_feat_dict_file, item_feat_dict_file, context_dict_file):
        """Open the target file and load the pickled feature dictionaries.

        ``user_feat_dict_file`` may be None, in which case user features are
        omitted from each sample.
        """
        self.batch_size = batch_size
        # NOTE: the file handle is kept open for the iterator's lifetime and
        # is never explicitly closed here.
        self.target_file = open(target_file, 'r')
        if (user_feat_dict_file != None):
            with open(user_feat_dict_file, 'rb') as f:
                self.user_feat_dict = pkl.load(f)
        else:
            self.user_feat_dict = None
        with open(item_feat_dict_file, 'rb') as f:
            self.item_feat_dict = pkl.load(f)
        with open(context_dict_file, 'rb') as f:
            self.context_dict = pkl.load(f)
    def __iter__(self):
        return self
    def __next__(self):
        """Read the next batch.

        Returns ``(target_batch, label_batch)``; raises StopIteration at EOF.
        Labels alternate 1, 0, 1, 0, ... within the batch — presumably the
        input file interleaves positive and negative samples (TODO confirm).
        """
        target_batch = []
        label_batch = []
        for i in range(self.batch_size):
            target_line = self.target_file.readline()
            if (target_line == ''):
                raise StopIteration
            # Strip the trailing newline, then split "uid,iid".
            (target_uid, target_iid) = target_line[:(- 1)].split(',')
            if (self.user_feat_dict != None):
                target_batch.append((((([int(target_uid)] + self.user_feat_dict[target_uid]) + [int(target_iid)]) + self.item_feat_dict[target_iid]) + self.context_dict[target_uid]))
            else:
                target_batch.append(((([int(target_uid)] + [int(target_iid)]) + self.item_feat_dict[target_iid]) + self.context_dict[target_uid]))
            if ((i % 2) == 0):
                label_batch.append(1)
            else:
                label_batch.append(0)
        return (target_batch, label_batch)
('I load a third-party iframe')
def load_iframe(quteproc, server, ssl_server):
    # NOTE(review): the bare string expression above looks like a truncated
    # BDD step decorator (presumably `@bdd.when("I load a third-party
    # iframe")`); restore it from the original file.
    # Allow the page to load despite certificate problems so the iframe's
    # certificate errors can be observed instead of blocking the load.
    quteproc.set_setting('content.tls.certificate_errors', 'load-insecurely')
    # NOTE(review): the URL argument below is garbled in this extract (the
    # f-string literal is unterminated); restore the full path from upstream.
    quteproc.open_path(f' port=server.port)
    # Two certificate errors are expected (page + iframe); mark both as
    # expected so the test harness does not fail on them.
    msg = quteproc.wait_for(message='Certificate error: *')
    msg.expected = True
    msg = quteproc.wait_for(message='Certificate error: *')
    msg.expected = True
class TestOrbitsGappyLongDataXarray(TestOrbitsGappyData):
    """Repeat the gappy-orbit tests using the xarray test instrument with
    longitude-indexed orbits."""
    def setup_method(self):
        """Create a clean xarray test Instrument with longitude orbit info and
        attach the gap-introducing custom filter."""
        self.testInst = pysat.Instrument('pysat', 'testing_xarray', clean_level='clean', orbit_info={'index': 'longitude', 'kind': 'longitude'}, use_header=True)
        self.stime = pysat.instruments.pysat_testing._test_dates['']['']
        # NOTE(review): self.deltime is presumably defined on the parent test
        # class — confirm.
        self.gaps = (self.stime + self.deltime)
        self.testInst.custom_attach(filter_data, kwargs={'times': self.gaps})
        return
    def teardown_method(self):
        """Free per-test attributes."""
        del self.testInst, self.stime, self.gaps
        return
def parse3(f):
    """Group the (block, content) stream produced by ``parse2`` into records.

    Yields lists ``[content_050, content_052, extras]`` where *extras* holds
    every other (block, content) pair seen for that record. A new ``050``
    block starts the next record once the current one has both headers; a
    trailing complete record is flushed at end of input.
    """
    record = [None, None, []]
    for block, content in parse2(f):
        # A fresh 050 header closes out the previous record, if complete.
        if block == b'050' and record[0] and record[1]:
            yield record
            record = [None, None, []]
        if block == b'050':
            record[0] = content
        elif block == b'052':
            record[1] = content
        else:
            record[2].append((block, content))
    if record[0] and record[1]:
        yield record
def expand_named_state_definition(source, loc, tokens):
    """Pyparsing parse action that expands a named state-machine definition
    into Python source code: a base state class, singleton transition
    objects, one subclass per state, and a mixin that delegates attribute
    access to the current state.

    Returns the generated code as a single string, re-indented to the column
    where the original definition started.
    """
    indent = (' ' * (pp.col(loc, source) - 1))
    statedef = []
    # Collect all states, all transitions, and a
    # from-state -> {transition: to-state} map.
    states = set()
    transitions = set()
    baseStateClass = tokens.name
    fromTo = {}
    for tn in tokens.transitions:
        states.add(tn.from_state)
        states.add(tn.to_state)
        transitions.add(tn.transition)
        if (tn.from_state in fromTo):
            fromTo[tn.from_state][tn.transition] = tn.to_state
        else:
            fromTo[tn.from_state] = {tn.transition: tn.to_state}
    # States with no outgoing transitions still get an (empty) entry.
    for s in states:
        if (s not in fromTo):
            fromTo[s] = {}
    # Marker class for transitions plus one singleton instance per transition.
    statedef.extend(['class {baseStateClass}Transition:'.format(baseStateClass=baseStateClass), ' def __str__(self):', ' return self.transitionName'])
    statedef.extend(('{tn_name} = {baseStateClass}Transition()'.format(tn_name=tn, baseStateClass=baseStateClass) for tn in transitions))
    statedef.extend(("{tn_name}.transitionName = '{tn_name}'".format(tn_name=tn) for tn in transitions))
    # Base state class: every transition defaults to raising
    # InvalidTransitionException; subclasses override the valid ones.
    statedef.extend([('class %s(object):' % baseStateClass), ' from statemachine import InvalidTransitionException as BaseTransitionException', ' class InvalidTransitionException(BaseTransitionException): pass', ' def __str__(self):', ' return self.__class__.__name__', ' ', ' def states(cls):', ' return list(cls.__subclasses__())', ' ', ' def next_state(cls, name):', ' try:', ' return cls.tnmap[name]()', ' except KeyError:', " raise cls.InvalidTransitionException('%s does not support transition %r'% (cls.__name__, name))", ' def __bad_tn(name):', ' def _fn(cls):', " raise cls.InvalidTransitionException('%s does not support transition %r'% (cls.__name__, name))", ' _fn.__name__ = name', ' return _fn'])
    statedef.extend((' {tn_name} = classmethod(__bad_tn({tn_name!r}))'.format(tn_name=tn) for tn in transitions))
    # One subclass per state.
    statedef.extend(('class {}({}): pass'.format(s, baseStateClass) for s in states))
    # Wire the valid transitions: each becomes a classmethod returning an
    # instance of the destination state.
    for s in states:
        trns = list(fromTo[s].items())
        statedef.extend(('{}.{} = classmethod(lambda cls: {}())'.format(s, tn_, to_) for (tn_, to_) in trns))
    statedef.extend(['{baseStateClass}.transitions = classmethod(lambda cls: [{transition_class_list}])'.format(baseStateClass=baseStateClass, transition_class_list=', '.join(('cls.{}'.format(tn) for tn in transitions))), '{baseStateClass}.transition_names = [tn.__name__ for tn in {baseStateClass}.transitions()]'.format(baseStateClass=baseStateClass)])
    # Mixin that holds the current state and forwards attribute access and
    # transition calls to it.
    statedef.extend(['class {baseStateClass}Mixin:'.format(baseStateClass=baseStateClass), ' def __init__(self):', ' self._state = None', ' def initialize_state(self, init_state):', ' if issubclass(init_state, {baseStateClass}):'.format(baseStateClass=baseStateClass), ' init_state = init_state()', ' self._state = init_state', ' ', ' def state(self):', ' return self._state', ' # get behavior/properties from current state', ' def __getattr__(self, attrname):', ' attr = getattr(self._state, attrname)', ' return attr', ' def __str__(self):', " return '{0}: {1}'.format(self.__class__.__name__, self._state)"])
    statedef.extend((' def {tn_name}(self): self._state = self._state.{tn_name}()'.format(tn_name=tn) for tn in transitions))
    return (('\n' + indent).join(statedef) + '\n')
def get_parser():
    """Build the top-level argparse parser for the Electrum CLI.

    Creates the 'gui' and 'daemon' subcommands plus one subcommand per entry
    in the module-level ``known_commands`` registry, and makes 'gui' the
    default subcommand.
    """
    parser = argparse.ArgumentParser(epilog="Run 'electrum help <command>' to see the help for a command")
    add_global_options(parser)
    add_wallet_option(parser)
    subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
    # --- 'gui' subcommand -------------------------------------------------
    parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help='Run GUI (default)')
    parser_gui.add_argument('url', nargs='?', default=None, help='Qtum URI (or bip70 file)')
    parser_gui.add_argument('-g', '--gui', dest='gui', help='select graphical user interface', choices=['qt', 'kivy', 'text', 'stdio'])
    parser_gui.add_argument('-m', action='store_true', dest='hide_gui', default=False, help='hide GUI on startup')
    parser_gui.add_argument('-L', '--lang', dest='language', default=None, help='default language used in GUI')
    parser_gui.add_argument('--daemon', action='store_true', dest='daemon', default=False, help='keep daemon running after GUI is closed')
    parser_gui.add_argument('--nosegwit', action='store_true', dest='nosegwit', default=False, help='Do not create segwit wallets')
    add_wallet_option(parser_gui)
    add_network_options(parser_gui)
    add_global_options(parser_gui)
    # --- 'daemon' subcommand ----------------------------------------------
    parser_daemon = subparsers.add_parser('daemon', help='Run Daemon')
    parser_daemon.add_argument('-d', '--detached', action='store_true', dest='detach', default=False, help='run daemon in detached mode')
    add_network_options(parser_daemon)
    add_global_options(parser_daemon)
    # --- one subcommand per registered command ----------------------------
    for cmdname in sorted(known_commands.keys()):
        cmd = known_commands[cmdname]
        p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
        # Optional arguments (with defaults) become flags.
        for (optname, default) in zip(cmd.options, cmd.defaults):
            if (optname in ['wallet_path', 'wallet']):
                add_wallet_option(p)
                continue
            (a, help) = command_options[optname]
            b = ('--' + optname)
            # Boolean defaults become store_true flags; everything else a
            # typed store option.
            action = ('store_true' if (default is False) else 'store')
            args = ((a, b) if a else (b,))
            if (action == 'store'):
                _type = arg_types.get(optname, str)
                p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
            else:
                p.add_argument(*args, dest=optname, action=action, default=default, help=help)
        add_global_options(p)
        # Required parameters become positional arguments.
        for param in cmd.params:
            if (param in ['wallet_path', 'wallet']):
                continue
            h = param_descriptions.get(param, '')
            _type = arg_types.get(param, str)
            p.add_argument(param, help=h, type=_type)
        # Extra configuration variables settable via setconfig/getconfig.
        cvh = config_variables.get(cmdname)
        if cvh:
            group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
            for (k, v) in cvh.items():
                group.add_argument(k, nargs='?', help=v)
    # Fall back to the GUI when no subcommand is given.
    parser.set_default_subparser('gui')
    return parser
def _extract_tolerances(data_frame: DataFrame, methods: List[str]) -> List[float]:
    """Return the tolerances available for *every* method, ascending.

    Starts from all tolerances present in the frame and intersects with the
    tolerance set of each method's rows.
    """
    common = set(data_frame[SpottingEvaluation.TOLERANCE])
    for method_name in methods:
        method_rows = data_frame[data_frame[METHOD] == method_name]
        common &= set(method_rows[SpottingEvaluation.TOLERANCE])
    return sorted(common)
def get_prefix_from_len(sentence, bpe_symbol, prefix_len):
    """Return a prefix of *sentence* covering ``prefix_len`` whole words.

    Tokens containing *bpe_symbol* (stripped of surrounding spaces) are BPE
    continuation pieces; for every such token found inside the prefix, the
    prefix is recursively extended into the remainder so that no word is cut
    in half.
    """
    marker = bpe_symbol.strip(' ')
    prefix = sentence[:prefix_len]
    continuations = sum(1 for token in prefix if marker in token)
    if continuations == 0:
        return prefix
    return prefix + get_prefix_from_len(sentence[prefix_len:], bpe_symbol, continuations)
def crack(passwd):
    """Emulate the STM32F407 'backdoorlock' firmware under Qiling and try
    *passwd* against it, reporting success or failure on stdout.

    NOTE(review): the four ``ql.patch(, ...)`` calls, the ``end=`` keyword
    and the final address comparison are missing their address literals in
    this extract (they are syntax errors as written); restore the concrete
    addresses from the original script before running.
    """
    ql = Qiling(['../../examples/rootfs/mcu/stm32f407/backdoorlock.hex'], archtype=QL_ARCH.CORTEX_M, ostype=QL_OS.MCU, env=stm32f407, verbose=QL_VERBOSE.DISABLED)
    # Create the peripherals the firmware touches.
    ql.hw.create('spi2')
    ql.hw.create('gpioe')
    ql.hw.create('gpiof')
    ql.hw.create('usart1')
    ql.hw.create('rcc')
    ql.hw.show_info()
    print('Testing passwd', passwd)
    # NOP-patch (Thumb `nop` = 00 bf) several code regions, presumably delay
    # loops or checks that slow down the brute force — addresses missing here.
    ql.patch(, (b'\x00\xbf' * 4))
    ql.patch(, (b'\x00\xbf' * 11))
    ql.patch(, (b'\x00\xbf' * 13))
    ql.patch(, (b'\x00\xbf' * 10))
    # Feed the candidate password over UART, CR-terminated.
    ql.hw.usart1.send((passwd.encode() + b'\r'))
    # Speed up the systick so the firmware's timing loops run faster.
    ql.hw.systick.set_ratio(100)
    ql.run(count=1000000, end=)
    # Success is detected by the PC reaching the "unlocked" address.
    if (ql.arch.effective_pc == ):
        print('Success, the passwd is', passwd)
    else:
        print('Fail, the passwd is not', passwd)
    del ql
class ClientManager(Observer):
    """Client-side communication manager for federated learning.

    Wraps a communication backend (MPI or MQTT), observes incoming messages
    and dispatches them to registered per-type handler callbacks.
    """
    def __init__(self, args, comm=None, rank=0, size=0, backend='MPI'):
        """Args:
            args: run configuration; ``args.mode == 'distributed'`` triggers
                the mpi4py import.
            comm: MPI communicator (MPI backend only).
            rank: this client's rank / client id.
            size: total number of processes (server + clients).
            backend: 'MPI' or 'MQTT'; anything else falls back to MPI.
        """
        if (args.mode == 'distributed'):
            from mpi4py import MPI
        self.args = args
        self.size = size
        self.rank = rank
        self.backend = backend
        if (backend == 'MPI'):
            self.com_manager = MpiCommunicationManager(comm, rank, size, node_type='client')
        elif (backend == 'MQTT'):
            # Hard-coded broker endpoint for the MQTT backend.
            HOST = '81.71.1.31'
            PORT = 1883
            self.com_manager = MqttCommManager(HOST, PORT, client_id=rank, client_num=(size - 1))
        else:
            # Unknown backend: default to MPI.
            self.com_manager = MpiCommunicationManager(comm, rank, size, node_type='client')
        self.com_manager.add_observer(self)
        # msg_type -> handler callback registry.
        self.message_handler_dict = dict()
    def run(self):
        """Register handlers and block on the receive loop."""
        self.register_message_receive_handlers()
        self.com_manager.handle_receive_message()
    def get_sender_id(self):
        """Return this client's id (its rank)."""
        return self.rank
    def receive_message(self, msg_type, msg_params) -> None:
        """Observer hook: dispatch an incoming message to its handler.

        Raises KeyError if no handler was registered for *msg_type*.
        """
        handler_callback_func = self.message_handler_dict[msg_type]
        handler_callback_func(msg_params)
    def send_message(self, message):
        """Copy *message* (type, sender, receiver and all params) into a new
        Message and hand it to the communication backend."""
        msg = Message()
        msg.add(Message.MSG_ARG_KEY_TYPE, message.get_type())
        msg.add(Message.MSG_ARG_KEY_SENDER, message.get_sender_id())
        msg.add(Message.MSG_ARG_KEY_RECEIVER, message.get_receiver_id())
        for (key, value) in message.get_params().items():
            msg.add(key, value)
        self.com_manager.send_message(msg)
    def register_message_receive_handlers(self) -> None:
        """Subclasses override this to register their message handlers."""
        pass
    def register_message_receive_handler(self, msg_type, handler_callback_func):
        """Register *handler_callback_func* for messages of *msg_type*."""
        self.message_handler_dict[msg_type] = handler_callback_func
    def finish(self):
        """Abort the MPI world to terminate the client.

        NOTE(review): ``MPI`` is only imported locally in ``__init__`` when
        ``args.mode == 'distributed'``; this method presumably relies on a
        module-level import elsewhere in the file — confirm.
        """
        logging.info('__finish server')
        if (self.backend == 'MPI'):
            MPI.COMM_WORLD.Abort()
_stabilize
_rewriter([Blockwise])
def psd_solve_with_chol(fgraph, node):
    """Graph rewrite: replace ``solve(A, b)`` with two triangular solves via
    a Cholesky factor when ``A`` is tagged positive semi-definite.

    NOTE(review): the two bare expressions above this function look like
    truncated decorators (presumably ``@register_stabilize`` and
    ``@node_rewriter([Blockwise])``); restore them from the original source.
    """
    if (isinstance(node.op.core_op, Solve) and (node.op.core_op.b_ndim == 2)):
        (A, b) = node.inputs
        # Only applies when the user explicitly tagged A as psd.
        if (getattr(A.tag, 'psd', None) is True):
            L = cholesky(A)
            # Solve L @ Li_b = b, then L.T @ x = Li_b.
            Li_b = solve(L, b, assume_a='sym', lower=True, b_ndim=2)
            x = solve(_T(L), Li_b, assume_a='sym', lower=False, b_ndim=2)
            return [x]
def ensemble(training_output_folder1, training_output_folder2, output_folder, task, validation_folder, folds, allow_ensembling: bool=True):
    """Ensemble the softmax predictions (.npz) of two trained models.

    For every fold, pairs up the validation .npz files of both models, merges
    them into ensembled segmentations under ``<output_folder>/ensembled_raw``,
    aggregates scores against the ground truth, and (optionally) determines
    and applies postprocessing.

    Args:
        training_output_folder1/2: the two models' training output dirs.
        output_folder: base dir for the ensembled output.
        task: task name; used to locate preprocessed data and name the report.
        validation_folder: name of each fold's validation subfolder.
        folds: iterable of fold indices to ensemble.
        allow_ensembling: also run postprocessing determination when True.
    """
    print('\nEnsembling folders\n', training_output_folder1, '\n', training_output_folder2)
    output_folder_base = output_folder
    output_folder = join(output_folder_base, 'ensembled_raw')
    dataset_directory = join(preprocessing_output_dir, task)
    # Both models are assumed to share the same plans; model 1's are used.
    plans = load_pickle(join(training_output_folder1, 'plans.pkl'))
    files1 = []
    files2 = []
    property_files = []
    out_files = []
    gt_segmentations = []
    folder_with_gt_segs = join(dataset_directory, 'gt_segmentations')
    for f in folds:
        validation_folder_net1 = join(training_output_folder1, ('fold_%d' % f), validation_folder)
        validation_folder_net2 = join(training_output_folder2, ('fold_%d' % f), validation_folder)
        # Validate that both validation runs exist and are complete.
        if (not isdir(validation_folder_net1)):
            raise AssertionError(('Validation directory missing: %s. Please rerun validation with `nnFormer_train CONFIG TRAINER TASK FOLD -val --npz`' % validation_folder_net1))
        if (not isdir(validation_folder_net2)):
            raise AssertionError(('Validation directory missing: %s. Please rerun validation with `nnFormer_train CONFIG TRAINER TASK FOLD -val --npz`' % validation_folder_net2))
        if (not isfile(join(validation_folder_net1, 'summary.json'))):
            raise AssertionError(('Validation directory incomplete: %s. Please rerun validation with `nnFormer_train CONFIG TRAINER TASK FOLD -val --npz`' % validation_folder_net1))
        if (not isfile(join(validation_folder_net2, 'summary.json'))):
            raise AssertionError(('Validation directory missing: %s. Please rerun validation with `nnFormer_train CONFIG TRAINER TASK FOLD -val --npz`' % validation_folder_net2))
        # Case identifiers, from .npz (softmax) and .nii.gz (segmentation)
        # files; postprocessed variants are excluded.
        patient_identifiers1_npz = [i[:(- 4)] for i in subfiles(validation_folder_net1, False, None, 'npz', True)]
        patient_identifiers2_npz = [i[:(- 4)] for i in subfiles(validation_folder_net2, False, None, 'npz', True)]
        patient_identifiers1_nii = [i[:(- 7)] for i in subfiles(validation_folder_net1, False, None, suffix='nii.gz', sort=True) if ((not i.endswith('noPostProcess.nii.gz')) and (not i.endswith('_postprocessed.nii.gz')))]
        patient_identifiers2_nii = [i[:(- 7)] for i in subfiles(validation_folder_net2, False, None, suffix='nii.gz', sort=True) if ((not i.endswith('noPostProcess.nii.gz')) and (not i.endswith('_postprocessed.nii.gz')))]
        # Every exported segmentation must have its softmax .npz counterpart.
        if (not all([(i in patient_identifiers1_npz) for i in patient_identifiers1_nii])):
            raise AssertionError(("Missing npz files in folder %s. Please run the validation for all models and folds with the '--npz' flag." % validation_folder_net1))
        if (not all([(i in patient_identifiers2_npz) for i in patient_identifiers2_nii])):
            raise AssertionError(("Missing npz files in folder %s. Please run the validation for all models and folds with the '--npz' flag." % validation_folder_net2))
        patient_identifiers1_npz.sort()
        patient_identifiers2_npz.sort()
        assert all([(i == j) for (i, j) in zip(patient_identifiers1_npz, patient_identifiers2_npz)]), 'npz filenames do not match. This should not happen.'
        maybe_mkdir_p(output_folder)
        for p in patient_identifiers1_npz:
            files1.append(join(validation_folder_net1, (p + '.npz')))
            files2.append(join(validation_folder_net2, (p + '.npz')))
            property_files.append((join(validation_folder_net1, p) + '.pkl'))
            out_files.append(join(output_folder, (p + '.nii.gz')))
            gt_segmentations.append(join(folder_with_gt_segs, (p + '.nii.gz')))
    # Merge the paired softmax predictions in parallel.
    p = Pool(default_num_threads)
    p.map(merge, zip(files1, files2, property_files, out_files))
    p.close()
    p.join()
    # Aggregate evaluation scores for the ensembled predictions (skipped if a
    # summary already exists or nothing was produced).
    if ((not isfile(join(output_folder, 'summary.json'))) and (len(out_files) > 0)):
        aggregate_scores(tuple(zip(out_files, gt_segmentations)), labels=plans['all_classes'], json_output_file=join(output_folder, 'summary.json'), json_task=task, json_name=((task + '__') + output_folder_base.split('/')[(- 1)]), num_threads=default_num_threads)
    # Optionally determine the best postprocessing and copy its summary into
    # the collected summary_jsons directory.
    if (allow_ensembling and (not isfile(join(output_folder_base, 'postprocessing.json')))):
        determine_postprocessing(output_folder_base, folder_with_gt_segs, 'ensembled_raw', 'temp', 'ensembled_postprocessed', default_num_threads, dice_threshold=0)
        out_dir_all_json = join(network_training_output_dir, 'summary_jsons')
        json_out = load_json(join(output_folder_base, 'ensembled_postprocessed', 'summary.json'))
        json_out['experiment_name'] = output_folder_base.split('/')[(- 1)]
        save_json(json_out, join(output_folder_base, 'ensembled_postprocessed', 'summary.json'))
        maybe_mkdir_p(out_dir_all_json)
        shutil.copy(join(output_folder_base, 'ensembled_postprocessed', 'summary.json'), join(out_dir_all_json, ('%s__%s.json' % (task, output_folder_base.split('/')[(- 1)]))))
# Fix: both this decorator and the marks inside the generated test module had
# lost their '@pytest.mark' prefixes (leaving bare '.parametrize(...)' /
# '.filterwarnings(...)', which are SyntaxErrors); restored below.
@pytest.mark.parametrize('default_config', ['ini', 'cmdline'])
def test_filterwarnings_mark(pytester: Pytester, default_config) -> None:
    """The filterwarnings mark must override the ini/cmdline -W setting:
    one test ignores the warning, one turns it into an error, one shows it."""
    if (default_config == 'ini'):
        pytester.makeini('\n [pytest]\n filterwarnings = always::RuntimeWarning\n ')
    pytester.makepyfile("\n import warnings\n import pytest\n\n @pytest.mark.filterwarnings('ignore::RuntimeWarning')\n def test_ignore_runtime_warning():\n warnings.warn(RuntimeWarning())\n\n @pytest.mark.filterwarnings('error')\n def test_warning_error():\n warnings.warn(RuntimeWarning())\n\n def test_show_warning():\n warnings.warn(RuntimeWarning())\n ")
    result = pytester.runpytest(('-W always::RuntimeWarning' if (default_config == 'cmdline') else ''))
    result.stdout.fnmatch_lines(['*= 1 failed, 2 passed, 1 warning in *'])
class AgilentE4980(Instrument):
ac_voltage = Instrument.control(':VOLT:LEV?', ':VOLT:LEV %g', 'AC voltage level, in Volts', validator=strict_range, values=[0, 20])
ac_current = Instrument.control(':CURR:LEV?', ':CURR:LEV %g', 'AC current level, in Amps', validator=strict_range, values=[0, 0.1])
frequency = Instrument.control(':FREQ:CW?', ':FREQ:CW %g', 'AC frequency (range depending on model), in Hertz', validator=strict_range, values=[20, 2000000.0])
impedance = Instrument.measurement(':FETCH?', 'Measured data A and B, according to :attr:`~.AgilentE4980.mode`', get_process=(lambda x: x[:2]))
mode = Instrument.control('FUNCtion:IMPedance:TYPE?', 'FUNCtion:IMPedance:TYPE %s', '\nSelect quantities to be measured:\n\n * CPD: Parallel capacitance [F] and dissipation factor [number]\n * CPQ: Parallel capacitance [F] and quality factor [number]\n * CPG: Parallel capacitance [F] and parallel conductance [S]\n * CPRP: Parallel capacitance [F] and parallel resistance [Ohm]\n\n - CSD: Series capacitance [F] and dissipation factor [number]\n - CSQ: Series capacitance [F] and quality factor [number]\n - CSRS: Series capacitance [F] and series resistance [Ohm]\n\n * LPD: Parallel inductance [H] and dissipation factor [number]\n * LPQ: Parallel inductance [H] and quality factor [number]\n * LPG: Parallel inductance [H] and parallel conductance [S]\n * LPRP: Parallel inductance [H] and parallel resistance [Ohm]\n\n - LSD: Series inductance [H] and dissipation factor [number]\n - LSQ: Seriesinductance [H] and quality factor [number]\n - LSRS: Series inductance [H] and series resistance [Ohm]\n\n * RX: Resitance [Ohm] and reactance [Ohm]\n * ZTD: Impedance, magnitude [Ohm] and phase [deg]\n * ZTR: Impedance, magnitude [Ohm] and phase [rad]\n * GB: Conductance [S] and susceptance [S]\n * YTD: Admittance, magnitude [Ohm] and phase [deg]\n * YTR: Admittance magnitude [Ohm] and phase [rad]\n', validator=strict_discrete_set, values=['CPD', 'CPQ', 'CPG', 'CPRP', 'CSD', 'CSQ', 'CSRS', 'LPD', 'LPQ', 'LPG', 'LPRP', 'LSD', 'LSQ', 'LSRS', 'RX', 'ZTD', 'ZTR', 'GB', 'YTD', 'YTR'])
trigger_source = Instrument.control('TRIG:SOUR?', 'TRIG:SOUR %s', '\nSelect trigger source; accept the values:\n * HOLD: manual\n * INT: internal\n * BUS: external bus (GPIB/LAN/USB)\n * EXT: external connector', validator=strict_discrete_set, values=['HOLD', 'INT', 'BUS', 'EXT'])
def __init__(self, adapter, name='Agilent E4980A/AL LCR meter', **kwargs):
super().__init__(adapter, name, **kwargs)
self.timeout = 30000
self.write('FORM ASC')
def freq_sweep(self, freq_list, return_freq=False):
self.write('TRIG:SOUR BUS')
self.write('DISP:PAGE LIST')
self.write('FORM ASC')
self.write('LIST:MODE SEQ')
lista_str = ','.join([('%e' % f) for f in freq_list])
self.write(('LIST:FREQ %s' % lista_str))
self.write('INIT:CONT ON')
self.write(':TRIG:IMM')
while 1:
try:
measured = self.values(':FETCh:IMPedance:FORMatted?')
break
except VisaIOError:
pass
self.write(':TRIG:SOUR HOLD')
a_data = [measured[_] for _ in range(0, (4 * len(freq_list)), 4)]
b_data = [measured[_] for _ in range(1, (4 * len(freq_list)), 4)]
if return_freq:
read_freqs = self.values('LIST:FREQ?')
return (a_data, b_data, read_freqs)
else:
return (a_data, b_data)
def aperture(self, time=None, averages=1):
    """Get or set the measurement aperture (integration time and averages).

    :param time: one of 'SHORT', 'MED', 'LONG' (case-insensitive) to set the
        aperture, or None to query the current setting
    :param averages: averaging count used when setting the aperture
    :return: when querying, a tuple ``(integration_time, averages)`` where
        the integration time is the instrument's string and averages an int
    :raises ValueError: if ``time`` is not one of the accepted strings
    """
    if time is None:
        # Query path: ':APER?' returns 'TIME,AVERAGES'.
        read_values = self.ask(':APER?').split(',')
        return (read_values[0], int(read_values[1]))
    elif time.upper() in ['SHORT', 'MED', 'LONG']:
        self.write(f':APER {time}, {averages}')
    else:
        # ValueError (a subclass of the previously raised bare Exception)
        # is the idiomatic error for an invalid argument value.
        raise ValueError('Time must be a string: SHORT, MED, LONG')
class DuckTestDrive():
    """Driver for the adapter-pattern demo: exercises a real Duck, a Turkey
    wrapped in a TurkeyAdapter, and a Drone wrapped in a DroneAdapter."""

    def main(*args):
        mallard: Duck = MallardDuck()
        wild_turkey: Turkey = WildTurkey()
        adapted_turkey: Duck = TurkeyAdapter(wild_turkey)

        print('The Turkey says...')
        wild_turkey.gobble()
        wild_turkey.fly()

        print('\nThe Duck says...')
        DuckTestDrive.testDuck(mallard)

        print('\nThe TurkeyAdapter says...')
        DuckTestDrive.testDuck(adapted_turkey)

        # A second adaptee type exercised through the same Duck interface.
        buzzing_drone: Drone = SuperDrone()
        adapted_drone: Duck = DroneAdapter(buzzing_drone)
        DuckTestDrive.testDuck(adapted_drone)

    def testDuck(duck: Duck) -> None:
        """Exercise the common Duck interface on any Duck-like object."""
        duck.quack()
        duck.fly()
class SmoothedValue(object):
    """Track a stream of scalar values and expose smoothed statistics over a
    sliding window, plus a global average over every value ever recorded."""

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)  # most recent values only
        self.series = []                        # complete history
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one new observation."""
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    def median(self):
        """Median of the values currently inside the window."""
        window = torch.tensor(list(self.deque))
        return window.median().item()

    def avg(self):
        """Mean of the values currently inside the window."""
        window = torch.tensor(list(self.deque))
        return window.mean().item()

    def global_avg(self):
        """Average over the full history, not just the window."""
        return self.total / self.count
class Effect6473(BaseEffect):
    """Active module effect: boosts the ship's maximum velocity (with
    stacking penalties) and adjusts its warp scramble status."""

    dealsDamage = True
    type = 'active'

    def handler(fit, mod, context, projectionRange, **kwargs):
        # Stacking-penalized velocity boost driven by the module's speedFactor.
        fit.ship.boostItemAttr('maxVelocity', mod.getModifiedItemAttr('speedFactor'), stackingPenalties=True, **kwargs)
        # NOTE(review): presumably alters warp scramble strength while the
        # module's siege mode is active — confirm against the source data.
        fit.ship.increaseItemAttr('warpScrambleStatus', mod.getModifiedItemAttr('siegeModeWarpStatus'), **kwargs)
def run_colmap(basedir, match_type):
    """Run the COLMAP structure-from-motion pipeline over the images in
    ``basedir/image``: feature extraction, matching, and sparse mapping.

    :param basedir: working directory containing an ``image`` subfolder;
        the database, logs, and ``sparse`` output are created here
    :param match_type: COLMAP matcher subcommand, e.g. 'exhaustive_matcher'
    :raises subprocess.CalledProcessError: if any COLMAP step fails
    """
    logfile_name = os.path.join(basedir, 'colmap_output.txt')
    database_path = os.path.join(basedir, 'database.db')
    image_path = os.path.join(basedir, 'image')
    # 'with' guarantees the log file is closed (and flushed) even when a
    # COLMAP step raises — previously the handle leaked on failure.
    with open(logfile_name, 'w') as logfile:
        feature_extractor_args = ['colmap', 'feature_extractor', '--database_path', database_path, '--image_path', image_path, '--ImageReader.single_camera', '1']
        logfile.write(subprocess.check_output(feature_extractor_args, universal_newlines=True))
        print('Features extracted')

        matcher_args = ['colmap', match_type, '--database_path', database_path]
        logfile.write(subprocess.check_output(matcher_args, universal_newlines=True))
        print('Features matched')

        sparse_path = os.path.join(basedir, 'sparse')
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(sparse_path, exist_ok=True)
        mapper_args = ['colmap', 'mapper', '--database_path', database_path, '--image_path', image_path, '--output_path', sparse_path, '--Mapper.num_threads', '16', '--Mapper.init_min_tri_angle', '4', '--Mapper.multiple_models', '0', '--Mapper.extract_colors', '0']
        logfile.write(subprocess.check_output(mapper_args, universal_newlines=True))
    print('Sparse map created')
    print('Finished running COLMAP, see {} for logs'.format(logfile_name))
def test_incorrect_interface_type_is_flagged():
    """A misspelled interface-settings keyword ('arsl' instead of 'asrl')
    must be rejected with a ValueError that names the offending key."""
    class BadKwargInstrument(Instrument):
        def __init__(self, adapter, name='Instrument with incorrect interface name', **kwargs):
            super().__init__(adapter, name=name, arsl={'read_termination': '\r\n'}, **kwargs)

    with pytest.raises(ValueError, match='arsl'):
        _ = BadKwargInstrument(adapter='ASRL1::INSTR', visa_library='')
class LegacyDistributedDataParallel(nn.Module):
    """Legacy distributed data-parallel wrapper that synchronizes gradients
    manually: gradients are packed into a flat per-device buffer, averaged
    with a single all-reduce per buffer-full, and unpacked back into the
    parameters."""

    def __init__(self, module, process_group, buffer_size=(2 ** 28)):
        """
        :param module: the wrapped model
        :param process_group: torch.distributed process group to reduce over
        :param buffer_size: maximum flat-buffer element count (capped at the
            model's total parameter count)
        """
        super().__init__()
        self.module = module
        self.process_group = process_group
        self.world_size = distributed_utils.get_world_size(self.process_group)
        self.buffer_size = min(buffer_size, sum((p.numel() for p in module.parameters())))
        # Flat reduction buffer, allocated lazily in all_reduce_grads().
        self.buffer = None
        # While True, gradient synchronization is skipped (accumulation mode).
        self.accumulate_grads = False
        # Group parameters by device so each group is reduced with a buffer
        # living on its own device.
        paramlists = OrderedDict()
        for param in self.module.parameters():
            device = param.device
            if (paramlists.get(device) is None):
                paramlists[device] = []
            paramlists[device] += [param]
        self.per_device_params = list(paramlists.values())

    def __getstate__(self):
        attrs = copy.copy(self.__dict__)
        return attrs

    def __setstate__(self, state):
        super().__setstate__(state)

    def no_sync(self):
        """Temporarily disable gradient synchronization.

        NOTE(review): generator-style body — upstream uses this with
        @contextlib.contextmanager; the decorator appears to have been
        stripped in this dump. Confirm before calling it directly.
        """
        old_accumulate_grads = self.accumulate_grads
        self.accumulate_grads = True
        (yield)
        self.accumulate_grads = old_accumulate_grads

    def forward(self, *inputs, **kwargs):
        # Transparent pass-through to the wrapped module.
        return self.module(*inputs, **kwargs)

    def all_reduce_grads(self):
        """Average gradients across workers, batching as many parameters as
        fit into the flat buffer per all-reduce call."""

        def all_reduce_params(params):
            # Pack the grads of `params` into one buffer, reduce, unpack.
            buffer = self.buffer
            nonzero_buffer = False
            if (len(params) > 1):
                offset = 0
                for p in params:
                    sz = p.numel()
                    if (p.grad is not None):
                        buffer[offset:(offset + sz)].copy_(p.grad.data.view((- 1)))
                        nonzero_buffer = True
                    else:
                        # A missing grad contributes zeros to the reduction.
                        buffer[offset:(offset + sz)].zero_()
                    offset += sz
            else:
                # Single parameter: reduce its grad tensor directly when set.
                p = params[0]
                if (p.grad is not None):
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif (p.numel() <= self.buffer.numel()):
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    buffer = torch.zeros_like(p)
            if nonzero_buffer:
                # Pre-divide so the all-reduce sum yields the average.
                buffer.div_(self.world_size)
            distributed_utils.all_reduce(buffer, self.process_group)
            # Copy reduced values back into each parameter's grad.
            offset = 0
            for p in params:
                sz = p.numel()
                if (p.grad is not None):
                    p.grad.data.copy_(buffer[offset:(offset + sz)].view_as(p))
                else:
                    p.grad = buffer[offset:(offset + sz)].view_as(p).clone()
                offset += sz

        def reduction_fn():
            if self.accumulate_grads:
                # Accumulation mode: defer synchronization entirely.
                return
            if (self.buffer is None):
                self.buffer = next(self.module.parameters()).new(self.buffer_size)
            for params in self.per_device_params:
                # Greedily fill the buffer; flush whenever the next parameter
                # would overflow it.
                offset = 0
                buffered_params = []
                for param in params:
                    if (not param.requires_grad):
                        continue
                    if (param.grad is None):
                        param.grad = torch.zeros_like(param)
                    if param.grad.requires_grad:
                        raise RuntimeError("DistributedDataParallel only works with gradients that don't require grad")
                    sz = param.numel()
                    if (sz > self.buffer.numel()):
                        # Parameter larger than the buffer: reduce it alone.
                        all_reduce_params([param])
                    else:
                        if ((offset + sz) > self.buffer.numel()):
                            all_reduce_params(buffered_params)
                            offset = 0
                            buffered_params.clear()
                        buffered_params.append(param)
                        offset += sz
                if (len(buffered_params) > 0):
                    all_reduce_params(buffered_params)

        reduction_fn()
# The two decorator lines were garbled in this dump (the second was even a
# syntax error: a keyword argument inside a bare tuple). Restored as mock
# patch decorators, matching the mock_* parameter names below.
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_round_trip(mock_invoke_step, mock_get_module):
    """A failing step must log its yaml position and append a runErrors
    entry to the context while re-raising the original exception."""
    complex_step_info = CommentedMap({'name': 'step1', 'swallow': 0})
    # Give the step a known yaml position (line 5, col 6, zero-based).
    complex_step_info._yaml_set_line_col(5, 6)
    step = Step(complex_step_info)
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            step.run_step(context)
        assert (str(err_info.value) == 'arb error here')
    # Reported position is 1-based, hence line 6 / col 7.
    mock_logger_error.assert_called_once_with('Error while running step step1 at pipeline yaml line: 6, col: 7')
    # Exactly one new context key: the runErrors record for the failure.
    assert (len(context) == (original_len + 1))
    assert (context['runErrors'] == [{'col': 7, 'customError': {}, 'description': 'arb error here', 'exception': err_info.value, 'line': 6, 'name': 'ValueError', 'step': step.name, 'swallowed': False}])
def split_splittable_port(port, k, lanes, dev):
    """Split *port* into *k* sub-ports, verify they exist with the expected
    lane count, then unsplit the port again.

    :param port: port object exposing ``name`` and ``bus_info``
    :param k: split factor
    :param lanes: total lane count of the original port
    :param dev: device handle passed through to the existence check
    """
    new_split_group = split(k, port)
    # Wait for udev to settle so the newly created ports are visible.
    cmd = 'udevadm settle'
    (stdout, stderr) = run_command(cmd)
    assert (stderr == '')
    if (new_split_group != []):
        # NOTE(review): lanes / k is true division — presumably lanes is
        # always an exact multiple of k so the per-port lane count is
        # integral; confirm with the callers.
        test(exists_and_lanes(new_split_group, (lanes / k), dev), ('split port %s into %s' % (port.name, k)))
    unsplit(port.bus_info)
class RequiredImgAssetTests(TestCase):
    """Tests for the RequiredImgAsset benefit-feature model."""

    def test_required_asset_class_inherits_from_expected_classed(self):
        classes = (RequiredAssetMixin, BaseRequiredImgAsset, BenefitFeature)
        # Bug fix: the issubclass() result was previously discarded, so this
        # test could never fail. Assert inheritance from every expected base.
        self.assertTrue(all(issubclass(RequiredImgAsset, cls) for cls in classes))

    def test_build_form_field_from_input(self):
        """as_form_field() must produce an optional ImageField mirroring the
        asset's help text and label."""
        text_asset = baker.make(RequiredImgAsset, _fill_optional=True)
        field = text_asset.as_form_field()
        self.assertIsInstance(field, forms.ImageField)
        self.assertFalse(field.required)
        self.assertEqual(text_asset.help_text, field.help_text)
        self.assertEqual(text_asset.label, field.label)
        self.assertIsInstance(field.widget, forms.ClearableFileInput)
def test_upload_collection_list_np_arrays():
    """Uploading a plain Python list of 1-D numpy arrays must produce
    identical collections in the local and remote clients."""
    dim = 50
    local_client = init_local()
    remote_client = init_remote()
    # Build UPLOAD_NUM_VECTORS random vectors, each one a numpy array.
    rows = np.random.randn(UPLOAD_NUM_VECTORS, dim).tolist()
    vectors = [np.array(row) for row in rows]
    params = models.VectorParams(size=dim, distance=models.Distance.EUCLID)
    for client in (local_client, remote_client):
        client.recreate_collection(COLLECTION_NAME, vectors_config=params, timeout=TIMEOUT)
    for client in (local_client, remote_client):
        client.upload_collection(COLLECTION_NAME, vectors)
    compare_collections(local_client, remote_client, UPLOAD_NUM_VECTORS)
    # Clean up so later tests start from a fresh state.
    local_client.delete_collection(COLLECTION_NAME)
    remote_client.delete_collection(COLLECTION_NAME)
class MainConfigTest(unittest.TestCase):
    """Tests for pynag.Parsers.main.MainConfig parsing behaviour."""

    def setUp(self):
        # Parse the fixture nagios.cfg from the bundled dataset.
        os.chdir(tests_dir)
        os.chdir('dataset01')
        self.main_config = pynag.Parsers.main.MainConfig(filename='./nagios/nagios.cfg')

    def test_normal(self):
        self.assertEqual('test.cfg', self.main_config.get('cfg_file'))
        self.assertEqual(['test.cfg'], self.main_config.get_list('cfg_file'))

    def test_parse_string_normal(self):
        parsed = self.main_config._parse_string('cfg_file=test.cfg')
        self.assertEqual([('cfg_file', 'test.cfg')], parsed)

    def test_parse_string_empty_line(self):
        # Blank lines and comment-only lines produce no attributes.
        self.assertEqual([], self.main_config._parse_string('#empty\n\n#line'))

    def test_parse_string_skips_comments(self):
        self.assertEqual([], self.main_config._parse_string('# this is a comment'))
# The decorator line was garbled in this dump (bare '.parametrize' call);
# restored as the pytest parametrize marker it must have been.
@pytest.mark.parametrize('dist_name, py_module', [('my.pkg', 'my_pkg'), ('my-pkg', 'my_pkg'), ('my_pkg', 'my_pkg'), ('pkg', 'pkg')])
def test_dist_default_py_modules(tmp_path, dist_name, py_module):
    """Auto-discovery must pick up the single top-level module (ignoring
    setup.py/noxfile.py) and must not run when py_modules or packages are
    given explicitly."""
    (tmp_path / f'{py_module}.py').touch()
    # Tool/config files that discovery must ignore.
    (tmp_path / 'setup.py').touch()
    (tmp_path / 'noxfile.py').touch()
    attrs = {**EXAMPLE_BASE_INFO, 'name': dist_name, 'src_root': str(tmp_path)}
    dist = Distribution(attrs)
    dist.set_defaults()
    assert (dist.py_modules == [py_module])
    # Explicit py_modules disables discovery.
    dist = Distribution({**attrs, 'py_modules': ['explicity_py_module']})
    dist.set_defaults()
    assert (dist.py_modules == ['explicity_py_module'])
    # Explicit packages also disables py_modules discovery.
    dist = Distribution({**attrs, 'packages': ['explicity_package']})
    dist.set_defaults()
    assert (not dist.py_modules)
class F40Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 40: maps each kickstart command
    and data-object name to the newest command/data class version valid for
    this release."""

    version = F40
    # Command name -> command class implementing it for F40.
    commandMap = {'auth': commands.authconfig.F35_Authconfig, 'authconfig': commands.authconfig.F35_Authconfig, 'authselect': commands.authselect.F28_Authselect, 'autopart': commands.autopart.F38_AutoPart, 'autostep': commands.autostep.F34_AutoStep, 'bootloader': commands.bootloader.F39_Bootloader, 'btrfs': commands.btrfs.F23_BTRFS, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.F28_ClearPart, 'cmdline': commands.displaymode.F26_DisplayMode, 'device': commands.device.F34_Device, 'deviceprobe': commands.deviceprobe.F34_DeviceProbe, 'dmraid': commands.dmraid.F34_DmRaid, 'driverdisk': commands.driverdisk.F14_DriverDisk, 'module': commands.module.F31_Module, 'eula': commands.eula.F20_Eula, 'fcoe': commands.fcoe.F28_Fcoe, 'firewall': commands.firewall.F28_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.F26_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.F23_Reboot, 'harddrive': commands.harddrive.F33_HardDrive, 'hmc': commands.hmc.F28_Hmc, 'ignoredisk': commands.ignoredisk.F34_IgnoreDisk, 'install': commands.install.F34_Install, 'iscsi': commands.iscsi.F17_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.F18_Keyboard, 'lang': commands.lang.F19_Lang, 'liveimg': commands.liveimg.F19_Liveimg, 'logging': commands.logging.F34_Logging, 'logvol': commands.logvol.F29_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.F34_Method, 'mount': commands.mount.F27_Mount, 'multipath': commands.multipath.F34_MultiPath, 'network': commands.network.F39_Network, 'nfs': commands.nfs.FC6_NFS, 'nvdimm': commands.nvdimm.F40_Nvdimm, 'timesource': commands.timesource.F33_Timesource, 'ostreecontainer': commands.ostreecontainer.F38_OSTreeContainer, 'ostreesetup': commands.ostreesetup.F38_OSTreeSetup, 'part': commands.partition.F34_Partition, 'partition': commands.partition.F34_Partition, 'poweroff': commands.reboot.F23_Reboot, 'raid': commands.raid.F29_Raid, 'realm': commands.realm.F19_Realm, 'reboot': commands.reboot.F23_Reboot, 'repo': commands.repo.F33_Repo, 'reqpart': commands.reqpart.F23_ReqPart, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F37_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.F23_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'snapshot': commands.snapshot.F26_Snapshot, 'sshpw': commands.sshpw.F24_SshPw, 'sshkey': commands.sshkey.F22_SshKey, 'text': commands.displaymode.F26_DisplayMode, 'timezone': commands.timezone.F33_Timezone, 'updates': commands.updates.F34_Updates, 'url': commands.url.F30_Url, 'user': commands.user.F24_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.F21_VolGroup, 'xconfig': commands.xconfig.F14_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F37_ZFCP, 'zipl': commands.zipl.F32_Zipl}
    # Data object name -> data class implementing it for F40.
    dataMap = {'BTRFSData': commands.btrfs.F23_BTRFSData, 'DriverDiskData': commands.driverdisk.F14_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'ModuleData': commands.module.F31_ModuleData, 'TimesourceData': commands.timesource.F33_TimesourceData, 'FcoeData': commands.fcoe.F28_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F17_IscsiData, 'LogVolData': commands.logvol.F29_LogVolData, 'MountData': commands.mount.F27_MountData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F39_NetworkData, 'NvdimmData': commands.nvdimm.F28_NvdimmData, 'PartData': commands.partition.F29_PartData, 'RaidData': commands.raid.F29_RaidData, 'RepoData': commands.repo.F30_RepoData, 'SnapshotData': commands.snapshot.F26_SnapshotData, 'SshPwData': commands.sshpw.F24_SshPwData, 'SshKeyData': commands.sshkey.F38_SshKeyData, 'UserData': commands.user.F19_UserData, 'VolGroupData': commands.volgroup.F21_VolGroupData, 'ZFCPData': commands.zfcp.F37_ZFCPData}
# The decorator line was garbled in this dump (bare '.parametrize' call);
# restored as the pytest parametrize marker it must have been.
@pytest.mark.parametrize('flat_fee, prop_fee, initial_amount, expected_amount', [(50, 0, 1000, ((1000 - 50) - 50)), (0, 1000000, 2000, 1000), (0, 100000, 1100, 1000), (0, 50000, 1050, 1000), (0, 10000, 1010, 1000), (0, 10000, 101, 100), (0, 4990, 100, 100), (1, 500000, ((1000 + 500) + 2), 1000), (10, 500000, ((1000 + 500) + 20), 997), (100, 500000, ((1000 + 500) + 200), 967), (1, 100000, ((1000 + 100) + 2), 1000), (10, 100000, ((1000 + 100) + 20), 999), (100, 100000, ((1000 + 100) + 200), 991), (1, 10000, ((1000 + 10) + 2), 1000), (10, 10000, ((1000 + 10) + 20), 1000), (100, 10000, ((1000 + 10) + 200), 999), (100, 500000, (1000 + 750), 1000), (0, 200000, (47 + 9), 47), (0, 200000, (39 + 8), 39)])
def test_get_lock_amount_after_fees(flat_fee, prop_fee, initial_amount, expected_amount):
    """The mediator's fee deduction (flat + proportional, applied on both the
    incoming and outgoing channel) must yield the expected locked amount."""
    prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
    lock = make_hash_time_lock_state(amount=initial_amount)
    # Incoming channel: fee schedule applies on the partner side.
    channel_in = factories.create(NettingChannelStateProperties(partner_state=NettingChannelEndStateProperties(balance=TokenAmount(2000)), fee_schedule=FeeScheduleState(flat=flat_fee, proportional=prop_fee_per_channel)))
    # Outgoing channel: fee schedule applies on our side.
    channel_out = factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(2000)), fee_schedule=FeeScheduleState(flat=flat_fee, proportional=prop_fee_per_channel)))
    locked_after_fees = get_amount_without_fees(amount_with_fees=lock.amount, channel_in=channel_in, channel_out=channel_out)
    assert (locked_after_fees == expected_amount)
def test_mpris2_no_scroll(fake_qtile, patched_module, fake_window):
    """With scrolling disabled, the Mpris2 widget must display the full
    track string, prefixed with 'Paused: ' when playback pauses."""
    mp = patched_module.Mpris2(scroll_chars=None)
    fakebar = FakeBar([mp], window=fake_window)
    # Replace the real timer so the widget updates synchronously in-test.
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    # Playing state: plain "title - album - artist" text.
    mp.parse_message(*METADATA_PLAYING.body)
    assert (mp.text == 'Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley')
    # Paused state: same text with a 'Paused: ' prefix.
    mp.parse_message(*METADATA_PAUSED.body)
    assert (mp.text == 'Paused: Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley')
def check_if_user_can_vote(user: User, conference: Conference):
    """Return True when *user* may vote at *conference*: staff members,
    speakers with a submission, admission-ticket holders (including tickets
    for any linked voting event), and Python Italia members."""
    if user.is_staff:
        return True

    is_speaker = Submission.objects.filter(speaker_id=user.id, conference=conference).exists()
    if is_speaker:
        return True

    # Tickets bought for any of the linked events also grant voting rights.
    additional_events = [
        {'organizer_slug': linked_event.pretix_organizer_id, 'event_slug': linked_event.pretix_event_id}
        for linked_event in conference.included_voting_events.all()
    ]
    has_ticket = user_has_admission_ticket(
        email=user.email,
        event_organizer=conference.pretix_organizer_id,
        event_slug=conference.pretix_event_id,
        additional_events=additional_events,
    )
    if has_ticket:
        return True

    # bool() keeps the strict True/False contract of the original code.
    return bool(user_is_python_italia_member(user.id))
def ql_syscall_wait4(ql: Qiling, pid: int, wstatus: int, options: int, rusage: int):
    """Emulate the wait4 syscall for the guest.

    :param pid: process selector (reinterpreted as signed; negative values
        select a process group per wait4 semantics)
    :param wstatus: guest pointer for the exit status, or 0 to skip
    :param options: waitpid-style option flags, passed through to the host
    :param rusage: guest pointer for resource usage (ignored here)
    :return: the reaped pid, or -ECHILD when there is no child to wait for
    """
    # Round-trip through pack/unpack to sign-extend the 32-bit pid value.
    pid = ql.unpack32s(ql.pack32(pid))
    try:
        (spid, status, _) = os.wait4(pid, options)
        # Write the 32-bit status back into guest memory when requested.
        if wstatus:
            ql.mem.write_ptr(wstatus, status, 4)
        retval = spid
    except ChildProcessError:
        # Host has no matching children: report ECHILD to the guest.
        retval = (- ECHILD)
    return retval
def verify(image, dimension):
    """Check that the named dimension of *image* is a power of two.

    :param image: object exposing a numeric attribute named *dimension*
    :param dimension: attribute name to check, e.g. 'width' or 'height'
    :raises Exception: if the dimension is not a power of 2
    """
    # getattr() is the idiomatic spelling of image.__getattribute__(dimension).
    value = getattr(image, dimension)
    # Repeatedly halve; any odd remainder along the way means the value has
    # a prime factor other than 2.
    while value > 1:
        half, remainder = divmod(value, 2)
        if remainder:
            raise Exception(('image %s is %d, which is not a power of 2' % (dimension, getattr(image, dimension))))
        value = half
def test_cache_race_condition():
    """Stress the compile cache: many processes compile the same trivial
    graph into one shared compiledir concurrently; all must exit cleanly."""
    with tempfile.TemporaryDirectory() as dir_name:
        # NOTE(review): bare call below looks like a stripped decorator or
        # context manager (e.g. pytensor config-flag overrides) — confirm
        # against the upstream test before relying on it.
        _flags(on_opt_error='raise', on_shape_error='raise')

        def f_build(factor):
            # Build and run a tiny graph; this triggers C compilation and
            # therefore exercises the on-disk module cache.
            a = pt.vector()
            f = pytensor.function([a], (factor * a))
            return f(np.array([1], dtype=config.floatX))

        ctx = multiprocessing.get_context()
        compiledir_prop = pytensor.config._config_var_dict['compiledir']
        # Point the compiledir at the temp dir and reset the in-process
        # module cache so every worker hits the disk cache.
        with patch.object(compiledir_prop, 'val', dir_name, create=True), patch.object(pytensor.link.c.cmodule, '_module_cache', None):
            assert (pytensor.config.compiledir == dir_name)
            num_procs = 30
            rng = np.random.default_rng(209)
            for i in range(10):
                factor = rng.random()
                procs = [ctx.Process(target=f_build, args=(factor,)) for i in range(num_procs)]
                for proc in procs:
                    proc.start()
                for proc in procs:
                    proc.join()
                # Every worker process must have exited successfully.
                assert (not any(((exit_code != 0) for exit_code in [proc.exitcode for proc in procs])))
def _get_next_free_filename():
    """Return the next unused long-filename (as bytes) and advance the
    persisted counter.

    The counter lives in the module global ``_free_name_counter`` and is
    persisted in ``_counter_filename``; when neither is available the long
    filenames directory is scanned for the highest used number.

    :return: the allocated filename, e.g. ``b'17'``
    """
    global _free_name_counter

    def scan_next_free():
        # Fallback: derive the counter by scanning existing numeric names.
        log.Log('Setting next free from long filenames dir', log.INFO)
        cur_high = 0
        for filename in _get_long_rp().listdir():
            try:
                num = int(filename.split(b'.')[0])
            except ValueError:
                # Non-numeric entries are ignored.
                continue
            cur_high = max(cur_high, num)
        return cur_high + 1

    def read_next_free():
        # Read the persisted counter; None when the file does not exist.
        rp = _get_long_rp(_counter_filename)
        if not rp.lstat():
            return None
        return int(rp.get_string())

    def write_next_free(i):
        # Persist the counter value *i*. Bug fix: the original wrote the
        # global _free_name_counter regardless of its argument.
        rp = _get_long_rp(_counter_filename)
        if rp.lstat():
            rp.delete()
        rp.write_string(str(i))
        rp.fsync_with_dir()

    if not _free_name_counter:
        _free_name_counter = read_next_free()
    if not _free_name_counter:
        _free_name_counter = scan_next_free()
    filename = b'%i' % _free_name_counter
    rp = _get_long_rp(filename)
    assert not rp.lstat(), "Unexpected file '{rp}' found".format(rp=rp)
    _free_name_counter += 1
    write_next_free(_free_name_counter)
    return filename
class APETextValue(_APEUtf8Value, MutableSequence):
    """A NUL-separated list of UTF-8 strings exposed as a mutable sequence;
    the underlying ``value`` stores the items joined by U+0000."""

    kind = TEXT

    def _parts(self):
        # Decode the stored value into its list of items.
        return self.value.split(u'\x00')

    def _store(self, parts):
        # Re-encode a list of items back into the stored value.
        self.value = u'\x00'.join(parts)

    def __iter__(self):
        return iter(self._parts())

    def __getitem__(self, index):
        return self._parts()[index]

    def __len__(self):
        # One more item than there are separators.
        return self.value.count(u'\x00') + 1

    def __setitem__(self, index, value):
        if not isinstance(value, str):
            raise TypeError('value not str')
        parts = self._parts()
        parts[index] = value
        self._store(parts)

    def insert(self, index, value):
        """Insert *value* (a str) at position *index*."""
        if not isinstance(value, str):
            raise TypeError('value not str')
        parts = self._parts()
        parts.insert(index, value)
        self._store(parts)

    def __delitem__(self, index):
        parts = self._parts()
        del parts[index]
        self._store(parts)

    def pprint(self):
        """Human-readable rendering of the items."""
        return u' / '.join(self)
def recursive_find_python_class(folder: str, class_name: str, current_module: str):
    """Search the modules under *folder* (importable as *current_module*) for
    an attribute named *class_name*, recursing into sub-packages.

    :param folder: filesystem path of the package directory to scan
    :param class_name: attribute name to look for in each module
    :param current_module: dotted import path corresponding to *folder*
    :return: the found attribute, or None
    """
    # Pass 1: plain modules directly inside the folder.
    for _importer, modname, ispkg in pkgutil.iter_modules([folder]):
        if ispkg:
            continue
        module = importlib.import_module('{}.{}'.format(current_module, modname))
        if hasattr(module, class_name):
            return getattr(module, class_name)
    # Pass 2: only if nothing was found, recurse into sub-packages.
    for _importer, modname, ispkg in pkgutil.iter_modules([folder]):
        if not ispkg:
            continue
        found = recursive_find_python_class(join(folder, modname), class_name, current_module='{}.{}'.format(current_module, modname))
        if found is not None:
            return found
    return None
class TPBGroupedWeightedPauliOperator(WeightedPauliOperator):
    """Weighted Pauli operator whose terms are grouped into tensor-product
    basis (TPB) sets that can be measured simultaneously.

    NOTE(review): num_groups/grouping_func/kwargs read like @property
    accessors, and sorted_grouping/unsorted_grouping take ``cls`` like
    @classmethod constructors — the decorators appear to have been stripped
    in this dump; confirm against the original source.
    """

    def __init__(self, paulis, basis, z2_symmetries=None, atol=1e-12, name=None, grouping_func=None, kwargs=None):
        # grouping_func/kwargs are remembered so results of arithmetic can
        # be re-grouped the same way (see _add_or_sub / multiply).
        super().__init__(paulis, basis, z2_symmetries, atol, name)
        self._grouping_func = grouping_func
        self._kwargs = (kwargs or {})

    def num_groups(self):
        # One basis entry per group.
        return len(self._basis)

    def grouping_func(self):
        return self._grouping_func

    def kwargs(self):
        return self._kwargs

    def sorted_grouping(cls, weighted_pauli_operator, method='largest-degree'):
        """Construct a grouped operator via graph coloring on the Pauli graph."""
        p_g = PauliGraph(weighted_pauli_operator.paulis, method)
        (basis, paulis) = _post_format_conversion(p_g.grouped_paulis)
        kwargs = {'method': method}
        return cls(paulis, basis, weighted_pauli_operator.z2_symmetries, weighted_pauli_operator.atol, weighted_pauli_operator.name, cls.sorted_grouping, kwargs)

    def unsorted_grouping(cls, weighted_pauli_operator):
        """Construct a grouped operator by greedy first-fit: each Pauli joins
        the first group whose accumulated basis it is qubit-wise compatible
        with."""
        paulis = weighted_pauli_operator.paulis
        temp_paulis = copy.deepcopy(paulis)
        n = paulis[0][1].num_qubits
        grouped_paulis = []
        sorted_paulis = []

        def check_pauli_in_list(target, pauli_list):
            # Membership test by Pauli equality (weights are ignored).
            ret = False
            for pauli in pauli_list:
                if (target[1] == pauli[1]):
                    ret = True
                    break
            return ret

        for (i, _) in enumerate(temp_paulis):
            p_1 = temp_paulis[i]
            if (not check_pauli_in_list(p_1, sorted_paulis)):
                paulis_temp = []
                # paulis_temp[0] accumulates the group's measurement basis;
                # its weight is zeroed because it is not a real term.
                paulis_temp.append(p_1)
                paulis_temp.append(copy.deepcopy(p_1))
                paulis_temp[0][0] = 0.0
                for j in range((i + 1), len(temp_paulis)):
                    p_2 = temp_paulis[j]
                    if ((not check_pauli_in_list(p_2, sorted_paulis)) and (p_1[1] != p_2[1])):
                        j = 0
                        for __i in range(n):
                            # Qubit-wise compatible when either operator is
                            # identity on this qubit, or both act the same.
                            if (not (((not p_2[1].z[__i]) and (not p_2[1].x[__i])) or ((not p_1[1].z[__i]) and (not p_1[1].x[__i])) or ((p_2[1].z[__i] == p_1[1].z[__i]) and (p_2[1].x[__i] == p_1[1].x[__i])))):
                                break
                            if (p_2[1].z[__i] or p_2[1].x[__i]):
                                # Fold the non-identity action into the basis.
                                paulis_temp[0][1].z[__i] = p_2[1].z[__i]
                                paulis_temp[0][1].x[__i] = p_2[1].x[__i]
                            j += 1
                        if (j == n):
                            # Compatible on all qubits: add to this group.
                            paulis_temp.append(p_2)
                            sorted_paulis.append(p_2)
                grouped_paulis.append(paulis_temp)
        (basis, new_paulis) = _post_format_conversion(grouped_paulis)
        return cls(new_paulis, basis, weighted_pauli_operator.z2_symmetries, weighted_pauli_operator.atol, weighted_pauli_operator.name, cls.unsorted_grouping)

    def __eq__(self, other):
        """Equal when the underlying operators match and the groupings have
        the same bases with the same group sizes."""
        if (not super().__eq__(other)):
            return False
        if (len(self._basis) != len(other.basis)):
            return False
        for (basis, indices) in self._basis:
            found_basis = False
            found_indices = []
            for (other_basis, other_indices) in other.basis:
                if (basis == other_basis):
                    found_basis = True
                    found_indices = other_indices
                    break
            if ((not found_basis) or (len(indices) != len(found_indices))):
                return False
        return True

    def __str__(self):
        # Compact one-line summary including the group count.
        curr_repr = 'tpb grouped paulis'
        length = len(self._paulis)
        name = ('' if (self._name is None) else '{}: '.format(self._name))
        ret = '{}Representation: {}, qubits: {}, size: {}, group: {}'.format(name, curr_repr, self.num_qubits, length, len(self._basis))
        return ret

    def print_details(self):
        """Multi-line dump: one section per TPB group listing its terms."""
        if self.is_empty():
            return 'Operator is empty.'
        ret = ''
        for (basis, indices) in self._basis:
            ret = ''.join([ret, 'TPB: {} ({})\n'.format(basis.to_label(), len(indices))])
            for idx in indices:
                (weight, pauli) = self._paulis[idx]
                ret = ''.join([ret, '{}\t{}\n'.format(pauli.to_label(), weight)])
        return ret

    def _add_or_sub(self, other, operation, copy=True):
        # Arithmetic invalidates the grouping, so regroup the result using
        # the same grouping function/options this operator was built with.
        ret_op = super()._add_or_sub(other, operation, copy)
        ret_op = self._grouping_func(ret_op, **self._kwargs)
        return ret_op

    def multiply(self, other):
        # See _add_or_sub: regroup after multiplication.
        ret_op = super().multiply(other)
        ret_op = self._grouping_func(ret_op, **self._kwargs)
        return ret_op
def tfm_assert_array_to_file_output(input_file, output_file, tfm, dtype_in='int16', dtype_out='int16', test_file_out=True, skip_array_tests=False, **kwargs):
    """Assert that the transformer *tfm* reproduces *output_file*'s samples
    for *input_file*, via both the array-building and file-building APIs.

    :param input_file: path to the source audio file
    :param output_file: path to the expected transformed audio file
    :param tfm: transformer object exposing build_array/build/build_file
    :param dtype_in: dtype used to read the input samples
    :param dtype_out: dtype used to read/compare the output samples
    :param test_file_out: also exercise the file-output build paths
    :param skip_array_tests: skip the in-memory build_array comparisons
    """
    (input_array, rate) = sf.read(input_file, dtype=dtype_in)
    (actual_output, _) = sf.read(output_file, dtype=dtype_out)
    if (not skip_array_tests):
        # build_array fed with an in-memory array must match the reference.
        est_array = tfm.build_array(input_array=input_array, sample_rate_in=rate, **kwargs)
        assert np.allclose(actual_output, est_array.astype(dtype_out))
        # build_array fed with a file path must match as well.
        est_array = tfm.build_array(input_filepath=input_file, sample_rate_in=rate, **kwargs)
        assert np.allclose(actual_output, est_array.astype(dtype_out))
    if test_file_out:
        # Write to a file via build() and compare what lands on disk.
        tfm.build(input_array=input_array, output_filepath=OUTPUT_FILE_ALT, sample_rate_in=rate, **kwargs)
        (est_array, _) = sf.read(OUTPUT_FILE_ALT, dtype=dtype_out)
        assert np.allclose(actual_output, est_array.astype(dtype_out))
        # Exercise build_file for coverage of the aliased entry point.
        tfm.build_file(input_array=input_array, output_filepath=OUTPUT_FILE_ALT, sample_rate_in=rate, **kwargs)
def lexical_overlap_rate(premise, hypothesis):
    """Fraction of hypothesis tokens (case-insensitive) that also occur in
    the premise.

    :param premise: premise sentence (str)
    :param hypothesis: hypothesis sentence (str)
    :return: overlap ratio in [0, 1]; 0.0 when the hypothesis tokenizes to
        nothing (previously this raised ZeroDivisionError)
    """
    # A set gives O(1) membership tests instead of scanning a list per token.
    premise_tokens = set(tokenizer.tokenize(premise.lower()))
    hypothesis_tokens = tokenizer.tokenize(hypothesis.lower())
    if not hypothesis_tokens:
        return 0.0
    overlap_cnt = sum(1 for tok in hypothesis_tokens if tok in premise_tokens)
    return overlap_cnt / len(hypothesis_tokens)
def create_duel_q_network(input_frames, num_actions, trainable, noisy):
    """Build a dueling Q-network head (value stream V and advantage stream A)
    on top of the shared convolutional trunk.

    :param input_frames: input placeholder/tensor of stacked frames
    :param num_actions: number of discrete actions (advantage output width)
    :param trainable: whether the created variables are trainable
    :param noisy: use noisy dense layers instead of plain dense layers
    :return: tuple (q_network tensor, list of variables)
    """
    (flat_output, flat_output_size, parameter_list) = create_conv_network(input_frames, trainable)
    if (noisy == False):
        # Value stream: flat features -> 512 relu -> scalar V(s).
        fcV_W = tf.get_variable(shape=[flat_output_size, 512], name='fcV_W', trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
        fcV_b = tf.Variable(tf.zeros([512], dtype=tf.float32), name='fcV_b', dtype=tf.float32, trainable=trainable)
        outputV = tf.nn.relu((tf.matmul(flat_output, fcV_W) + fcV_b), name='outputV')
        fcV2_W = tf.get_variable(shape=[512, 1], name='fcV2_W', trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
        fcV2_b = tf.Variable(tf.zeros([1], dtype=tf.float32), name='fcV2_b', trainable=trainable)
        outputV2 = (tf.matmul(outputV, fcV2_W) + fcV2_b)
        # Advantage stream: flat features -> 512 relu -> A(s, a) per action.
        fcA_W = tf.get_variable(shape=[flat_output_size, 512], name='fcA_W', trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
        fcA_b = tf.Variable(tf.zeros([512], dtype=tf.float32), name='fcA_b', trainable=trainable)
        outputA = tf.nn.relu((tf.matmul(flat_output, fcA_W) + fcA_b), name='outputA')
        fcA2_W = tf.get_variable(shape=[512, num_actions], name='fcA2_W', trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
        fcA2_b = tf.Variable(tf.zeros([num_actions], dtype=tf.float32), name='fcA2_b', trainable=trainable)
        outputA2 = (tf.matmul(outputA, fcA2_W) + fcA2_b)
        parameter_list += [fcV_W, fcV_b, fcV2_W, fcV2_b, fcA_W, fcA_b, fcA2_W, fcA2_b]
    else:
        # Noisy-net variant of the same two streams. (Note: 'ouputA' below
        # is a pre-existing variable-name typo, kept as-is.)
        (outputV, parameter_list_outputV) = noisy_dense(flat_output, name='fcV', input_size=flat_output_size, output_size=512, trainable=trainable, activation_fn=tf.nn.relu)
        (outputV2, parameter_list_outputV2) = noisy_dense(outputV, name='fcV2', input_size=512, output_size=1, trainable=trainable)
        (ouputA, parameter_list_outputA) = noisy_dense(flat_output, name='fcA', input_size=flat_output_size, output_size=512, trainable=trainable, activation_fn=tf.nn.relu)
        (outputA2, parameter_list_outputA2) = noisy_dense(ouputA, name='fcA2', input_size=512, output_size=num_actions, trainable=trainable)
        parameter_list += (((parameter_list_outputA + parameter_list_outputA2) + parameter_list_outputV) + parameter_list_outputV2)
    # Dueling combination Q = V + (A - mean(A)).
    # NOTE(review): the outer relu clamps Q-values to be non-negative, which
    # is unusual for a dueling head — confirm this is intentional.
    q_network = tf.nn.relu(((outputV2 + outputA2) - tf.reduce_mean(outputA2)), name='q_network')
    return (q_network, parameter_list)
class IndexedDatasetBuilder(object):
    """Incrementally writes tensors into a flat binary data file and records
    the offsets/sizes needed to emit a companion index file."""

    # Bytes per element for each supported dtype.
    element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, float: 4, np.double: 8}

    def __init__(self, out_file, dtype=np.int32):
        """
        :param out_file: path of the binary data file to create
        :param dtype: numpy dtype used to serialize tensor data
        """
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]   # cumulative element offsets per item
        self.dim_offsets = [0]    # cumulative number of recorded dimensions
        self.sizes = []           # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Serialize one tensor and record its offset/size bookkeeping."""
        # 'nbytes' avoids shadowing the builtin 'bytes'. The byte count is an
        # exact multiple of element_size, so integer division is safe and
        # keeps the offsets as ints (previously '/' produced floats).
        nbytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def finalize(self, index_file):
        """Close the data file and write the index file: magic, version,
        dtype code + element size, counts, then the three offset arrays."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
def similarity_transform(xsys, T, timescale=1, inverse=False):
    """Perform a similarity transform of the state-space system *xsys*,
    with optional time rescaling.

    The matrix-multiplication operators ('@') had been stripped from this
    block by extraction (leaving syntax errors); they are restored here.

    :param xsys: StateSpace system to transform
    :param T: transformation matrix (z = T x when inverse is False)
    :param timescale: divide the A and B matrices by this time scaling
    :param inverse: if True, T maps the new state to the old one (x = T z)
    :return: the transformed StateSpace system
    """
    zsys = StateSpace(xsys)
    T = np.atleast_2d(T)

    def rsolve(M, y):
        # Solve x M = y for x (right division), via the transposed system.
        return transpose(solve(transpose(M), transpose(y)))

    if (not inverse):
        # z = T x:  A_z = T A T^-1, B_z = T B, C_z = C T^-1
        zsys.A = (rsolve(T, (T @ zsys.A)) / timescale)
        zsys.B = ((T @ zsys.B) / timescale)
        zsys.C = rsolve(T, zsys.C)
    else:
        # x = T z:  A_z = T^-1 A T, B_z = T^-1 B, C_z = C T
        zsys.A = ((solve(T, zsys.A) @ T) / timescale)
        zsys.B = (solve(T, zsys.B) / timescale)
        zsys.C = (zsys.C @ T)
    return zsys
def wait_for_gpus(world_size, timeout_secs=3600):
    """Block until the Ray cluster reports at least *world_size* GPUs.

    :param world_size: number of GPUs required
    :param timeout_secs: give up after this many seconds
    :raises RuntimeError: when the timeout elapses before enough GPUs appear
    """
    poll_interval = 10  # seconds between cluster-resource polls
    n_gpus = int(ray.cluster_resources().get('GPU', 0))
    elapsed_time = 0
    while n_gpus < world_size:
        # Fixed the missing space in the original message ('available,need').
        logging.warning(f'Not enough GPUs available ({n_gpus} available, need {world_size}), waiting {poll_interval} seconds')
        time.sleep(poll_interval)
        elapsed_time += poll_interval
        if elapsed_time > timeout_secs:
            raise RuntimeError('Timeout: could not find enough GPUs')
        n_gpus = int(ray.cluster_resources().get('GPU', 0))
def _override_attr(sub_node: str, data_class: Type[FairseqDataclass], args: Namespace) -> List[str]:
    """Build hydra-style override strings ('node.key=value') for every field
    of *data_class* that has a counterpart attribute on *args*.

    :param sub_node: dotted config-node prefix for the overrides
    :param data_class: dataclass whose fields define the candidate keys
    :param args: argparse namespace holding the values
    :return: list of override strings
    """
    overrides = []
    for k in data_class.__dataclass_fields__.keys():
        if k == '_name':
            # Internal bookkeeping field, never overridden.
            continue
        if not hasattr(args, k):
            continue
        # Hoisted: the original re-read getattr(args, k) up to 7 times here.
        v = getattr(args, k)
        if v is None:
            overrides.append('{}.{}=null'.format(sub_node, k))
        elif v == '':
            overrides.append("{}.{}=''".format(sub_node, k))
        elif isinstance(v, str):
            if v.startswith(('[', '(', '{')) or (',' in v):
                # Quote values YAML would otherwise parse as structures.
                overrides.append("{}.{}='{}'".format(sub_node, k, v))
            else:
                overrides.append('{}.{}={}'.format(sub_node, k, v))
        else:
            overrides.append('{}.{}={}'.format(sub_node, k, v))
    return overrides
def split_rxn_parts(rxn):
    """Split a reaction SMILES 'reactants>agents>products' into three sets of
    RDKit Mol objects.

    :param rxn: reaction SMILES string
    :return: [reactants, agents, products]; agents is empty when the middle
        field is blank
    """
    parts = rxn.strip().split('>')
    # Deduplicate SMILES per section first, then build one Mol per unique
    # SMILES string.
    reactants = {Chem.MolFromSmiles(smi) for smi in set(parts[0].split('.'))}
    agents = {Chem.MolFromSmiles(smi) for smi in set(parts[1].split('.'))} if parts[1] else set()
    products = {Chem.MolFromSmiles(smi) for smi in set(parts[2].split('.'))}
    return [reactants, agents, products]
def test_not_strict_mode():
    """In non-strict mode an unparseable METAR must produce a warning and a
    partially decoded report instead of raising a parser error."""
    code = 'K9L2 100958Z AUTO 33006KT 10SM CLR M A3007 RMK AO2 SLPNO FZRANO $'
    # Strict parsing must still fail on this report (presumably the stray
    # 'M' group — confirm which group trips the parser).
    raisesParserError(code)
    with warnings.catch_warnings(record=True) as w:
        report = Metar.Metar(code, strict=False)
    # Exactly one warning, and decoding stopped before completion.
    assert (len(w) == 1)
    assert (not report.decode_completed)
    # Fields parsed before the failure point are still available.
    assert (report.cycle == 10)
    assert (report.mod == 'AUTO')
    assert (not report.recent)
    assert (report.station_id == 'K9L2')
    assert (report.vis.value() == 10)
    assert (report.sky_conditions() == 'clear')
class ClientTests(CommonTests, AsyncioTestCase):
    """Client-side close-handshake timeout tests: each test bounds how long
    close() may take when some stage of the closing sequence stalls."""

    def setUp(self):
        super().setUp()
        # Run the shared protocol fixtures in client mode.
        self.protocol.is_client = True
        self.protocol.side = 'client'

    def test_local_close_send_close_frame_timeout(self):
        """Sending the close frame stalls: close() gives up after roughly
        one close_timeout."""
        self.protocol.close_timeout = (10 * MS)
        # Writing takes 50ms, longer than the 10ms close timeout.
        self.make_drain_slow((50 * MS))
        with self.assertCompletesWithin((19 * MS), (29 * MS)):
            self.loop.run_until_complete(self.protocol.close(reason='close'))
        self.assertConnectionClosed(CloseCode.ABNORMAL_CLOSURE, '')

    def test_local_close_receive_close_frame_timeout(self):
        """The peer never echoes the close frame: close() times out and the
        closure is abnormal."""
        self.protocol.close_timeout = (10 * MS)
        with self.assertCompletesWithin((19 * MS), (29 * MS)):
            self.loop.run_until_complete(self.protocol.close(reason='close'))
        self.assertConnectionClosed(CloseCode.ABNORMAL_CLOSURE, '')

    def test_local_close_connection_lost_timeout_after_write_eof(self):
        """Close handshake completes but the TCP connection is only
        half-closed (EOF written): close() still finishes within bounds."""
        self.protocol.close_timeout = (10 * MS)
        with self.assertCompletesWithin((19 * MS), (29 * MS)):
            self.transport._eof = True
            self.receive_frame(self.close_frame)
            self.run_loop_once()
            self.loop.run_until_complete(self.protocol.close(reason='close'))
        self.assertConnectionClosed(CloseCode.NORMAL_CLOSURE, 'close')

    def test_local_close_connection_lost_timeout_after_close(self):
        """As above, but the transport is also closing, which adds a second
        timeout window before close() returns."""
        self.protocol.close_timeout = (10 * MS)
        with self.assertCompletesWithin((29 * MS), (49 * MS)):
            self.transport._eof = True
            self.transport._closing = True
            self.receive_frame(self.close_frame)
            self.run_loop_once()
            self.loop.run_until_complete(self.protocol.close(reason='close'))
        self.assertConnectionClosed(CloseCode.NORMAL_CLOSURE, 'close')
def display_suite_metadata(suite, title=None):
    """Print a human-readable summary of a benchmark suite's metadata.

    The optional *title* (with an ``=`` underline) is printed only once,
    just before the first metadata line, and only if any metadata exists.
    A trailing blank line is printed when anything at all was shown.
    """
    meta = suite.get_metadata()
    printed_anything = False
    known_fields = (
        ('performance_version', 'Performance version: %s'),
        ('python_version', 'Python version: %s'),
        ('platform', 'Report on %s'),
        ('cpu_count', 'Number of logical CPUs: %s'),
    )
    for field_name, template in known_fields:
        if field_name not in meta:
            continue
        printed_anything = True
        if title:
            print(title)
            print('=' * len(title))
            print()
            title = None  # emit the header at most once
        print(template % meta[field_name])
    dates = suite.get_dates()
    if dates:
        print('Start date: %s' % dates[0].isoformat(' '))
        print('End date: %s' % dates[1].isoformat(' '))
        printed_anything = True
    if printed_anything:
        print()
def get_ordered_ops(graph: tf.Graph, starting_op_names: List[str], output_op_names: List[str]) -> List[tf.Operation]:
    """Return ops reachable downstream of the starting ops, topologically ordered.

    Performs a post-order DFS from each starting op (parents end up before
    their children after the final reversal), then filters the result down
    to the ops considered valid between the starting and output ops.
    """
    valid_ops = get_valid_ops(graph, starting_op_names, output_op_names)
    seen = set()
    post_order = []

    def _visit(op: tf.Operation):
        # post-order: recurse into all consumers first, then record the op
        seen.add(op)
        for tensor in op.outputs:
            for child_op in tensor.consumers():
                if child_op not in seen:
                    _visit(child_op)
        post_order.append(op)

    for op_name in starting_op_names:
        _visit(graph.get_operation_by_name(op_name))
    post_order.reverse()
    return [op for op in post_order if op in valid_ops]
def test_one_parameter_multiple_calls() -> None:
    """RecursionTable records every fib() call frame, in call order.

    NOTE: the traced function must be named 'fib' and its parameter 'n' —
    RecursionTable is constructed with the name and the asserted strings
    ('fib(3)', key 'n') depend on it.
    """
    with RecursionTable('fib') as table:
        def fib(n):
            # naive double recursion: fib(3) -> fib(1), fib(2); fib(2) -> fib(0), fib(1)
            if (n in [0, 1]):
                return 1
            else:
                return (fib((n - 2)) + fib((n - 1)))
        fib(3)
        recursive_dict = table.get_recursive_dict()
        # three tracked columns: the parameter, the caller, and the return value
        assert (len(list(recursive_dict.keys())) == 3)
        # rows follow execution order of the five calls
        assert (recursive_dict['n'] == [3, 1, 2, 0, 1])
        assert (recursive_dict['called by'] == ['N/A', 'fib(3)', 'fib(3)', 'fib(2)', 'fib(2)'])
        assert (recursive_dict['return value'] == [3, 1, 2, 1, 1])
def clip_gradients(model, i_iter, writer, config):
    """Clip gradients in-place after backward() and log the pre-clip norm.

    Args:
        model: the model whose parameters hold ``.grad`` tensors; for the
            'question' mode it must expose ``model.module.question_embedding_module``
            (i.e. be wrapped, e.g. in DataParallel — TODO confirm with callers).
        i_iter: current iteration index, used as the logging step.
        writer: logger exposing ``add_scalars(dict, step)``.
        config: dict with ``training_parameters.max_grad_l2_norm`` (None
            disables clipping) and ``training_parameters.clip_norm_mode``
            ('all' clips every parameter, 'question' only the question
            embedding module).

    Raises:
        NotImplementedError: for an unknown ``clip_norm_mode``.
    """
    max_grad_l2_norm = config['training_parameters']['max_grad_l2_norm']
    clip_norm_mode = config['training_parameters']['clip_norm_mode']
    if max_grad_l2_norm is not None:
        if clip_norm_mode == 'all':
            norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_l2_norm)
            writer.add_scalars({'grad_norm': norm}, i_iter)
        elif clip_norm_mode == 'question':
            question_embedding = model.module.question_embedding_module
            # was the deprecated clip_grad_norm (no underscore); use the
            # in-place variant for consistency with the 'all' branch
            norm = nn.utils.clip_grad_norm_(question_embedding.parameters(), max_grad_l2_norm)
            writer.add_scalars({'question_grad_norm': norm}, i_iter)
        else:
            raise NotImplementedError(('Clip norm mode %s not implemented' % clip_norm_mode))
class LockTimeRawEdit(QLineEdit, _LockTimeEditor):
    """Line edit that accepts only a raw numeric locktime.

    Every text change is funnelled through :meth:`numbify`, which strips
    non-digit characters, clamps the value into
    ``[min_allowed_value, max_allowed_value]`` (inherited from
    ``_LockTimeEditor``) and restores the caret position.
    """

    def __init__(self, parent=None):
        QLineEdit.__init__(self, parent)
        self.setFixedWidth((14 * char_width_in_lineedit()))
        self.textChanged.connect(self.numbify)

    def numbify(self):
        """Strip non-digit characters from the field, preserving the caret."""
        text = self.text().strip()
        # FIX: was chars = '' which filtered out *every* character,
        # erasing the field on each keystroke; only digits are allowed
        chars = '0123456789'
        pos = self.cursorPosition()
        # recompute the caret position within the digits-only string
        pos = len(''.join([i for i in text[:pos] if (i in chars)]))
        s = ''.join([i for i in text if (i in chars)])
        self.set_locktime(s)
        self.setModified(self.hasFocus())
        self.setCursorPosition(pos)

    def get_locktime(self) -> Optional[int]:
        """Return the current value as an int, or None if the field is not numeric."""
        try:
            return int(str(self.text()))
        except (ValueError, TypeError):
            return None

    def set_locktime(self, x: Any) -> None:
        """Set the field to int(x) clamped to the allowed range; clear on non-numeric."""
        try:
            x = int(x)
        except (ValueError, TypeError):
            self.setText('')
            return
        x = max(x, self.min_allowed_value)
        x = min(x, self.max_allowed_value)
        self.setText(str(x))
def calc_sim(sent_visual, sent_caption):
    """Score similarity between a caption and a list of visual sentences.

    Averages two signals: (1) BLEU of the caption against the visual
    sentences (1.0 on an exact token match) and (2) the mean fraction of
    each stop-word-free visual sentence covered by the caption's tokens.
    """
    def _tokenize(sentence):
        tokens = [ps.stem(word.lower()) for word in word_tokenize(sentence)]
        filtered = [word for word in tokens if word not in stop_words]
        return (tokens, filtered)
    processed = [list(_tokenize(sentence)) for sentence in sent_visual]
    sent_visual = [pair[0] for pair in processed]
    sent_visual_remove_stop_words = [pair[1] for pair in processed]
    (sent_caption, sent_caption_remove_stop_words) = _tokenize(sent_caption)
    if sent_caption in sent_visual:
        score1 = 1
    else:
        score1 = sentence_bleu(sent_visual, sent_caption, weights=(0.25, 0.25, 0.25, 0.25))
    caption_tokens = set(sent_caption_remove_stop_words)
    overlap_ratios = [(len(set(v_sent) & caption_tokens) / len(v_sent)) for v_sent in sent_visual_remove_stop_words if len(v_sent) != 0]
    score2 = np.mean(overlap_ratios)
    return np.mean([score1, score2])
def fopsort(filename):
    """Sort, deduplicate and domain-combine the adblock filters in *filename*, in place.

    The file is rewritten section by section (sections are delimited by
    comment/!, %include and [...] header lines).  Within a section, filters
    are deduplicated and sorted; adjacent filters that differ only in their
    domain list are merged into one filter with a combined domain list.
    The file is only replaced when the sorted output actually differs.
    """
    # FIX: was f'(unknown).temp' — an f-string with no placeholder, so every
    # file shared one temp name; derive the temp file from the input name
    temporaryfile = f'{filename}.temp'
    # sample the first few lines of each section to decide whether it is
    # predominantly element-hiding rules or blocking filters
    check_lines = 10
    section = []
    lineschecked = 1
    filterlines = elementlines = 0
    with open(filename, 'r', encoding='utf-8', newline='\n') as inputfile, open(temporaryfile, 'w', encoding='utf-8', newline='\n') as outputfile:
        def combinefilters(uncombinedFilters, DOMAINPATTERN, domainseparator):
            """Merge consecutive filters that are identical apart from their domain list."""
            combinedFilters = []
            for (i, uncombinedFilter) in enumerate(uncombinedFilters):
                domains1 = re.search(DOMAINPATTERN, uncombinedFilter)
                if (((i + 1) < len(uncombinedFilters)) and domains1):
                    domains2 = re.search(DOMAINPATTERN, uncombinedFilters[(i + 1)])
                    domain1str = domains1.group(1)
                    if ((not domains1) or ((i + 1) == len(uncombinedFilters)) or (not domains2) or (len(domain1str) == 0) or (len(domains2.group(1)) == 0)):
                        # no combinable neighbour: keep the filter as-is
                        combinedFilters.append(uncombinedFilter)
                    else:
                        domain2str = domains2.group(1)
                        if (domains1.group(0).replace(domain1str, domain2str, 1) != domains2.group(0)):
                            combinedFilters.append(uncombinedFilter)
                        elif (re.sub(DOMAINPATTERN, '', uncombinedFilter) == re.sub(DOMAINPATTERN, '', uncombinedFilters[(i + 1)])):
                            # same rule body — merge the two domain lists, sorted
                            # with the '~' exclusion prefix ignored for ordering
                            newDomains = f'{domain1str}{domainseparator}{domain2str}'
                            newDomains = domainseparator.join(sorted(set(newDomains.split(domainseparator)), key=(lambda domain: domain.strip('~'))))
                            # do not merge a purely-negated list with a mixed one
                            if ((domain1str.count('~') != (domain1str.count(domainseparator) + 1)) != (domain2str.count('~') != (domain2str.count(domainseparator) + 1))):
                                combinedFilters.append(uncombinedFilter)
                            else:
                                # fold the merge into the next filter; the current one is dropped
                                domainssubstitute = domains1.group(0).replace(domain1str, newDomains, 1)
                                uncombinedFilters[(i + 1)] = re.sub(DOMAINPATTERN, domainssubstitute, uncombinedFilter)
                        else:
                            combinedFilters.append(uncombinedFilter)
                else:
                    combinedFilters.append(uncombinedFilter)
            return combinedFilters
        def writefilters():
            """Sort the current section and write it out, element-hiding or blocking style."""
            if (elementlines > filterlines):
                # sort element rules ignoring their domain prefix
                uncombinedFilters = sorted(set(section), key=(lambda rule: re.sub(ELEMENTDOMAINPATTERN, '', rule)))
                outputfile.write('{filters}\n'.format(filters='\n'.join(combinefilters(uncombinedFilters, ELEMENTDOMAINPATTERN, ','))))
            else:
                uncombinedFilters = sorted(set(section), key=str.lower)
                outputfile.write('{filters}\n'.format(filters='\n'.join(combinefilters(uncombinedFilters, FILTERDOMAINPATTERN, '|'))))
        for line in inputfile:
            line = line.strip()
            if (not re.match(BLANKPATTERN, line)):
                # comments, %include directives and [..] headers end a section
                if ((line[0] == '!') or (line[:8] == '%include') or ((line[0] == '[') and (line[(- 1)] == ']'))):
                    if section:
                        writefilters()
                        section = []
                        lineschecked = 1
                        filterlines = elementlines = 0
                    outputfile.write(f'''{line}
''')
                else:
                    elementparts = re.match(ELEMENTPATTERN, line)
                    if elementparts:
                        domains = elementparts.group(1).lower()
                        if (lineschecked <= check_lines):
                            elementlines += 1
                            lineschecked += 1
                        line = elementtidy(domains, elementparts.group(2), elementparts.group(3))
                    else:
                        if (lineschecked <= check_lines):
                            filterlines += 1
                            lineschecked += 1
                        line = filtertidy(line, filename)
                    section.append(line)
        if section:
            writefilters()
    # replace the original only if sorting actually changed something
    if (not filecmp.cmp(temporaryfile, filename)):
        os.replace(temporaryfile, filename)
        (head, tail) = os.path.split(filename)
        print(f'- Sorted: {tail}')
    else:
        os.remove(temporaryfile)
def print_usage():
    """Print the imapbackup command-line usage text and exit with status 2."""
    usage_lines = (
        'Usage: imapbackup [OPTIONS] -s HOST -u USERNAME [-p PASSWORD]',
        ' -d DIR --mbox-dir=DIR Write mbox files to directory. (defaults to cwd)',
        ' -a --append-to-mboxes Append new messages to mbox files. (default)',
        ' -y --yes-overwrite-mboxes Overwite existing mbox files instead of appending.',
        ' -f FOLDERS --folders=FOLDERS Specify which folders to include. Comma separated list.',
        ' --exclude-folders=FOLDERS Specify which folders to exclude. Comma separated list.',
        ' You cannot use both --folders and --exclude-folders.',
        ' -e --ssl Use SSL. Port defaults to 993.',
        ' -k KEY --key=KEY PEM private key file for SSL. Specify cert, too.',
        ' -c CERT --cert=CERT PEM certificate chain for SSL. Specify key, too.',
        " Python's SSL module doesn't check the cert chain.",
        ' -s HOST --server=HOST Address of server, port optional, eg. mail.com:143',
        ' -u USER --user=USER Username to log into server',
        ' -p PASS --pass=PASS Prompts for password if not specified. If the first',
        " character is '', treat the rest as a path to a file",
        " containing the password. Leading '' makes it literal.",
        ' -t SECS --timeout=SECS Sets socket timeout to SECS seconds.',
        ' --thunderbird Create Mozilla Thunderbird compatible mailbox',
        ' --nospinner Disable spinner (makes output log-friendly)',
    )
    for usage_line in usage_lines:
        print(usage_line)
    # conventional exit status for a usage error
    sys.exit(2)
def _r2_score_compute(sum_squared_obs: torch.Tensor, sum_obs: torch.Tensor, rss: torch.Tensor, num_obs: torch.Tensor, multioutput: str, num_regressors: int) -> torch.Tensor:
if (num_obs < 2):
raise ValueError('There is no enough data for computing. Needs at least two samples to calculate r2 score.')
if (num_regressors >= (num_obs - 1)):
raise ValueError(f'The `num_regressors` must be smaller than n_samples - 1, got num_regressors={num_regressors}, n_samples={num_obs}.')
return _compute(sum_squared_obs, sum_obs, rss, num_obs, multioutput, num_regressors) |
def main():
    """Evaluate previously saved test results against a dataset's metrics."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    assert args.eval is not None
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    cfg.data.test.test_mode = True
    dataset = build_dataset(cfg.data.test)
    outputs = mmcv.load(args.results)
    extra_options = args.eval_options if args.eval_options is not None else {}
    eval_kwargs = cfg.get('evaluation', {}).copy()
    # strip runner-only options that dataset.evaluate() does not accept
    for runner_key in ('interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', 'by_epoch'):
        eval_kwargs.pop(runner_key, None)
    eval_kwargs.update(dict(metrics=args.eval, **extra_options))
    print(dataset.evaluate(outputs, **eval_kwargs))
def main():
    """Train and/or evaluate a masked language model (HF run_mlm-style pipeline).

    Parses model/data/training arguments (from a single JSON file or the
    command line), sets up logging and checkpoint resumption, loads and
    tokenizes the dataset, builds the model/tokenizer, then runs training
    and/or evaluation with a masked-LM data collator.
    """
    # --- argument parsing: either one .json config file or CLI flags ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- logging setup, mirrored into datasets/transformers loggers ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # NOTE(review): the two f-strings are concatenated without a separator, so the
    # message reads "...16-bits training: Falsedistributed training: ..." — cosmetic only
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    # --- detect an existing checkpoint to resume from ---
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # --- dataset loading: hub dataset name, or local train/validation files;
    # when no validation split exists, carve one out of train by percentage ---
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir)
            raw_datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir)
    else:
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
            extension = data_args.train_file.split('.')[(- 1)]
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
            extension = data_args.validation_file.split('.')[(- 1)]
        if (extension == 'txt'):
            extension = 'text'
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir)
            raw_datasets['train'] = load_dataset(extension, data_files=data_files, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir)
    # --- model config: named config > model's own config > fresh from model_type ---
    config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if (model_args.config_overrides is not None):
        logger.info(f'Overriding config: {model_args.config_overrides}')
        config.update_from_string(model_args.config_overrides)
        logger.info(f'New config: {config}')
    # --- tokenizer: must come from somewhere pretrained, never from scratch ---
    tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    # --- model: load pretrained weights or initialize from config ---
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if training_args.do_train:
        column_names = raw_datasets['train'].column_names
    else:
        column_names = raw_datasets['validation'].column_names
    text_column_name = ('text' if ('text' in column_names) else column_names[0])
    # --- choose the sequence length, capped at the tokenizer's model_max_length ---
    if (data_args.max_seq_length is None):
        max_seq_length = tokenizer.model_max_length
        if (max_seq_length > 1024):
            logger.warning(f'The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.')
            max_seq_length = 1024
    else:
        if (data_args.max_seq_length > tokenizer.model_max_length):
            logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # --- tokenization: line-by-line (truncate each line) vs. concatenate-and-chunk ---
    if data_args.line_by_line:
        padding = ('max_length' if data_args.pad_to_max_length else False)
        def tokenize_function(examples):
            # drop empty/whitespace-only lines before tokenizing
            examples[text_column_name] = [line for line in examples[text_column_name] if ((len(line) > 0) and (not line.isspace()))]
            return tokenizer(examples[text_column_name], padding=padding, truncation=True, max_length=max_seq_length, return_special_tokens_mask=True)
        with training_args.main_process_first(desc='dataset map tokenization'):
            tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset line_by_line')
    else:
        def tokenize_function(examples):
            return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
        with training_args.main_process_first(desc='dataset map tokenization'):
            tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on every text in dataset')
        def group_texts(examples):
            # concatenate all sequences, then slice into max_seq_length chunks,
            # dropping the remainder shorter than one chunk
            concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
            total_length = len(concatenated_examples[list(examples.keys())[0]])
            if (total_length >= max_seq_length):
                total_length = ((total_length // max_seq_length) * max_seq_length)
            result = {k: [t[i:(i + max_seq_length)] for i in range(0, total_length, max_seq_length)] for (k, t) in concatenated_examples.items()}
            return result
        with training_args.main_process_first(desc='grouping texts together'):
            tokenized_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc=f'Grouping texts in chunks of {max_seq_length}')
    if training_args.do_train:
        if ('train' not in tokenized_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = tokenized_datasets['train']
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if ('validation' not in tokenized_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = tokenized_datasets['validation']
        if (data_args.max_eval_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        def preprocess_logits_for_metrics(logits, labels):
            # keep only the argmax to avoid accumulating full logits in memory
            if isinstance(logits, tuple):
                logits = logits[0]
            return logits.argmax(dim=(- 1))
        metric = load_metric('accuracy')
        def compute_metrics(eval_preds):
            # accuracy over non-masked label positions only (-100 is the ignore index)
            (preds, labels) = eval_preds
            labels = labels.reshape((- 1))
            preds = preds.reshape((- 1))
            mask = (labels != (- 100))
            labels = labels[mask]
            preds = preds[mask]
            return metric.compute(predictions=preds, references=labels)
    # pad to a multiple of 8 for fp16 tensor-core efficiency (line-by-line only)
    pad_to_multiple_of_8 = (data_args.line_by_line and training_args.fp16 and (not data_args.pad_to_max_length))
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability, pad_to_multiple_of=(8 if pad_to_multiple_of_8 else None))
    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if (training_args.do_eval and (not is_torch_tpu_available())) else None), preprocess_logits_for_metrics=(preprocess_logits_for_metrics if (training_args.do_eval and (not is_torch_tpu_available())) else None))
    # --- training ---
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # --- evaluation: report perplexity = exp(eval_loss) ---
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        try:
            perplexity = math.exp(metrics['eval_loss'])
        except OverflowError:
            perplexity = float('inf')
        metrics['perplexity'] = perplexity
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # --- model card / hub upload metadata ---
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'fill-mask'}
    if (data_args.dataset_name is not None):
        kwargs['dataset_tags'] = data_args.dataset_name
        if (data_args.dataset_config_name is not None):
            kwargs['dataset_args'] = data_args.dataset_config_name
            kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
        else:
            kwargs['dataset'] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
class PathManager():
    """Trivial path manager: always commands a clockwise orbit around the target."""

    def __init__(self):
        self.path = MsgPath()
        self.manager_requests_waypoints = True

    def update(self, target_position):
        """Return an orbit path centered on target_position (north/east), at -200 down."""
        orbit = self.path
        orbit.type = 'orbit'
        orbit.airspeed = 25
        orbit.orbit_center[0, 0] = target_position.item(0)
        orbit.orbit_center[1, 0] = target_position.item(1)
        orbit.orbit_center[2, 0] = -200
        orbit.orbit_radius = 150
        orbit.orbit_direction = 'CW'
        return orbit
class ImgEncoder(BaseNet):
    """Three-layer convolutional image encoder producing a hidden_size embedding."""

    def __init__(self, obs_shape, hidden_size=256):
        # non-recurrent base: recurrent=False, both sizes = hidden_size
        super().__init__(False, hidden_size, hidden_size)
        self.n_channels = obs_shape[0]
        layers = [
            nn.Conv2d(self.n_channels, 32, kernel_size=8, stride=3),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 32, kernel_size=3, stride=2),
            nn.ReLU(),
            Flatten(),
            nn.Linear(288, hidden_size),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, inputs, rnn_hxs, masks):
        """Encode inputs; the recurrent hidden state passes through unchanged."""
        features = self.net(inputs)
        return (features, rnn_hxs)
class opcodes(IntEnum):
    """Bitcoin-style script opcodes as an IntEnum.

    Values mirror the Bitcoin script opcode table; 193-196 are contract
    opcodes not present in vanilla Bitcoin (presumably a Qtum-style chain
    — TODO confirm against the project's consensus rules).
    """
    # --- push value / constants ---
    OP_0 = 0
    OP_FALSE = OP_0
    OP_PUSHDATA1 = 76
    OP_PUSHDATA2 = 77
    OP_PUSHDATA4 = 78
    OP_1NEGATE = 79
    OP_RESERVED = 80
    OP_1 = 81
    OP_TRUE = OP_1
    OP_2 = 82
    OP_3 = 83
    OP_4 = 84
    OP_5 = 85
    OP_6 = 86
    OP_7 = 87
    OP_8 = 88
    OP_9 = 89
    OP_10 = 90
    OP_11 = 91
    OP_12 = 92
    OP_13 = 93
    OP_14 = 94
    OP_15 = 95
    OP_16 = 96
    # --- control flow ---
    OP_NOP = 97
    OP_VER = 98
    OP_IF = 99
    OP_NOTIF = 100
    OP_VERIF = 101
    OP_VERNOTIF = 102
    OP_ELSE = 103
    OP_ENDIF = 104
    OP_VERIFY = 105
    OP_RETURN = 106
    # --- stack ops ---
    OP_TOALTSTACK = 107
    OP_FROMALTSTACK = 108
    OP_2DROP = 109
    OP_2DUP = 110
    OP_3DUP = 111
    OP_2OVER = 112
    OP_2ROT = 113
    OP_2SWAP = 114
    OP_IFDUP = 115
    OP_DEPTH = 116
    OP_DROP = 117
    OP_DUP = 118
    OP_NIP = 119
    OP_OVER = 120
    OP_PICK = 121
    OP_ROLL = 122
    OP_ROT = 123
    OP_SWAP = 124
    OP_TUCK = 125
    # --- splice ops ---
    OP_CAT = 126
    OP_SUBSTR = 127
    OP_LEFT = 128
    OP_RIGHT = 129
    OP_SIZE = 130
    # --- bit logic ---
    OP_INVERT = 131
    OP_AND = 132
    OP_OR = 133
    OP_XOR = 134
    OP_EQUAL = 135
    OP_EQUALVERIFY = 136
    OP_RESERVED1 = 137
    OP_RESERVED2 = 138
    # --- numeric ---
    OP_1ADD = 139
    OP_1SUB = 140
    OP_2MUL = 141
    OP_2DIV = 142
    OP_NEGATE = 143
    OP_ABS = 144
    OP_NOT = 145
    OP_0NOTEQUAL = 146
    OP_ADD = 147
    OP_SUB = 148
    OP_MUL = 149
    OP_DIV = 150
    OP_MOD = 151
    OP_LSHIFT = 152
    OP_RSHIFT = 153
    OP_BOOLAND = 154
    OP_BOOLOR = 155
    OP_NUMEQUAL = 156
    OP_NUMEQUALVERIFY = 157
    OP_NUMNOTEQUAL = 158
    OP_LESSTHAN = 159
    OP_GREATERTHAN = 160
    OP_LESSTHANOREQUAL = 161
    OP_GREATERTHANOREQUAL = 162
    OP_MIN = 163
    OP_MAX = 164
    OP_WITHIN = 165
    # --- crypto ---
    OP_RIPEMD160 = 166
    OP_SHA1 = 167
    OP_SHA256 = 168
    OP_HASH160 = 169
    OP_HASH256 = 170
    OP_CODESEPARATOR = 171
    OP_CHECKSIG = 172
    OP_CHECKSIGVERIFY = 173
    OP_CHECKMULTISIG = 174
    OP_CHECKMULTISIGVERIFY = 175
    # --- expansion / locktime NOPs ---
    OP_NOP1 = 176
    OP_CHECKLOCKTIMEVERIFY = 177
    OP_NOP2 = OP_CHECKLOCKTIMEVERIFY
    OP_CHECKSEQUENCEVERIFY = 178
    OP_NOP3 = OP_CHECKSEQUENCEVERIFY
    OP_NOP4 = 179
    OP_NOP5 = 180
    OP_NOP6 = 181
    OP_NOP7 = 182
    OP_NOP8 = 183
    OP_NOP9 = 184
    OP_NOP10 = 185
    # --- smart-contract opcodes (not in vanilla Bitcoin) ---
    OP_CREATE = 193
    OP_CALL = 194
    OP_SPEND = 195
    OP_SENDER = 196
    OP_INVALIDOPCODE = 255
    def hex(self) -> str:
        """Return the opcode as a two-character lowercase hex string."""
        return bytes([self]).hex()
class session(Thread):
    """One proxied connection: bridges a local client socket to a remote web-shell tunnel.

    Each accepted local connection (SOCKS5 client or a fixed port-forward)
    gets its own session thread.  The session negotiates SOCKS5 (or sets up
    the forward target), registers a remote "mark" with the web shell via
    HTTP, then pumps data in both directions with a reader and a writer
    thread until either side closes.
    """
    def __init__(self, conn, pSocket, connectURLs, redirectURLs, FwdTarget, force_redirect):
        Thread.__init__(self)
        self.pSocket = pSocket
        self.connectURLs = connectURLs
        self.conn = conn
        self.connect_closed = False
        self.session_connected = False
        self.fwd_target = FwdTarget
        self.redirectURL = None
        self.force_redirect = force_redirect
        # pin one redirect URL per session so all requests look consistent
        if redirectURLs:
            self.redirectURL = random.choice(redirectURLs)
    def url_sample(self):
        """Pick a random tunnel endpoint URL for the next request."""
        return random.choice(self.connectURLs)
    def session_mark(self):
        """Generate a short random identifier for this tunnel session."""
        # base64 of a UUID minus the last 8 chars -> 16-char mark, no padding
        mark = base64.b64encode(uuid.uuid4().bytes)[0:(- 8)]
        if ispython3:
            mark = mark.decode()
        return mark
    def parseSocks5(self, sock):
        """Handle a SOCKS5 handshake on *sock*; return True when CONNECT succeeds.

        Supports CONNECT only (BIND and UDP raise SocksCmdNotImplemented).
        Address types: IPv4, domain name (resolved locally when LOCALDNS is
        set), and IPv6.
        """
        log.debug('[SOCKS5] Version5 detected')
        nmethods = sock.recv(1)
        methods = sock.recv(ord(nmethods))
        # reply with our chosen auth method (no-auth)
        sock.sendall((VER + METHOD))
        ver = sock.recv(1)
        # NOTE(review): a leading b'\x02' byte is treated as an extra version
        # byte and skipped — presumably a workaround for some client; confirm
        if (ver == b'\x02'):
            (ver, cmd, rsv, atyp) = (sock.recv(1), sock.recv(1), sock.recv(1), sock.recv(1))
        else:
            (cmd, rsv, atyp) = (sock.recv(1), sock.recv(1), sock.recv(1))
        target = None
        targetPort = None
        if (atyp == b'\x01'):
            # IPv4 address
            target = sock.recv(4)
            targetPort = sock.recv(2)
            target = inet_ntoa(target)
        elif (atyp == b'\x03'):
            # domain name, optionally resolved on this side
            targetLen = ord(sock.recv(1))
            target = sock.recv(targetLen)
            targetPort = sock.recv(2)
            if LOCALDNS:
                try:
                    target = gethostbyname(target)
                except:
                    log.error(('[SOCKS5] DNS resolution failed: (%s)' % target.decode()))
                    return False
            else:
                target = target.decode()
        elif (atyp == b'\x04'):
            # IPv6 address
            target = sock.recv(16)
            targetPort = sock.recv(2)
            target = inet_ntop(AF_INET6, target)
        if (targetPort == None):
            return False
        targetPortNum = struct.unpack('>H', targetPort)[0]
        if (cmd == b'\x02'):
            raise SocksCmdNotImplemented('Socks5 - BIND not implemented')
        elif (cmd == b'\x03'):
            raise SocksCmdNotImplemented('Socks5 - UDP not implemented')
        elif (cmd == b'\x01'):
            # CONNECT: register the remote session, then answer the client
            try:
                serverIp = inet_aton(target)
            except:
                # non-IPv4 target (domain/IPv6): reply with a placeholder address
                serverIp = inet_aton('127.0.0.1')
            mark = self.setupRemoteSession(target, targetPortNum)
            if mark:
                sock.sendall((((((VER + SUCCESS) + b'\x00') + b'\x01') + serverIp) + targetPort))
                return True
            else:
                sock.sendall((((((VER + REFUSED) + b'\x00') + b'\x01') + serverIp) + targetPort))
                return False
        raise SocksCmdNotImplemented('Socks5 - Unknown CMD')
    def handleSocks(self, sock):
        """Dispatch on the SOCKS version byte; only version 5 is supported."""
        try:
            ver = sock.recv(1)
            if (ver == b'\x05'):
                res = self.parseSocks5(sock)
                if (not res):
                    sock.close()
                return res
            elif (ver == b''):
                log.error('[SOCKS5] Failed to get version')
            else:
                log.error('[SOCKS5] Only support Socks5 protocol')
            return False
        except OSError:
            return False
        except timeout:
            return False
    def handleFwd(self, sock):
        """Set up a remote session to the fixed 'host:port' forward target."""
        log.debug('[PORT FWD] Forward detected')
        (host, port) = self.fwd_target.split(':', 1)
        mark = self.setupRemoteSession(host, int(port))
        return bool(mark)
    def neoreg_request(self, info, timeout=None):
        """POST an encoded command to the web shell and return the decoded reply.

        Retries forever on connection/encoding errors and on non-OK HTTP
        status; malformed response bodies are retried up to MAXRETRY times
        before the NeoregReponseFormatError is re-raised.
        """
        if self.redirectURL:
            info['REDIRECTURL'] = self.redirectURL
            if self.force_redirect:
                info['FORCEREDIRECT'] = 'TRUE'
            else:
                info['FORCEREDIRECT'] = 'FALSE'
        data = encode_body(info)
        log.debug(('[HTTP] [%s:%d] %s Request (%s)' % (self.target, self.port, info['CMD'], self.mark)))
        retry = 0
        while True:
            retry += 1
            try:
                response = self.conn.post(self.url_sample(), headers=HEADERS, timeout=timeout, data=data)
                second = response.elapsed.total_seconds()
                log.debug(('[HTTP] [%s:%d] %s Response (%s) => HttpCode: %d, Time: %.2fs' % (self.target, self.port, info['CMD'], self.mark, response.status_code, second)))
                rdata = extract_body(response.content)
                rinfo = decode_body(rdata)
                if (rinfo is None):
                    raise NeoregReponseFormatError('[HTTP] Response Format Error: {}'.format(response.content))
                else:
                    if ((rinfo['STATUS'] != 'OK') and (info['CMD'] != 'DISCONNECT')):
                        log.warning(('[%s] [%s:%d] Error: %s' % (info['CMD'], self.target, self.port, rinfo['ERROR'])))
                    return rinfo
                log.warning(('[HTTP] [%s:%d] [ReTry %d] %s Request (%s) => HttpCode: %d' % (self.target, self.port, retry, info['CMD'], self.mark, response.status_code)))
            except requests.exceptions.ConnectionError as e:
                log.warning('[HTTP] [{}] [requests.exceptions.ConnectionError] {}'.format(info['CMD'], e))
            except requests.exceptions.ChunkedEncodingError as e:
                log.warning('[HTTP] [{}] [requests.exceptions.ChunkedEncodingError] {}'.format(info['CMD'], e))
            except NeoregReponseFormatError as e:
                log.warning(('[%s] [%s:%d] NeoregReponseFormatError, Retry: No.%d' % (info['CMD'], self.target, self.port, retry)))
                if (retry > MAXRETRY):
                    raise e
    def setupRemoteSession(self, target, port):
        """Register a CONNECT for target:port with the web shell; return the mark or False."""
        self.mark = self.session_mark()
        self.target = target.encode()
        self.port = port
        info = {'CMD': 'CONNECT', 'MARK': self.mark, 'IP': self.target, 'PORT': str(self.port)}
        if ('.php' in self.connectURLs[0]):
            # PHP shells may block on the CONNECT request: fire it with a short
            # timeout and treat the timeout itself as success
            try:
                rinfo = self.neoreg_request(info, timeout=PHPTIMEOUT)
            except:
                log.info(('[CONNECT] [%s:%d] Session mark (%s)' % (self.target, self.port, self.mark)))
                return self.mark
        else:
            rinfo = self.neoreg_request(info)
        status = rinfo['STATUS']
        if (status == 'OK'):
            log.info(('[CONNECT] [%s:%d] Session mark: %s' % (self.target, self.port, self.mark)))
            return self.mark
        else:
            return False
    def closeRemoteSession(self):
        """Close the local socket and tell the web shell to drop the remote session.

        Idempotent: the first caller flips connect_closed and sends DISCONNECT.
        """
        if (not self.connect_closed):
            self.connect_closed = True
            try:
                self.pSocket.close()
                log.debug(('[DISCONNECT] [%s:%d] Closing localsocket' % (self.target, self.port)))
            except:
                if hasattr(self, 'target'):
                    log.debug(('[DISCONNECT] [%s:%d] Localsocket already closed' % (self.target, self.port)))
            if hasattr(self, 'mark'):
                info = {'CMD': 'DISCONNECT', 'MARK': self.mark}
                rinfo = self.neoreg_request(info)
            if (not self.connect_closed):
                if hasattr(self, 'target'):
                    log.info(('[DISCONNECT] [%s:%d] Connection Terminated' % (self.target, self.port)))
                else:
                    log.error('[DISCONNECT] Connection Terminated')
    def reader(self):
        """Pump remote -> local: poll the web shell with READ and forward to pSocket."""
        try:
            info = {'CMD': 'READ', 'MARK': self.mark}
            n = 0
            while True:
                try:
                    if (self.connect_closed or (self.pSocket.fileno() == (- 1))):
                        break
                    rinfo = self.neoreg_request(info)
                    if (rinfo['STATUS'] == 'OK'):
                        data = rinfo['DATA']
                        data_len = len(data)
                        if (data_len == 0):
                            # nothing pending remotely: back off before polling again
                            sleep(READINTERVAL)
                        elif (data_len > 0):
                            n += 1
                            transferLog.info(('[%s:%d] [%s] No.%d <<<< [%d byte]' % (self.target, self.port, self.mark, n, data_len)))
                            # loop until the whole chunk is written to the local socket
                            while data:
                                writed_size = self.pSocket.send(data)
                                data = data[writed_size:]
                            if (data_len < 500):
                                sleep(READINTERVAL)
                    else:
                        break
                except error:
                    # transient socket error: keep polling
                    pass
                except Exception as ex:
                    log.exception(ex)
                    break
        finally:
            self.closeRemoteSession()
    def writer(self):
        """Pump local -> remote: read from pSocket and FORWARD to the web shell."""
        try:
            info = {'CMD': 'FORWARD', 'MARK': self.mark}
            n = 0
            while True:
                try:
                    raw_data = self.pSocket.recv(READBUFSIZE)
                    if (not raw_data):
                        # local peer closed its write side
                        break
                    info['DATA'] = raw_data
                    rinfo = self.neoreg_request(info)
                    if (rinfo['STATUS'] != 'OK'):
                        break
                    n += 1
                    transferLog.info(('[%s:%d] [%s] No.%d >>>> [%d byte]' % (self.target, self.port, self.mark, n, len(raw_data))))
                    if (len(raw_data) < READBUFSIZE):
                        # partial read: likely idle, throttle the next recv
                        sleep(WRITEINTERVAL)
                except timeout:
                    continue
                except error:
                    break
                except OSError:
                    break
                except Exception as ex:
                    log.exception(ex)
                    break
        finally:
            self.closeRemoteSession()
    def run(self):
        """Negotiate the session, then run reader/writer threads until both finish."""
        try:
            if self.fwd_target:
                self.session_connected = self.handleFwd(self.pSocket)
            else:
                self.session_connected = self.handleSocks(self.pSocket)
            if self.session_connected:
                r = Thread(target=self.reader)
                w = Thread(target=self.writer)
                r.start()
                w.start()
                r.join()
                w.join()
        except NeoregReponseFormatError as ex:
            log.error('[HTTP] [NeoregReponseFormatError] {}'.format(ex))
        except SocksCmdNotImplemented as ex:
            log.error('[SOCKS5] [SocksCmdNotImplemented] {}'.format(ex))
        except requests.exceptions.ConnectionError as ex:
            log.warning('[HTTP] [requests.exceptions.ConnectionError] {}'.format(ex))
        except Exception as ex:
            log.exception(ex)
        finally:
            if self.session_connected:
                self.closeRemoteSession()
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    """Benchmark arguments specific to the TensorFlow backend.

    Adds TPU/GPU/XLA device selection on top of the shared
    ``BenchmarkArguments`` and translates deprecated ``no_*`` flags into
    their positive counterparts.
    """

    # Deprecated negative flags still accepted by __init__; each maps to
    # the positive flag obtained by stripping the leading 'no_'.
    deprecated_args = ['no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process']

    def __init__(self, **kwargs):
        """Negate any deprecated ``no_*`` kwargs into the positive flag,
        pop TF-specific options, then forward the rest to the base init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or {positive_arg}={kwargs[positive_arg]}')
        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(default=None, metadata={'help': 'Name of TPU'})
    device_idx: int = field(default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'})
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager model.'})
    use_xla: bool = field(default=False, metadata={'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'})

    @property
    def _setup_tpu(self) -> Tuple['tf.distribute.cluster_resolver.TPUClusterResolver']:
        """Resolve and return the TPU cluster resolver, or None if no TPU.

        The result is cached on the instance so the repeated accesses in
        ``_setup_strategy`` reuse a single resolver instead of reconnecting.
        (The original `_property` marker was corrupted; restored as a
        cached property.)
        """
        requires_backends(self, ['tf'])
        if not hasattr(self, '_tpu_resolver_cache'):
            tpu = None
            if self.tpu:
                try:
                    if self.tpu_name:
                        tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                    else:
                        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
                except ValueError:
                    # No TPU reachable; fall back to GPU/CPU strategies.
                    tpu = None
            self._tpu_resolver_cache = tpu
        return self._tpu_resolver_cache

    @property
    def _setup_strategy(self) -> Tuple[('tf.distribute.Strategy', 'tf.distribute.cluster_resolver.TPUClusterResolver')]:
        """Build (once per instance) the tf.distribute strategy: TPU if a
        resolver exists, otherwise a single-device GPU or CPU strategy."""
        requires_backends(self, ['tf'])
        if not hasattr(self, '_strategy_cache'):
            if self.is_tpu:
                tf.config.experimental_connect_to_cluster(self._setup_tpu)
                tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
                strategy = tf.distribute.TPUStrategy(self._setup_tpu)
            elif self.is_gpu:
                # Pin TF to the single requested GPU.
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU')
                strategy = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}')
            else:
                # Hide all GPUs and run on CPU.
                tf.config.set_visible_devices([], 'GPU')
                strategy = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}')
            self._strategy_cache = strategy
        return self._strategy_cache

    @property
    def is_tpu(self) -> bool:
        """True when a TPU cluster resolver could be created."""
        requires_backends(self, ['tf'])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> 'tf.distribute.Strategy':
        """The distribution strategy selected for this benchmark run."""
        requires_backends(self, ['tf'])
        return self._setup_strategy

    @property
    def gpu_list(self):
        """Physical GPU devices visible to TensorFlow."""
        requires_backends(self, ['tf'])
        return tf.config.list_physical_devices('GPU')

    @property
    def n_gpu(self) -> int:
        """Number of usable GPUs; 0 when CUDA is disabled."""
        requires_backends(self, ['tf'])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        """True when at least one GPU is available and enabled."""
        return self.n_gpu > 0
def apply_rotary_emb(q, sinu_pos):
    """Apply rotary positional embeddings to ``q``.

    ``sinu_pos`` packs the sin and cos halves along its last dimension as
    ``n (2 d)``; each half is expanded by interleaved repetition so every
    (sin, cos) pair covers two adjacent channels of ``q``.

    Returns the rotated ``q`` together with the expanded ``sin``/``cos``
    tensors so callers can reuse them.
    """
    # Removed a leftover debug print of the tensor sizes.
    sinu_pos = rearrange(sinu_pos, 'n (j d) -> n j d', j=2)
    sin, cos = sinu_pos.unbind(dim=-2)
    sin, cos = map(lambda t: repeat(t, 'n d -> n (d j)', j=2), (sin, cos))
    # Broadcast over q's head dimension (dim 1) — assumes q is
    # (batch, heads, n, d); TODO confirm against callers.
    q = (q * cos.unsqueeze(1)) + (rotate_every_two(q) * sin.unsqueeze(1))
    return q, sin, cos
class DeleteDialog(WarningMessage):
    """Modal warning dialog asking the user to confirm permanent file deletion."""

    RESPONSE_DELETE = 1  # custom response id for the destructive button

    @classmethod
    def for_songs(cls, parent, songs):
        """Alternate constructor for deleting library songs and their files.

        (Restored the missing @classmethod: the method takes ``cls`` and
        calls ``cls(...)``, so without the decorator ``parent`` would be
        bound as the class.)
        """
        description = _('The selected songs will be removed from the library and their files deleted from disk.')
        # Song objects are callable; '~filename' yields the on-disk path.
        paths = [s('~filename') for s in songs]
        return cls(parent, paths, description)

    @classmethod
    def for_files(cls, parent, paths):
        """Alternate constructor for deleting arbitrary files from disk."""
        description = _('The selected files will be deleted from disk.')
        return cls(parent, paths, description)

    def __init__(self, parent, paths, description):
        title = numeric_phrase('Delete %(file_count)d file permanently?', 'Delete %(file_count)d files permanently?', len(paths), 'file_count')
        super().__init__(get_top_parent(parent), title, description, buttons=Gtk.ButtonsType.NONE)
        area = self.get_message_area()
        # Collapsible list of the affected paths inside the message area.
        exp = FileListExpander(paths)
        exp.show()
        area.pack_start(exp, False, True, 0)
        self.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
        self.add_icon_button(_('_Delete Files'), Icons.EDIT_DELETE, self.RESPONSE_DELETE)
        # Default to Cancel so Enter never deletes by accident.
        self.set_default_response(Gtk.ResponseType.CANCEL)
_unraisablehook()
def test_last_minute_gc_edge_case() -> None:
    """Regression test: an async generator still alive when the trio run
    finishes must be finalized by the trailing last-minute collection.

    The window is timing-dependent, so the body retries up to 50 times
    until a forced GC actually lands after the system nursery closes.
    """
    saved: list[AsyncGenerator[(int, None)]] = []
    record = []
    needs_retry = True

    async def agen() -> AsyncGenerator[(int, None)]:
        try:
            (yield 1)
        finally:
            # Runs when the generator is finalized; its position in `record`
            # shows whether finalization happened during the forced GC.
            record.append('cleaned up')

    def collect_at_opportune_moment(token: _core._entry_queue.TrioToken) -> None:
        # Re-schedules itself via run_sync_soon until the system nursery has
        # closed and asyncgen bookkeeping has switched to a plain WeakSet.
        runner = _core._run.GLOBAL_RUN_CONTEXT.runner
        assert (runner.system_nursery is not None)
        if (runner.system_nursery._closed and isinstance(runner.asyncgens.alive, weakref.WeakSet)):
            saved.clear()  # drop the only strong ref so GC can collect agen
            record.append('final collection')
            gc_collect_harder()
            record.append('done')
        else:
            try:
                token.run_sync_soon(collect_at_opportune_moment, token)
            except _core.RunFinishedError:
                # The run ended before we hit the window; signal a retry.
                nonlocal needs_retry
                needs_retry = True

    async def async_main() -> None:
        token = _core.current_trio_token()
        token.run_sync_soon(collect_at_opportune_moment, token)
        saved.append(agen())
        # Advance to the first yield so the generator is live at run exit.
        (await saved[(- 1)].asend(None))

    for _attempt in range(50):
        needs_retry = False
        del record[:]
        del saved[:]
        _core.run(async_main)
        if needs_retry:
            # Missed the window: only the ordinary cleanup should have run.
            assert (record == ['cleaned up'])
        else:
            # Hit the window: forced collection ran before cleanup.
            assert (record == ['final collection', 'done', 'cleaned up'])
            break
    else:
        pytest.fail(f"Didn't manage to hit the trailing_finalizer_asyncgens case despite trying {_attempt} times")
class Migration(migrations.Migration):
    """Widen the Infraction.type choices and rename existing rows."""

    dependencies = [
        ('api', '0086_infraction_jump_url'),
    ]

    operations = [
        migrations.AlterField(
            model_name='infraction',
            name='type',
            field=models.CharField(
                choices=[
                    ('note', 'Note'),
                    ('warning', 'Warning'),
                    ('watch', 'Watch'),
                    ('timeout', 'Timeout'),
                    ('kick', 'Kick'),
                    ('ban', 'Ban'),
                    ('superstar', 'Superstar'),
                    ('voice_ban', 'Voice Ban'),
                    ('voice_mute', 'Voice Mute'),
                ],
                help_text='The type of the infraction.',
                max_length=10,
            ),
        ),
        # Data migration: forward renames the types; reverse is a no-op.
        migrations.RunPython(rename_type, migrations.RunPython.noop),
    ]
def test_method_and_teardown_failing_reporting(pytester: Pytester) -> None:
    """Both the failing test body and the failing tearDown are reported:
    the run ends with one failure (the test) plus one error (tearDown).

    (The makepyfile source string had lost its nested indentation in this
    copy, which would make the generated module invalid; restored.)
    """
    pytester.makepyfile(
        """
        import unittest
        class TC(unittest.TestCase):
            def tearDown(self):
                assert 0, "down1"
            def test_method(self):
                assert False, "down2"
        """
    )
    result = pytester.runpytest('-s')
    assert result.ret == 1
    result.stdout.fnmatch_lines(['*tearDown*', '*assert 0*', '*test_method*', '*assert False*', '*1 failed*1 error*'])
class NotificationManager(object):
    """Builds an Apprise notifier set from config and sends startup/update
    notifications for monitored containers."""

    def __init__(self, config, data_manager):
        self.config = config
        self.data_manager = data_manager
        self.logger = getLogger()
        self.apprise = self.build_apprise()

    def build_apprise(self):
        """Create an Apprise instance with app branding and every notifier
        URL from the config; bad notifier URLs are logged and skipped.

        NOTE(review): the URL string literals below were stripped/mangled
        in this copy of the source (unterminated quotes); restored from the
        upstream pyouroboros project — confirm against the original.
        """
        asset = apprise.AppriseAsset(
            image_url_mask='https://bin.cajun.pro/images/ouroboros/notifications/ouroboros-logo-{type}-{xy}.{extension}',
            default_extension='.png',
        )
        asset.app_id = 'Ouroboros'
        asset.app_desc = 'Ouroboros'
        asset.app_url = 'https://github.com/pyouroboros/ouroboros'
        asset.html_notify_map['info'] = '#5F87C6'
        asset.image_url_logo = 'https://bin.cajun.pro/images/ouroboros/notifications/ouroboros.png'
        apprise_obj = apprise.Apprise(asset=asset)
        for notifier in self.config.notifiers:
            add = apprise_obj.add(notifier)
            if not add:
                self.logger.error('Could not add notifier %s', notifier)
        return apprise_obj

    def send(self, container_tuples=None, socket=None, kind='update', next_run=None, mode='container'):
        """Send a notification.

        kind='startup' announces the monitor starting (uses ``next_run``);
        any other kind reports updated containers from ``container_tuples``
        for the given docker ``socket``. Does nothing unless at least one
        notifier was successfully registered.
        """
        if kind == 'startup':
            now = datetime.now(timezone.utc).astimezone()
            title = 'Ouroboros has started'  # was a placeholder-free f-string
            body_fields = [
                f'Host: {self.config.hostname}',
                f"Time: {now.strftime('%Y-%m-%d %H:%M:%S')}",
                f'Next Run: {next_run}',
            ]
        else:
            title = 'Ouroboros has updated containers!'
            body_fields = [
                f"Host/Socket: {self.config.hostname} / {socket.split('//')[1]}",
                f'Containers Monitored: {self.data_manager.monitored_containers[socket]}',
                f'Total Containers Updated: {self.data_manager.total_updated[socket]}',
                f'Containers updated this pass: {len(container_tuples)}',
            ]
            # In 'service' mode old_image is already a printable reference;
            # otherwise show the short image ids.
            body_fields.extend(['{} updated from {} to {}'.format(container.name, (old_image if (mode == 'service') else old_image.short_id.split(':')[1]), new_image.short_id.split(':')[1]) for (container, old_image, new_image) in container_tuples])
        body = '\r\n'.join(body_fields)
        if self.apprise.servers:
            self.apprise.notify(title=title, body=body)
def make_dataset():
    """Build a DataLoader for the dataset selected by ``opt.dataset``.

    Images are resized to ``opt.img_width`` and normalized to [-1, 1] per
    channel. ImageFolder-style datasets load unshuffled; CIFAR-10 loads
    the (already-downloaded) train split shuffled.

    Returns:
        DataLoader with batch_size=100 and ``opt.workers`` workers.
    Raises:
        ValueError: if ``opt.dataset`` is not a supported name.
    """
    # Identical preprocessing for every supported dataset — built once
    # instead of duplicated per branch.
    trans = tfs.Compose([
        tfs.Resize(opt.img_width),
        tfs.ToTensor(),
        tfs.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    if opt.dataset in ('imagenet', 'dog_and_cat_64', 'dog_and_cat_128'):
        data = ImageFolder(opt.root, transform=trans)
        loader = DataLoader(data, batch_size=100, shuffle=False, num_workers=opt.workers)
    elif opt.dataset == 'cifar10':
        data = CIFAR10(root=opt.root, train=True, download=False, transform=trans)
        loader = DataLoader(data, batch_size=100, shuffle=True, num_workers=opt.workers)
    else:
        raise ValueError(f'Unknown dataset: {opt.dataset}')
    return loader
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.