code stringlengths 281 23.7M |
|---|
class Discriminator(nn.Module):
    """MelGAN-style waveform discriminator.

    A stack of strided, grouped, weight-normalised 1-D convolutions that maps a
    (batch, 1, samples) waveform to a (batch, 1, samples/64) score map.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        wn = nn.utils.weight_norm
        # Input stage: reflection-pad so the k=15 conv preserves length.
        stages = [
            nn.Sequential(
                nn.ReflectionPad1d(7),
                wn(nn.Conv1d(1, 16, kernel_size=15)),
                nn.LeakyReLU(0.2, True),
            )
        ]
        # Three downsampling stages (stride 4 each) with grouped convolutions.
        for in_ch, out_ch, groups in ((16, 64, 4), (64, 256, 16), (256, 512, 64)):
            stages.append(
                nn.Sequential(
                    wn(nn.Conv1d(in_ch, out_ch, kernel_size=41, stride=4, padding=20, groups=groups)),
                    nn.LeakyReLU(0.2, True),
                )
            )
        # Final feature stage and 1-channel score head.
        stages.append(
            nn.Sequential(
                wn(nn.Conv1d(512, 512, kernel_size=5, stride=1, padding=2)),
                nn.LeakyReLU(0.2, True),
            )
        )
        stages.append(wn(nn.Conv1d(512, 1, kernel_size=3, stride=1, padding=1)))
        self.discriminator = nn.ModuleList(stages)

    def forward(self, x):
        """Run every stage in sequence and return the final score map."""
        for stage in self.discriminator:
            x = stage(x)
        return x
# NOTE(review): the decorator below is corrupted — it was almost certainly
# "@pytest.mark.parametrize(...)" and the Link(...) arguments have lost their
# URL string literals and closing quotes (the line is not valid Python).
# Restore from the upstream pip test suite rather than guessing the URLs.
.parametrize('attributes, expected_link', [('', Link(' ('data-requires-python=">=3.7"', Link(' requires_python='>=3.7')), ('data-yanked', Link(' yanked=True)), ('data-yanked=""', Link(' yanked=True)), ('data-yanked="<reason>"', Link(' yanked='<reason>')), ('data-requires-python=">=3.7" data-yanked', Link(' requires_python='>=3.7', yanked=True))])
def test_link_attributes(html_page_content: HTMLPageGetter, attributes: str, expected_link: Link) -> None:
    """Parse a single <a> anchor with the given data-* attributes and check the
    resulting Link's url / requires_python / yanked / yanked_reason fields."""
    # NOTE(review): this f-string is missing the href URL and its closing quote.
    anchor = f'<a href=" {attributes}>demo-0.1.whl</a><br/>'
    content = html_page_content(anchor)
    # NOTE(review): "HTMLPage(' content)" is garbled — presumably
    # HTMLPage(content, url=...) in the original.
    page = HTMLPage(' content)
    assert (len(list(page.links)) == 1)
    link = next(iter(page.links))
    assert (link.url == expected_link.url)
    assert (link.requires_python == expected_link.requires_python)
    assert (link.yanked == expected_link.yanked)
    assert (link.yanked_reason == expected_link.yanked_reason)
def bn(x, c):
    """TF1-style batch normalisation over all axes except the last (channel) axis.

    `c` is a config dict: if c['use_bias'] is truthy, normalisation is skipped
    entirely and only a learned per-channel bias is added. Otherwise classic
    batch norm is applied, using batch statistics during training and moving
    averages at inference (selected by the c['is_training'] tensor).
    """
    x_shape = x.get_shape()
    # Per-channel parameter shape (last axis of x).
    params_shape = x_shape[(- 1):]
    if c['use_bias']:
        # Bias-only mode: no normalisation at all.
        bias = _get_variable('bias', params_shape, initializer=tf.zeros_initializer)
        return (x + bias)
    # Reduce over every axis except channels when computing batch moments.
    axis = list(range((len(x_shape) - 1)))
    beta = _get_variable('beta', params_shape, initializer=tf.zeros_initializer)
    gamma = _get_variable('gamma', params_shape, initializer=tf.ones_initializer)
    moving_mean = _get_variable('moving_mean', params_shape, initializer=tf.zeros_initializer, trainable=False)
    moving_variance = _get_variable('moving_variance', params_shape, initializer=tf.ones_initializer, trainable=False)
    (mean, variance) = tf.nn.moments(x, axis)
    # Register moving-average updates; they are only *collected* here — the
    # training loop is responsible for actually running UPDATE_OPS_COLLECTION.
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    # Pick batch statistics when training, moving averages otherwise.
    (mean, variance) = control_flow_ops.cond(c['is_training'], (lambda : (mean, variance)), (lambda : (moving_mean, moving_variance)))
    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    return x
class TestWeight():
    """Unit tests for the Weight risk-scoring helper and the LEVEL_INFO enum."""

    def test_init(self, expected_data):
        """Weight stores both sums; a bare Weight() must raise TypeError."""
        (score_sum, weight_sum) = expected_data
        weight_obj = Weight(score_sum, weight_sum)
        with pytest.raises(TypeError):
            _ = Weight()
        assert (weight_obj.score_sum == score_sum)
        assert (weight_obj.weight_sum == weight_sum)

    def test_level_info(self):
        """The three risk levels expose their human-readable labels."""
        assert (LEVEL_INFO.LOW.value == 'Low Risk')
        assert (LEVEL_INFO.Moderate.value == 'Moderate Risk')
        assert (LEVEL_INFO.High.value == 'High Risk')

    def test_calculate_with_expected_data(self, expected_data):
        """calculate() returns one of the three ANSI-coloured risk strings."""
        (score_sum, weight_sum) = expected_data
        weight_obj = Weight(score_sum, weight_sum)
        assert (weight_obj.calculate() in ['\x1b[92mLow Risk\x1b[0m', '\x1b[33mModerate Risk\x1b[0m', '\x1b[91mHigh Risk\x1b[0m'])

    # BUG FIX: the decorator had been stripped to ".xfail(raises=ValueError)",
    # which is a syntax error; restored as the pytest expected-failure marker.
    @pytest.mark.xfail(raises=ValueError)
    def test_calculate_with_unexpected_data(self, unexpected_data):
        """Out-of-range inputs are expected to make calculate() raise ValueError."""
        (score_sum, weight_sum) = unexpected_data
        weight_obj = Weight(score_sum, weight_sum)
        assert (weight_obj.calculate() in ['\x1b[92mLow Risk\x1b[0m', '\x1b[33mModerate Risk\x1b[0m', '\x1b[91mHigh Risk\x1b[0m'])
class MOT(GenericDataset):
    """MOT16/17/19 multi-object-tracking dataset wrapper (CenterTrack-style).

    Single pedestrian class; annotation and image paths are derived from
    opt.dataset_version (e.g. '17trainval', '17halftrain', '17halfval').
    """

    num_categories = 1
    default_resolution = [544, 960]
    class_name = ['']  # single, unnamed category
    max_objs = 256
    # Keep category 1 as 1 and "ignore" annotations (-1) mapped to -1.
    cat_ids = {1: 1, (- 1): (- 1)}

    def __init__(self, opt, split):
        self.dataset_version = opt.dataset_version
        # First two characters encode the challenge year ('17...', '19...').
        self.year = int(self.dataset_version[:2])
        print('Using MOT {} {}'.format(self.year, self.dataset_version))
        data_dir = os.path.join(opt.data_dir, 'mot{}'.format(self.year))
        # Select the annotation file for the requested version/split.
        # NOTE(review): dataset versions other than the four handled below
        # leave ann_file undefined and would raise NameError at print().
        if (opt.dataset_version in ['17trainval', '17test']):
            ann_file = '{}.json'.format(('train' if (split == 'train') else 'test'))
        elif (opt.dataset_version == '17halftrain'):
            ann_file = '{}.json'.format('train_half')
        elif (opt.dataset_version == '17halfval'):
            ann_file = '{}.json'.format('val_half')
        img_dir = os.path.join(data_dir, '{}'.format(('test' if ('test' in self.dataset_version) else 'train')))
        print('ann_file', ann_file)
        ann_path = os.path.join(data_dir, 'annotations', ann_file)
        self.images = None  # populated by GenericDataset.__init__
        super(MOT, self).__init__(opt, split, ann_path, img_dir)
        self.num_samples = len(self.images)
        print('Loaded MOT {} {} {} samples'.format(self.dataset_version, split, self.num_samples))

    def _to_float(self, x):
        # Round to 2 decimals for compact result files.
        return float('{:.2f}'.format(x))

    def __len__(self):
        return self.num_samples

    def save_results(self, results, save_dir):
        """Write one MOTChallenge txt file per video: frame,id,x,y,w,h,-1,-1,-1,-1.

        Track ids are renumbered consecutively per video; detections without a
        tracking_id get a random one; inactive detections are skipped.
        NOTE(review): widths/heights are computed as (t[3]-t[1]), (t[4]-t[2]),
        i.e. bbox appears to be [x1, y1, x2, y2] — confirm against the tracker.
        """
        results_dir = os.path.join(save_dir, 'results_mot{}'.format(self.dataset_version))
        if (not os.path.exists(results_dir)):
            os.mkdir(results_dir)
        for video in self.coco.dataset['videos']:
            video_id = video['id']
            file_name = video['file_name']
            out_path = os.path.join(results_dir, '{}.txt'.format(file_name))
            f = open(out_path, 'w')
            images = self.video_to_images[video_id]
            # tracking_id -> list of [frame_id, x1, y1, x2, y2] rows.
            tracks = defaultdict(list)
            for image_info in images:
                if (not (image_info['id'] in results)):
                    continue
                result = results[image_info['id']]
                frame_id = image_info['frame_id']
                for item in result:
                    if (not ('tracking_id' in item)):
                        item['tracking_id'] = np.random.randint(100000)
                    if (item['active'] == 0):
                        continue
                    tracking_id = item['tracking_id']
                    bbox = item['bbox']
                    bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
                    tracks[tracking_id].append(([frame_id] + bbox))
            rename_track_id = 0
            # Renumber ids in sorted order so output ids are small and stable.
            for track_id in sorted(tracks):
                rename_track_id += 1
                for t in tracks[track_id]:
                    f.write('{},{},{:.2f},{:.2f},{:.2f},{:.2f},-1,-1,-1,-1\n'.format(t[0], rename_track_id, t[1], t[2], (t[3] - t[1]), (t[4] - t[2])))
            f.close()

    def run_eval(self, results, save_dir):
        """Dump results and shell out to the external MOTChallenge eval script."""
        self.save_results(results, save_dir)
        gt_type_str = '{}'.format(('_train_half' if ('17halftrain' in self.opt.dataset_version) else ('_val_half' if ('17halfval' in self.opt.dataset_version) else '')))
        gt_type_str = ('_val_half' if (self.year in [16, 19]) else gt_type_str)
        gt_type_str = ('--gt_type {}'.format(gt_type_str) if (gt_type_str != '') else '')
        os.system((((('python tools/eval_motchallenge.py ' + '../data/mot{}/{}/ '.format(self.year, 'train')) + '{}/results_mot{}/ '.format(save_dir, self.dataset_version)) + gt_type_str) + ' --eval_official'))
class ResNet(nn.Module):
    """CIFAR-style ResNet: a stem conv, four residual stages, and a linear head.

    `block` is the residual block class (must expose `expansion`), `num_blocks`
    gives the block count per stage, and `num_classes` sizes the classifier.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # Stage widths and the stride of each stage's first block.
        stage_specs = zip((64, 128, 256, 512), num_blocks, (1, 2, 2, 2))
        self.layer1, self.layer2, self.layer3, self.layer4 = [
            self._make_layer(block, width, count, stride=first_stride)
            for width, count, first_stride in stage_specs
        ]
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage: the first block may downsample, the rest keep size."""
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits for a batch of images."""
        return self.linear(self.embed(x))

    def embed(self, x):
        """Return the pooled, flattened feature embedding (before the head)."""
        out = F.relu(self.conv1(x))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return out.view(out.size(0), -1)
def test_geo_position():
    """GeoPosition: equality, pretty-printing, XML round-trip, and the
    minimum-OSC-version validation (GeoPosition is not valid in v1.0)."""
    base = OSC.GeoPosition(1, 1)
    twin = OSC.GeoPosition(1, 1)
    with_altitude = OSC.GeoPosition(1, 1, 1)
    prettyprint(base)
    assert base == twin
    assert base != with_altitude
    # Serialise to XML and parse back; the round-trip must preserve equality.
    roundtripped = OSC.GeoPosition.parse(base.get_element())
    assert base == roundtripped
    # Valid from OSC v1.1 onwards only.
    expected_by_version = [ValidationResponse.OSC_VERSION, ValidationResponse.OK, ValidationResponse.OK]
    for version, expected in enumerate(expected_by_version):
        assert version_validation('Position', base, version) == expected
# NOTE(review): the line below is a decorator whose name was stripped — in
# pycket this is the @expose(...) registration for the Racket `struct?`
# primitive; as written it is a bare (no-op) tuple expression.
('struct?', [values.W_Object], simple=False)
def do_is_struct(v, env, cont):
    """Racket `struct?`: true iff `v` is a struct instance whose struct type
    the current inspector has control over; false otherwise."""
    from pycket.interpreter import return_value
    current_inspector = values_struct.current_inspector_param.get(cont)
    if isinstance(v, values_struct.W_RootStruct):
        if current_inspector.has_control(v.struct_type()):
            return return_value(values.w_true, env, cont)
    return return_value(values.w_false, env, cont)
# NOTE(review): this test uses factory fixtures and DB writes — it almost
# certainly carried a stripped "@pytest.mark.django_db" marker; confirm
# against the upstream test module.
def test_create_grant_vouchers_on_pretix_doesnt_work_without_pretix_config(rf, conference_factory, grant_factory, mocker):
    """Without a voucher quota configured on the conference, the admin action
    must create no vouchers, show an error message, and leave grants untouched."""
    mock_create_voucher = mocker.patch('grants.admin.create_voucher', side_effect=[{'id': 1}, {'id': 2}])
    mock_messages = mocker.patch('grants.admin.messages')
    # Conference deliberately has no pretix voucher quota configured.
    conference = conference_factory(pretix_speaker_voucher_quota_id=None)
    grant_1 = grant_factory(status=Grant.Status.confirmed, conference=conference)
    grant_2 = grant_factory(status=Grant.Status.confirmed, conference=conference)
    request = rf.get('/')
    create_grant_vouchers_on_pretix(None, request=request, queryset=Grant.objects.filter(conference=conference))
    mock_create_voucher.assert_not_called()
    mock_messages.error.assert_called_once_with(request, 'Please configure the grant voucher quota ID in the conference settings')
    # Re-read from the DB to prove nothing was persisted on the grants.
    grant_1.refresh_from_db()
    grant_2.refresh_from_db()
    assert (grant_1.pretix_voucher_id is None)
    assert (grant_1.voucher_code is None)
    assert (grant_2.pretix_voucher_id is None)
    assert (grant_2.voucher_code is None)
class Fusion(nn.Module):
    """Fuse four (N, 256, H, W) feature maps with learned per-channel scales,
    then 4x4 average-pool the weighted sum."""

    def __init__(self):
        super(Fusion, self).__init__()
        # One learnable per-channel scale per input branch.
        self.a1 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.a2 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.a3 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.a4 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.avgpool = nn.AvgPool2d(kernel_size=4, stride=4, padding=0)

    def forward(self, x1, x2, x3, x4):
        """Return avgpool(a1*x1 + a2*x2 + a3*x3 + a4*x4)."""
        scales = (self.a1, self.a2, self.a3, self.a4)
        weighted = [scale.expand_as(feat) * feat for scale, feat in zip(scales, (x1, x2, x3, x4))]
        return self.avgpool(sum(weighted))
# NOTE(review): the three lines below are CLI decorators with their names
# stripped (likely "@cli.command()" plus two "@click.option(...)" lines, or
# the Flask-Script equivalent); as written they are syntax errors.
.command()
('--group_name', help='HostGroup for this named.conf', default=None)
('--file_path', default=None, help='Path of file named.conf')
def import_named_conf(group_name, file_path):
    """Import a BIND named.conf for a host group: parse the file and persist
    the conf header plus its zone entries in one transaction.

    Silently aborts (printing a marker) when the host group does not exist or
    a named.conf was already imported for it.
    """
    if (not DnsHostGroup.query.filter_by(group_name=group_name).first()):
        print((u'%s,' % group_name))
        return
    if DnsNamedConf.query.filter_by(name=group_name).first():
        print(u'named.conf,')
        return
    (header, zones) = parse_named_conf(file_path, group_name)
    named_conf = DnsNamedConf(name=group_name, conf_content=header)
    # Single subtransaction: header row and all zone rows commit atomically.
    with db.session.begin(subtransactions=True):
        db.session.add(named_conf)
        db.session.bulk_insert_mappings(DnsZoneConf, zones)
def make_numbered_dir(root: Path, prefix: str, mode: int=448) -> Path:
    """Create the next numbered directory `{prefix}{N}` under `root`.

    Retries up to 10 times to tolerate races with concurrent creators; on
    success also repoints the `{prefix}current` symlink at the new directory.
    Raises OSError when all attempts fail. `mode` defaults to 0o700 (448).
    """
    for _attempt in range(10):
        # Highest existing suffix number, or -1 when none exist yet.
        highest = max(map(parse_num, find_suffixes(root, prefix)), default=(- 1))
        candidate = root.joinpath(f'{prefix}{highest + 1}')
        try:
            candidate.mkdir(mode=mode)
        except Exception:
            # Another process may have grabbed this number — retry.
            continue
        _force_symlink(root, (prefix + 'current'), candidate)
        return candidate
    raise OSError('could not create numbered dir with prefix {prefix} in {root} after 10 tries'.format(prefix=prefix, root=root))
def remove_tag_from_timemachine(repo_id, tag_name, manifest_id, include_submanifests=False, is_alive=False):
    """Expire tag rows immediately by backdating lifetime_end_ms past the
    namespace's time-machine window, making them garbage-collectable now.

    is_alive=True: expire the single currently-alive tag (guarded by an
    optimistic check on its current lifetime_end_ms). is_alive=False: expire
    every matching tag already inside the time-machine window, staggering end
    times by 1ms so orderings remain distinct. Optionally cascades to child
    manifests. Returns False on failure, truthy when something was updated.
    """
    try:
        # Look up the namespace owner's configured time-machine window.
        namespace = User.select(User.removed_tag_expiration_s).join(Repository, on=(Repository.namespace_user == User.id)).where((Repository.id == repo_id)).get()
    except User.DoesNotExist:
        return False
    time_machine_ms = (namespace.removed_tag_expiration_s * 1000)
    now_ms = get_epoch_timestamp_ms()
    increment = 1
    updated = False
    if is_alive:
        alive_tag = get_tag(repo_id, tag_name)
        if (alive_tag is None):
            return False
        # Optimistic concurrency: the extra lifetime_end_ms predicate makes
        # the update a no-op if someone else already expired/changed the tag.
        updated = Tag.update(lifetime_end_ms=(now_ms - time_machine_ms)).where((Tag.id == alive_tag)).where((Tag.lifetime_end_ms == alive_tag.lifetime_end_ms)).execute()
        if (updated != 1):
            return False
        else:
            updated = True
    else:
        with db_transaction():
            for tag in get_tags_within_timemachine_window(repo_id, tag_name, manifest_id, time_machine_ms):
                # Stagger expirations by `increment` ms per row.
                Tag.update(lifetime_end_ms=((now_ms - time_machine_ms) - increment)).where((Tag.id == tag)).execute()
                updated = True
                increment = (increment + 1)
    if (updated and include_submanifests):
        reset_child_manifest_expiration(repo_id, manifest_id, (now_ms - time_machine_ms))
    return updated
class ContrastResNet(nn.Module):
    """Backbone encoder plus projection head for contrastive training.

    forward() returns (logits, spatial features, projected global features,
    pooled features) so callers can mix classification and contrastive losses.
    """

    def __init__(self, opt, n_cls):
        super(ContrastResNet, self).__init__()
        self.encoder = create_model(opt.model, n_cls, dataset=opt.dataset)
        encoder_dim = self.encoder.feat_dim
        # Projection head maps pooled features to the contrastive space.
        self.head = Projection(dim=encoder_dim, projection_size=opt.feat_dim, hidden_size=encoder_dim)
        self.global_cont_loss = opt.global_cont_loss
        self.spatial_cont_loss = opt.spatial_cont_loss

    def forward(self, x):
        (feat, outputs) = self.encoder(x, is_feat=True)
        # Penultimate feature map (spatial) and final pooled vector.
        spatial_f, avg_pool_feat = feat[-2], feat[-1]
        global_f = self.head(avg_pool_feat)
        return (outputs, spatial_f, global_f, avg_pool_feat)
def hierarchical_model_data():
    """Generate synthetic data for a two-level hierarchical (random-effects)
    model and return every ingredient in a dict.

    Group means are drawn as sigma_group_mu * N(0, 1) over the group grid,
    then observations are sigma * N(0, 1) + group_mu + mu, broadcasting the
    group means across the leading data dimension.
    """
    group_coords = {'group_d1': np.arange(3), 'group_d2': np.arange(7)}
    data_coords = {'data_d': np.arange(11), **group_coords}
    group_shape = tuple(len(v) for v in group_coords.values())
    data_shape = tuple(len(v) for v in data_coords.values())
    mu = -5.0
    sigma_group_mu = 3
    sigma = 3.0
    # Draw group-level effects first, then observation noise (order matters
    # only for reproducibility under a fixed RNG seed).
    group_mu = sigma_group_mu * np.random.randn(*group_shape)
    data = sigma * np.random.randn(*data_shape) + group_mu + mu
    return {
        'group_coords': group_coords,
        'group_shape': group_shape,
        'data_coords': data_coords,
        'data_shape': data_shape,
        'mu': mu,
        'sigma_group_mu': sigma_group_mu,
        'sigma': sigma,
        'group_mu': group_mu,
        'data': data,
    }
class TextLayoutGroup(graphics.Group):
    """Graphics group that binds a glyph texture and enables alpha blending
    for rendering text layouts."""

    def __init__(self, texture, program, order=1, parent=None):
        super().__init__(order=order, parent=parent)
        self.texture = texture    # glyph atlas texture to bind
        self.program = program    # shader program used for the layout

    def set_state(self):
        # Activate the shader, disable scissoring, bind the glyph texture on
        # unit 0, and enable standard alpha blending.
        self.program.use()
        self.program['scissor'] = False
        glActiveTexture(GL_TEXTURE0)
        glBindTexture(self.texture.target, self.texture.id)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def unset_state(self):
        # Blending is disabled, but the texture binding is left as-is.
        glDisable(GL_BLEND)
        self.program.stop()

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__, self.texture)

    def __eq__(self, other):
        # Groups compare equal when they would produce identical GL state,
        # allowing the renderer to batch their vertex lists together.
        # NOTE(review): `self.program.id is other.program.id` uses identity on
        # what is presumably an int id — relies on small-int interning or on
        # ids being the same object; confirm `==` was not intended.
        return ((other.__class__ is self.__class__) and (self.parent is other.parent) and (self.program.id is other.program.id) and (self.order == other.order) and (self.texture.target == other.texture.target) and (self.texture.id == other.texture.id))

    def __hash__(self):
        # Must hash on the same fields __eq__ compares.
        return hash((id(self.parent), self.program.id, self.order, self.texture.target, self.texture.id))
class PartialConv2d(nn.Conv2d):
    """Partial convolution (Liu et al., "Image Inpainting for Irregular Holes
    Using Partial Convolutions"): a conv layer that renormalises its output by
    the fraction of valid (unmasked) input pixels under each kernel window.

    Extra keyword argument `multi_channel` selects a per-channel mask; all
    other arguments are forwarded to nn.Conv2d. forward() returns
    (output, updated_mask) because `return_mask` is always True here.
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before nn.Conv2d sees it.
        if ('multi_channel' in kwargs):
            self.multi_channel = kwargs.pop('multi_channel')
        else:
            self.multi_channel = False
        self.return_mask = True
        super(PartialConv2d, self).__init__(*args, **kwargs)
        # All-ones kernel used to count valid pixels per output location.
        if self.multi_channel:
            self.weight_maskUpdater = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0], self.kernel_size[1])
        else:
            self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1])
        # Total window size (in_ch * kH * kW) — numerator of the renorm ratio.
        self.slide_winsize = (self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2]) * self.weight_maskUpdater.shape[3]
        # Cached mask state, recomputed when the mask or input size changes.
        self.last_size = (None, None)
        self.update_mask = None
        self.mask_ratio = None

    def forward(self, input, mask=None):
        """Apply the partial convolution.

        `mask` is a {0,1} tensor marking valid pixels (None means all-valid).
        Returns (output, update_mask) where update_mask marks output locations
        with at least one valid input pixel.
        """
        # Recompute the mask statistics when a mask is given or the spatial
        # size changed since the last call (cached otherwise).
        if ((mask is not None) or (self.last_size != (input.data.shape[2], input.data.shape[3]))):
            self.last_size = (input.data.shape[2], input.data.shape[3])
            with torch.no_grad():
                if (self.weight_maskUpdater.type() != input.type()):
                    self.weight_maskUpdater = self.weight_maskUpdater.to(input)
                if (mask is None):
                    if self.multi_channel:
                        mask = torch.ones(input.data.shape[0], input.data.shape[1], input.data.shape[2], input.data.shape[3]).to(input)
                    else:
                        mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3]).to(input)
                # Count valid pixels under each window with an all-ones kernel.
                self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=1)
                # Renormalisation ratio: window size / valid-pixel count.
                self.mask_ratio = (self.slide_winsize / (self.update_mask + 1e-08))
                self.update_mask = torch.clamp(self.update_mask, 0, 1)
                self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
        if ((self.update_mask.type() != input.type()) or (self.mask_ratio.type() != input.type())):
            # BUG FIX: Tensor.to() is NOT in-place; the original discarded the
            # converted tensors, leaving the cached masks on the wrong
            # device/dtype. Reassign the results instead.
            self.update_mask = self.update_mask.to(input)
            self.mask_ratio = self.mask_ratio.to(input)
        # Convolve the masked input with the ordinary conv weights.
        raw_out = super(PartialConv2d, self).forward((torch.mul(input, mask) if (mask is not None) else input))
        if (self.bias is not None):
            # Renormalise the bias-free response, then re-add the bias and
            # zero out fully-invalid locations.
            bias_view = self.bias.view(1, self.out_channels, 1, 1)
            output = (torch.mul((raw_out - bias_view), self.mask_ratio) + bias_view)
            output = torch.mul(output, self.update_mask)
        else:
            output = torch.mul(raw_out, self.mask_ratio)
        if self.return_mask:
            return (output, self.update_mask)
        else:
            return output
class SpacedDiffusion(GaussianDiffusion):
    """A GaussianDiffusion that skips timesteps from a base process.

    `use_timesteps` selects which original steps to keep; betas are recomputed
    so that the retained steps reproduce the base process's cumulative alphas.
    Model calls are wrapped so the model still sees original timestep indices.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # Maps new (compressed) step index -> original step index.
        self.timestep_map = []
        self.original_num_steps = len(kwargs['betas'])
        # Build the base process only to read its alphas_cumprod schedule.
        base_diffusion = GaussianDiffusion(**kwargs)
        last_alpha_cumprod = 1.0
        new_betas = []
        for (i, alpha_cumprod) in enumerate(base_diffusion.alphas_cumprod):
            if (i in self.use_timesteps):
                # beta chosen so the retained steps keep the same cumulative
                # product of alphas as the base schedule.
                new_betas.append((1 - (alpha_cumprod / last_alpha_cumprod)))
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs['betas'] = np.array(new_betas)
        super().__init__(**kwargs)

    def p_mean_variance(self, model, *args, **kwargs):
        # Delegate with the model wrapped to translate timestep indices.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)

    def training_losses(self, model, *args, **kwargs):
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)

    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)

    def _wrap_model(self, model):
        # Idempotent: already-wrapped models pass through unchanged.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.rescale_timesteps, self.original_num_steps)

    def _scale_timesteps(self, t):
        # Scaling is handled by _WrappedModel, so this is the identity here.
        return t
class Season(ItemEntity):
    """A TV-show season entry for the Kodi plugin's directory listing."""

    isdir: ClassVar[bool] = True
    mediatype: ClassVar[str] = 'season'

    def __init__(self, *, index: int, parent: TVShow, item_data: Dict) -> None:
        super().__init__(parent=parent, item_data=item_data)
        self.index = index  # 1-based season number
        self.tvshow = self.parent
        # NOTE(review): this title looks garbled — presumably it was
        # f'Season {self.index}' (the label word was lost in extraction).
        self.title = f' {self.index}'
        self.item_id = self.tvshow.item_id
        self.url = self.plugin.routing.build_url('season_episodes', self.item_id, f'{self.index}/')
        # Watching info list is 0-based while season index is 1-based.
        self.watching_info = self.tvshow.watching_info['seasons'][(self.index - 1)]
        self.watching_status = self.watching_info['status']

    # NOTE(review): these two were probably decorated with @property upstream;
    # decorators appear to have been stripped from this source.
    def episodes(self) -> List['SeasonEpisode']:
        """Build SeasonEpisode entities for every episode, numbered from 1."""
        return [SeasonEpisode(parent=self, item_data=episode_item, index=i) for (i, episode_item) in enumerate(self.item['episodes'], 1)]

    def video_info(self) -> Dict:
        """The show's video info augmented with season-specific fields."""
        return {**self.tvshow.video_info.copy(), 'season': self.index, 'playcount': self.watching_status, 'mediatype': self.mediatype}
def export_onnx(model, path, force_cpu):
    """Export the model named `model` (weights at `<path>/<model>.pth`) to
    `<path>/<model>.onnx` and validate the result with the ONNX checker.

    Uses a fixed 1x3x1024x1024 random dummy input; `force_cpu` is forwarded
    to model_setenv to pick the execution device.
    """
    import onnx
    import onnxruntime
    import numpy as np
    onnx_file_name = os.path.join(path, model + '.onnx')
    model_weight_file = os.path.join(path, model + '.pth')
    dummy_input = Variable(torch.randn(1, 3, 1024, 1024))
    model_setenv(force_cpu)
    torch_model = get_model(model_weight_file)
    torch_model.eval()
    print('Export model ...')
    device = model_device()
    # ATEN fallback keeps export working for ops without native ONNX support.
    torch.onnx.export(
        torch_model,
        dummy_input.to(device),
        onnx_file_name,
        input_names=['input'],
        output_names=['output'],
        verbose=False,
        opset_version=10,
        operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
    )
    print('Checking model ...')
    onnx.checker.check_model(onnx.load(onnx_file_name))
    print('Done checking model ...')
class NMeasure(Predictor):
    def predict(self, weight=None):
        """Score candidate node pairs with the N-measure similarity:

            sqrt(2) * |N(a) ∩ N(b)| / sqrt(|N(a)|^2 + |N(b)|^2)

        Only strictly positive scores are recorded in the returned Scoresheet.
        `weight` is forwarded to the neighbourhood helpers (edge-weight key).
        """
        res = Scoresheet()
        for (a, b) in self.likely_pairs():
            w = ((math.sqrt(2) * neighbourhood_intersection_size(self.G, a, b, weight)) / math.sqrt(((neighbourhood_size(self.G, a, weight) ** 2) + (neighbourhood_size(self.G, b, weight) ** 2))))
            if (w > 0):
                res[(a, b)] = w
        return res
def _get_scheme_dict(distribution_name: str, prefix: Optional[str]=None) -> Dict[(str, str)]:
vars = {}
if (prefix is None):
installed_base = sysconfig.get_config_var('base')
assert installed_base
else:
vars['base'] = vars['platbase'] = installed_base = prefix
scheme_dict = sysconfig.get_paths(vars=vars)
scheme_dict['headers'] = os.path.join(sysconfig.get_path('include', vars={'installed_base': installed_base}), distribution_name)
return scheme_dict |
class Brotli(Codec):
    """Numcodecs-style codec wrapping imagecodecs' Brotli compression."""

    codec_id = 'imagecodecs_brotli'

    def __init__(self, level=None, mode=None, lgwin=None):
        # Compression quality, mode, and log2 window size; None picks the
        # library defaults.
        self.level = level
        self.mode = mode
        self.lgwin = lgwin

    def encode(self, buf):
        """Compress `buf` with Brotli using this codec's settings."""
        return imagecodecs.brotli_encode(buf, level=self.level, mode=self.mode, lgwin=self.lgwin)

    def decode(self, buf, out=None):
        """Decompress `buf`; `out`, when given, is flattened and written into."""
        return imagecodecs.brotli_decode(buf, out=_flat(out))
class InterpolatedFlowParameter(AbstractInterpolatedParameter):
    """Parameter that interpolates a node's previous-timestep flow against a
    flow/value lookup curve."""

    def __init__(self, model, node, flows, values, interp_kwargs=None, **kwargs):
        super().__init__(model, flows, values, interp_kwargs, **kwargs)
        self._node = node

    def _value_to_interpolate(self, ts, scenario_index):
        # x-coordinate for the interpolation: the node's flow in the
        # previous timestep for this scenario.
        return self._node.prev_flow[scenario_index.global_id]

    # BUG FIX: `load` takes `cls` as its first argument but was missing the
    # @classmethod decorator, so calling it on the class would have bound the
    # model instance to `cls`.
    @classmethod
    def load(cls, model, data):
        """Construct the parameter from a JSON-style `data` dict (consumes the
        'node', 'flows', 'values' and optional 'interp_kwargs' keys)."""
        node = model.nodes[data.pop('node')]
        flows = np.array(data.pop('flows'))
        values = np.array(data.pop('values'))
        interp_kwargs = data.pop('interp_kwargs', None)
        return cls(model, node, flows, values, interp_kwargs=interp_kwargs, **data)
def RSU4F(x, mid_ch=12, out_ch=3):
    """U^2-Net RSU-4F block (dilated, no up/down-sampling).

    Encoder: four REBNCONVs with dilations 1/2/4/8; decoder: three REBNCONVs
    over concatenated skip pairs with dilations 4/2/1. Returns the decoder
    output plus the input projection (residual connection).
    """
    skip = REBNCONV(x, out_ch, 1)              # x0: residual branch
    enc = [REBNCONV(skip, mid_ch, 1)]          # x1
    for dilation in (2, 4, 8):                 # x2, x3, x4
        enc.append(REBNCONV(enc[-1], mid_ch, dilation))
    h = enc[-1]
    # Decode by fusing with the matching encoder output at each dilation.
    for mate, channels, dilation in ((enc[2], mid_ch, 4), (enc[1], mid_ch, 2), (enc[0], out_ch, 1)):
        h = REBNCONV(tf.concat([h, mate], axis=(- 1)), channels, dilation)
    return (h + skip)
def _get_win_folder_from_registry(csidl_name):
    """Resolve a CSIDL folder name to a directory path via the Windows registry.

    Windows-only fallback (used when ctypes/COM lookups are unavailable).
    Raises KeyError for CSIDL names other than the three supported ones.
    """
    # Python 2/3 compatibility shim for the winreg module name.
    if PY3:
        import winreg as _winreg
    else:
        import _winreg
    # Map the CSIDL constant name to its "Shell Folders" registry value name.
    shell_folder_name = {'CSIDL_APPDATA': 'AppData', 'CSIDL_COMMON_APPDATA': 'Common AppData', 'CSIDL_LOCAL_APPDATA': 'Local AppData'}[csidl_name]
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')
    (dir, type) = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
class ExpandAugment(DeformableAugment):
    """Deformable augmentation that expands a structure mask outward.

    Wraps generate_field_expand with stored settings; augment() returns the
    resulting (transform, displacement-vector-field) pair.
    """

    def __init__(self, mask, vector_expand=(10, 10, 10), gaussian_smooth=5, bone_mask=False):
        # Store the mask and expansion settings for later field generation.
        self.mask = mask
        self.vector_expand = vector_expand
        self.gaussian_smooth = gaussian_smooth
        self.bone_mask = bone_mask

    def augment(self):
        """Generate the expansion field; the first return value is unused."""
        field = generate_field_expand(self.mask, bone_mask=self.bone_mask, expand=self.vector_expand, gaussian_smooth=self.gaussian_smooth)
        (_, transform, dvf) = field
        return (transform, dvf)
class Sponsorship(models.Model):
    """A sponsor's application/agreement for a given year, moving through
    applied -> approved/rejected -> finalized.

    NOTE(review): this source had every decorator stripped (e.g. `_property`,
    `_name.setter`, bare `def new(cls, ...)`). The @property / @classmethod /
    setter decorators below were restored based on internal usage — e.g.
    `self.next_status` and `self.unlocked` are read as attributes — and
    should be double-checked against upstream history.
    """

    APPLIED = 'applied'
    REJECTED = 'rejected'
    APPROVED = 'approved'
    FINALIZED = 'finalized'
    STATUS_CHOICES = [(APPLIED, 'Applied'), (REJECTED, 'Rejected'), (APPROVED, 'Approved'), (FINALIZED, 'Finalized')]

    objects = SponsorshipQuerySet.as_manager()

    submited_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL)
    sponsor = models.ForeignKey('Sponsor', null=True, on_delete=models.SET_NULL)
    status = models.CharField(max_length=20, choices=STATUS_CHOICES, default=APPLIED, db_index=True)
    # Once locked, user-facing edits are prevented (set when status advances).
    locked = models.BooleanField(default=False)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    applied_on = models.DateField(auto_now_add=True)
    approved_on = models.DateField(null=True, blank=True)
    rejected_on = models.DateField(null=True, blank=True)
    finalized_on = models.DateField(null=True, blank=True)
    year = models.PositiveIntegerField(null=True, validators=YEAR_VALIDATORS, db_index=True)
    for_modified_package = models.BooleanField(default=False, help_text="If true, it means the user customized the package's benefits. Changes are listed under section 'User Customizations'.")
    level_name_old = models.CharField(max_length=64, default='', blank=True, help_text='DEPRECATED: shall be removed after manual data sanity check.', verbose_name='Level name')
    package = models.ForeignKey(SponsorshipPackage, null=True, on_delete=models.SET_NULL)
    sponsorship_fee = models.PositiveIntegerField(null=True, blank=True)
    overlapped_by = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
    renewal = models.BooleanField(null=True, blank=True, help_text='If true, it means the sponsorship is a renewal of a previous sponsorship and will use the renewal template for contracting.')
    assets = GenericRelation(GenericAsset)

    class Meta():
        permissions = [('sponsor_publisher', 'Can access sponsor placement API')]

    @property
    def level_name(self):
        """Prefer the package name; fall back to the deprecated stored name."""
        return (self.package.name if self.package else self.level_name_old)

    @level_name.setter
    def level_name(self, value):
        self.level_name_old = value

    @property
    def user_customizations(self):
        """Diff between this sponsorship's benefits and its package's defaults."""
        benefits = [b.sponsorship_benefit for b in self.benefits.select_related('sponsorship_benefit')]
        return self.package.get_user_customization(benefits)

    def __str__(self):
        repr = f'{self.level_name} - {self.year} - ({self.get_status_display()}) for sponsor {self.sponsor.name}'
        if (self.start_date and self.end_date):
            fmt = '%m/%d/%Y'
            start = self.start_date.strftime(fmt)
            end = self.end_date.strftime(fmt)
            repr += f' [{start} - {end}]'
        return repr

    def save(self, *args, **kwargs):
        # Auto-lock once the application advances past APPLIED, unless the
        # caller is explicitly updating the `locked` field itself.
        if ('locked' not in kwargs.get('update_fields', [])):
            if (self.status != self.APPLIED):
                self.locked = True
        return super().save(*args, **kwargs)

    @classmethod
    def new(cls, sponsor, benefits, package=None, submited_by=None):
        """Create a new application for `sponsor` with the given benefits.

        Flags the sponsorship as customized when benefits deviate from the
        package (or there is no package). Raises
        SponsorWithExistingApplicationException if one is already in progress.
        """
        for_modified_package = False
        package_benefits = []
        if (package and package.has_user_customization(benefits)):
            package_benefits = package.benefits.all()
            for_modified_package = True
        elif (not package):
            for_modified_package = True
        if cls.objects.in_progress().filter(sponsor=sponsor).exists():
            raise SponsorWithExistingApplicationException(f'Sponsor pk: {sponsor.pk}')
        sponsorship = cls.objects.create(submited_by=submited_by, sponsor=sponsor, level_name=('' if (not package) else package.name), package=package, sponsorship_fee=(None if (not package) else package.sponsorship_amount), for_modified_package=for_modified_package, year=SponsorshipCurrentYear.get_year())
        for benefit in benefits:
            added_by_user = (for_modified_package and (benefit not in package_benefits))
            SponsorBenefit.new_copy(benefit, sponsorship=sponsorship, added_by_user=added_by_user)
        return sponsorship

    @property
    def estimated_cost(self):
        """Sum of the internal values of all attached benefits (0 when none)."""
        return (self.benefits.aggregate(Sum('benefit_internal_value'))['benefit_internal_value__sum'] or 0)

    @property
    def verbose_sponsorship_fee(self):
        """The fee spelled out in words (for contracts); 0 when unset."""
        if (self.sponsorship_fee is None):
            return 0
        return num2words(self.sponsorship_fee)

    @property
    def agreed_fee(self):
        """The fee once agreed (approved/finalized, or uncustomized package);
        None (implicitly or explicitly) when no fee can be considered agreed."""
        valid_status = [Sponsorship.APPROVED, Sponsorship.FINALIZED]
        if (self.status in valid_status):
            return self.sponsorship_fee
        try:
            benefits = [sb.sponsorship_benefit for sb in self.package_benefits.all().select_related('sponsorship_benefit')]
            if (self.package and (not self.package.has_user_customization(benefits))):
                return self.sponsorship_fee
        except SponsorshipPackage.DoesNotExist:
            return None

    @property
    def is_active(self):
        """Finalized and not yet past its end date."""
        conditions = [(self.status == self.FINALIZED), (self.end_date and (self.end_date > date.today()))]
        # BUG FIX: the original built `conditions` but never returned —
        # the property implicitly returned None.
        return all(conditions)

    def reject(self):
        """Transition to REJECTED (valid from APPLIED only) and lock."""
        if (self.REJECTED not in self.next_status):
            msg = f"Can't reject a {self.get_status_display()} sponsorship."
            raise InvalidStatusException(msg)
        self.status = self.REJECTED
        self.locked = True
        self.rejected_on = timezone.now().date()

    def approve(self, start_date, end_date):
        """Transition to APPROVED with a validated date range and lock."""
        if (self.APPROVED not in self.next_status):
            msg = f"Can't approve a {self.get_status_display()} sponsorship."
            raise InvalidStatusException(msg)
        if (start_date >= end_date):
            msg = 'Start date greater or equal than end date'
            raise SponsorshipInvalidDateRangeException(msg)
        self.status = self.APPROVED
        self.locked = True
        self.start_date = start_date
        self.end_date = end_date
        self.approved_on = timezone.now().date()

    def rollback_to_editing(self):
        """Return the sponsorship to APPLIED, deleting a still-draft contract.

        Disallowed for finalized sponsorships or non-draft contracts.
        """
        accepts_rollback = [self.APPLIED, self.APPROVED, self.REJECTED]
        if (self.status not in accepts_rollback):
            msg = f"Can't rollback to edit a {self.get_status_display()} sponsorship."
            raise InvalidStatusException(msg)
        try:
            if (not self.contract.is_draft):
                status = self.contract.get_status_display()
                msg = f"Can't rollback to edit a sponsorship with a {status} Contract."
                raise InvalidStatusException(msg)
            self.contract.delete()
        except ObjectDoesNotExist:
            pass
        self.status = self.APPLIED
        self.approved_on = None
        self.rejected_on = None

    @property
    def unlocked(self):
        return (not self.locked)

    @property
    def verified_emails(self):
        """Submitter's email plus the sponsor's verified contact emails."""
        emails = [self.submited_by.email]
        if self.sponsor:
            emails = self.sponsor.verified_emails(initial_emails=emails)
        return emails

    @property
    def admin_url(self):
        return reverse('admin:sponsors_sponsorship_change', args=[self.pk])

    @property
    def contract_admin_url(self):
        if (not self.contract):
            return ''
        return reverse('admin:sponsors_contract_change', args=[self.contract.pk])

    @property
    def detail_url(self):
        return reverse('users:sponsorship_application_detail', args=[self.pk])

    @property
    def package_benefits(self):
        """Benefits that came with the package (not user-added)."""
        return self.benefits.filter(added_by_user=False)

    @property
    def added_benefits(self):
        """Benefits the user added on top of the package."""
        return self.benefits.filter(added_by_user=True)

    @property
    def open_for_editing(self):
        return ((self.status == self.APPLIED) or self.unlocked)

    @property
    def next_status(self):
        """Legal status transitions from the current status."""
        states_map = {self.APPLIED: [self.APPROVED, self.REJECTED], self.APPROVED: [self.FINALIZED], self.REJECTED: [], self.FINALIZED: []}
        return states_map[self.status]

    @property
    def previous_effective_date(self):
        """Start date of the sponsor's previous (next-most-recent) sponsorship."""
        if (len(self.sponsor.sponsorship_set.all().order_by('-year')) > 1):
            return self.sponsor.sponsorship_set.all().order_by('-year')[1].start_date
        return None
def construct_and_access_collection(use):
    """QIIME 2 usage example: build an artifact collection, pass it through an
    action, and pull a single member back out of the result collection."""
    ints_a = use.init_artifact('ints_a', single_int1_factory)
    ints_b = use.init_artifact('ints_b', single_int2_factory)
    # Assemble the two artifacts into a keyed collection input.
    rc_in = use.construct_artifact_collection('rc_in', {'a': ints_a, 'b': ints_b})
    (rc_out,) = use.action(use.UsageAction(plugin_id='dummy_plugin', action_id='dict_of_ints'), use.UsageInputs(ints=rc_in), use.UsageOutputNames(output='rc_out'))
    # Retrieve member 'b' from the output collection; the binding is unused
    # here because usage drivers record the access itself.
    ints_b_from_collection = use.get_artifact_collection_member('ints_b_from_collection', rc_out, 'b')
class BaselineTrain(nn.Module):
    """Standard (non-episodic) classifier used as the few-shot baseline.

    Wraps a backbone feature extractor and a classification head ('softmax'
    gives a plain linear layer, 'dist' a cosine-distance-based layer).
    NOTE: forward/forward_loss move tensors with .cuda(), so a GPU is required.
    """

    def __init__(self, model_func, num_class, loss_type='softmax'):
        super(BaselineTrain, self).__init__()
        self.feature = model_func()
        if (loss_type == 'softmax'):
            self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
            self.classifier.bias.data.fill_(0)
        elif (loss_type == 'dist'):
            # Cosine-distance classifier (Baseline++ variant).
            self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
        self.loss_type = loss_type
        self.num_class = num_class
        self.loss_fn = nn.CrossEntropyLoss()
        # When True, validation computes the Davies-Bouldin index instead.
        self.DBval = False

    def forward(self, x):
        """Return class scores for a batch of images (moved to GPU)."""
        x = Variable(x.cuda())
        out = self.feature.forward(x)
        scores = self.classifier.forward(out)
        return scores

    def forward_loss(self, x, y):
        """Cross-entropy loss between predicted scores and labels."""
        scores = self.forward(x)
        y = Variable(y.cuda())
        return self.loss_fn(scores, y)

    def train_loop(self, epoch, train_loader, optimizer):
        """One training epoch; logs the running average loss every 10 batches."""
        print_freq = 10
        avg_loss = 0
        for (i, (x, y)) in enumerate(train_loader):
            optimizer.zero_grad()
            loss = self.forward_loss(x, y)
            loss.backward()
            optimizer.step()
            avg_loss = (avg_loss + loss.item())
            if ((i % print_freq) == 0):
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), (avg_loss / float((i + 1)))))

    def test_loop(self, val_loader):
        # Baseline has no episodic validation; -1 signals "no val accuracy"
        # unless DB-index validation is enabled.
        if self.DBval:
            return self.analysis_loop(val_loader)
        else:
            return (- 1)

    def analysis_loop(self, val_loader, record=None):
        """Compute 1 / Davies-Bouldin index over per-class validation features
        (higher is better: tighter, better-separated clusters)."""
        class_file = {}
        for (i, (x, y)) in enumerate(val_loader):
            x = x.cuda()
            x_var = Variable(x)
            feats = self.feature.forward(x_var).data.cpu().numpy()
            labels = y.cpu().numpy()
            # Group feature vectors by class label.
            for (f, l) in zip(feats, labels):
                if (l not in class_file.keys()):
                    class_file[l] = []
                class_file[l].append(f)
        for cl in class_file:
            class_file[cl] = np.array(class_file[cl])
        DB = DBindex(class_file)
        print(('DB index = %4.2f' % DB))
        return (1 / DB)
# NOTE(review): these decorator lines appear truncated — presumably
# '@pytest.mark.parametrize'; restore the prefix before running.
.parametrize('func', [(lambda x: x.sum()), (lambda x: x.mean()), (lambda x: x.count()), (lambda x: x.size()), (lambda x: x.var(ddof=1)), (lambda x: x.std(ddof=1)), pytest.param((lambda x: x.var(ddof=0)), marks=pytest.mark.xfail)])
.parametrize('n', [1, 4])
.parametrize('getter', [(lambda df: df), (lambda df: df.x)])
.parametrize('grouper', [(lambda a: (a.x % 3)), (lambda a: 'y'), (lambda a: (a.index % 2)), (lambda a: ['y'])])
.parametrize('indexer', [(lambda g: g.x), (lambda g: g), (lambda g: g[['x']])])
def test_groupby_windowing_n(func, n, getter, grouper, indexer):
    """Streaming groupby aggregations over a row-count window of ``n`` rows must
    match the same aggregation applied to the equivalent pandas slice."""
    df = pd.DataFrame({'x': np.arange(10, dtype=float), 'y': ([1.0, 2.0] * 5)})
    sdf = DataFrame(example=df)
    def f(x):
        # Apply grouper/indexer/aggregation in the same way to both the
        # streaming window and the reference pandas frame.
        return func(indexer(x.groupby(grouper(x))))
    L = f(sdf.window(n=n)).stream.gather().sink_to_list()
    diff = 3
    # Emit the frame in chunks of 3 rows, then an empty chunk (5 emissions total).
    for i in range(0, 10, diff):
        sdf.emit(df.iloc[i:(i + diff)])
    sdf.emit(df.iloc[:0])
    assert (len(L) == 5)
    # First window: only the rows that fit in the n-row window after chunk 1.
    first = df.iloc[max(0, (diff - n)):diff]
    assert_eq(L[0], f(first))
    # Last window: the final n rows of the frame.
    last = df.iloc[(len(df) - n):]
    assert_eq(L[(- 1)], f(last))
class UnformattedDocument(AbstractDocument):
    """Document whose styles apply uniformly to the entire text: every
    set_style / set_paragraph_style call is widened to cover [0, len(text))."""

    def __init__(self, text=''):
        super().__init__(text)
        # Single flat attribute -> value mapping shared by the whole document.
        self.styles = {}

    def get_style_runs(self, attribute):
        """Return a one-run iterator covering the full text with the attribute's value."""
        value = self.styles.get(attribute)
        return runlist.ConstRunIterator(len(self.text), value)

    def get_style(self, attribute, position=None):
        """Return the attribute's value; position is ignored (style is uniform)."""
        return self.styles.get(attribute)

    def set_style(self, start, end, attributes):
        # start/end are deliberately ignored; styles always span the document.
        return super().set_style(0, len(self.text), attributes)

    def _set_style(self, start, end, attributes):
        self.styles.update(attributes)

    def set_paragraph_style(self, start, end, attributes):
        # Paragraph styles are likewise applied to the whole document.
        return super().set_paragraph_style(0, len(self.text), attributes)

    def get_font_runs(self, dpi=None):
        """Return a one-run iterator holding the single document-wide font."""
        ft = self.get_font(dpi=dpi)
        return runlist.ConstRunIterator(len(self.text), ft)

    def get_font(self, position=None, dpi=None):
        """Load the document font from the flat style mapping (position ignored)."""
        from pyglet import font
        font_name = self.styles.get('font_name')
        font_size = self.styles.get('font_size')
        bold = self.styles.get('bold', False)
        italic = self.styles.get('italic', False)
        stretch = self.styles.get('stretch', False)
        return font.load(font_name, font_size, bold=bold, italic=italic, stretch=stretch, dpi=dpi)

    def get_element_runs(self):
        # NOTE(review): uses self._text where other methods use self.text —
        # presumably the same underlying buffer on AbstractDocument; confirm.
        return runlist.ConstRunIterator(len(self._text), None)
# NOTE(review): decorator appears truncated — presumably
# '@pytest.mark.parametrize'; restore the prefix before running.
.parametrize('username,password', users)
def test_create_section(db, client, username, password):
    """POSTing a copy of each page must append it to its section's page list
    with the next 'order' value (for users permitted to create)."""
    client.login(username=username, password=password)
    instances = Page.objects.all()
    for instance in instances:
        section = instance.sections.first()
        if (section is not None):
            # Snapshot current (page, order) pairs and compute the next order.
            section_pages = list(section.section_pages.values_list('page', 'order'))
            order = (section.section_pages.aggregate(order=Max('order')).get('order') + 1)
            url = reverse(urlnames['list'])
            data = {'uri_prefix': instance.uri_prefix, 'uri_path': f'{instance.uri_path}_new_{username}', 'comment': instance.comment, 'attribute': (instance.attribute.pk if instance.attribute else ''), 'is_collection': instance.is_collection, 'title_en': instance.title_lang1, 'title_de': instance.title_lang2, 'help_en': instance.help_lang1, 'help_de': instance.help_lang2, 'verbose_name_en': instance.verbose_name_lang1, 'verbose_name_de': instance.verbose_name_lang2, 'sections': [section.id]}
            response = client.post(url, data, content_type='application/json')
            assert (response.status_code == status_map['create'][username]), response.json()
            if (response.status_code == 201):
                new_instance = Page.objects.get(id=response.json().get('id'))
                section.refresh_from_db()
                # The new page must be appended at exactly the computed order.
                assert ([*section_pages, (new_instance.id, order)] == list(section.section_pages.values_list('page', 'order')))
# NOTE(review): line below appears to be a truncated decorator reference
# (presumably '@_layout_config' or similar fixture marker); confirm.
_layout_config
def test_remove(manager):
    """Killing windows must keep the layout's client list and the group's
    focus history consistent: focus falls back to the previous entry and
    killed windows are dropped from the history."""
    one = manager.test_window('one')
    two = manager.test_window('two')
    three = manager.test_window('three')
    assert_focused(manager, 'three')
    assert (manager.c.group.info()['focus_history'] == ['one', 'two', 'three'])
    # Killing the focused window: the newly focused window must still be a
    # client known to the layout.
    manager.kill_window(three)
    assert (manager.c.window.info()['name'] in manager.c.layout.info()['clients'])
    manager.c.group.focus_by_name('two')
    manager.test_window('four')
    assert_focused(manager, 'four')
    assert (manager.c.group.info()['focus_history'] == ['one', 'two', 'four'])
    # Killing a non-focused window must not move focus.
    manager.kill_window(two)
    assert_focused(manager, 'four')
    assert (manager.c.group.info()['focus_history'] == ['one', 'four'])
    five = manager.test_window('five')
    manager.test_window('six')
    manager.c.group.focus_by_name('one')
    seven = manager.test_window('seven')
    manager.c.group.focus_by_name('six')
    assert_focused(manager, 'six')
    assert (manager.c.group.info()['focus_history'] == ['four', 'five', 'one', 'seven', 'six'])
    # Killing windows in the middle of the history removes just those entries.
    manager.kill_window(five)
    manager.kill_window(one)
    assert_focused(manager, 'six')
    assert (manager.c.group.info()['focus_history'] == ['four', 'seven', 'six'])
    manager.c.group.focus_by_name('seven')
    manager.kill_window(seven)
    assert (manager.c.window.info()['name'] in manager.c.layout.info()['clients'])
def flake(session: nox.Session) -> None:
    """Nox session running the full lint/type-check suite: ruff (lint and
    format check), mypy, and check-sdist."""
    pyproject_data = load_pyproject_toml()
    # Install build requirements plus every optional-dependency group the
    # linters/type checker need to resolve imports.
    install(session, *pyproject_data['build-system']['requires'], *pyproject_data['project']['optional-dependencies']['pep8test'], *pyproject_data['project']['optional-dependencies']['test'], *pyproject_data['project']['optional-dependencies']['ssh'], *pyproject_data['project']['optional-dependencies']['nox'])
    # Editable install of the vectors sub-package so mypy can see it.
    install(session, '-e', 'vectors/')
    session.run('ruff', '.')
    session.run('ruff', 'format', '--check', '.')
    session.run('mypy', 'src/cryptography/', 'vectors/cryptography_vectors/', 'tests/', 'release.py', 'noxfile.py')
    session.run('check-sdist', '--no-isolation')
def export_chunk_to_wav_file(chunk, folder_name: str, i: int, word: str, wave_file) -> None:
    """Write one audio chunk to ``<folder_name>/chunk_<i>_<word>.wav``.

    The word is stripped of all non-alphanumeric characters for the filename,
    and the output file inherits the wave parameters of ``wave_file``.
    """
    sanitized = re.sub('[^A-Za-z0-9]+', '', word)
    target_path = os.path.join(folder_name, f'chunk_{i}_{sanitized}.wav')
    with wave.open(target_path, 'wb') as out_file:
        out_file.setparams(wave_file.getparams())
        out_file.writeframes(chunk)
# NOTE(review): decorator appears truncated — presumably
# '@pytest.mark.parametrize'; restore the prefix before running.
# The table maps (repo fixture, parser, translator) to lists of
# (commit_messages, prerelease, major_on_zero, expected_new_version) cases.
.parametrize('repo, commit_parser, translator, commit_messages,prerelease, major_on_zero, expected_new_version', xdist_sort_hack([(lazy_fixture(repo_fixture_name), lazy_fixture(parser_fixture_name), translator, commit_messages, prerelease, major_on_zero, expected_new_version) for ((repo_fixture_name, parser_fixture_name, translator), values) in {('repo_with_no_tags_angular_commits', 'default_angular_parser', VersionTranslator()): [*((commits, False, major_on_zero, '0.1.0') for major_on_zero in (True, False) for commits in ([], ['uninteresting'], ANGULAR_COMMITS_PATCH, ANGULAR_COMMITS_MINOR)), (ANGULAR_COMMITS_MAJOR, False, False, '0.1.0'), (ANGULAR_COMMITS_MAJOR, False, True, '1.0.0')], ('repo_with_single_branch_angular_commits', 'default_angular_parser', VersionTranslator()): [*((commits, False, major_on_zero, '0.1.1') for major_on_zero in (True, False) for commits in ([], ['uninteresting'])), *((ANGULAR_COMMITS_PATCH, False, major_on_zero, '0.1.2') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_MINOR, False, major_on_zero, '0.2.0') for major_on_zero in (True, False)), (ANGULAR_COMMITS_MAJOR, False, False, '0.2.0'), (ANGULAR_COMMITS_MAJOR, False, True, '1.0.0')], ('repo_with_single_branch_and_prereleases_angular_commits', 'default_angular_parser', VersionTranslator()): [*((commits, prerelease, major_on_zero, '0.2.0') for prerelease in (True, False) for major_on_zero in (True, False) for commits in ([], ['uninteresting'])), *((ANGULAR_COMMITS_PATCH, False, major_on_zero, '0.2.1') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_PATCH, True, major_on_zero, '0.2.1-rc.1') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_MINOR, False, major_on_zero, '0.3.0') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_MINOR, True, major_on_zero, '0.3.0-rc.1') for major_on_zero in (True, False)), (ANGULAR_COMMITS_MAJOR, False, True, '1.0.0'), (ANGULAR_COMMITS_MAJOR, True, True, '1.0.0-rc.1'), (ANGULAR_COMMITS_MAJOR, False, False, '0.3.0'), 
(ANGULAR_COMMITS_MAJOR, True, False, '0.3.0-rc.1')], ('repo_with_main_and_feature_branches_angular_commits', 'default_angular_parser', VersionTranslator(prerelease_token='beta')): [*((commits, True, major_on_zero, '0.3.0-beta.1') for major_on_zero in (True, False) for commits in ([], ['uninteresting'])), *((commits, False, major_on_zero, '0.3.0') for major_on_zero in (True, False) for commits in ([], ['uninteresting'])), *((ANGULAR_COMMITS_PATCH, False, major_on_zero, '0.3.0') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_PATCH, True, major_on_zero, '0.3.0-beta.2') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_MINOR, False, major_on_zero, '0.3.0') for major_on_zero in (True, False)), *((ANGULAR_COMMITS_MINOR, True, major_on_zero, '0.3.0-beta.2') for major_on_zero in (True, False)), (ANGULAR_COMMITS_MAJOR, False, True, '1.0.0'), (ANGULAR_COMMITS_MAJOR, True, True, '1.0.0-beta.1'), (ANGULAR_COMMITS_MAJOR, False, False, '0.3.0'), (ANGULAR_COMMITS_MAJOR, True, False, '0.3.0-beta.2')]}.items() for (commit_messages, prerelease, major_on_zero, expected_new_version) in values]))
def test_algorithm_with_zero_dot_versions_angular(repo, file_in_repo, commit_parser, translator, commit_messages, prerelease, expected_new_version, major_on_zero):
    """Commit each message to the fixture repo, then check next_version()
    computes the expected semantic version for the 0.x rules under test."""
    for commit_message in commit_messages:
        add_text_to_file(repo, file_in_repo)
        repo.git.commit(m=commit_message)
    new_version = next_version(repo, translator, commit_parser, prerelease, major_on_zero)
    assert (new_version == Version.parse(expected_new_version, prerelease_token=translator.prerelease_token))
def create_gaussian_diffusion(*, beta_schedule='linear', beta_linear_start=1e-06, beta_linear_end=0.01, steps=1000, accsteps=1000):
    """Build a GaussianDiffusion instance from a named beta schedule.

    All arguments are keyword-only; the linear start/end values only apply
    to schedules that use them.
    """
    schedule_betas = gd.make_beta_schedule(
        schedule=beta_schedule,
        n_timestep=steps,
        linear_start=beta_linear_start,
        linear_end=beta_linear_end,
    )
    return gd.GaussianDiffusion(betas=schedule_betas, accsteps=accsteps)
def normalize(inp, activation, reuse, scope):
    """Apply the normalization selected by FLAGS.norm to ``inp``.

    'batch_norm' / 'layer_norm' delegate to tf_layers with the activation
    fused in; the literal string 'None' applies only the activation (or is a
    no-op when activation is None). Any other value raises ValueError.
    """
    norm_kind = FLAGS.norm
    if norm_kind == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    if norm_kind == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    if norm_kind == 'None':
        return activation(inp) if activation is not None else inp
    raise ValueError('Please set correct normalization.')
# NOTE(review): decorator appears truncated — presumably
# '@receiver(post_save, sender=Nomination)'; restore before running.
(post_save, sender=Nomination)
def purge_nomination_pages(sender, instance, created, **kwargs):
    """Signal handler: purge cached pages affected by a saved Nomination —
    the nomination itself, its nominee, and the election's nominees list."""
    # Skip fixture loading / raw saves, where related objects may be absent.
    if kwargs.get('raw', False):
        return
    purge_url(instance.get_absolute_url())
    if instance.nominee:
        purge_url(instance.nominee.get_absolute_url())
    if instance.election:
        purge_url(reverse('nominations:nominees_list', kwargs={'election': instance.election.slug}))
class VIIRSLSTHandler(VIIRSJRRFileHandler):
    """VIIRS LST file handler that applies the file's manual scale/offset
    pairs to the raw variables at load time."""

    # Variable name -> (scale-factor variable, offset variable) in the file.
    _manual_scalings = {'VLST': ('LST_ScaleFact', 'LST_Offset'), 'emis_m15': ('LSE_ScaleFact', 'LSE_Offset'), 'emis_m16': ('LSE_ScaleFact', 'LSE_Offset'), 'emis_bbe': ('LSE_ScaleFact', 'LSE_Offset'), 'Satellite_Azimuth_Angle': ('AZI_ScaleFact', 'AZI_Offset')}

    def __init__(self, *args, **kwargs):
        """Initialize like the base handler, then rescale known variables."""
        super().__init__(*args, **kwargs)
        self._scale_data()

    def _scale_data(self):
        """Apply ``data * scale + offset`` in place for each variable that has
        a manual scaling entry."""
        for var_name in list(self.nc.variables.keys()):
            scaling = self._manual_scalings.get(var_name)
            if scaling is None:
                continue
            data_arr = self.nc[var_name]
            scale = self.nc[scaling[0]]
            offset = self.nc[scaling[1]]
            data_arr.data = data_arr.data * scale.data + offset.data
            self.nc[var_name] = data_arr
def test_latex_flg_GaPrinter():
    """GaPrinter output must be unchanged while GaLatexPrinter redirection is
    active (redirect/restore toggles global printer state)."""
    (g3d, e_1, e_2, e_3) = Ga.build('e*1|2|3')
    t = Symbol('theta')
    # A multivector whose only surviving part is the scalar symbol.
    mv = (t + (0 * e_1))
    assert (GaPrinter().doprint(mv) == 'theta')
    GaLatexPrinter.redirect()
    try:
        assert (GaPrinter().doprint(mv) == 'theta')
    finally:
        # Always restore the global printer, even if the assertion fails.
        GaLatexPrinter.restore()
class RunPythonPluginTest(unittest.TestCase):
    """End-to-end checks for run_python_file: stdout capture and exit codes."""

    def _write_script(self, source):
        """Persist a throwaway script; returning the handle keeps the file alive."""
        handle = tempfile.NamedTemporaryFile()
        handle.write(source.encode('utf-8'))
        handle.flush()
        return handle

    def test_success_execution(self):
        """A script that exits normally reports 'success' with its stdout."""
        script = self._write_script("print('Hello world!')")
        (output_id, output_data) = run_python_file(RunPythonFileInput(script.name))
        self.assertEqual('success', output_id)
        self.assertEqual('Hello world!\n', output_data.stdout)

    def test_error_execution(self):
        """A nonzero exit reports 'error' with the exit code and stdout."""
        script = self._write_script("import sys\nprint('Hello world!')\nsys.exit(42)\n")
        (output_id, output_data) = run_python_file(RunPythonFileInput(script.name))
        self.assertEqual('error', output_id)
        self.assertEqual(42, output_data.exit_code)
        self.assertEqual('Hello world!\n', output_data.stdout)
class DeformableConv2d(nn.Module):
    """Deformable convolution v2 layer (offset + modulation).

    Three sub-convolutions share the same geometry: ``offset_conv`` predicts
    2*kH*kW per-pixel offsets, ``modulator_conv`` predicts kH*kW modulation
    scalars (passed through ``2*sigmoid``), and ``regular_conv`` holds the
    actual weights applied via ``torchvision.ops.deform_conv2d``. Offset and
    modulator branches are zero-initialized so the layer starts out exactly
    equivalent to a plain convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False):
        super(DeformableConv2d, self).__init__()
        # isinstance instead of `type(x) == tuple` (idiomatic, subclass-safe).
        assert isinstance(kernel_size, (int, tuple))
        # Normalize scalar kernel_size/stride to (h, w) pairs.
        kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.padding = padding
        # Offset branch: 2 coordinates (dy, dx) per kernel tap.
        self.offset_conv = nn.Conv2d(in_channels, ((2 * kernel_size[0]) * kernel_size[1]), kernel_size=kernel_size, stride=stride, padding=self.padding, bias=True)
        nn.init.constant_(self.offset_conv.weight, 0.0)
        nn.init.constant_(self.offset_conv.bias, 0.0)
        # Modulation branch: 1 scalar per kernel tap.
        self.modulator_conv = nn.Conv2d(in_channels, ((1 * kernel_size[0]) * kernel_size[1]), kernel_size=kernel_size, stride=stride, padding=self.padding, bias=True)
        nn.init.constant_(self.modulator_conv.weight, 0.0)
        nn.init.constant_(self.modulator_conv.bias, 0.0)
        # The convolution whose weights are actually applied at the deformed
        # sampling locations.
        self.regular_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=self.padding, bias=bias)

    def forward(self, x):
        """Apply the deformable convolution to a (N, C, H, W) input."""
        offset = self.offset_conv(x)
        # sigmoid in (0, 1) scaled to (0, 2); starts at 1.0 (identity) due to
        # the zero-initialized modulator branch.
        modulator = (2.0 * torch.sigmoid(self.modulator_conv(x)))
        x = torchvision.ops.deform_conv2d(input=x, offset=offset, weight=self.regular_conv.weight, bias=self.regular_conv.bias, padding=self.padding, mask=modulator, stride=self.stride)
        return x
def lookup_manifest_builder(repository_ref, builder_id, storage, legacy_signing_key):
    """Restore a _ManifestBuilder from the session, or return None.

    Returns None when the session has no saved state, the state fails to
    parse as JSON, parses to null, or belongs to a different builder id.
    """
    state_json = session.get(_SESSION_KEY)
    if state_json is None:
        return None
    try:
        state_tuple = json.loads(state_json)
    except ValueError:
        # Corrupt session payload; treat as missing.
        return None
    if state_tuple is None:
        return None
    state = _BuilderState(*state_tuple)
    if state.builder_id != builder_id:
        return None
    return _ManifestBuilder(repository_ref, state, storage, legacy_signing_key)
def main():
    """Run COCO-style evaluation on the result file and pickle the summary
    (bbox always; instance masks when --eval-mask is set)."""
    opts = parse_args()
    mkdir2(opts.out_dir)
    db = COCO(opts.annot_path)
    eval_summary = eval_ccf(db, opts.result_path, None, opts.per_class)
    out_path = join(opts.out_dir, 'eval_summary.pkl')
    # Only write when overwriting is allowed or the file doesn't exist yet.
    if (opts.overwrite or (not isfile(out_path))):
        pickle.dump(eval_summary, open(out_path, 'wb'))
    if opts.eval_mask:
        print('Evaluating instance segmentation')
        eval_summary = eval_ccf(db, opts.result_path, iou_type='segm')
        out_path = join(opts.out_dir, 'eval_summary_mask.pkl')
        if (opts.overwrite or (not isfile(out_path))):
            pickle.dump(eval_summary, open(out_path, 'wb'))
class ReformerEncoderLayer(nn.Module):
    """Reformer encoder layer: LSH self-attention + feed-forward, each wrapped
    in pre/post processing, with stochastic depth (layer drop) at rate
    ``death_rate``."""

    def __init__(self, opt, death_rate=0.0):
        # Plain attributes may be set before super().__init__(); only
        # Parameters/Modules require Module.__init__ to have run.
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        d_model = opt.model_size
        p = opt.dropout
        super(ReformerEncoderLayer, self).__init__()
        self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
        self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
        self.self_attention = LSHSelfAttention(opt)
        self.feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, opt.variational_dropout)

    def forward(self, input, attn_mask):
        """Apply attention + FFN sublayers; during training the whole layer is
        skipped with probability ``death_rate`` (stochastic depth), and the
        surviving outputs are rescaled by 1/(1 - death_rate)."""
        coin = True
        if self.training:
            coin = (torch.rand(1)[0].item() >= self.death_rate)
        if coin:
            query = self.preprocess_attn(input)
            (out, _, _) = self.self_attention(query, attn_mask)
            if (self.training and (self.death_rate > 0)):
                # Inverse scaling keeps the expected activation magnitude
                # consistent between train (layers dropped) and eval.
                out = (out / (1 - self.death_rate))
            input = self.postprocess_attn(out, input)
            out = self.feedforward(self.preprocess_ffn(input))
            if (self.training and (self.death_rate > 0)):
                out = (out / (1 - self.death_rate))
            input = self.postprocess_ffn(out, input)
        return input
def test_caption_query_get_by_language_code_when_not_exists():
    """Indexing a CaptionQuery with an unknown language code must raise KeyError."""
    caption1 = Caption({'url': 'url1', 'name': {'simpleText': 'name1'}, 'languageCode': 'en', 'vssId': '.en'})
    caption2 = Caption({'url': 'url2', 'name': {'simpleText': 'name2'}, 'languageCode': 'fr', 'vssId': '.fr'})
    caption_query = CaptionQuery(captions=[caption1, caption2])
    with pytest.raises(KeyError):
        # 'hello' matches neither 'en' nor 'fr'.
        assert (caption_query['hello'] is not None)
# NOTE(review): line below appears to be a truncated decorator — presumably
# '@with_fixtures(WebFixture)' or similar; restore before running.
_fixtures(WebFixture)
def test_form_encoding(web_fixture):
    """A form gains enctype='multipart/form-data' only once a file input is added."""
    fixture = web_fixture
    class DomainObject():
        fields = ExposedNames()
        fields.file = (lambda i: FileField(allow_multiple=False, label='Attached files'))
    domain_object = DomainObject()
    form = Form(fixture.view, 'testform')
    # No file input yet: no enctype attribute at all.
    assert ('enctype' not in form.attributes.v)
    form.add_child(SimpleFileInput(form, domain_object.fields.file))
    assert (form.attributes.v['enctype'] == 'multipart/form-data')
def _create_area(area_id, area_content):
    """Parse one legacy area-definition text block into an AreaDefinition.

    ``area_content`` is the brace-delimited body of a legacy config entry;
    it is rewritten into key=value lines that ConfigObj can parse.
    """
    from configobj import ConfigObj
    # Strip the block delimiters and convert 'KEY: value' to 'KEY = value'.
    config_obj = area_content.replace('{', '').replace('};', '')
    config_obj = ConfigObj([line.replace(':', '=', 1) for line in config_obj.splitlines()])
    config = config_obj.dict()
    config['REGION'] = area_id
    # ConfigObj returns a list when the value contained commas; rejoin it.
    if (not isinstance(config['NAME'], str)):
        config['NAME'] = ', '.join(config['NAME'])
    config['XSIZE'] = int(config['XSIZE'])
    config['YSIZE'] = int(config['YSIZE'])
    # AREA_EXTENT was written as a parenthesized tuple; strip the parens from
    # the first and last elements before converting all four to float.
    config['AREA_EXTENT'][0] = config['AREA_EXTENT'][0].replace('(', '')
    config['AREA_EXTENT'][3] = config['AREA_EXTENT'][3].replace(')', '')
    for (i, val) in enumerate(config['AREA_EXTENT']):
        config['AREA_EXTENT'][i] = float(val)
    config['PCS_DEF'] = _get_proj4_args(config['PCS_DEF'])
    return create_area_def(config['REGION'], config['PCS_DEF'], description=config['NAME'], proj_id=config['PCS_ID'], shape=(config['YSIZE'], config['XSIZE']), area_extent=config['AREA_EXTENT'])
def test_dict_incomplete_value() -> None:
    """DictIncompleteValue: str() rendering and key-based value lookup."""
    val = value.DictIncompleteValue(dict, [KVPair(TypedValue(int), KnownValue('x'))])
    assert ("<dict containing {int: Literal['x']}>" == str(val))
    val = value.DictIncompleteValue(dict, [KVPair(KnownValue('a'), TypedValue(int)), KVPair(KnownValue('b'), TypedValue(str))])
    # get_value resolves a known key to the corresponding pair's value type.
    assert (val.get_value(KnownValue('a'), CTX) == TypedValue(int))
def load_ade20k_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
    """Load ADE20K panoptic annotations into detectron2-style dataset dicts.

    Each record carries the RGB image path, panoptic PNG path, semantic PNG
    path, and per-segment info with category ids remapped to contiguous ids
    via ``meta`` (and an ``isthing`` flag added).
    """
    def _remap(segment_info, meta):
        # Mutates segment_info in place: contiguous category id + isthing flag.
        cat_id = segment_info['category_id']
        thing_map = meta['thing_dataset_id_to_contiguous_id']
        if cat_id in thing_map:
            segment_info['category_id'] = thing_map[cat_id]
            segment_info['isthing'] = True
        else:
            segment_info['category_id'] = meta['stuff_dataset_id_to_contiguous_id'][cat_id]
            segment_info['isthing'] = False
        return segment_info

    with PathManager.open(json_file) as f:
        json_info = json.load(f)
    ret = []
    for ann in json_info['annotations']:
        image_id = ann['image_id']
        # Annotation file names are PNGs; the RGB image shares the stem.
        stem = os.path.splitext(ann['file_name'])[0]
        image_file = os.path.join(image_dir, stem + '.jpg')
        label_file = os.path.join(gt_dir, ann['file_name'])
        sem_label_file = os.path.join(semseg_dir, ann['file_name'])
        segments_info = [_remap(x, meta) for x in ann['segments_info']]
        ret.append({
            'file_name': image_file,
            'image_id': image_id,
            'pan_seg_file_name': label_file,
            'sem_seg_file_name': sem_label_file,
            'segments_info': segments_info,
        })
    assert len(ret), f'No images found in {image_dir}!'
    # Sanity-check the first record's paths actually exist.
    assert PathManager.isfile(ret[0]['file_name']), ret[0]['file_name']
    assert PathManager.isfile(ret[0]['pan_seg_file_name']), ret[0]['pan_seg_file_name']
    assert PathManager.isfile(ret[0]['sem_seg_file_name']), ret[0]['sem_seg_file_name']
    return ret
class Tunnel(Transparent):
    """Fixed-destination proxy: every connection is forwarded to the address
    configured in ``self.param`` instead of the client's requested target."""

    def query_remote(self, sock):
        """Resolve the tunnel destination; fall back to ('tunnel', 0) when no
        parameter is configured."""
        if not self.param:
            return ('tunnel', 0)
        if sock:
            dst = sock.getsockname()
        else:
            dst = (None, None)
        return netloc_split(self.param, dst[0], dst[1])

    async def connect(self, reader_remote, writer_remote, rauth, host_name, port, **kw):
        """No handshake needed for a plain tunnel."""
        pass

    def udp_connect(self, rauth, host_name, port, data, **kw):
        """UDP payloads pass through unchanged."""
        return data
class Pager(Widget):
    """Curses widget that displays text (optionally ANSI-colored) or an image
    preview.

    The source may be a plain string, an indexable sequence of lines, or a
    readline-style stream (read lazily as the user scrolls).
    """
    # Class-level defaults; instance values are set in __init__/set_source.
    source = None
    source_is_stream = False
    old_source = None
    old_scroll_begin = 0
    old_startx = 0
    need_clear_image = False
    need_redraw_image = False
    max_width = None

    def __init__(self, win, embedded=False):
        Widget.__init__(self, win)
        self.embedded = embedded
        self.scroll_begin = 0
        self.scroll_extra = 0
        self.startx = 0
        self.markup = None
        self.lines = []
        self.image = None
        self.image_drawn = False

    def _close_source(self):
        """Close the current source if it is a stream; log (don't raise) failures."""
        if (self.source and self.source_is_stream):
            try:
                self.source.close()
            except OSError as ex:
                LOG.error('Unable to close pager source')
                LOG.exception(ex)

    def open(self):
        """Reset scroll/markup state and request a redraw."""
        self.scroll_begin = 0
        self.markup = None
        self.max_width = 0
        self.startx = 0
        self.need_redraw = True

    def clear_image(self, force=False):
        """Erase a previously drawn image from the widget area."""
        if ((force or self.need_clear_image) and self.image_drawn):
            self.fm.image_displayer.clear(self.x, self.y, self.wid, self.hei)
            self.need_clear_image = False
            self.image_drawn = False

    def close(self):
        """Tear down the current view: clear any image and close the source."""
        if self.image:
            self.need_clear_image = True
            self.clear_image()
        self._close_source()

    def destroy(self):
        self.clear_image(force=True)
        Widget.destroy(self)

    def finalize(self):
        # Park the cursor at the widget origin after drawing.
        self.fm.ui.win.move(self.y, self.x)

    def scrollbit(self, lines):
        """Scroll by `lines`, clamped to [0, len(lines) - height]."""
        target_scroll = (self.scroll_extra + lines)
        max_scroll = (len(self.lines) - self.hei)
        self.scroll_extra = max(0, min(target_scroll, max_scroll))
        self.need_redraw = True

    def draw(self):
        """Redraw text content when source/scroll/offset changed."""
        if self.need_clear_image:
            self.need_redraw = True
        if (self.old_source != self.source):
            self.old_source = self.source
            self.need_redraw = True
        if ((self.old_scroll_begin != self.scroll_begin) or (self.old_startx != self.startx)):
            self.old_startx = self.startx
            self.old_scroll_begin = self.scroll_begin
            self.need_redraw = True
        if self.need_redraw:
            self.win.erase()
            self.need_redraw_image = True
            self.clear_image()
            if (not self.image):
                scroll_pos = (self.scroll_begin + self.scroll_extra)
                line_gen = self._generate_lines(starty=scroll_pos, startx=self.startx)
                # Draw at most one generated line per screen row.
                for (line, i) in zip(line_gen, range(self.hei)):
                    self._draw_line(i, line)
            self.need_redraw = False

    def draw_image(self):
        """Draw the image preview; fall back to disabling image previews when
        the displayer reports the terminal can't show them."""
        if (self.image and self.need_redraw_image):
            # NOTE(review): source is cleared here, presumably so the text
            # path never redraws over the image — confirm.
            self.source = None
            self.need_redraw_image = False
            try:
                self.fm.image_displayer.draw(self.image, self.x, self.y, self.wid, self.hei)
            except ImgDisplayUnsupportedException as ex:
                self.fm.settings.preview_images = False
                self.fm.notify(ex, bad=True)
            except Exception as ex:
                self.fm.notify(ex, bad=True)
            else:
                self.image_drawn = True

    def _draw_line(self, i, line):
        """Render one line at row i, honoring ANSI markup when enabled."""
        if (self.markup is None):
            self.addstr(i, 0, line)
        elif (self.markup == 'ansi'):
            try:
                self.win.move(i, 0)
            except curses.error:
                pass
            else:
                # Alternate between attribute tuples and plain text chunks.
                for chunk in ansi.text_with_fg_bg_attr(line):
                    if isinstance(chunk, tuple):
                        self.set_fg_bg_attr(*chunk)
                    else:
                        self.addstr(chunk)

    def move(self, narg=None, **kw):
        """Scroll horizontally and/or vertically according to a Direction."""
        direction = Direction(kw)
        if direction.horizontal():
            self.startx = direction.move(direction=direction.right(), override=narg, maximum=self.max_width, current=self.startx, pagesize=self.wid, offset=((- self.wid) + 1))
        if direction.vertical():
            movement = {'direction': direction.down(), 'override': narg, 'current': self.scroll_begin, 'pagesize': self.hei, 'offset': ((- self.hei) + 1)}
            if self.source_is_stream:
                # Pre-read enough stream lines to know the real maximum.
                desired_position = direction.move(maximum=(len(self.lines) + 9999), **movement)
                self._get_line((desired_position + self.hei))
            self.scroll_begin = direction.move(maximum=len(self.lines), **movement)

    def press(self, key):
        self.fm.ui.keymaps.use_keymap('pager')
        self.fm.ui.press(key)

    def set_image(self, image):
        """Switch the pager to image mode, detaching any text source."""
        if self.image:
            self.need_clear_image = True
        self.image = image
        self._close_source()
        self.source = None
        self.source_is_stream = False

    def set_source(self, source, strip=False):
        """Attach a text source (str, sequence of lines, or readline stream).

        Returns True on success, False when the source type is unsupported.
        """
        if self.image:
            self.image = None
            self.need_clear_image = True
        self._close_source()
        self.max_width = 0
        if isinstance(source, str):
            self.source_is_stream = False
            self.lines = source.splitlines()
            if self.lines:
                self.max_width = max((len(line) for line in self.lines))
        elif hasattr(source, '__getitem__'):
            self.source_is_stream = False
            self.lines = source
            if self.lines:
                self.max_width = max((len(line) for line in source))
        elif hasattr(source, 'readline'):
            # Stream: lines are pulled lazily by _get_line while scrolling.
            self.source_is_stream = True
            self.lines = []
        else:
            self.source = None
            self.source_is_stream = False
            return False
        self.markup = 'ansi'
        if ((not self.source_is_stream) and strip):
            self.lines = [line.strip() for line in self.lines]
        self.source = source
        return True

    def click(self, event):
        """Mouse wheel scrolls 3 lines (1 with Ctrl held)."""
        n = (1 if event.ctrl() else 3)
        direction = event.mouse_wheel_direction()
        if direction:
            self.move(down=(direction * n))
        return True

    def _get_line(self, n, attempt_to_read=True):
        """Return line n, reading more of a stream source on demand;
        returns '' past the end of the source."""
        assert isinstance(n, int), n
        try:
            return self.lines[n]
        except (KeyError, IndexError):
            if (attempt_to_read and self.source_is_stream):
                try:
                    for line in self.source:
                        if (len(line) > self.max_width):
                            self.max_width = len(line)
                        self.lines.append(line)
                        if (len(self.lines) > n):
                            break
                except (UnicodeError, IOError):
                    pass
                # Retry once with reading disabled to avoid infinite recursion.
                return self._get_line(n, attempt_to_read=False)
            return ''

    def _generate_lines(self, starty, startx):
        """Yield display lines starting at (starty, startx), wrapping each
        source line when wrap_plaintext_previews is enabled."""
        i = starty
        if (not self.source):
            return
        while True:
            try:
                line = self._get_line(i).expandtabs(4)
                # One pass when not wrapping; otherwise one wrapped segment
                # per widget-width slice of the line.
                for part in ((0,) if (not self.fm.settings.wrap_plaintext_previews) else range(max(1, (((len(line) - 1) // self.wid) + 1)))):
                    shift = (part * self.wid)
                    if (self.markup == 'ansi'):
                        line_bit = (ansi.char_slice(line, (startx + shift), (self.wid + shift)) + ansi.reset)
                    else:
                        line_bit = line[(startx + shift):((self.wid + startx) + shift)]
                    (yield line_bit.rstrip().replace('\r\n', '\n'))
            except IndexError:
                return
            i += 1
def main():
    """CLI entry point: extract data from KFX/KDF/ION files.

    Modes (mutually exclusive): --cover lists cover metadata for every .kfx
    in a directory; --full/--metadata decode a single file and write the
    result next to it as .json.
    """
    import argparse
    parser = argparse.ArgumentParser(description=(('Extract data from a KFX, KDF or ION files (v' + VERSION) + ')'))
    action = parser.add_mutually_exclusive_group(required=True)
    action.add_argument('-c', '--cover', action='store_true', help='List cover metadata from a Kindle document directory')
    action.add_argument('-f', '--full', action='store_true', help='Dump all content of a .kfx, .kdf, or .ion file as .json')
    action.add_argument('-m', '--metadata', action='store_true', help='Dump book metadata from a .kfx file as .json')
    parser.add_argument('pathname', help='Pathname to be processed')
    args = parser.parse_args()
    if (not os.path.exists(args.pathname)):
        print(('%s does not exist' % args.pathname))
        return
    if args.cover:
        if (not os.path.isdir(args.pathname)):
            print(('%s is not a directory' % args.pathname))
            return
        print(('Cover metadata from Kindle directory: %s' % args.pathname))
        for fn in sorted(os.listdir(args.pathname)):
            if fn.endswith('.kfx'):
                try:
                    metadata = get_kindle_kfx_metadata(os.path.join(args.pathname, fn))
                    print(('%s: doctype=%s, asin=%s, cover=%s' % (fn, metadata.get('cde_content_type'), metadata.get('ASIN'), ('cover_image_data' in metadata))))
                except Exception as e:
                    # NOTE(review): `unicode` is not a Python 3 builtin —
                    # presumably aliased to str by a py2/3 compat shim earlier
                    # in this file; confirm (otherwise use str(e)).
                    print(('%s: Exception -- %s' % (fn, unicode(e))))
    elif (args.full or args.metadata):
        if (not os.path.isfile(args.pathname)):
            print(('%s is not a file' % args.pathname))
            return
        print(('Decoding: %s' % args.pathname))
        if args.pathname.endswith('.kdf'):
            data = KDFDatabase(args.pathname).decode()
        else:
            # Dispatch on the file's magic number, not its extension.
            packed_data = read_file(args.pathname)
            if (packed_data[0:4] == CONTAINER_MAGIC):
                data = KFXContainer(packed_data).decode(metadata_only=args.metadata)
            elif (packed_data[0:4] == ION_MAGIC):
                data = PackedIon(packed_data).decode_list()
            elif (packed_data[0:8] == DRMION_MAGIC):
                # DRM-wrapped ION: strip the 8-byte header and trailer.
                data = PackedIon(packed_data[8:(- 8)]).decode_list()
            else:
                print(('%s does not appear to be KFX, KDF or ION' % args.pathname))
                return
        if args.metadata:
            data = extract_metadata(data)
        outfile = (os.path.splitext(args.pathname)[0] + '.json')
        write_file(outfile, json_dump(data, sort_keys=args.metadata))
        print(('Extracted data to JSON file "%s"' % outfile))
    else:
        print('No processing option specified. See --help')
def pairwise_loss(loss_function, y, margin=1):
    """Compute a pairwise ranking loss over score differences ``y``.

    loss_function: 'bpr' | 'hinge' | 'square' (case-insensitive).
    y: tensor of pairwise score differences (positive minus negative).
    margin: margin used by the hinge variant (default 1).
    Raises ValueError for an unknown loss name.
    """
    # Lowercase once instead of on every comparison.
    kind = loss_function.lower()
    if kind == 'bpr':
        return (- tf.reduce_sum(tf.log_sigmoid(y)))
    if kind == 'hinge':
        return tf.reduce_sum(tf.maximum((y + margin), 0))
    if kind == 'square':
        return tf.reduce_sum(tf.square((1 - y)))
    # ValueError instead of bare Exception; callers catching Exception still work.
    raise ValueError('please choose a suitable loss function')
class ResponseCloseWrapper(object):
    """Proxy around an HTTP response that invokes a callback on close().

    All attribute access is delegated to the wrapped response. ``read`` may
    be replaced by passing a callable; passing ``None`` keeps the response's
    own ``read`` method.
    """

    def __init__(self, response, closed_callback, read):
        self._response = response
        self._closed_callback = closed_callback
        self.read = response.read if read is None else read

    def __getattr__(self, name):
        # Only invoked for attributes not found on the wrapper itself.
        return getattr(self._response, name)

    def close(self):
        """Notify the owner instead of closing the underlying response."""
        self._closed_callback()
class CoveredArea():
    """For each geometry, compute the total area covered by the geometry and
    its spatial-weights neighbours; results are stored in ``self.series``."""

    def __init__(self, gdf, spatial_weights, unique_id, verbose=True):
        """gdf: GeoDataFrame of geometries.
        spatial_weights: spatial weights object exposing ``neighbors`` keyed
        by unique_id values.
        unique_id: column name identifying each geometry.
        verbose: show a tqdm progress bar."""
        self.gdf = gdf
        self.sw = spatial_weights
        self.id = gdf[unique_id]
        data = gdf
        # Index areas by the unique id so neighbour lookups are direct.
        area = data.set_index(unique_id).geometry.area
        results_list = []
        for index in tqdm(area.index, total=area.shape[0], disable=(not verbose)):
            if (index in spatial_weights.neighbors):
                # Include the geometry itself plus all of its neighbours.
                neighbours = [index]
                neighbours += spatial_weights.neighbors[index]
                areas = area.loc[neighbours]
                results_list.append(sum(areas))
            else:
                # No entry in the weights object (e.g. an island) -> NaN.
                results_list.append(np.nan)
        self.series = pd.Series(results_list, index=gdf.index)
class TestNeighbour(unittest.TestCase):
    """Check molecular packing: coordination numbers from seeded CIF structures."""

    def test_packing(self):
        """Each reference structure must yield its known coordination number
        when neighbours are counted within a 1.5 cutoff factor."""
        c = pyxtal(molecular=True)
        # (structure name, expected coordination number) pairs.
        for data in [('aspirin', 14), ('WEXBOS', 14), ('MERQIM', 12), ('LAGNAL', 16), ('YICMOP', 14), ('LUFHAW', 18), ('coumarin', 14), ('HAHCOI', 14), ('JAPWIH', 14), ('AXOSOW01', 14), ('PAHYON01', 13), ('xxvi', 15), ('resorcinol', 14)]:
            (name, CN) = data
            c.from_seed(seed=((cif_path + name) + '.cif'), molecules=[name])
            (ds, _, _, _, engs) = c.get_neighboring_molecules(0, 1.5)
            self.assertTrue((len(ds) == CN))
def stderr_redirector(stream):
    """Redirect the process-level stderr (fd 2, including C-level writes) into
    ``stream`` for the duration of the ``with`` body.

    Generator intended to be wrapped by ``contextlib.contextmanager``
    (decorator presumably applied at the definition site outside this view).
    Captured output is copied into ``stream`` only on normal exit.

    Fixes over the common recipe:
    - ``tfile`` is initialized before the try block, so a failure in
      TemporaryFile() no longer raises NameError in the finally clause.
    - stderr is restored even when the body raises, instead of leaving the
      process with a redirected fd.
    """
    original_stderr_fd = sys.stderr.fileno()

    def _redirect_stderr(to_fd):
        # Flush the C-level buffer before swapping the fd, then rebuild
        # sys.stderr on top of the (re-duplicated) original fd number.
        libc.fflush(c_stderr)
        sys.stderr.close()
        os.dup2(to_fd, original_stderr_fd)
        sys.stderr = io.TextIOWrapper(os.fdopen(original_stderr_fd, 'wb'))

    saved_stderr_fd = os.dup(original_stderr_fd)
    tfile = None
    try:
        tfile = tempfile.TemporaryFile(mode='w+b')
        _redirect_stderr(tfile.fileno())
        try:
            (yield)
        finally:
            # Always restore the original stderr, even if the body raised.
            _redirect_stderr(saved_stderr_fd)
        tfile.flush()
        tfile.seek(0, io.SEEK_SET)
        stream.write(tfile.read())
    finally:
        if tfile is not None:
            tfile.close()
        os.close(saved_stderr_fd)
def _compute_tags(original_tags: Iterable[str], new_tags: (str | None)) -> set[str]:
if (new_tags is None):
return set(original_tags)
if new_tags.startswith('+'):
return {*original_tags, *new_tags[1:].split('.')}
if new_tags.startswith('-'):
return (set(original_tags) - set(new_tags[1:].split('.')))
return set(new_tags.split('.')) |
class TimeCache(FitDataCache):
    """Cache of remote-repair time series (RPS and repair amounts) keyed by
    fit item ID and the ancillary-reload flag."""

    def getRpsData(self, src, ancReload):
        """Return the finalized time -> RPS mapping for the given source fit."""
        return self._data[src.item.ID][ancReload]['finalRps']

    def getRepAmountData(self, src, ancReload):
        """Return the finalized time -> repair-amount mapping for the given source fit."""
        return self._data[src.item.ID][ancReload]['finalRepAmount']

    def getRpsDataPoint(self, src, ancReload, time):
        """Return the RPS value effective at ``time``."""
        return self._getDataPoint(src=src, ancReload=ancReload, time=time, dataFunc=self.getRpsData)

    def getRepAmountDataPoint(self, src, ancReload, time):
        """Return the repair-amount value effective at ``time``."""
        return self._getDataPoint(src=src, ancReload=ancReload, time=time, dataFunc=self.getRepAmountData)
    def prepareRpsData(self, src, ancReload, maxTime):
        """Convert the internal per-module RPS intervals into the final
        time -> {key: RPS} mapping, consumed by getRpsData.

        NOTE(review): returns True when maxTime is None but plain None when
        the cache is already prepared — presumably callers ignore the return
        value; confirm.
        """
        if (maxTime is None):
            return True
        self._generateInternalForm(src=src, ancReload=ancReload, maxTime=maxTime)
        fitCache = self._data[src.item.ID][ancReload]
        if ('finalRps' in fitCache):
            return
        # Collapse each module's (start, end, rps) interval list into a sparse
        # time -> rps point map, inserting explicit zero points for gaps.
        pointCache = {}
        for (key, rpsList) in fitCache['internalRps'].items():
            pointData = pointCache[key] = {}
            prevRps = None
            prevTimeEnd = None
            for (timeStart, timeEnd, rps) in rpsList:
                if (not pointData):
                    pointData[timeStart] = rps
                elif (floatUnerr(prevTimeEnd) < floatUnerr(timeStart)):
                    # Gap between intervals: RPS drops to zero until the next start.
                    pointData[prevTimeEnd] = RRTypes(0, 0, 0, 0)
                    pointData[timeStart] = rps
                elif (rps != prevRps):
                    pointData[timeStart] = rps
                prevRps = rps
                prevTimeEnd = timeEnd
        del fitCache['internalRps']
        # Invert to time -> keys-that-change, then sweep chronologically,
        # carrying forward the previous snapshot (copied per step).
        changesByTime = {}
        for (key, rpsMap) in pointCache.items():
            for time in rpsMap:
                changesByTime.setdefault(time, []).append(key)
        finalRpsCache = fitCache['finalRps'] = {}
        timeRpsData = {}
        for time in sorted(changesByTime):
            timeRpsData = copy(timeRpsData)
            for key in changesByTime[time]:
                timeRpsData[key] = pointCache[key][time]
            finalRpsCache[time] = timeRpsData
def prepareRepAmountData(self, src, ancReload, maxTime):
if (maxTime is None):
return
self._generateInternalForm(src=src, ancReload=ancReload, maxTime=maxTime)
fitCache = self._data[src.item.ID][ancReload]
if ('finalRepAmount' in fitCache):
return
intCache = fitCache['internalRepAmount']
changesByTime = {}
for (key, remAmountMap) in intCache.items():
for time in remAmountMap:
changesByTime.setdefault(time, []).append(key)
finalCache = fitCache['finalRepAmount'] = {}
timeRepAmountData = {}
for time in sorted(changesByTime):
timeRepAmountData = copy(timeRepAmountData)
for key in changesByTime[time]:
keyRepAmount = intCache[key][time]
if (key in timeRepAmountData):
timeRepAmountData[key] = (timeRepAmountData[key] + keyRepAmount)
else:
timeRepAmountData[key] = keyRepAmount
finalCache[time] = timeRepAmountData
del fitCache['internalRepAmount']
def _generateInternalForm(self, src, ancReload, maxTime):
if self._isTimeCacheValid(src=src, ancReload=ancReload, maxTime=maxTime):
return
fitCache = self._data.setdefault(src.item.ID, {})[ancReload] = {'maxTime': maxTime}
intCacheRps = fitCache['internalRps'] = {}
intCacheRepAmount = fitCache['internalRepAmount'] = {}
def addRps(rrKey, addedTimeStart, addedTimeFinish, addedRepAmounts):
if (not addedRepAmounts):
return
repAmountSum = sum(addedRepAmounts, RRTypes(0, 0, 0, 0))
if ((repAmountSum.shield > 0) or (repAmountSum.armor > 0) or (repAmountSum.hull > 0)):
addedRps = (repAmountSum / (addedTimeFinish - addedTimeStart))
rrCacheRps = intCacheRps.setdefault(rrKey, [])
rrCacheRps.append((addedTimeStart, addedTimeFinish, addedRps))
def addRepAmount(rrKey, addedTime, addedRepAmount):
if ((addedRepAmount.shield > 0) or (addedRepAmount.armor > 0) or (addedRepAmount.hull > 0)):
intCacheRepAmount.setdefault(rrKey, {})[addedTime] = addedRepAmount
for mod in src.item.activeModulesIter():
if (not mod.isRemoteRepping()):
continue
isAncShield = ('shipModuleAncillaryRemoteShieldBooster' in mod.item.effects)
isAncArmor = ('shipModuleAncillaryRemoteArmorRepairer' in mod.item.effects)
if (isAncShield or isAncArmor):
cycleParams = mod.getCycleParameters(reloadOverride=ancReload)
else:
cycleParams = mod.getCycleParameters(reloadOverride=True)
if (cycleParams is None):
continue
currentTime = 0
nonstopCycles = 0
cyclesWithoutReload = 0
cyclesUntilReload = mod.numShots
for (cycleTimeMs, inactiveTimeMs, isInactivityReload) in cycleParams.iterCycles():
cyclesWithoutReload += 1
cycleRepAmounts = []
repAmountParams = mod.getRepAmountParameters(spoolOptions=SpoolOptions(SpoolType.CYCLES, nonstopCycles, True))
for (repTimeMs, repAmount) in repAmountParams.items():
if (isAncArmor and mod.charge and (not ancReload) and (cyclesWithoutReload > cyclesUntilReload)):
repAmount = (repAmount / mod.getModifiedItemAttr('chargedArmorDamageMultiplier', 1))
cycleRepAmounts.append(repAmount)
addRepAmount(mod, (currentTime + (repTimeMs / 1000)), repAmount)
addRps(mod, currentTime, (currentTime + (cycleTimeMs / 1000)), cycleRepAmounts)
if (inactiveTimeMs > 0):
nonstopCycles = 0
else:
nonstopCycles += 1
if isInactivityReload:
cyclesWithoutReload = 0
if (currentTime > maxTime):
break
currentTime += ((cycleTimeMs / 1000) + (inactiveTimeMs / 1000))
for drone in src.item.activeDronesIter():
if (not drone.isRemoteRepping()):
continue
cycleParams = drone.getCycleParameters(reloadOverride=True)
if (cycleParams is None):
continue
currentTime = 0
repAmountParams = drone.getRepAmountParameters()
for (cycleTimeMs, inactiveTimeMs, isInactivityReload) in cycleParams.iterCycles():
cycleRepAmounts = []
for (repTimeMs, repAmount) in repAmountParams.items():
cycleRepAmounts.append(repAmount)
addRepAmount(drone, (currentTime + (repTimeMs / 1000)), repAmount)
addRps(drone, currentTime, (currentTime + (cycleTimeMs / 1000)), cycleRepAmounts)
if (currentTime > maxTime):
break
currentTime += ((cycleTimeMs / 1000) + (inactiveTimeMs / 1000))
def _isTimeCacheValid(self, src, ancReload, maxTime):
try:
cacheMaxTime = self._data[src.item.ID][ancReload]['maxTime']
except KeyError:
return False
return (maxTime <= cacheMaxTime)
def _getDataPoint(self, src, ancReload, time, dataFunc):
data = dataFunc(src=src, ancReload=ancReload)
timesBefore = [t for t in data if (floatUnerr(t) <= floatUnerr(time))]
try:
time = max(timesBefore)
except ValueError:
return {}
else:
return data[time] |
.parametrize('username,password', users)
def test_delete(db, client, username, password):
    """DELETE on every question's detail endpoint returns the status code
    expected for the given user (see ``status_map['delete']``)."""
    client.login(username=username, password=password)
    instances = Question.objects.all()
    for instance in instances:
        url = reverse(urlnames['detail'], args=[instance.pk])
        response = client.delete(url)
        # response.json() is included in the failure message for debugging.
        assert (response.status_code == status_map['delete'][username]), response.json()
_model('multilingual_transformer')
class MultilingualTransformerModel(FairseqMultiModel):
    """Transformer trained on several language pairs at once.

    Holds one encoder/decoder per language pair; embeddings, encoders and
    decoders can optionally be shared across languages via CLI flags.
    """
    def __init__(self, encoders, decoders):
        super().__init__(encoders, decoders)
    def add_args(parser):
        """Add multilingual sharing options on top of the base transformer args."""
        TransformerModel.add_args(parser)
        parser.add_argument('--share-encoder-embeddings', action='store_true', help='share encoder embeddings across languages')
        parser.add_argument('--share-decoder-embeddings', action='store_true', help='share decoder embeddings across languages')
        parser.add_argument('--share-encoders', action='store_true', help='share encoders across languages')
        parser.add_argument('--share-decoders', action='store_true', help='share decoders across languages')
    def build_model(cls, args, task):
        """Build one encoder/decoder per language pair, honouring the sharing flags."""
        from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
        assert isinstance(task, MultilingualTranslationTask)
        base_multilingual_architecture(args)
        if (not hasattr(args, 'max_source_positions')):
            args.max_source_positions = 1024
        if (not hasattr(args, 'max_target_positions')):
            args.max_target_positions = 1024
        src_langs = [lang_pair.split('-')[0] for lang_pair in task.model_lang_pairs]
        tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.model_lang_pairs]
        # Sharing whole encoders/decoders implies sharing their embeddings.
        if args.share_encoders:
            args.share_encoder_embeddings = True
        if args.share_decoders:
            args.share_decoder_embeddings = True
        def build_embedding(dictionary, embed_dim, path=None):
            # Build an embedding table for one dictionary, optionally loading
            # pretrained vectors from *path*.  (Removed a leftover debug
            # print that dumped 200 characters to stdout on every call.)
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb
        (shared_encoder_embed_tokens, shared_decoder_embed_tokens) = (None, None)
        if args.share_all_embeddings:
            if (args.encoder_embed_dim != args.decoder_embed_dim):
                raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
                raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
            shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=task.langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path)
            shared_decoder_embed_tokens = shared_encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            if args.share_encoder_embeddings:
                shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path)
            if args.share_decoder_embeddings:
                shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path)
        (lang_encoders, lang_decoders) = ({}, {})
        def get_encoder(lang):
            # Lazily build (and memoize) the encoder for one source language.
            if (lang not in lang_encoders):
                if (shared_encoder_embed_tokens is not None):
                    encoder_embed_tokens = shared_encoder_embed_tokens
                else:
                    encoder_embed_tokens = build_embedding(task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path)
                lang_encoders[lang] = cls._get_module_class(True, args, task.dicts[lang], encoder_embed_tokens, src_langs)
            return lang_encoders[lang]
        def get_decoder(lang):
            # Lazily build (and memoize) the decoder for one target language.
            if (lang not in lang_decoders):
                if (shared_decoder_embed_tokens is not None):
                    decoder_embed_tokens = shared_decoder_embed_tokens
                else:
                    decoder_embed_tokens = build_embedding(task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path)
                lang_decoders[lang] = cls._get_module_class(False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs)
            return lang_decoders[lang]
        (shared_encoder, shared_decoder) = (None, None)
        if args.share_encoders:
            shared_encoder = get_encoder(src_langs[0])
        if args.share_decoders:
            shared_decoder = get_decoder(tgt_langs[0])
        (encoders, decoders) = (OrderedDict(), OrderedDict())
        for (lang_pair, src, tgt) in zip(task.model_lang_pairs, src_langs, tgt_langs):
            encoders[lang_pair] = (shared_encoder if (shared_encoder is not None) else get_encoder(src))
            decoders[lang_pair] = (shared_decoder if (shared_decoder is not None) else get_decoder(tgt))
        return MultilingualTransformerModel(encoders, decoders)
    def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs):
        """Return the concrete encoder/decoder module for one language."""
        module_class = (TransformerEncoder if is_encoder else TransformerDecoder)
        return module_class(args, lang_dict, embed_tokens)
    def load_state_dict(self, state_dict, strict=True, model_cfg=None):
        """Load a checkpoint, dropping entries for language pairs this model lacks."""
        state_dict_subset = state_dict.copy()
        for k in state_dict:
            assert k.startswith('models.')
            lang_pair = k.split('.')[1]
            if (lang_pair not in self.models):
                del state_dict_subset[k]
        super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg)
class SmallMnistNoDropout(nn.Module):
    """Small MNIST-style convnet (two conv layers, two FC layers), no dropout."""

    def __init__(self):
        super(SmallMnistNoDropout, self).__init__()
        # Feature extractor: 1 -> 10 -> 20 channels, 5x5 kernels, no padding.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.relu2 = nn.ReLU()
        # Classifier head: 320 flattened features -> 50 hidden -> 10 classes.
        self.fc1 = nn.Linear(320, 50)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(50, 10)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return per-class log-probabilities for the input batch ``x``."""
        out = self.relu1(self.conv1(x))
        out = self.relu2(self.conv2(out))
        out = out.view(-1, 320)
        out = self.relu3(self.fc1(out))
        return self.log_softmax(self.fc2(out))
def create_roi_contour(roi_data: ROIData, series_data) -> Dataset:
    """Build a DICOM ROIContour dataset for a single ROI.

    Fills in the display color, the contour sequence derived from the image
    series, and the referenced ROI number (stringified, as required).
    """
    contour = Dataset()
    contour.ROIDisplayColor = roi_data.color
    contour.ContourSequence = create_contour_sequence(roi_data, series_data)
    contour.ReferencedROINumber = str(roi_data.number)
    return contour
def preprocess_image(img, resize=128, img2np=True):
    """Center-crop ``img`` to a square, resize it to ``resize`` x ``resize``,
    and optionally convert it to a numpy array via ``image_to_np``.

    Note: PIL's ``Image.size`` is (width, height); the crop box is
    (left, upper, right, lower).
    """
    width, height = img.size
    side = min(width, height)
    left = int((width - side) / 2.0)
    upper = int((height - side) / 2.0)
    img = img.crop((left, upper, left + side, upper + side))
    img = img.resize((resize, resize), Image.BILINEAR)
    return image_to_np(img) if img2np else img
class Decoder(nn.Module):
    """Pointer-generator attention decoder (single-layer, unidirectional LSTM).

    Each step combines the previous context vector with the target token
    embedding, runs one LSTM step, attends over the encoder outputs and,
    when ``config.pointer_gen`` is set, mixes the vocabulary distribution
    with the copy (attention) distribution via a learned p_gen gate.
    """
    def __init__(self):
        super(Decoder, self).__init__()
        self.attention_network = Attention()
        self.embedding = nn.Embedding(config.args.vocab_size, config.emb_dim)
        init_wt_normal(self.embedding.weight)
        # Projects [context (2h); token embedding] back down to emb_dim.
        self.x_context = nn.Linear(((config.hidden_dim * 2) + config.emb_dim), config.emb_dim)
        self.lstm = nn.LSTM(config.emb_dim, config.hidden_dim, num_layers=1, batch_first=True, bidirectional=False)
        init_lstm_wt(self.lstm)
        if config.pointer_gen:
            # p_gen input: context (2h) + decoder state h||c (2h) + lstm input (emb).
            self.p_gen_linear = nn.Linear(((config.hidden_dim * 4) + config.emb_dim), 1)
        # Output projection: [lstm output (h); context (2h)] -> hidden -> vocab.
        self.out1 = nn.Linear((config.hidden_dim * 3), config.hidden_dim)
        self.out2 = nn.Linear(config.hidden_dim, config.args.vocab_size)
        init_linear_wt(self.out2)
    def forward(self, y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask, c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, step):
        """Run one decoding step and return
        (final_dist, s_t, c_t, attn_dist, p_gen, coverage)."""
        # At inference step 0, run attention once with the initial state so
        # the coverage vector is initialised before the first real step.
        if ((not self.training) and (step == 0)):
            (h_decoder, c_decoder) = s_t_1
            s_t_hat = torch.cat((h_decoder.view((- 1), config.hidden_dim), c_decoder.view((- 1), config.hidden_dim)), 1)
            (c_t, _, coverage_next) = self.attention_network(s_t_hat, encoder_outputs, encoder_feature, enc_padding_mask, coverage)
            coverage = coverage_next
        y_t_1_embd = self.embedding(y_t_1)
        x = self.x_context(torch.cat((c_t_1, y_t_1_embd), 1))
        (lstm_out, s_t) = self.lstm(x.unsqueeze(1), s_t_1)
        (h_decoder, c_decoder) = s_t
        s_t_hat = torch.cat((h_decoder.view((- 1), config.hidden_dim), c_decoder.view((- 1), config.hidden_dim)), 1)
        (c_t, attn_dist, coverage_next) = self.attention_network(s_t_hat, encoder_outputs, encoder_feature, enc_padding_mask, coverage)
        # Coverage is only advanced during training or after the first step,
        # mirroring the special-cased step-0 attention call above.
        if (self.training or (step > 0)):
            coverage = coverage_next
        p_gen = None
        if config.pointer_gen:
            p_gen_input = torch.cat((c_t, s_t_hat, x), 1)
            p_gen = self.p_gen_linear(p_gen_input)
            p_gen = F.sigmoid(p_gen)
        output = torch.cat((lstm_out.view((- 1), config.hidden_dim), c_t), 1)
        output = self.out1(output)
        output = self.out2(output)
        vocab_dist = F.softmax(output, dim=1)
        if config.pointer_gen:
            # Mix generation and copy distributions; extra_zeros extends the
            # vocab axis for OOV words present in the source.
            vocab_dist_ = (p_gen * vocab_dist)
            attn_dist_ = ((1 - p_gen) * attn_dist)
            if (extra_zeros is not None):
                vocab_dist_ = torch.cat([vocab_dist_, extra_zeros], 1)
            final_dist = vocab_dist_.scatter_add(1, enc_batch_extend_vocab, attn_dist_)
        else:
            final_dist = vocab_dist
        return (final_dist, s_t, c_t, attn_dist, p_gen, coverage)
def set_lights_shortcut(request: WSGIRequest) -> HttpResponse:
    """Enable or disable all light programs (ring, wled, strip) at once.

    Reads the requested boolean from the POST payload. Enabling restores
    each device's last program; disabling turns every program off. If the
    requested state already holds, this is a no-op.
    """
    (value, response) = extract_value(request.POST)
    should_enable = strtobool(value)
    is_enabled = ((storage.get('ring_program') != 'Disabled') or (storage.get('wled_program') != 'Disabled') or (storage.get('strip_program') != 'Disabled'))
    if should_enable == is_enabled:
        # Already in the requested state; nothing to do.
        return HttpResponse()
    for device in ['ring', 'wled', 'strip']:
        if should_enable:
            set_program(device, storage.get(cast(DeviceProgram, f'last_{device}_program')))
        else:
            set_program(device, 'Disabled')
    return response
def test_image_storage_format_selects_radiobox(settings, view):
    """Only the radio button matching the stored format ('jpg') is checked."""
    settings.setValue('Items/image_storage_format', 'jpg')
    widget = ImageStorageFormatWidget()
    expected = {'best': False, 'png': False, 'jpg': True}
    for name, checked in expected.items():
        assert widget.buttons[name].isChecked() is checked
def test_apply_function_parallel_spectral_noncube_withblockinfo(data_adv):
    """apply_function_parallel_spectral with return_new_cube=False passes dask
    ``block_info`` through correctly: each chunk's spectral sum must equal the
    matching slice of the full spectral-plane sum."""
    chunk_size = ((- 1), 1, 2)
    cube = DaskSpectralCube.read(data_adv).rechunk(chunks=chunk_size)
    sum_spectral_plane = cube.sum(axis=0).unitless_filled_data[:]
    # All values are unique, so a mismatched slice cannot compare equal by accident.
    assert (np.unique(sum_spectral_plane).size == sum_spectral_plane.size)
    def sum_blocks_spectral(data_chunk, block_info=None, comparison_array=None):
        # Sum this chunk over the spectral axis and compare it with the
        # expected slice, located via the block's array-location metadata.
        chunk_sum = data_chunk.sum(0)
        assert (block_info is not None)
        loc = [block_range[0] for block_range in block_info[0]['array-location']]
        assert (len(loc) == 3)
        thisslice = (slice(loc[1], (loc[1] + chunk_sum.shape[0])), slice(loc[2], (loc[2] + chunk_sum.shape[1])))
        return (chunk_sum == comparison_array[thisslice])
    output_chunk_size = (1, 2)
    test = cube.apply_function_parallel_spectral(sum_blocks_spectral, return_new_cube=False, accepts_chunks=True, drop_axis=[0], chunks=output_chunk_size, comparison_array=sum_spectral_plane)
    # The spectral axis was dropped, so only the spatial shape remains.
    assert (test.shape == cube.shape[1:])
    assert np.all(test.compute())
.parametrize('is_locked', [False, True])
.parametrize('is_installed', [False, True])
.parametrize('with_extras', [False, True])
.parametrize('do_sync', [False, True])
def test_run_installs_extras_with_deps_if_requested(installer: Installer, locker: Locker, repo: Repository, installed: CustomInstalledRepository, package: ProjectPackage, is_locked: bool, is_installed: bool, with_extras: bool, do_sync: bool) -> None:
    """Extras (and their transitive deps) are installed only when requested
    and are removed again when already installed but no longer requested."""
    # Extra 'foo' pulls in C, which itself depends on D.
    package.extras[canonicalize_name('foo')] = [get_dependency('C')]
    package_a = get_package('A', '1.0')
    package_b = get_package('B', '1.0')
    package_c = get_package('C', '1.0')
    package_d = get_package('D', '1.1')
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)
    repo.add_package(package_d)
    package.add_dependency(Factory.create_dependency('A', '^1.0'))
    package.add_dependency(Factory.create_dependency('B', '^1.0'))
    package.add_dependency(Factory.create_dependency('C', {'version': '^1.0', 'optional': True}))
    package_c.add_dependency(Factory.create_dependency('D', '^1.0'))
    if is_locked:
        locker.locked(True)
        locker.mock_lock_data(fixture('extras-with-dependencies'))
    if is_installed:
        installed.add_package(package_a)
        installed.add_package(package_b)
        installed.add_package(package_c)
        installed.add_package(package_d)
    if with_extras:
        installer.extras(['foo'])
    installer.requires_synchronization(do_sync)
    result = installer.run()
    assert (result == 0)
    if (not is_locked):
        assert (locker.written_data == fixture('extras-with-dependencies'))
    if with_extras:
        # A, B plus the extra C and its dependency D.
        expected_installations_count = (0 if is_installed else 4)
        expected_removals_count = 0
    else:
        # Only the non-optional A and B; C and D are removed only when they
        # are already installed and lock data is present.
        expected_installations_count = (0 if is_installed else 2)
        expected_removals_count = (2 if (is_installed and is_locked) else 0)
    assert (installer.executor.installations_count == expected_installations_count)
    assert (installer.executor.removals_count == expected_removals_count)
class ESILRegisters():
    """Register file model for ESIL symbolic execution.

    Registers are stored as z3 bitvectors keyed by their (start, end) offset
    bounds, so overlapping registers (e.g. a full register and its
    sub-registers) share one backing value; sub-register reads and writes
    are implemented with z3 Extract/Concat.
    """
    def __init__(self, reg_array, aliases=None, sym=False):
        """*aliases* defaults to a fresh dict per instance (the previous
        mutable default argument shared one dict across all instances)."""
        self.reg_info = reg_array
        self._registers = {}
        self.offset_dictionary = {}
        self._register_values = {}
        self.aliases = aliases if (aliases is not None) else {}
        # Reference count shared between clones for copy-on-write semantics.
        self._refs = {'count': 1}
        self.zero_regs = {'xzr': z3.BitVecVal(0, 64), 'wzr': z3.BitVecVal(0, 32), 'zero': z3.BitVecVal(0, 64)}
        self.pure_symbolic = sym
    def init_registers(self):
        """Register all registers, widest first so parents exist before sub-registers."""
        self.reg_info.sort(key=(lambda x: x['size']), reverse=True)
        for reg in self.reg_info:
            self.add_register(reg)
    def add_register(self, reg: Dict):
        """Add one register description, linking it to an existing backing
        value when its offset range overlaps a previously added register."""
        start = reg['offset']
        end = (reg['offset'] + reg['size'])
        size = reg['size']
        reg['start'] = start
        reg['end'] = end
        self._registers[reg['name']] = reg
        key = (start, end)
        reg_value = self.get_register_from_bounds(reg)
        if (reg_value is not None):
            # Overlaps an existing backing value; widen it if necessary.
            if (reg_value['size'] < size):
                reg_value['size'] = size
                reg_value['start'] = start
                reg_value['end'] = end
                if (self.pure_symbolic and (reg['name'] != self.aliases['PC']['reg']) and (reg['type_str'] != 'flg')):
                    reg.pop('value')
                    reg_value['bv'] = z3.BitVec(reg['name'], size)
                else:
                    reg_value['bv'] = z3.BitVecVal(reg.pop('value'), size)
                reg_value['bounds'] = key
                self.offset_dictionary[key] = reg_value
            reg['bounds'] = reg_value['bounds']
            reg['sub'] = True
        else:
            # First register covering this range: create a new backing value.
            reg_value = {'type': reg['type'], 'size': size, 'start': start, 'end': end}
            if (('value' in reg) and ((not self.pure_symbolic) or (reg['name'] == self.aliases['PC']['reg']) or (reg['type_str'] == 'flg'))):
                reg_value['bv'] = z3.BitVecVal(reg.pop('value'), size)
            else:
                reg.pop('value')
                reg_value['bv'] = z3.BitVec(reg['name'], size)
            reg_value['bounds'] = key
            self.offset_dictionary[key] = reg_value
            reg['bounds'] = key
            reg['sub'] = False
    def get_register_from_bounds(self, reg: Dict):
        """Return the backing value whose offset range contains *reg*, or None."""
        bounds = reg.get('bounds')
        val = self.offset_dictionary.get(bounds)
        if (val is not None):
            return val
        start = reg['offset']
        end = (reg['offset'] + reg['size'])
        size = reg['size']
        key = (start, end)
        if (key in self.offset_dictionary):
            return self.offset_dictionary[key]
        else:
            # Fall back to scanning for an enclosing range of the same type.
            for bounds in self.offset_dictionary:
                old_reg = self.offset_dictionary[bounds]
                if (old_reg['type'] != reg['type']):
                    continue
                above_start = (bounds[0] <= start <= bounds[1])
                below_end = (bounds[0] <= end <= bounds[1])
                if (above_start and below_end):
                    return old_reg
    def __getitem__(self, key: str) -> z3.BitVecRef:
        """Read a register (or alias), extracting sub-register bits when needed."""
        if (key not in self._registers):
            if (key in self.aliases):
                key = self.aliases[key]['reg']
            else:
                logger.warning(('register %s not found' % key))
                return self.zero_regs['zero']
        if (key in self.zero_regs):
            return self.zero_regs[key]
        register = self._registers[key]
        reg_value = self.get_register_from_bounds(register)
        if (register['size'] == reg_value['size']):
            return reg_value['bv']
        else:
            low = (register['start'] - reg_value['start'])
            high = (low + register['size'])
            reg = z3.Extract((high - 1), low, reg_value['bv'])
            return reg
    def __setitem__(self, key: str, val):
        """Write a register (or alias), splicing sub-register bits into the backing value."""
        if (self._refs['count'] > 1):
            self.finish_clone()
        if (key in self.aliases):
            key = self.aliases[key]['reg']
        if (key not in self._registers):
            logger.warning(('register %s not found' % key))
            return
        register = self._registers[key]
        reg_value = self.get_register_from_bounds(register)
        new_reg = self.set_register_bits(register, reg_value, reg_value['bv'], val)
        reg_value['bv'] = z3.simplify(new_reg)
    def weak_set(self, key: str, val):
        """Like __setitem__ but without the not-found guard (raises KeyError instead)."""
        if (self._refs['count'] > 1):
            self.finish_clone()
        if (key in self.aliases):
            key = self.aliases[key]['reg']
        register = self._registers[key]
        reg_value = self.get_register_from_bounds(register)
        new_reg = self.set_register_bits(register, reg_value, reg_value['bv'], val)
        reg_value['bv'] = z3.simplify(new_reg)
    def val_to_register_bv(self, reg: Dict, val):
        """Coerce *val* (int, z3 Int, or z3 BV) to a bitvector of the register's size."""
        new_val = val
        if isinstance(val, int):
            new_val = z3.BitVecVal(val, reg['size'])
        elif z3.is_int(val):
            new_val = z3.Int2BV(val, reg['size'])
        elif z3.is_bv(val):
            if (val.size() > reg['size']):
                new_val = z3.Extract((reg['size'] - 1), 0, val)
            elif (val.size() < reg['size']):
                new_val = z3.ZeroExt((reg['size'] - val.size()), val)
        else:
            raise ESILArgumentException(('%s %s' % (reg, val)))
        return new_val
    def set_register_bits(self, register: Dict, reg_value: Dict, bv, val):
        """Return *bv* with the bits belonging to *register* replaced by *val*."""
        low = (register['start'] - reg_value['start'])
        high = (low + register['size'])
        bvs = []
        # Preserve surrounding bits, concatenating [upper, new value, lower].
        if (high != reg_value['size']):
            upper = z3.Extract((reg_value['size'] - 1), high, bv)
            bvs.append(upper)
        bvs.append(self.val_to_register_bv(register, val))
        if (low != 0):
            lower = z3.Extract((low - 1), 0, bv)
            bvs.append(lower)
        if (len(bvs) > 1):
            new_reg = z3.Concat(bvs)
        else:
            new_reg = bvs[0]
        return new_reg
    def get_all_registers(self):
        """Return all distinct backing values."""
        return self.offset_dictionary.values()
    def __contains__(self, key: str):
        return ((key in self._registers) or (key in self.aliases))
    def __iter__(self):
        return iter(self._registers.keys())
    def clone(self) -> 'ESILRegisters':
        """Cheap copy sharing state until the first write (copy-on-write)."""
        clone = self.__class__(self.reg_info, self.aliases, self.pure_symbolic)
        self._refs['count'] += 1
        clone._refs = self._refs
        clone._registers = self._registers
        clone.offset_dictionary = self.offset_dictionary
        return clone
    def finish_clone(self):
        """Materialize this clone's own copy of the shared register state."""
        self.offset_dictionary = self.offset_dictionary.copy()
        for x in self.offset_dictionary:
            self.offset_dictionary[x] = self.offset_dictionary[x].copy()
        self._refs['count'] -= 1
        self._refs = {'count': 1}
class DPSeriesMotorController(Instrument):
    """Driver for Anaheim Automation DP series stepper motor controllers.

    NOTE(review): the bare ``_enabled.setter`` / ``_position.setter`` lines
    and the undecorated getter methods below look like ``@property`` /
    ``@<name>.setter`` definitions whose decorators were lost in transit —
    confirm against the upstream source before relying on them.
    """
    # --- serial configuration and speed settings ---
    address = Instrument.control('%', '~%i', 'Integer property representing the address that the motor controller uses for serial\n communications.', validator=strict_range, values=[0, 99], cast=int)
    basespeed = Instrument.control('VB', 'B%i', "Integer property that represents the motor controller's starting/homing speed. This\n property can be set.", validator=truncated_range, values=[1, 5000], cast=int)
    maxspeed = Instrument.control('VM', 'M%i', "Integer property that represents the motor controller's maximum (running) speed.\n This property can be set.", validator=truncated_range, values=[1, 50000], cast=int)
    direction = Instrument.control('V+', '%s', "A string property that represents the direction in which the stepper motor will rotate\n upon subsequent step commands. This property can be set. 'CW' corresponds to clockwise\n rotation and 'CCW' corresponds to counter-clockwise rotation.", map_values=True, validator=strict_discrete_set, values={'CW': '+', 'CCW': '-'}, get_process=(lambda d: ('+' if (d == 1.0) else '-')))
    # --- encoder auto-correct configuration ---
    encoder_autocorrect = Instrument.control('VEA', 'EA%i', 'A boolean property to enable or disable the encoder auto correct function. This property\n can be set.', map_values=True, values={True: 1, False: 0}, validator=strict_discrete_set, cast=int)
    encoder_delay = Instrument.control('VED', 'ED%i', 'An integer property that represents the wait time in ms. after a move is finished before\n the encoder is read for a potential encoder auto-correct action to take place. This\n property can be set.', validator=truncated_range, values=[0, 65535], cast=int)
    encoder_motor_ratio = Instrument.control('VEM', 'EM%i', 'An integer property that represents the ratio of the number of encoder pulses per motor\n step. This property can be set.', validator=truncated_range, values=[1, 255], cast=int)
    encoder_retries = Instrument.control('VER', 'ER%i', 'An integer property that represents the number of times the motor controller will try the\n encoder auto correct function before setting an error flag. This property can be set.', validator=truncated_range, values=[0, 255], cast=int)
    encoder_window = Instrument.control('VEW', 'EW%i', 'An integer property that represents the allowable error in encoder pulses from the\n desired position before the encoder auto-correct function runs. This property can be set.\n ', validator=truncated_range, values=[0, 255], cast=int)
    # --- status ---
    busy = Instrument.measurement('VF', 'Query to see if the controller is currently moving a motor.')
    error_reg = Instrument.measurement('!', 'Reads the current value of the error codes register.', get_process=(lambda err: DPSeriesErrors(int(err))))
    def check_errors(self):
        """Read the error register, log any active error flags, and return them."""
        current_errors = self.error_reg
        if (current_errors != 0):
            logging.error(('DP-Series motor controller error detected: %s' % current_errors))
        return current_errors
    def __init__(self, adapter, name='Anaheim Automation Stepper Motor Controller', address=0, encoder_enabled=False, **kwargs):
        """Create the driver.

        :param adapter: communication adapter (e.g. serial resource).
        :param name: instrument display name.
        :param address: controller address used in serial commands.
        :param encoder_enabled: when True, positions are read from the encoder.
        """
        self._address = address
        self._encoder_enabled = encoder_enabled
        kwargs.setdefault('write_termination', '\r')
        kwargs.setdefault('read_termination', '\r')
        kwargs.setdefault('timeout', 2000)
        super().__init__(adapter, name, includeSCPI=False, asrl={'baud_rate': 38400}, **kwargs)
    def encoder_enabled(self):
        """Whether position queries use the encoder instead of the step register."""
        return self._encoder_enabled
    _enabled.setter
    def encoder_enabled(self, en):
        self._encoder_enabled = bool(en)
    def step_position(self):
        """Current position in steps — from the encoder when enabled, else the
        internal position register."""
        if self._encoder_enabled:
            pos = self.ask('VEP')
        else:
            pos = self.ask('VZ')
        return int(pos)
    _position.setter
    def step_position(self, pos):
        # Move to an absolute step position (±8388607 steps).
        strict_range(pos, ((- 8388607), 8388607))
        self.write(('P%i' % pos))
        self.write('G')
    def absolute_position(self):
        """Position in absolute units, converted via steps_to_absolute()."""
        step_pos = self.step_position
        return self.steps_to_absolute(step_pos)
    _position.setter
    def absolute_position(self, abs_pos):
        # Move to a position given in absolute units via absolute_to_steps().
        steps_pos = self.absolute_to_steps(abs_pos)
        self.step_position = steps_pos
    def absolute_to_steps(self, pos):
        """Convert an absolute position to steps; subclasses must implement."""
        raise NotImplementedError('absolute_to_steps() must be implemented in subclasses!')
    def steps_to_absolute(self, steps):
        """Convert a step count to an absolute position; subclasses must implement."""
        raise NotImplementedError('steps_to_absolute() must be implemented in subclasses!')
    def reset_position(self):
        """Zero both the encoder count and the internal step position."""
        self.write('ET')
        self.write('Z0')
    def stop(self):
        """Stop the current motion."""
        self.write('.')
    def move(self, direction):
        """Start continuous motion in *direction* ('CW' or 'CCW')."""
        self.direction = direction
        self.write('S')
    def home(self, home_mode):
        """Run a homing cycle; *home_mode* must be 0 or 1."""
        hm = int(home_mode)
        if ((hm == 0) or (hm == 1)):
            self.write(('H%i' % hm))
        else:
            raise ValueError(('Invalid home mode %i specified!' % hm))
    def write(self, command):
        """Prefix non-address commands with the controller address before sending.

        NOTE(review): ``'' in command`` is always True for any string, so the
        two branches below are unreachable and commands are never prefixed —
        this condition looks garbled (a character lost?); confirm the intended
        prefix check against the upstream source.
        """
        if ('' in command):
            cmd_str = command
        elif (('%' in command) or ('~' in command)):
            cmd_str = ('%s' % command)
        else:
            cmd_str = ('%i%s' % (self._address, command))
        super().write(cmd_str)
    def wait_for_completion(self, interval=0.5):
        """Block until the controller reports it is no longer busy, polling
        every *interval* seconds."""
        while self.busy:
            sleep(interval)
class Discover():
    """Discover TP-Link smart home devices on the local network.

    Legacy devices answer an XOR-encrypted JSON query on UDP port 9999;
    newer devices answer a binary probe on UDP port 20002.

    NOTE(review): ``discover``, ``discover_single`` and the ``_get_device_*``
    methods take no ``self``/``cls`` — they look like ``@staticmethod``
    definitions whose decorators were lost; confirm against upstream.
    """
    DISCOVERY_PORT = 9999
    # Legacy (XOR protocol) discovery query payload.
    DISCOVERY_QUERY = {'system': {'get_sysinfo': None}}
    DISCOVERY_PORT_2 = 20002
    # NOTE(review): odd-length hex string — binascii.unhexlify raises on it,
    # so this payload looks truncated; confirm the full constant upstream.
    DISCOVERY_QUERY_2 = binascii.unhexlify('cb5d3')
    async def discover(*, target='255.255.255.255', on_discovered=None, discovery_timeout=5, discovery_packets=3, interface=None, on_unsupported=None, credentials=None, port=None, timeout=None) -> DeviceDict:
        """Broadcast discovery queries and collect responses for
        *discovery_timeout* seconds; return a dict of IP -> device."""
        loop = asyncio.get_event_loop()
        (transport, protocol) = (await loop.create_datagram_endpoint((lambda : _DiscoverProtocol(target=target, on_discovered=on_discovered, discovery_packets=discovery_packets, interface=interface, on_unsupported=on_unsupported, credentials=credentials, timeout=timeout, port=port)), local_addr=('0.0.0.0', 0)))
        protocol = cast(_DiscoverProtocol, protocol)
        try:
            _LOGGER.debug('Waiting %s seconds for responses...', discovery_timeout)
            (await asyncio.sleep(discovery_timeout))
        finally:
            # Always close the transport, even if sleeping was cancelled.
            transport.close()
        _LOGGER.debug('Discovered %s devices', len(protocol.discovered_devices))
        return protocol.discovered_devices
    async def discover_single(host: str, *, discovery_timeout: int=5, port: Optional[int]=None, timeout: Optional[int]=None, credentials: Optional[Credentials]=None) -> SmartDevice:
        """Discover a single device by hostname or IP address.

        Raises SmartDeviceException on timeout/resolution failure, or the
        recorded unsupported/invalid-device exception for that address.
        """
        loop = asyncio.get_event_loop()
        event = asyncio.Event()
        try:
            # If *host* is already an IP literal, use it as-is.
            ipaddress.ip_address(host)
            ip = host
        except ValueError:
            # Otherwise resolve the hostname to an IPv4 address.
            try:
                adrrinfo = (await loop.getaddrinfo(host, 0, type=socket.SOCK_DGRAM, family=socket.AF_INET))
                ip = adrrinfo[0][4][0]
            except socket.gaierror as gex:
                raise SmartDeviceException(f'Could not resolve hostname {host}') from gex
        (transport, protocol) = (await loop.create_datagram_endpoint((lambda : _DiscoverProtocol(target=ip, port=port, discovered_event=event, credentials=credentials, timeout=timeout)), local_addr=('0.0.0.0', 0)))
        protocol = cast(_DiscoverProtocol, protocol)
        try:
            _LOGGER.debug('Waiting a total of %s seconds for responses...', discovery_timeout)
            async with asyncio_timeout(discovery_timeout):
                (await event.wait())
        except asyncio.TimeoutError as ex:
            raise SmartDeviceException(f'Timed out getting discovery response for {host}') from ex
        finally:
            transport.close()
        if (ip in protocol.discovered_devices):
            dev = protocol.discovered_devices[ip]
            # Preserve the caller-supplied hostname rather than the resolved IP.
            dev.host = host
            return dev
        elif (ip in protocol.unsupported_device_exceptions):
            raise protocol.unsupported_device_exceptions[ip]
        elif (ip in protocol.invalid_device_exceptions):
            raise protocol.invalid_device_exceptions[ip]
        else:
            raise SmartDeviceException(f'Unable to get discovery response for {host}')
    def _get_device_class(info: dict) -> Type[SmartDevice]:
        """Pick the device class from a discovery/sysinfo response dict."""
        if ('result' in info):
            # New-style discovery response.
            discovery_result = DiscoveryResult(**info['result'])
            dev_class = get_device_class_from_family(discovery_result.device_type)
            if (not dev_class):
                raise UnsupportedDeviceException(('Unknown device type: %s' % discovery_result.device_type), discovery_result=info)
            return dev_class
        else:
            # Legacy sysinfo response.
            return get_device_class_from_sys_info(info)
    def _get_device_instance_legacy(data: bytes, config: DeviceConfig) -> SmartDevice:
        """Build a device from a legacy (XOR-encrypted, port 9999) response."""
        try:
            info = json_loads(TPLinkSmartHomeProtocol.decrypt(data))
        except Exception as ex:
            raise SmartDeviceException(f'Unable to read response from device: {config.host}: {ex}') from ex
        _LOGGER.debug('[DISCOVERY] %s << %s', config.host, info)
        device_class = Discover._get_device_class(info)
        device = device_class(config.host, config=config)
        sys_info = info['system']['get_sysinfo']
        if (device_type := sys_info.get('mic_type', sys_info.get('type'))):
            config.connection_type = ConnectionType.from_values(device_family=device_type, encryption_type=EncryptType.Xor.value)
        device.protocol = get_protocol(config)
        device.update_from_discover_info(info)
        return device
    def _get_device_instance(data: bytes, config: DeviceConfig) -> SmartDevice:
        """Build a device from a new-style (port 20002) discovery response.

        Raises UnsupportedDeviceException when the type, encryption scheme or
        payload cannot be handled.
        """
        try:
            # The first 16 bytes are a header; the JSON payload follows.
            info = json_loads(data[16:])
        except Exception as ex:
            _LOGGER.debug('Got invalid response from device %s: %s', config.host, data)
            raise SmartDeviceException(f'Unable to read response from device: {config.host}: {ex}') from ex
        try:
            discovery_result = DiscoveryResult(**info['result'])
        except ValidationError as ex:
            _LOGGER.debug('Unable to parse discovery from device %s: %s', config.host, info)
            raise UnsupportedDeviceException(f'Unable to parse discovery from device: {config.host}: {ex}') from ex
        type_ = discovery_result.device_type
        try:
            config.connection_type = ConnectionType.from_values(type_, discovery_result.mgt_encrypt_schm.encrypt_type)
        except SmartDeviceException as ex:
            raise UnsupportedDeviceException((f'Unsupported device {config.host} of type {type_} ' + f'with encrypt_type {discovery_result.mgt_encrypt_schm.encrypt_type}'), discovery_result=discovery_result.get_dict()) from ex
        if ((device_class := get_device_class_from_family(type_)) is None):
            _LOGGER.warning('Got unsupported device type: %s', type_)
            raise UnsupportedDeviceException(f'Unsupported device {config.host} of type {type_}: {info}', discovery_result=discovery_result.get_dict())
        if ((protocol := get_protocol(config)) is None):
            _LOGGER.warning('Got unsupported connection type: %s', config.connection_type.to_dict())
            raise UnsupportedDeviceException((f'Unsupported encryption scheme {config.host} of ' + f'type {config.connection_type.to_dict()}: {info}'), discovery_result=discovery_result.get_dict())
        _LOGGER.debug('[DISCOVERY] %s << %s', config.host, info)
        device = device_class(config.host, protocol=protocol)
        di = discovery_result.get_dict()
        di['model'] = discovery_result.device_model
        # Alias/nickname are not available until the device is authenticated.
        di['alias'] = UNAVAILABLE_ALIAS
        di['nickname'] = UNAVAILABLE_NICKNAME
        device.update_from_discover_info(di)
        return device
class DRL():
    """TD3-style agent with an imitation term for human-intervened samples.

    Twin critics with target networks, delayed policy updates
    (``policy_freq``), and a prioritized replay buffer whose priorities mix
    the TD error with an advantage-based weight on intervened transitions.
    """

    def __init__(self, action_dim, state_dim, device='cuda', LR_C=LR_C, LR_A=LR_A):
        self.device = device
        # Flattened two-channel observation size (width * height * 2).
        self.state_dim = ((state_dim[0] * state_dim[1]) * 2)
        self.state_dim_width = state_dim[0]
        self.state_dim_height = state_dim[1]
        self.action_dim = action_dim
        self.batch_size = BATCH_SIZE
        self.gamma = GAMMA
        self.tau = TAU
        self.policy_noise = POLICY_NOSIE
        # NOTE(review): noise_clip is stored but never used below; TD3 would
        # normally clamp the target-policy noise with it -- confirm intent.
        self.noise_clip = NOISE_CLIP
        self.policy_freq = POLICY_FREQ
        self.itera = 0
        self.pointer = 0
        # NOTE(review): buffer assumes 45x80x2 observations -- TODO confirm
        # this matches state_dim passed by the caller.
        self.replay_buffer = PrioritizedReplayBuffer(MEMORY_CAPACITY, {'obs': {'shape': (45, 80, 2)}, 'act': {'shape': action_dim}, 'acte': {'shape': action_dim}, 'intervene': {}, 'rew': {}, 'next_obs': {'shape': (45, 80, 2)}, 'done': {}}, next_of='obs')
        self.actor = Actor(self.state_dim, self.action_dim).to(self.device)
        self.actor_target = Actor(self.state_dim, self.action_dim).to(self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), LR_A)
        self.actor_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.actor_optimizer, 0.996)
        self.previous_epoch = 0
        self.critic = Critic(self.state_dim, self.action_dim).to(self.device)
        self.critic_target = Critic(self.state_dim, self.action_dim).to(self.device)
        self.critic_optimizers = torch.optim.Adam(self.critic.parameters(), LR_C)
        # Start targets as exact copies of the online networks.
        hard_update(self.actor_target, self.actor)
        hard_update(self.critic_target, self.critic)

    def learn(self, batch_size=BATCH_SIZE, epoch=0):
        """Run one critic update and (every ``policy_freq`` calls) one
        delayed actor/target update on a prioritized batch.

        Returns:
            (critic_loss, actor_loss) as floats; both are 0 when the delayed
            policy update did not run on this call.
        """
        data = self.replay_buffer.sample(batch_size)
        idxs = data['indexes']
        (states, actions, actions_h) = (data['obs'], data['act'], data['acte'])
        (interv, rewards) = (data['intervene'], data['rew'])
        (next_states, dones) = (data['next_obs'], data['done'])
        # (N, H, W, C) -> (N, C, H, W) for the convolutional encoders.
        states = torch.FloatTensor(states).permute(0, 3, 1, 2).to(self.device)
        actions = torch.FloatTensor(actions).to(self.device)
        actions_h = torch.FloatTensor(actions_h).to(self.device)
        rewards = torch.FloatTensor(rewards).to(self.device)
        next_states = torch.FloatTensor(next_states).permute(0, 3, 1, 2).to(self.device)
        dones = torch.FloatTensor(dones).to(self.device)
        (loss_c, loss_a) = (0, 0)
        # BUGFIX: qa_errors was only assigned inside the delayed-policy branch
        # below, so the priority update raised NameError whenever
        # POLICY_FREQ > 1. Initialize it here so priorities fall back to the
        # plain TD error on non-policy-update steps.
        qa_errors = 0.0
        with torch.no_grad():
            # NOTE(review): TD3 usually clamps noise to +/-noise_clip and the
            # action to its bounds; here both clamps are [0, 1] and
            # self.noise_clip is unused -- confirm the action range.
            noise1 = (torch.randn_like(actions) * self.policy_noise).clamp(0, 1)
            next_actions = (self.actor_target(next_states).detach() + noise1).clamp(0, 1)
            (target_q1, target_q2) = self.critic_target([next_states, next_actions])
            # Clipped double-Q target.
            target_q = torch.min(target_q1, target_q2)
            y_expected = (rewards + (((1 - dones) * self.gamma) * target_q))
        (y_predicted1, y_predicted2) = self.critic.forward([states, actions])
        td_errors = abs((y_expected - y_predicted1.detach()))
        loss_critic = (F.mse_loss(y_predicted1, y_expected) + F.mse_loss(y_predicted2, y_expected))
        self.critic_optimizers.zero_grad()
        loss_critic.backward()
        self.critic_optimizers.step()
        if ((self.itera % self.policy_freq) == 0):
            # Samples where a human intervened get an extra imitation term.
            (index_imi, _) = np.where((interv == 1))
            states_imi = states[index_imi]
            actions_imi = actions[index_imi]
            pred_actions = self.actor.forward(states)
            if (len(index_imi) > 0):
                imitation_loss = (3 * ((self.actor.forward(states_imi) - actions_imi) ** 2).sum())
                with torch.no_grad():
                    # Exponentiated advantage of the taken (human) action over
                    # the current policy action; only intervened rows count.
                    q_adv = torch.exp((self.critic_target([states, actions])[0] - self.critic_target([states, pred_actions])[0]))
                    q_weight = torch.zeros_like(q_adv)
                    q_weight[index_imi] = 1
                    qa_errors = (q_adv * q_weight)
            else:
                imitation_loss = 0.0
                qa_errors = 0.0
            # Deterministic policy gradient plus the imitation penalty.
            loss_actor = ((- self.critic([states, pred_actions])[0]) + imitation_loss)
            loss_actor = loss_actor.mean()
            self.actor_optimizer.zero_grad()
            loss_actor.backward()
            self.actor_optimizer.step()
            # Decay the actor LR once per epoch boundary.
            if (epoch != self.previous_epoch):
                self.actor_scheduler.step()
                self.previous_epoch = epoch
            soft_update(self.actor_target, self.actor, self.tau)
            soft_update(self.critic_target, self.critic, self.tau)
            loss_a = loss_actor.item()
            loss_c = loss_critic.item()
        self.itera += 1
        # New priorities: TD error plus the advantage weight on intervened rows.
        priorities = (td_errors + qa_errors)
        priorities = priorities.cpu().numpy()
        self.replay_buffer.update_priorities(idxs, priorities)
        return (loss_c, loss_a)

    def choose_action(self, state):
        """Return the deterministic policy action for one observation.

        NOTE(review): clips to [-1, 1] here while learn() clamps actions to
        [0, 1] -- confirm which range the environment expects.
        """
        state = torch.FloatTensor(state).float().unsqueeze(0).permute(0, 3, 1, 2).to(self.device)
        action = self.actor.forward(state).detach()
        action = action.squeeze(0).cpu().numpy()
        action = np.clip(action, (- 1), 1)
        return action

    def store_transition(self, s, a, ae, i, r, s_, d=0):
        """Append one transition (with expert action and intervene flag)."""
        self.replay_buffer.add(obs=s, act=a, acte=ae, intervene=i, rew=r, next_obs=s_, done=d)

    def save_transition(self, output, timeend=0):
        """Persist the replay buffer contents to <output>/<timeend>."""
        self.replay_buffer.save_transitions(file='{}/{}'.format(output, timeend))

    def load_transition(self, output):
        """Load replay buffer contents from <output>.npz (no-op on None)."""
        if (output is None):
            return
        self.replay_buffer.load_transitions('{}.npz'.format(output))

    def load_model(self, output):
        """Load actor and critic weights from <output>/ (no-op on None)."""
        if (output is None):
            return
        self.actor.load_state_dict(torch.load('{}/actor.pkl'.format(output)))
        # BUGFIX: previously loaded '{}/actor.pkl' into the critic, silently
        # discarding the saved critic weights written by save_model().
        self.critic.load_state_dict(torch.load('{}/critic.pkl'.format(output)))

    def load_actor(self, output):
        """Load only the actor weights from an explicit checkpoint path."""
        self.actor.load_state_dict(torch.load(output))

    def save_model(self, output):
        """Save actor and critic weights under <output>/."""
        torch.save(self.actor.state_dict(), '{}/actor.pkl'.format(output))
        torch.save(self.critic.state_dict(), '{}/critic.pkl'.format(output))

    def save_actor(self, output, no):
        """Save a numbered actor-only checkpoint."""
        torch.save(self.actor.state_dict(), '{}/actor{}.pkl'.format(output, no))

    def save(self, log_dir, epoch):
        """Save a full training checkpoint (networks, optimizers, epoch)."""
        state = {'actor': self.actor.state_dict(), 'actor_target': self.actor_target.state_dict(), 'actor_optimizer': self.actor_optimizer.state_dict(), 'critic': self.critic.state_dict(), 'critic_target': self.critic_target.state_dict(), 'critic_optimizers': self.critic_optimizers.state_dict(), 'epoch': epoch}
        torch.save(state, log_dir)

    def load(self, log_dir):
        """Restore a full training checkpoint written by save()."""
        checkpoint = torch.load(log_dir)
        self.actor.load_state_dict(checkpoint['actor'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.critic_optimizers.load_state_dict(checkpoint['critic_optimizers'])
# NOTE(review): the two lines below appear to be extraction-mangled
# decorators -- most likely "@<group>.command(name='help-postgres')" and
# "@_readme_flag". Several other decorators in this file show the same
# truncation; confirm against the upstream source before relying on this.
(name='help-postgres')
_readme_flag
def help_postgres(readme):
    # Emit the "help-postgres" documentation text through the output wrapper
    # selected by the readme flag (markdown vs. plain pager).
    get_wrapper(readme)("\nThis explains how to use mmpdb to generate and use matched molecular\npairs stored in a Postgres database.\n\nThe `help-analysis` command shows how to generate a fragmentation\ndatabase named `test_data.fragdb`, then index it to produce a SQLite\nfile named `tests_data.mmpdb` containing the matched molecular\npairs.\n\nThe mmpdb program can also place the results in a Postgres database,\nwith one data set per database, by specifying a [peewee database\nURI]( I\nhave `test_data.fragdb` in my local directory, and write permissions\nto a Postgres database named `dalke` on the machine 'localhost', so\nI'll index into that database:\n\n```shell\n% mmpdb index test_data.fragdb -o postgres://localhost/dalke\n```\n\nThe `list` command knows how to work with a Postgres URL, both given a single database URL:\n\n\x08\n```shell\n% mmpdb list postgres://localhost/dalke\n Name #cmpds #rules #pairs #envs #stats | Title | Properties\npostgres://localhost/dalke 9 47 342 321 0 MMPs from 'test_data.fragdb' <none>\n```\n\nand as a way to list all of the MMP database on the server (though I\nonly have one in this example):\n\n\x08\n```shell\n% mmpdb list postgres://localhost\n Name #cmpds #rules #pairs #envs #stats | Title | Properties\npostgres://localhost/dalke 9 47 342 321 0 MMPs from 'test_data.fragdb' <none>\n```\n\nThe `loadprops` command knows how to work with a Postgres URL:\n\n\x08\n```shell\n% mmpdb loadprops postgres://localhost/dalke --properties tests/test_data.csv\nUsing dataset: MMPs from 'test_data.fragdb'\nReading properties from 'tests/test_data.csv'\nRead 2 properties for 9 compounds from 'tests/test_data.csv'\nImported 9 'MW' records (9 new, 0 updated).\nImported 8 'MP' records (8 new, 0 updated).\nNumber of rule statistics added: 533 updated: 0 deleted: 0\nLoaded all properties and re-computed all rule statistics.\n% mmpdb list postgres://localhost/dalke\n Name #cmpds #rules #pairs #envs #stats | Title | Properties\npostgres://localhost/dalke 
9 47 342 321 533 MMPs from 'test_data.fragdb' MW MP\n```\n\nSo do the `predict` and `tranform` commands:\n\n\x08\n```shell\n% mmpdb predict --smiles 'c1cccnc1' --reference 'c1cccnc1O' --property MP postgres://localhost/dalke\npredicted delta: -93 +/- 76.7268\n% mmpdb transform --smiles 'c1cccnc1O' --property MW postgres://localhost/dalke | fold -70\nID\tSMILES\tMW_from_smiles\tMW_to_smiles\tMW_radius\tMW_sma\nrts\tMW_pseudosmiles\tMW_rule_environment_id\tMW_count\tMW_avg\n\tMW_std\tMW_kurtosis\tMW_skewness\tMW_min\tMW_q1\tMW_med\nian\tMW_q3\tMW_max\tMW_paired_t\tMW_p_value\n1\tClc1ccccn1\t[*:1]O\t[*:1]Cl\t1\t[#0;X1;H0;+0;!R:1]-[#6\n;X3;H0;+0;R]\t[*:1]-[#6](~*)(~*)\t299\t1\t18.5\n\t\t\t18.5\t18.5\t18.5\t18.5\t18.5\n\n2\tNc1ccccn1\t[*:1]O\t[*:1]N\t1\t[#0;X1;H0;+0;!R:1]-[#6\n;X3;H0;+0;R]\t[*:1]-[#6](~*)(~*)\t276\t3\t-1\t0\n\t\t0\t-1\t-1\t-1\t-1\t-1\t1e+08\n\n3\tc1ccncc1\t[*:1]O\t[*:1][H]\t1\t[#0;X1;H0;+0;!\nR:1]-[#6;X3;H0;+0;R]\t[*:1]-[#6](~*)(~*)\t268\t4\t-16\n\t0\t\t0\t-16\t-16\t-16\t-16\t-16\n\t1e+08\n```\n\nHowever, the distributed cluster indexing does not work. For that\nyou'll need to generate the SQLite file and import it into Postgres.\n\nThe [Postgres\nwiki]( [pgloader]( should be able to\ndo this. I haven't tried it - let me know if it works!\n\n")
def test_repository_deprecated_ignore_repository_names() -> None:
    """The legacy ``ignore_repository_names`` argument must trigger a
    DeprecationWarning whether it is passed by keyword or positionally,
    and regardless of its value."""
    for flag in (True, False):
        with pytest.warns(DeprecationWarning):
            RepositoryPool(ignore_repository_names=flag)
    for flag in (True, False):
        with pytest.warns(DeprecationWarning):
            RepositoryPool(None, flag)
class TestCassandraMigration():
    """Unit tests for CassandraMigration query generation."""

    def test_queries(self, fs_schema, db_schema):
        """Diffing the two schemas should emit ADD, DROP and ALTER statements."""
        cassandra_migration = CassandraMigration()
        expected_query = [
            'ALTER TABLE table_name ADD (new_feature FloatType);',
            'ALTER TABLE table_name DROP (feature1__avg_over_2_days_rolling_windows);',
            'ALTER TABLE table_name ALTER feature1__avg_over_1_week_rolling_windows TYPE FloatType;',
        ]
        query = cassandra_migration.create_query(fs_schema, 'table_name', db_schema)
        # BUGFIX: was `assert query, expected_query`, which only checked that
        # `query` is truthy -- expected_query was merely the assert message.
        assert query == expected_query

    def test_queries_on_entity(self, fs_schema, db_schema):
        """With write_on_entity=True, extra columns are kept (no DROP)."""
        cassandra_migration = CassandraMigration()
        expected_query = [
            'ALTER TABLE table_name ADD (new_feature FloatType);',
            'ALTER TABLE table_name ALTER feature1__avg_over_1_week_rolling_windows TYPE FloatType;',
        ]
        query = cassandra_migration.create_query(fs_schema, 'table_name', db_schema, True)
        # BUGFIX: was `assert query, expected_query` (never compared).
        assert query == expected_query

    def test_create_table_query(self, fs_schema):
        """Without a db_schema, a CREATE TABLE statement is produced."""
        cassandra_migration = CassandraMigration()
        expected_query = [
            'CREATE TABLE test.table_name (id LongType, timestamp TimestampType, new_feature FloatType, feature1__avg_over_1_week_rolling_windows FloatType, PRIMARY KEY (id, timestamp));'
        ]
        query = cassandra_migration.create_query(fs_schema, 'table_name')
        # BUGFIX: was `assert query, expected_query` (never compared).
        assert query == expected_query
class ChatGPTAPIWrapper(BaseAPIWrapper):
    """Thin wrapper over openai.ChatCompletion with helpers for extracting
    the first / majority / all responses from a completion result.

    NOTE(review): the bare ``_with_exponential_backoff`` statement below and
    the missing ``self``/``@staticmethod`` on every method suggest decorators
    were stripped during extraction -- confirm against the original source.
    """
    _with_exponential_backoff
    def call(prompt: Union[(str, List[Dict[(str, str)]])], max_tokens: int, engine: str, stop_token: str, temperature: float, top_p: float=1, num_completions: int=1, system_message: Optional[str]=None) -> dict:
        """Issue a chat-completion request.

        ``prompt`` may be a plain user string or a ready-made message list; a
        system message is prepended in either case. Requests for more than
        two completions are split into chunks of two and their choices merged.
        Raises ValueError for any other prompt type.
        """
        system_message = (system_message or 'You are ChatGPT, a large language model trained by OpenAI.')
        if isinstance(prompt, str):
            messages = []
            if system_message:
                messages.append({'role': 'system', 'content': system_message})
            messages.append({'role': 'user', 'content': prompt})
        elif isinstance(prompt, list):
            messages = prompt
            if system_message:
                messages.insert(0, {'role': 'system', 'content': system_message})
        else:
            raise ValueError('Invalid prompt type. Prompt should be a string or a list of messages.')
        if (num_completions > 2):
            # Fan out recursively in chunks of at most 2 completions per API
            # call and concatenate the returned choices.
            response_combined = dict()
            num_completions_remaining = num_completions
            for i in range(0, num_completions, 2):
                response = ChatGPTAPIWrapper.call(prompt=prompt, max_tokens=max_tokens, engine=engine, stop_token=stop_token, temperature=temperature, top_p=top_p, num_completions=min(num_completions_remaining, 2))
                num_completions_remaining -= 2
                if (i == 0):
                    response_combined = response
                else:
                    response_combined['choices'] += response['choices']
            return response_combined
        response = openai.ChatCompletion.create(model=engine, messages=messages, temperature=temperature, max_tokens=max_tokens, top_p=top_p, stop=([stop_token] if stop_token else None), n=num_completions)
        return response

    def get_first_response(response) -> Dict[(str, Any)]:
        """Return the content string of the first choice.

        NOTE(review): returns a str despite the Dict annotation.
        """
        text = response['choices'][0]['message']['content']
        return text

    def get_majority_answer(response) -> Dict[(str, Any)]:
        """Return the most common answer across all choices; on a tie between
        the top two answers, fall back to the first response."""
        answers = [choice['message']['content'] for choice in response['choices']]
        answers = Counter(answers)
        if (len(answers) == 1):
            return answers.most_common(1)[0][0]
        # Top-two tie: prefer the first generated choice.
        if (answers.most_common(1)[0][1] == answers.most_common(2)[1][1]):
            return ChatGPTAPIWrapper.get_first_response(response)
        return answers.most_common(1)[0][0]

    def get_all_responses(response) -> Dict[(str, Any)]:
        """Return every choice's content as generated_answer records
        (logprobs are not available from the chat API, hence None)."""
        return [{'generated_answer': choice['message']['content'], 'logprobs': None} for choice in response['choices']]
# NOTE(review): the bare call below looks like a truncated decorator, e.g.
# "@specialize_function('builtins.list')" -- confirm against upstream mypyc.
_function('builtins.list')
def translate_list_from_generator_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> (Value | None):
    """Special-case ``list(<generator expr>)``: build the list directly with a
    preallocated length instead of materializing the generator. Returns None
    when the call shape does not match (so normal translation applies)."""
    # Only a single positional generator-expression argument qualifies.
    if ((len(expr.args) == 1) and (expr.arg_kinds[0] == ARG_POS) and isinstance(expr.args[0], GeneratorExpr)):
        return sequence_from_generator_preallocate_helper(builder, expr.args[0], empty_op_llbuilder=builder.builder.new_list_op_with_length, set_item_op=new_list_set_item_op)
    return None
def pose_ren_net(net_type, iter_idx, output_dir):
    """Build the Pose-REN caffe prototxt for the ICVL hand-pose dataset.

    Args:
        net_type: 'train', 'test-train' or 'test-test' -- selects the data
            layer, label sources and label slicing for that phase.
        iter_idx: stage index used to locate the cached image/label lists.
        output_dir: directory holding the per-stage cache files.

    Returns:
        The network definition as a prototxt string; for 'train' the
        train-phase data layer is prepended to the shared body.
    """
    dataset = 'icvl'
    n = caffe.NetSpec()
    # Camera intrinsics and joint count for the dataset.
    (fx_, fy_, ux_, uy_) = util.get_param(dataset)
    point_num_ = util.get_joint_num(dataset)
    root_folder_ = config.icvl_data_dir
    if (net_type == 'train'):
        image_source_ = '{}/cache/train_image_s{}.txt'.format(output_dir, iter_idx)
        label_source_ = '{}/cache/train_label_s{}.txt'.format(output_dir, iter_idx)
        # Labels carry pose + previous-stage pose, hence point_num * 2.
        pose_data_param_train = dict(image_source=image_source_, label_source=label_source_, root_folder=(root_folder_ + 'train/Depth/'), batch_size=128, shuffle=True, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.ICVL)
        # Train phase (phase=0) with translation/rotation/zoom augmentation.
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=0), transform_param=dict(is_trans=True, trans_dx=10, trans_dy=10, is_rotate=True, rotate_deg=15, is_zoom=True, zoom_scale=0.1), pose_data_param=pose_data_param_train, ntop=2)
        (n.pose, n.prev_pose) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=(point_num_ * 3)), include=dict(phase=0), ntop=2)
        # Serialize the train data layer separately; the test layer below
        # overwrites n.data/n.label in the NetSpec.
        first_layer = str(n.to_proto())
        pose_data_param_test = dict(image_source=(root_folder_ + 'test_image.txt'), label_source='{}/cache/test_label_s{}.txt'.format(output_dir, iter_idx), root_folder=(root_folder_ + 'test/Depth/'), batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.ICVL)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
        # Test labels additionally carry the crop center (output_center=True).
        (n.pose, n.prev_pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=[(point_num_ * 3), (point_num_ * 6)]), include=dict(phase=1), ntop=3)
    elif (net_type == 'test-train'):
        # Evaluate on (a skip-sampled subset of) the training set.
        label_source_ = '{}/cache/train_label_s{}_single.txt'.format(output_dir, iter_idx)
        pose_data_param_test = dict(image_source='{}/cache/train_image_skip.txt'.format(output_dir), label_source=label_source_, root_folder=(root_folder_ + 'train/Depth/'), batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.ICVL)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
        (n.pose, n.prev_pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=[(point_num_ * 3), (point_num_ * 6)]), include=dict(phase=1), ntop=3)
    elif (net_type == 'test-test'):
        # Evaluate on the test set.
        label_source_ = '{}/cache/test_label_s{}.txt'.format(output_dir, iter_idx)
        pose_data_param_test = dict(image_source=(root_folder_ + 'test_image.txt'), label_source=label_source_, root_folder=(root_folder_ + 'test/Depth/'), batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.ICVL)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
        (n.pose, n.prev_pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=[(point_num_ * 3), (point_num_ * 6)]), include=dict(phase=1), ntop=3)
    # Shared convolutional trunk: three residual stages with max-pooling.
    (n.conv0, n.relu0) = conv_relu(n.data, 16)
    n.conv1 = conv(n.relu0, 16)
    n.pool1 = max_pool(n.conv1)
    n.relu1 = L.ReLU(n.pool1, in_place=True)
    (n.conv2_0, n.relu2_0) = conv_relu(n.pool1, 32, ks=1, pad=0)
    (n.conv2, n.relu2) = conv_relu(n.relu2_0, 32)
    n.conv3 = conv(n.relu2, 32)
    n.res1 = L.Eltwise(n.conv2_0, n.conv3)
    n.pool2 = max_pool(n.res1)
    n.relu3 = L.ReLU(n.pool2, in_place=True)
    (n.conv3_0, n.relu3_0) = conv_relu(n.relu3, 64, ks=1, pad=0)
    (n.conv4, n.relu4) = conv_relu(n.relu3_0, 64)
    n.conv5 = conv(n.relu4, 64)
    n.res2 = L.Eltwise(n.conv3_0, n.conv5)
    n.pool3 = max_pool(n.res2)
    n.relu5 = L.ReLU(n.pool3, in_place=True)
    # Per-joint ROI branches guided by the previous stage's pose estimate.
    for idx in xrange(point_num_):
        # Skip every third joint after the first (joint grouping scheme).
        if ((((idx + 1) % 3) == 0) and idx):
            continue
        rois = 'rois_{}'.format(idx)
        n[rois] = L.Python(n.prev_pose, module='python_layers.py_generate_roi_layer', layer='PyGenerateROILayer', ntop=1, param_str=str(dict(joint_idx=idx, roi_h=6, roi_w=6, img_h=96, img_w=96, spatial_mul=8)))
        roipool = 'roi_pool_{}'.format(idx)
        n[roipool] = L.ROIPooling(n.pool3, n[rois], roi_pooling_param=dict(pooled_w=7, pooled_h=7, spatial_scale=0.125))
        fc1 = 'fc1_{}'.format(idx)
        relu6 = 'relu6_{}'.format(idx)
        drop1 = 'drop1_{}'.format(idx)
        (n[fc1], n[relu6], n[drop1]) = fc_relu_dropout(n[roipool], 2048, 0.5)
    # Structured connection: each group concatenates the palm branch (0) with
    # two finger branches before its own FC stack.
    connect_structure_1 = [[0, 1, 3], [0, 4, 6], [0, 7, 9], [0, 10, 12], [0, 13, 15]]
    concate_bottom_final = []
    for idx in xrange(len(connect_structure_1)):
        concate_bottom = []
        for jdx in xrange(len(connect_structure_1[idx])):
            drop1 = 'drop1_{}'.format(connect_structure_1[idx][jdx])
            concate_bottom.append(n[drop1])
        concate_1 = 'concate_1_{}'.format(idx)
        n[concate_1] = L.Concat(*concate_bottom)
        fc2 = 'fc2_{}'.format(idx)
        relu7 = 'relu7_{}'.format(idx)
        drop2 = 'drop2_{}'.format(idx)
        (n[fc2], n[relu7], n[drop2]) = fc_relu_dropout(n[concate_1], 2048, 0.5)
        concate_bottom_final.append(n[drop2])
    # Final regression head: 3 coordinates per joint.
    n.fc_concat = L.Concat(*concate_bottom_final)
    n.fc3_0 = fc(n.fc_concat, (point_num_ * 3))
    if (net_type == 'train'):
        n.loss = L.SmoothL1Loss(n.fc3_0, n.pose, smooth_l1_loss_param=dict(sigma=10), loss_weight=1)
        # Test-phase metric only (loss_weight=0).
        n.distance = L.PoseDistance(n.fc3_0, n.pose, n.center, loss_weight=0, pose_distance_param=dict(cube_length=150, fx=fx_, fy=fy_, ux=ux_, uy=uy_), include=dict(phase=1))
        return (first_layer + str(n.to_proto()))
    else:
        (n.error, n.output) = L.PoseDistance(n.fc3_0, n.pose, n.center, pose_distance_param=dict(cube_length=150, fx=fx_, fy=fy_, ux=ux_, uy=uy_, output_pose=True), include=dict(phase=1), ntop=2)
        return str(n.to_proto())
class MobileNetV2(nn.Module):
    """MobileNetV2 for 32x32 inputs (CIFAR-style stem with a stride-1 first
    conv). Optional ``deconv``/``delinear``/``channel_deconv`` factories swap
    conv+BN pairs and the classifier for deconvolution-based layers."""

    # Per stage: (expansion, out_planes, num_blocks, stride).
    cfg = [(1, 16, 1, 1),
           (6, 24, 2, 1),
           (6, 32, 3, 2),
           (6, 64, 4, 2),
           (6, 96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]

    def __init__(self, num_classes=10, deconv=None, delinear=None, channel_deconv=None):
        super(MobileNetV2, self).__init__()
        if deconv:
            # Deconv layers replace conv+BN, so no bn1/bn2 attributes exist;
            # forward() keys off hasattr to skip them.
            self.deconv = True
            self.conv1 = deconv(3, 32, kernel_size=3, stride=1, padding=1, bias=True, freeze=True, n_iter=10)
            self.conv2 = deconv(320, 1280, kernel_size=1, stride=1, padding=0, bias=True)
        else:
            self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(32)
            self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn2 = nn.BatchNorm2d(1280)
        self.layers = self._make_layers(in_planes=32, deconv=deconv)
        self.linear = delinear(1280, num_classes) if delinear else nn.Linear(1280, num_classes)
        if channel_deconv:
            self.deconv1 = channel_deconv()

    def _make_layers(self, in_planes, deconv=None):
        """Stack the inverted-residual blocks described by ``cfg``."""
        blocks = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            # Only the first block of a stage may downsample.
            for s in [stride] + [1] * (num_blocks - 1):
                blocks.append(Block(in_planes, out_planes, expansion, s, deconv=deconv))
                in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.conv1(x)
        if hasattr(self, 'bn1'):
            out = self.bn1(out)
        out = F.relu(out)
        out = self.layers(out)
        out = self.conv2(out)
        if hasattr(self, 'bn2'):
            out = self.bn2(out)
        out = F.relu(out)
        if hasattr(self, 'deconv1'):
            out = self.deconv1(out)
        out = F.avg_pool2d(out, 4)
        flat = out.view(out.size(0), -1)
        return self.linear(flat)
# NOTE(review): the line below starts with '.', which is not valid Python on
# its own; it looks like a truncated decorator, most likely
# "@pytest.mark.script_launch_mode('inprocess')" (an "@responses.activate"
# may also have been dropped). Confirm against the original test module.
.script_launch_mode('inprocess')
def test_private_token_overrides_job_token(monkeypatch, script_runner, resp_get_project):
    """With both GITLAB_PRIVATE_TOKEN and CI_JOB_TOKEN set, the CLI must
    authenticate with the private token (PRIVATE-TOKEN header)."""
    monkeypatch.setenv('GITLAB_PRIVATE_TOKEN', PRIVATE_TOKEN)
    monkeypatch.setenv('CI_JOB_TOKEN', CI_JOB_TOKEN)
    # Response mocks that only match when the PRIVATE-TOKEN header is sent.
    resp_get_project_with_token = copy.deepcopy(resp_get_project)
    resp_get_project_with_token.update(match=[responses.matchers.header_matcher({'PRIVATE-TOKEN': PRIVATE_TOKEN})])
    resp_auth_with_token = copy.deepcopy(resp_get_project_with_token)
    resp_auth_with_token.update(url=f'{DEFAULT_URL}/api/v4/user')
    resp_auth_with_token['json'].update(username='user', web_url=f'{DEFAULT_URL}/user')
    responses.add(**resp_get_project_with_token)
    responses.add(**resp_auth_with_token)
    ret = script_runner.run(['gitlab', 'project', 'get', '--id', '1'])
    assert ret.success
def _insert_downsample_or_upsample_ops_if_needed(input_tensor: tf.Tensor, parent_mask: List, child_mask: List) -> tf.Tensor:
    """Reconcile a parent layer's channel mask with its child's.

    When the parent keeps more channels than the child, a downsample op is
    inserted; fewer, an upsample op; equal counts require the masks to be
    identical and the tensor passes through unchanged.
    """
    assert len(parent_mask) == len(child_mask)
    kept_by_parent = sum(parent_mask)
    kept_by_child = sum(child_mask)
    if kept_by_parent == kept_by_child:
        # Same number of kept channels: the masks must agree element-wise.
        assert parent_mask == child_mask
        return input_tensor
    if kept_by_parent > kept_by_child:
        return _insert_downsample_op(input_tensor, parent_mask, child_mask)
    return _insert_upsample_op(input_tensor, parent_mask, child_mask)
class NonBlocking(IterDataPipe):
    """Iterator datapipe base whose __next__ retries until data is available.

    Subclasses implement nonblocking_next()/reset_iterator(); when
    nonblocking_next() raises NotAvailable, the class-level
    not_available_hook (e.g. a short sleep) runs before retrying.
    """
    # Shared across all instances (class attribute), invoked between retries.
    not_available_hook = default_not_available_hook

    def __iter__(self):
        # Restart iteration state so the pipe can be iterated again.
        self.reset_iterator()
        return self

    def __next__(self):
        # Spin on nonblocking_next() until it produces a value; StopIteration
        # raised by the subclass propagates to the caller normally.
        while True:
            try:
                return self.nonblocking_next()
            except NotAvailable:
                if (NonBlocking.not_available_hook is not None):
                    NonBlocking.not_available_hook()

    def nonblocking_next(self):
        """Subclass hook: return the next element or raise NotAvailable."""
        raise NotImplementedError(('nonblocking_next is not implemented for %s' % self.__class__))

    def reset_iterator(self):
        """Subclass hook: reset iteration state."""
        raise NotImplementedError(('reset_iterator is not implemented for %s' % self.__class__))

    # NOTE(review): takes no ``self`` -- upstream this is a @staticmethod and
    # the decorator may have been lost in extraction; confirm before calling
    # it on an instance.
    def register_not_available_hook(hook_function):
        NonBlocking.not_available_hook = hook_function
class Environment():
    """Sequential cursor over chart data for a trading agent.

    Each observe() call advances one row through ``chart_data`` (a
    DataFrame-like object indexed with ``iloc``) and exposes the current
    row's closing price via get_price().
    """
    # Column index of the closing price within a chart-data row.
    PRICE_IDX = 4

    def __init__(self, chart_data=None):
        self.chart_data = chart_data   # DataFrame-like; rows are observations
        self.observation = None        # latest row, None before first observe()
        self.idx = -1                  # cursor into chart_data

    def reset(self):
        """Rewind the cursor so iteration starts over from the first row."""
        self.observation = None
        self.idx = -1

    def observe(self):
        """Advance one row and return it, or None when data is exhausted."""
        next_idx = self.idx + 1
        if next_idx >= len(self.chart_data):
            return None
        self.idx = next_idx
        self.observation = self.chart_data.iloc[next_idx]
        return self.observation

    def get_price(self):
        """Return the current observation's price, or None before observe()."""
        if self.observation is None:
            return None
        return self.observation.iloc[self.PRICE_IDX]
class SolverWrapper(object):
    """Drives Faster R-CNN-style training in TF1: graph construction,
    snapshotting/restoring of both weights and data-layer state, and the
    main training loop with learning-rate steps and TensorBoard summaries."""

    def __init__(self, sess, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):
        # `sess` is accepted but unused here; each method takes the session
        # explicitly instead.
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.valroidb = valroidb
        self.output_dir = output_dir       # where .ckpt/.pkl snapshots go
        self.tbdir = tbdir                 # TensorBoard dir (train)
        self.tbvaldir = (tbdir + '_val')   # TensorBoard dir (validation)
        if (not os.path.exists(self.tbvaldir)):
            os.makedirs(self.tbvaldir)
        self.pretrained_model = pretrained_model

    def snapshot(self, sess, iter):
        """Write a model checkpoint (.ckpt) plus a .pkl with the numpy RNG
        state, both data layers' cursors/permutations and the iteration, so
        training can resume deterministically."""
        net = self.net
        if (not os.path.exists(self.output_dir)):
            os.makedirs(self.output_dir)
        filename = ((cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter)) + '.ckpt')
        filename = os.path.join(self.output_dir, filename)
        self.saver.save(sess, filename)
        print('Wrote snapshot to: {:s}'.format(filename))
        nfilename = ((cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter)) + '.pkl')
        nfilename = os.path.join(self.output_dir, nfilename)
        st0 = np.random.get_state()
        cur = self.data_layer._cur
        perm = self.data_layer._perm
        cur_val = self.data_layer_val._cur
        perm_val = self.data_layer_val._perm
        # Dump order must match the load order in from_snapshot().
        with open(nfilename, 'wb') as fid:
            pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
            pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
        return (filename, nfilename)

    def from_snapshot(self, sess, sfile, nfile):
        """Restore weights from ``sfile`` and training state from ``nfile``;
        returns the iteration the snapshot was taken at."""
        print('Restoring model snapshots from {:s}'.format(sfile))
        self.saver.restore(sess, sfile)
        print('Restored.')
        with open(nfile, 'rb') as fid:
            st0 = pickle.load(fid)
            cur = pickle.load(fid)
            perm = pickle.load(fid)
            cur_val = pickle.load(fid)
            perm_val = pickle.load(fid)
            last_snapshot_iter = pickle.load(fid)
        np.random.set_state(st0)
        self.data_layer._cur = cur
        self.data_layer._perm = perm
        self.data_layer_val._cur = cur_val
        self.data_layer_val._perm = perm_val
        return last_snapshot_iter

    def get_variables_in_checkpoint_file(self, file_name):
        """Return {variable name: shape} for a checkpoint.

        NOTE(review): on failure this prints the error and implicitly
        returns None, which callers must tolerate.
        """
        try:
            reader = pywrap_tensorflow.NewCheckpointReader(file_name)
            var_to_shape_map = reader.get_variable_to_shape_map()
            return var_to_shape_map
        except Exception as e:
            print(str(e))
            if ('corrupted compressed block contents' in str(e)):
                print("It's likely that your checkpoint file has been compressed with SNAPPY.")

    def construct_graph(self, sess):
        """Build the training graph: network, momentum optimizer with an
        assignable learning-rate Variable, optional 2x gradient scaling for
        biases, saver and summary writers. Returns (lr, train_op)."""
        with sess.graph.as_default():
            tf.set_random_seed(cfg.RNG_SEED)
            layers = self.net.create_architecture('TRAIN', self.imdb.num_classes, tag='default', anchor_scales=cfg.ANCHOR_SCALES, anchor_ratios=cfg.ANCHOR_RATIOS)
            loss = layers['total_loss']
            # lr is a Variable so train_model() can step it with tf.assign.
            lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
            self.optimizer = tf.train.MomentumOptimizer(lr, cfg.TRAIN.MOMENTUM)
            gvs = self.optimizer.compute_gradients(loss)
            if cfg.TRAIN.DOUBLE_BIAS:
                # Classic Caffe-style trick: biases learn at twice the rate.
                final_gvs = []
                with tf.variable_scope('Gradient_Mult') as scope:
                    for (grad, var) in gvs:
                        scale = 1.0
                        if (cfg.TRAIN.DOUBLE_BIAS and ('/biases:' in var.name)):
                            scale *= 2.0
                        if (not np.allclose(scale, 1.0)):
                            grad = tf.multiply(grad, scale)
                        final_gvs.append((grad, var))
                train_op = self.optimizer.apply_gradients(final_gvs)
            else:
                train_op = self.optimizer.apply_gradients(gvs)
            self.saver = tf.train.Saver(max_to_keep=100000)
            self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
            self.valwriter = tf.summary.FileWriter(self.tbvaldir)
            return (lr, train_op)

    def find_previous(self):
        """Scan output_dir for prior snapshots (oldest first), excluding the
        ones taken right after a learning-rate step ("red" files), so restore
        never resumes from an immediately-post-step state. Returns
        (count, pkl_paths, ckpt_paths)."""
        sfiles = os.path.join(self.output_dir, (cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.ckpt.meta'))
        sfiles = glob.glob(sfiles)
        sfiles.sort(key=os.path.getmtime)
        redfiles = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            redfiles.append(os.path.join(self.output_dir, (cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.ckpt.meta'.format((stepsize + 1)))))
        sfiles = [ss.replace('.meta', '') for ss in sfiles if (ss not in redfiles)]
        nfiles = os.path.join(self.output_dir, (cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl'))
        nfiles = glob.glob(nfiles)
        nfiles.sort(key=os.path.getmtime)
        redfiles = [redfile.replace('.ckpt.meta', '.pkl') for redfile in redfiles]
        nfiles = [nn for nn in nfiles if (nn not in redfiles)]
        lsf = len(sfiles)
        assert (len(nfiles) == lsf)
        return (lsf, nfiles, sfiles)

    def initialize(self, sess):
        """Fresh initialization when no snapshot exists.

        NOTE(review): ``index`` is hard-coded to 1, so the first branch
        (partial restore via get_variables_to_restore + fix_variables) is
        dead code; only the second branch -- full init then restoring
        everything except cls_score/bbox_pred -- ever runs. Confirm whether
        this was a deliberate override.
        """
        np_paths = []
        ss_paths = []
        index = 1
        if (index == 0):
            print('Loading initial model weights from {:s}'.format(self.pretrained_model))
            variables = tf.global_variables()
            sess.run(tf.variables_initializer(variables, name='init'))
            var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
            variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)
            print('Loaded.')
            self.net.fix_variables(sess, self.pretrained_model)
            print('Fixed.')
        else:
            variables = tf.global_variables()
            init = tf.global_variables_initializer()
            sess.run(init)
            # Re-initialize the classification/regression heads; restore
            # everything else from the pretrained checkpoint.
            cls_score_var = [var for var in variables if ('cls_score' in var.name)]
            bbox_pred_var = [var for var in variables if ('bbox_pred' in var.name)]
            no_restore_var = (cls_score_var + bbox_pred_var)
            restore_var = [var for var in variables if (var not in no_restore_var)]
            print('{0}'.format(restore_var))
            restorer = tf.train.Saver(var_list=restore_var)
            restorer.restore(sess, self.pretrained_model)
        last_snapshot_iter = 0
        rate = cfg.TRAIN.LEARNING_RATE
        stepsizes = list(cfg.TRAIN.STEPSIZE)
        return (rate, last_snapshot_iter, stepsizes, np_paths, ss_paths)

    def restore(self, sess, sfile, nfile):
        """Resume from an existing snapshot; replays the learning-rate decays
        already passed and returns the remaining step boundaries."""
        np_paths = [nfile]
        ss_paths = [sfile]
        last_snapshot_iter = self.from_snapshot(sess, sfile, nfile)
        rate = cfg.TRAIN.LEARNING_RATE
        stepsizes = []
        for stepsize in cfg.TRAIN.STEPSIZE:
            if (last_snapshot_iter > stepsize):
                rate *= cfg.TRAIN.GAMMA
            else:
                stepsizes.append(stepsize)
        return (rate, last_snapshot_iter, stepsizes, np_paths, ss_paths)

    def remove_snapshot(self, np_paths, ss_paths):
        """Delete the oldest snapshots until only SNAPSHOT_KEPT remain."""
        to_remove = (len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT)
        for c in range(to_remove):
            nfile = np_paths[0]
            os.remove(str(nfile))
            np_paths.remove(nfile)
        to_remove = (len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT)
        for c in range(to_remove):
            sfile = ss_paths[0]
            # V1 checkpoints are a single file; V2 split into data/index.
            if os.path.exists(str(sfile)):
                os.remove(str(sfile))
            else:
                os.remove(str((sfile + '.data-00000-of-00001')))
                os.remove(str((sfile + '.index')))
            sfile_meta = (sfile + '.meta')
            os.remove(str(sfile_meta))
            ss_paths.remove(sfile)

    def train_model(self, sess, max_iters):
        """Main loop: build graph, init or resume, then iterate training
        steps with periodic summaries, LR decays at the step boundaries
        (snapshotting just before each decay), and periodic snapshots."""
        self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
        self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
        (lr, train_op) = self.construct_graph(sess)
        (lsf, nfiles, sfiles) = self.find_previous()
        if (lsf == 0):
            (rate, last_snapshot_iter, stepsizes, np_paths, ss_paths) = self.initialize(sess)
        else:
            (rate, last_snapshot_iter, stepsizes, np_paths, ss_paths) = self.restore(sess, str(sfiles[(- 1)]), str(nfiles[(- 1)]))
        timer = Timer()
        # Note: `iter` shadows the builtin throughout this loop (pre-existing).
        iter = (last_snapshot_iter + 1)
        last_summary_time = time.time()
        # Treat the step boundaries as a stack, ending at max_iters.
        stepsizes.append(max_iters)
        stepsizes.reverse()
        next_stepsize = stepsizes.pop()
        while (iter < (max_iters + 1)):
            if (iter == (next_stepsize + 1)):
                # Snapshot at the boundary, then decay the learning rate.
                self.snapshot(sess, iter)
                rate *= cfg.TRAIN.GAMMA
                sess.run(tf.assign(lr, rate))
                next_stepsize = stepsizes.pop()
            timer.tic()
            blobs = self.data_layer.forward()
            now = time.time()
            if ((iter == 1) or ((now - last_summary_time) > cfg.TRAIN.SUMMARY_INTERVAL)):
                # Periodically also record a validation-batch summary.
                (rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary) = self.net.train_step_with_summary(sess, blobs, train_op)
                self.writer.add_summary(summary, float(iter))
                blobs_val = self.data_layer_val.forward()
                summary_val = self.net.get_summary(sess, blobs_val)
                self.valwriter.add_summary(summary_val, float(iter))
                last_summary_time = now
            else:
                (rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss) = self.net.train_step(sess, blobs, train_op)
            timer.toc()
            if ((iter % cfg.TRAIN.DISPLAY) == 0):
                print(('iter: %d / %d, total loss: %.6f\n >>> rpn_loss_cls: %.6f\n >>> rpn_loss_box: %.6f\n >>> loss_cls: %.6f\n >>> loss_box: %.6f\n >>> lr: %f' % (iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr.eval())))
                print('speed: {:.3f}s / iter'.format(timer.average_time))
            if ((iter % cfg.TRAIN.SNAPSHOT_ITERS) == 0):
                last_snapshot_iter = iter
                (ss_path, np_path) = self.snapshot(sess, iter)
                np_paths.append(np_path)
                ss_paths.append(ss_path)
                if (len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT):
                    self.remove_snapshot(np_paths, ss_paths)
            iter += 1
        # Final snapshot if the last iteration wasn't already snapshotted.
        if (last_snapshot_iter != (iter - 1)):
            self.snapshot(sess, (iter - 1))
        self.writer.close()
        self.valwriter.close()
class SecretsStore(ContextFactory):
    """Context factory exposing secrets read from a watched local JSON file.

    File reading/caching is delegated to a ``FileWatcher``; the raw JSON is
    turned into per-secret attribute dicts by ``parser`` (defaults to
    ``parse_secrets_fetcher``).
    """

    def __init__(self, path: str, timeout: Optional[int] = None, backoff: Optional[float] = None, parser: Optional[SecretParser] = None):
        self.parser = parser or parse_secrets_fetcher
        self._filewatcher = FileWatcher(path, json.load, timeout=timeout, backoff=backoff)

    def _get_data(self) -> Tuple[Any, float]:
        """Return (parsed secrets data, file mtime).

        Raises SecretsNotAvailableError when the watched file cannot be read.
        """
        try:
            return self._filewatcher.get_data_and_mtime()
        except WatchedFileNotAvailableError as exc:
            # Chain explicitly so the root cause stays visible in the traceback.
            raise SecretsNotAvailableError(exc) from exc

    def get_raw(self, path: str) -> Dict[str, str]:
        """Return the raw attribute dict for the secret at ``path``."""
        return self.get_raw_and_mtime(path)[0]

    def get_credentials(self, path: str) -> CredentialSecret:
        """Return a username/password (type=credential) secret."""
        return self.get_credentials_and_mtime(path)[0]

    def get_simple(self, path: str) -> bytes:
        """Return a single-value (type=simple) secret, decoded to bytes."""
        return self.get_simple_and_mtime(path)[0]

    def get_versioned(self, path: str) -> VersionedSecret:
        """Return a previous/current/next (type=versioned) secret."""
        return self.get_versioned_and_mtime(path)[0]

    def get_vault_url(self) -> str:
        """Deprecated: return the Vault URL recorded in the secrets file."""
        warn_deprecated('get_vault_url is deprecated and will be removed in v3.0.')
        data, _ = self._get_data()
        return data['vault']['url']

    def get_vault_token(self) -> str:
        """Deprecated: return the Vault token recorded in the secrets file."""
        warn_deprecated('get_vault_token is deprecated and will be removed in v3.0.')
        data, _ = self._get_data()
        return data['vault']['token']

    def get_raw_and_mtime(self, secret_path: str) -> Tuple[Dict[str, str], float]:
        """Return (raw attribute dict, file mtime) for the secret at ``secret_path``."""
        data, mtime = self._get_data()
        return self.parser(data, secret_path), mtime

    def get_credentials_and_mtime(self, path: str) -> Tuple[CredentialSecret, float]:
        """Return (CredentialSecret, mtime).

        Raises CorruptSecretError when the secret is not type=credential, is
        not identity-encoded, or is missing/has non-string username/password.
        """
        secret_attributes, mtime = self.get_raw_and_mtime(path)
        if secret_attributes.get('type') != 'credential':
            raise CorruptSecretError(path, 'secret does not have type=credential')
        encoding = secret_attributes.get('encoding', 'identity')
        if encoding != 'identity':
            raise CorruptSecretError(path, f'secret has encoding={encoding} rather than encoding=identity')
        values = {}
        for key in ('username', 'password'):
            try:
                val = secret_attributes[key]
            except KeyError:
                # The KeyError adds no information beyond our message.
                raise CorruptSecretError(path, f"secret does not have key '{key}'") from None
            if not isinstance(val, str):
                raise CorruptSecretError(path, f"secret value '{key}' is not a string")
            values[key] = val
        return CredentialSecret(**values), mtime

    def get_simple_and_mtime(self, path: str) -> Tuple[bytes, float]:
        """Return (decoded secret bytes, mtime) for a type=simple secret."""
        secret_attributes, mtime = self.get_raw_and_mtime(path)
        if secret_attributes.get('type') != 'simple':
            raise CorruptSecretError(path, 'secret does not have type=simple')
        try:
            value = secret_attributes['value']
        except KeyError:
            raise CorruptSecretError(path, 'secret does not have value') from None
        encoding = secret_attributes.get('encoding', 'identity')
        return _decode_secret(path, encoding, value), mtime

    def get_versioned_and_mtime(self, path: str) -> Tuple[VersionedSecret, float]:
        """Return (VersionedSecret, mtime) for a type=versioned secret.

        ``previous`` and ``next`` are optional; absent or empty values become
        ``None`` in the returned VersionedSecret.
        """
        secret_attributes, mtime = self.get_raw_and_mtime(path)
        if secret_attributes.get('type') != 'versioned':
            raise CorruptSecretError(path, 'secret does not have type=versioned')
        previous_value = secret_attributes.get('previous')
        next_value = secret_attributes.get('next')
        try:
            current_value = secret_attributes['current']
        except KeyError:
            raise CorruptSecretError(path, "secret does not have 'current' value") from None
        encoding = secret_attributes.get('encoding', 'identity')
        secret = VersionedSecret(
            previous=_decode_secret(path, encoding, previous_value) if previous_value else None,
            current=_decode_secret(path, encoding, current_value),
            next=_decode_secret(path, encoding, next_value) if next_value else None,
        )
        return secret, mtime

    def make_object_for_context(self, name: str, span: Span) -> 'SecretsStore':
        """Return a per-request caching store sharing this store's file watcher."""
        return _CachingSecretsStore(self._filewatcher, self.parser)
def test_issue1073_conftest_special_objects(pytester: Pytester) -> None:
    """A conftest object whose __getattr__ always raises must not break collection."""
    conftest_source = " class DontTouchMe(object):\n def __getattr__(self, x):\n raise Exception('cant touch me')\n\n x = DontTouchMe()\n "
    test_source = ' def test_some():\n pass\n '
    pytester.makeconftest(conftest_source)
    pytester.makepyfile(test_source)
    result = pytester.runpytest()
    assert result.ret == 0
@pytest.mark.functions
def test_transform_column_dest_column_already_present(dataframe):
    """transform_column must refuse to overwrite a pre-existing dest column.

    Restored the truncated ``@pytest.mark.functions`` decorator (the bare
    ``.functions`` line was a syntax error).
    """
    with pytest.raises(ValueError, match='pyjanitor already present in dataframe'):
        _ = dataframe.assign(pyjanitor=1).transform_column('a', np.log10, dest_column_name='pyjanitor')
    # Overwriting the *source* column in place is explicitly allowed.
    expected_df = dataframe.copy().assign(a=np.log10(dataframe['a']))
    result_df = dataframe.transform_column('a', np.log10, dest_column_name='a')
    assert_frame_equal(result_df, expected_df)
def test_multiple_markers_in_class(item_names_for):
    """A method carrying several order markers is collected once per marker,
    each instance sorted by its own order index."""
    source = '\n import pytest\n\n class TestA:\n .order(1)\n .order(3)\n def test_1_and_3():\n pass\n .order(-1)\n def test_4():\n pass\n\n .order(2)\n def test_2():\n pass\n '
    expected = ['TestA::test_1_and_3[index=1]', 'test_2', 'TestA::test_1_and_3[index=3]', 'TestA::test_4']
    assert item_names_for(source) == expected
@pytest.fixture(scope='module')
def switch_inline_query_chosen_chat():
    """Module-scoped SwitchInlineQueryChosenChat built from the shared test constants.

    Restored the truncated ``@pytest.fixture`` decorator (the bare
    ``(scope='module')`` line was a syntax error).
    """
    return SwitchInlineQueryChosenChat(
        query=TestSwitchInlineQueryChosenChatBase.query,
        allow_user_chats=TestSwitchInlineQueryChosenChatBase.allow_user_chats,
        allow_bot_chats=TestSwitchInlineQueryChosenChatBase.allow_bot_chats,
        allow_channel_chats=TestSwitchInlineQueryChosenChatBase.allow_channel_chats,
        allow_group_chats=TestSwitchInlineQueryChosenChatBase.allow_group_chats,
    )
def test_move_to_numpy() -> None:
    """move_to_numpy converts every tensor value to an ndarray, keeping the keys."""
    in_dict = {'k1': torch.zeros(10), 'k2': torch.ones(4)}
    for k in in_dict:
        assert isinstance(in_dict[k], torch.Tensor)
    out_dict = move_to_numpy(in_dict)
    # np.alltrue was removed in NumPy 2.0; a direct list comparison is both
    # correct and clearer for comparing key order.
    assert list(in_dict.keys()) == list(out_dict.keys())
    for k in out_dict:
        assert isinstance(out_dict[k], np.ndarray)
def test_kaiminginit():
    """KaimingInit must touch only the layer types named in ``layer``, and
    ``bias`` must be applied as a constant fill on those layers."""

    def _is_filled(tensor, value):
        # True when every element of ``tensor`` equals ``value``.
        return torch.equal(tensor, torch.full(tensor.shape, value))

    # Matching by concrete class name: only Conv2d is initialized.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init = KaimingInit(bias=0.1, layer='Conv2d')
    init(model)
    assert _is_filled(model[0].bias, 0.1)
    assert not _is_filled(model[2].bias, 0.1)

    # Matching a list of class names: both Conv2d and Linear are initialized.
    init = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    reset = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    model.apply(reset)
    for idx in (0, 2):
        assert _is_filled(model[idx].weight, 0.0)
        assert _is_filled(model[idx].bias, 0.0)
    init(model)
    for idx in (0, 2):
        assert not _is_filled(model[idx].weight, 0.0)
        assert _is_filled(model[idx].bias, 10.0)

    # Matching a base-class name ('_ConvNd') covers Conv1d and Conv2d alike.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init = KaimingInit(bias=0.1, layer='_ConvNd')
    init(model)
    assert torch.all(model[0].bias == 0.1)
    assert torch.all(model[2].bias == 0.1)

    init = KaimingInit(a=100, bias=10, layer='_ConvNd')
    reset = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(reset)
    for idx in (0, 2):
        assert _is_filled(model[idx].weight, 0.0)
        assert _is_filled(model[idx].bias, 0.0)
    init(model)
    for idx in (0, 2):
        assert not _is_filled(model[idx].weight, 0.0)
        assert _is_filled(model[idx].bias, 10.0)
class CommonItem(dict):
    """A dict-backed record in which some keys (``varitems``) are "variable"
    and excluded from weak equality comparisons."""

    # Keys treated as variable. NOTE(review): this is a shared class-level
    # list; callers appear expected to assign a per-instance list, otherwise
    # all instances mutate the same default — confirm against call sites.
    varitems = []

    def var_tuple(self):
        """Return the values of the variable keys as a tuple, in sorted key
        order. Sorts ``self.varitems`` in place as a side effect."""
        self.varitems.sort()
        return tuple(self[key] for key in self.varitems)

    def weak_eq(self, other):
        """Compare against ``other`` on all non-variable keys only."""
        return all(self[key] == other[key] for key in self if key not in self.varitems)
class ScoreContributionSpecification():
    """Holds (top-count, weight) pairs describing how score contributions combine."""

    def __init__(self, contributions: List[Tuple[int, float]]) -> None:
        # Each entry is a (count, weight) pair.
        self.contributions = contributions

    def top_counts(self) -> List[int]:
        """The count component of every contribution, in order."""
        return [count for count, _ in self.contributions]

    def weights(self) -> List[float]:
        """The weight component of every contribution, in order."""
        return [weight for _, weight in self.contributions]
class CompositeDateDirective(Directive):
    """A directive whose pattern is itself a composite strftime-style format,
    parsed by a lazily built sub-formatter."""

    # Default composite format; may be overridden per instance via __init__.
    format_string = None

    def __init__(self, key, name, regex, format_string=None, **kw):
        if format_string:
            self.format_string = format_string
        self._sub_formatter = None  # built lazily by the sub_formatter property
        super(CompositeDateDirective, self).__init__(key, name, regex, get_unicode, **kw)

    def _create_formatter(self):
        # Imported locally — presumably to avoid a circular import with
        # khayyam.formatting; TODO confirm.
        from khayyam.formatting import JalaliDateFormatter
        return JalaliDateFormatter(self.format_string)

    @property
    def sub_formatter(self):
        """Lazily created formatter for ``self.format_string``.

        Restored the ``@property`` decorator: ``post_parser`` accesses
        ``self.sub_formatter.parse``, which only works when this is a
        property rather than a plain method.
        """
        if not self._sub_formatter:
            self._sub_formatter = self._create_formatter()
        return self._sub_formatter

    def format(self, d):
        """Format date ``d`` using the composite format string."""
        return d.strftime(self.format_string)

    def post_parser(self, ctx, formatter):
        """Re-parse this directive's captured text with the sub-formatter and
        merge the resulting fields into ``ctx``."""
        ctx.update(self.sub_formatter.parse(ctx[self.name]))
class OfflineTests(util.TestCase):
    """Exercise bpo.Host.problems() against canned (offline) HTTP responses."""

    def _problems(self, response):
        # Run Host.problems() for the 'brettcannon' username against one
        # fake response, returning its result (or letting it raise).
        host = bpo.Host(util.FakeServerHost())
        session = util.FakeSession(response=response)
        return self.run_awaitable(host.problems(session, {'brettcannon'}))

    def test_failure(self):
        # A non-200 response surfaces as an HTTP error.
        with self.assertRaises(client.HTTPException):
            self._problems(util.FakeResponse(status=404))

    def test_filter_extraneous_data(self):
        # Usernames absent from the request set are still reported when the
        # server flags them as unknown.
        payload = json.dumps({'web-flow': None, 'brettcannon': True})
        result = self._problems(util.FakeResponse(data=payload))
        self.assertEqual(result, {ni_abc.Status.username_not_found: {'web-flow'}})

    def test_missing_data(self):
        # A response lacking the requested username is a ValueError.
        payload = json.dumps({'web-flow': None})
        with self.assertRaises(ValueError):
            self._problems(util.FakeResponse(data=payload))

    def test_bad_data(self):
        # A non-bool/None status value is a TypeError.
        payload = json.dumps({'brettcannon': 42})
        with self.assertRaises(TypeError):
            self._problems(util.FakeResponse(data=payload))
class ProgressMeter(object):
    """Formats and prints per-batch progress lines, e.g. ``[ 42/100]`` plus
    a timestamp and the string form of each attached meter."""

    def __init__(self, num_batches, meters, prefix=''):
        # e.g. num_batches=100 -> batch_fmtstr == '[{:3d}/100]'
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for ``batch``."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries.append(time.ctime(time.time()))
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        """Return a '[{:Nd}/total]' template where N is the digit width of
        ``num_batches``.

        The original computed ``len(str(num_batches // 1))``; the ``// 1``
        was a no-op and has been removed.
        """
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.