code stringlengths 281 23.7M |
|---|
def test_project_file_uploads(project):
    """Upload a small text file to the project and verify the returned metadata."""
    filename = 'test.txt'
    file_contents = 'testing contents'
    uploaded_file = project.upload(filename, file_contents)
    (alt, url) = (uploaded_file['alt'], uploaded_file['url'])
    # The alt text defaults to the original file name.
    assert alt == filename
    # Uploaded files are served from /uploads/<hash>/<filename>.
    # BUG FIX: the suffix check compared against the literal '(unknown)'
    # (a corrupted metadata substitution) instead of the uploaded filename.
    assert url.startswith('/uploads/')
    assert url.endswith(f'/{filename}')
    assert uploaded_file['markdown'] == f'[{alt}]({url})'
class Decoder(nn.Module):
    """ERFNet-style decoder: two upsampling stages followed by a transposed-conv head.

    When ``pretrain`` is set, a second head with ``num_classes + 1`` outputs is
    created and selected at forward time via ``flag``.  When
    ``do_segmentation`` is set, a parallel decoder branch (ending in its own
    head) produces a segmentation output from the same input.
    """

    def __init__(self, num_classes, pretrain, do_segmentation=False):
        super().__init__()
        self.pretrain = pretrain
        # Main decoder trunk: 128 -> 64 -> 16 channels with refinement blocks.
        trunk = [
            UpsamplerBlock(128, 64),
            non_bottleneck_1d(64, 0, 1),
            non_bottleneck_1d(64, 0, 1),
            UpsamplerBlock(64, 16),
            non_bottleneck_1d(16, 0, 1),
            non_bottleneck_1d(16, 0, 1),
        ]
        self.layers = nn.ModuleList(trunk)
        self.output_conv = nn.ConvTranspose2d(16, num_classes, 2, stride=2, padding=0, output_padding=0, bias=True)
        if pretrain:
            # Alternate head with one extra class, used when ``flag`` is falsy.
            self.output_conv2 = nn.ConvTranspose2d(16, num_classes + 1, 2, stride=2, padding=0, output_padding=0, bias=True)
        self.do_segmentation = do_segmentation
        if do_segmentation:
            # Separate branch with the same topology, ending in its own head.
            seg_branch = [
                UpsamplerBlock(128, 64),
                non_bottleneck_1d(64, 0, 1),
                non_bottleneck_1d(64, 0, 1),
                UpsamplerBlock(64, 16),
                non_bottleneck_1d(16, 0, 1),
                non_bottleneck_1d(16, 0, 1),
                nn.ConvTranspose2d(16, num_classes + 1, 2, stride=2, padding=0, output_padding=0, bias=True),
            ]
            self.layers1 = nn.ModuleList(seg_branch)

    def forward(self, input, flag):
        output = input
        for block in self.layers:
            output = block(output)
        # Head selection: the pretrain-only head fires when pretraining with a
        # falsy ``flag``; every other combination uses the standard head.
        if self.pretrain and not flag:
            output = self.output_conv2(output)
        else:
            output = self.output_conv(output)
        # Segmentation branch runs on the raw input; when disabled, the input
        # itself is returned unchanged as the second element.
        output_seg = input
        if self.do_segmentation:
            for block in self.layers1:
                output_seg = block(output_seg)
        return (output, output_seg)
class MS_SSIM(nn.Module):
    """Multi-scale structural similarity (MS-SSIM) metric/loss.

    Operates on CUDA tensors of shape (N, 3, H, W); ``max_val`` is the
    dynamic range of the inputs (255 for uint8-scaled images).
    """

    def __init__(self, size_average=True, max_val=255):
        super(MS_SSIM, self).__init__()
        self.size_average = size_average
        self.channel = 3
        self.max_val = max_val

    def _ssim(self, img1, img2, size_average=True):
        """Return (ssim, contrast) for a single scale.

        When ``size_average`` is true the maps are reduced to scalar means.
        BUG FIX: the original returned None when ``size_average`` was False;
        now the full per-pixel maps are returned in that case.
        """
        (_, c, w, h) = img1.size()
        # Shrink the Gaussian window for very small images so the conv fits.
        window_size = min(w, h, 11)
        sigma = (1.5 * window_size) / 11
        window = create_window(window_size, sigma, self.channel).cuda()
        mu1 = F.conv2d(img1, window, padding=(window_size // 2), groups=self.channel)
        mu2 = F.conv2d(img2, window, padding=(window_size // 2), groups=self.channel)
        mu1_sq = mu1.pow(2)
        mu2_sq = mu2.pow(2)
        mu1_mu2 = mu1 * mu2
        # Local (co)variances via E[x^2] - E[x]^2.
        sigma1_sq = F.conv2d(img1 * img1, window, padding=(window_size // 2), groups=self.channel) - mu1_sq
        sigma2_sq = F.conv2d(img2 * img2, window, padding=(window_size // 2), groups=self.channel) - mu2_sq
        sigma12 = F.conv2d(img1 * img2, window, padding=(window_size // 2), groups=self.channel) - mu1_mu2
        # Stabilizing constants from the SSIM paper.
        C1 = (0.01 * self.max_val) ** 2
        C2 = (0.03 * self.max_val) ** 2
        V1 = (2.0 * sigma12) + C2
        V2 = sigma1_sq + sigma2_sq + C2
        ssim_map = (((2 * mu1_mu2) + C1) * V1) / (((mu1_sq + mu2_sq) + C1) * V2)
        mcs_map = V1 / V2
        if size_average:
            return (ssim_map.mean(), mcs_map.mean())
        return (ssim_map, mcs_map)

    def ms_ssim(self, img1, img2, levels=5):
        """Compute MS-SSIM over ``levels`` scales (at most 5 — the length of
        the standard weight vector)."""
        weight = Variable(torch.Tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).cuda())
        msssim = Variable(torch.Tensor(levels).cuda())
        mcs = Variable(torch.Tensor(levels).cuda())
        for i in range(levels):
            (ssim_map, mcs_map) = self._ssim(img1, img2)
            msssim[i] = ssim_map
            mcs[i] = mcs_map
            # Downsample by 2x for the next scale.
            img1 = F.avg_pool2d(img1, kernel_size=2, stride=2)
            img2 = F.avg_pool2d(img2, kernel_size=2, stride=2)
        # Standard MS-SSIM combination: contrast terms for the coarse scales,
        # full SSIM only at the finest remaining scale.
        value = torch.prod(mcs[0:levels - 1] ** weight[0:levels - 1]) * (msssim[levels - 1] ** weight[levels - 1])
        return value

    def forward(self, img1, img2):
        return self.ms_ssim(img1, img2)
def eval_net(model, loader, classifier_criterion, model_type):
    """Evaluate ``model`` over ``loader`` and return (mean loss, accuracy).

    ``model_type`` selects the invocation style: 'gat' models take the gaze
    image plus a status flag (returning a 6-tuple), 'kfn' models take
    (image, gaze) and return predictions directly.  The model is restored to
    train mode before returning.
    """
    model.eval()
    n_correct = 0
    total_cls_loss = 0.0
    for (images, labels, idx, y_hm, gaze_img, attributes) in loader:
        images = images.cuda()
        labels = labels.long().cuda()
        y_hm = y_hm.cuda()
        gaze_img = gaze_img.cuda()
        with torch.no_grad():
            if model_type == 'gat':
                gaze_img = gaze_img.cuda()
                (y_pred, _, _, _, _, coordinates) = model(images, gaze_img, status='test')
            elif model_type == 'kfn':
                y_pred = model(images, gaze_img)
            total_cls_loss += classifier_criterion(y_pred, labels).item()
            # Count top-1 matches against the ground-truth labels.
            preds = y_pred.max(1, keepdim=True)[1]
            n_correct += preds.eq(labels.view_as(preds)).sum().item()
    model.train()
    return (total_cls_loss / len(loader.dataset), n_correct / len(loader.dataset))
class Summary_Info(object):
    """Container pairing tracked "things" with per-thing info and per-thing
    summary sample counts."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all stored state."""
        self.things = []             # ordered list of tracked keys
        self.info = {}               # thing -> arbitrary info payload
        self.summary_n_samples = {}  # thing -> number of summarized samples

    def get_things(self):
        return self.things

    def get_info(self):
        return self.info

    def get_summary_n_samples(self):
        return self.summary_n_samples

    def get_thing_info(self, thing):
        # BUG FIX: the original called self.info(thing) — treating the dict
        # as a callable raises TypeError; subscript it instead.
        return self.info[thing]

    def get_thing_summary_n_samples(self, thing):
        return self.summary_n_samples[thing]

    def set_things(self, things):
        self.things = things

    def set_info(self, info):
        # Keep ``things`` in sync with the keys of the supplied mapping.
        self.things = list(info.keys())
        self.info = info

    def set_summary_n_samples(self, summary_n_samples):
        self.summary_n_samples = summary_n_samples

    def set_thing_info(self, thing, info):
        self.info[thing] = info

    def set_thing_summary_n_samples(self, thing, summary_n_samples):
        self.summary_n_samples[thing] = summary_n_samples
class ImageNetDataPipeline():
    """Bundles the ImageNet data loader and evaluator used by this script."""

    def __init__(self, _config: argparse.Namespace):
        # Parsed command-line configuration (provides ``tfrecord_dir``).
        self._config = _config

    def data_loader(self):
        """Build a single-epoch ImageNet data loader for a MobileNet model."""
        return ImageNetDataLoader(
            self._config.tfrecord_dir,
            image_size=image_net_config.dataset['image_size'],
            batch_size=image_net_config.evaluation['batch_size'],
            num_epochs=1,
            format_bgr=False,
            model_type='mobilenet',
        )

    def evaluate(self, sess: tf.Session, iterations: int=None) -> float:
        """Run evaluation on ``sess`` for ``iterations`` batches and return the score."""
        evaluator = ImageNetEvaluator(
            self._config.tfrecord_dir,
            training_inputs=['keras_learning_phase:0'],
            data_inputs=['input_1:0'],
            validation_inputs=['labels:0'],
            image_size=image_net_config.dataset['image_size'],
            batch_size=image_net_config.evaluation['batch_size'],
            format_bgr=False,
            model_type='mobilenet',
        )
        return evaluator.evaluate(sess, iterations)
def bvh_node_dict2armature(context, bvh_name, bvh_nodes, bvh_frame_time, rotate_mode='XYZ', frame_start=1, IMPORT_LOOP=False, global_matrix=None, use_fps_scale=False):
    """Create an armature object with animation from a parsed BVH node dict.

    Builds edit bones from the BVH rest pose, then bakes every frame of
    animation into F-curves on a new action.  Returns the armature object.
    """
    if frame_start < 1:
        frame_start = 1
    # Deselect everything, then create and activate the new armature object.
    scene = context.scene
    for obj in scene.objects:
        obj.select_set(False)
    arm_data = bpy.data.armatures.new(bvh_name)
    arm_ob = bpy.data.objects.new(bvh_name, arm_data)
    context.collection.objects.link(arm_ob)
    arm_ob.select_set(True)
    context.view_layer.objects.active = arm_ob
    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
    bpy.ops.object.mode_set(mode='EDIT', toggle=False)
    bvh_nodes_list = sorted_nodes(bvh_nodes)
    # Average length of non-zero bones, used to give zero-length bones a tail.
    average_bone_length = 0.0
    nonzero_count = 0
    for bvh_node in bvh_nodes_list:
        l = (bvh_node.rest_head_local - bvh_node.rest_tail_local).length
        if l:
            average_bone_length += l
            nonzero_count += 1
    if not average_bone_length:
        average_bone_length = 0.1
    else:
        average_bone_length = average_bone_length / nonzero_count
    # Remove any pre-existing edit bones.
    # BUG FIX: edit bones live on the armature *data*, not the object — the
    # original removed from ``arm_ob.edit_bones``, which does not exist.
    while arm_data.edit_bones:
        arm_data.edit_bones.remove(arm_data.edit_bones[-1])
    ZERO_AREA_BONES = []
    for bvh_node in bvh_nodes_list:
        bone = bvh_node.temp = arm_data.edit_bones.new(bvh_node.name)
        bone.head = bvh_node.rest_head_world
        bone.tail = bvh_node.rest_tail_world
        # Zero-length bones would be removed by Blender; extend the tail.
        if (bone.head - bone.tail).length < 0.001:
            print('\tzero length bone found:', bone.name)
            if bvh_node.parent:
                ofs = bvh_node.parent.rest_head_local - bvh_node.parent.rest_tail_local
                if ofs.length:
                    bone.tail = bone.tail - ofs
                else:
                    bone.tail.y = bone.tail.y + average_bone_length
            else:
                bone.tail.y = bone.tail.y + average_bone_length
            ZERO_AREA_BONES.append(bone.name)
    # Parent bones; connect where the child's head touches the parent's tail.
    for bvh_node in bvh_nodes_list:
        if bvh_node.parent:
            bvh_node.temp.parent = bvh_node.parent.temp
            if ((not bvh_node.has_loc) and (bvh_node.parent.temp.name not in ZERO_AREA_BONES) and (bvh_node.parent.rest_tail_local == bvh_node.rest_head_local)):
                bvh_node.temp.use_connect = True
    # Replace edit-bone references with names (edit bones die on mode switch).
    for bvh_node in bvh_nodes_list:
        bvh_node.temp = bvh_node.temp.name
    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
    pose = arm_ob.pose
    pose_bones = pose.bones
    if rotate_mode == 'NATIVE':
        # Use each bone's own rotation order from the BVH file.
        for bvh_node in bvh_nodes_list:
            bone_name = bvh_node.temp
            pose_bone = pose_bones[bone_name]
            pose_bone.rotation_mode = bvh_node.rot_order_str
    elif rotate_mode != 'QUATERNION':
        for pose_bone in pose_bones:
            pose_bone.rotation_mode = rotate_mode
    else:
        # QUATERNION is Blender's default rotation mode; nothing to set.
        pass
    context.view_layer.update()
    arm_ob.animation_data_create()
    action = bpy.data.actions.new(name=bvh_name)
    arm_ob.animation_data.action = action
    # Replace each bone name with (pose_bone, bone, rest matrix, inverse).
    num_frame = 0
    for bvh_node in bvh_nodes_list:
        bone_name = bvh_node.temp
        pose_bone = pose_bones[bone_name]
        rest_bone = arm_data.bones[bone_name]
        bone_rest_matrix = rest_bone.matrix_local.to_3x3()
        bone_rest_matrix_inv = Matrix(bone_rest_matrix)
        bone_rest_matrix_inv.invert()
        bone_rest_matrix_inv.resize_4x4()
        bone_rest_matrix.resize_4x4()
        # NOTE(review): ``bone`` here is the stale variable left over from the
        # edit-bone loop above; it is stored but never read after unpacking.
        bvh_node.temp = (pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv)
        if 0 == num_frame:
            num_frame = len(bvh_node.anim_data)
    # The first entry of anim_data holds the rest pose; skip it.
    skip_frame = 1
    if num_frame > skip_frame:
        num_frame = num_frame - skip_frame
    # Precompute the keyframe time for every frame.
    time = [float(frame_start)] * num_frame
    if use_fps_scale:
        # Map BVH frame time onto the scene frame rate.
        dt = scene.render.fps * bvh_frame_time
        for frame_i in range(1, num_frame):
            time[frame_i] += float(frame_i) * dt
    else:
        for frame_i in range(1, num_frame):
            time[frame_i] += float(frame_i)
    for (i, bvh_node) in enumerate(bvh_nodes_list):
        (pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv) = bvh_node.temp
        if bvh_node.has_loc:
            data_path = 'pose.bones["%s"].location' % pose_bone.name
            location = [(0.0, 0.0, 0.0)] * num_frame
            for frame_i in range(num_frame):
                bvh_loc = bvh_node.anim_data[frame_i + skip_frame][:3]
                bone_translate_matrix = Matrix.Translation(Vector(bvh_loc) - bvh_node.rest_head_local)
                # BUG FIX: restore the matrix product ('@') that was lost
                # from the original source.
                location[frame_i] = (bone_rest_matrix_inv @ bone_translate_matrix).to_translation()
            for axis_i in range(3):
                curve = action.fcurves.new(data_path=data_path, index=axis_i, action_group=bvh_node.name)
                keyframe_points = curve.keyframe_points
                keyframe_points.add(num_frame)
                for frame_i in range(num_frame):
                    keyframe_points[frame_i].co = (time[frame_i], location[frame_i][axis_i])
        if bvh_node.has_rot:
            data_path = None
            rotate = None
            if 'QUATERNION' == rotate_mode:
                rotate = [(1.0, 0.0, 0.0, 0.0)] * num_frame
                data_path = 'pose.bones["%s"].rotation_quaternion' % pose_bone.name
            else:
                rotate = [(0.0, 0.0, 0.0)] * num_frame
                data_path = 'pose.bones["%s"].rotation_euler' % pose_bone.name
            prev_euler = Euler((0.0, 0.0, 0.0))
            for frame_i in range(num_frame):
                bvh_rot = bvh_node.anim_data[frame_i + skip_frame][3:]
                # BVH rotation order is reversed w.r.t. Blender's euler order.
                euler = Euler(bvh_rot, bvh_node.rot_order_str[::-1])
                bone_rotation_matrix = euler.to_matrix().to_4x4()
                # BUG FIX: restore the '@' products (rest-space change of basis).
                bone_rotation_matrix = bone_rest_matrix_inv @ bone_rotation_matrix @ bone_rest_matrix
                if len(rotate[frame_i]) == 4:
                    rotate[frame_i] = bone_rotation_matrix.to_quaternion()
                else:
                    rotate[frame_i] = bone_rotation_matrix.to_euler(pose_bone.rotation_mode, prev_euler)
                    prev_euler = rotate[frame_i]
            for axis_i in range(len(rotate[0])):
                curve = action.fcurves.new(data_path=data_path, index=axis_i, action_group=bvh_node.name)
                keyframe_points = curve.keyframe_points
                keyframe_points.add(num_frame)
                for frame_i in range(num_frame):
                    keyframe_points[frame_i].co = (time[frame_i], rotate[frame_i][axis_i])
    # BVH stores dense samples; interpolate linearly between keys.
    for cu in action.fcurves:
        if IMPORT_LOOP:
            pass  # placeholder: looping support not implemented
        for bez in cu.keyframe_points:
            bez.interpolation = 'LINEAR'
    try:
        arm_ob.matrix_world = global_matrix
    except:
        # global_matrix may be None or the wrong shape; leave the object as-is.
        pass
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    return arm_ob
class StructBahAttnDecoder(RnnDecoder):
    """RNN decoder with Bahdanau-style attention plus a structure embedding.

    At each step the word embedding, the attention context (projected down to
    the embedding size) and the structure embedding are concatenated and fed
    to the RNN; the RNN output is classified into the vocabulary.
    """

    def __init__(self, emb_dim, vocab_size, fc_emb_dim, struct_vocab_size, attn_emb_dim, dropout, d_model, **kwargs):
        super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, d_model, **kwargs)
        attn_size = kwargs.get('attn_size', self.d_model)
        # Input is [word_emb ; projected_context ; struct_emb] -> 3 * emb_dim.
        self.model = getattr(nn, self.rnn_type)(
            input_size=self.emb_dim * 3,
            hidden_size=self.d_model,
            batch_first=True,
            num_layers=self.num_layers,
            bidirectional=self.bidirectional,
        )
        # Attention query is the flattened hidden state across layers/directions.
        query_dim = self.d_model * (self.bidirectional + 1) * self.num_layers
        self.attn = Seq2SeqAttention(self.attn_emb_dim, query_dim, attn_size)
        self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
        self.struct_embedding = nn.Embedding(struct_vocab_size, emb_dim)
        self.apply(init)

    def forward(self, input_dict):
        word = input_dict['word']
        state = input_dict.get('state', None)
        fc_emb = input_dict['fc_emb']
        attn_emb = input_dict['attn_emb']
        attn_emb_len = input_dict['attn_emb_len']
        structure = input_dict['structure']

        word = word.to(fc_emb.device)
        embed = self.in_dropout(self.word_embedding(word))
        struct_emb = self.struct_embedding(structure)

        if state is None:
            state = self.init_hidden(word.size(0), fc_emb.device)
        # LSTM state is a (h, c) tuple; only h drives the attention query.
        hidden = state[0] if self.rnn_type == 'LSTM' else state
        query = hidden.transpose(0, 1).flatten(1)
        (context, attn_weight) = self.attn(query, attn_emb, attn_emb_len)
        projected_ctx = self.ctx_proj(context)

        rnn_input = torch.cat((embed, projected_ctx.unsqueeze(1), struct_emb.unsqueeze(1)), dim=-1)
        (out, state) = self.model(rnn_input, state)
        return {
            'state': state,
            'embed': out,
            'logit': self.classifier(out),
            'attn_weight': attn_weight,
        }
def deprecated_alias(old_qualname: str, new_fn: Callable[(ArgsT, RetT)], version: str, *, issue: (int | None)) -> Callable[(ArgsT, RetT)]:
    """Return a deprecated alias that forwards every call to ``new_fn``.

    The wrapper keeps ``new_fn``'s module/annotations but takes on the old
    qualified name, so deprecation warnings point at the alias.
    """
    # Local import: the file's top-of-file import block is not visible here.
    from functools import wraps

    # BUG FIX: both decorator names were stripped from the source, leaving
    # bare call expressions.  ``deprecated`` is this module's deprecation
    # decorator factory (defined elsewhere in the file — confirm its name).
    @deprecated(version, issue=issue, instead=new_fn)
    @wraps(new_fn, assigned=('__module__', '__annotations__'))
    def wrapper(*args: ArgsT.args, **kwargs: ArgsT.kwargs) -> RetT:
        return new_fn(*args, **kwargs)

    # Present the wrapper under the old (deprecated) name.
    wrapper.__qualname__ = old_qualname
    wrapper.__name__ = old_qualname.rpartition('.')[-1]
    return wrapper
@_funcify.register(SVD)
def numba_funcify_SVD(op, node, **kwargs):
    """Generate a Numba implementation of the SVD Op.

    BUG FIX: the '@' was stripped from the registration line and from both
    ``numba_njit`` decorator lines, leaving bare no-op call expressions.
    """
    full_matrices = op.full_matrices
    compute_uv = op.compute_uv
    out_dtype = np.dtype(node.outputs[0].dtype)
    # Integer inputs must be cast to float before np.linalg.svd.
    inputs_cast = int_to_float_fn(node.inputs, out_dtype)
    if not compute_uv:

        @_basic.numba_njit()
        def svd(x):
            # Numba's np.linalg.svd always returns (U, s, Vh); keep only s.
            (_, ret, _) = np.linalg.svd(inputs_cast(x), full_matrices)
            return ret

    else:

        @_basic.numba_njit()
        def svd(x):
            return np.linalg.svd(inputs_cast(x), full_matrices)

    return svd
def process_position(solar_cell, options, layer_widths):
    """Normalize ``options.position`` into an array of mesh points.

    Accepts None (default per-layer spacing), a scalar step, a one-element
    sequence, a per-junction sequence (len == len(solar_cell)) or a per-layer
    sequence (len == len(layer_widths)).  Mutates ``options.position``.
    """
    if options.position is None:
        # Default: ~5000 points per layer, clamped to a minimal step.
        options.position = [max(1e-10, width / 5000) for width in layer_widths]
        layer_offsets = np.insert(np.cumsum(layer_widths), 0, 0)
        options.position = np.hstack([
            np.arange(layer_offsets[j], layer_offsets[j] + layer_width, options.position[j])
            for (j, layer_width) in enumerate(layer_widths)
        ])
    elif isinstance(options.position, (int, float)):
        # A scalar is a uniform step over the whole cell.
        options.position = np.arange(0, solar_cell.width, options.position)
    elif isinstance(options.position, (list, np.ndarray)):
        if len(options.position) == 1:
            # A one-element sequence behaves like a scalar step.
            options.position = np.arange(0, solar_cell.width, options.position[0])
        # BUG FIX: the following branch was an independent ``if``; after the
        # len==1 case rewrote ``position`` to an arange, it re-inspected the
        # *new* array's length and could wrongly index per-junction steps.
        elif len(options.position) == len(solar_cell):
            # One step per junction/layer object in the cell.
            options.position = np.hstack([
                np.arange(layer_object.offset, layer_object.offset + layer_object.width, options.position[j])
                for (j, layer_object) in enumerate(solar_cell)
            ])
        elif len(options.position) == len(layer_widths):
            # One step per individual layer.
            layer_offsets = np.insert(np.cumsum(layer_widths), 0, 0)
            options.position = np.hstack([
                np.arange(layer_offsets[j], layer_offsets[j] + layer_width, options.position[j])
                for (j, layer_width) in enumerate(layer_widths)
            ])
class DepthwiseSeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a per-channel (depthwise) conv
    followed by a 1x1 (pointwise) conv, each with optional norm/activation."""

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, dilation=1, *, norm1=None, activation1=None, norm2=None, activation2=None):
        super().__init__()
        # Depthwise stage: one filter per input channel (groups=in_channels).
        self.depthwise = Conv2d(
            in_channels,
            in_channels,
            kernel_size=kernel_size,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=not norm1,  # bias is redundant when a norm layer follows
            norm=get_norm(norm1, in_channels),
            activation=activation1,
        )
        # Pointwise stage: 1x1 conv mixing channels.
        self.pointwise = Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            bias=not norm2,
            norm=get_norm(norm2, out_channels),
            activation=activation2,
        )
        # MSRA (He) initialization for both conv stages.
        weight_init.c2_msra_fill(self.depthwise)
        weight_init.c2_msra_fill(self.pointwise)

    def forward(self, x):
        out = self.depthwise(x)
        return self.pointwise(out)
def test_prompt(hatch, devpi, temp_dir_cache, helpers, published_project_name, config_file):
    """End-to-end publish flow against a devpi server, exercising the
    interactive credential prompt: wrong password fails with 401, ``-n``
    without a user fails fast, correct credentials publish, and cached
    credentials are reused for the second artifact."""
    # Point the 'dev' publishing repo at the test devpi instance.
    config_file.model.publish['index']['ca-cert'] = devpi.ca_cert
    config_file.model.publish['index']['repo'] = 'dev'
    config_file.model.publish['index']['repos'] = {'dev': devpi.repo}
    config_file.save()
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
        assert (result.exit_code == 0), result.output
    path = (temp_dir_cache / published_project_name)
    with path.as_cwd():
        # Build fresh artifacts at a unique, timestamp-derived version.
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert (result.exit_code == 0), result.output
        result = hatch('build')
        assert (result.exit_code == 0), result.output
        build_directory = (path / 'dist')
        artifacts = list(build_directory.iterdir())
        # Valid user but wrong password -> HTTP 401 from the server.
        result = hatch('publish', input=f'''{devpi.user}
foo''')
        assert (result.exit_code == 1), result.output
        assert ('401' in result.output)
        assert ('Unauthorized' in result.output)
    with path.as_cwd():
        # ``-n`` disables prompting, so a missing user is a hard error.
        result = hatch('publish', '-n')
        assert (result.exit_code == 1), result.output
        assert (result.output == 'Missing required option: user\n')
    with path.as_cwd():
        # Correct credentials: first artifact publishes successfully.
        result = hatch('publish', str(artifacts[0]), input=f'''{devpi.user}
{devpi.auth}''')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
Enter your username [__TOKEN__]: {devpi.user}
Enter your credentials:{' '}
{artifacts[0].relative_to(path)} ... success
[{published_project_name}]
{devpi.repo}{published_project_name}/{current_version}/
'''))
    with path.as_cwd():
        # Second artifact reuses the stored credentials: no prompt appears.
        result = hatch('publish', str(artifacts[1]))
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
{artifacts[1].relative_to(path)} ... success
[{published_project_name}]
{devpi.repo}{published_project_name}/{current_version}/
'''))
def test_choice_samples():
    """Exercise the ``choice`` random variable across scalar, vector and
    matrix supports, with and without explicit probabilities."""
    # Support-shape inference from params alone is unsupported for choice.
    with pytest.raises(NotImplementedError):
        choice._supp_shape_from_params(np.asarray(5))
    # Scalar / vector / float-vector supports, with and without size args.
    compare_sample_values(choice, np.asarray(5))
    compare_sample_values(choice, np.asarray([5]))
    compare_sample_values(choice, np.array([1.0, 5.0], dtype=config.floatX))
    compare_sample_values(choice, np.asarray([5]), 3)
    compare_sample_values(choice, np.array([[1, 2], [3, 4]]))
    compare_sample_values(choice, np.array([[1, 2], [3, 4]]), p=[0.4, 0.6])
    compare_sample_values(choice, [1, 2, 3], 1)
    compare_sample_values(choice, [1, 2, 3], 1, p=pt.as_tensor([(1 / 3.0), (1 / 3.0), (1 / 3.0)]))
    # A 2-D probability matrix is rejected at evaluation time.
    with pytest.raises(ValueError):
        rng = np.random.default_rng()
        rng_pt = shared(rng, borrow=True)
        choice(a=[1, 2], p=pt.as_tensor([[0.1, 0.9], [0.3, 0.7]]), rng=rng_pt).eval()
    # Sampling with replacement, plain list and tensor-variable supports.
    compare_sample_values(choice, [1, 2, 3], (10, 2), replace=True)
    compare_sample_values(choice, pt.as_tensor_variable([1, 2, 3]), 2, replace=True)
def LoadWav(path, cfg):
    """Load, trim and peak-normalize a wav file.

    Returns (samples, name) where ``name`` is the file's base name without
    extension.  The audio is resampled to ``cfg.sampling_rate`` when needed
    and leading/trailing silence is trimmed using ``cfg.top_db``.
    """
    wav_name = os.path.basename(path).split('.')[0]
    target_sr = cfg.sampling_rate
    (wav, native_sr) = sf.read(path)
    # Trim silence below the configured decibel threshold.
    (wav, _) = librosa.effects.trim(y=wav, top_db=cfg.top_db)
    if native_sr != target_sr:
        wav = resampy.resample(x=wav, sr_orig=native_sr, sr_new=target_sr, axis=0)
        native_sr = target_sr
    assert native_sr == 16000, 'Downsampling needs to be done.'
    # Rescale only when the waveform clips beyond [-1, 1].
    peak = np.abs(wav).max()
    if peak > 1.0:
        wav /= peak
    return (wav, wav_name)
def parse_args():
    """Parse command-line arguments for the 3D-detection inference client."""
    parser = ArgumentParser()
    # Required positional inputs.
    parser.add_argument('pcd', help='Point cloud file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    # Optional server/runtime settings.
    parser.add_argument('--inference-addr', default='127.0.0.1:8080', help='Address and port of the inference server')
    parser.add_argument('--device', default='cuda:0', help='Device used for inference')
    parser.add_argument('--score-thr', type=float, default=0.5, help='3d bbox score threshold')
    return parser.parse_args()
class ExperienceReplay():
    """Replay buffer of the best-scoring SMILES seen so far.

    Keeps at most ``memory_size`` unique entries, ranked by score with
    zero-score rows discarded, and samples ``replay_number`` of them.
    """

    def __init__(self, memory_size: int=100, replay_number: int=10):
        self.buffer = pd.DataFrame(columns=['smiles', 'likelihood', 'scores'])
        self.memory_size = memory_size
        self.replay_number = replay_number

    def add_to_buffer(self, smiles, scores, neg_likelihood):
        """Append a batch of results, then re-prune the buffer."""
        batch = pd.DataFrame({
            'smiles': smiles,
            'likelihood': neg_likelihood.cpu().detach().numpy(),
            'scores': scores.cpu().detach().numpy(),
        })
        self.buffer = pd.concat([self.buffer, batch])
        self.purge_buffer()

    def purge_buffer(self):
        """Deduplicate by SMILES, keep the ``memory_size`` highest scores,
        and drop rows whose score is exactly zero."""
        deduped = self.buffer.drop_duplicates(subset=['smiles'])
        ranked = deduped.sort_values('scores', ascending=False)
        top = ranked.head(self.memory_size)
        self.buffer = top.loc[top['scores'] != 0.0]

    def sample_buffer(self):
        """Return (smiles, scores, prior_likelihood) for a random sample, or
        three empty lists when the buffer is empty."""
        sample_size = min(len(self.buffer), self.replay_number)
        if sample_size <= 0:
            return ([], [], [])
        sampled = self.buffer.sample(sample_size)
        return (
            sampled['smiles'].values,
            sampled['scores'].values,
            utils.to_tensor(sampled['likelihood'].values),
        )
def get_process_listening_port(proc):
    """Return the port on which ``proc`` (or one of its children on Windows)
    is listening, polling every 10 ms until a LISTEN-state socket appears."""
    conn = None
    if (platform.system() == 'Windows'):
        # On Windows the server runs in a spawned child process; wait for one
        # to appear before inspecting connections.
        current_process = psutil.Process(proc.pid)
        children = []
        while (children == []):
            time.sleep(0.01)
            children = current_process.children(recursive=True)
            # On Python 3.6 the socket lives in the process itself (no child),
            # so treat the current process as the only "child".
            if ((3, 6) <= sys.version_info < (3, 7)):
                children = [current_process]
        for child in children:
            # NOTE(review): with ``and``, the second clause is dead — when
            # connections() is empty ``any(...)`` is already False — so this
            # loop exits as soon as *any* connection exists, even a
            # non-LISTEN one, and the ``next(filter(...))`` below could then
            # raise StopIteration.  The LISTEN check alone was likely the
            # intended condition; confirm upstream before changing.
            while ((child.connections() == []) and (not any(((conn.status == 'LISTEN') for conn in child.connections())))):
                time.sleep(0.01)
            conn = next(filter((lambda conn: (conn.status == 'LISTEN')), child.connections()))
    else:
        # POSIX: the process itself owns the listening socket; poll until it
        # enters the LISTEN state.
        psutil_proc = psutil.Process(proc.pid)
        while (not any(((conn.status == 'LISTEN') for conn in psutil_proc.connections()))):
            time.sleep(0.01)
        conn = next(filter((lambda conn: (conn.status == 'LISTEN')), psutil_proc.connections()))
    return conn.laddr.port
class CalcChangeLocalDroneAmountCommand(wx.Command):
    """Undoable command that changes the quantity of a local drone on a fit."""

    def __init__(self, fitID, position, amount):
        wx.Command.__init__(self, True, 'Change Local Drone Amount')
        self.fitID = fitID
        self.position = position
        self.amount = amount
        # Snapshot of the drone taken in Do(), consumed by Undo().
        self.savedDroneInfo = None

    def Do(self):
        pyfalog.debug('Doing change of local drone amount to {} at position {} on fit {}'.format(self.amount, self.position, self.fitID))
        fit = Fit.getInstance().getFit(self.fitID)
        drone = fit.drones[self.position]
        self.savedDroneInfo = DroneInfo.fromDrone(drone)
        # No-op when the requested amount equals the current amount.
        if self.amount == self.savedDroneInfo.amount:
            return False
        drone.amount = self.amount
        if drone.amountActive > 0:
            delta = self.amount - self.savedDroneInfo.amount
            # NOTE(review): this re-assignment duplicates the one above.
            drone.amount = self.amount
            # Shift the active count by the same delta, clamped to [0, amount].
            drone.amountActive = max(min(drone.amountActive + delta, drone.amount), 0)
        return True

    def Undo(self):
        pyfalog.debug('Undoing change of local drone quantity to {} at position {} on fit {}'.format(self.amount, self.position, self.fitID))
        # Without a snapshot there is nothing to restore.
        if self.savedDroneInfo is None:
            return False
        fit = Fit.getInstance().getFit(self.fitID)
        drone = fit.drones[self.position]
        drone.amount = self.savedDroneInfo.amount
        drone.amountActive = self.savedDroneInfo.amountActive
        return True
def test_extra_files_are_ok():
    """A config importing from a sibling module in the same directory should
    still pass the qtile config check."""
    with tempfile.TemporaryDirectory() as tempdir:
        config_file = os.path.join(tempdir, 'config.py')
        with open(config_file, 'w') as fh:
            fh.write('from bar import foo\n')
        # The sibling module the config imports from.
        with open(os.path.join(tempdir, 'bar.py'), 'w') as fh:
            fh.write('foo = 42')
        assert run_qtile_check(config_file)
def _create_chain_initial_circuit(parameters: FermiHubbardParameters, qubits: List[cirq.Qid], chain: ChainInitialState) -> cirq.Circuit:
    """Build the initial-state preparation circuit for one chain.

    Dispatches on the concrete ``chain`` type; raises ValueError for
    unsupported initial states.
    """
    n = len(qubits)
    if isinstance(chain, SingleParticle):
        # Single-particle state: prepare the requested amplitude distribution.
        return create_one_particle_circuit(qubits, chain.get_amplitudes(n))
    if isinstance(chain, TrappingPotential):
        # Prepare the ground state of the chain's quadratic Hamiltonian.
        hamiltonian = chain.as_quadratic_hamiltonian(n, parameters.hamiltonian.j)
        return _create_quadratic_hamiltonian_circuit(qubits, chain.particles, hamiltonian)
    raise ValueError(f'Unsupported chain initial state {chain}')
class TemplateSelectorWidget(QWidget):
    """Widget for choosing how a Foam case is generated: from a template
    ('fromScratch') or from an existing tutorial/user case ('fromExisting'),
    with a browse button for selecting the source case folder."""

    def __init__(self, theChoice=None, parent=None):
        # BUG FIX: the original called super(TemplateSelector, self) — a
        # different (likely pre-rename) class name — raising NameError.
        super(TemplateSelectorWidget, self).__init__(parent)
        self.setWindowTitle('select how Foam case is generated')
        choices = ['fromScratch', 'fromExisting']
        helpTexts = ['generate Foam case dicts from template string', 'generate case setup from existing tutorial or user case']
        layout = QVBoxLayout(self)
        self.choiceWidget = ChoiceWidget(choices, helpTexts)
        # Pre-select the caller-supplied mode, if any.
        if theChoice:
            self.choiceWidget.setChoice(theChoice)
        layout.addWidget(self.choiceWidget)
        self.buttonBrowse = QPushButton('Browse for case folder')
        self.buttonBrowse.clicked.connect(self.selectFolder)
        # Selected case folder; shown (word-wrapped) under the browse button.
        self.folderPath = ''
        self.labelCasePath = QLabel(self.folderPath)
        self.labelCasePath.setWordWrap(True)
        layout.addWidget(self.buttonBrowse)
        layout.addWidget(self.labelCasePath)
        self.setLayout(layout)

    def selectFolder(self):
        """Open a directory picker and display the chosen path."""
        self.folderPath = QFileDialog.getExistingDirectory()
        self.labelCasePath.setText(self.folderPath)
def get_injectable_payloads(url='', data='', base='', injection_type='', session_filepath='', is_json=False, is_multipart=False, injected_and_vulnerable=False):
    """Load stored injection points for ``base.path`` from the session DB and
    format them into the user-facing summary message.

    Returns an ``Injections`` namedtuple of (payload rows, formatted message,
    list of tested parameter names).
    """
    Injections = collections.namedtuple('Injections', ['retval', 'template_msg', 'tested_parameters'])
    # Fetch every stored payload row for this endpoint.
    retval = session.fetchall(session_filepath=session_filepath, query='SELECT * FROM tbl_payload WHERE `endpoint`=?', values=(base.path,), to_object=True)
    retval = payloads_to_objects(retval)
    if (not injected_and_vulnerable):
        message = 'Ghauri resumed the following injection point(s) from stored session:\n'
    else:
        message = 'Ghauri identified the following injection point(s) with a total of {nor} HTTP(s) requests:\n'.format(nor=conf.req_counter_injected)
    message += '---\n'
    param_set = set()
    message_list = []
    for entry in retval:
        param_name = entry.parameter.key
        # Strip the '*' injection marker from the stored value.
        param_value = entry.parameter.value.replace('*', '')
        results = entry.result
        # Emit one message section per distinct parameter.
        if (param_name not in param_set):
            _p = f'{param_name}'
            # '#1*' marks a URI-position injection rather than a named param.
            _it = (injection_type if (param_name != '#1*') else 'URI')
            if is_json:
                _p = f'{entry.parameter.type}{param_name}'
                _it = f'(custom) {injection_type}'
            if is_multipart:
                _p = f'{entry.parameter.type}{param_name}'
                _it = f'(custom) {injection_type}'
            message_ok = 'Parameter: {} ({})'.format(_p, _it)
            param_set.add(param_name)
            __ = []
            for res in results:
                _url = url
                _data = data
                # Only include results belonging to this parameter.
                if (entry.parameter.key != res.parameter.key):
                    continue
                payload = res.payload
                payload_type = res.payload_type
                title = res.title
                vector = res.vector
                backend = res.backend
                # Re-embed the payload into the request body or URL so the
                # displayed payload reflects the full attack request.
                if (injection_type == 'POST'):
                    _data = prepare_attack_request(text=data, payload=payload, param=res.parameter, is_multipart=is_multipart, injection_type=injection_type, encode=False)
                    if is_json:
                        _data = re.sub('[\\n]+', '', _data)
                if (injection_type == 'GET'):
                    _url = prepare_attack_request(text=url, payload=payload, param=res.parameter, injection_type=injection_type, encode=False)
                if (injection_type == 'GET'):
                    payload = parse_payload(_url, injection_type=injection_type, param_name=param_name)
                elif (injection_type == 'POST'):
                    payload = parse_payload(url, data=_data, injection_type=injection_type, is_multipart=is_multipart)
                elif (injection_type == 'HEADER'):
                    payload = f'{param_name}: {param_value}{payload}'
                    payload = parse_payload(payload=payload, injection_type=injection_type)
                elif (injection_type == 'COOKIE'):
                    payload = f'{param_name}={param_value}{payload}'
                    payload = parse_payload(payload=payload, injection_type=injection_type)
                _msg = TEMPLATE_INJECTED_MESSAGE.format(PAYLOAD_TYPE=payload_type, TITLE=title, PAYLOAD=payload)
                __.append(_msg)
            message_ok += '\n'.join(__)
            message_list.append(message_ok)
    message += '\n\n'.join(message_list)
    message += '\n---'
    return Injections(retval=retval, template_msg=message, tested_parameters=list(param_set))
def get_res_fc_seq_fc(model_rnn_dim, rnn: bool, self_att: bool, keep_rate=0.8):
    """Build an FC layer followed by a residual sequence-mapper stack.

    The residual stack optionally contains a GRU stage and/or a self-attention
    stage (at least one must be enabled), each preceded by variational
    dropout, and always ends with a ReLU fully-connected layer.
    """
    if not (rnn or self_att):
        raise NotImplementedError()
    stages = []
    if rnn:
        stages.append(VariationalDropoutLayer(keep_rate))
        stages.append(CudnnGru(model_rnn_dim, w_init=TruncatedNormal(stddev=0.05)))
    if self_att:
        stages.append(VariationalDropoutLayer(keep_rate))
        stages.append(StaticAttentionSelf(TriLinear(bias=True), ConcatWithProduct()))
    stages.append(FullyConnected(model_rnn_dim * 2, activation='relu'))
    return SequenceMapperSeq(FullyConnected(model_rnn_dim * 2, activation='relu'), ResidualLayer(SequenceMapperSeq(*stages)))
class Linear(nn.Linear, Module):
    """``nn.Linear`` variant whose forward pass can run with externally
    supplied parameters (e.g. fast weights in meta-learning loops)."""

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__(in_features, out_features, bias=bias)

    def forward(self, x, params=None, episode=None):
        # Without an override, behave exactly like a standard nn.Linear.
        if params is None:
            return super(Linear, self).forward(x)
        # Use supplied weight/bias, falling back to this module's own
        # parameters for any missing entry.
        weight = params.get('weight')
        bias = params.get('bias')
        if weight is None:
            weight = self.weight
        if bias is None:
            bias = self.bias
        return F.linear(x, weight, bias)
def dict_of_class(tup: Tuple[(List[Tuple[(_CountingAttr, SearchStrategy)]], Tuple[(Type, PosArgs, KwArgs)])], defaults: Tuple[(PosArgs, KwArgs)]):
    """Extend an attrs-class spec with a ``Dict[str, nested_cl]`` attribute.

    ``tup`` pairs the existing (attribute, strategy) entries with a nested
    class plus the args/kwargs used to build example instances; ``defaults``
    supplies the args/kwargs for the attribute's default factory.
    """
    (attrs_and_strats, (nested_cl, nested_cl_args, nested_cl_kwargs)) = tup
    # The default is a factory producing a one-entry dict from ``defaults``.
    default = Factory(lambda: {'cls': nested_cl(*defaults[0], **defaults[1])})
    combined_attrs = list(attrs_and_strats)
    combined_attrs.append((
        field(type=Dict[(str, nested_cl)], default=default),
        just({'cls': nested_cl(*nested_cl_args, **nested_cl_kwargs)}),
    ))
    return _create_hyp_class_and_strat(combined_attrs)
def test_cursor_usage_to_add_a_chain():
    """Chaining nodes through a cursor with ``>>`` should register each edge."""
    (a, b, c) = get_pseudo_nodes(*'abc')
    g = Graph()
    # ``>>`` appends each node to the chain rooted at the cursor (BEGIN).
    g.get_cursor() >> a >> b >> c
    assert len(g) == 3
    # Expected topology: BEGIN -> a -> b -> c, with c terminating the chain.
    assert g.outputs_of(BEGIN) == {g.index_of(a)}
    assert g.outputs_of(a) == {g.index_of(b)}
    assert g.outputs_of(b) == {g.index_of(c)}
    assert g.outputs_of(c) == set()
def test_subcommand_tab_completion(sc_app):
    """Tab-completing 'Foot' after 'base sport' should yield 'Football '."""
    text = 'Foot'
    line = 'base sport {}'.format(text)
    endidx = len(line)
    begidx = endidx - len(text)
    first_match = complete_tester(text, line, begidx, endidx, sc_app)
    # A single match is expected, padded with a trailing space.
    assert first_match is not None
    assert sc_app.completion_matches == ['Football ']
class Model(models.Model):
    """Abstract base model that timestamps creation and every update."""

    # Set once, on first save.
    created = models.DateTimeField(editable=False, verbose_name=_('created'))
    # Refreshed on every save.
    updated = models.DateTimeField(editable=False, verbose_name=_('updated'))

    class Meta():
        abstract = True

    def save(self, *args, **kwargs):
        """Stamp ``created`` on first save and ``updated`` on every save."""
        if self.created is None:
            self.created = now()
        self.updated = now()
        super().save(*args, **kwargs)
# Local import: guarantees the decorator below resolves even though the
# file's top-of-file import block is not visible from this edit.
from dataclasses import dataclass

# BUG FIX: the @dataclass decorator was stripped from the source — without
# it every attribute is a bare dataclasses.Field object instead of a typed
# default, and the class cannot be instantiated with keyword overrides.
@dataclass
class AveragerArguments():
    """Hyperparameters controlling decentralized gradient averaging."""

    # Group formation / timeout behaviour.
    averaging_expiration: float = field(default=5.0, metadata={'help': 'Averaging group will wait for stragglers for at most this many seconds'})
    averaging_timeout: float = field(default=30.0, metadata={'help': 'Give up on averaging step after this many seconds'})
    listen_on: str = field(default='[::]:*', metadata={'help': 'Network interface used for incoming averager communication. Default: all ipv6'})
    # Collaboration-state refresh cadence.
    min_refresh_period: float = field(default=0.5, metadata={'help': 'Wait for at least this many seconds before fetching new collaboration state'})
    max_refresh_period: float = field(default=30, metadata={'help': 'Wait for at most this many seconds before fetching new collaboration state'})
    default_refresh_period: float = field(default=3, metadata={'help': 'Attempt to fetch collaboration state every this often until successful'})
    # Assumptions about peer churn and throughput estimation.
    expected_drift_peers: float = field(default=3, metadata={'help': 'Trainer assumes that this many new peers can join per step'})
    expected_drift_rate: float = field(default=0.2, metadata={'help': 'Trainer assumes that this fraction of current size can join per step'})
    performance_ema_alpha: float = field(default=0.1, metadata={'help': 'Uses this alpha for moving average estimate of samples per second'})
    target_group_size: int = field(default=256, metadata={'help': 'Maximum group size for all-reduce'})
    metadata_expiration: float = field(default=30, metadata={'help': "Peer's metadata will be removed if not updated in this many seconds"})
class TestOrderMethods(zf.WithConstantEquityMinuteBarData, zf.WithConstantFutureMinuteBarData, zf.WithMakeAlgo, zf.ZiplineTestCase):
    """Tests for the zipline order_* API methods on equities and futures.

    All minute bars are pinned at $2.00 with ample volume, so share/contract
    counts are exactly predictable: with $10,000 capital, ordering $10,000 of
    the equity or 100% of the portfolio both equal 5,000 shares.
    """
    START_DATE = T('2006-01-03')
    END_DATE = T('2006-01-06')
    # Simulation starts one day after data begins so history is available.
    SIM_PARAMS_START_DATE = T('2006-01-04')
    ASSET_FINDER_EQUITY_SIDS = (1,)
    EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
    FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = True
    # Flat $2.00 OHLC bars for both asset classes.
    EQUITY_MINUTE_CONSTANT_LOW = 2.0
    EQUITY_MINUTE_CONSTANT_OPEN = 2.0
    EQUITY_MINUTE_CONSTANT_CLOSE = 2.0
    EQUITY_MINUTE_CONSTANT_HIGH = 2.0
    EQUITY_MINUTE_CONSTANT_VOLUME = 10000.0
    FUTURE_MINUTE_CONSTANT_LOW = 2.0
    FUTURE_MINUTE_CONSTANT_OPEN = 2.0
    FUTURE_MINUTE_CONSTANT_CLOSE = 2.0
    FUTURE_MINUTE_CONSTANT_HIGH = 2.0
    FUTURE_MINUTE_CONSTANT_VOLUME = 10000.0
    SIM_PARAMS_CAPITAL_BASE = 10000

    # NOTE(review): takes ``cls`` with no visible decorator -- looks like a
    # stripped ``@classmethod``; confirm against upstream.
    def make_futures_info(cls):
        """Declare one future (sid 2) with a 10x multiplier."""
        return pd.DataFrame.from_dict({2: {'multiplier': 10, 'symbol': 'F', 'exchange': 'TEST'}}, orient='index')

    # NOTE(review): also looks like a stripped ``@classmethod``.
    def init_class_fixtures(cls):
        super(TestOrderMethods, cls).init_class_fixtures()
        # Resolve the asset objects once for use in assertions.
        cls.EQUITY = cls.asset_finder.retrieve_asset(1)
        cls.FUTURE = cls.asset_finder.retrieve_asset(2)

    # NOTE(review): the bare list below looks like a stripped
    # ``@parameterized.expand`` / ``@pytest.mark.parametrize`` decorator.
    ([('order', 1), ('order_value', 1000), ('order_target', 1), ('order_target_value', 1000), ('order_percent', 1), ('order_target_percent', 1)])
    def test_cannot_order_in_before_trading_start(self, order_method, amount):
        """Every order_* method must raise when called in before_trading_start."""
        algotext = '\nfrom zipline.api import sid, {order_func}\n\ndef initialize(context):\n    context.asset = sid(1)\n\ndef before_trading_start(context, data):\n    {order_func}(context.asset, {arg})\n     '.format(order_func=order_method, arg=amount)
        algo = self.make_algo(script=algotext)
        with self.assertRaises(ze.OrderInBeforeTradingStart):
            algo.run()

    # NOTE(review): stripped parametrize decorator (see above).
    ([('order', 5000), ('order_value', 10000), ('order_percent', 1)])
    def test_order_equity_non_targeted(self, order_method, amount):
        """Non-targeted methods place a fresh 5000-share order every day."""
        algotext = '\nimport zipline.api as api\n\ndef initialize(context):\n    api.set_slippage(api.slippage.FixedSlippage(spread=0.0))\n    api.set_commission(api.commission.PerShare(0))\n\n    context.equity = api.sid(1)\n\n    api.schedule_function(\n        func=do_order,\n        date_rule=api.date_rules.every_day(),\n        time_rule=api.time_rules.market_open(),\n    )\n\ndef do_order(context, data):\n    context.ordered = True\n    api.{order_func}(context.equity, {arg})\n     '.format(order_func=order_method, arg=amount)
        result = self.run_algorithm(script=algotext)
        for orders in result.orders.values:
            assert_equal(len(orders), 1)
            assert_equal(orders[0]['amount'], 5000)
            assert_equal(orders[0]['sid'], self.EQUITY)
        # Position grows by 5000 shares each day.
        for (i, positions) in enumerate(result.positions.values, start=1):
            assert_equal(len(positions), 1)
            assert_equal(positions[0]['amount'], (5000.0 * i))
            assert_equal(positions[0]['sid'], self.EQUITY)

    # NOTE(review): stripped parametrize decorator (see above).
    ([('order_target', 5000), ('order_target_value', 10000), ('order_target_percent', 1)])
    def test_order_equity_targeted(self, order_method, amount):
        """Targeted methods order once; later days are already at the target."""
        algotext = '\nimport zipline.api as api\n\ndef initialize(context):\n    api.set_slippage(api.slippage.FixedSlippage(spread=0.0))\n    api.set_commission(api.commission.PerShare(0))\n\n    context.equity = api.sid(1)\n\n    api.schedule_function(\n        func=do_order,\n        date_rule=api.date_rules.every_day(),\n        time_rule=api.time_rules.market_open(),\n    )\n\ndef do_order(context, data):\n    context.ordered = True\n    api.{order_func}(context.equity, {arg})\n     '.format(order_func=order_method, arg=amount)
        result = self.run_algorithm(script=algotext)
        assert_equal([len(ords) for ords in result.orders], [1, 0, 0, 0])
        order = result.orders.iloc[0][0]
        assert_equal(order['amount'], 5000)
        assert_equal(order['sid'], self.EQUITY)
        for positions in result.positions.values:
            assert_equal(len(positions), 1)
            assert_equal(positions[0]['amount'], 5000.0)
            assert_equal(positions[0]['sid'], self.EQUITY)

    # NOTE(review): stripped parametrize decorator (see above).
    ([('order', 500), ('order_value', 10000), ('order_percent', 1)])
    def test_order_future_non_targeted(self, order_method, amount):
        """Futures: 10x multiplier means $10,000 buys 500 contracts at $2."""
        algotext = '\nimport zipline.api as api\n\ndef initialize(context):\n    api.set_slippage(us_futures=api.slippage.FixedSlippage(spread=0.0))\n    api.set_commission(us_futures=api.commission.PerTrade(0.0))\n\n    context.future = api.sid(2)\n\n    api.schedule_function(\n        func=do_order,\n        date_rule=api.date_rules.every_day(),\n        time_rule=api.time_rules.market_open(),\n    )\n\ndef do_order(context, data):\n    context.ordered = True\n    api.{order_func}(context.future, {arg})\n     '.format(order_func=order_method, arg=amount)
        result = self.run_algorithm(script=algotext)
        for orders in result.orders.values:
            assert_equal(len(orders), 1)
            assert_equal(orders[0]['amount'], 500)
            assert_equal(orders[0]['sid'], self.FUTURE)
        for (i, positions) in enumerate(result.positions.values, start=1):
            assert_equal(len(positions), 1)
            assert_equal(positions[0]['amount'], (500.0 * i))
            assert_equal(positions[0]['sid'], self.FUTURE)

    # NOTE(review): stripped parametrize decorator (see above).
    ([('order_target', 500), ('order_target_value', 10000), ('order_target_percent', 1)])
    def test_order_future_targeted(self, order_method, amount):
        """Targeted futures orders fire once and then hold at the target."""
        algotext = '\nimport zipline.api as api\n\ndef initialize(context):\n    api.set_slippage(us_futures=api.slippage.FixedSlippage(spread=0.0))\n    api.set_commission(us_futures=api.commission.PerTrade(0.0))\n\n    context.future = api.sid(2)\n\n    api.schedule_function(\n        func=do_order,\n        date_rule=api.date_rules.every_day(),\n        time_rule=api.time_rules.market_open(),\n    )\n\ndef do_order(context, data):\n    context.ordered = True\n    api.{order_func}(context.future, {arg})\n     '.format(order_func=order_method, arg=amount)
        result = self.run_algorithm(script=algotext)
        assert_equal([len(ords) for ords in result.orders], [1, 0, 0, 0])
        order = result.orders.iloc[0][0]
        assert_equal(order['amount'], 500)
        assert_equal(order['sid'], self.FUTURE)
        for positions in result.positions.values:
            assert_equal(len(positions), 1)
            assert_equal(positions[0]['amount'], 500.0)
            assert_equal(positions[0]['sid'], self.FUTURE)

    # NOTE(review): stripped parametrize decorator (see above).
    ([(api.order, 5000), (api.order_value, 10000), (api.order_percent, 1.0), (api.order_target, 5000), (api.order_target_value, 10000), (api.order_target_percent, 1.0)])
    def test_order_method_style_forwarding(self, order_method, order_param):
        """The ``style=`` kwarg must be forwarded to the created order."""
        def initialize(context):
            api.schedule_function(func=do_order, date_rule=api.date_rules.every_day(), time_rule=api.time_rules.market_open())
        def do_order(context, data):
            assert (len(context.portfolio.positions.keys()) == 0)
            order_method(self.EQUITY, order_param, style=StopLimitOrder(10, 10, asset=self.EQUITY))
            assert (len(context.blotter.open_orders[self.EQUITY]) == 1)
            result = context.blotter.open_orders[self.EQUITY][0]
            assert (result.limit == 10)
            assert (result.stop == 10)
        # Run a single session so exactly one order is placed.
        self.run_algorithm(initialize=initialize, sim_params=self.sim_params.create_new(start_session=self.END_DATE, end_session=self.END_DATE))
class HeadQABase(MultipleChoiceTask):
    """Base task for the HEAD-QA multiple-choice healthcare exam benchmark."""

    VERSION = 0
    DATASET_PATH = inspect.getfile(lm_eval.datasets.headqa.headqa)

    def has_training_docs(self):
        """The dataset ships a train split."""
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # Cache processed train docs: they may be iterated many times (fewshot sampling).
        if self._training_docs is None:
            self._training_docs = [self._process_doc(doc) for doc in self.dataset['train']]
        return self._training_docs

    def validation_docs(self):
        return (self._process_doc(doc) for doc in self.dataset['validation'])

    def test_docs(self):
        return (self._process_doc(doc) for doc in self.dataset['test'])

    def _process_doc(self, doc):
        """Convert a raw HEAD-QA record into the standard multiple-choice layout."""
        query = f"Question: {doc['qtext']}\nAnswer:"
        return {
            'id': doc['qid'],
            'query': query,
            'choices': [answer['atext'] for answer in doc['answers']],
            # 'ra' (right answer) is 1-indexed in the raw data.
            'gold': int(doc['ra']) - 1,
        }

    def doc_to_text(self, doc):
        return doc['query']

    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc['query']
class iSendBase():
    """Base for isend/recv tests: a server and a client process exchange a
    tensordict over torch.distributed (gloo backend, localhost TCP).
    """
    # NOTE(review): make_td/client/server take no ``self`` -- they look like
    # stripped ``@staticmethod``/``@classmethod`` decorators; confirm upstream.
    def make_td(ones):
        """Build the tensordict under test; subclasses must override."""
        raise NotImplementedError
    def client(cls, pseudo_rand):
        # Rank 1 sends a tensordict of ones to rank 0.
        torch.distributed.init_process_group('gloo', rank=1, world_size=2, init_method='tcp://localhost:10017')
        td = cls.make_td(True)
        td.isend(0, pseudo_rand=pseudo_rand)
    def server(cls, queue, pseudo_rand):
        # Rank 0 receives into a zero-initialized tensordict and verifies contents.
        torch.distributed.init_process_group('gloo', rank=0, world_size=2, init_method='tcp://localhost:10017')
        td = cls.make_td(False)
        td.recv(1, pseudo_rand=pseudo_rand)
        assert (td == 1).all()
        # Signal success back to the test process.
        queue.put('yuppie')
    # NOTE(review): bare ``.flaky(...)`` looks like a stripped
    # ``@pytest.mark.flaky`` decorator -- confirm upstream.
    .flaky(reruns=5, reruns_delay=5)
    def test_isend(self, pseudo_rand, set_context):
        """Spawn server+client processes and wait for the server's success token."""
        queue = mp.Queue(1)
        main_worker = mp.Process(target=type(self).server, args=(queue, pseudo_rand))
        secondary_worker = mp.Process(target=type(self).client, args=(pseudo_rand,))
        main_worker.start()
        secondary_worker.start()
        try:
            out = queue.get(timeout=TIMEOUT)
            assert (out == 'yuppie')
        except Exception as err:
            raise err
        finally:
            # Always reap both workers, even on timeout/assertion failure.
            main_worker.join()
            secondary_worker.join()
def test_body3d_semi_supervision_dataset_compatibility():
    """Body3DSemiSupervisionDataset wraps a labeled and an unlabeled H36M
    dataset and must expose the unlabeled 2D input under 'unlabeled_input'.

    Also checks the deprecated direct-construction path emits a
    DeprecationWarning and that the combined length matches the unlabeled set.
    """
    # Fix: 'causall' was a typo for 'causal' (cf. the unlabeled cfg below),
    # so the intended causal=False was silently not applied.
    labeled_data_cfg = dict(num_joints=17, seq_len=27, seq_frame_interval=1, causal=False, temporal_padding=True, joint_2d_src='gt', subset=1, subjects=['S1'], need_camera_param=True, camera_param_file='tests/data/h36m/cameras.pkl')
    labeled_dataset = dict(type='Body3DH36MDataset', ann_file='tests/data/h36m/test_h36m_body3d.npz', img_prefix='tests/data/h36m', data_cfg=labeled_data_cfg, pipeline=[])
    unlabeled_data_cfg = dict(num_joints=17, seq_len=27, seq_frame_interval=1, causal=False, temporal_padding=True, joint_2d_src='gt', subjects=['S5', 'S7', 'S8'], need_camera_param=True, camera_param_file='tests/data/h36m/cameras.pkl', need_2d_label=True)
    unlabeled_dataset = dict(type='Body3DH36MDataset', ann_file='tests/data/h36m/test_h36m_body3d.npz', img_prefix='tests/data/h36m', data_cfg=unlabeled_data_cfg, pipeline=[dict(type='Collect', keys=[('input_2d', 'unlabeled_input')], meta_name='metas', meta_keys=[])])
    dataset = 'Body3DSemiSupervisionDataset'
    dataset_class = DATASETS.get(dataset)
    # Direct construction is deprecated; build_dataset is the supported path.
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(labeled_dataset, unlabeled_dataset)
    item = custom_dataset[0]
    assert ('unlabeled_input' in item.keys())
    unlabeled_dataset = build_dataset(unlabeled_dataset)
    # The combined dataset is driven by the (larger) unlabeled split.
    assert (len(unlabeled_dataset) == len(custom_dataset))
def _lenarray(dims):
    """Return the linear length of a qcmio-packed array.

    *dims* holds 1-5 dimension descriptors.  For 2-4 descriptors the matching
    ``qcmio.lindN`` helper yields the largest packed index, which becomes a
    length by adding 1.  A 5th descriptor multiplies the 4-D length as an
    outer dimension; a single descriptor is already the length itself.
    """
    ndim = len(dims)
    if ndim == 5:
        last = qcmio.lind4(False, dims[0], dims[1], dims[2], dims[3], 0,
                           abs(dims[0]), abs(dims[1]), abs(dims[2]), abs(dims[3]))[0]
        return dims[4] * last + 1
    if ndim == 4:
        return qcmio.lind4(False, dims[0], dims[1], dims[2], dims[3], 0,
                           abs(dims[0]), abs(dims[1]), abs(dims[2]), abs(dims[3]))[0] + 1
    if ndim == 3:
        return qcmio.lind3(False, dims[0], dims[1], dims[2], 0,
                           abs(dims[0]), abs(dims[1]), abs(dims[2]))[0] + 1
    if ndim == 2:
        return qcmio.lind2(False, dims[0], dims[1], 0, abs(dims[0]), abs(dims[1]))[0] + 1
    return dims[0]
def cmd_obj(args) -> None:
    """Entry point for the ``cmd-obj`` CLI: run a command on a qtile object.

    Without an object spec, lists the base objects and exits with status 1.
    Otherwise resolves the object over IPC and either prints its help, its
    function info, or runs the requested function and pretty-prints the result.
    """
    if not args.obj_spec:
        # Nothing to operate on: show what is available and fail.
        print_base_objects()
        sys.exit(1)

    sock_file = args.socket or find_sockfile()
    ipc_client = Client(sock_file)
    cmd_object = IPCCommandInterface(ipc_client)
    cmd_client = CommandClient(cmd_object)
    obj = get_object(cmd_client, args.obj_spec)

    if args.function == 'help':
        try:
            print_commands('-o ' + ' '.join(args.obj_spec), obj)
        except CommandError:
            # A lone object name usually means a missing selector/identifier.
            if len(args.obj_spec) == 1:
                print(f"{args.obj_spec} object needs a specified identifier e.g. '-o bar top'.")
                sys.exit(1)
            raise
    elif args.info:
        print(args.function + get_formated_info(obj, args.function, args=True, short=False))
    else:
        ret = run_function(obj, args.function, args.args)
        if ret is not None:
            pprint.pprint(ret)
def test_get_operators():
    """get_operators() on a two-term sum yields exactly the two summands
    (iteration order is unspecified)."""
    term_a = QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 1)
    term_b = QubitOperator(((2, 'Z'), (3, 'Y')), 1)
    parts = list((term_a + term_b).get_operators())
    assert parts in ([term_a, term_b], [term_b, term_a])
def get_final_results(log_json_path, epoch, results_lut):
    """Collect final metrics for *epoch* from an mmcv-style JSON-lines log.

    From the matching 'train' record it takes the reported 'memory';
    from the matching 'val' record it copies every key listed in
    *results_lut* that is present.  Returns a (possibly empty) dict.
    """
    results = {}
    with open(log_json_path, 'r') as log_file:
        for raw in log_file:
            record = json.loads(raw)
            # Skip header/meta lines that carry no 'mode' field.
            if 'mode' not in record:
                continue
            if record['mode'] == 'train' and record['epoch'] == epoch:
                results['memory'] = record['memory']
            elif record['mode'] == 'val' and record['epoch'] == epoch:
                results.update({key: record[key] for key in results_lut if key in record})
    return results
# NOTE(review): the bare ``.parametrize`` call below looks like a stripped
# ``@pytest.mark.parametrize`` decorator -- confirm against upstream.
.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_glorot_uniform(tensor_shape):
    """Glorot/Xavier uniform init: zero mean, values within +/- sqrt(6/(fan_in+fan_out))."""
    (fan_in, fan_out) = initializers._compute_fans(tensor_shape)
    # The Glorot uniform bound.
    scale = np.sqrt((6.0 / (fan_in + fan_out)))
    _runner(initializers.glorot_uniform(), tensor_shape, target_mean=0.0, target_max=scale, target_min=(- scale))
# NOTE(review): the bare expressions below look like stripped decorators
# (route path, @path_param, @resource_user_resource, @show_if) -- confirm
# against the upstream quay source.
('/v1/organization/<orgname>/marketplace/<subscription_id>')
_param('orgname', 'The name of the organization')
_param('subscription_id', 'Marketplace subscription id')
_user_resource(UserPlan)
_if(features.BILLING)
class OrganizationRhSkuSubscriptionField(ApiResource):
    """API resource to detach a marketplace (RH SKU) subscription from an org."""
    # NOTE(review): bare ``_scope(...)`` looks like a stripped scope decorator.
    _scope(scopes.ORG_ADMIN)
    def delete(self, orgname, subscription_id):
        """Unbind *subscription_id* from *orgname*.

        Returns 204 on success, 400 for an unknown organization, and aborts
        with 401 when the caller lacks org-admin permission.
        """
        permission = AdministerOrganizationPermission(orgname)
        if permission.can():
            try:
                organization = model.organization.get_organization(orgname)
            except InvalidOrganizationException:
                return ('Organization not valid', 400)
            model.organization_skus.remove_subscription_from_org(organization.id, subscription_id)
            return ('Deleted', 204)
        # Caller is not an administrator of this organization.
        abort(401)
def add_CCL_constraints(n, config):
    """Add per-country, per-carrier ("CCL") generation capacity constraints.

    Reads min/max aggregate capacity limits from the CSV configured at
    ``config['electricity']['agg_p_nom_limits']`` (indexed by country and
    carrier) and adds one >= / <= constraint per populated limit on the sum
    of generator ``p_nom`` variables in that (country, carrier) group.
    """
    agg_p_nom_limits = config['electricity'].get('agg_p_nom_limits')
    try:
        agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2)))
    except IOError:
        logger.exception("Need to specify the path to a .csv file containing aggregate capacity limits per country in config['electricity']['agg_p_nom_limit'].")
        # Fix: previously execution fell through and crashed with a confusing
        # NameError on agg_p_nom_minmax; propagate the real error instead.
        raise
    logger.info('Adding per carrier generation capacity constraints for individual countries')
    gen_country = n.generators.bus.map(n.buses.country)
    # One linear expression per (country, carrier): sum of generator p_nom vars.
    p_nom_per_cc = pd.DataFrame({'p_nom': linexpr((1, get_var(n, 'Generator', 'p_nom'))), 'country': gen_country, 'carrier': n.generators.carrier}).dropna(subset=['p_nom']).groupby(['country', 'carrier']).p_nom.apply(join_exprs)
    minimum = agg_p_nom_minmax['min'].dropna()
    if not minimum.empty:
        define_constraints(n, p_nom_per_cc[minimum.index], '>=', minimum, 'agg_p_nom', 'min')
    maximum = agg_p_nom_minmax['max'].dropna()
    if not maximum.empty:
        define_constraints(n, p_nom_per_cc[maximum.index], '<=', maximum, 'agg_p_nom', 'max')
class Plugin(BasePlugin):
    """Electrum plugin that transfers transactions over an audio modem
    (``amodem``): payloads are zlib-compressed and modulated to the sound
    card, or demodulated from the microphone and decompressed.
    """
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        if self.is_available():
            # Start at the most robust (slowest) modem configuration.
            self.modem_config = amodem.config.slowest()
            # NOTE(review): only Linux is mapped -- this raises KeyError on
            # other platforms; confirm whether that is intentional.
            self.library_name = {'Linux': 'libportaudio.so'}[platform.system()]
    def is_available(self):
        # Usable only when the optional ``amodem`` dependency imported.
        return (amodem is not None)
    def requires_settings(self):
        return True
    def settings_widget(self, window):
        return EnterButton(_('Settings'), partial(self.settings_dialog, window))
    def settings_dialog(self, window):
        """Modal dialog letting the user pick the modem bit rate."""
        d = WindowModalDialog(window, _('Audio Modem Settings'))
        layout = QGridLayout(d)
        layout.addWidget(QLabel(_('Bit rate [kbps]: ')), 0, 0)
        bitrates = list(sorted(amodem.config.bitrates.keys()))
        def _index_changed(index):
            # Swap in the modem configuration for the newly selected bit rate.
            bitrate = bitrates[index]
            self.modem_config = amodem.config.bitrates[bitrate]
        combo = QComboBox()
        combo.addItems([str(x) for x in bitrates])
        combo.currentIndexChanged.connect(_index_changed)
        layout.addWidget(combo, 0, 1)
        ok_button = QPushButton(_('OK'))
        ok_button.clicked.connect(d.accept)
        layout.addWidget(ok_button, 1, 1)
        return bool(d.exec_())
    def transaction_dialog(self, dialog: 'TxDialog'):
        """Add a 'play over speaker' button to the transaction dialog."""
        b = QPushButton()
        b.setIcon(read_QIcon('speaker.png'))
        def handler():
            blob = dialog.tx.serialize()
            self._send(parent=dialog, blob=blob)
        b.clicked.connect(handler)
        # Insert just before the last sharing button.
        dialog.sharing_buttons.insert((- 1), b)
    def scan_text_edit(self, parent):
        parent.addButton('microphone.png', partial(self._recv, parent), _('Read from microphone'))
    def show_text_edit(self, parent):
        def handler():
            blob = str(parent.toPlainText())
            self._send(parent=parent, blob=blob)
        parent.addButton('speaker.png', handler, _('Send to speaker'))
    def _audio_interface(self):
        # Fresh interface bound to the currently selected modem configuration.
        interface = amodem.audio.Interface(config=self.modem_config)
        return interface.load(self.library_name)
    def _send(self, parent, blob):
        """Compress *blob* and play it through the speaker from a worker thread."""
        def sender_thread():
            with self._audio_interface() as interface:
                src = BytesIO(blob)
                dst = interface.player()
                amodem.main.send(config=self.modem_config, src=src, dst=dst)
        _logger.info(f'Sending: {repr(blob)}')
        # Rebinding happens before WaitingDialog starts the worker; the
        # closure reads the compressed bytes, not the original string.
        blob = zlib.compress(blob.encode('ascii'))
        kbps = (self.modem_config.modem_bps / 1000.0)
        msg = 'Sending to Audio MODEM ({0:.1f} kbps)...'.format(kbps)
        WaitingDialog(parent, msg, sender_thread)
    def _recv(self, parent):
        """Record from the microphone, demodulate, decompress, show the text."""
        def receiver_thread():
            with self._audio_interface() as interface:
                src = interface.recorder()
                dst = BytesIO()
                amodem.main.recv(config=self.modem_config, src=src, dst=dst)
                return dst.getvalue()
        def on_finished(blob):
            if blob:
                blob = zlib.decompress(blob).decode('ascii')
                _logger.info(f'Received: {repr(blob)}')
                parent.setText(blob)
        kbps = (self.modem_config.modem_bps / 1000.0)
        msg = 'Receiving from Audio MODEM ({0:.1f} kbps)...'.format(kbps)
        WaitingDialog(parent, msg, receiver_thread, on_finished)
class MessageBroker():
    """Thread-safe topic-based publish/subscribe broker with last-value replay.

    New subscribers immediately receive the most recently published value for
    their topic (if any).  An RLock guards all state, so callbacks may safely
    re-enter the broker (e.g. subscribe from within a callback) on the same
    thread.
    """

    def __init__(self):
        # topic -> most recently published value (may be None, meaning "cleared").
        self.__latest_values = {}
        # topic -> list of subscriber callbacks; empty lists are removed.
        self.__subscribers = {}
        self.__lock = threading.RLock()

    def subscribe(self, topic, callback):
        """Register *callback* for *topic*; replay the latest value if one exists."""
        with self.__lock:
            subscribers = self.__subscribers.setdefault(topic, [])
            subscribers.append(callback)
            value = self.__latest_values.get(topic)
            if value is not None:
                callback(value)

    def unsubscribe(self, topic, callback):
        """Remove *callback* from *topic*.

        Raises KeyError/ValueError if the topic or callback was never subscribed.
        """
        with self.__lock:
            subscribers = self.__subscribers[topic]
            subscribers.remove(callback)
            if not subscribers:
                # Drop empty subscriber lists so the dict does not grow unboundedly.
                del self.__subscribers[topic]

    def publish(self, topic, value):
        """Deliver *value* to all subscribers of *topic* and remember it.

        A None value is recorded but not delivered (it clears the replay cache).
        A failing subscriber is logged and does not affect other subscribers.
        """
        with self.__lock:
            if value is not None:
                for callback in self.__subscribers.get(topic, []):
                    try:
                        callback(value)
                    except Exception:
                        # Fix: narrowed from a bare ``except`` so that
                        # KeyboardInterrupt/SystemExit are no longer swallowed.
                        _log.error(f'Failed to notify subscriber of {topic}', exc_info=True)
            self.__latest_values[topic] = value
def patchify_augmentation(args, batch):
    """Split a 3D image/label batch into cubic patches, optionally replace a
    random subset of patches with Gaussian noise (contrast-mask pretraining),
    apply the configured augmentations patch-wise, and reassemble the volumes.

    Returns ``(aug_batch, mask)`` where ``mask`` is a flat per-patch indicator
    (1 = noise-replaced); it is all ones when ``args.add_contrast_mask`` is off.
    """
    aug_batch = dict()
    img = batch['image']
    label = batch['label']
    batch_size = img.size()[0]
    # Number of patches along each spatial axis (assumes cubic volumes).
    patch_dim = (img.size()[(- 1)] // args.mask_patch_size)
    # (B, C, H, W, D) -> (B*h*w*d, C, p, p, p): one row per cubic patch.
    images_patch = rearrange(img, 'b c (h p1) (w p2) (d p3) -> (b h w d) c p1 p2 p3 ', p1=args.mask_patch_size, p2=args.mask_patch_size, p3=args.mask_patch_size)
    labels_patch = rearrange(label, 'b c (h p1) (w p2) (d p3) -> (b h w d) c p1 p2 p3 ', p1=args.mask_patch_size, p2=args.mask_patch_size, p3=args.mask_patch_size)
    mask = np.ones(images_patch.size()[0])
    if args.add_contrast_mask:
        # Masking decisions are made at a coarser grid: each cell covers
        # mask_scale^3 patches.
        num_patches = (images_patch.shape[0] // (args.mask_scale ** 3))
        num_mask = int((args.mask_ratio * num_patches))
        mask_index = np.random.permutation(num_patches)[:num_mask]
        mask = np.zeros(num_patches, dtype=int)
        mask[mask_index] = 1
        mask = mask.reshape(batch_size, (patch_dim // args.mask_scale), (patch_dim // args.mask_scale), (patch_dim // args.mask_scale))
        if (args.mask_scale > 1):
            # Upsample the coarse mask back to per-patch resolution.
            mask = mask.repeat(args.mask_scale, axis=1).repeat(args.mask_scale, axis=2).repeat(args.mask_scale, axis=3)
        mask = mask.reshape((- 1))
        # Replace masked patches with N(0, 0.1) noise.
        # NOTE(review): the hard-coded 4 assumes 4 image channels -- confirm.
        noise_mask = torch.normal(mean=torch.zeros((num_mask * (args.mask_scale ** 3)), 4, args.mask_patch_size, args.mask_patch_size, args.mask_patch_size), std=(0.1 * torch.ones((num_mask * (args.mask_scale ** 3)), 4, args.mask_patch_size, args.mask_patch_size, args.mask_patch_size)))
        images_patch[(mask == 1)] = noise_mask
    aug_list = get_augmentation(args)
    aug_batch['image'] = images_patch
    aug_batch['label'] = labels_patch
    # Augmentations operate on the patchified batch.
    aug_batch = aug_list(aug_batch)
    # NOTE(review): ``feature_size`` appears unused -- candidate for removal.
    feature_size = (img.shape[2] // args.mask_patch_size)
    # Reassemble patches back into full (B, C, H, W, D) volumes.
    aug_batch['image'] = rearrange(aug_batch['image'], '(b h w d) c p1 p2 p3 -> b c (h p1) (w p2) (d p3)', h=patch_dim, w=patch_dim, d=patch_dim)
    aug_batch['label'] = rearrange(aug_batch['label'], '(b h w d) c p1 p2 p3 -> b c (h p1) (w p2) (d p3)', h=patch_dim, w=patch_dim, d=patch_dim)
    return (aug_batch, mask)
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
num_repeat = sum(repeats)
if (depth_trunc == 'round'):
num_repeat_scaled = max(1, round((num_repeat * depth_multiplier)))
else:
num_repeat_scaled = int(math.ceil((num_repeat * depth_multiplier)))
repeats_scaled = []
for r in repeats[::(- 1)]:
rs = max(1, round(((r / num_repeat) * num_repeat_scaled)))
repeats_scaled.append(rs)
num_repeat -= r
num_repeat_scaled -= rs
repeats_scaled = repeats_scaled[::(- 1)]
sa_scaled = []
for (ba, rep) in zip(stack_args, repeats_scaled):
sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
return sa_scaled |
def get_label(task, line):
    """Extract the gold label for *task* from one raw dataset line.

    GLUE-style tasks are tab-separated; the label column varies per task.
    STS-B's 0-5 similarity score is binarized at 2.5.  For any other task the
    label is simply the first element of the raw line.
    """
    glue_tasks = ('MNLI', 'MRPC', 'QNLI', 'QQP', 'RTE', 'SNLI', 'SST-2', 'STS-B', 'WNLI', 'CoLA')
    if task not in glue_tasks:
        return line[0]
    fields = line.strip().split('\t')
    if task == 'CoLA':
        return fields[1]
    if task == 'MRPC':
        return fields[0]
    if task == 'STS-B':
        # Regression score -> binary label at the 2.5 midpoint.
        return 0 if float(fields[-1]) < 2.5 else 1
    # MNLI, QNLI, QQP, RTE, SNLI, SST-2, WNLI: label is the last column.
    return fields[-1]
def run_inference(onnx_session, input_size, image):
    """Run a single BGR image through an ONNX model and return the result
    min-max normalized to uint8 [0, 255].

    The image is resized to *input_size*, converted to RGB, ImageNet-normalized,
    and reshaped to NCHW before inference.  The caller's image is not modified.
    """
    # Fix: dropped the needless copy.deepcopy -- cv.resize reads its input and
    # returns a new array, so the caller's image was never at risk.
    resize_image = cv.resize(image, dsize=(input_size[0], input_size[1]))
    x = cv.cvtColor(resize_image, cv.COLOR_BGR2RGB)
    x = np.array(x, dtype=np.float32)
    # ImageNet mean/std normalization (broadcast over the channel axis).
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    x = ((x / 255) - mean) / std
    # HWC -> CHW, then add the batch dimension.
    x = x.transpose(2, 0, 1)
    x = x.reshape(-1, 3, input_size[0], input_size[1]).astype('float32')
    input_name = onnx_session.get_inputs()[0].name
    output_name = onnx_session.get_outputs()[0].name
    onnx_result = onnx_session.run([output_name], {input_name: x})
    # Min-max normalize the raw output to [0, 255] for display.
    onnx_result = np.array(onnx_result).squeeze()
    min_value = np.min(onnx_result)
    max_value = np.max(onnx_result)
    onnx_result = (onnx_result - min_value) / (max_value - min_value)
    onnx_result *= 255
    onnx_result = onnx_result.astype('uint8')
    return onnx_result
class SinusoidalEmbeddings(nn.Module):
    """Fixed (non-learned) sinusoidal position embeddings.

    Produces a ``(seq_len, dim)`` table whose first half holds sines and
    second half cosines over a geometric ladder of frequencies.
    """

    def __init__(self, dim):
        super().__init__()
        # Frequencies 1 / 10000^(2k/dim) for k = 0 .. dim/2 - 1; registered as
        # a buffer so it follows the module's device/dtype without being trained.
        freqs = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', freqs)

    def forward(self, x):
        """Return embeddings for positions 0 .. x.shape[0]-1 on x's device."""
        positions = torch.arange(x.shape[0], device=x.device).type_as(self.inv_freq)
        # Outer product: position i times frequency j.
        angles = positions[:, None] * self.inv_freq[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
# NOTE(review): the bare ``.parametrize`` call below looks like a stripped
# ``@pytest.mark.parametrize`` decorator -- confirm against upstream.
.parametrize('test_args, expected', [([100], '100'), ([1000], '1,000'), ([10123], '10,123'), ([10311], '10,311'), ([1000000], '1,000,000'), ([1234567.25], '1,234,567.25'), (['100'], '100'), (['1000'], '1,000'), (['10123'], '10,123'), (['10311'], '10,311'), (['1000000'], '1,000,000'), (['1234567.1234567'], '1,234,'), (['1234567.1234567', 0], '1,234,567'), (['1234567.1234567', 1], '1,234,567.1'), (['1234567.1234567', 10], '1,234,567.'), (['1234567', 1], '1,234,567.0'), ([None], 'None'), ([14308.4], '14,308.4'), ([14308.4, None], '14,308.4'), ([14308.4, 1], '14,308.4'), ([14308.4, 2], '14,308.40'), ([14308.4, 3], '14,308.400'), ([1234.5454545], '1,'), ([1234.5454545, None], '1,'), ([1234.5454545, 0], '1,235'), ([1234.5454545, 1], '1,234.5'), ([1234.5454545, 2], '1,234.55'), ([1234.5454545, 3], '1,234.545'), ([1234.5454545, 10], '1,234.'), ([math.nan], 'NaN'), ([math.inf], '+Inf'), ([(- math.inf)], '-Inf'), (['nan'], 'NaN'), (['-inf'], '-Inf')])
def test_intcomma(test_args: ((list[int] | list[float]) | list[str]), expected: str) -> None:
    """humanize.intcomma adds thousands separators; second arg is decimal places.

    NOTE(review): several expected values look truncated (e.g. '1,234,' and
    '1,234,567.') -- possibly garbled during extraction; verify against the
    humanize test suite before trusting them.
    """
    assert (humanize.intcomma(*test_args) == expected)
# NOTE(review): ``.end_to_end()`` below looks like a stripped pytest marker
# decorator; the ``.produces`` lines inside ``source`` also appear to be
# stripped decorators -- confirm against the upstream pytask tests.
.end_to_end()
def test_two_tasks_have_the_same_product(tmp_path, runner, snapshot_cli):
    """Two tasks declaring the same product must fail DAG construction."""
    source = '\n    import pytask\n\n    .produces("out.txt")\n    def task_1(produces):\n        produces.write_text("1")\n\n    .produces("out.txt")\n    def task_2(produces):\n        produces.write_text("2")\n    '
    tmp_path.joinpath('task_d.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.DAG_FAILED)
    # The snapshot of the CLI output is only stable on Linux.
    if (sys.platform == 'linux'):
        assert (result.output == snapshot_cli())
def get_MNIST(root='./'):
    """Build the MNIST train/test datasets under ``root + 'data/'``.

    Downloads the data if missing and applies the standard MNIST
    mean/std normalization.  Returns
    ``(input_size, num_classes, train_dataset, test_dataset)``.
    """
    input_size = 28
    num_classes = 10
    # ToTensor scales to [0, 1]; Normalize uses the canonical MNIST stats.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    data_dir = root + 'data/'
    train_dataset = datasets.MNIST(data_dir, train=True, download=True, transform=preprocess)
    test_dataset = datasets.MNIST(data_dir, train=False, download=True, transform=preprocess)
    return (input_size, num_classes, train_dataset, test_dataset)
def test_custom_pane_show():
    """CustomPane renders createPane JS with z-index and pointer-events styles."""
    m = Map()
    pane = CustomPane('test-name', z_index=625, pointer_events=False).add_to(m)
    # Render only the pane's script template, as folium would at save time.
    rendered = pane._template.module.script(this=pane, kwargs={})
    # Comparison goes through normalize(), so internal whitespace is irrelevant.
    expected = f'''
var {pane.get_name()} = {m.get_name()}.createPane("test-name");
{pane.get_name()}.style.zIndex = 625;
{pane.get_name()}.style.pointerEvents = 'none';
'''
    assert (normalize(rendered) == normalize(expected))
def train_rcnn(cfg, dataset, image_set, root_path, dataset_path, frequent, kvstore, flip, shuffle, resume, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, train_shared, lr, lr_step, proposal, logger=None, output_path=None):
    """Train an R-CNN head on precomputed region proposals with MXNet.

    Loads proposal ROIs for each image set, attaches bbox regression targets,
    builds a MutableModule around the symbol named by ``cfg.symbol`` and runs
    SGD with a warmup multi-factor LR schedule from ``begin_epoch`` to
    ``end_epoch``.  Checkpoints (with un-normalized bbox weights) are written
    under ``prefix`` every epoch.
    """
    # Re-seed per run; seeds are themselves random, so runs are not reproducible.
    mx.random.seed(np.random.randint(10000))
    np.random.seed(np.random.randint(10000))
    if (not logger):
        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
    # Instantiate the network class named by cfg.symbol (module.class lookup).
    sym_instance = eval(((cfg.symbol + '.') + cfg.symbol))()
    sym = sym_instance.get_symbol_rcnn(cfg, is_train=True)
    # One BATCH_IMAGES-sized batch per device.
    batch_size = len(ctx)
    input_batch_size = (cfg.TRAIN.BATCH_IMAGES * batch_size)
    pprint.pprint(cfg)
    logger.info('training rcnn cfg:{}\n'.format(pprint.pformat(cfg)))
    # '+'-separated image sets are loaded separately and merged into one roidb.
    image_sets = [iset for iset in image_set.split('+')]
    roidbs = [load_proposal_roidb(dataset, image_set, root_path, dataset_path, proposal=proposal, append_gt=True, flip=flip, result_path=output_path) for image_set in image_sets]
    roidb = merge_roidb(roidbs)
    roidb = filter_roidb(roidb, cfg)
    # Bbox target normalization stats are needed later to un-normalize weights.
    (means, stds) = add_bbox_regression_targets(roidb, cfg)
    train_data = ROIIter(roidb, cfg, batch_size=input_batch_size, shuffle=shuffle, ctx=ctx, aspect_grouping=cfg.TRAIN.ASPECT_GROUPING)
    # Upper bound on the data shape across all configured scales.
    max_data_shape = [('data', (cfg.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]
    data_shape_dict = dict((train_data.provide_data_single + train_data.provide_label_single))
    sym_instance.infer_shape(data_shape_dict)
    # Either resume from our own checkpoint or start from a pretrained model.
    if resume:
        print('continue training from ', begin_epoch)
        (arg_params, aux_params) = load_param(prefix, begin_epoch, convert=True)
    else:
        (arg_params, aux_params) = load_param(pretrained, epoch, convert=True)
        sym_instance.init_weight_rcnn(cfg, arg_params, aux_params)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
    data_names = [k[0] for k in train_data.provide_data_single]
    label_names = [k[0] for k in train_data.provide_label_single]
    # Optionally freeze the shared backbone parameters.
    if train_shared:
        fixed_param_prefix = cfg.network.FIXED_PARAMS_SHARED
    else:
        fixed_param_prefix = cfg.network.FIXED_PARAMS
    mod = MutableModule(sym, data_names=data_names, label_names=label_names, logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in range(batch_size)], fixed_param_prefix=fixed_param_prefix)
    if cfg.TRAIN.RESUME:
        # Also restore the optimizer state saved alongside the checkpoint.
        mod._preload_opt_states = ('%s-%04d.states' % (prefix, begin_epoch))
    eval_metric = metric.RCNNAccMetric(cfg)
    cls_metric = metric.RCNNLogLossMetric(cfg)
    bbox_metric = metric.RCNNL1LossMetric(cfg)
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)
    batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=frequent)
    # do_checkpoint un-normalizes bbox weights with (means, stds) before saving.
    epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
    # Multi-step LR schedule: drop by lr_factor at each epoch in lr_step,
    # skipping steps that already happened before begin_epoch.
    base_lr = lr
    lr_factor = cfg.TRAIN.lr_factor
    lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
    lr_epoch_diff = [(epoch - begin_epoch) for epoch in lr_epoch if (epoch > begin_epoch)]
    lr = (base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff))))
    lr_iters = [int(((epoch * len(roidb)) / batch_size)) for epoch in lr_epoch_diff]
    print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
    lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, cfg.TRAIN.warmup, cfg.TRAIN.warmup_lr, cfg.TRAIN.warmup_step)
    optimizer_params = {'momentum': cfg.TRAIN.momentum, 'wd': cfg.TRAIN.wd, 'learning_rate': lr, 'lr_scheduler': lr_scheduler, 'rescale_grad': 1.0, 'clip_gradient': None}
    # Overlap data loading with computation.
    if (not isinstance(train_data, PrefetchingIter)):
        train_data = PrefetchingIter(train_data)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback, batch_end_callback=batch_end_callback, kvstore=kvstore, optimizer='sgd', optimizer_params=optimizer_params, arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
class Paragraph(Block):
    """CommonMark paragraph block: accepts content lines; any leading link
    reference definitions are consumed at finalize time.
    """
    # Paragraphs collect free-form content lines.
    accepts_lines = True
    # NOTE(review): continue_/finalize/can_contain take no ``self``/``cls`` --
    # they look like stripped ``@staticmethod``s; confirm against upstream.
    def continue_(parser=None, container=None):
        """Return 1 (stop: a blank line ends the paragraph) or 0 (continue)."""
        return (1 if parser.blank else 0)
    def finalize(parser=None, block=None):
        """Strip leading link reference definitions; drop the block if that
        leaves it empty."""
        has_reference_defs = False
        # Repeatedly parse '[label]: url' definitions from the start.
        while (peek(block.string_content, 0) == '['):
            pos = parser.inline_parser.parseReference(block.string_content, parser.refmap)
            if (not pos):
                break
            block.string_content = block.string_content[pos:]
            has_reference_defs = True
        if (has_reference_defs and is_blank(block.string_content)):
            # Paragraph consisted only of reference definitions: remove it.
            block.unlink()
    def can_contain(t):
        # Paragraphs are leaf blocks.
        return False
class TestProcessTomography(unittest.TestCase):
    """Process tomography smoke test on a 2-qubit Bell-state circuit."""

    def setUp(self):
        super().setUp()
        # Reconstruction method exercised by every test in this class.
        self.method = 'lstsq'

    def test_bell_2_qubits(self):
        """Reconstructed Choi matrix of a Bell circuit has fidelity ~1."""
        qreg = QuantumRegister(2)
        circuit = QuantumCircuit(qreg)
        circuit.h(qreg[0])
        circuit.cx(qreg[0], qreg[1])
        measured_choi, ideal_choi = run_circuit_and_tomography(circuit, qreg, self.method)
        # Choi matrices are normalized by the input dimension (2 qubits -> 4).
        fidelity = state_fidelity(ideal_choi / 4, measured_choi / 4, validate=False)
        self.assertAlmostEqual(fidelity, 1, places=1)
class THCRotations(Bloq):
    """Bloq applying the QROM-programmed basis rotations used in the
    tensor-hypercontraction (THC) qubitization walk.

    NOTE(review): the bare ``_property`` line below looks like a stripped
    ``@property`` decorator, and ``dag`` being '' in both branches of
    ``pretty_name`` suggests a lost dagger character -- confirm upstream.
    """
    # THC auxiliary index dimension (mu).
    num_mu: int
    num_spin_orb: int
    # Bits of precision used for each rotation angle.
    num_bits_theta: int
    # QROM clean/dirty-ancilla tradeoff parameters.
    kr1: int = 1
    kr2: int = 1
    two_body_only: bool = False
    adjoint: bool = False
    _property
    def signature(self) -> Signature:
        return Signature([Register('nu_eq_mp1', bitsize=1), Register('data', bitsize=self.num_bits_theta), Register('sel', bitsize=self.num_mu.bit_length()), Register('trg', bitsize=(self.num_spin_orb // 2))])
    def pretty_name(self) -> str:
        dag = ('' if self.adjoint else '')
        return f'In_mu-R{dag}'
    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        """Toffoli count: QROM (un)computation plus the programmed rotations."""
        num_data_sets = (self.num_mu + (self.num_spin_orb // 2))
        if self.adjoint:
            # Uncomputation uses the cheaper measurement-based QROM erasure.
            if self.two_body_only:
                toff_cost_qrom = ((int(np.ceil((self.num_mu / self.kr1))) + int(np.ceil((self.num_spin_orb / (2 * self.kr1))))) + self.kr1)
            else:
                toff_cost_qrom = (int(np.ceil((self.num_mu / self.kr2))) + self.kr2)
        else:
            toff_cost_qrom = (num_data_sets - 2)
            # NOTE(review): nesting inferred from upstream qualtran -- the
            # flattened source makes the placement of this ``if`` ambiguous.
            if self.two_body_only:
                toff_cost_qrom -= (self.num_spin_orb // 2)
        # Each of num_spin_orb rotations costs (num_bits_theta - 2) Toffolis.
        rot_cost = (self.num_spin_orb * (self.num_bits_theta - 2))
        return {(Toffoli(), (rot_cost + toff_cost_qrom))}
def test_prompt_format_equivalency_mistral():
    """Our PromptFormat must tokenize identically to Mistral's own chat template.

    Compares tokenizer.apply_chat_template against our generate_prompt output
    for several single- and multi-turn conversations.
    """
    model = 'mistralai/Mistral-7B-Instruct-v0.1'
    tokenizer = AutoTokenizer.from_pretrained(model)
    # Mirrors the [INST] ... [/INST] Mistral template; the system message is
    # folded into the first user turn (system_in_user=True).
    prompt_format = PromptFormat(system='{instruction} + ', assistant='{instruction}</s> ', trailing_assistant='', user='[INST] {system}{instruction} [/INST]', default_system_message='', system_in_user=True)
    conversations = [[Message(role='user', content='hello1')], [Message(role='user', content='hello1'), Message(role='assistant', content='hello2'), Message(role='user', content='hello3')], [Message(role='user', content='hello2'), Message(role='assistant', content='hello3'), Message(role='user', content='hello4')], [Message(role='user', content='hello1'), Message(role='assistant', content='hello2'), Message(role='user', content='hello3'), Message(role='assistant', content='hello4'), Message(role='user', content='hello5')]]
    for conversation in conversations:
        dict_conversation = [message.dict() for message in conversation]
        # Ground truth: the tokenizer's built-in chat template.
        reference_tokens = tokenizer.apply_chat_template(dict_conversation, tokenize=True)
        our_tokens = tokenizer.encode(prompt_format.generate_prompt(conversation))
        assert (reference_tokens == our_tokens)
class TestDirUtil(support.TempdirManager):
    """Tests for distutils.dir_util: mkpath, remove_tree, create_tree,
    copy_tree and ensure_relative.

    NOTE(review): ``self.target`` / ``self.root_target`` / ``self.target2``
    are set up outside this chunk (presumably in a setup fixture) -- confirm.
    """
    def test_mkpath_remove_tree_verbosity(self, caplog):
        """verbose=0 logs nothing; verbose=1 logs each created/removed dir."""
        mkpath(self.target, verbose=0)
        assert (not caplog.records)
        remove_tree(self.root_target, verbose=0)
        mkpath(self.target, verbose=1)
        # Both the parent and the leaf directory creations are logged.
        wanted = [('creating %s' % self.root_target), ('creating %s' % self.target)]
        assert (caplog.messages == wanted)
        caplog.clear()
        remove_tree(self.root_target, verbose=1)
        wanted = [("removing '%s' (and everything under it)" % self.root_target)]
        assert (caplog.messages == wanted)
    # NOTE(review): bare ``.skipif`` looks like a stripped
    # ``@pytest.mark.skipif`` decorator -- confirm upstream.
    .skipif("platform.system() == 'Windows'")
    def test_mkpath_with_custom_mode(self):
        """mkpath honors the requested mode, masked by the process umask."""
        # Read the current umask without changing it permanently.
        umask = os.umask(2)
        os.umask(umask)
        mkpath(self.target, 448)
        assert (stat.S_IMODE(os.stat(self.target).st_mode) == (448 & (~ umask)))
        mkpath(self.target2, 365)
        assert (stat.S_IMODE(os.stat(self.target2).st_mode) == (365 & (~ umask)))
    def test_create_tree_verbosity(self, caplog):
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=0)
        assert (caplog.messages == [])
        remove_tree(self.root_target, verbose=0)
        # Only the base directory creation is logged, not the file stubs.
        wanted = [('creating %s' % self.root_target)]
        create_tree(self.root_target, ['one', 'two', 'three'], verbose=1)
        assert (caplog.messages == wanted)
        remove_tree(self.root_target, verbose=0)
    def test_copy_tree_verbosity(self, caplog):
        """Copying an empty tree logs nothing; each copied file is logged."""
        mkpath(self.target, verbose=0)
        copy_tree(self.target, self.target2, verbose=0)
        assert (caplog.messages == [])
        remove_tree(self.root_target, verbose=0)
        mkpath(self.target, verbose=0)
        a_file = os.path.join(self.target, 'ok.txt')
        with open(a_file, 'w') as f:
            f.write('some content')
        wanted = ['copying {} -> {}'.format(a_file, self.target2)]
        copy_tree(self.target, self.target2, verbose=1)
        assert (caplog.messages == wanted)
        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)
    def test_copy_tree_skips_nfs_temp_files(self):
        """.nfs* files (NFS silly-rename artifacts) must not be copied."""
        mkpath(self.target, verbose=0)
        a_file = os.path.join(self.target, 'ok.txt')
        nfs_file = os.path.join(self.target, '.nfs123abc')
        for f in (a_file, nfs_file):
            with open(f, 'w') as fh:
                fh.write('some content')
        copy_tree(self.target, self.target2)
        assert (os.listdir(self.target2) == ['ok.txt'])
        remove_tree(self.root_target, verbose=0)
        remove_tree(self.target2, verbose=0)
    def test_ensure_relative(self):
        """ensure_relative strips the leading separator (or drive anchor)."""
        if (os.sep == '/'):
            assert (ensure_relative('/home/foo') == 'home/foo')
            assert (ensure_relative('some/path') == 'some/path')
        else:
            assert (ensure_relative('c:\\home\\foo') == 'c:home\\foo')
            assert (ensure_relative('home\\foo') == 'home\\foo')
    def test_copy_tree_exception_in_listdir(self):
        """An OSError from os.listdir surfaces as DistutilsFileError."""
        with mock.patch('os.listdir', side_effect=OSError()), pytest.raises(errors.DistutilsFileError):
            src = self.tempdirs[(- 1)]
            dir_util.copy_tree(src, None)
class Pedestrian(_BaseCatalog):
    """OpenSCENARIO Pedestrian entity.

    Parameters
    ----------
    name : str
        Name of the pedestrian.
    mass : float-convertible
        Mass of the pedestrian.
    category : PedestrianCategory (or str convertible to it)
        Category of the pedestrian.
    boundingbox : BoundingBox
        Bounding box of the pedestrian.
    model : str, optional
        Definition model of the pedestrian (required for OSC 1.0).
    role : Role, optional
        Role of the pedestrian (valid from OSC 1.2).
    """

    def __init__(self, name, mass, category, boundingbox, model=None, role=None):
        super().__init__()
        self.name = name
        self.model = model
        self.mass = convert_float(mass)
        self.category = convert_enum(category, PedestrianCategory)
        if not isinstance(boundingbox, BoundingBox):
            raise TypeError('boundingbox input is not of type BoundingBox')
        self.boundingbox = boundingbox
        self.properties = Properties()
        self.role = convert_enum(role, Role, True)

    def __eq__(self, other):
        # Fix: always return a bool (the original fell through and returned
        # None when *other* was not a Pedestrian).
        if isinstance(other, Pedestrian):
            return (
                self.get_attributes() == other.get_attributes()
                and self.boundingbox == other.boundingbox
                and self.properties == other.properties
                and self.parameters == other.parameters
                and self.role == other.role
            )
        return False

    @staticmethod
    def parse(element):
        """Parse a <Pedestrian> XML element into a Pedestrian instance."""
        # Declared static since it never uses an instance; it is called as
        # Pedestrian.parse(element).
        name = element.attrib['name']
        mass = convert_float(element.attrib['mass'])
        model = None
        # 'model3d' (newer schema) takes precedence over the legacy 'model'.
        if 'model3d' in element.attrib:
            model = element.attrib['model3d']
        elif 'model' in element.attrib:
            model = element.attrib['model']
        category = convert_enum(element.attrib['pedestrianCategory'], PedestrianCategory)
        # Compare against None explicitly: an Element with no children is
        # falsy, so plain truthiness would be wrong here.
        if element.find('ParameterDeclarations') is not None:
            parameters = ParameterDeclarations.parse(element.find('ParameterDeclarations'))
        else:
            parameters = ParameterDeclarations()
        boundingbox = BoundingBox.parse(element.find('BoundingBox'))
        properties = Properties.parse(element.find('Properties'))
        role = None
        if 'role' in element.attrib:
            role = convert_enum(element.attrib['role'], Role)
        pedestrian = Pedestrian(name, mass, category, boundingbox, model, role)
        pedestrian.parameters = parameters
        pedestrian.properties = properties
        return pedestrian

    def add_property(self, name, value):
        """Add a single name/value property; returns self for chaining."""
        self.properties.add_property(name, value)
        return self

    def add_property_file(self, filename):
        """Add a property file reference; returns self for chaining."""
        self.properties.add_file(filename)
        return self

    def get_attributes(self):
        """Return the XML attributes of the Pedestrian as a dict.

        Raises OpenSCENARIOVersionError when a version constraint is
        violated (missing model for OSC 1.0, role before OSC 1.2).
        """
        retdict = {}
        retdict['name'] = str(self.name)
        retdict['pedestrianCategory'] = self.category.get_name()
        if self.isVersion(minor=0) and self.model is None:
            raise OpenSCENARIOVersionError('model is required for OSC 1.0')
        if self.model is not None:
            # The attribute was renamed 'model' -> 'model3d' after OSC 1.0.
            if self.isVersion(minor=0):
                retdict['model'] = self.model
            else:
                retdict['model3d'] = self.model
        retdict['mass'] = str(self.mass)
        if self.role:
            if self.isVersionEqLess(minor=1):
                raise OpenSCENARIOVersionError('role for Pedestrian was added in OSC V1.2')
            retdict['role'] = self.role.get_name()
        return retdict

    def get_element(self):
        """Return the ElementTree element for this Pedestrian."""
        element = ET.Element('Pedestrian', attrib=self.get_attributes())
        self.add_parameters_to_element(element)
        element.append(self.boundingbox.get_element())
        element.append(self.properties.get_element())
        return element
# NOTE(review): the next three lines appear to be click decorators mangled by
# extraction (presumably ``@click.command(...)``, ``@click.option(...)`` and
# ``@click.pass_context``) -- confirm against the upstream file.
(short_help='Publish distributions to VCS Releases', context_settings={'help_option_names': ['-h', '--help']})
('--tag', 'tag', help='The tag associated with the release to publish to', default='latest')
_context
def publish(ctx: click.Context, tag: str='latest') -> None:
    """Upload built distribution files matching the configured glob patterns
    to the VCS release for *tag* ('latest' resolves to the newest matching
    tag); in no-op mode only print what would be uploaded."""
    runtime = ctx.obj
    repo = runtime.repo
    hvcs_client = runtime.hvcs_client
    translator = runtime.version_translator
    dist_glob_patterns = runtime.dist_glob_patterns
    if (tag == 'latest'):
        try:
            # tags_and_versions is presumably ordered newest-first, so the
            # first element is the latest tag -- TODO confirm.
            tag = str(tags_and_versions(repo.tags, translator)[0][0])
        except IndexError:
            ctx.fail(f"No tags found with format {translator.tag_format!r}, couldn't identify latest version")
    if runtime.global_cli_options.noop:
        noop_report((('would have uploaded files matching any of the globs ' + ', '.join((repr(g) for g in dist_glob_patterns))) + ' to a remote VCS release, if supported'))
        ctx.exit(0)
    log.info('Uploading distributions to release')
    for pattern in dist_glob_patterns:
        hvcs_client.upload_dists(tag=tag, dist_glob=pattern)
def test_trim_turns():
    """trim_turns must crop turns to the UEM scoring regions and, when no
    UEM is given, to the explicit (onset, offset) scoring window."""
    input_turns = [
        Turn(1, 5, speaker_id='S1', file_id='FILE1'),
        Turn(6, 10, speaker_id='S1', file_id='FILE1'),
        Turn(0, 10, speaker_id='S1', file_id='FILE2'),
    ]
    scoring_regions = UEM({'FILE1': [(2, 6), (5.8, 7)], 'FILE2': [(2, 3), (4, 5)]})
    # Against an explicit UEM only the in-region portions survive.
    assert set(trim_turns(input_turns, scoring_regions)) == {
        Turn(2, 5, speaker_id='S1', file_id='FILE1'),
        Turn(6, 7, speaker_id='S1', file_id='FILE1'),
        Turn(2, 3, speaker_id='S1', file_id='FILE2'),
        Turn(4, 5, speaker_id='S1', file_id='FILE2'),
    }
    # Without a UEM, the (onset=2, offset=7) pair is one scoring window.
    assert set(trim_turns(input_turns, None, 2, 7)) == {
        Turn(2, 5, speaker_id='S1', file_id='FILE1'),
        Turn(6, 7, speaker_id='S1', file_id='FILE1'),
        Turn(2, 7, speaker_id='S1', file_id='FILE2'),
    }
class Logic(object):
    """Describes an SMT logic: a name, a Theory, and a quantifier-free flag.

    Comparison operators implement the subsumption partial order between
    logics (theory containment plus quantifier restriction).
    """

    def __init__(self, name, description, quantifier_free=False, theory=None, **theory_kwargs):
        self.name = name
        self.description = description
        self.quantifier_free = quantifier_free
        # Either use the given Theory or build one from keyword arguments.
        if (theory is None):
            self.theory = Theory(**theory_kwargs)
        else:
            self.theory = theory
        return

    def get_quantified_version(self):
        """Return the closest known logic admitting quantifiers.

        NOTE(review): returns self when quantifier_free is True, which reads
        inverted at first glance -- confirm intended semantics upstream.
        """
        if self.quantifier_free:
            return self
        target_logic = Logic(name='', description='', quantifier_free=False, theory=self.theory)
        return get_closer_pysmt_logic(target_logic)

    def is_quantified(self):
        # A logic admits quantifiers iff it is not quantifier-free.
        return (not self.quantifier_free)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if ((other is None) or (not isinstance(other, Logic))):
            return False
        return ((self.name == other.name) and (self.quantifier_free == other.quantifier_free) and (self.theory == other.theory))

    def __ne__(self, other):
        return (not (self == other))

    def __lt__(self, other):
        # Strict subsumption: comparable-but-not-equal.
        return ((self != other) and self.__le__(other))

    def __le__(self, other):
        # Weaker-or-equal: theory contained in other's, and at least as
        # restricted w.r.t. quantifiers (True >= False on the flag).
        return ((self.theory <= other.theory) and (self.quantifier_free >= other.quantifier_free))

    def __ge__(self, other):
        return other.__le__(self)

    def __gt__(self, other):
        return other.__lt__(self)

    def __hash__(self):
        # Hash on the name only; consistent with __eq__ since equal logics
        # necessarily share a name.
        return hash(self.name)
class TestRawFeatureVector(QiskitMLTestCase):
    """Tests for the RawFeatureVector amplitude-encoding feature map."""

    def test_construction(self):
        # 4 features -> amplitude encoding on log2(4) = 2 qubits.
        circuit = RawFeatureVector(4)
        with self.subTest('check number of qubits'):
            self.assertEqual(circuit.num_qubits, 2)
        with self.subTest('check parameters'):
            self.assertEqual(len(circuit.parameters), 4)
        with self.subTest('check unrolling fails'):
            # The circuit cannot be transpiled while still unbound.
            with self.assertRaises(QiskitError):
                _ = transpile(circuit, basis_gates=['u', 'cx'], optimization_level=0)

    def test_fully_bound(self):
        circuit = RawFeatureVector(8)
        # Random complex vector, normalized to a valid amplitude vector.
        params = (np.random.random(8) + (1j * np.random.random(8)))
        params /= np.linalg.norm(params)
        bound = circuit.bind_parameters(params)
        # Fully bound circuit must equal a plain initialize() circuit.
        ref = QuantumCircuit(3)
        ref.initialize(params, ref.qubits)
        self.assertEqual(bound, ref)

    def test_partially_bound(self):
        circuit = RawFeatureVector(4)
        params = circuit.ordered_parameters
        with self.subTest('single numeric value'):
            circuit.assign_parameters({params[0]: 0.2}, inplace=True)
            self.assertEqual(len(circuit.parameters), 3)
        with self.subTest('bound to another parameter'):
            # Binding one parameter to another reduces the free count by 1.
            circuit.assign_parameters({params[1]: params[2]}, inplace=True)
            self.assertEqual(len(circuit.parameters), 2)
        with self.subTest('test now fully bound circuit'):
            bound = circuit.assign_parameters({params[2]: 0.4, params[3]: 0.8})
            ref = QuantumCircuit(2)
            # [0.2, 0.4, 0.4, 0.8] is already normalized (sum of squares 1).
            ref.initialize([0.2, 0.4, 0.4, 0.8], ref.qubits)
            self.assertEqual(bound, ref)

    def test_usage_in_vqc(self):
        """End-to-end smoke test: RawFeatureVector as the VQC feature map."""
        feature_dim = 4
        (_, training_input, test_input, _) = wine(training_size=1, test_size=1, n=feature_dim, plot_data=False)
        feature_map = RawFeatureVector(feature_dimension=feature_dim)
        vqc = VQC(COBYLA(maxiter=1), feature_map, EfficientSU2(feature_map.num_qubits, reps=1), training_input, test_input)
        backend = Aer.get_backend('qasm_simulator')
        result = vqc.run(backend)
        # Just check that the optimizer actually evaluated something.
        self.assertTrue((result['eval_count'] > 0))
class DummyEncoderModel(FairseqEncoderModel):
    """Minimal FairseqEncoderModel wrapper around DummyEncoder for tests."""

    def __init__(self, encoder):
        super().__init__(encoder)

    # NOTE(review): takes ``cls`` and builds an instance -- presumably meant
    # to be a @classmethod (decorator lost in extraction?); confirm upstream.
    def build_model(cls, args, task):
        return cls(DummyEncoder())

    def get_logits(self, net_output):
        # Elementwise logit: log(p / (1 - p)) over the encoder outputs.
        return torch.log(torch.div(net_output['encoder_out'], (1 - net_output['encoder_out'])))
def get_op_key(instr):
    """Build an abstract key for a disassembled instruction: the mnemonic
    followed by one category code per operand, joined with underscores, so
    structurally similar instructions map to the same key."""
    def classify(operand):
        # Category codes; the check order matters (e.g. 'sxt' contains 'x',
        # so it must be tested before the generic 'x' register check).
        if operand[0] == '-':
            return -1          # negative immediate
        if operand[:2] == '0x' or operand.isdigit():
            return 0           # numeric immediate
        if '[' in operand:
            return 1           # memory operand
        if 'lsl' in operand:
            return 5           # shifted operand
        if 'sxt' in operand:
            return 6           # sign-extended operand
        if 'uxt' in operand:
            return 7           # zero-extended operand
        if 'x' in operand:
            return 2           # 64-bit register
        if 'w' in operand:
            return 3           # 32-bit register
        return 4               # anything else

    mnemonic, *operand_parts = instr['opcode'].split(' ')
    codes = [mnemonic]
    if operand_parts:
        # Operands may have been split on spaces; glue them back together
        # and split on commas instead.
        for operand in ''.join(operand_parts).split(','):
            codes.append(str(classify(operand)))
    return '_'.join(codes)
# NOTE(review): the bare ``.xfail(...)`` below looks like a mangled
# ``@pytest.mark.xfail`` decorator (extraction stripped the prefix).
.xfail(reason="Remote driver currently doesn't support logs")
def test_no_service_log_path(testdir):
    """service_log_path must be None when the driver_log fixture is
    overridden to return None."""
    file_test = testdir.makepyfile("\n import pytest\n \n def driver_log():\n return None\n\n .nondestructive\n def test_pass(driver_kwargs):\n assert driver_kwargs['service_log_path'] is None\n ")
    testdir.quick_qa(file_test, passed=1)
class Adahessian(torch.optim.Optimizer):
    """AdaHessian optimizer: Adam-like updates with a Hutchinson estimate of
    the Hessian diagonal in the denominator.

    Arguments:
        params: iterable of parameters to optimize
        lr: learning rate (default: 0.1)
        betas: coefficients for the running averages of the gradient and of
            the squared Hessian diagonal (default: (0.9, 0.999))
        eps: term added to the denominator for numerical stability
        weight_decay: decoupled (AdamW-style) weight decay coefficient
        hessian_power: exponent k in [0, 1] applied to the Hessian term
        update_each: recompute the (expensive) Hessian estimate only every
            this many steps (default: 1)
        n_samples: number of Hutchinson samples per estimate (default: 1)
        avg_conv_kernel: average the Hessian trace over conv kernel dims

    Note: gradients must be created with ``backward(create_graph=True)`` so
    Hessian-vector products can be taken.
    """

    def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
        if not 0.0 <= lr:
            raise ValueError(f'Invalid learning rate: {lr}')
        if not 0.0 <= eps:
            raise ValueError(f'Invalid epsilon value: {eps}')
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
        if not 0.0 <= hessian_power <= 1.0:
            raise ValueError(f'Invalid Hessian power value: {hessian_power}')
        self.n_samples = n_samples
        self.update_each = update_each
        self.avg_conv_kernel = avg_conv_kernel
        # Fix: this assignment was truncated ("self.seed =") in this file.
        # Use a fixed seed (as in the upstream AdaHessian implementation) so
        # the Rademacher samples -- and therefore runs -- are reproducible.
        self.seed = 2147483647
        self.generator = torch.Generator().manual_seed(self.seed)
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
        super(Adahessian, self).__init__(params, defaults)
        for p in self.get_params():
            # Hessian trace accumulator; becomes a tensor after the first
            # estimate.
            p.hess = 0.0
            self.state[p]['hessian step'] = 0

    def is_second_order(self):
        """This optimizer consumes second-order information (needs
        create_graph=True on backward)."""
        return True

    def get_params(self):
        """Iterate over all trainable parameters of all param groups."""
        return (p for group in self.param_groups for p in group['params'] if p.requires_grad)

    def zero_hessian(self):
        """Zero the Hessian accumulators that are due for a refresh."""
        for p in self.get_params():
            if not isinstance(p.hess, float) and self.state[p]['hessian step'] % self.update_each == 0:
                p.hess.zero_()

    # Fix: this decorator was mangled to a bare ``_grad()`` call in this
    # file; without no_grad the in-place parameter updates would fail.
    @torch.no_grad()
    def set_hessian(self):
        """Hutchinson estimate of the Hessian diagonal, E[z * (Hz)] with
        Rademacher z, accumulated into ``p.hess``."""
        params = []
        for p in filter(lambda p: p.grad is not None, self.get_params()):
            # Only refresh every `update_each` steps per parameter.
            if self.state[p]['hessian step'] % self.update_each == 0:
                params.append(p)
            self.state[p]['hessian step'] += 1
        if len(params) == 0:
            return
        # Keep the RNG on the same device as the parameters.
        if self.generator.device != params[0].device:
            self.generator = torch.Generator(params[0].device).manual_seed(self.seed)
        grads = [p.grad for p in params]
        for i in range(self.n_samples):
            # Rademacher samples in {-1, +1}.
            zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
            # Hessian-vector products; retain the graph for all but the
            # last sample.
            h_zs = torch.autograd.grad(grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
            for (h_z, z, p) in zip(h_zs, zs, params):
                p.hess += h_z * z / self.n_samples

    # Fix: decorator was mangled to a bare ``_grad()`` call (see above).
    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Arguments:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        self.zero_hessian()
        self.set_hessian()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None or p.hess is None:
                    continue
                if self.avg_conv_kernel and p.dim() == 4:
                    # Average the trace over the spatial kernel dimensions.
                    p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()
                # Decoupled (AdamW-style) weight decay.
                p.mul_(1 - group['lr'] * group['weight_decay'])
                state = self.state[p]
                # Until the first update, state only holds 'hessian step'.
                if len(state) == 1:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_hessian_diag_sq'] = torch.zeros_like(p)
                (exp_avg, exp_hessian_diag_sq) = (state['exp_avg'], state['exp_hessian_diag_sq'])
                (beta1, beta2) = group['betas']
                state['step'] += 1
                exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
                exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                k = group['hessian_power']
                denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                p.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
# NOTE(review): the bare ``.integration`` below looks like a mangled
# ``@pytest.mark.integration`` decorator.
.integration
def test_export_and_import_metadata_df(simple_project):
    """Round-trip: export project metadata as a DataFrame, then re-import."""
    metadata = simple_project.export_metadata(format_type='df', df_kwargs={'index_col': 'field_name', 'dtype': {'text_validation_min': pd.Int64Dtype(), 'text_validation_max': pd.Int64Dtype()}})
    assert (metadata.index.name == 'field_name')
    res = simple_project.import_metadata(to_import=metadata, import_format='df')
    # The fixture project presumably defines 15 metadata fields -- TODO
    # confirm against the fixture.
    assert (res == 15)
def get_random_ddf(args):
    """Build a random Dask DataFrame of ``args.in_parts`` partitions with
    ``args.chunk_size`` rows each, generated by :func:`generate_chunk`."""
    n_rows_total = args.in_parts * args.chunk_size
    chunk_kwargs = {
        # At least one unique value even when the ratio rounds to zero.
        'unique_size': max(int(args.unique_ratio * n_rows_total), 1),
        'gpu': args.type == 'gpu',
    }
    part_specs = [(part_id, args.chunk_size) for part_id in range(args.in_parts)]
    meta = generate_chunk((0, 1), **chunk_kwargs)
    return dd.from_map(generate_chunk, part_specs, meta=meta, enforce_metadata=False, **chunk_kwargs)
class ImageDescription(NamedTuple):
    """Lightweight record describing one dataset image.

    ``original_size`` is presumably (width, height) -- TODO confirm against
    the producer of these records.
    """
    id: int
    file_name: str
    original_size: Tuple[(int, int)]
    # Optional provenance / licensing metadata (COCO- and Flickr-style).
    url: Optional[str] = None
    license: Optional[int] = None
    coco_url: Optional[str] = None
    date_captured: Optional[str] = None
    flickr_url: Optional[str] = None
    flickr_id: Optional[str] = None
    coco_id: Optional[str] = None
def same_pyname(expected, pyname):
    """Return True when *expected* and *pyname* denote the same name.

    Non-imported pynames must match by plain equality; imported names are
    additionally compared by definition location and underlying object.
    """
    if expected is None or pyname is None:
        return False
    if expected == pyname:
        return True
    imported_kinds = (pynames.ImportedModule, pynames.ImportedName)
    # If neither side is an import-style pyname, equality above was the
    # only chance to match.
    if not (isinstance(expected, imported_kinds) or isinstance(pyname, imported_kinds)):
        return False
    same_location = expected.get_definition_location() == pyname.get_definition_location()
    return same_location and expected.get_object() == pyname.get_object()
def playback_results(trackers, sequence):
    """Interactively replay saved tracking results for *sequence*, drawing
    each tracker's predicted bounding boxes frame by frame."""
    plot_draw_styles = get_plot_draw_styles()
    tracker_results = []
    for (trk_id, trk) in enumerate(trackers):
        base_results_path = '{}/{}'.format(trk.results_dir, sequence.name)
        results_path = '{}.txt'.format(base_results_path)
        if os.path.isfile(results_path):
            # Result files may be whitespace- or comma-delimited; try both.
            try:
                pred_bb = torch.tensor(np.loadtxt(str(results_path), dtype=np.float64))
            except:
                pred_bb = torch.tensor(np.loadtxt(str(results_path), delimiter=',', dtype=np.float64))
        else:
            raise Exception('Result not found. {}'.format(results_path))
        tracker_results.append(pred_bb)
    # Stack per-tracker results along dim=1 and convert to nested lists so
    # tracker_results[frame] yields one box per tracker.
    tracker_results = torch.stack(tracker_results, dim=1).tolist()
    tracker_names = [_get_display_name(t) for t in trackers]
    display = Display(len(tracker_results), plot_draw_styles, sequence.name)
    while display.active:
        frame_number = display.frame_number
        image = read_image(sequence.frames[frame_number])
        display.show(image, tracker_results[frame_number], tracker_names)
        time.sleep(0.01)
        # In pause mode poll slowly; otherwise advance to the next frame.
        if (display.pause_mode and (display.frame_number == frame_number)):
            time.sleep(0.1)
        elif (not display.pause_mode):
            display.step()
def make_annotation(word):
    """Turn a raw word into a single Annotation: split off compounds and
    suffixes, annotate each piece, then join the pieces onto the head."""
    pieces = split_initial_compounds([word])
    # The final piece may still carry explicit compound markers.
    pieces = pieces[:-1] + pieces[-1].split(ORTHOGRAPHIC_COMPOUND_MARKER)
    pieces = split_suffix(pieces)
    pieces = split_preannotated_compounds(pieces)
    parts = [Annotation(piece) for piece in pieces]
    head = parts[0]
    for part in parts[1:]:
        head.join(part)
    return head
def create_path(root: Path, path: Path):
    """Create an empty file at ``root / path``, making parents as needed.

    Raises ValueError for absolute paths so test files cannot escape the
    sandbox rooted at *root*.
    """
    if path.is_absolute():
        raise ValueError('Only test using relative paths to prevent leaking outside test environment')
    target = root / path
    if not target.parent.exists():
        target.parent.mkdir(parents=True)
    target.touch()
class Solution():
    def rotateTheBox(self, box: List[List[str]]) -> List[List[str]]:
        """LeetCode 1861: rotate the box 90 degrees clockwise and let the
        stones ('#') fall under gravity; obstacles ('*') stay in place."""
        settled = []
        for row in box:
            width = len(row)
            shifted = ['.'] * width
            landing = width - 1  # right-most free slot in the open segment
            # Scan right-to-left so stones slide toward the right wall
            # (which becomes "down" after rotation).
            for col in range(width - 1, -1, -1):
                cell = row[col]
                if cell == '*':
                    shifted[col] = '*'
                    landing = col - 1
                elif cell == '#':
                    shifted[landing] = '#'
                    landing -= 1
            settled.append(shifted)
        # Clockwise rotation == transpose of the rows taken bottom-to-top.
        return [list(column) for column in zip(*settled[::-1])]
class W_InputPort(W_Port):
    """Abstract input port; concrete subclasses supply the read primitives."""
    errorname = 'input-port'
    # NOTE(review): presumably an RPython attribute declaration (no instance
    # fields at this level) -- confirm.
    _attrs_ = []

    def read(self, n):
        # Read up to n items from the port.
        raise NotImplementedError('abstract class')

    def peek(self):
        # Look at the next item without consuming it.
        raise NotImplementedError('abstract class')

    def readline(self):
        raise NotImplementedError('abstract class')

    def get_read_handler(self):
        raise NotImplementedError('abstract class')

    def set_read_handler(self, handler):
        raise NotImplementedError('abstract class')

    def tostring(self):
        # Printed representation of the port.
        return '#<input-port>'

    def _length_up_to_end(self):
        # Remaining length from the current position to end of input.
        raise NotImplementedError('abstract class')
class LoadTo(SimpleDownloader):
    """Load.to downloader plugin for pyLoad."""
    __name__ = 'LoadTo'
    __type__ = 'downloader'
    __version__ = '0.29'
    __status__ = 'testing'
    # Fix: the pattern literal was truncated (unterminated string) in this
    # file; restored to the plugin's canonical load.to URL pattern.
    __pattern__ = 'http://(?:www\\.)?load\\.to/\\w+'
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Load.to downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('halfman', ''), ('stickell', 'l.')]

    # Regexes applied to the fetched page by the SimpleDownloader machinery.
    NAME_PATTERN = '<h1>(?P<N>.+?)</h1>'
    SIZE_PATTERN = 'Size: (?P<S>[\\d.,]+) (?P<U>[\\w^_]+)'
    OFFLINE_PATTERN = ">Can\\'t find file"
    LINK_FREE_PATTERN = '<form method="post" action="(.+?)"'
    WAIT_PATTERN = 'type="submit" value="Download \\((\\d+)\\)"'
    URL_REPLACEMENTS = [('(\\w)$', '\\1/')]

    def setup(self):
        self.multi_dl = True
        self.chunk_limit = 1

    def handle_free(self, pyfile):
        """Resolve the free-download link, honor the wait timer, solve the
        SolveMedia captcha when present, and start the download."""
        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is None:
            self.error(self._('LINK_FREE_PATTERN not found'))
        self.link = m.group(1)
        m = re.search(self.WAIT_PATTERN, self.data)
        if m is not None:
            # Matched group is the wait time in seconds (as a string).
            self.wait(m.group(1))
        self.captcha = SolveMedia(pyfile)
        captcha_key = self.captcha.detect_key()
        if captcha_key:
            (response, challenge) = self.captcha.challenge(captcha_key)
            self.download(self.link, post={'adcopy_challenge': challenge, 'adcopy_response': response, 'returnUrl': pyfile.url})
# NOTE(review): the bare ``.parametrize(...)`` below looks like a mangled
# ``@pytest.mark.parametrize`` decorator.
.parametrize('kwargs', ({'redirect_to_fallback': False, 'disable_fallback': False}, {'disable_fallback': False, 'redirect_to_fallback': False}))
def test_backwards_compat_kwargs_duplicate_check(kwargs: t.Dict[(str, t.Any)]) -> None:
    """Passing both the legacy and the new spelling of the same option must
    raise ValueError regardless of argument order."""
    with pytest.raises(ValueError) as err:
        pypiserver.backwards_compat_kwargs(kwargs)
    assert ("('redirect_to_fallback', 'disable_fallback')" in str(err.value))
def attribute_list(names=PROPERTY_NAMES, values=PROPERTY_VALUES):
    """Hypothesis strategy for an 'attributeList' node: a tagged tuple with
    up to 4 'attribute' children, each carrying up to 3 name/values
    sub-entries drawn from *names* and *values*."""
    return st.tuples(st.just(ConvertChildrenToText('attributeList')), st.lists(st.tuples((st.just('attribute') | names), st.lists(((st.tuples(st.just('name'), names) | st.tuples(st.just('values'), values)) | st.tuples(names, values)), max_size=3)), max_size=4))
def test_launch_legacy(testdir):
    """Smoke test: launching a remote edge driver via legacy --capability
    options still passes a trivial inner test."""
    file_test = testdir.makepyfile("\n import pytest\n .nondestructive\n def test_pass(webtext):\n assert webtext == u'Success!'\n ")
    testdir.quick_qa('--driver', 'remote', '--capability', 'browserName', 'edge', file_test, passed=1)
class _BusIterator(_objfinalizer.AutoFinalizedObject):
    """Iterates over OpenUSB bus ids; frees the id list on finalization."""

    def __init__(self):
        self.buslist = POINTER(_openusb_busid)()
        num_busids = c_uint32()
        # openusb_get_busid_list allocates the list; it is released in
        # _finalize_object via openusb_free_busid_list.
        _check(_lib.openusb_get_busid_list(_ctx.handle, byref(self.buslist), byref(num_busids)))
        self.num_busids = num_busids.value

    def __iter__(self):
        for i in range(self.num_busids):
            (yield self.buslist[i])

    def _finalize_object(self):
        # __init__ may have failed before buslist was assigned.
        if hasattr(self, 'buslist'):
            _lib.openusb_free_busid_list(self.buslist)
class CDAE(nn.Module):
    """Cross-domain denoising autoencoder: a shared user embedding plus
    per-domain (movie/book) encoder-decoder pairs, with two-layer mapping
    networks translating between the two latent spaces."""

    def __init__(self, NUM_USER, NUM_MOVIE, NUM_BOOK, EMBED_SIZE, dropout, is_sparse=False):
        super(CDAE, self).__init__()
        self.NUM_MOVIE = NUM_MOVIE
        self.NUM_BOOK = NUM_BOOK
        self.NUM_USER = NUM_USER
        self.emb_size = EMBED_SIZE
        self.user_embeddings = nn.Embedding(self.NUM_USER, EMBED_SIZE, sparse=is_sparse)
        # Initialize user embeddings from N(0, 0.01).
        self.user_embeddings.weight.data = torch.from_numpy(np.random.normal(0, 0.01, size=[self.NUM_USER, EMBED_SIZE])).float()
        # Domain X (movies) autoencoder.
        self.encoder_x = nn.Sequential(nn.Linear(self.NUM_MOVIE, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, EMBED_SIZE))
        self.decoder_x = nn.Sequential(nn.Linear(EMBED_SIZE, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, self.NUM_MOVIE))
        # Domain Y (books) autoencoder.
        self.encoder_y = nn.Sequential(nn.Linear(self.NUM_BOOK, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, EMBED_SIZE))
        self.decoder_y = nn.Sequential(nn.Linear(EMBED_SIZE, EMBED_SIZE), nn.ReLU(), nn.Linear(EMBED_SIZE, self.NUM_BOOK))
        # Bias-free two-layer maps between the X and Y latent spaces.
        self.transformer_x_layer1 = nn.Linear(EMBED_SIZE, EMBED_SIZE, bias=False)
        self.transformer_x_layer2 = nn.Linear(EMBED_SIZE, EMBED_SIZE, bias=False)
        self.transformer_y_layer1 = nn.Linear(EMBED_SIZE, EMBED_SIZE, bias=False)
        self.transformer_y_layer2 = nn.Linear(EMBED_SIZE, EMBED_SIZE, bias=False)
        self.dropout = nn.Dropout(dropout)
        # NOTE(review): stores the nn.ReLU *class*, not an instance (missing
        # parentheses); unused elsewhere in this class.
        self.relu = nn.ReLU

    def orthogonal_map(self, z_x, z_y):
        """Map z_x through the X transformer and z_y through the Y one."""
        mapped_z_x = self.transformer_x_layer1(z_x)
        mapped_z_x = self.transformer_x_layer2(F.relu(mapped_z_x))
        mapped_z_y = self.transformer_y_layer1(z_y)
        mapped_z_y = self.transformer_y_layer2(F.relu(mapped_z_y))
        return (mapped_z_x, mapped_z_y)

    def inver_orthogonal_map(self, z_x, z_y):
        """Inverse direction: z_x through the Y transformer and vice versa."""
        mapped_z_x = self.transformer_y_layer1(z_x)
        mapped_z_x = self.transformer_y_layer2(F.relu(mapped_z_x))
        mapped_z_y = self.transformer_x_layer1(z_y)
        mapped_z_y = self.transformer_x_layer2(F.relu(mapped_z_y))
        return (mapped_z_x, mapped_z_y)

    def forward(self, batch_user, batch_user_x, batch_user_y):
        """Encode both domains (with dropout), add the user embedding, and
        return in-domain and cross-domain reconstructions plus L1 cycle
        regularization terms."""
        h_user_x = self.encoder_x(self.dropout(batch_user_x))
        h_user_y = self.encoder_y(self.dropout(batch_user_y))
        h_user = self.user_embeddings(batch_user)
        feature_x = torch.add(h_user_x, h_user)
        feature_y = torch.add(h_user_y, h_user)
        z_x = F.relu(feature_x)
        z_y = F.relu(feature_y)
        # In-domain reconstructions.
        preds_x = self.decoder_x(z_x)
        preds_y = self.decoder_y(z_y)
        # Cross-domain reconstructions through the latent-space maps.
        (mapped_z_x, mapped_z_y) = self.orthogonal_map(z_x, z_y)
        preds_x2y = self.decoder_y(mapped_z_x)
        preds_y2x = self.decoder_x(mapped_z_y)
        # Cycle-consistency: mapping out and back should preserve z.
        (z_x_, z_y_) = self.inver_orthogonal_map(z_x, z_y)
        z_x_reg_loss = torch.norm((z_x - z_x_), p=1, dim=1)
        z_y_reg_loss = torch.norm((z_y - z_y_), p=1, dim=1)
        return (preds_x, preds_y, preds_x2y, preds_y2x, feature_x, feature_y, z_x_reg_loss, z_y_reg_loss)

    def get_user_embedding(self, batch_user_x, batch_user_y):
        """Return the per-domain encoder outputs (before adding the shared
        user embedding)."""
        h_user_x = self.encoder_x(self.dropout(batch_user_x))
        h_user_y = self.encoder_y(self.dropout(batch_user_y))
        return (h_user_x, h_user_y)
class RandomGoalAntEnv(AntEnv):
    """Ant locomotion task with a randomly sampled goal position.

    The reward combines a goal term ('dense' distance-based or 'sparse'
    reached/not-reached), an optional velocity bonus, a survive bonus, and
    control / contact penalties.
    """

    # NOTE(review): the following argument descriptors were decorator lines
    # (presumably rllab ``@autoargs.arg(...)``) mangled by extraction; kept
    # as comments so the class remains syntactically valid:
    #   ('ctrl_cost_coeff', type=float, help='cost coefficient for controls')
    #   ('survive_reward', type=float, help='bonus reward for being alive')
    #   ('contact_cost_coeff', type=float, help='cost coefficient for contact')

    def __init__(self, reward_type='dense', terminate_at_goal=True, goal_reward_weight=0.3, goal_radius=0.25, goal_distance=5, goal_angle_range=(0, (2 * np.pi)), velocity_reward_weight=0, ctrl_cost_coeff=0.01, contact_cost_coeff=0.001, survive_reward=0.05, fixed_goal_position=None, *args, **kwargs):
        assert (reward_type in REWARD_TYPES)
        self._reward_type = reward_type
        self.terminate_at_goal = terminate_at_goal
        self.goal_reward_weight = goal_reward_weight
        self.goal_radius = goal_radius
        self.goal_distance = goal_distance
        self.goal_angle_range = goal_angle_range
        self.velocity_reward_weight = velocity_reward_weight
        self.ctrl_cost_coeff = ctrl_cost_coeff
        self.contact_cost_coeff = contact_cost_coeff
        self.survive_reward = survive_reward
        # Fix: the parameter was accepted but never stored, so the getattr
        # in log_diagnostics always fell back to its default.  Assign only
        # when provided to preserve the original False fallback.
        if fixed_goal_position is not None:
            self.fixed_goal_position = fixed_goal_position
        MujocoEnv.__init__(self, *args, **kwargs)
        Serializable.quick_init(self, locals())

    def reset(self, goal_position=None, *args, **kwargs):
        """Reset the environment, sampling a goal on a circle of radius
        ``goal_distance`` unless an explicit goal_position is given."""
        if goal_position is None:
            goal_position = random_point_in_circle(angle_range=self.goal_angle_range, radius=self.goal_distance)
        self.goal_position = goal_position
        # Move the 'target' marker geom (if present) to the new goal.
        if 'target' in self.model.geom_names:
            goal_geom_idx = self.model.geom_names.index('target')
            new_geom_pos = self.model.geom_pos.copy()
            new_geom_pos[goal_geom_idx] = np.concatenate([goal_position, [0]])
            self.model.geom_pos = new_geom_pos
            new_geom_size = self.model.geom_size.copy()
            new_geom_size[goal_geom_idx] = np.array([self.goal_radius, 0, 0])
            self.model.geom_size = new_geom_size
        return super().reset(*args, **kwargs)

    def get_current_obs(self):
        """Proprioceptive observation, extended with the goal position when
        the goal reward is active (zeros otherwise, to keep the shape)."""
        proprioceptive_observation = super().get_current_obs()
        if self.goal_reward_weight > 0:
            exteroceptive_observation = self.goal_position
        else:
            exteroceptive_observation = np.zeros_like(self.goal_position)
        observation = np.concatenate([proprioceptive_observation, exteroceptive_observation]).reshape((- 1))
        return observation

    def step(self, action):
        """Advance the simulation one step and compute the shaped reward."""
        self.forward_dynamics(action)
        xy_position = self.get_body_com('torso')[:2]
        goal_distance = np.linalg.norm((xy_position - self.goal_position))
        goal_reached = (goal_distance < self.goal_radius)
        if self.goal_reward_weight > 0:
            if self._reward_type == 'dense':
                goal_reward = (np.max(self.goal_distance) - goal_distance) * self.goal_reward_weight
            elif self._reward_type == 'sparse':
                goal_reward = int(goal_reached) * self.goal_reward_weight
        else:
            goal_reward = 0
        if self.velocity_reward_weight > 0:
            xy_velocities = self.get_body_comvel('torso')[:2]
            velocity_reward = self.velocity_reward_weight * np.linalg.norm(xy_velocities)
        else:
            velocity_reward = 0
        if self.ctrl_cost_coeff > 0:
            # Normalize actions by half the action range before penalizing.
            (lb, ub) = self.action_bounds
            scaling = (ub - lb) * 0.5
            ctrl_cost = (0.5 * self.ctrl_cost_coeff) * np.sum(np.square(action / scaling))
        else:
            ctrl_cost = 0
        if self.contact_cost_coeff > 0:
            # Fix: a stray trailing comma previously made this a 1-tuple,
            # which broke the scalar reward arithmetic below.
            contact_cost = (0.5 * self.contact_cost_coeff) * np.sum(np.square(np.clip(self.model.data.cfrc_ext, (- 1), 1)))
        else:
            contact_cost = 0
        reward = goal_reward + velocity_reward + self.survive_reward - ctrl_cost - contact_cost
        # Terminate on non-finite state, torso height outside [0.2, 1.0],
        # or (optionally) when the goal is reached.
        is_healthy = (np.isfinite(self._state).all() and (0.2 <= self._state[2] <= 1.0))
        done = ((not is_healthy) or (self.terminate_at_goal and goal_reached))
        next_observation = self.get_current_obs()
        info = {'goal_position': self.goal_position}
        return Step(next_observation, reward, done, **info)

    def log_diagnostics(self, paths, *args, **kwargs):
        """Record goal-related statistics for the given rollout paths."""
        logs = get_random_goal_logs(paths, self.goal_radius, fixed_goal_position=getattr(self, 'fixed_goal_position', False))
        for row in logs:
            logger.record_tabular(*row)
class Model(nn.Module):
    """Composite model: embedding -> (optional subword encoder) -> encoder
    -> target head returning loss information."""

    def __init__(self, args, embedding, encoder, target, subencoder=None):
        super(Model, self).__init__()
        self.embedding = embedding
        self.encoder = encoder
        self.target = target
        if (subencoder is not None):
            # Vocabulary handles needed to map tokens to subword ids.
            (self.vocab, self.sub_vocab) = (args.vocab, args.sub_vocab)
            self.subword_type = args.subword_type
            self.subencoder = subencoder
        else:
            self.subencoder = None

    def forward(self, src, tgt, seg):
        # src/tgt/seg: source-token, target and segment tensors; exact
        # shapes are defined by the embedding/target modules (not visible
        # here).
        emb = self.embedding(src, seg)
        if (self.subencoder is not None):
            # Add subword-level representations onto the token embeddings.
            sub_ids = word2sub(src, self.vocab, self.sub_vocab, self.subword_type)
            emb = (emb + self.subencoder(sub_ids).contiguous().view(*emb.size()))
        output = self.encoder(emb, seg)
        loss_info = self.target(output, tgt)
        return loss_info
class TestLibraryError(BaseTestCase):
    """LibraryError.from_exception must classify common dlopen/ctypes error
    messages into friendlier diagnostics."""

    def test_from_exception_not_found(self):
        exc = errors.LibraryError.from_exception(ValueError('visa.dll: image not found'), 'visa.dll')
        assert ('File not found' in str(exc))

    def test_from_exception_wrong_arch(self):
        exc = errors.LibraryError.from_exception(ValueError('visa.dll: no suitable image found. no matching architecture'), 'visa.dll')
        assert ('No matching architecture' in str(exc))

    def test_from_exception_wrong_filetype(self):
        # 'no suitable image found' without an architecture hint.
        exc = errors.LibraryError.from_exception(ValueError('visa.dll: no suitable image found.'), 'visa.dll')
        assert ('Could not determine filetype' in str(exc))

    def test_from_exception_wrong_ELF(self):
        exc = errors.LibraryError.from_exception(ValueError('visa.dll: wrong ELF class'), 'visa.dll')
        assert ('No matching architecture' in str(exc))

    def test_from_exception_random(self):
        # Unrecognized message falls back to the generic wording.
        exc = errors.LibraryError.from_exception(ValueError('visa.dll'), 'visa.dll')
        assert ('Error while accessing' in str(exc))

    def test_from_exception_decode_error(self):
        # An exception whose __str__ itself raises (here: the decode of an
        # invalid ASCII byte fails before the raise executes) must still
        # produce the generic message.
        class DummyExc(Exception):
            def __str__(self):
                raise b'\xff'.decode('ascii')
        exc = errors.LibraryError.from_exception(DummyExc('visa.dll: wrong ELF class'), 'visa.dll')
        assert ('Error while accessing visa.dll.' == str(exc))
class StructTypeSpecifier(object):
    """AST node for a C ``struct``/``union`` type specifier."""

    def __init__(self, is_union, tag, declarations):
        self.is_union = is_union          # True -> 'union', False -> 'struct'
        self.tag = tag                    # optional identifier after keyword
        self.declarations = declarations  # optional member declarations

    def __repr__(self):
        pieces = ['union' if self.is_union else 'struct']
        if self.tag:
            pieces.append(' %s' % self.tag)
        if self.declarations:
            members = '; '.join(repr(decl) for decl in self.declarations)
            pieces.append(' {%s}' % members)
        return ''.join(pieces)
def _consolidate_replicated_chunked_tensor_entries(rank_to_entries: List[Dict[(str, Entry)]]) -> List[Dict[(str, Entry)]]:
    """Merge per-rank chunks of replicated ChunkedTensorEntry objects.

    For every logical path whose entry is replicated and chunked, gather the
    chunks from all ranks into one merged entry (chunks sorted by offsets)
    and write that merged entry back into every rank's mapping in place.
    Returns the (mutated) input list.
    """
    groups: Dict[(str, List[ChunkedTensorEntry])] = defaultdict(list)
    for entries in rank_to_entries:
        for (logical_path, entry) in entries.items():
            if (is_replicated_entry(entry) and isinstance(entry, ChunkedTensorEntry)):
                groups[logical_path].append(entry)
    for (logical_path, group) in groups.items():
        # dtype/shape come from the first rank's entry; chunks from all.
        merged = ChunkedTensorEntry(dtype=group[0].dtype, shape=group[0].shape, chunks=sorted((chunk for entry in group for chunk in entry.chunks), key=(lambda chunk: chunk.offsets)), replicated=True)
        for entries in rank_to_entries:
            entries[logical_path] = merged
    return rank_to_entries
class BasicClosureCompiler(ClosureCompiler):
    """Compiles generated CodeBuilder source into a closure via exec(),
    registering the source in linecache so tracebacks show real lines."""

    def _make_source_builder(self, builder: CodeBuilder) -> CodeBuilder:
        # Wrap the generated body in a zero-argument factory function.
        main_builder = CodeBuilder()
        main_builder += 'def _closure_maker():'
        with main_builder:
            main_builder.extend(builder)
        return main_builder

    def _compile(self, source: str, unique_filename: str, namespace: Dict[(str, Any)]):
        code_obj = compile(source, unique_filename, 'exec')
        local_namespace: Dict[(str, Any)] = {}
        exec(code_obj, namespace, local_namespace)
        # Register the source under its unique filename so inspect and
        # traceback machinery can resolve the generated lines.
        linecache.cache[unique_filename] = (len(source), None, source.splitlines(keepends=True), unique_filename)
        # Call the factory to obtain the actual closure.
        return local_namespace['_closure_maker']()

    def _get_unique_id(self, base_filename: str) -> str:
        # Disambiguate repeated compilations of the same base filename.
        idx = _counter.generate_idx(base_filename)
        if (idx == 0):
            return base_filename
        return f'{base_filename} {idx}'

    def compile(self, base_filename: str, filename_maker: Callable[([str], str)], builder: CodeBuilder, namespace: Dict[(str, Any)]) -> Callable:
        """Build, uniquely name, and execute the closure source; return the
        resulting callable."""
        source = self._make_source_builder(builder).string()
        unique_id = self._get_unique_id(base_filename)
        return self._compile(source, filename_maker(unique_id), namespace)
class Queries():
    """Mapping of qid -> question text, loadable from a dict or a file.

    When full QA records are available they are kept in ``self._qas``;
    otherwise that attribute is removed again.
    """

    def __init__(self, path=None, data=None):
        self.path = path
        if data:
            assert isinstance(data, dict), type(data)
        # Load from in-memory data first; fall back to the file path.
        (self._load_data(data) or self._load_file(path))

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data.items())

    def provenance(self):
        return self.path

    def toDict(self):
        return {'provenance': self.provenance()}

    def _load_data(self, data):
        """Populate self.data (and self._qas for dict-valued entries) from
        an in-memory mapping; returns True on success, None if data is None."""
        if (data is None):
            return None
        self.data = {}
        self._qas = {}
        for (qid, content) in data.items():
            if isinstance(content, dict):
                self.data[qid] = content['question']
                self._qas[qid] = content
            else:
                self.data[qid] = content
        # Drop the QA side-table when no full records were present.
        if (len(self._qas) == 0):
            del self._qas
        return True

    def _load_file(self, path):
        """Load queries from a file: .json lines with full QA records, or a
        delegated loader for anything else."""
        if (not path.endswith('.json')):
            self.data = load_queries(path)
            return True
        self.data = {}
        self._qas = {}
        with open(path) as f:
            for line in f:
                qa = ujson.loads(line)
                assert (qa['qid'] not in self.data)
                self.data[qa['qid']] = qa['question']
                self._qas[qa['qid']] = qa
        return self.data

    def qas(self):
        # Shallow copy of the full QA records.
        return dict(self._qas)

    def __getitem__(self, key):
        return self.data[key]

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    def save(self, new_path):
        """Write qid<TAB>question lines to a new .tsv file; returns the
        opened file's name."""
        assert new_path.endswith('.tsv')
        assert (not os.path.exists(new_path)), new_path
        with Run().open(new_path, 'w') as f:
            for (qid, content) in self.data.items():
                content = f'''{qid}	{content}
'''
                f.write(content)
            return f.name

    def save_qas(self, new_path):
        """Write the full QA records as JSON lines to a new .json file."""
        assert new_path.endswith('.json')
        assert (not os.path.exists(new_path)), new_path
        with open(new_path, 'w') as f:
            for (qid, qa) in self._qas.items():
                qa['qid'] = qid
                f.write((ujson.dumps(qa) + '\n'))

    def _load_tsv(self, path):
        raise NotImplementedError

    def _load_jsonl(self, path):
        raise NotImplementedError

    # NOTE(review): takes ``cls`` and is used as an alternate constructor --
    # presumably meant to be a @classmethod (decorator lost in extraction?).
    def cast(cls, obj):
        if (type(obj) is str):
            return cls(path=obj)
        if (isinstance(obj, dict) or isinstance(obj, list)):
            return cls(data=obj)
        if (type(obj) is cls):
            return obj
        assert False, f'obj has type {type(obj)} which is not compatible with cast()'
def circ_vtest(angles, dir=0.0, w=None, d=None):
    """V-test for circular nonuniformity against a known mean direction.

    Parameters
    ----------
    angles : array-like
        Sample of angles in radians.
    dir : float
        Hypothesized mean direction (radians).
    w : array-like, optional
        Per-angle weights (e.g. bin counts); must match ``angles`` in length.
    d : optional
        Bin spacing correction forwarded to ``circ_r`` when weights are used.

    Returns
    -------
    (v, pval) : the V statistic and its one-sided p-value.
    """
    angles = np.asarray(angles)
    if w is None:
        n = len(angles)
        r = circ_r(angles)
        mu = circ_mean(angles)
    else:
        assert len(angles) == len(w), 'Input dimensions do not match'
        n = np.sum(w)
        r = circ_r(angles, w, d)
        mu = circ_mean(angles, w)
    # Project the resultant vector R = n*r onto the hypothesized direction.
    v = (n * r) * np.cos(mu - dir)
    # Standardize and compare against the normal distribution (one-sided).
    u = v * np.sqrt(2 / n)
    pval = 1 - norm.cdf(u)
    return (v, pval)
def test_transform_radians():
    """Round-trip a point between geocentric (ECEF) and geodetic coordinates
    with ``radians=True``; the legacy '+init=' syntax raises FutureWarning."""
    ecef_point = ((- 2704026.01), (- 4253051.81), 3895878.82)
    lonlat_point = ((- 2.), 0., (- 20.))
    with pytest.warns(FutureWarning):
        WGS84 = pyproj.Proj('+init=EPSG:4326')
    ECEF = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
    with pytest.warns(FutureWarning):
        assert_almost_equal(
            pyproj.transform(ECEF, WGS84, *ecef_point, radians=True),
            lonlat_point)
        assert_almost_equal(
            pyproj.transform(WGS84, ECEF, *lonlat_point, radians=True),
            ecef_point)
def build_model(opt, dicts):
    """Build a translation model from parsed options and vocabularies.

    A handful of architecture-wide settings are propagated through
    ``onmt.constants`` first, because layer constructors read their
    configuration from those globals during model build.

    Raises NotImplementedError when ``opt.fusion`` is set — fusion models
    are not supported by this code path.
    """
    opt = backward_compatible(opt)
    # Globals consulted by layer implementations at construction time.
    onmt.constants.layer_norm = opt.layer_norm
    onmt.constants.weight_norm = opt.weight_norm
    onmt.constants.activation_layer = opt.activation_layer
    onmt.constants.version = 1.0
    onmt.constants.attention_out = opt.attention_out
    onmt.constants.residual_type = opt.residual_type
    if opt.fusion:
        # BUG FIX: the original had `model = build_fusion(opt, dicts)` sitting
        # unreachably AFTER this raise; the dead statement has been removed.
        raise NotImplementedError
    model = build_tm_model(opt, dicts)
    return model
def _parse_yaml_area_file(area_file_name, *regions):
    """Build AreaDefinitions from a YAML area file.

    When *regions* names are given, only those areas are parsed; otherwise
    every area defined in the file is returned. Raises AreaNotFound for a
    requested name that is missing (or has an empty body) in the file.
    """
    defined_areas = _read_yaml_area_file_content(area_file_name)
    requested = regions or defined_areas.keys()
    result = []
    for name in requested:
        params = defined_areas.get(name)
        if params is None:
            raise AreaNotFound('Area "{0}" not found in file "{1}"'.format(name, area_file_name))
        result.append(_create_area_def_from_dict(name, params))
    return result
def test_replace_component_list_of_foo_by_real():
    """Replace every Foo_shamt in the wrapper's component list with
    Real_shamt, in random order, then tick two cycles to confirm the
    rebuilt design still simulates."""
    foo_wrap = Foo_shamt_list_wrap(32)
    foo_wrap.elaborate()
    # Random replacement order guards against order-dependent bugs.
    order = list(range(5))
    random.shuffle(order)
    for i in order:
        foo_wrap.replace_component(foo_wrap.inner[i], Real_shamt)
        # Re-run the sim pass after each swap so the schedule stays valid.
        # NOTE(review): assumed to sit inside the loop — indentation was
        # ambiguous in this copy; confirm against upstream.
        simple_sim_pass(foo_wrap)
    print()
    # Drive two inputs through the rebuilt design and trace the outputs.
    foo_wrap.in_ = Bits32(16)
    foo_wrap.tick()
    print(foo_wrap.line_trace())
    foo_wrap.in_ = Bits32(4)
    foo_wrap.tick()
    print(foo_wrap.line_trace())
_config
def test_toggle_max(manager):
    """Maximizing a tiled window floats it at full-screen size; toggling
    again restores the original tiled geometry."""

    def assert_window(**expected):
        # Compare selected keys of the focused window's info dict.
        info = manager.c.window.info()
        for key, value in expected.items():
            assert info[key] == value

    manager.c.next_layout()
    assert len(manager.c.layout.info()['stacks']) == 2
    manager.test_window('two')
    manager.test_window('one')
    assert manager.c.group.info()['focus'] == 'one'
    # Initial tiled geometry of the focused window.
    assert_window(width=398, height=578)
    assert_window(float_info={'y': 0, 'x': 400, 'width': 100, 'height': 100})
    assert_window(x=400, y=0)
    manager.c.window.toggle_maximize()
    # Maximized: floating, covering (almost) the whole screen.
    assert manager.c.window.info()['floating'] is True
    assert manager.c.window.info()['maximized'] is True
    assert_window(width=800, height=580, x=0, y=0)
    manager.c.window.toggle_maximize()
    # Restored: back in the tiled layout at the original geometry.
    assert manager.c.window.info()['floating'] is False
    assert manager.c.window.info()['maximized'] is False
    assert_window(width=398, height=578, x=400, y=0)
.skip
.allow_bad_gc
def test_background_plotter_export_vtkjs(qtbot, tmpdir, plotting):
    """Export a scene through BackgroundPlotter's Qt vtkjs export dialog.

    Handles both modern pyvista (``export_vtksz`` exists; file saved with the
    exact '.vtksz' name we choose) and older releases (``export_vtkjs``
    appends the '.vtkjs' extension itself).
    """
    output_dir = str(tmpdir.mkdir('tmpdir'))
    assert os.path.isdir(output_dir)
    plotter = BackgroundPlotter(show=False, off_screen=False, title='Testing Window')
    assert_hasattr(plotter, 'app_window', MainWindow)
    window = plotter.app_window
    qtbot.addWidget(window)
    # BUG FIX: was `if (not False):` — a tautology left over from templating;
    # the window starts hidden (show=False), so assert that directly.
    assert not window.isVisible()
    with qtbot.wait_exposed(window):
        window.show()
    assert window.isVisible()
    plotter.add_mesh(pyvista.Sphere())
    assert_hasattr(plotter, 'renderer', Renderer)
    renderer = plotter.renderer
    assert len(renderer._actors) == 1
    assert np.any(plotter.mesh.points)
    dlg = plotter._qt_export_vtkjs(show=False)
    qtbot.addWidget(dlg)
    if hasattr(plotter, 'export_vtksz'):
        # Newer pyvista keeps the filename exactly as selected.
        ext = '.vtksz'
        filename = str(os.path.join(output_dir, f'tmp{ext}'))
    else:
        # Older pyvista appends '.vtkjs' itself, so select a bare name.
        # (Also fixed: was the pointless f-string f'tmp' with no placeholder.)
        ext = '.vtkjs'
        filename = str(os.path.join(output_dir, 'tmp'))
    dlg.selectFile(filename)
    assert not dlg.isVisible()
    with qtbot.wait_exposed(dlg):
        dlg.show()
    assert dlg.isVisible()
    with qtbot.wait_signals([dlg.dlg_accepted], timeout=1000):
        dlg.accept()
    assert not dlg.isVisible()
    plotter.close()
    assert not window.isVisible()
    if hasattr(plotter, 'export_vtksz'):
        assert os.path.isfile(filename)
    else:
        assert os.path.isfile(filename + ext)
()
def update_flight_traffic_fill():
    """Recompute each live paid flight's traffic distribution.

    For every live, paid flight with views, aggregate impression counts from
    the replica database and store each publisher's / country's / region's
    share of the flight's total views in ``flight.traffic_fill``. Only the
    top ``max_objects`` slices per breakdown are considered, and slices below
    ``threshold`` of total views are dropped.
    """
    max_objects = 20
    threshold = 0.01
    log.info('Updating flight traffic fill')
    for flight in Flight.objects.filter(live=True, campaign__campaign_type=PAID_CAMPAIGN, total_views__gt=0):
        publisher_traffic_fill = {}
        country_traffic_fill = {}
        region_traffic_fill = {}
        # Publisher breakdown: share of the flight's views per publisher slug.
        for imp in AdImpression.objects.using(settings.REPLICA_SLUG).filter(advertisement__flight=flight).values('publisher__slug').annotate(publisher_views=Sum('views')).order_by('-publisher_views')[:max_objects]:
            publisher_slug = imp['publisher__slug']
            publisher_percentage = (imp['publisher_views'] / flight.total_views)
            if (publisher_percentage >= threshold):
                publisher_traffic_fill[publisher_slug] = publisher_percentage
        # Region breakdown.
        for imp in RegionImpression.objects.using(settings.REPLICA_SLUG).filter(advertisement__flight=flight).values('region').annotate(region_views=Sum('views')).order_by('-region_views')[:max_objects]:
            region = imp['region']
            region_percentage = (imp['region_views'] / flight.total_views)
            if (region_percentage >= threshold):
                region_traffic_fill[region] = region_percentage
        # Country breakdown.
        for imp in GeoImpression.objects.using(settings.REPLICA_SLUG).filter(advertisement__flight=flight).values('country').annotate(country_views=Sum('views')).order_by('-country_views')[:max_objects]:
            country_code = imp['country']
            country_percentage = (imp['country_views'] / flight.total_views)
            if (country_percentage >= threshold):
                country_traffic_fill[country_code] = country_percentage
        # Reload before writing — presumably so concurrent changes to other
        # fields aren't clobbered by the save below; confirm against callers.
        flight.refresh_from_db()
        if (not flight.traffic_fill):
            flight.traffic_fill = {}
        flight.traffic_fill['publishers'] = publisher_traffic_fill
        flight.traffic_fill['countries'] = country_traffic_fill
        flight.traffic_fill['regions'] = region_traffic_fill
        flight.save()
    log.info('Completed updating flight traffic fill')
def task_create_random_data(node: Annotated[(PickleNode, Product)]=data_catalog['data']) -> None:
    """Generate a reproducible linear dataset (y = 2x + noise) and persist it
    as a DataFrame via the data catalog's pickle node."""
    rng = np.random.default_rng(0)  # fixed seed for reproducibility
    beta = 2
    predictor = rng.normal(loc=5, scale=10, size=1000)
    noise = rng.standard_normal(1000)
    response = (beta * predictor) + noise
    node.save(pd.DataFrame({'x': predictor, 'y': response}))
def train(args, epoch, train_data, device, model, criterion, optimizer):
    """Run one training epoch over ``train_data`` (a tqdm-wrapped loader).

    For each batch a random path through the supernet is sampled
    (single-path one-shot NAS), gradients are clipped to ``args.grad_clip``,
    and running loss / top-1 / top-5 are reported via ``set_postfix``.
    """
    model.train()
    train_loss = 0.0
    top1 = AvgrageMeter()
    top5 = AvgrageMeter()
    for (step, (inputs, targets)) in enumerate(train_data):
        (inputs, targets) = (inputs.to(device), targets.to(device))
        optimizer.zero_grad()
        # Sample a random architecture choice for this batch.
        choice = random_choice(path_num=len(args.kernels), m=args.m, layers=args.layers)
        outputs = model(inputs, choice)
        loss = criterion(outputs, targets)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        (prec1, prec5) = accuracy(outputs, targets, topk=(1, 5))
        n = inputs.size(0)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        optimizer.step()
        train_loss += loss.item()
        # BUG FIX: the original wrapped this string in `{...}`, accidentally
        # building a one-element set; pass the formatted string directly.
        postfix = 'train loss: {:.6}, train top1: {:.6}, train top5: {:.6}'.format(
            (train_loss / (step + 1)), top1.avg, top5.avg)
        train_data.set_postfix(log=postfix)
class AudioFormatTestCase(unittest.TestCase):
    """Unit tests for AudioFormat equality, byte sizing, alignment and repr."""

    def test_equality_true(self):
        # Two formats built from identical parameters compare equal.
        self.assertEqual(AudioFormat(2, 8, 44100), AudioFormat(2, 8, 44100))

    def test_equality_false(self):
        # Every distinct parameter combination must compare unequal.
        combos = [(c, s, r)
                  for c in [1, 2]
                  for s in [8, 16]
                  for r in [11025, 22050, 44100]]
        formats = [AudioFormat(*combo) for combo in combos]
        while formats:
            candidate = formats.pop()
            for other in formats:
                self.assertNotEqual(candidate, other)

    def test_bytes_per(self):
        mono8 = AudioFormat(1, 8, 22050)
        stereo16 = AudioFormat(2, 16, 44100)
        self.assertEqual(mono8.bytes_per_frame, 1)
        self.assertEqual(mono8.bytes_per_second, 22050)
        self.assertEqual(stereo16.bytes_per_frame, 4)  # 2 ch * 2 bytes
        self.assertEqual(stereo16.bytes_per_second, 176400)

    def test_alignment(self):
        fmt = AudioFormat(2, 16, 44100)  # 4-byte frames
        # align() rounds down to a frame boundary.
        for requested, expected in ((2049, 2048), (2048, 2048), (2047, 2044),
                                    (0, 0), ((- 1), (- 4))):
            self.assertEqual(fmt.align(requested), expected)
        # align_ceil() rounds up to a frame boundary.
        for requested, expected in ((2049, 2052), (2048, 2048), (2047, 2048),
                                    (0, 0), ((- 1), 0)):
            self.assertEqual(fmt.align_ceil(requested), expected)

    def test_repr(self):
        self.assertEqual(
            repr(AudioFormat(1, 8, 22050)),
            'AudioFormat(channels=1, sample_size=8, sample_rate=22050)')
        self.assertEqual(
            repr(AudioFormat(2, 16, 44100)),
            'AudioFormat(channels=2, sample_size=16, sample_rate=44100)')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.