code stringlengths 281 23.7M |
|---|
def process_routes(watch_url_routes, iter_track_time):
    """Probe each watched URL route and collect the unreachable ones.

    Each entry of ``watch_url_routes`` is a sequence whose first item is a
    URL; an optional second item is an ``Authorization`` header value. The
    total time spent probing is recorded under
    ``iter_track_time['watch_routes']`` (only when there are routes).

    Returns:
        list of URLs that were not reachable.
    """
    unreachable = []
    if watch_url_routes:
        started = time.time()
        for route in watch_url_routes:
            headers = {'Accept': 'application/json'}
            if len(route) > 1:
                headers['Authorization'] = route[1]
            if not is_url_available(route[0], headers):
                unreachable.append(route[0])
        iter_track_time['watch_routes'] = time.time() - started
    return unreachable
class StyleForm(forms.Form):
    """Form for editing the visual style (background / gradient) of a page.

    All fields are optional; colour fields are edited through ColorWidget,
    and the gradient direction choices come from the Petition model.
    """
    # Flat background colour.
    bgcolor = forms.CharField(widget=ColorWidget, required=False)
    # CSS linear-gradient direction; choices defined on the model.
    linear_gradient_direction = forms.ChoiceField(choices=Petition.LINEAR_GRADIENT_CHOICES, required=False)
    # Gradient endpoint colours.
    gradient_from = forms.CharField(widget=ColorWidget, required=False)
    gradient_to = forms.CharField(widget=ColorWidget, required=False)
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Serialize *data* to YAML while preserving OrderedDict key order.

    Works like ``yaml.dump`` but registers a representer on a throwaway
    subclass of *Dumper* so that ``OrderedDict`` instances are emitted as
    plain mappings in insertion order rather than python-specific tagged
    objects.
    """
    class _KeyOrderDumper(Dumper):
        pass

    _KeyOrderDumper.add_representer(
        OrderedDict,
        lambda dumper, mapping: dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, mapping.items()
        ),
    )
    return yaml.dump(data, stream, _KeyOrderDumper, **kwds)
class CIFARDIAResNet(nn.Module):
    """DIA-ResNet classifier for CIFAR-sized inputs.

    Builds a 3x3-conv stem followed by stages of DIAResUnit blocks; every
    unit within a stage shares a single DIAAttention module.

    Args:
        channels: per-stage lists of unit output channels.
        init_block_channels: channels produced by the stem.
        bottleneck: whether units use the bottleneck variant.
        in_channels: input image channels (3 for RGB).
        in_size: spatial input size; the fixed 8x8 final pool assumes the
            32x32 default downsampled twice.
        num_classes: number of classification outputs.
    """

    def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10):
        super(CIFARDIAResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            # return_two=False: the stage emits only the data tensor, not the
            # attention hidden state.
            stage = DualPathSequential(return_two=False)
            # One attention module is shared by all units of this stage.
            attention = DIAAttention(in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0])
            for (j, out_channels) in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except stage 0.
                stride = (2 if ((j == 0) and (i != 0)) else 1)
                stage.add_module('unit{}'.format((j + 1)), DIAResUnit(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False, attention=attention))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        # 32x32 input after two stride-2 stages -> 8x8 map, pooled to 1x1.
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=8, stride=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for conv weights; zero biases.
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten to (batch, channels) for the linear classifier.
        x = x.view(x.size(0), (- 1))
        x = self.output(x)
        return x
class upResBlock_3x3(nn.Module):
    """Upsampling residual block: 2x bilinear upsample, then a 3x3-conv main
    path summed with a 1x1-conv skip path, followed by norm + activation.

    Args:
        in_c: input channels.
        out_c: output channels.
        hid_c: hidden channels of the main path (defaults to ``in_c``).
        conv2d: conv layer factory (defaults to ``nn.Conv2d``).
        norm_layer: normalization factory (defaults to project ``Identity``).
        non_linear: activation module (defaults to ``nn.LeakyReLU()``).
    """

    def __init__(self, in_c, out_c, hid_c=None, conv2d=None, norm_layer=None, non_linear=None):
        super(upResBlock_3x3, self).__init__()
        if hid_c is None:
            hid_c = in_c
        if conv2d is None:
            conv2d = nn.Conv2d
        if norm_layer is None:
            norm_layer = Identity
        if non_linear is None:
            non_linear = nn.LeakyReLU()
        self.__build_block(in_c, out_c, hid_c, conv2d, norm_layer, non_linear)

    def __build_block(self, in_c, out_c, hid_c, conv2d, norm_layer, non_linear):
        # Main path: conv -> norm -> act -> conv.
        self.main_path = nn.Sequential(
            conv2d(in_c, hid_c, kernel_size=3, padding=1, stride=1),
            norm_layer(hid_c),
            non_linear,
            conv2d(hid_c, out_c, kernel_size=3, padding=1, stride=1),
        )
        # 1x1 projection so the residual sum is shape-compatible.
        self.side_path = conv2d(in_c, out_c, kernel_size=1, padding=0, stride=1)
        self.output_layer = nn.Sequential(norm_layer(out_c), non_linear)

    def forward(self, input_var):
        # F.upsample is deprecated; F.interpolate is the drop-in replacement
        # with identical defaults for this call.
        input_var = F.interpolate(input_var, scale_factor=2, mode='bilinear')
        output = self.main_path(input_var)
        res_out = self.side_path(input_var)
        final_output = self.output_layer(res_out + output)
        return final_output
def repartition_range(tables: List[pa.Table], destination_partition: Partition, repartition_args: dict, max_records_per_output_file: int, s3_table_writer_kwargs: Optional[Dict[(str, Any)]]=None, repartitioned_file_content_type: ContentType=ContentType.PARQUET, deltacat_storage=unimplemented_deltacat_storage, deltacat_storage_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs):
    """Repartition *tables* into value-range buckets on a single column.

    ``repartition_args`` must supply ``column`` (the column to bucket on)
    and ``ranges`` (inner boundary values). Boundaries are padded with the
    int64 min/max sentinels so every value falls into exactly one half-open
    bucket ``(lower, upper]``; rows whose int64 cast is null are routed to
    the first bucket. Each non-empty bucket is staged as one delta.

    Raises:
        ValueError: if no ranges are given, or *column* is missing from any table.

    Returns:
        RepartitionResult carrying the staged range deltas.
    """
    if deltacat_storage_kwargs is None:
        deltacat_storage_kwargs = {}
    column: str = repartition_args['column']
    partition_ranges: List = repartition_args['ranges']
    if len(partition_ranges) == 0:
        raise ValueError('No partition ranges specified')
    if not all((column in table.column_names) for table in tables):
        raise ValueError(f'Column {column} does not exist in the table')
    partition_ranges.sort()
    # Pad with sentinels so the first/last buckets are unbounded.
    partition_ranges = ([SIGNED_INT64_MIN_VALUE] + partition_ranges) + [SIGNED_INT64_MAX_VALUE]
    partitioned_tables_list = [[] for _ in range(len(partition_ranges) - 1)]
    total_record_count = 0
    # Bucket on an int64 cast of the column, stored under a collision-free
    # temporary column name.
    col_name_int64 = f'{column}_int64'
    col_name_int64 = generate_unique_name(col_name_int64, tables[0].schema.names)
    for table in tables:
        total_record_count += len(table)
        table_new = table.add_column(0, pa.field(col_name_int64, pa.int64()), pc.cast(table[column], pa.int64()))
        null_row_table = table_new.filter(pc.field(col_name_int64).is_null())
        for (i, (lower_limit, upper_limit)) in enumerate(zip(partition_ranges[:(- 1)], partition_ranges[1:]), start=0):
            # Half-open bucket: lower < value <= upper.
            partitioned_tables_list[i].append(table_new.filter(((pc.field(col_name_int64) > pc.scalar(lower_limit)) & (pc.field(col_name_int64) <= pc.scalar(upper_limit)))))
            if i == 0:
                # Nulls never satisfy a range predicate; keep them in bucket 0.
                partitioned_tables_list[i].append(null_row_table)
    partition_table_length = 0
    partition_deltas: List[Delta] = []
    for partition_tables in partitioned_tables_list:
        if len(partition_tables) > 0:
            # (A leftover debug print of the dropped column name was removed here.)
            partition_table: pa.Table = pa.concat_tables(partition_tables).drop([col_name_int64])
            assert col_name_int64 not in partition_table.schema.names
            if len(partition_table) > 0:
                partition_table_length += len(partition_table)
                partition_delta: Delta = deltacat_storage.stage_delta(partition_table, destination_partition, max_records_per_entry=max_records_per_output_file, content_type=repartitioned_file_content_type, s3_table_writer_kwargs=s3_table_writer_kwargs, **deltacat_storage_kwargs)
                partition_deltas.append(partition_delta)
    assert partition_table_length == total_record_count, f'Repartitioned table should have the same number of records {partition_table_length} as the original table {total_record_count}'
    return RepartitionResult(range_deltas=partition_deltas)
def get_node_ancestors(synset):
    """Collect every ancestor reachable through ``parents`` links.

    Iterative traversal starting from the node's direct parents; already
    visited nodes are never re-queued, so shared ancestors (DAGs) are
    handled without revisiting.

    Returns:
        set of all transitive parent nodes of *synset*.
    """
    seen = set()
    frontier = set(synset.parents)
    while frontier:
        node = frontier.pop()
        seen.add(node)
        frontier |= set(node.parents) - seen
    return seen
def connectToNamedPipeViaPrinter(subPipeName='toto'):
    """Ask the local print spooler to connect to a local named pipe.

    Opens a handle on the local print server via RpcOpenPrinter, then
    registers a remote change notification pointing at
    ``\\\\127.0.0.1/pipe/<subPipeName>`` — this causes the spooler to open
    that pipe (the MS-RPRN "printer bug" coercion pattern; presumably used
    to capture the service's authentication — confirm against caller).

    Returns:
        True on success, False if either RPC call fails.
    """
    accessRequired = 0
    hPrinter = PVOID()
    targetServer = '\\\\{0}'.format('127.0.0.1')
    targetServer = create_unicode_buffer(targetServer)
    configBuffer = create_string_buffer(8192)
    devModeContainer = cast(configBuffer, POINTER(DEVMODE_CONTAINER))
    # NOTE(review): these assignments target the ctypes POINTER object itself,
    # not devModeContainer.contents -- confirm they actually write the
    # intended structure fields.
    devModeContainer.cbBuf = 0
    devModeContainer.pDevMode = None
    status = RpcOpenPrinter(targetServer, byref(hPrinter), None, devModeContainer, accessRequired)
    if (status != RPC_S_OK):
        logging.error('Impossible to retrieve a handle for the local printer: {0}'.format(getLastErrorMessage()))
        return False
    logging.debug('Handle to the local printer object is retrieved')
    # Target pipe path the spooler will be made to connect to.
    captureServer = '\\\\{0}/pipe/{1}'.format('127.0.0.1', subPipeName)
    captureServer = create_unicode_buffer(captureServer)
    status = RpcRemoteFindFirstPrinterChangeNotificationEx(hPrinter, PRINTER_CHANGE_ADD_JOB, 0, captureServer, 0, None)
    if (status != RPC_S_OK):
        logging.error('Impossible to create a remote change notification object. Named piped accessible ?: {0}'.format(getLastErrorMessage()))
        return False
    # Best effort cleanup: a failed close is logged but not treated as failure.
    status = RpcClosePrinter(byref(hPrinter))
    if (status != RPC_S_OK):
        logging.warning('Impossible to close the handle to the printer object: {0}'.format(getLastErrorMessage()))
    else:
        logging.debug('Handle to the printer object is closed')
    return True
class _TfDatasetIterable(Iterable[tf.Tensor]):
    """Expose a TF1 Dataset as a plain Python iterable.

    The iterator ops are built once in the dataset's own graph; each call
    to ``__iter__`` opens a fresh Session, re-runs the initializer (which
    rewinds the dataset), and yields values until exhaustion. Only one
    live iterator is allowed at a time, guarded by ``_is_busy``.
    """

    def __init__(self, dataset: tf.compat.v1.data.Dataset):
        # Reuse the graph the dataset was defined in so its ops resolve.
        self._graph = dataset._graph
        with self._graph.as_default():
            self._tf_dataset_iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
            self._get_next = self._tf_dataset_iterator.get_next()
        self._is_busy = False

    def __iter__(self) -> '_IteratorWrapper':
        # Refuse concurrent iteration: the underlying TF iterator is shared.
        if self._is_busy:
            raise RuntimeError
        self._is_busy = True
        def cleanup_fn():
            # Invoked by _IteratorWrapper when iteration ends or is abandoned.
            self._is_busy = False
        iterator = _IteratorWrapper(self._make_new_iterator(), cleanup_fn)
        return iterator

    def _make_new_iterator(self) -> Iterator[tf.Tensor]:
        # One session per pass; the initializer rewinds the dataset.
        with tf.compat.v1.Session(graph=self._graph) as sess:
            sess.run(self._tf_dataset_iterator.initializer)
            while True:
                try:
                    (yield sess.run(self._get_next))
                except tf.errors.OutOfRangeError:
                    # Dataset exhausted.
                    break

    def is_busy(self):
        # True while an iterator produced by __iter__ is still active.
        return self._is_busy
class SawyerHandlePullEnvV2(SawyerXYZEnv):
    """Meta-World v2 task: pull a handle up to a target height.

    Observation layout used by this class: obs[3] is the gripper openness
    and obs[4:7] is the handle position (the 'handleRight' site).
    """

    def __init__(self):
        # Workspace bounds for the hand, and randomization ranges for the
        # object (box) position and the goal.
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.8, (- 0.001))
        obj_high = (0.1, 0.9, (+ 0.001))
        goal_low = ((- 0.1), 0.55, 0.04)
        goal_high = (0.1, 0.7, 0.18)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.9, 0.0]), 'hand_init_pos': np.array((0, 0.6, 0.2))}
        self.goal = np.array([0, 0.8, 0.14])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    # NOTE(review): upstream this is very likely decorated with @property
    # (it is passed as self.model_name above) -- confirm against source.
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_handle_press.xml')

    # NOTE(review): the bare name below is almost certainly a decorator that
    # lost its "@" prefix during extraction (likely "@_assert_task_is_set").
    _assert_task_is_set
    def evaluate_state(self, obs, action):
        # Success/diagnostic metrics derived from the shaped reward terms.
        obj = obs[4:7]
        (reward, tcp_to_obj, tcp_open, obj_to_target, grasp_reward, in_place_reward) = self.compute_reward(action, obs)
        info = {'success': float((obj_to_target <= self.TARGET_RADIUS)), 'near_object': float((tcp_to_obj <= 0.05)), 'grasp_success': float(((tcp_open > 0) and ((obj[2] - 0.03) > self.obj_init_pos[2]))), 'grasp_reward': grasp_reward, 'in_place_reward': in_place_reward, 'obj_to_target': obj_to_target, 'unscaled_reward': reward}
        return (reward, info)

    # NOTE(review): likely @property upstream as well -- confirm.
    def _target_site_config(self):
        return []

    def _get_pos_objects(self):
        return self._get_site_pos('handleRight')

    def _get_quat_objects(self):
        # Orientation is not tracked for this object.
        return np.zeros(4)

    def _set_obj_xyz(self, pos):
        # Write the handle joint position (qpos index 9) and zero its velocity.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        self._reset_hand()
        self.obj_init_pos = (self._get_state_rand_vec() if self.random_init else self.init_config['obj_init_pos'])
        self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
        # Start with the handle pressed down.
        self._set_obj_xyz((- 0.1))
        self._target_pos = self._get_site_pos('goalPull')
        return self._get_obs()

    def compute_reward(self, action, obs):
        """Shaped reward: grasp term x in-place term, with bonuses when the
        handle is grasped/lifted and a fixed 10.0 on success."""
        obj = obs[4:7]
        target = self._target_pos.copy()
        # Only the vertical (z) distance to the target matters for this task.
        target_to_obj = abs((target[2] - obj[2]))
        target_to_obj_init = abs((target[2] - self.obj_init_pos[2]))
        in_place = reward_utils.tolerance(target_to_obj, bounds=(0, self.TARGET_RADIUS), margin=target_to_obj_init, sigmoid='long_tail')
        object_grasped = self._gripper_caging_reward(action, obj, pad_success_thresh=0.05, obj_radius=0.022, object_reach_radius=0.01, xz_thresh=0.01, high_density=True)
        reward = reward_utils.hamacher_product(object_grasped, in_place)
        tcp_opened = obs[3]
        tcp_to_obj = np.linalg.norm((obj - self.tcp_center))
        # Bonus once the gripper is close, open(ed) on the handle, and the
        # handle has been lifted above its initial height.
        if ((tcp_to_obj < 0.035) and (tcp_opened > 0) and ((obj[2] - 0.01) > self.obj_init_pos[2])):
            reward += (1.0 + (5.0 * in_place))
        if (target_to_obj < self.TARGET_RADIUS):
            reward = 10.0
        return (reward, tcp_to_obj, tcp_opened, target_to_obj, object_grasped, in_place)
def get_backend_class(connection: str) -> type['testinfra.backend.base.BaseBackend']:
    """Resolve a connection-type name to its backend class.

    Looks up *connection* in the BACKENDS registry (name -> dotted class
    path), imports the module, and returns the class object.

    Raises:
        RuntimeError: if *connection* is not a registered backend name.
    """
    try:
        classpath = BACKENDS[connection]
    except KeyError:
        # Suppress the uninformative KeyError context deliberately
        # (PEP 3134 "raise ... from None"); the message carries the detail.
        raise RuntimeError(f"Unknown connection type '{connection}'") from None
    module, name = classpath.rsplit('.', 1)
    return getattr(importlib.import_module(module), name)
def _find_imbalance_tables(sharding_options: List[ShardingOption], target_imbalance: str='perf') -> List[ShardingOption]:
    """Find sharding options whose shards cover every maximally-loaded rank.

    Aggregates per-rank load — perf total or HBM storage, depending on
    ``target_imbalance`` ('perf' or 'hbm') — identifies the rank(s) with the
    maximum load, and returns the options placed on a superset of those
    ranks (excluding data-parallel and row-wise options, which cannot be
    moved to rebalance), sorted by first-shard load descending.

    Raises:
        ValueError: for an unknown ``target_imbalance`` value.
    """
    rank_to_target_stats: Dict[(int, float)] = {}
    # Accumulate the chosen load metric per rank.
    for sharding_option in sharding_options:
        for shard in sharding_option.shards:
            rank = cast(int, shard.rank)
            if (rank not in rank_to_target_stats):
                rank_to_target_stats[rank] = 0
            if (target_imbalance == 'perf'):
                rank_to_target_stats[rank] += cast(Perf, shard.perf).total
            elif (target_imbalance == 'hbm'):
                rank_to_target_stats[rank] += cast(Storage, shard.storage).hbm
            else:
                raise ValueError(f'Unknown target imbalance {target_imbalance}')
    # With at most one rank there is nothing to rebalance.
    if (len(rank_to_target_stats.values()) <= 1):
        return []
    max_value = max(rank_to_target_stats.values())
    max_value_ranks = {rank for (rank, value) in rank_to_target_stats.items() if (value == max_value)}
    tables_in_max_value_ranks: List[ShardingOption] = []
    for sharding_option in sharding_options:
        sharding_option_ranks = [shard.rank for shard in sharding_option.shards]
        # Keep options that span all of the hottest ranks and are movable.
        if ((set(sharding_option_ranks) >= max_value_ranks) and (sharding_option.sharding_type not in [ShardingType.DATA_PARALLEL.value, ShardingType.ROW_WISE.value])):
            tables_in_max_value_ranks.append(sharding_option)
    # Heaviest tables first, by the same metric used for the imbalance.
    if (target_imbalance == 'perf'):
        tables_in_max_value_ranks.sort(key=(lambda sharding_option: sharding_option.shards[0].perf.total), reverse=True)
    elif (target_imbalance == 'hbm'):
        tables_in_max_value_ranks.sort(key=(lambda sharding_option: sharding_option.shards[0].storage.hbm), reverse=True)
    else:
        raise ValueError(f'Unknown target imbalance {target_imbalance}')
    return tables_in_max_value_ranks
class Adaptor(a_base.Base):
    """Adaptor entry point; understands composite job ids of the form
    ``[<backend url>]-[<native job id>]``."""

    def __init__(self):
        a_base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
        # Matches "[<backend url>]-[<native job id>]".
        self.id_re = re.compile('^\\[(.*)\\]-\\[(.*?)\\]$')
        self.epoch = datetime.datetime(1970, 1, 1)

    def sanity_check(self):
        # Nothing to verify for this adaptor.
        pass

    def parse_id(self, id):
        """Split a composite job id into ``(backend_url, native_id)``.

        Raises BadParameter when the id does not match the expected shape.
        """
        parsed = self.id_re.match(id)
        if parsed is None or len(parsed.groups()) != 2:
            raise rse.BadParameter("Cannot parse job id '%s'" % id)
        return (parsed.group(1), parsed.group(2))
# NOTE(review): the bare name below is almost certainly a decorator that lost
# its "@" prefix during extraction (e.g. "@register_model") -- confirm
# against the original source.
_model
def resnest50d(pretrained=False, **kwargs):
    """Build a ResNeSt-50d: deep stem (width 32), average-pool downsampling,
    and ResNeSt bottleneck blocks with radix-2 split attention.
    """
    model_kwargs = dict(block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False), **kwargs)
    return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs)
def simulation_ordered_grouped_hubbard_terms_with_info(hubbard_hamiltonian):
    """Order Hubbard-Hamiltonian terms for a swap-network simulation step.

    Appears to assume a square lattice with two qubits (spins) per site:
    side_length is derived as sqrt(n_qubits / 2). Runs two staggered sweeps
    over an interleaved initial ordering and concatenates the per-step
    results from ``stagger_with_info``.

    Returns:
        (ordered_terms, ordered_indices, ordered_is_hopping_operator):
        terms in simulation order, the qubit-index sets they act on, and
        flags marking hopping terms.
    """
    hamiltonian = normal_ordered(hubbard_hamiltonian)
    n_qubits = count_qubits(hamiltonian)
    side_length = int(numpy.sqrt((n_qubits / 2.0)))
    ordered_terms = []
    ordered_indices = []
    ordered_is_hopping_operator = []
    # Start from the identity ordering, then pre-swap alternating adjacent
    # pairs (offset alternates every other row-pair) to set up the sweeps.
    original_ordering = list(range(n_qubits))
    for i in range(0, (n_qubits - side_length), (2 * side_length)):
        for j in range((2 * bool((i % (4 * side_length)))), (2 * side_length), 4):
            (original_ordering[(i + j)], original_ordering[((i + j) + 1)]) = (original_ordering[((i + j) + 1)], original_ordering[(i + j)])
    input_ordering = list(original_ordering)
    # First sweep: alternating even/odd staggered layers.
    parities = ([False, True] * int(((side_length / 2) + 1)))
    for parity in parities:
        results = stagger_with_info(hubbard_hamiltonian, input_ordering, parity)
        (terms_in_step, indices_in_step, is_hopping_operator_in_step) = results
        ordered_terms.extend(terms_in_step)
        ordered_indices.extend(indices_in_step)
        ordered_is_hopping_operator.extend(is_hopping_operator_in_step)
    # Second sweep: reset to the original ordering and start with the
    # opposite parity.
    input_ordering = list(original_ordering)
    parities = ([True] + ([False, True] * int((side_length / 2))))
    for parity in parities:
        results = stagger_with_info(hubbard_hamiltonian, input_ordering, parity)
        (terms_in_step, indices_in_step, is_hopping_operator_in_step) = results
        ordered_terms.extend(terms_in_step)
        ordered_indices.extend(indices_in_step)
        ordered_is_hopping_operator.extend(is_hopping_operator_in_step)
    return (ordered_terms, ordered_indices, ordered_is_hopping_operator)
# NOTE(review): the bare name below is almost certainly a decorator that lost
# its "@" prefix during extraction (e.g. "@register_model") -- confirm.
_model
def efficientnet_b3_pruned(pretrained=False, **kwargs):
    """Build a pruned EfficientNet-B3 (channel x1.2, depth x1.4) with
    TF-style batch-norm epsilon and 'same' padding."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet('efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, pretrained=pretrained, **kwargs)
    return model
class QuantAct(nn.Module):
    """Quantization-aware activation layer.

    Tracks the running min/max of activations (EMA with momentum
    ``act_range_momentum``) during training and, when ``quant_mode`` is on,
    symmetrically fake-quantizes the input to ``activation_bit`` bits.

    Args:
        activation_bit: quantization bit width.
        act_range_momentum: EMA momentum for the range; -1 means running
            min/max instead of an EMA.
        per_channel: must be False (per-channel is unimplemented).
        channel_len: reserved for a future per-channel mode; unused.
        quant_mode: if False, forward only tracks ranges and passes through.
    """

    def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
        super().__init__()
        self.activation_bit = activation_bit
        self.act_range_momentum = act_range_momentum
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.percentile = False
        self.act_function = SymmetricQuantFunction.apply
        if (not self.per_channel):
            # Range and scale live in buffers so they persist in state_dict.
            self.register_buffer('x_min', torch.zeros(1))
            self.register_buffer('x_max', torch.zeros(1))
            self.register_buffer('act_scaling_factor', torch.zeros(1))
            # Nudge the initial range off exact zero so forward() can detect
            # the "not yet initialized" state.
            self.x_min -= 1e-05
            self.x_max += 1e-05
        else:
            raise NotImplementedError('per-channel mode is not currently supported for activation.')

    def __repr__(self):
        return f'{self.__class__.__name__}(activation_bit={self.activation_bit}, quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, Act_max: {self.x_max.item():.2f})'

    def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None):
        # Optionally fold a residual branch into the activation being ranged.
        x_act = (x if (identity is None) else (identity + x))
        if self.training:
            assert (not self.percentile), 'percentile mode is not currently supported for activation.'
            assert (not self.per_channel), 'per-channel mode is not currently supported for activation.'
            x_min = x_act.data.min()
            x_max = x_act.data.max()
            assert ((x_max.isnan().sum() == 0) and (x_min.isnan().sum() == 0)), 'NaN detected when computing min/max of the activation'
            # First batch: range still at its +/-1e-05 initialization.
            if ((self.x_min.min() > (- 1.1e-05)) and (self.x_max.max() < 1.1e-05)):
                self.x_min = (self.x_min + x_min)
                self.x_max = (self.x_max + x_max)
            elif (self.act_range_momentum == (- 1)):
                # Momentum -1: track the absolute running min/max.
                self.x_min = torch.min(self.x_min, x_min)
                self.x_max = torch.max(self.x_max, x_max)
            else:
                # Exponential moving average of the range.
                self.x_min = ((self.x_min * self.act_range_momentum) + (x_min * (1 - self.act_range_momentum)))
                self.x_max = ((self.x_max * self.act_range_momentum) + (x_max * (1 - self.act_range_momentum)))
        if (not self.quant_mode):
            return (x_act, None)
        # Callers may override the tracked range.
        x_min = (self.x_min if (specified_min is None) else specified_min)
        x_max = (self.x_max if (specified_max is None) else specified_max)
        self.act_scaling_factor = symmetric_linear_quantization_params(self.activation_bit, x_min, x_max, per_channel=self.per_channel)
        if (pre_act_scaling_factor is None):
            # Input is not yet quantized: quantize directly.
            quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)
        else:
            # Input already carries a scale: requantize via fixed-point multiply.
            quant_act_int = FixedPointMul.apply(x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor)
        correct_output_scale = self.act_scaling_factor.view((- 1))
        return ((quant_act_int * correct_output_scale), self.act_scaling_factor)
class ExtractFeature(nn.Module):
    """ResNet-18 backbone producing multi-scale image features.

    forward() returns:
      * lower_feature: concat of layer1 and 2x-upsampled layer2 maps.
      * higher_feature: concat of layer3 and 2x-upsampled layer4 maps.
      * solo_feature: spatially averaged layer4 projected to ``embed_dim``.
    """

    def __init__(self, opt={}, finetune=True):
        # NOTE(review): mutable default for ``opt`` -- shared across calls;
        # only safe here because it is read, never mutated.
        super(ExtractFeature, self).__init__()
        self.embed_dim = opt['embed']['embed_dim']
        self.resnet = resnet18(pretrained=True)
        # finetune=False freezes the backbone weights.
        for param in self.resnet.parameters():
            param.requires_grad = finetune
        # NOTE(review): pool_2x2 and up_sample_4 are never used in forward();
        # they look like leftovers (removing them would change state/attrs,
        # so they are kept).
        self.pool_2x2 = nn.MaxPool2d(4)
        self.up_sample_2 = nn.Upsample(scale_factor=2, mode='nearest')
        self.up_sample_4 = nn.Upsample(scale_factor=4, mode='nearest')
        self.linear = nn.Linear(in_features=512, out_features=self.embed_dim)

    def forward(self, img):
        # Stem.
        x = self.resnet.conv1(img)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)
        # Four residual stages.
        f1 = self.resnet.layer1(x)
        f2 = self.resnet.layer2(f1)
        f3 = self.resnet.layer3(f2)
        f4 = self.resnet.layer4(f3)
        # Fuse adjacent scales by upsampling the deeper map to match.
        f2_up = self.up_sample_2(f2)
        lower_feature = torch.cat([f1, f2_up], dim=1)
        f4_up = self.up_sample_2(f4)
        higher_feature = torch.cat([f3, f4_up], dim=1)
        # Global average pool of the deepest map, then a linear projection.
        feature = f4.view(f4.shape[0], 512, (- 1))
        solo_feature = self.linear(torch.mean(feature, dim=(- 1)))
        return (lower_feature, higher_feature, solo_feature)
def update():
    """Advance the animation one tick.

    Applies a small sinusoidal wobble (+/-1.5 degrees) to one mirror pair at
    a time, alternating pairs every 4*pi of phase, then steps the global
    phase.
    """
    global phase
    wobble = 1.5 * np.sin(phase)
    if (phase % (8 * np.pi)) > (4 * np.pi):
        m1['angle'] = 315 + wobble
        m1a['angle'] = 315 + wobble
    else:
        m2['angle'] = 135 + wobble
        m2a['angle'] = 135 + wobble
    phase += 0.2
def load_w2v_feature(file, max_idx=0):
    """Load a word2vec-style text embedding file into a float32 array.

    The first line holds "<count> <dim>"; each subsequent line holds an
    integer index followed by <dim> floats. The table is zero-initialized
    to max(count, max_idx + 1) rows and grown further if a line's index
    exceeds that, so missing indices yield zero vectors.

    Args:
        file: path to the embedding file.
        max_idx: minimum highest index the output must cover.

    Returns:
        np.ndarray of shape (num_rows, dim), dtype float32; an empty file
        yields an empty array (previously this raised NameError).
    """
    feature = []  # initialized up front: fixes NameError on an empty file
    d = 0
    with open(file, 'rb') as f:
        for nu, line in enumerate(f, start=1):
            content = line.strip().split()
            if nu == 1:
                # Header line: vocabulary size and embedding dimension.
                n, d = int(content[0]), int(content[1])
                feature = [[0.0] * d for _ in range(max(n, max_idx + 1))]
                continue
            index = int(content[0])
            # Grow the table if the file contains indices beyond the header count.
            while len(feature) <= index:
                feature.append([0.0] * d)
            for i, x in enumerate(content[1:]):
                feature[index][i] = float(x)
    for item in feature:
        assert len(item) == d
    return np.array(feature, dtype=np.float32)
# NOTE(review): the bare ".unit()" below looks like a pytest marker that lost
# its "@pytest.mark" prefix during extraction -- confirm against upstream.
.unit()
def test_print_collected_tasks_with_nodes(capsys):
    """The collected-tasks listing should show module, task, dependency and
    product entries when node display is enabled."""
    dictionary = {Path('task_path.py'): [Task(base_name='function', path=Path('task_path.py'), function=function, depends_on={'depends_on': PathNode(name='in.txt', path=Path('in.txt'))}, produces={0: PathNode(name='out.txt', path=Path('out.txt'))})]}
    _print_collected_tasks(dictionary, True, 'file', Path())
    captured = capsys.readouterr().out
    assert ('<Module task_path.py>' in captured)
    assert ('<Function task_path.py::function>' in captured)
    assert ('<Dependency in.txt>' in captured)
    assert ('<Product out.txt>' in captured)
def test_backjumps_after_partial_satisfier(root: ProjectPackage, provider: Provider, repo: Repository) -> None:
    """Solver backjumping past a partial satisfier.

    c 2.0.0 pulls in a and b, whose x constraints conflict with the root's
    y ^2.0.0 requirement (x 1.0.0 needs y 1.0.0); the expected resolution
    falls back to c 1.0.0 with y 2.0.0, and the tries=4 bound checks that
    the solver backjumps instead of exhaustively retrying.
    """
    root.add_dependency(Factory.create_dependency('c', '*'))
    root.add_dependency(Factory.create_dependency('y', '^2.0.0'))
    add_to_repo(repo, 'a', '1.0.0', deps={'x': '>=1.0.0'})
    add_to_repo(repo, 'b', '1.0.0', deps={'x': '<2.0.0'})
    add_to_repo(repo, 'c', '1.0.0')
    add_to_repo(repo, 'c', '2.0.0', deps={'a': '*', 'b': '*'})
    add_to_repo(repo, 'x', '0.0.0')
    add_to_repo(repo, 'x', '1.0.0', deps={'y': '1.0.0'})
    add_to_repo(repo, 'x', '2.0.0')
    add_to_repo(repo, 'y', '1.0.0')
    add_to_repo(repo, 'y', '2.0.0')
    check_solver_result(root, provider, {'c': '1.0.0', 'y': '2.0.0'}, tries=4)
def test_parses(parses):
    """Each fixture function should parse into its expected logic AST.

    For every (function, expected_tree) pair: parse the function source,
    locate its def node, run the logic-expression visitor over it, and
    compare the resulting tree's return node with the expectation.
    """
    finder = FunDefFindingVisitor()
    for (f, tree) in parses:
        (globs, astree) = parse_object(f)
        fundef = finder.visit(astree)
        parser = LogicExpressionASTVisitor(globs)
        ptree = parser.visit(fundef)
        # Printed output is captured by pytest and aids debugging on failure.
        print(ptree, tree)
        assert (ptree.return_node == tree)
# NOTE(review): the two bare lines below are almost certainly decorators that
# lost their "@"/prefix during extraction (e.g. a py38 skip marker and
# "@pytest.mark.flaky") -- confirm against upstream.
_if_py38
.flaky
def test_default_transformer_epoch_optim_loop(optim_asset_loader):
    """Multi-epoch model optimization should reproduce the recorded
    transformer parameters for the default epoch-optim-loop asset."""
    asset = optim_asset_loader('default_transformer_epoch_optim_loop')
    image_loader = asset.input.image_loader
    criterion = asset.input.perceptual_loss
    # Regression assets were recorded pre-torch-1.6; patch for compatibility.
    make_torch_ge_1_6_compatible(image_loader, criterion)
    transformer = asset.input.transformer
    optimizer = asset.params.get_optimizer(transformer)
    lr_scheduler = asset.params.get_lr_scheduler(optimizer)
    transformer = optim.multi_epoch_model_optimization(image_loader, transformer, criterion, asset.input.criterion_update_fn, asset.input.epochs, optimizer=optimizer, lr_scheduler=lr_scheduler, quiet=True)
    actual = tuple(transformer.parameters())
    desired = tuple(asset.output.transformer.parameters())
    ptu.assert_allclose(actual, desired, rtol=0.0001)
def parse_ieee_block_header(block: Union[(bytes, bytearray)], length_before_block: Optional[int]=None, raise_on_late_block: bool=False) -> Tuple[(int, int)]:
    """Parse the header of an IEEE 488.2 binary block.

    A definite-length block looks like ``#<n><len digits><data>``; an
    indefinite-length block starts with ``#0``.

    Args:
        block: raw bytes starting at (or shortly before) the header.
        length_before_block: how far into *block* the ``#`` may legitimately
            appear before a warning/error is produced.
        raise_on_late_block: raise instead of warning for a late ``#``.

    Returns:
        (offset of the first data byte, data length) — data length is -1
        for an indefinite-length block.

    Raises:
        ValueError: if no ``#`` marker is present.
        RuntimeError: if the marker is late and ``raise_on_late_block``.
    """
    begin = block.find(b'#')
    if begin < 0:
        raise ValueError('Could not find hash sign (#) indicating the start of the block. The block begin by %r' % block[:25])
    length_before_block = length_before_block or DEFAULT_LENGTH_BEFORE_BLOCK
    if begin > length_before_block:
        msg = ('The beginning of the block has been found at %d which is an unexpectedly large value. The actual block may have been missing a beginning marker but the block contained one:\n%s' % (begin, repr(block)))
        if raise_on_late_block:
            raise RuntimeError(msg)
        warnings.warn(msg, UserWarning)
    # Digit right after '#': number of length digits; non-digit -> treat as 0.
    try:
        header_length = int(block[begin + 1:begin + 2])
    except ValueError:
        header_length = 0
    offset = begin + 2 + header_length
    data_length = int(block[begin + 2:offset]) if header_length > 0 else -1
    return (offset, data_length)
class AsyncEnum(ENUM):
    """Async-capable variant of the ENUM type: issues CREATE TYPE / DROP TYPE
    through an awaitable bind.

    Mirrors the synchronous table/metadata hooks but awaits
    ``bind.status(...)`` with CreateEnumType / DropEnumType DDL elements.
    """

    async def create_async(self, bind=None, checkfirst=True):
        # Create the enum type; with checkfirst, skip if it already exists.
        if ((not checkfirst) or (not (await bind.dialect.has_type(bind, self.name, schema=self.schema)))):
            (await bind.status(CreateEnumType(self)))

    async def drop_async(self, bind=None, checkfirst=True):
        # Drop the enum type; with checkfirst, only if it exists.
        if ((not checkfirst) or (await bind.dialect.has_type(bind, self.name, schema=self.schema))):
            (await bind.status(DropEnumType(self)))

    async def _on_table_create_async(self, target, bind, checkfirst=False, **kw):
        # NOTE(review): this condition's shape ("checkfirst or (...)") differs
        # from the drop hook's plain and-chain below -- confirm the asymmetry
        # is intentional (it resembles the sync implementation's pattern).
        if (checkfirst or (((not self.metadata) and (not kw.get('_is_metadata_operation', False))) and (not self._check_for_name_in_memos(checkfirst, kw)))):
            (await self.create_async(bind=bind, checkfirst=checkfirst))

    async def _on_table_drop_async(self, target, bind, checkfirst=False, **kw):
        # Only drop when the type is table-local and not already handled by a
        # metadata-level operation.
        if ((not self.metadata) and (not kw.get('_is_metadata_operation', False)) and (not self._check_for_name_in_memos(checkfirst, kw))):
            (await self.drop_async(bind=bind, checkfirst=checkfirst))

    async def _on_metadata_create_async(self, target, bind, checkfirst=False, **kw):
        if (not self._check_for_name_in_memos(checkfirst, kw)):
            (await self.create_async(bind=bind, checkfirst=checkfirst))

    async def _on_metadata_drop_async(self, target, bind, checkfirst=False, **kw):
        if (not self._check_for_name_in_memos(checkfirst, kw)):
            (await self.drop_async(bind=bind, checkfirst=checkfirst))
class MonolingualDataset(FairseqDataset):
    """Language-modeling dataset wrapping token sequences.

    Depending on ``targets`` (subset of {'self', 'future', 'past'}), each
    sample's target is built from the source itself, the next-token target,
    and/or the previous-token target. Targets from a different vocabulary
    are mapped through ``tgt_vocab`` with unknowns replaced by unk.
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab=None, add_eos_for_other_targets=False, shuffle=False, targets=None, add_bos_token=False, fixed_pad_length=None, pad_to_bsz=None, src_lang_idx=None, tgt_lang_idx=None):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = src_vocab
        # Targets share the source vocabulary unless one is given explicitly.
        self.tgt_vocab = (tgt_vocab or src_vocab)
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle
        self.add_bos_token = add_bos_token
        self.fixed_pad_length = fixed_pad_length
        self.pad_to_bsz = pad_to_bsz
        self.src_lang_idx = src_lang_idx
        self.tgt_lang_idx = tgt_lang_idx
        assert ((targets is None) or all(((t in {'self', 'future', 'past'}) for t in targets))), "targets must be none or one of 'self', 'future', 'past'"
        if ((targets is not None) and (len(targets) == 0)):
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        if (self.targets is not None):
            # The underlying dataset yields (source, future, past) triples
            # when targets are requested.
            (source, future_target, past_target) = self.dataset[index]
            (source, target) = self._make_source_target(source, future_target, past_target)
        else:
            source = self.dataset[index]
            target = None
        (source, target) = self._maybe_add_bos(source, target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        """Assemble the requested target tensor(s) for one sample."""
        if (self.targets is not None):
            target = []
            # 'self'/'past' targets need the source to end in EOS; extend all
            # three tensors consistently when it does not.
            if (self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) and (source[(- 1)] != self.vocab.eos())):
                source = torch.cat([source, source.new([self.vocab.eos()])])
                if ('future' in self.targets):
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if ('past' in self.targets):
                    # source[-2, None] keeps the second-to-last source token as
                    # a length-1 tensor for concatenation.
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[((- 2), None)]])
            for t in self.targets:
                if (t == 'self'):
                    target.append(source)
                elif (t == 'future'):
                    target.append(future_target)
                elif (t == 'past'):
                    target.append(past_target)
                else:
                    raise Exception(('invalid target ' + t))
            # A single requested target is returned unwrapped.
            if (len(target) == 1):
                target = target[0]
        else:
            target = future_target
        return (source, self._filter_vocab(target))

    def _maybe_add_bos(self, source, target):
        # Prepend BOS (from the respective vocabulary) when configured.
        if self.add_bos_token:
            source = torch.cat([source.new([self.vocab.bos()]), source])
            if (target is not None):
                target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
        return (source, target)

    def num_tokens_vec(self, indices):
        # Vectorized variant of num_tokens for a batch of indices.
        return self.sizes[indices]

    def _filter_vocab(self, target):
        """Replace target ids outside tgt_vocab's range with unk."""
        if (len(self.tgt_vocab) != len(self.vocab)):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target
            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        return collate(samples, self.vocab.pad(), self.vocab.eos(), self.fixed_pad_length, self.pad_to_bsz)

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def ordered_indices(self):
        # Sort by size; with shuffle, a random permutation breaks ties so
        # equal-size samples vary between epochs.
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
def test_create_observation_fail(requests_mock):
    """A 422 from the observations endpoint should surface as HTTPError.

    Sends deliberately invalid params (future observation date, latitude
    out of range) against a mocked endpoint returning 422, and checks the
    error payload is exposed on the raised exception.
    """
    params = {'species_guess': 'Pieris rapae', 'observed_on_string': (datetime.now() + timedelta(days=1)).isoformat(), 'latitude': 200}
    requests_mock.post(f'{API_V0}/observations.json', json=load_sample_data('create_observation_fail.json'), status_code=422)
    with pytest.raises(HTTPError) as excinfo:
        create_observation(access_token='valid token', **params)
    assert (excinfo.value.response.status_code == 422)
    assert ('errors' in excinfo.value.response.json())
def read_index(index_path: PathType, storage_options: Optional[Dict[(str, str)]]=None) -> Any:
    """Load a variant-index file, dispatching on its extension.

    Supports tabix (``.tbi``) and CSI (``.csi``) indexes; anything else
    raises ValueError.
    """
    url = str(index_path)
    for suffix, reader in ((TABIX_EXTENSION, read_tabix), (CSI_EXTENSION, read_csi)):
        if url.endswith(suffix):
            return reader(url, storage_options=storage_options)
    raise ValueError('Only .tbi or .csi indexes are supported.')
# NOTE(review): the bare name below is almost certainly a registry decorator
# that lost its "@" prefix during extraction (e.g. "@NECKS.register_module")
# -- confirm against upstream.
_module
class FPN(nn.Module):
    """Feature Pyramid Network neck.

    Takes multi-scale backbone features, projects each selected level to
    ``out_channels`` with a 1x1 lateral conv, adds a top-down pathway with
    nearest-neighbor upsampling, smooths each level with a 3x3 conv, and
    optionally appends extra downsampled levels (via max-pool or stride-2
    convs) until ``num_outs`` outputs exist.
    """

    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, conv_cfg=None, norm_cfg=None, activation=None):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.activation = activation
        self.relu_before_extra_convs = relu_before_extra_convs
        self.fp16_enabled = False
        if (end_level == (- 1)):
            # Use every backbone level from start_level upward.
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # A fixed end level forbids extra levels on top.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.extra_convs_on_inputs = extra_convs_on_inputs
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral projection and 3x3 output smoothing per level.
            l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False)
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Extra stride-2 convs to reach num_outs levels, fed either from the
        # last backbone input or from the previous FPN output.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and self.extra_convs_on_inputs):
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    def init_weights(self):
        # Xavier-uniform init for all conv layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    # NOTE(review): the bare call below is almost certainly a decorator that
    # lost its prefix during extraction (likely "@auto_fp16()") -- confirm.
    _fp16()
    def forward(self, inputs):
        assert (len(inputs) == len(self.in_channels))
        # 1x1 lateral projections of the selected backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down pathway: add each upsampled coarser level into the finer one.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            laterals[(i - 1)] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')
        # 3x3 smoothing convs per level.
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # No extra convs: downsample the last output with max-pool.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # First extra level from either the raw input or last output.
                if self.extra_convs_on_inputs:
                    orig = inputs[(self.backbone_end_level - 1)]
                    outs.append(self.fpn_convs[used_backbone_levels](orig))
                else:
                    outs.append(self.fpn_convs[used_backbone_levels](outs[(- 1)]))
                # Remaining extra levels chain off the previous output,
                # optionally with a ReLU in between.
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.discriminator = nn.ModuleList([nn.Sequential(nn.ReflectionPad1d(7), nn.utils.spectral_norm(nn.Conv1d(1, 16, kernel_size=15)), nn.LeakyReLU(0.2, True)), nn.Sequential(nn.utils.spectral_norm(nn.Conv1d(16, 64, kernel_size=41, stride=4, padding=20, groups=4)), nn.LeakyReLU(0.2, True)), nn.Sequential(nn.utils.spectral_norm(nn.Conv1d(64, 256, kernel_size=41, stride=4, padding=20, groups=16)), nn.LeakyReLU(0.2, True)), nn.Sequential(nn.utils.spectral_norm(nn.Conv1d(256, 1024, kernel_size=41, stride=4, padding=20, groups=64)), nn.LeakyReLU(0.2, True)), nn.Sequential(nn.utils.spectral_norm(nn.Conv1d(1024, 1024, kernel_size=41, stride=4, padding=20, groups=256)), nn.LeakyReLU(0.2, True)), nn.Sequential(nn.utils.spectral_norm(nn.Conv1d(1024, 1024, kernel_size=5, stride=1, padding=2)), nn.LeakyReLU(0.2, True)), nn.utils.spectral_norm(nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1))])
def forward(self, x):
for layer in self.discriminator:
x = layer(x)
return x |
# NOTE(review): the first line had lost its `@patch` prefix during extraction,
# leaving an invalid bare keyword-tuple; restored here. `patch` is expected to
# come from `unittest.mock` at the top of the file — confirm.
@patch('ruamel.yaml.YAML.load', return_value='mocked pipeline def')
def test_get_pipeline_definition(mocked_yaml):
    """get_pipeline_definition wraps the yaml-parsed payload in a PipelineDefinition."""
    pipeline = 'pipeline'
    pipeline_def = string_loader.get_pipeline_definition(pipeline, None)
    # The raw string is handed straight to the yaml loader.
    mocked_yaml.assert_called_once_with(pipeline)
    expected_pipeline_def = PipelineDefinition(
        'mocked pipeline def',
        PipelineFileInfo(pipeline_name='', loader='pypyr.loaders.string', parent=None, path=None))
    assert pipeline_def == expected_pipeline_def
def test_save_load_observables_expressions():
    """Round-trip a simulation (with observables/expressions) through save/load."""
    stream = io.BytesIO()
    times = np.linspace(0, 100, 100)
    result = ScipyOdeSimulator(tyson_oscillator.model, times).run()
    result.save(stream, include_obs_exprs=True)
    loaded = SimulationResult.load(stream)
    assert len(loaded.observables) == len(times)
    # Expressions are expected to be unavailable on the reloaded result.
    assert_raises(ValueError, lambda: loaded.expressions)
class MtimeLinemode(LinemodeBase):
    """Linemode that shows each file's last-modification time."""

    name = 'mtime'

    def filetitle(self, fobj, metadata):
        # Title column: the file's path relative to the current directory.
        return fobj.relative_path

    def infostring(self, fobj, metadata):
        # Info column: formatted mtime, or '?' when no stat result is available.
        stat_result = fobj.stat
        if stat_result is None:
            return '?'
        mtime = datetime.fromtimestamp(stat_result.st_mtime)
        return mtime.strftime('%Y-%m-%d %H:%M')
def test_year():
    """The copyright end-year in we_get/core/we_get.py matches the current year."""
    current_year = datetime.now().year
    path = Path(__file__).parent.parent / 'we_get' / 'core' / 'we_get.py'
    with path.open() as f:
        content = f.read()
    # Header reads "Copyright (c) 2016-<year> ..."; grab the end year.
    # (The original also computed `content.splitlines()[1]` and discarded the
    # result — dead code, removed.)
    year = content.split('Copyright (c) 2016-')[1].split(' ')[0]
    assert year == str(current_year)
def _parse_bool(value):
    """Interpret common CLI spellings of a boolean ('True', '1', 'yes', ...)."""
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', '1', 'yes', 'y', 't')


def parse():
    """Build and parse command-line arguments for the ESS experiment runner.

    Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--ess_iters', help='(int) number of ess samples per iteration', default=20, type=int)
    parser.add_argument('--mean', help='(str) latent mean, options = "Constant", "LogRBF"', default='LogRBF')
    parser.add_argument('--nomg', help='(int) number of omegas to use', default=100, type=int)
    parser.add_argument('--iters', help='(int) # of ESS iterations to run', default=100, type=int)
    parser.add_argument('--data', help='(str) options: "airline"...', default='all', type=str, choices=['all', 'challenger', 'fertility', 'concreteslump', 'servo', 'yacht', 'autompg', 'housing', 'stock', 'pendulum', 'energy', 'concrete', 'airfoil'])
    parser.add_argument('--nx', help='(int) number of data points for simulated data', default=400, type=int)
    parser.add_argument('--lengthscale', help='(float) lengthscale for sim kernel', default=2.0, type=float)
    parser.add_argument('--period', help='(float) period for QP kernel', default=1.0, type=float)
    parser.add_argument('--slope', help='(float) slope for linear data', default=1.0, type=float)
    parser.add_argument('--intercept', help='(float) intercept for linear data', default=0.0, type=float)
    parser.add_argument('--spacing', help='(str) should data be evenly spaced or randomly sampled', default='even', type=str, choices=['even', 'random'])
    # BUG FIX: the original used `type=bool, default='False'`, but
    # `bool('False')` is True and argparse also type-converts string defaults,
    # so --noise silently defaulted to True and could never be turned off.
    parser.add_argument('--noise', help='(bool) should generated data be generated with noise', default=False, type=_parse_bool)
    parser.add_argument('--optim_iters', help='(int) number of optimization iterations', default=1, type=int)
    parser.add_argument('--mlatent', help='(str) shared or separate latent gps', default='separate', type=str, choices=['shared', 'separate'])
    parser.add_argument('--model_avg', help='(str) (partial) kernels or (full) kernels + theta model averaging', default='full', type=str, choices=['full', 'partial'])
    parser.add_argument('--omega_max', help='(float) maximum value of omega', default=8.0, type=float)
    return parser.parse_args()
def test_bulk_imports(gl, group):
    """Exercise the bulk-import (group migration) API end to end."""
    target_slug = f'{group.full_path}-import'
    configuration = {'url': gl.url, 'access_token': gl.private_token}
    entity = {
        'source_full_path': group.full_path,
        'source_type': 'group_entity',
        'destination_slug': target_slug,
        'destination_namespace': target_slug,
    }
    created = gl.bulk_imports.create({'configuration': configuration, 'entities': [entity]})
    assert created.source_type == 'gitlab'
    assert created.status == 'created'
    # Get / refresh / list should all agree with the created migration.
    fetched = gl.bulk_imports.get(created.id)
    assert fetched == created
    fetched.refresh()
    assert fetched == created
    assert fetched in gl.bulk_imports.list()
    # Entities of the migration appear in both the global and per-migration lists.
    all_entities = gl.bulk_import_entities.list()
    entities = fetched.entities.list()
    assert isinstance(entities, list)
    assert entities[0] in all_entities
    first_entity = fetched.entities.get(entities[0].id)
    assert first_entity == entities[0]
    first_entity.refresh()
    assert first_entity.created_at == entities[0].created_at
def test_datetime_format_provider(strict_coercion, debug_trail):
    """DatetimeFormatProvider drives both loading and dumping via the format string."""
    retort = Retort(
        strict_coercion=strict_coercion,
        debug_trail=debug_trail,
        recipe=[DatetimeFormatProvider('%Y-%m-%d')],
    )
    load = retort.get_loader(datetime)
    assert load('3045-02-13') == datetime(year=3045, month=2, day=13)
    check_any_dt(load)
    # A non-matching string must raise the dedicated mismatch error.
    raises_exc(
        DatetimeFormatMismatch('%Y-%m-%d', 'some string'),
        lambda: load('some string'),
    )
    dump = retort.get_dumper(datetime)
    assert dump(datetime(year=3045, month=2, day=13)) == '3045-02-13'
def test_call_which_returns_a_string_before_smart_contract_deployed(deploy_client: JSONRPCClient) -> None:
    """Calling a string-returning function before deployment must fail cleanly."""
    proxy, receipt = deploy_rpc_test_contract(deploy_client, 'RpcTest')
    deploy_block = receipt['blockNumber']
    # At the deployment block the call succeeds (empty string default).
    assert proxy.functions.ret_str().call(block_identifier=deploy_block) == ''
    # One block earlier the contract does not exist yet.
    with pytest.raises(BadFunctionCallOutput):
        proxy.functions.ret_str().call(block_identifier=deploy_block - 1)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, resample=None):
super().__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, 3, stride=stride, padding=1)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, stride=1, padding=1)
self.stride = stride
self.relu = nn.ReLU(inplace=True)
self.resample = resample
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
residual = x
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
if (self.resample is not None):
residual = self.resample(x)
out += residual
return out |
class ProxyManager:
    """Factory and cache for on-chain contract proxies, keyed by address.

    Each accessor lazily constructs the proxy on first use and caches it; a
    per-proxy-kind Semaphore serializes the construct-and-cache step so
    concurrent callers share one proxy instance per address.
    """

    def __init__(self, rpc_client: JSONRPCClient, contract_manager: ContractManager, metadata: ProxyManagerMetadata) -> None:
        # Address -> proxy caches, one per contract kind.
        self.address_to_secret_registry: Dict[SecretRegistryAddress, SecretRegistry] = {}
        self.address_to_token: Dict[TokenAddress, Token] = {}
        self.address_to_custom_token: Dict[TokenAddress, CustomToken] = {}
        self.address_to_token_network: Dict[TokenNetworkAddress, TokenNetwork] = {}
        self.address_to_token_network_registry: Dict[TokenNetworkRegistryAddress, TokenNetworkRegistry] = {}
        self.address_to_user_deposit: Dict[UserDepositAddress, UserDeposit] = {}
        self.address_to_service_registry: Dict[ServiceRegistryAddress, ServiceRegistry] = {}
        self.address_to_monitoring_service: Dict[MonitoringServiceAddress, MonitoringService] = {}
        self.address_to_one_to_n: Dict[OneToNAddress, OneToN] = {}
        # Payment channels are keyed by (token network, channel id).
        self.identifier_to_payment_channel: Dict[Tuple[TokenNetworkAddress, ChannelID], PaymentChannel] = {}
        self.client = rpc_client
        self.contract_manager = contract_manager
        self.metadata = metadata
        # One lock per cache so unrelated proxy kinds don't serialize each other.
        self.token_network_creation_lock = Semaphore()
        self._token_creation_lock = Semaphore()
        self._token_network_registry_creation_lock = Semaphore()
        self._secret_registry_creation_lock = Semaphore()
        self._service_registry_creation_lock = Semaphore()
        self._payment_channel_creation_lock = Semaphore()
        self._user_deposit_creation_lock = Semaphore()
        self._monitoring_service_creation_lock = Semaphore()
        self._one_to_n_creation_lock = Semaphore()

    def token(self, token_address: TokenAddress, block_identifier: BlockIdentifier) -> Token:
        """Return (cached) Token proxy for ``token_address``."""
        if not is_binary_address(token_address):
            raise ValueError('token_address must be a valid address')
        with self._token_creation_lock:
            if token_address not in self.address_to_token:
                self.address_to_token[token_address] = Token(jsonrpc_client=self.client, token_address=token_address, contract_manager=self.contract_manager, block_identifier=block_identifier)
        return self.address_to_token[token_address]

    def custom_token(self, token_address: TokenAddress, block_identifier: BlockIdentifier) -> CustomToken:
        """Return (cached) CustomToken proxy for ``token_address``."""
        if not is_binary_address(token_address):
            raise ValueError('token_address must be a valid address')
        with self._token_creation_lock:
            if token_address not in self.address_to_custom_token:
                self.address_to_custom_token[token_address] = CustomToken(jsonrpc_client=self.client, token_address=token_address, contract_manager=self.contract_manager, block_identifier=block_identifier)
        return self.address_to_custom_token[token_address]

    def token_network_registry(self, address: TokenNetworkRegistryAddress, block_identifier: BlockIdentifier) -> TokenNetworkRegistry:
        """Return (cached) TokenNetworkRegistry proxy for ``address``."""
        # CONSISTENCY FIX: every sibling accessor validates its address; this
        # one (and service_registry) previously did not.
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self._token_network_registry_creation_lock:
            if address not in self.address_to_token_network_registry:
                metadata = SmartContractMetadata(deployed_at=self.metadata.token_network_registry_deployed_at, address=Address(address), abi=self.contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK_REGISTRY), gas_measurements=gas_measurements(self.contract_manager.contracts_version), filters_start_at=self.metadata.filters_start_at)
                self.address_to_token_network_registry[address] = TokenNetworkRegistry(rpc_client=self.client, metadata=metadata, proxy_manager=self, block_identifier=block_identifier)
        return self.address_to_token_network_registry[address]

    def token_network(self, address: TokenNetworkAddress, block_identifier: BlockIdentifier) -> TokenNetwork:
        """Return (cached) TokenNetwork proxy for ``address``."""
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self.token_network_creation_lock:
            if address not in self.address_to_token_network:
                # NOTE: deployed_at / registry address are unknown here (None);
                # presumably resolved lazily by TokenNetwork — confirm.
                metadata = TokenNetworkMetadata(deployed_at=None, abi=self.contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK), gas_measurements=gas_measurements(self.contract_manager.contracts_version), address=Address(address), token_network_registry_address=None, filters_start_at=self.metadata.filters_start_at)
                self.address_to_token_network[address] = TokenNetwork(jsonrpc_client=self.client, contract_manager=self.contract_manager, proxy_manager=self, metadata=metadata, block_identifier=block_identifier)
        return self.address_to_token_network[address]

    def secret_registry(self, address: SecretRegistryAddress, block_identifier: BlockIdentifier) -> SecretRegistry:
        """Return (cached) SecretRegistry proxy for ``address``."""
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self._secret_registry_creation_lock:
            if address not in self.address_to_secret_registry:
                self.address_to_secret_registry[address] = SecretRegistry(jsonrpc_client=self.client, secret_registry_address=address, contract_manager=self.contract_manager, block_identifier=block_identifier)
        return self.address_to_secret_registry[address]

    def service_registry(self, address: ServiceRegistryAddress, block_identifier: BlockIdentifier) -> ServiceRegistry:
        """Return (cached) ServiceRegistry proxy for ``address``."""
        # CONSISTENCY FIX: validate like every other accessor.
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self._service_registry_creation_lock:
            if address not in self.address_to_service_registry:
                self.address_to_service_registry[address] = ServiceRegistry(jsonrpc_client=self.client, service_registry_address=address, contract_manager=self.contract_manager, block_identifier=block_identifier)
        return self.address_to_service_registry[address]

    def payment_channel(self, channel_state: NettingChannelState, block_identifier: BlockIdentifier) -> PaymentChannel:
        """Return (cached) PaymentChannel proxy for the channel in ``channel_state``."""
        token_network_address = channel_state.canonical_identifier.token_network_address
        channel_id = channel_state.canonical_identifier.channel_identifier
        if not is_binary_address(token_network_address):
            raise ValueError('address must be a valid address')
        typecheck(channel_id, T_ChannelID)
        with self._payment_channel_creation_lock:
            dict_key = (token_network_address, channel_id)
            if dict_key not in self.identifier_to_payment_channel:
                token_network = self.token_network(token_network_address, block_identifier=block_identifier)
                self.identifier_to_payment_channel[dict_key] = PaymentChannel(token_network=token_network, channel_state=channel_state, contract_manager=self.contract_manager)
        return self.identifier_to_payment_channel[dict_key]

    def user_deposit(self, address: UserDepositAddress, block_identifier: BlockIdentifier) -> UserDeposit:
        """Return (cached) UserDeposit proxy for ``address``."""
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self._user_deposit_creation_lock:
            if address not in self.address_to_user_deposit:
                self.address_to_user_deposit[address] = UserDeposit(jsonrpc_client=self.client, user_deposit_address=address, contract_manager=self.contract_manager, proxy_manager=self, block_identifier=block_identifier)
        return self.address_to_user_deposit[address]

    def monitoring_service(self, address: MonitoringServiceAddress, block_identifier: BlockIdentifier) -> MonitoringService:
        """Return (cached) MonitoringService proxy for ``address``."""
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self._monitoring_service_creation_lock:
            if address not in self.address_to_monitoring_service:
                self.address_to_monitoring_service[address] = MonitoringService(jsonrpc_client=self.client, monitoring_service_address=address, contract_manager=self.contract_manager, block_identifier=block_identifier)
        return self.address_to_monitoring_service[address]

    def one_to_n(self, address: OneToNAddress, block_identifier: BlockIdentifier) -> OneToN:
        """Return (cached) OneToN proxy for ``address``."""
        if not is_binary_address(address):
            raise ValueError('address must be a valid address')
        with self._one_to_n_creation_lock:
            if address not in self.address_to_one_to_n:
                self.address_to_one_to_n[address] = OneToN(jsonrpc_client=self.client, one_to_n_address=address, contract_manager=self.contract_manager, block_identifier=block_identifier)
        return self.address_to_one_to_n[address]
class TestBaseFairseqModelBase(unittest.TestCase):
    """Abstract base test case; concrete subclasses provide the model under test."""

    # FIX: unittest invokes setUpClass on the class, so it must be a
    # classmethod — the decorator was missing in the original.
    @classmethod
    def setUpClass(cls):
        # The base class itself is not runnable; only its subclasses are.
        if cls is TestBaseFairseqModelBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpModel(self, model):
        """Store a model after asserting it is a BaseFairseqModel."""
        self.assertTrue(isinstance(model, BaseFairseqModel))
        self.model = model

    def setupInput(self):
        # Hook for subclasses to prepare self.forward_input.
        pass

    def setUp(self):
        self.model = None
        self.forward_input = None
def save_model(model: nn.Module, iteration: int, suffix: str) -> None:
    """Write a training checkpoint (model, optimizer, iteration) to disk.

    Relies on the module-level ``args`` and ``optimizer`` globals for naming
    and optimizer state.
    """
    os.makedirs(args.save_folder, exist_ok=True)
    filename = '{}_{}_{}_size{}_anchor{}_{}_{}.pth'.format(
        args.dataset, args.neck, args.backbone, args.image_size, args.anchor_size,
        ('MG' if args.mutual_guide else 'Retina'), suffix)
    destination = os.path.join(args.save_folder, filename)
    checkpoint = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'iteration': iteration}
    print('Saving to {}'.format(destination))
    torch.save(checkpoint, destination)
class AirInitBlock(nn.Module):
    """AirNet stem: three 3x3 conv blocks followed by an overlapping max-pool.

    The first conv halves spatial size (stride 2) and works at half the
    output width; the pool halves spatial size again.
    """

    def __init__(self, in_channels, out_channels):
        super(AirInitBlock, self).__init__()
        mid_channels = out_channels // 2
        self.conv1 = conv3x3_block(in_channels=in_channels, out_channels=mid_channels, stride=2)
        self.conv2 = conv3x3_block(in_channels=mid_channels, out_channels=mid_channels)
        self.conv3 = conv3x3_block(in_channels=mid_channels, out_channels=out_channels)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3, self.pool):
            x = stage(x)
        return x
class MockVoiceChannel(CustomMockMixin, unittest.mock.Mock, HashableMixin):
    """Mock of a Discord voice channel with sensible default attributes."""

    spec_set = voice_channel_instance

    def __init__(self, **kwargs) -> None:
        defaults = {'id': next(self.discord_id), 'name': 'channel', 'guild': MockGuild()}
        # Caller-supplied kwargs take precedence over the defaults.
        super().__init__(**{**defaults, **kwargs})
        if 'mention' not in kwargs:
            self.mention = f'#{self.name}'
class EasyuploadIo(SimpleDownloader):
    """pyLoad downloader plugin for easyupload.io (free-download flow)."""
    __name__ = 'EasyuploadIo'
    __type__ = 'downloader'
    __version__ = '0.02'
    __status__ = 'testing'
    # NOTE(review): this line was truncated during extraction — the URL regex
    # after the opening quote is missing, leaving an unterminated string
    # literal. Restore from the upstream plugin before use.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Easyupload.io downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    # Regexes matched against the fetched page HTML (self.data).
    NAME_PATTERN = '<h4>(?P<N>.+?)</h4>'
    SIZE_PATTERN = '>Size: (?P<S>[\\d.,]+) (?P<U>[\\w^_]+)'
    OFFLINE_PATTERN = '<h4>FILE NOT FOUND</h4>'
    RECAPTCHA_PATTERN = "grecaptcha.execute\\('((?:[\\w\\-]|%[0-9a-fA-F]{2})+)'"

    def setup(self):
        # Allow simultaneous downloads.
        self.multi_dl = True

    def handle_free(self, pyfile):
        """Free-download flow: solve ReCaptcha, request a download token, follow the link."""
        password_protected = ("$('#password-modal').modal('open');" in self.data)
        # NOTE(review): truncated during extraction — the regex after
        # 'url: "(' lost its body and closing quote; restore from upstream.
        m = re.search('url: "( self.data)
        if (m is None):
            self.fail('action URL not found')
        else:
            action_url = m.group(1)
        if password_protected:
            password = self.get_password()
            if (not password):
                self.fail(self._('Password required'))
        else:
            password = ''
        m = re.search(self.RECAPTCHA_PATTERN, self.data)
        if (m is None):
            self.fail(self._('ReCaptcha key not found'))
        else:
            recaptcha_key = urllib.parse.unquote(m.group(1).strip())
        self.captcha = ReCaptcha(pyfile)
        response = self.captcha.challenge(recaptcha_key, version='2invisible')
        # NOTE(review): the `ref=` argument below was also truncated in
        # extraction (its URL string is mangled); restore from upstream.
        data = self.load(action_url, ref=' post={'type': 'download-token', 'url': self.info['pattern']['ID'], 'value': password, 'captchatoken': response, 'method': 'regular'})
        json_data = json.loads(data)
        if (json_data.get('status') == True):
            self.link = json_data['download_link']
        elif password_protected:
            self.fail(self._('Wrong password'))
        else:
            self.fail(json_data['data'])
def _setup_common_routes(api_blueprint: Blueprint, spa_blueprint: Blueprint, options: Options) -> None:
    """Attach CORS to the API blueprint and static/SPA routes to the SPA blueprint."""
    cors_options = options.cors
    if cors_options:
        # A dict is passed through as CORS kwargs; any other truthy value
        # enables CORS with defaults.
        cors_params = cors_options if isinstance(cors_options, dict) else {}
        CORS(api_blueprint, **cors_params)

    # NOTE(review): the original `_blueprint.route(...)` statements were route
    # decorators whose `@`-prefix was lost during extraction; restored here
    # against `spa_blueprint`, which serves the client assets — confirm
    # against upstream which blueprint was used.
    @spa_blueprint.route(f'/{ASSETS_PATH.name}/<path:path>')
    def send_assets_dir(path: str = '') -> Any:
        return send_file(safe_client_build_dir_path(f'assets/{path}'))

    @spa_blueprint.route(f'/{MODULES_PATH.name}/<path:path>')
    def send_modules_dir(path: str = '') -> Any:
        return send_file(safe_web_modules_dir_path(path), mimetype='text/javascript')

    index_html = read_client_index_html(options)
    if options.serve_index_route:
        # Serve the SPA index for both the root and any client-side route.
        @spa_blueprint.route('/')
        @spa_blueprint.route('/<path:_>')
        def send_client_dir(_: str = '') -> Any:
            return index_html
class Frame:
    """Thin convenience wrapper around a Python ``FrameType``.

    NOTE(review): ``lineno``, ``f_globals``, ``f_locals``, ``code`` and
    ``statement`` are consumed as attributes elsewhere in this very class
    (``self.code.fullsource``, ``self.lineno``, ``self.f_locals.copy()``),
    so the ``@property`` decorators — lost during extraction — are restored.
    """

    __slots__ = ('raw',)

    def __init__(self, frame: FrameType) -> None:
        self.raw = frame

    @property
    def lineno(self) -> int:
        # 0-based line number (frames report 1-based).
        return self.raw.f_lineno - 1

    @property
    def f_globals(self) -> Dict[str, Any]:
        return self.raw.f_globals

    @property
    def f_locals(self) -> Dict[str, Any]:
        return self.raw.f_locals

    @property
    def code(self) -> Code:
        return Code(self.raw.f_code)

    @property
    def statement(self) -> 'Source':
        """Full source statement at the current line, or an empty Source."""
        if self.code.fullsource is None:
            return Source('')
        return self.code.fullsource.getstatement(self.lineno)

    def eval(self, code, **vars):
        """Evaluate ``code`` in this frame's globals/locals, updated with ``vars``."""
        f_locals = self.f_locals.copy()
        f_locals.update(vars)
        return eval(code, self.f_globals, f_locals)

    def repr(self, object: object) -> str:
        """Safe repr of ``object`` (never raises from a broken __repr__)."""
        return saferepr(object)

    def getargs(self, var: bool = False):
        """(name, value) pairs for the frame's code-object arguments.

        Arguments missing from f_locals are silently skipped.
        """
        retval = []
        for arg in self.code.getargs(var):
            try:
                retval.append((arg, self.f_locals[arg]))
            except KeyError:
                pass
        return retval
# NOTE(review): the bare `()` below is almost certainly the remnant of a
# stripped `@pytest.fixture()` decorator — confirm against the original file.
()
def logs_model_config():
    # Fixture-style config for an Elasticsearch-backed logs model, wired to
    # the FAKE_* test constants defined elsewhere in this module.
    conf = {'LOGS_MODEL': 'elasticsearch', 'LOGS_MODEL_CONFIG': {'producer': 'elasticsearch', 'elasticsearch_config': {'host': FAKE_ES_HOST, 'port': FAKE_ES_PORT, 'access_key': FAKE_AWS_ACCESS_KEY, 'secret_key': FAKE_AWS_SECRET_KEY, 'aws_region': FAKE_AWS_REGION}}}
    return conf
class FitEqu(object):
    """Sparse-regression (SINDy-style) identification of a spring-mass ODE.

    Simulates the system, builds a candidate-term library from (noisy)
    numerical derivatives, then fits a sequentially-thresholded linear
    regression to recover the governing coefficients.
    """

    def __init__(self):
        super(FitEqu, self).__init__()

    def prepare_data(self):
        """Simulate the spring-mass system and return its solution frame."""
        dataset = SpringMassDataset(self.k, self.m, self.A0, self.c)
        return dataset.solution()

    def prepare_library(self, data):
        """Build the regression library from noisy derivative estimates.

        Returns (X_library, y_library) where y is the estimated x-dot and X
        stacks candidate terms [x, x^2, x_2dot, x*x_dot]. Also plots the
        clean/noisy signals and derivative estimates as a side effect.
        """
        (is_poly, remove_num) = (False, 50)
        (t, x_clean) = (data['t'].to_numpy(), data['x'].to_numpy())
        # Fixed seed so the injected noise is reproducible.
        np.random.seed(0)
        x_noise = x_clean + (np.random.normal(0, np.std(x_clean), x_clean.shape) * 0.04)
        # Polynomial-fit differentiation of the noisy signal.
        (width, deg, diff, is_poly) = (30, 3, 2, True)
        (x_fit, x_12dot) = PolyDiff(x_noise, t, width=width, deg=deg, diff=diff)
        x_12dot = x_12dot.reshape((-1), 2)
        # Total-variation regularized derivatives (alternative estimate).
        x_dot = TVRegDiff(x_noise, 1, 0.4, dx=(t[1] - t[0]), plotflag=False, diffkernel='sq')
        x_2dot = TVRegDiff(x_dot, 1, 0.4, dx=(t[1] - t[0]), plotflag=False, diffkernel='sq')
        (x_dot, x_2dot) = (x_dot[width:(-width)], x_2dot[width:(-width)])
        # Finite-difference derivatives of the clean signal, for reference.
        x_dot_FD = dxdt(x_clean, t, kind='finite_difference', k=1)
        x_2dot_FD = dxdt(x_dot_FD, t, kind='finite_difference', k=1)
        # Trim boundary artifacts from every series.
        x_dot_FD = x_dot_FD[remove_num:(-remove_num)]
        x_2dot_FD = x_2dot_FD[remove_num:(-remove_num)]
        t = t[remove_num:(-remove_num)]
        x_noise = x_noise[remove_num:(-remove_num)]
        x_clean = x_clean[remove_num:(-remove_num)]
        if (not is_poly):
            x_dot = x_dot[remove_num:(-remove_num)]
            x_2dot = x_2dot[remove_num:(-remove_num)].reshape((-1))
        else:
            # PolyDiff already shortened the series by `width` on each side.
            x_dot = x_dot[(remove_num - width):(-(remove_num - width))].reshape((-1))
            x_2dot = x_2dot[(remove_num - width):(-(remove_num - width))].reshape((-1))
            x_fit = x_fit[(remove_num - width):(-(remove_num - width))].reshape((-1))
        # Diagnostic plot of signals and derivative estimates.
        plt.figure(figsize=(8, 5))
        plt.plot(t, x_clean, label='x (Clean)')
        plt.plot(t, x_noise, label='x (Noise)')
        if is_poly:
            plt.plot(t, x_fit, label='x (PolyDiff)')
        plt.plot(t, x_dot, label='x_dot (Noise)')
        plt.plot(t, x_2dot, label='x_2dot (Noise)')
        plt.plot(t, x_dot_FD, label='x_dot (FD-clean)')
        plt.plot(t, x_2dot_FD, label='x_2dot (FD-clean)')
        plt.legend(fontsize=14, loc=4)
        plt.xlabel('$t$', fontsize=16)
        plt.ylabel('$x$', fontsize=16)
        # Candidate terms: [x, x^2, x_2dot, x * x_dot].
        X_library = np.stack((x_fit, (x_fit ** 2), x_2dot, np.multiply(x_fit.reshape((-1), 1), x_dot.reshape((-1), 1)).reshape((-1))), axis=(-1))
        y_library = x_dot
        return (X_library, y_library)

    def fit(self, k, m, A0, c, threshold=0.05):
        """Identify the governing coefficients for parameters (k, m, A0, c).

        Returns (coef[0], coef[2], coef) where, for the library ordering
        above, coef[0] ~ -k/c and coef[2] ~ -m/c — TODO confirm scaling.
        """
        (self.k, self.m, self.A0, self.c, self.threshold) = (k, m, A0, c, threshold)
        data = self.prepare_data()
        (X_library, y_library) = self.prepare_library(data)
        model = LinearRegression(fit_intercept=True)
        model.fit(X_library, y_library)
        # Sequentially threshold small coefficients (STLSQ-style): zero out
        # library columns whose coefficient is below `threshold`, refit.
        # (The original also computed unused r2 scores and rounded k/m
        # predictions that were never returned — dead code, removed.)
        for _ in range(3):
            coef = model.coef_
            flag = np.repeat((np.abs(coef) > threshold).astype(int).reshape(1, (-1)), X_library.shape[0], axis=0)
            X1 = copy.copy(X_library)
            X1 = np.multiply(X1, flag)
            model.fit(X1, y_library)
        coef = np.squeeze(model.coef_)
        return (coef[0], coef[2], coef)
# NOTE(review): restored the `@pytest.fixture` prefix lost in extraction —
# a bare `(scope='session')` is not valid Python.
@pytest.fixture(scope='session')
def session_capabilities(pytestconfig):
    """Resolve Selenium desired capabilities for the whole test session."""
    driver = pytestconfig.getoption('driver').upper()
    capabilities = getattr(DesiredCapabilities, driver, {}).copy()
    if driver == 'REMOTE':
        # For remote drivers, merge in the browser-specific capabilities too.
        browser = capabilities.get('browserName', '').upper()
        capabilities.update(getattr(DesiredCapabilities, browser, {}))
    # Command-line supplied capabilities win over the defaults.
    capabilities.update(pytestconfig._capabilities)
    return capabilities
class Registry:
    """Global name -> class/object registry with decorator-based registration.

    NOTE(review): every method takes ``cls`` and operates on the class-level
    ``mapping``, so the ``@classmethod`` decorators — lost during extraction —
    are restored here.
    """

    # Shared state: per-kind name->class maps plus a free-form dotted-key store.
    mapping = {'builder_name_mapping': {}, 'trainer_name_mapping': {}, 'model_name_mapping': {}, 'metric_name_mapping': {}, 'loss_name_mapping': {}, 'optimizer_name_mapping': {}, 'scheduler_name_mapping': {}, 'processor_name_mapping': {}, 'state': {}}

    @classmethod
    def register_trainer(cls, name):
        """Decorator: register a trainer class under ``name``."""
        def wrap(trainer_cls):
            cls.mapping['trainer_name_mapping'][name] = trainer_cls
            return trainer_cls
        return wrap

    @classmethod
    def register_builder(cls, name):
        """Decorator: register a dataset-builder class under ``name``."""
        def wrap(builder_cls):
            # Imported lazily to avoid a circular import at module load.
            from pythia.datasets.base_dataset_builder import BaseDatasetBuilder
            assert issubclass(builder_cls, BaseDatasetBuilder), 'All builders must inherit BaseDatasetBuilder class'
            cls.mapping['builder_name_mapping'][name] = builder_cls
            return builder_cls
        return wrap

    @classmethod
    def register_metric(cls, name):
        """Decorator: register a metric class under ``name``."""
        def wrap(func):
            from pythia.modules.metrics import BaseMetric
            assert issubclass(func, BaseMetric), 'All Metric must inherit BaseMetric class'
            cls.mapping['metric_name_mapping'][name] = func
            return func
        return wrap

    @classmethod
    def register_loss(cls, name):
        """Decorator: register a loss module under ``name``."""
        def wrap(func):
            from torch import nn
            assert issubclass(func, nn.Module), 'All loss must inherit torch.nn.Module class'
            cls.mapping['loss_name_mapping'][name] = func
            return func
        return wrap

    @classmethod
    def register_model(cls, name):
        """Decorator: register a model class under ``name``."""
        def wrap(func):
            from pythia.models.base_model import BaseModel
            assert issubclass(func, BaseModel), 'All models must inherit BaseModel class'
            cls.mapping['model_name_mapping'][name] = func
            return func
        return wrap

    @classmethod
    def register_processor(cls, name):
        """Decorator: register a processor class under ``name``."""
        def wrap(func):
            from pythia.datasets.processors import BaseProcessor
            assert issubclass(func, BaseProcessor), 'All Processor classes must inherit BaseProcessor class'
            cls.mapping['processor_name_mapping'][name] = func
            return func
        return wrap

    @classmethod
    def register_optimizer(cls, name):
        """Decorator: register an optimizer under ``name`` (no base-class check)."""
        def wrap(func):
            cls.mapping['optimizer_name_mapping'][name] = func
            return func
        return wrap

    @classmethod
    def register_scheduler(cls, name):
        """Decorator: register a scheduler under ``name`` (no base-class check)."""
        def wrap(func):
            cls.mapping['scheduler_name_mapping'][name] = func
            return func
        return wrap

    @classmethod
    def register(cls, name, obj):
        """Store ``obj`` under dotted key ``name`` in the free-form state tree."""
        path = name.split('.')
        current = cls.mapping['state']
        for part in path[:(-1)]:
            if part not in current:
                current[part] = {}
            current = current[part]
        current[path[(-1)]] = obj

    @classmethod
    def get_trainer_class(cls, name):
        return cls.mapping['trainer_name_mapping'].get(name, None)

    @classmethod
    def get_builder_class(cls, name):
        return cls.mapping['builder_name_mapping'].get(name, None)

    @classmethod
    def get_model_class(cls, name):
        return cls.mapping['model_name_mapping'].get(name, None)

    @classmethod
    def get_processor_class(cls, name):
        return cls.mapping['processor_name_mapping'].get(name, None)

    @classmethod
    def get_metric_class(cls, name):
        return cls.mapping['metric_name_mapping'].get(name, None)

    @classmethod
    def get_loss_class(cls, name):
        return cls.mapping['loss_name_mapping'].get(name, None)

    @classmethod
    def get_optimizer_class(cls, name):
        return cls.mapping['optimizer_name_mapping'].get(name, None)

    @classmethod
    def get_scheduler_class(cls, name):
        return cls.mapping['scheduler_name_mapping'].get(name, None)

    @classmethod
    def get(cls, name, default=None, no_warning=False):
        """Look up dotted key ``name`` in the state tree, warning on a miss."""
        original_name = name
        name = name.split('.')
        value = cls.mapping['state']
        for subname in name:
            value = value.get(subname, default)
            if value is default:
                break
        # Warn through the registered writer (when present) on a default hit.
        if (('writer' in cls.mapping['state']) and (value == default) and (no_warning is False)):
            cls.mapping['state']['writer'].write('Key {} is not present in registry, returning default value of {}'.format(original_name, default))
        return value

    @classmethod
    def unregister(cls, name):
        """Remove and return a top-level state entry (None when absent)."""
        return cls.mapping['state'].pop(name, None)
class QubitOperator(SymbolicOperator):
    """Sum of Pauli-string terms acting on qubits.

    NOTE(review): ``actions``/``action_strings``/``action_before_index``/
    ``different_indices_commute`` are consumed as properties by
    ``SymbolicOperator``; the ``@property`` decorators lost during extraction
    are restored here — confirm against the upstream definition.
    """

    @property
    def actions(self):
        """Valid single-qubit actions: the Pauli operators."""
        return ('X', 'Y', 'Z')

    @property
    def action_strings(self):
        """String form of each action."""
        return ('X', 'Y', 'Z')

    @property
    def action_before_index(self):
        # Terms are written e.g. "X1 Y2": the action letter precedes the index.
        return True

    @property
    def different_indices_commute(self):
        # Paulis on distinct qubits commute, so factors can be index-sorted.
        return True

    def renormalize(self):
        """Divide the operator by its induced 2-norm; error on a zero operator."""
        norm = self.induced_norm(2)
        if numpy.isclose(norm, 0.0):
            raise ZeroDivisionError('Cannot renormalize empty or zero operator')
        else:
            self /= norm

    def _simplify(self, term, coefficient=1.0):
        """Sort a Pauli term by qubit index and fuse same-qubit factors.

        Returns ``(coefficient, simplified_term_tuple)``.
        """
        if not term:
            return (coefficient, term)
        # Sorting is safe because Paulis on different qubits commute.
        term = sorted(term, key=(lambda factor: factor[0]))
        new_term = []
        left_factor = term[0]
        for right_factor in term[1:]:
            (left_index, left_action) = left_factor
            (right_index, right_action) = right_factor
            if left_index == right_index:
                # Same qubit: fuse via the Pauli product table, accumulating
                # the (possibly imaginary) scalar factor.
                (new_coefficient, new_action) = _PAULI_OPERATOR_PRODUCTS[(left_action, right_action)]
                left_factor = (left_index, new_action)
                coefficient *= new_coefficient
            else:
                # Identity factors are dropped from the canonical form.
                if left_action != 'I':
                    new_term.append(left_factor)
                left_factor = right_factor
        if left_factor[1] != 'I':
            new_term.append(left_factor)
        return (coefficient, tuple(new_term))
def main():
    """Gamepad configuration/demo loop running at a fixed frame rate."""
    fps = 30
    print('Plug in a USB gamepad. Do it! Do it now! Press enter after you have done this.')
    wait_for_enter()
    pygame.init()
    num_joysticks = pygame.joystick.get_count()
    if num_joysticks < 1:
        print("You didn't plug in a joystick. FORSHAME!")
        return
    input_manager = InputManager()
    screen = pygame.display.set_mode((640, 480))
    button_index = 0
    player = Player()
    while not input_manager.quit_attempt:
        start = time.time()
        screen.fill((0, 0, 0))
        # Configuration runs until every button has been mapped, then the
        # interactive demo takes over.
        is_configured = button_index >= len(input_manager.buttons)
        if not is_configured:
            success = configure_phase(screen, input_manager.buttons[button_index], input_manager)
            if success:
                button_index += 1
        else:
            interaction_phase(screen, player, input_manager)
        pygame.display.flip()
        # BUG FIX: elapsed time is now - start. The original computed
        # `start - time.time()` (negative elapsed), making every frame sleep
        # 1/fps + elapsed instead of 1/fps - elapsed, i.e. roughly half the
        # intended frame rate.
        elapsed = time.time() - start
        delay = (1.0 / fps) - elapsed
        if delay > 0:
            time.sleep(delay)
def import_parser(path, import_type, parser_func, loader=None):
    """Import the module named ``import_type`` and hand it to ``parser_func``.

    Exits the process with a helpful message when the module is not installed.
    Returns whatever ``parser_func(module, path, loader)`` returns.
    """
    # Local import keeps this drop-in compatible with the original file's
    # imports; import_module returns the named module directly, replacing the
    # __import__ + sys.modules[...] two-step.
    import importlib
    try:
        mod = importlib.import_module(import_type)
    except ImportError:
        sys.exit('{0} import error, please make sure that {0} is installed'.format(import_type))
    return parser_func(mod, path, loader)
# NOTE(review): restored the `@pytest.mark` prefix lost in extraction — a
# leading `.parametrize(...)` is not valid Python.
@pytest.mark.parametrize('keys, input_dict, expected', [
    (['a', 'b'], {'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2}),
    (['a', 'b', 'd'], {'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2}),
    (['a'], {}, {}),
    (['a'], {'b': 2}, {}),
])
def test_build_kwargs(keys, input_dict, expected):
    """_build_kwargs keeps only the requested keys that exist in input_dict."""
    kwargs = tools._build_kwargs(keys, input_dict)
    assert kwargs == expected
def test_properties():
    """Exercise read-only / read-write / write-only property bindings."""
    instance = m.TestProperties()

    # Read-only field: readable, not writable.
    assert instance.def_readonly == 1
    with pytest.raises(AttributeError):
        instance.def_readonly = 2

    # Read-write field also backs the read-only property.
    instance.def_readwrite = 2
    assert instance.def_readwrite == 2
    assert instance.def_property_readonly == 2
    with pytest.raises(AttributeError):
        instance.def_property_readonly = 3

    instance.def_property = 3
    assert instance.def_property == 3

    # Write-only property: reading raises, writing works.
    with pytest.raises(AttributeError) as exc_info:
        instance.def_property_writeonly
    assert 'unreadable attribute' in str(exc_info)
    instance.def_property_writeonly = 4
    assert instance.def_property_readonly == 4

    # A property with neither getter nor setter rejects both.
    with pytest.raises(AttributeError) as exc_info:
        instance.def_property_impossible
    assert 'unreadable attribute' in str(exc_info)
    with pytest.raises(AttributeError) as exc_info:
        instance.def_property_impossible = 5
    assert "can't set attribute" in str(exc_info)
class TestLegacyAreaParser(unittest.TestCase):
    """Tests for parsing the legacy .cfg area-definition format."""

    def test_area_parser_legacy(self):
        """Both EASE grids parse and stringify as expected."""
        from pyresample import parse_area_file
        (ease_nh, ease_sh) = parse_area_file(os.path.join(TEST_FILES_PATH, 'areas.cfg'), 'ease_nh', 'ease_sh')

        expected_proj = "{'R': '6371228', 'lat_0': '90', 'lon_0': '0', 'no_defs': 'None', 'proj': 'laea', 'type': 'crs', 'units': 'm', 'x_0': '0', 'y_0': '0'}"
        expected_nh = 'Area ID: ease_nh\nDescription: Arctic EASE grid\nProjection ID: ease_nh\nProjection: {}\nNumber of columns: 425\nNumber of rows: 425\nArea extent: (-5326849.0625, -5326849.0625, 5326849.0625, 5326849.0625)'.format(expected_proj)
        self.assertEqual(ease_nh.__str__(), expected_nh)
        # lat_0 must be parsed to a number, not left as a string.
        self.assertIsInstance(ease_nh.proj_dict['lat_0'], (int, float))

        expected_proj = "{'R': '6371228', 'lat_0': '-90', 'lon_0': '0', 'no_defs': 'None', 'proj': 'laea', 'type': 'crs', 'units': 'm', 'x_0': '0', 'y_0': '0'}"
        expected_sh = 'Area ID: ease_sh\nDescription: Antarctic EASE grid\nProjection ID: ease_sh\nProjection: {}\nNumber of columns: 425\nNumber of rows: 425\nArea extent: (-5326849.0625, -5326849.0625, 5326849.0625, 5326849.0625)'.format(expected_proj)
        self.assertEqual(ease_sh.__str__(), expected_sh)
        self.assertIsInstance(ease_sh.proj_dict['lat_0'], (int, float))

    def test_load_area(self):
        """load_area resolves a single named area from the legacy file."""
        from pyresample import load_area
        ease_nh = load_area(os.path.join(TEST_FILES_PATH, 'areas.cfg'), 'ease_nh')
        expected_proj = "{'R': '6371228', 'lat_0': '90', 'lon_0': '0', 'no_defs': 'None', 'proj': 'laea', 'type': 'crs', 'units': 'm', 'x_0': '0', 'y_0': '0'}"
        expected_nh = 'Area ID: ease_nh\nDescription: Arctic EASE grid\nProjection ID: ease_nh\nProjection: {}\nNumber of columns: 425\nNumber of rows: 425\nArea extent: (-5326849.0625, -5326849.0625, 5326849.0625, 5326849.0625)'.format(expected_proj)
        self.assertEqual(expected_nh, ease_nh.__str__())

    def test_area_file_not_found_exception(self):
        """Missing files raise FileNotFoundError for both str and Path inputs."""
        from pyresample.area_config import load_area
        self.assertRaises(FileNotFoundError, load_area, '/this/file/does/not/exist.yaml')
        self.assertRaises(FileNotFoundError, load_area, pathlib.Path('/this/file/does/not/exist.yaml'))

    def test_not_found_exception(self):
        """An unknown area name raises AreaNotFound."""
        from pyresample.area_config import AreaNotFound, parse_area_file
        self.assertRaises(AreaNotFound, parse_area_file, os.path.join(TEST_FILES_PATH, 'areas.cfg'), 'no_area')

    def test_commented(self):
        """Commented-out area definitions are skipped by the parser."""
        from pyresample import parse_area_file
        parsed = parse_area_file(os.path.join(TEST_FILES_PATH, 'areas.cfg'))
        self.assertNotIn('commented', [area.name for area in parsed])
class CRDLoss(nn.Module):
    """Contrastive Representation Distillation loss.

    Embeds student and teacher features into a shared space, scores them
    against a contrastive memory bank, and sums the two contrastive losses.
    """

    def __init__(self, opt):
        super(CRDLoss, self).__init__()
        # Keep submodule creation order identical (state_dict key order).
        self.embed_s = Embed(opt.s_dim, opt.feat_dim)
        self.embed_t = Embed(opt.t_dim, opt.feat_dim)
        self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
        self.criterion_t = ContrastLoss(opt.n_data)
        self.criterion_s = ContrastLoss(opt.n_data)

    def forward(self, f_s, f_t, idx, contrast_idx=None):
        """Return the combined student+teacher contrastive loss."""
        emb_student = self.embed_s(f_s)
        emb_teacher = self.embed_t(f_t)
        out_student, out_teacher = self.contrast(emb_student, emb_teacher, idx, contrast_idx)
        return self.criterion_s(out_student) + self.criterion_t(out_teacher)
class Effect2017(BaseEffect):
    """Passive effect: boosts drone hull HP, scaled by the source skill level."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Skill-sourced containers scale the bonus with the trained level;
        # any other source applies the bonus once.
        skill_level = container.level if 'skill' in context else 1

        def affects_drone(drone):
            return drone.item.requiresSkill('Drones')

        fit.drones.filteredItemBoost(
            affects_drone,
            'hp',
            container.getModifiedItemAttr('hullHpBonus') * skill_level,
            **kwargs)
class LinearFunction(torch.autograd.Function):
    """Autograd wrapper around the fused ``linear_blaslt`` linear kernels.

    ``forward`` computes ``input @ weight.T + bias`` via the extension;
    ``backward`` dispatches to a fused all-gradients kernel or a cheaper
    input-only kernel depending on whether the weight needs a gradient.
    """

    # BUG FIX: new-style autograd Functions (the ``ctx``-based signature used
    # here) require forward/backward to be static methods; without the
    # decorators, calling LinearFunction.apply fails on modern PyTorch.
    @staticmethod
    def forward(ctx, input, weight, bias):
        output = linear_blaslt.forward(input, weight, bias)
        # Only input and weight are needed to compute the gradients.
        ctx.save_for_backward(input, weight)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        if weight.requires_grad:
            # Fused path: input, weight and bias gradients in one call.
            d_input, d_weight, d_bias = linear_blaslt.backward(input, weight, grad_output, True)
        else:
            # Inference-style path: only the input gradient is required.
            d_input = linear_blaslt.backward_input_only(input, weight, grad_output)
            d_weight, d_bias = None, None
        return d_input, d_weight, d_bias
class EditorTabContextMenu(Menu):
    """Context menu shown when right-clicking an editor tab.

    The menu operates on the tab at ``self._index``; the tab bar is expected
    to call setIndex() just before popping the menu up.
    """

    def __init__(self, *args, **kwds):
        Menu.__init__(self, *args, **kwds)
        # Index of the tab this menu currently applies to; -1 means none.
        self._index = (- 1)

    def setIndex(self, index):
        """Remember which tab the menu was opened on."""
        self._index = index

    def build(self):
        """Populate the menu; every entry dispatches through _fileAction."""
        icons = pyzo.icons
        self.addItem(translate('menu', 'Save ::: Save the current file to disk.'), icons.disk, self._fileAction, 'saveFile')
        self.addItem(translate('menu', 'Save as... ::: Save the current file under another name.'), icons.disk_as, self._fileAction, 'saveFileAs')
        self.addItem(translate('menu', 'Close ::: Close the current file.'), icons.page_delete, self._fileAction, 'closeFile')
        self.addItem(translate('menu', 'Close others::: Close all files but this one.'), None, self._fileAction, 'close_others')
        self.addItem(translate('menu', 'Close all ::: Close all files.'), icons.page_delete_all, self._fileAction, 'close_all')
        self.addItem(translate('menu', 'Rename ::: Rename this file.'), None, self._fileAction, 'rename')
        self.addSeparator()
        self.addItem(translate('menu', 'Copy path ::: Copy the full path of this file.'), None, self._fileAction, 'copypath')
        self.addItem(translate('menu', 'Open directory in file browser'), None, self._fileAction, 'opendir')
        self.addSeparator()
        self.addItem(translate('menu', 'Pin/Unpin ::: Pinned files get closed less easily.'), None, self._fileAction, 'pin')
        self.addItem(translate('menu', 'Set/Unset as MAIN file ::: The main file can be run while another file is selected.'), icons.star, self._fileAction, 'main')
        self.addSeparator()
        self.addItem(translate('menu', 'Run file ::: Run the code in this file.'), icons.run_file, self._fileAction, 'run')
        self.addItem(translate('menu', 'Run file as script ::: Run this file as a script (restarts the interpreter).'), icons.run_file_script, self._fileAction, 'run_script')

    def _fileAction(self, action):
        """Perform *action* (a string key from build()) on the stored tab."""
        item = pyzo.editors._tabs.getItemAt(self._index)
        if (action in ['saveFile', 'saveFileAs', 'closeFile']):
            # These map directly onto pyzo.editors methods of the same name.
            getattr(pyzo.editors, action)(item.editor)
        elif ((action == 'close_others') or (action == 'close_all')):
            if (action == 'close_all'):
                # item=None means no tab is exempt from closing.
                item = None
            items = pyzo.editors._tabs.items()
            # Iterate in reverse so emitted close requests do not shift the
            # indices of tabs still pending.
            for i in reversed(range(pyzo.editors._tabs.count())):
                # Skip the current tab (unless closing all) and pinned tabs.
                if ((items[i] is item) or items[i].pinned):
                    continue
                pyzo.editors._tabs.tabCloseRequested.emit(i)
        elif (action == 'rename'):
            # "Rename" = save under a new name, then best-effort delete the
            # old file (ignore failures, e.g. the old path never existed).
            filename = item.filename
            pyzo.editors.saveFileAs(item.editor)
            if (item.filename != filename):
                try:
                    os.remove(filename)
                except Exception:
                    pass
        elif (action == 'copypath'):
            filename = item.filename
            QtWidgets.qApp.clipboard().setText(filename)
        elif (action == 'opendir'):
            # Reveal the containing directory in the file-browser tool, if loaded.
            fileBrowser = pyzo.toolManager.getTool('pyzofilebrowser')
            if fileBrowser:
                fileBrowser.setPath(os.path.dirname(item.filename))
        elif (action == 'pin'):
            item._pinned = (not item._pinned)
        elif (action == 'main'):
            # Toggle this tab as the "main file" (run while another is selected).
            if (pyzo.editors._tabs._mainFile == item.id):
                pyzo.editors._tabs._mainFile = None
            else:
                pyzo.editors._tabs._mainFile = item.id
        elif (action == 'run'):
            menu = pyzo.main.menuBar().findChild(RunMenu)
            if menu:
                menu._runFile((False, False), item.editor)
        elif (action == 'run_script'):
            menu = pyzo.main.menuBar().findChild(RunMenu)
            if menu:
                # (True, False) = run as script, restarting the interpreter.
                menu._runFile((True, False), item.editor)
        # Refresh tab titles / pin markers after any action.
        pyzo.editors._tabs.updateItems()
def train_vae_model():
    """Train the collaborative VAE simulator and checkpoint the best model.

    Command-line driven: selects dataset/split, whether to simulate exposure
    or ratings, trains for 150 epochs with scheduled LR and KL weight, and
    saves weights whenever Recall@10 + NDCG@10 improves on validation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, help='specify the dataset')
    parser.add_argument('--split', type=int, default=0, help='specify the split of dataset for experiment')
    parser.add_argument('--batch_size', type=int, default=500, help='specify the batch size for updating the vae')
    parser.add_argument('--device', type=str, default='0', help='specify the visible GPU device')
    parser.add_argument('--simulate', type=str, choices=['exposure', 'ratings'], help='to simulate the exposure or the ratings')
    args = parser.parse_args()
    # Pin TF to the requested GPU and let memory grow on demand.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)
    data_root = os.path.join('data', args.dataset, str(args.split), args.simulate)
    train_gen = CollaborativeVAEDataGenerator(data_root=data_root, phase='train', simulate=args.simulate, batch_size=args.batch_size, joint=True)
    # Validation uses a larger batch since no gradients are kept.
    valid_gen = CollaborativeVAEDataGenerator(data_root=data_root, phase='val', simulate=args.simulate, batch_size=(args.batch_size * 8), joint=True)
    # Step schedule: 1e-3 for the first 50 epochs, then 5e-4.
    lr_schedule = PiecewiseSchedule([[0, 0.001], [50, 0.001], [51, 0.0005]], outside_value=0.0005)
    collabo_vae = get_collabo_vae(args.dataset, input_dim=train_gen.num_items)
    # Separate graphs sharing weights: one for training, one for evaluation.
    vae_train = collabo_vae.build_vae_train()
    vae_eval = collabo_vae.build_vae_eval()
    (best_recall_10, best_NDCG_10, best_sum) = ((- np.inf), (- np.inf), (- np.inf))
    save_root = os.path.join('models', args.dataset, str(args.split), args.simulate)
    if (not os.path.exists(save_root)):
        os.makedirs(save_root)
    training_dynamics = os.path.join(save_root, 'training_dynamics.csv')
    with open(training_dynamics, 'w') as f:
        # NOTE(review): the CSV header is written as empty fields (',,');
        # presumably placeholders for the recall/NDCG columns appended below —
        # confirm against the downstream consumer of this file.
        f.write(',,\n')
    best_path = os.path.join(save_root, 'best.model')
    # KL annealing: ramp the Gaussian KL weight from 0 to 0.2 over 80 epochs.
    lamb_schedule_gauss = PiecewiseSchedule([[0, 0.0], [80, 0.2]], outside_value=0.2)
    # Loss/metrics depend on what is being simulated.
    if (args.simulate == 'exposure'):
        rec_loss = binary_crossentropy
        recall_func = Recall_at_k
        NDCG_func = NDCG_at_k
    elif (args.simulate == 'ratings'):
        rec_loss = multinomial_crossentropy
        recall_func = Recall_at_k_explicit
        NDCG_func = NDCG_at_k_explicit
    vae_train.compile(loss=rec_loss, optimizer=optimizers.Adam(), metrics=[rec_loss])
    epochs = 150
    for epoch in range(epochs):
        # Apply this epoch's scheduled learning rate and KL weight.
        K.set_value(vae_train.optimizer.lr, lr_schedule.value(epoch))
        K.set_value(collabo_vae.add_gauss_loss.lamb_kl, lamb_schedule_gauss.value(epoch))
        print((('-' * 10) + 'Epoch:{}'.format(epoch)), ('-' * 10))
        vae_train.fit_generator(train_gen, workers=4, epochs=1, validation_data=valid_gen)
        recall_10 = EvaluateModel(vae_eval, valid_gen, recall_func, k=10)
        NDCG_10 = EvaluateModel(vae_eval, valid_gen, NDCG_func, k=10)
        if (recall_10 > best_recall_10):
            best_recall_10 = recall_10
        if (NDCG_10 > best_NDCG_10):
            best_NDCG_10 = NDCG_10
        # Checkpoint only on the combined (sum) metric.
        cur_sum = (recall_10 + NDCG_10)
        if (cur_sum > best_sum):
            best_sum = cur_sum
            vae_train.save_weights(best_path, save_format='tf')
        # Append per-epoch metrics for later plotting.
        with open(training_dynamics, 'a') as f:
            f.write('{:.4f},{:.4f}\n'.format(recall_10, NDCG_10))
        print(((('-' * 5) + 'Epoch: {}'.format(epoch)) + ('-' * 5)))
        print('cur : {:5f}, best : {:5f}'.format(recall_10, best_recall_10))
        print('cur : {:5f}, best : {:5f}'.format(NDCG_10, best_NDCG_10))
def do_import(parent, library):
    """Import ratings and play statistics from the Rhythmbox XML database.

    Parses ``~/.local/share/rhythmbox/rhythmdb.xml`` with a SAX handler and
    reports success or failure to the user via a message dialog.
    """
    rhythmdb_path = os.path.expanduser('~/.local/share/rhythmbox/rhythmdb.xml')
    handler = RBDBContentHandler(library)
    try:
        xml.sax.parse(rhythmdb_path, handler)
    except Exception:
        util.print_exc()
        # Flush whatever was parsed before the failure, then tell the user.
        handler.finish()
        ErrorMessage(parent, RBImport.PLUGIN_NAME, _('Import Failed')).run()
        return
    count = handler.finish()
    message = _('Successfully imported ratings and statistics for %d songs') % count
    WarningMessage(parent, RBImport.PLUGIN_NAME, message).run()
def setup_roles(application: Application) -> Roles:
    """Fetch (or lazily create) the Roles instance stored on the application.

    Supports two storage strategies: a ``bot_data`` implementing RolesBotData,
    or a plain dict keyed by BOT_DATA_KEY. Anything else is a TypeError.
    """
    bot_data = application.bot_data
    if isinstance(bot_data, RolesBotData):
        roles = bot_data.get_roles()
        if roles is None:
            # First access: create and persist a fresh Roles object.
            roles = Roles(application.bot)
            bot_data.set_roles(roles)
        return roles
    if isinstance(bot_data, dict):
        return bot_data.setdefault(BOT_DATA_KEY, Roles(application.bot))
    raise TypeError('bot_data must either be a dict or implement RolesBotData!')
class DataModuleFromConfig(pl.LightningDataModule):
    """Lightning data module whose datasets come from instantiation configs.

    A dataloader method is attached only for the splits that were configured,
    so Lightning sees exactly the loaders that exist.
    """

    def __init__(self, batch_size, train=None, validation=None, test=None, wrap=False, num_workers=None):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        # Default worker count scales with the batch size.
        self.num_workers = num_workers if num_workers is not None else batch_size * 2
        if train is not None:
            self.dataset_configs['train'] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.dataset_configs['validation'] = validation
            self.val_dataloader = self._val_dataloader
        if test is not None:
            self.dataset_configs['test'] = test
            self.test_dataloader = self._test_dataloader
        self.wrap = wrap

    def prepare_data(self):
        # Instantiate once purely for download/preparation side effects.
        for cfg in self.dataset_configs.values():
            instantiate_from_config(cfg)

    def setup(self, stage=None):
        self.datasets = {split: instantiate_from_config(cfg)
                         for split, cfg in self.dataset_configs.items()}
        if self.wrap:
            for split in self.datasets:
                self.datasets[split] = WrappedDataset(self.datasets[split])

    # NOTE(review): worker_init_fn is referenced as an attribute here; confirm
    # how it is bound (module-level function vs. class-level) in this file.
    def _train_dataloader(self):
        return DataLoader(self.datasets['train'], batch_size=self.batch_size,
                          num_workers=self.num_workers,
                          worker_init_fn=self.worker_init_fn, shuffle=True)

    def _val_dataloader(self):
        return DataLoader(self.datasets['validation'], batch_size=self.batch_size,
                          num_workers=self.num_workers,
                          worker_init_fn=self.worker_init_fn)

    def _test_dataloader(self):
        return DataLoader(self.datasets['test'], batch_size=self.batch_size,
                          num_workers=self.num_workers,
                          worker_init_fn=self.worker_init_fn)
def worker_init_fn(worker_id):
    """Seed numpy per DataLoader worker, derived from the parent RNG state."""
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
def test_shape(zarr_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict) -> None:
    """Raster output should be 224x224 with 2*(history+1) agent channels + 3 map channels."""
    history_num_frames = 10
    cfg['raster_params']['map_type'] = 'py_satellite'
    cfg['raster_params']['filter_agents_threshold'] = 1.0
    cfg['model_params']['history_num_frames'] = history_num_frames
    rasterizer = build_rasterizer(cfg, dmg)
    # Newest frame first: take history+1 frames and reverse them.
    frames = zarr_dataset.frames[: history_num_frames + 1][::-1]
    agents = filter_agents_by_frames(frames, zarr_dataset.agents)
    raster = rasterizer.rasterize(frames, agents, [])
    expected_channels = 2 * (history_num_frames + 1) + 3
    assert raster.shape == (224, 224, expected_channels)
def test_creating_simple_background():
    """A fresh Background exposes exactly the constructor arguments; id stays unset."""
    bg = Background('Background', 'I am a Background', 'foo.feature', 1, parent=None)
    assert bg.id is None
    assert bg.parent is None
    assert bg.keyword == 'Background'
    assert bg.sentence == 'I am a Background'
    assert bg.path == 'foo.feature'
    assert bg.line == 1
def _eval_forward_ref(val: str, ctx: Context, *, is_typeddict: bool = False, allow_unpack: bool = False) -> Value:
    """Parse a string forward reference and evaluate it into a Value.

    Reports a syntax error through *ctx* and returns an error AnyValue when
    the string is not a valid expression.
    """
    try:
        parsed = ast.parse(val, mode='eval')
    except SyntaxError:
        ctx.show_error(f'Syntax error in type annotation: {val}')
        return AnyValue(AnySource.error)
    return _type_from_ast(parsed.body, ctx, is_typeddict=is_typeddict, allow_unpack=allow_unpack)
def subscribe(email: str, ip: str) -> SubscriptionResult:
    """Subscribe *email* via Flodesk, handling already-known subscribers.

    Raises ValueError when the Flodesk API key is not configured.
    """
    if not settings.FLODESK_API_KEY:
        raise ValueError('Flodesk integration is not configured')
    subscriber = get_subscriber(email)
    if not subscriber:
        # Unknown address: start a brand new subscription.
        return subscribe_email(email, ip)
    status = subscriber.get('status')
    if status == 'active':
        # Already subscribed; just ensure membership in our segment.
        if not is_in_segment(subscriber.get('segments')):
            add_to_segment(email, settings.FLODESK_SEGMENT_ID)
        return SubscriptionResult.SUBSCRIBED
    if status == 'bounced':
        return SubscriptionResult.UNABLE_TO_SUBSCRIBE
    if status == 'unconfirmed':
        return SubscriptionResult.WAITING_CONFIRMATION
    if status and status in OPT_IN_REQUIRED_STATUSES:
        return SubscriptionResult.OPT_IN_FORM_REQUIRED
    # NOTE(review): any other status falls through and returns None implicitly,
    # matching the original behaviour — confirm callers expect this.
class LeNet_5(PruningModule):
    """LeNet-5-style CNN for MNIST pruning experiments.

    When ``mask=True`` the fully-connected layers are MaskedLinear so that
    pruning masks can be applied; otherwise plain Linear layers are used.
    """

    def __init__(self, mask=False):
        super(LeNet_5, self).__init__()
        linear = MaskedLinear if mask else Linear
        self.conv1 = nn.Conv2d(1, 20, kernel_size=(5, 5))
        self.conv2 = nn.Conv2d(20, 50, kernel_size=(5, 5))
        # 800 = 50 channels * 4 * 4 spatial after two conv+pool stages
        # (assuming 1x28x28 MNIST input — TODO confirm).
        self.fc1 = linear(800, 500)
        self.fc2 = linear(500, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=(2, 2), stride=2)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=(2, 2), stride=2)
        # BUG FIX: flatten width must equal fc1's input size (800); the old
        # view(-1, 120) did not match fc1(800, 500) and broke the forward pass.
        x = x.view((- 1), 800)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        # Log-probabilities over the 10 digit classes.
        x = F.log_softmax(x, dim=1)
        return x
def url_to_storage_plugin_in_event_loop(url_path: str, event_loop: asyncio.AbstractEventLoop, storage_options: Optional[Dict[(str, Any)]]=None) -> StoragePlugin:
    """Resolve *url_path* to a StoragePlugin by driving the resolver on *event_loop*.

    Thin synchronous bridge: wraps the (synchronous) resolution in a coroutine
    so it runs under the caller-supplied event loop.
    """

    async def _resolve() -> StoragePlugin:
        return url_to_storage_plugin(url_path=url_path, storage_options=storage_options)

    return event_loop.run_until_complete(_resolve())
class NfsdCollector(diamond.collector.Collector):
    """Collects NFS server (nfsd) statistics from /proc/net/rpc/nfsd."""

    PROC = '/proc/net/rpc/nfsd'

    # Maps the tag in column 0 of each /proc line to the metric names of the
    # remaining columns, in order. Replaces the old 150-line elif chain.
    STAT_FIELDS = {
        'rc': ['reply_cache.hits', 'reply_cache.misses', 'reply_cache.nocache'],
        'fh': ['filehandle.stale', 'filehandle.total-lookups',
               'filehandle.anonlookups', 'filehandle.dir-not-in-cache',
               'filehandle.nodir-not-in-cache'],
        'io': ['input_output.bytes-read', 'input_output.bytes-written'],
        'th': ['threads.threads', 'threads.fullcnt', 'threads.10-20-pct',
               'threads.20-30-pct', 'threads.30-40-pct', 'threads.40-50-pct',
               'threads.50-60-pct', 'threads.60-70-pct', 'threads.70-80-pct',
               'threads.80-90-pct', 'threads.90-100-pct', 'threads.100-pct'],
        'ra': ['read-ahead.cache-size', 'read-ahead.10-pct', 'read-ahead.20-pct',
               'read-ahead.30-pct', 'read-ahead.40-pct', 'read-ahead.50-pct',
               'read-ahead.60-pct', 'read-ahead.70-pct', 'read-ahead.80-pct',
               'read-ahead.90-pct', 'read-ahead.100-pct', 'read-ahead.not-found'],
        'net': ['net.cnt', 'net.udpcnt', 'net.tcpcnt', 'net.tcpconn'],
        'rpc': ['rpc.cnt', 'rpc.badfmt', 'rpc.badauth', 'rpc.badclnt'],
        'proc2': ['v2.unknown', 'v2.null', 'v2.getattr', 'v2.setattr', 'v2.root',
                  'v2.lookup', 'v2.readlink', 'v2.read', 'v2.wrcache', 'v2.write',
                  'v2.create', 'v2.remove', 'v2.rename', 'v2.link', 'v2.symlink',
                  'v2.mkdir', 'v2.rmdir', 'v2.readdir', 'v2.fsstat'],
        'proc3': ['v3.unknown', 'v3.null', 'v3.getattr', 'v3.setattr', 'v3.lookup',
                  'v3.access', 'v3.readlink', 'v3.read', 'v3.write', 'v3.create',
                  'v3.mkdir', 'v3.symlink', 'v3.mknod', 'v3.remove', 'v3.rmdir',
                  'v3.rename', 'v3.link', 'v3.readdir', 'v3.readdirplus',
                  'v3.fsstat', 'v3.fsinfo', 'v3.pathconf', 'v3.commit'],
        'proc4': ['v4.unknown', 'v4.null', 'v4.compound'],
        'proc4ops': ['v4.ops.unknown', 'v4.ops.op0-unused', 'v4.ops.op1-unused',
                     'v4.ops.op2-future', 'v4.ops.access', 'v4.ops.close',
                     'v4.ops.commit', 'v4.ops.create', 'v4.ops.delegpurge',
                     'v4.ops.delegreturn', 'v4.ops.getattr', 'v4.ops.getfh',
                     'v4.ops.link', 'v4.ops.lock', 'v4.ops.lockt', 'v4.ops.locku',
                     'v4.ops.lookup', 'v4.ops.lookup_root', 'v4.ops.nverify',
                     'v4.ops.open', 'v4.ops.openattr', 'v4.ops.open_conf',
                     'v4.ops.open_dgrd', 'v4.ops.putfh', 'v4.ops.putpubfh',
                     'v4.ops.putrootfh', 'v4.ops.read', 'v4.ops.readdir',
                     'v4.ops.readlink', 'v4.ops.remove', 'v4.ops.rename',
                     'v4.ops.renew', 'v4.ops.restorefh', 'v4.ops.savefh',
                     'v4.ops.secinfo', 'v4.ops.setattr', 'v4.ops.setcltid',
                     'v4.ops.setcltidconf', 'v4.ops.verify', 'v4.ops.write',
                     'v4.ops.rellockowner'],
    }

    def get_default_config_help(self):
        """Return the config help dict (no collector-specific options)."""
        config_help = super(NfsdCollector, self).get_default_config_help()
        config_help.update({})
        return config_help

    def get_default_config(self):
        """Return the default config; metrics are published under 'nfsd'."""
        config = super(NfsdCollector, self).get_default_config()
        config.update({'path': 'nfsd'})
        return config

    def collect(self):
        """Parse the proc file and publish each counter as a derivative.

        Returns True on success, False when the proc file is unreadable.
        """
        if not os.access(self.PROC, os.R_OK):
            return False
        results = {}
        # 'with' guarantees the handle is closed even if parsing fails
        # (the old code leaked the file object on error).
        with open(self.PROC) as proc_file:
            for line in proc_file:
                fields = line.split()
                if not fields:
                    continue
                names = self.STAT_FIELDS.get(fields[0])
                if names:
                    # zip() pairs names with values in order and truncates at
                    # the shorter sequence, so extra or missing columns no
                    # longer raise IndexError.
                    results.update(zip(names, fields[1:]))
        for stat, value in results.items():
            metric_name = '.' + stat
            # int() replaces the Python-2-only long(), which raises
            # NameError on Python 3; behaviour is identical otherwise.
            metric_value = self.derivative(metric_name, int(float(value)))
            self.publish(metric_name, metric_value, precision=3)
        return True
def patch_asyncio():
    """Backport per-Task context propagation for asyncio on Python < 3.7.

    Monkey-patches the ``contextvars`` backport and asyncio's event-loop
    accessors so each Task created via a patched loop carries a snapshot of
    the creating context, mirroring native 3.7+ behaviour.
    """
    # 3.7+ propagates contextvars through Tasks natively; nothing to do.
    if (not (sys.version_info < (3, 7))):
        return

    def _get_context():
        # The active context lives on the current Task (or on the backport's
        # global state when no loop is running); create one lazily.
        state = _get_state()
        ctx = getattr(state, 'context', None)
        if (ctx is None):
            ctx = contextvars.Context()
            state.context = ctx
        return ctx

    def _set_context(ctx):
        state = _get_state()
        state.context = ctx

    def _get_state():
        # Off the event loop, fall back to the backport's global state object.
        loop = asyncio._get_running_loop()
        if (loop is None):
            return contextvars._state
        task = asyncio.Task.current_task(loop=loop)
        return (contextvars._state if (task is None) else task)

    contextvars._get_context = _get_context
    contextvars._set_context = _set_context

    def create_task(loop, coro):
        task = loop._orig_create_task(coro)
        if task._source_traceback:
            # Hide this wrapper frame from the task's creation traceback.
            del task._source_traceback[(- 1)]
        # Snapshot the creating context so the task runs against a copy.
        task.context = contextvars.copy_context()
        return task

    def _patch_loop(loop):
        # Idempotently swap in the context-capturing create_task; the original
        # is stashed on the loop so we never double-wrap.
        if (loop and (not hasattr(loop, '_orig_create_task'))):
            loop._orig_create_task = loop.create_task
            loop.create_task = types.MethodType(create_task, loop)
        return loop

    def get_event_loop():
        return _patch_loop(_get_event_loop())

    def set_event_loop(loop):
        return _set_event_loop(_patch_loop(loop))

    def new_event_loop():
        return _patch_loop(_new_event_loop())

    # Keep the originals, then patch every loop accessor (both the asyncio
    # top-level names and asyncio.events) so all obtained loops get wrapped.
    _get_event_loop = asyncio.get_event_loop
    _set_event_loop = asyncio.set_event_loop
    _new_event_loop = asyncio.new_event_loop
    asyncio.get_event_loop = asyncio.events.get_event_loop = get_event_loop
    asyncio.set_event_loop = asyncio.events.set_event_loop = set_event_loop
    asyncio.new_event_loop = asyncio.events.new_event_loop = new_event_loop
class PlayEntityPositionAndRotation(Packet):
    """Clientbound packet carrying an entity's position delta and rotation."""

    id = 40
    to = 1

    def __init__(self, entity_id: int, dx: int, dy: int, dz: int, yaw: float, pitch: float, on_ground: bool) -> None:
        super().__init__()
        self.entity_id = entity_id
        self.dx = dx
        self.dy = dy
        self.dz = dz
        self.yaw = yaw
        self.pitch = pitch
        self.on_ground = on_ground

    def encode(self) -> bytes:
        """Serialize: varint entity id, three short deltas, two float angles, bool flag."""
        parts = [
            Buffer.pack_varint(self.entity_id),
            Buffer.pack('h', self.dx),
            Buffer.pack('h', self.dy),
            Buffer.pack('h', self.dz),
            Buffer.pack('f', self.yaw),
            Buffer.pack('f', self.pitch),
            Buffer.pack('?', self.on_ground),
        ]
        return b''.join(parts)
class ScheduledScanAdmin(admin.ModelAdmin):
    """Django admin configuration for scheduled scan records."""

    # Columns shown in the admin change-list view.
    list_display = ('id', 'site_name', 'start_time', 'scan_engine', 'start_datetime', 'scan_binary', 'scan_command', 'targets', 'excluded_targets', 'scan_status', 'completed_time', 'result_file_base_name', 'pooled_scan_result_file_base_name', 'scan_binary_process_id')
    # Sidebar filters for narrowing the list.
    list_filter = ('scan_engine', 'scan_binary', 'scan_status')
    # Hidden from the edit form: these fields are set by the scan runner.
    exclude = ('completed_time', 'result_file_base_name')
def seq(*parsers: Parser, **kw_parsers: Parser) -> Parser:
    """Run parsers sequentially, collecting results as a list or dict.

    Positional parsers yield a list of values; keyword parsers yield a dict
    keyed by argument name. Mixing both forms is an error. With no arguments
    the combinator succeeds immediately with an empty list.
    """
    if not parsers and not kw_parsers:
        return success([])
    if parsers and kw_parsers:
        raise ValueError('Use either positional arguments or keyword arguments with seq, not both')

    if parsers:
        def seq_parser(stream, index):
            aggregated = None
            collected = []
            for parser in parsers:
                # aggregate() merges expectations across the sequence so
                # error messages reflect every alternative tried.
                aggregated = parser(stream, index).aggregate(aggregated)
                if not aggregated.status:
                    return aggregated
                index = aggregated.index
                collected.append(aggregated.value)
            return Result.success(index, collected).aggregate(aggregated)
        return seq_parser

    def seq_kwarg_parser(stream, index):
        aggregated = None
        collected = {}
        for name, parser in kw_parsers.items():
            aggregated = parser(stream, index).aggregate(aggregated)
            if not aggregated.status:
                return aggregated
            index = aggregated.index
            collected[name] = aggregated.value
        return Result.success(index, collected).aggregate(aggregated)
    return seq_kwarg_parser
def make_releasenotes(summary, prev_pdfium, new_pdfium, prev_tag, new_tag, c_updates):
    """Assemble RELEASE.md from the summary and commit logs.

    Writes a markdown document containing the pypdfium2 commit log between
    *prev_tag* and *new_tag*, and — when *c_updates* is truthy — the PDFium
    commit log between the two chromium versions.
    """
    relnotes = ''
    relnotes += f'''## Changes (Release {new_tag})
'''
    relnotes += '### Summary (pypdfium2)\n\n'
    if summary:
        relnotes += (summary + '\n')
    # Commit log of pypdfium2 itself between the two tags.
    relnotes += _get_log('pypdfium2', RepositoryURL, ProjectDir, prev_tag, new_tag, '/tree/', '/commit/', '')
    relnotes += '\n'
    if c_updates:
        # PDFium changed: make a blob-less, checkout-less clone just to read
        # its commit history, then append that log.
        with tempfile.TemporaryDirectory() as tmpdir:
            tmpdir = Path(tmpdir)
            run_cmd(['git', 'clone', '--filter=blob:none', '--no-checkout', PdfiumURL, 'pdfium_history'], cwd=tmpdir)
            relnotes += _get_log('PDFium', PdfiumURL, (tmpdir / 'pdfium_history'), str(prev_pdfium), str(new_pdfium), '/+/refs/heads/chromium/', '/+/', 'origin/chromium/')
    (ProjectDir / 'RELEASE.md').write_text(relnotes)
class SentencePieceExtractor():
    """Extracts the vocabulary and BPE merge rules from a SentencePiece model."""

    def __init__(self, model: str):
        requires_backends(self, 'sentencepiece')
        from sentencepiece import SentencePieceProcessor
        self.sp = SentencePieceProcessor()
        self.sp.Load(model)

    def extract(self) -> Tuple[(Dict[(str, int)], List[Tuple])]:
        """Return (vocab, merges).

        Merges are recovered by brute force: any pair of pieces whose
        concatenation is itself a piece was produced by a merge; merges are
        ranked by the merged piece's vocabulary id. Note this is O(V^2) in
        the vocabulary size.
        """
        sp = self.sp
        vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
        merges = []
        for piece_l in vocab.keys():
            for piece_r in vocab.keys():
                merge = f'{piece_l}{piece_r}'
                piece_id = vocab.get(merge, None)
                # BUG FIX: compare against None rather than truthiness —
                # the old `if piece_id:` silently dropped a merge whose
                # vocabulary id is 0.
                if piece_id is not None:
                    merges += [(piece_l, piece_r, piece_id)]
        merges = sorted(merges, key=(lambda val: val[2]))
        merges = [(val[0], val[1]) for val in merges]
        return (vocab, merges)
def calculate_tuple_fallback(typ: TupleType) -> None:
    """Compute a precise ``builtins.tuple`` fallback for *typ* in place.

    Mutates ``typ.partial_fallback`` so its single type argument becomes the
    join of all item types, accounting for unpacked variadic items.
    """
    fallback = typ.partial_fallback
    # The partial fallback must already be builtins.tuple; we only refine its arg.
    assert (fallback.type.fullname == 'builtins.tuple')
    items = []
    for item in typ.items:
        if isinstance(item, UnpackType):
            # An unpacked item contributes its element type, not itself.
            unpacked_type = get_proper_type(item.type)
            if isinstance(unpacked_type, TypeVarTupleType):
                # For a TypeVarTuple, use its upper bound (a tuple instance).
                unpacked_type = get_proper_type(unpacked_type.upper_bound)
            if (isinstance(unpacked_type, Instance) and (unpacked_type.type.fullname == 'builtins.tuple')):
                items.append(unpacked_type.args[0])
            else:
                # Other unpacked forms are not handled here.
                raise NotImplementedError
        else:
            items.append(item)
    # The fallback's element type is the join of every item type.
    fallback.args = (join.join_type_list(items),)
class Effect6222(BaseEffect):
    """Projected active effect: warp scrambling plus MWD/MJD activation blocking."""

    runTime = 'early'
    type = ('projected', 'active')

    def handler(fit, module, context, projectionRange, **kwargs):
        # Only applies when projected onto another fit.
        if ('projected' not in context):
            return
        # Targets immune to offensive modifiers are unaffected.
        if fit.ship.getModifiedItemAttr('disallowOffensiveModifiers'):
            return
        # Out of range: no effect.
        if (module.getModifiedItemAttr('maxRange', 0) < (projectionRange or 0)):
            return
        fit.ship.increaseItemAttr('warpScrambleStatus', module.getModifiedItemAttr('warpScrambleStrength'), **kwargs)
        # Blocks activation of MWD/MJD-type modules. NOTE(review):
        # 'activationBlockedStrenght' (sic) is presumably the literal attribute
        # name in the game data — verify before "fixing" the spelling.
        fit.modules.filteredItemIncrease((lambda mod: (mod.item.requiresSkill('High Speed Maneuvering') or mod.item.requiresSkill('Micro Jump Drive Operation'))), 'activationBlocked', module.getModifiedItemAttr('activationBlockedStrenght'), **kwargs)
def test_PlotItem_preserve_external_visibility_control():
    """Adding/removing other items must not override a curve's explicit hide()."""
    plot = pg.PlotItem()
    hidden_curve = pg.PlotDataItem(np.random.normal(size=10))
    other_curve = pg.PlotDataItem(np.random.normal(size=10))
    plot.addItem(hidden_curve)
    hidden_curve.hide()
    # Adding another item must not re-show the explicitly hidden curve...
    plot.addItem(other_curve)
    assert not hidden_curve.isVisible()
    # ...and neither must removing it.
    plot.removeItem(other_curve)
    assert not hidden_curve.isVisible()
class calibrate(menu):
    """Calibration sub-menu: level, heading offset, compass lock, rudder feedback."""

    def __init__(self):
        super(calibrate, self).__init__(
            _('calibrate'),
            [level(_('level')),
             ValueEdit(_('heading'), self.getheading, 'imu.heading_offset'),
             ValueCheck(_('lock'), 'imu.compass.calibration.locked'),
             calibrate_rudder_feedback(),
             calibrate_info()])
        # Last observed alignment counter, used to detect progress changes.
        self.lastcounter = 0

    def getheading(self):
        """Format the current heading to one decimal, falling back to the raw
        value when it is not numeric (e.g. a placeholder string)."""
        try:
            return '%.1f' % self.last_val('imu.heading')
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; those now propagate.
            return str(self.last_val('imu.heading'))

    def display(self, refresh):
        counter = self.last_val('imu.alignmentCounter', default=0)
        # Force a redraw whenever the alignment progress changed.
        super(calibrate, self).display(refresh or counter != self.lastcounter)
        self.lastcounter = counter
        if counter:
            # Alignment in progress: draw a percentage label and an inverted
            # bar that shrinks as the counter counts down to zero.
            r = rectangle(0, 0, 1, 0.15)
            r.height = 0.2
            self.fittext(r, ' %d%%' % (100 - counter), False, black)
            r.width = 1 - float(counter) / 100
            r.height = 0.25
            self.invertrectangle(r)
        # Live pitch/heel readouts along the bottom edge.
        self.fittext(rectangle(0, 0.9, 0.5, 0.11), self.round_last_val('imu.pitch', 1))
        self.fittext(rectangle(0.5, 0.9, 0.5, 0.11), self.round_last_val('imu.heel', 1))
# NOTE(review): this looks like it should be the decorator `@_datapipe('flatten')`;
# as written it is a bare call whose return value is discarded, so the pipe may
# never actually be registered — confirm against the upstream source.
_datapipe('flatten')
class FlattenIterDataPipe(IterDataPipe[T_co]):
    """Flattens one level of nesting inside each item of the source datapipe.

    Dict items have nested sub-dict entries hoisted up one level; list/tuple
    items have nested lists/tuples spliced in place. ``indices`` selects which
    keys/positions to flatten; when empty, everything flattenable is flattened.
    """
    datapipe: IterDataPipe
    # NOTE(review): mutable class-level default shared by all instances that
    # pass no indices — harmless only while it is never mutated in place.
    indices: Set[Hashable] = set()

    def __init__(self, datapipe: IterDataPipe, indices: Optional[Union[(Hashable, List[Hashable])]]=None) -> None:
        super().__init__()
        self.datapipe = datapipe
        if indices:
            # Normalize a single hashable or a list of them into a set.
            if isinstance(indices, list):
                self.indices = set(indices)
            else:
                self.indices = {indices}

    def __iter__(self) -> Iterator[T_co]:
        # Empty indices means "flatten every flattenable entry".
        flatten_all = False
        if (not self.indices):
            flatten_all = True
        for old_item in self.datapipe:
            if isinstance(old_item, dict):
                new_item = {}
                for (k, v) in old_item.items():
                    if (k in self.indices):
                        # No-op branch kept as in the original; the actual
                        # flattening decision happens just below.
                        pass
                    if ((flatten_all or (k in self.indices)) and isinstance(v, dict)):
                        # Hoist the sub-dict's entries one level up, skipping
                        # keys that collide with the outer dict.
                        for (k_sub, v_sub) in v.items():
                            if (k_sub not in old_item):
                                new_item[k_sub] = v_sub
                            else:
                                warnings.warn('Flattener tried to insert the same key twice into the dict item,the second key,value pair has been dropped.')
                    elif (k not in new_item):
                        new_item[k] = v
                    else:
                        warnings.warn('Flattener tried to insert the same key twice into the dict item,the second key,value pair has been dropped.')
            else:
                # Sequence case: splice nested lists/tuples in place,
                # preserving tuple-ness of the original item.
                is_tuple = False
                new_item = []
                if isinstance(old_item, tuple):
                    is_tuple = True
                    old_item = list(old_item)
                for (i, item) in enumerate(old_item):
                    if ((flatten_all or (i in self.indices)) and isinstance(item, (list, tuple))):
                        new_item.extend(list(item))
                    else:
                        new_item.append(item)
                if is_tuple:
                    new_item = tuple(new_item)
            # Warn when a requested index is absent from this item.
            try:
                if self.indices:
                    for index in self.indices:
                        old_item[index]
            except (IndexError, KeyError):
                warnings.warn('At least one index in the filter is not present in the item being returned, please be aware that expected columns/keys may be missing.')
            (yield new_item)

    def __len__(self) -> int:
        # Flattening changes item contents, not item count.
        if isinstance(self.datapipe, Sized):
            return len(self.datapipe)
        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
def collect_dep_env(data):
    """Append (name, version/location) entries for optional dependencies to *data*.

    Missing optional packages are silently skipped; torchvision and apex are
    always reported, falling back to 'unknown' when unavailable.
    """
    # torchvision is imported at module level; only attribute access can fail here.
    try:
        data.append(('torchvision', f'{torchvision.__version__} {os.path.dirname(torchvision.__file__)}'))
    except AttributeError:
        data.append(('torchvision', 'unknown'))
    try:
        import hydra
        data.append(('hydra', f'{hydra.__version__} {os.path.dirname(hydra.__file__)}'))
    except ImportError:
        pass
    try:
        import classy_vision
        data.append(('classy_vision', f'{classy_vision.__version__} {os.path.dirname(classy_vision.__file__)}'))
    except ImportError:
        pass
    try:
        import tensorboard
        data.append(('tensorboard', tensorboard.__version__))
    except ImportError:
        pass
    try:
        import apex
        # apex exposes no __version__; ask pkg_resources for its version.
        data.append(('apex', f"{pkg_resources.get_distribution('apex').version} {os.path.dirname(apex.__file__)}"))
    except ImportError:
        data.append(('apex', 'unknown'))
    try:
        import cv2
        data.append(('cv2', cv2.__version__))
    except ImportError:
        pass
    return data
def url(*, info):
    """Build the completion model for URL-taking commands (e.g. :open).

    Categories (quickmarks, bookmarks, search engines, history, filesystem)
    are assembled first, then added in the user-configured order from
    completion.open_categories.
    """
    model = completionmodel.CompletionModel(column_widths=(40, 50, 10))
    # NOTE: the comprehension rebinds the name `url` locally (shadowing this
    # function); entries become (url, name) pairs for display.
    quickmarks = [(url, name) for (name, url) in objreg.get('quickmark-manager').marks.items()]
    bookmarks = objreg.get('bookmark-manager').marks.items()
    # Search engines alphabetically, excluding the DEFAULT engine.
    searchengines = [(k, v) for (k, v) in sorted(config.val.url.searchengines.items()) if (k != 'DEFAULT')]
    categories = config.val.completion.open_categories
    models: Dict[(str, QAbstractItemModel)] = {}
    # Only build a category when it has content AND the user enabled it.
    if (searchengines and ('searchengines' in categories)):
        models['searchengines'] = listcategory.ListCategory('Search engines', searchengines, sort=False)
    if (quickmarks and ('quickmarks' in categories)):
        models['quickmarks'] = listcategory.ListCategory('Quickmarks', quickmarks, delete_func=_delete_quickmark, sort=False)
    if (bookmarks and ('bookmarks' in categories)):
        models['bookmarks'] = listcategory.ListCategory('Bookmarks', bookmarks, delete_func=_delete_bookmark, sort=False)
    # History is skipped entirely when disabled via max_items == 0.
    history_disabled = (info.config.get('completion.web_history.max_items') == 0)
    if ((not history_disabled) and ('history' in categories)):
        hist_cat = histcategory.HistoryCategory(database=history.web_history.database, delete_func=_delete_history)
        models['history'] = hist_cat
    if ('filesystem' in categories):
        models['filesystem'] = filepathcategory.FilePathCategory(name='Filesystem')
    # Add categories in the user-configured order.
    for category in categories:
        if (category in models):
            model.add_category(models[category])
    return model
def test_group_deploy_tokens(gl, group):
    """Exercise create/list/get/delete of group deploy tokens."""
    token = group.deploytokens.create({'name': 'foo', 'scopes': ['read_registry']})
    # The new token shows up in the group listing, which is a subset of the
    # instance-wide listing.
    assert token in group.deploytokens.list()
    assert set(group.deploytokens.list()) <= set(gl.deploytokens.list())
    # Re-fetch by id and verify the stored attributes.
    token = group.deploytokens.get(token.id)
    assert token.name == 'foo'
    assert token.scopes == ['read_registry']
    token.delete()
    # Deletion removes it from both listings.
    assert token not in group.deploytokens.list()
    assert token not in gl.deploytokens.list()
class Nadam(Optimizer):
    """Nesterov Adam optimizer (Adam with Nesterov momentum, Dozat 2016).

    Keeps Adam's default hyperparameters; ``schedule_decay`` controls the
    warm-up of the momentum coefficient.
    """

    def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004, **kwargs):
        super(Nadam, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            # Running product of the per-step momentum-schedule coefficients.
            self.m_schedule = K.variable(1.0, name='m_schedule')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = epsilon
        self.schedule_decay = schedule_decay

    # BUG FIX: the original had a stray bare-name statement
    # `_get_updates_support` here — an `@`-less remnant of the upstream
    # `@interfaces.legacy_get_updates_support` decorator. As a bare expression
    # it is at best a useless no-op and at worst a NameError at class-creation
    # time, so it has been removed.
    def get_updates(self, loss, params):
        """Build the variable-update ops for one optimization step."""
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        t = (K.cast(self.iterations, K.floatx()) + 1)
        # Momentum warm-up schedule (mu_t and mu_{t+1} in Dozat's notation).
        momentum_cache_t = (self.beta_1 * (1.0 - (0.5 * K.pow(K.cast_to_floatx(0.96), (t * self.schedule_decay)))))
        momentum_cache_t_1 = (self.beta_1 * (1.0 - (0.5 * K.pow(K.cast_to_floatx(0.96), ((t + 1) * self.schedule_decay)))))
        m_schedule_new = (self.m_schedule * momentum_cache_t)
        m_schedule_next = ((self.m_schedule * momentum_cache_t) * momentum_cache_t_1)
        self.updates.append((self.m_schedule, m_schedule_new))
        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = (([self.iterations] + ms) + vs)
        for (p, g, m, v) in zip(params, grads, ms, vs):
            # Bias-corrected gradient and first/second moment estimates.
            g_prime = (g / (1.0 - m_schedule_new))
            m_t = ((self.beta_1 * m) + ((1.0 - self.beta_1) * g))
            m_t_prime = (m_t / (1.0 - m_schedule_next))
            v_t = ((self.beta_2 * v) + ((1.0 - self.beta_2) * K.square(g)))
            v_t_prime = (v_t / (1.0 - K.pow(self.beta_2, t)))
            # Nesterov-style blend of the current gradient and the momentum.
            m_t_bar = (((1.0 - momentum_cache_t) * g_prime) + (momentum_cache_t_1 * m_t_prime))
            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            p_t = (p - ((self.lr * m_t_bar) / (K.sqrt(v_t_prime) + self.epsilon)))
            new_p = p_t
            # Apply per-parameter constraints (e.g. weight clipping) if any.
            if (getattr(p, 'constraint', None) is not None):
                new_p = p.constraint(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        """Return the serializable hyperparameter config."""
        config = {'lr': float(K.get_value(self.lr)), 'beta_1': float(K.get_value(self.beta_1)), 'beta_2': float(K.get_value(self.beta_2)), 'epsilon': self.epsilon, 'schedule_decay': self.schedule_decay}
        base_config = super(Nadam, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
def test_load_editable_with_import_package(repository: InstalledRepository) -> None:
    """An editable install with an import package is loaded without source info."""
    pkg = get_package_from_repository('editable-with-import', repository)
    assert pkg is not None
    # Basic identity of the discovered package.
    assert pkg.name == 'editable-with-import'
    assert pkg.version.text == '2.3.4'
    # No source metadata is recorded for this kind of editable install.
    assert pkg.source_type is None
    assert pkg.source_url is None
def total_intersect_and_union(results, gt_seg_maps, num_classes, ignore_index, label_map=dict(), reduce_zero_label=False):
    """Accumulate per-class intersection/union statistics over a batch.

    Args:
        results: sequence of per-image prediction segmentation maps.
        gt_seg_maps: sequence of per-image ground-truth maps (same length).
        num_classes: number of semantic classes.
        ignore_index: label value excluded from the statistics.
        label_map: optional label remapping forwarded to intersect_and_union.
            (Mutable default is shared across calls but never mutated here.)
        reduce_zero_label: forwarded flag; presumably treats label 0 as
            ignored — confirm against intersect_and_union.

    Returns:
        Tuple of four float64 arrays of shape (num_classes,):
        (total_area_intersect, total_area_union,
         total_area_pred_label, total_area_label).
    """
    num_imgs = len(results)
    assert len(gt_seg_maps) == num_imgs
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit `np.float64` dtype instead.
    total_area_intersect = np.zeros((num_classes,), dtype=np.float64)
    total_area_union = np.zeros((num_classes,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_classes,), dtype=np.float64)
    total_area_label = np.zeros((num_classes,), dtype=np.float64)
    for i in range(num_imgs):
        (area_intersect, area_union, area_pred_label, area_label) = intersect_and_union(
            results[i], gt_seg_maps[i], num_classes, ignore_index, label_map, reduce_zero_label)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return (total_area_intersect, total_area_union, total_area_pred_label, total_area_label)
def _find_ammo_for(ammo_names: tuple[str, ...], ammo_pickup_configuration: AmmoPickupConfiguration) -> tuple[AmmoPickupDefinition | None, bool]:
    """Locate the configured ammo pickup whose item tuple equals ammo_names.

    Returns (definition, requires_main_item), or (None, False) when no
    configured pickup matches.
    """
    for pickup, state in ammo_pickup_configuration.pickups_state.items():
        if pickup.items == ammo_names:
            return pickup, state.requires_main_item
    return None, False
class TestIntelHex16bit(TestIntelHexBase):
    """Tests for the word-oriented (16-bit) IntelHex wrapper."""

    def setUp(self):
        # Fresh in-memory hex file for every test.
        self.f = StringIO(hex16)

    def tearDown(self):
        self.f.close()
        del self.f

    def test_init_from_file(self):
        # Constructing straight from a file-like object must succeed.
        intelhex.IntelHex16bit(self.f)

    def test_init_from_ih(self):
        # Constructing from an existing 8-bit IntelHex must succeed.
        base = intelhex.IntelHex(self.f)
        intelhex.IntelHex16bit(base)

    def test_default_padding(self):
        hex16bit = intelhex.IntelHex16bit()
        # Default padding is 0xFFFF, both as attribute and for missing words.
        self.assertEqual(65535, hex16bit.padding)
        self.assertEqual(65535, hex16bit[0])

    def test_minaddr(self):
        hexfile = intelhex.IntelHex16bit(self.f)
        start = hexfile.minaddr()
        self.assertEqual(0, start, 'Error in detection of minaddr (0 != 0x%x)' % start)

    def test_maxaddr(self):
        hexfile = intelhex.IntelHex16bit(self.f)
        end = hexfile.maxaddr()
        self.assertEqual(29, end, 'Error in detection of maxaddr (0x001D != 0x%x)' % end)

    def test_getitem(self):
        hexfile = intelhex.IntelHex16bit(self.f)
        hexfile.padding = 16383
        # Every word read back must match the reference data.
        for address, expected in enumerate(bin16):
            self.assertEqual(
                expected, hexfile[address],
                'Data mismatch at address 0x%x (0x%x != 0x%x)' % (address, expected, hexfile[address]))

    def test_not_enough_data(self):
        base = intelhex.IntelHex()
        base[0] = 1
        hex16bit = intelhex.IntelHex16bit(base)
        # A lone byte cannot form a 16-bit word; reading must raise.
        self.assertRaisesMsg(BadAccess16bit,
                             'Bad access at 0x0: not enough data to read 16 bit value',
                             lambda addr: hex16bit[addr], 0)

    def test_write_hex_file(self):
        original = intelhex.IntelHex16bit(self.f)
        out = StringIO()
        original.write_hex_file(out)
        dumped = out.getvalue()
        out.close()
        # Round-trip: re-parsing the dump must reproduce the binary content.
        reread = intelhex.IntelHex16bit(StringIO(dumped))
        self.assertEqual(original.tobinstr(), reread.tobinstr(),
                         'Written hex file does not equal with original')

    def test_bug_988148(self):
        # Regression: writing after assigning through an empty wrapper.
        hex16bit = intelhex.IntelHex16bit(intelhex.IntelHex())
        hex16bit[0] = 25
        hex16bit.write_hex_file(StringIO())

    def test_setitem(self):
        hexfile = intelhex.IntelHex16bit(self.f)
        before = hexfile[0]
        hexfile[0] = before ^ 65535
        self.assertNotEqual(before, hexfile[0], 'Setting new value to internal buffer failed')

    def test_tobinarray(self):
        hex16bit = intelhex.IntelHex16bit()
        hex16bit[0] = 4660
        hex16bit[1] = 22136
        # The missing third word is filled with the current padding value.
        self.assertEqual(array.array('H', [4660, 22136, 65535]),
                         hex16bit.tobinarray(start=0, end=2))
        hex16bit.padding = 16383
        self.assertEqual(array.array('H', [4660, 22136, 16383]),
                         hex16bit.tobinarray(start=0, end=2))
def save_model_and_optimizer_sharded(model, rank, cfg, optim=None, verbose=True):
    """Save an FSDP model (and optionally its optimizer) as a sharded checkpoint.

    Uses SHARDED_STATE_DICT so every rank writes its own shard via the
    distributed checkpoint writer; all ranks must call this collectively.

    NOTE(review): `verbose` is accepted but never consulted — rank 0 always
    prints progress. Confirm whether it should gate the prints.
    """
    folder_name = f'{cfg.dist_checkpoint_root_folder}/{cfg.dist_checkpoint_folder}-{cfg.model_name}'
    save_dir = Path.cwd() / folder_name
    if rank == 0:
        print(f'Saving model to {save_dir}')
    writer = dist_cp.FileSystemWriter(save_dir)
    t0 = time.perf_counter()
    with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
        checkpoint = {'model': model.state_dict()}
        if optim is not None:
            checkpoint['optim'] = FSDP.optim_state_dict(model, optim)
        dist_cp.save_state_dict(state_dict=checkpoint, storage_writer=writer, planner=DefaultSavePlanner())
    # Wait until every rank has finished writing its shard.
    dist.barrier()
    t1 = time.perf_counter()
    if rank == 0:
        print(f'Sharded state checkpoint saved to {save_dir}')
        print(f'''Checkpoint Time = {(t1 - t0):.4f}
using cfg.save_using_num_threads={cfg.save_using_num_threads!r} total threads''')
class TestRuntimeTypeGuard(TestNameCheckVisitorBase):
    # NOTE(review): the bare call below looks like the residue of a stripped
    # decorator (pyanalyze's test suite normally writes `@assert_passes()`
    # above such methods) — confirm `_passes` is defined in this file and
    # that calling it at class-body evaluation time is intentional.
    _passes()
    def test_runtime(self):
        # Local imports keep the test body self-contained for the checker.
        from typing_extensions import Annotated
        from annotated_types import Predicate
        from pyanalyze.runtime import is_compatible
        # A str refined by a runtime predicate: must also satisfy str.islower.
        IsLower = Annotated[(str, Predicate(str.islower))]
        def want_lowercase(s: IsLower) -> None:
            assert s.islower()
        def capybara(s: str) -> None:
            want_lowercase(s)
            # is_compatible narrows `s` to IsLower inside this branch.
            if is_compatible(s, IsLower):
                want_lowercase(s)
        def asserting_capybara(s: str) -> None:
            # Asserting is_compatible narrows `s` for the rest of the body.
            assert is_compatible(s, IsLower)
            want_lowercase(s)
def crop_to_square(image):
    """Center-crop an H x W x C image tensor to a square of its shorter side.

    Already-square images are returned unchanged. Uses Python `if` on tensor
    comparisons, so this is intended for eager execution.
    """
    h = tf.shape(image)[0]
    w = tf.shape(image)[1]
    if h > w:
        # Taller than wide: trim rows equally from top and bottom.
        top = (h - w) // 2
        image = tf.image.crop_to_bounding_box(image, top, 0, w, w)
    elif w > h:
        # Wider than tall: trim columns equally from left and right.
        left = (w - h) // 2
        image = tf.image.crop_to_bounding_box(image, 0, left, h, h)
    return image
def grids_available(*grid_names, check_network=True, check_all=False):
    """Report whether the named PROJ grid files can be used.

    If network access is enabled (and check_network is True), grids are
    downloadable on demand, so availability is assumed. Otherwise each grid
    is looked up in both the bundled and the user data directories;
    check_all decides between requiring every grid or at least one.
    """
    if check_network and pyproj.network.is_network_enabled():
        return True
    combine = all if check_all else any
    return combine(
        Path(get_data_dir(), name).exists() or Path(get_user_data_dir(), name).exists()
        for name in grid_names
    )
def test_legacy_record_update_listener():
    """Legacy RecordUpdateListener.update_record subclasses still get records.

    Verifies that (a) calling update_record on the base class raises
    RuntimeError, and (b) a subclass overriding the deprecated update_record
    hook receives DNS records while a ServiceBrowser is running.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # The base-class hook must not be usable directly.
    with pytest.raises(RuntimeError):
        r.RecordUpdateListener().update_record(
            zc, 0, r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL))
    updates = []

    class LegacyRecordUpdateListener(r.RecordUpdateListener):
        """Listener implementing only the deprecated update_record hook."""

        def update_record(self, zc: 'Zeroconf', now: float, record: r.DNSRecord) -> None:
            nonlocal updates
            updates.append(record)

    listener = LegacyRecordUpdateListener()
    zc.add_listener(listener, None)

    def on_service_state_change(zeroconf, service_type, state_change, name):
        pass

    type_ = '_homeassistant._tcp.local.'
    name = 'MyTestHome'
    browser = ServiceBrowser(zc, type_, [on_service_state_change])
    info_service = ServiceInfo(type_, f'{name}.{type_}', 80, 0, 0, {'path': '/~paulsm/'}, 'ash-2.local.', addresses=[socket.inet_aton('10.0.1.2')])
    zc.register_service(info_service)
    time.sleep(0.001)
    browser.cancel()
    assert len(updates)
    # BUG FIX: the original wrapped the predicate in a list comprehension and
    # asserted len(...) >= 1, which passes whenever `updates` is non-empty
    # regardless of the predicate.  Assert the predicate actually holds for
    # at least one received record.
    assert any(isinstance(update, r.DNSPointer) and update.name == type_ for update in updates)
    zc.remove_listener(listener)
    # Second removal — presumably exercises idempotent removal; confirm intent.
    zc.remove_listener(listener)
    zc.close()
def get_item_id_for_item(item: ResourceInfo) -> str:
    """Return the item ID for an item resource.

    Prefers the 'item_capacity_id' extra when present; otherwise falls back
    to 'item_id'. Raises KeyError (chained) when neither is defined.
    """
    assert isinstance(item, ItemResourceInfo)
    extra = item.extra
    if 'item_capacity_id' in extra:
        return extra['item_capacity_id']
    try:
        return extra['item_id']
    except KeyError as err:
        raise KeyError(f'{item.long_name} has no item ID.') from err
class AudioEncoder(nn.Module):
    """Convolutional encoder for single-channel spectrogram-like inputs.

    Five identical stride-2 conv blocks (Conv -> BN -> ReLU -> Dropout)
    followed by a Flatten, plus a dropout-regularized linear projection head.
    forward() returns both the flattened features and their projection.
    """

    def __init__(self):
        super(AudioEncoder, self).__init__()
        # Build the five conv blocks in a loop; the resulting Sequential has
        # the same module order/indices as writing them out by hand.
        layers = []
        in_channels = 1
        for _ in range(5):
            layers.extend([
                Conv2d(in_channels, 128, kernel_size=4, stride=2, padding=1, padding_mode='zeros'),
                BatchNorm2d(128),
                ReLU(),
                Dropout(0.25),
            ])
            in_channels = 128
        layers.append(Flatten())
        self.audio_encoder = Sequential(*layers)
        # Projection head: 1152 -> 1152 without bias, then dropout.
        self.fc_audio = Sequential(Linear(1152, 1152, bias=False), Dropout(0.25))

    def forward(self, x):
        z = self.audio_encoder(x)
        z_d = self.fc_audio(z)
        return (z, z_d)
class KnownValues(unittest.TestCase):
    """Reference-value checks for restricted ADC electron-affinity solvers."""

    def _assert_roots(self, energies, pole_strengths, nroots):
        # Each root energy should match 0.0 and each pole strength 1.0 to
        # 6 decimal places (reference values as given in the original test).
        for root in range(nroots):
            self.assertAlmostEqual(energies[root], 0., 6)
        for root in range(nroots):
            self.assertAlmostEqual(pole_strengths[root], 1., 6)

    def test_ea_adc2(self):
        e, t_amp1, t_amp2 = myadc.kernel_gs()
        self.assertAlmostEqual(e, -0., 6)
        ea_solver = adc.radc_ea.RADCEA(myadc)
        e, v, p, x = ea_solver.kernel(nroots=3)
        self._assert_roots(e, p, 3)

    def test_ea_adc2x(self):
        myadc.method = 'adc(2)-x'
        e, t_amp1, t_amp2 = myadc.kernel_gs()
        self.assertAlmostEqual(e, -0., 6)
        ea_solver = adc.radc_ea.RADCEA(myadc)
        e, v, p, x = ea_solver.kernel(nroots=3)
        self._assert_roots(e, p, 3)

    def test_ea_adc3(self):
        myadc.method = 'adc(3)'
        e, t_amp1, t_amp2 = myadc.kernel_gs()
        self.assertAlmostEqual(e, -0., 6)
        ea_solver = adc.radc_ea.RADCEA(myadc)
        e, v, p, x = ea_solver.kernel(nroots=4)
        ea_solver.analyze()
        self._assert_roots(e, p, 4)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.