class TestRunnerWrapper():
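    # Thin proxy around a DocTestRunner: attribute access is delegated to the
    # wrapped runner, and run() rewrites each example's source through
    # ``test_template`` before delegating.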
def __init__(self, runner: DocTestRunner):
self._runner = runner
def __getattr__(self, name: str) -> Any:
return getattr(self._runner, name)
def run(self, test: DocTest, *args: Any, **kwargs: Any) -> Any:
for ex in test.examples:
ex.source = test_template.format(test=indent(ex.source, ' ').strip())
        return self._runner.run(test, *args, **kwargs)

def create_rss_feed(doctree_dir: Path, output_dir: Path):
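    # Assemble the RSS 2.0 document by hand and write it into the output
    # directory. Note: the namespace and feed URLs in the template below were
    # truncated in this copy; they are restored to the standard Atom and RSS
    # content-module namespaces and the public peps.python.org feed location
    # (assumed).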
last_build_date = _format_rfc_2822(dt.datetime.now(dt.timezone.utc))
items = '\n'.join(_generate_items(Path(doctree_dir)))
    output = f'''<?xml version='1.0' encoding='UTF-8'?>
<rss xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/" version="2.0">
  <channel>
    <title>Newest Python PEPs</title>
    <link>https://peps.python.org</link>
    <description>{RSS_DESCRIPTION}</description>
    <atom:link href="https://peps.python.org/peps.rss" rel="self"/>
    <docs>https://cyber.harvard.edu/rss/rss.html</docs>
    <language>en</language>
    <lastBuildDate>{last_build_date}</lastBuildDate>
{items}
  </channel>
</rss>
'''
    Path(output_dir, 'peps.rss').write_text(output, encoding='utf-8')

def test_call_invalid_selector(deploy_client: JSONRPCClient) -> None:
(contract_proxy, _) = deploy_rpc_test_contract(deploy_client, 'RpcTest')
address = contract_proxy.address
assert (len(deploy_client.web3.eth.get_code(address)) > 0)
data = decode_hex(get_transaction_data(deploy_client.web3, contract_proxy.abi, 'ret', None))
next_byte = chr((data[0] + 1)).encode()
data_with_wrong_selector = (next_byte + data[1:])
call = deploy_client.web3.eth.call
transaction = {'from': deploy_client.address, 'to': address, 'data': data_with_wrong_selector}
    assert (call(transaction) == b'')

class ExGaussianRV(RandomVariable):
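    # Exponentially modified Gaussian: a Normal(mu, sigma) draw plus an
    # independent Exponential(nu) draw.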
name = 'exgaussian'
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = 'floatX'
_print_name = ('ExGaussian', '\\operatorname{ExGaussian}')
    @classmethod
    def rng_fn(cls, rng, mu, sigma, nu, size=None) -> np.ndarray:
        return np.asarray((rng.normal(mu, sigma, size=size) + rng.exponential(scale=nu, size=size)))

def get_next_arguments(action, type='input'):
    # The three branches differed only in which signature collection they
    # walked; pick the collection once, then split into required/non-required.
    if (type == 'input'):
        collection = action.signature.inputs
    elif (type == 'param'):
        collection = action.signature.parameters
    else:
        collection = action.signature.outputs
    req = []
    non_req = []
    for (k, v) in collection.items():
        if (not v.has_default()):
            req.append([k, v.qiime_type])
        else:
            non_req.append([('.' + k), v.qiime_type])
    return (req, non_req)

class BackboneEncoder(Module):
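    # IR(-SE) ResNet encoder: maps an input image through the backbone, then
    # predicts n_styles latent style vectors, one GradualStyleBlock per style.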
def __init__(self, num_layers, mode='ir', n_styles=18, opts=None):
super(BackboneEncoder, self).__init__()
assert (num_layers in [50, 100, 152]), 'num_layers should be 50,100, or 152'
assert (mode in ['ir', 'ir_se']), 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if (mode == 'ir'):
unit_module = bottleneck_IR
elif (mode == 'ir_se'):
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
for i in range(self.style_count):
style = GradualStyleBlock(512, 512, 16)
self.styles.append(style)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
latents = []
for j in range(self.style_count):
latents.append(self.styles[j](x))
out = torch.stack(latents, dim=1)
        return out

class UseFunctionTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.project = testutils.sample_project()
self.mod1 = testutils.create_module(self.project, 'mod1')
self.mod2 = testutils.create_module(self.project, 'mod2')
def tearDown(self):
testutils.remove_project(self.project)
super().tearDown()
def test_simple_case(self):
        code = dedent('def f():\n    pass\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual(code, self.mod1.read())
def test_simple_function(self):
        code = dedent('def f(p):\n    print(p)\nprint(1)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    print(p)\nf(1)\n'), self.mod1.read())
def test_simple_function2(self):
        code = dedent('def f(p):\n    print(p + 1)\nprint(1 + 1)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    print(p + 1)\nf(1)\n'), self.mod1.read())
def test_functions_with_multiple_statements(self):
        code = dedent('def f(p):\n    r = p + 1\n    print(r)\nr = 2 + 1\nprint(r)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    r = p + 1\n    print(r)\nf(2)\n'), self.mod1.read())
def test_returning(self):
        code = dedent('def f(p):\n    return p + 1\nr = 2 + 1\nprint(r)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    return p + 1\nr = f(2)\nprint(r)\n'), self.mod1.read())
def test_returning_a_single_expression(self):
        code = dedent('def f(p):\n    return p + 1\nprint(2 + 1)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    return p + 1\nprint(f(2))\n'), self.mod1.read())
def test_occurrences_in_other_modules(self):
        code = dedent('def f(p):\n    return p + 1\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.mod2.write('print(2 + 1)\n')
self.project.do(user.get_changes())
        self.assertEqual(dedent('import mod1\nprint(mod1.f(2))\n'), self.mod2.read())
def test_when_performing_on_non_functions(self):
code = 'var = 1\n'
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.rindex('var'))
def test_differing_in_the_inner_temp_names(self):
        code = dedent('def f(p):\n    a = p + 1\n    print(a)\nb = 2 + 1\nprint(b)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    a = p + 1\n    print(a)\nf(2)\n'), self.mod1.read())
def xxx_test_being_a_bit_more_intelligent_when_returning_assigneds(self):
        code = dedent('def f(p):\n    a = p + 1\n    return a\nvar = 2 + 1\nprint(var)\n')
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
        self.assertEqual(dedent('def f(p):\n    a = p + 1\n    return a\nvar = f(2)\nprint(var)\n'), self.mod1.read())
def test_exception_when_performing_a_function_with_yield(self):
        code = dedent('def func():\n    yield 1\n')
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.index('func'))
def test_exception_when_performing_a_function_two_returns(self):
        code = dedent('def func():\n    return 1\n    return 2\n')
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.index('func'))
def test_exception_when_returns_is_not_the_last_statement(self):
        code = dedent('def func():\n    return 2\n    a = 1\n')
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
            UseFunction(self.project, self.mod1, code.index('func'))

class GraphSearchUtils():
def __init__(self, model: tf.Graph, start_op_names: Union[(str, List[str])], output_op_names: Union[(str, List[str])]):
if isinstance(start_op_names, str):
start_op_names = [start_op_names]
if isinstance(output_op_names, str):
output_op_names = [output_op_names]
self._connected_graph = ConnectedGraph(model, start_op_names, output_op_names)
def find_and_replace_relu6_with_relu(self, sess: tf.compat.v1.Session) -> tf.compat.v1.Session:
for op in self._connected_graph.get_all_ops().values():
if (op.type in ['Relu6']):
ReluUtils.replace_relu6_with_relu(sess, op.get_module())
after_relu_replace_sess = save_and_load_graph('./replace_relu6_with_relu', sess)
return after_relu_replace_sess
    @staticmethod
    def find_downstream_layer_groups_to_scale(op, layer_groups, visited_nodes, current_group=None):
if (not current_group):
current_group = []
if (op in visited_nodes):
return
visited_nodes.append(op)
logger.debug('Visiting node: {%s}', op.dotted_name)
if (op.type in ['Conv2D', 'DepthwiseConv2dNative']):
current_group.append(op)
if (not (op.type in ['Conv2D', 'DepthwiseConv2dNative', 'Relu', 'PReLU', 'Pad', 'Identity'])):
if ((len(current_group) > 1) and (current_group not in layer_groups)):
layer_groups.append(current_group)
node_set = [op.dotted_name for op in current_group]
logger.debug('Added new set of nodes: {%s}', node_set)
current_group = []
if op.output:
for consumer in op.output.consumers:
GraphSearchUtils.find_downstream_layer_groups_to_scale(consumer, layer_groups, visited_nodes, current_group)
if ((len(current_group) > 1) and (current_group not in layer_groups)):
layer_groups.append(current_group)
node_set = [op.dotted_name for op in current_group]
logger.debug('Added new set of nodes: {%s}', node_set)
def find_layer_groups_to_scale_as_conn_ops(self) -> List[List[Op]]:
input_nodes = []
for op in self._connected_graph.get_all_ops().values():
if (op.inputs and op.inputs[0].is_model_input):
input_nodes.append(op)
layer_groups = []
visited_nodes = []
for op in input_nodes:
self.find_downstream_layer_groups_to_scale(op=op, layer_groups=layer_groups, visited_nodes=visited_nodes)
return layer_groups
def find_layer_groups_to_scale(self):
layer_groups_as_conn_graph_ops = self.find_layer_groups_to_scale_as_conn_ops()
(layer_groups_as_tf_ops, tf_op_to_conn_graph_op_map) = self.convert_conn_graph_ops_to_tf_op(layer_groups_as_conn_graph_ops)
return (tf_op_to_conn_graph_op_map, layer_groups_as_tf_ops)
    @staticmethod
    def convert_conn_graph_ops_to_tf_op(op_groups: List[List[Op]]) -> Tuple[(List[List[tf.Operation]], Dict[(tf.Operation, Op)])]:
tf_op_to_conn_graph_op_map = {}
layer_groups_as_tf_ops = []
for ops in op_groups:
curr_group = []
for op in ops:
tf_op_to_conn_graph_op_map[op.get_module()] = op
curr_group.append(op.get_module())
layer_groups_as_tf_ops.append(curr_group)
return (layer_groups_as_tf_ops, tf_op_to_conn_graph_op_map)
    @staticmethod
    def convert_layer_group_to_cls_sets(layer_group: List[tf.Operation]):
def convert_to_cls_layer_type(layer: tf.Operation) -> Tuple[(ClsLayerType, tf.Operation)]:
if (layer.type in ['Conv', 'Conv2D', 'ConvTranspose', 'Conv2DTranspose']):
layer_type = ClsLayerType.Conv
elif (layer.type == 'DepthwiseConv2dNative'):
layer_type = ClsLayerType.DepthwiseConv
else:
layer_type = ClsLayerType.Unsupported
return (layer_type, layer)
def get_next_layer() -> Union[Tuple[(ClsLayerType, Union[(tf.Operation, None)])]]:
if (not layer_group):
return (ClsLayerType.Unsupported, None)
layer = layer_group.pop(0)
return convert_to_cls_layer_type(layer)
cls_sets = []
first_layer_to_scale = (ClsLayerType.Unsupported, None)
while layer_group:
while (layer_group and (first_layer_to_scale[0] is ClsLayerType.Unsupported)):
first_layer_to_scale = get_next_layer()
if (first_layer_to_scale[0] is ClsLayerType.Unsupported):
logger.info('Layer %s is not supported. Ignoring for cls', first_layer_to_scale[1])
second_layer_to_scale = get_next_layer()
if (first_layer_to_scale[0] == ClsLayerType.Conv):
if (second_layer_to_scale[0] == ClsLayerType.Conv):
cls_sets.append((first_layer_to_scale[1], second_layer_to_scale[1]))
first_layer_to_scale = second_layer_to_scale
elif (second_layer_to_scale[0] == ClsLayerType.DepthwiseConv):
if layer_group:
third_layer_to_scale = convert_to_cls_layer_type(layer_group[0])
if (third_layer_to_scale[0] == ClsLayerType.Conv):
cls_sets.append((first_layer_to_scale[1], second_layer_to_scale[1], third_layer_to_scale[1]))
first_layer_to_scale = get_next_layer()
else:
first_layer_to_scale = second_layer_to_scale
else:
logger.info('Layer %s is not supported. Ignoring for cls', second_layer_to_scale[1])
first_layer_to_scale = (ClsLayerType.Unsupported, None)
elif (first_layer_to_scale[0] == ClsLayerType.DepthwiseConv):
if (second_layer_to_scale[0] == ClsLayerType.Conv):
cls_sets.append((first_layer_to_scale[1], second_layer_to_scale[1]))
first_layer_to_scale = second_layer_to_scale
else:
logger.info('Layer %s is not supported. Ignoring for cls', first_layer_to_scale[1])
first_layer_to_scale = second_layer_to_scale
return cls_sets
    @staticmethod
    def is_relu_activation_present_in_cls_sets(cls_sets: List[ClsSet], tf_op_to_conn_graph_op_map: Dict) -> List[bool]:
is_relu_activation_in_cls_sets = []
for cls_set in cls_sets:
cls_set = cls_set[:(- 1)]
is_relu_activation_in_cls_set = ()
for conv_op in cls_set:
conn_graph_conv_op = tf_op_to_conn_graph_op_map[conv_op]
is_relu_activation_in_cls_set += (ReluUtils.does_conv_have_relu_activation(conn_graph_conv_op),)
if (len(is_relu_activation_in_cls_set) == 1):
is_relu_activation_in_cls_set = is_relu_activation_in_cls_set[0]
is_relu_activation_in_cls_sets.append(is_relu_activation_in_cls_set)
return is_relu_activation_in_cls_sets
    @staticmethod
    def map_op_names_to_ops(sess: tf.compat.v1.Session) -> Dict[(str, tf.Operation)]:
tf_names_op_dict = {}
with sess.graph.as_default():
op_list = sess.graph.get_operations()
for op in op_list:
if (op.type in ['Conv2D', 'DepthwiseConv2dNative', 'FusedBatchNormV3']):
tf_names_op_dict[op.name] = op
        return tf_names_op_dict

class Task(BaseModel):
testcase_name: str
task_mode: str = 'normal'
custom_strategies: List[List[Any]] = []
parallel_workers: int = multiprocessing.cpu_count()
api_addresses: List[str] = []
api_timeout: int = 30000
net_ordering_evaluation_mode: int = 2
    droute_end_iter: int = (- 1)

class BasicLayer(nn.Module):
def __init__(self, dim, out_dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=nn.LayerNorm, upsample=None):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.blocks = nn.ModuleList([SwinTransformerBlock(dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, shift_size=(0 if ((i % 2) == 0) else (window_size // 2)), mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop, attn_drop=attn_drop, drop_path=(drop_path[i] if isinstance(drop_path, list) else drop_path), norm_layer=norm_layer) for i in range(depth)])
if (upsample is not None):
self.upsample = upsample(input_resolution, dim=dim, out_dim=out_dim, norm_layer=norm_layer)
else:
self.upsample = None
def forward(self, x):
for (_, blk) in enumerate(self.blocks):
x = blk(x)
if (self.upsample is not None):
x = self.upsample(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if (self.upsample is not None):
flops += self.upsample.flops()
return flops
def update_resolution(self, H, W):
self.input_resolution = (H, W)
for (_, blk) in enumerate(self.blocks):
blk.input_resolution = (H, W)
blk.update_mask()
if (self.upsample is not None):
            self.upsample.input_resolution = (H, W)

@pytest.mark.parametrize('proc_name', ['s1', 's2', 's3'])
def test_terminate_no_pid(tcp_port, proc_name, xprocess):
class Starter(ProcessStarter):
pattern = 'started'
args = [sys.executable, server_path, tcp_port]
xprocess.ensure(proc_name, Starter)
info = xprocess.getinfo(proc_name)
(pid, info.pid) = (info.pid, None)
assert (info.terminate() == 0)
info.pid = pid
    info.terminate()

class TransformerLayer(nn.Module):
def __init__(self, args):
super(TransformerLayer, self).__init__()
self.self_attn = MultiHeadedAttention(args.hidden_size, args.heads_num, args.dropout)
self.dropout_1 = nn.Dropout(args.dropout)
self.layer_norm_1 = LayerNorm(args.hidden_size)
self.feed_forward = PositionwiseFeedForward(args.hidden_size, args.feedforward_size)
self.dropout_2 = nn.Dropout(args.dropout)
self.layer_norm_2 = LayerNorm(args.hidden_size)
def forward(self, hidden, mask, similarity=None):
inter = self.dropout_1(self.self_attn(hidden, hidden, hidden, mask, similarity))
inter = self.layer_norm_1((inter + hidden))
output = self.dropout_2(self.feed_forward(inter))
output = self.layer_norm_2((output + inter))
        return output

def make_loader(split, dst_cls=DatasetAllTasks, repeat=None, is_training=True, unlabeled=False, transforms_tr=None, transforms_val=None):
if is_training:
dst = dst_cls(split=split, repeat=repeat, unlabeled=unlabeled, transform=transforms_tr, task=args.task, num_cls=config.num_cls, is_2d=True)
return DataLoader(dst, batch_size=config.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, worker_init_fn=seed_worker, drop_last=True)
else:
dst = dst_cls(split=split, is_val=True, task=args.task, num_cls=config.num_cls, transform=transforms_val, is_2d=True)
        return DataLoader(dst, pin_memory=True, num_workers=1, shuffle=False)

def test_fixture_order_respects_scope(pytester: Pytester) -> None:
    pytester.makepyfile("\n        import pytest\n\n        data = {}\n\n        @pytest.fixture(scope='module')\n        def clean_data():\n            data.clear()\n\n        @pytest.fixture(autouse=True)\n        def add_data():\n            data.update(value=True)\n\n        @pytest.mark.usefixtures('clean_data')\n        def test_value():\n            assert data.get('value')\n        ")
result = pytester.runpytest()
    assert (result.ret == 0)

def generate_all_rotation_angles(increment):
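    # Enumerate all n**3 Euler-angle triples (phi, theta, psi) on a uniform
    # grid with spacing `increment` over [0, 2*pi) per axis.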
n = round(((2 * math.pi) / increment))
print(n, 'rotations along each axis')
print('generating template rotations...')
angles = []
phi = 0.0
theta = 0.0
psi = 0.0
for i in range(n):
phi = (i * increment)
for j in range(n):
theta = (j * increment)
for k in range(n):
psi = (k * increment)
angles.append((phi, theta, psi))
    return angles

class TestCommonAncestor():
def test_has_ancestor(self, tmp_path: Path) -> None:
fn1 = (((tmp_path / 'foo') / 'bar') / 'test_1.py')
fn1.parent.mkdir(parents=True)
fn1.touch()
fn2 = (((tmp_path / 'foo') / 'zaz') / 'test_2.py')
fn2.parent.mkdir(parents=True)
fn2.touch()
assert (get_common_ancestor([fn1, fn2]) == (tmp_path / 'foo'))
assert (get_common_ancestor([fn1.parent, fn2]) == (tmp_path / 'foo'))
assert (get_common_ancestor([fn1.parent, fn2.parent]) == (tmp_path / 'foo'))
assert (get_common_ancestor([fn1, fn2.parent]) == (tmp_path / 'foo'))
def test_single_dir(self, tmp_path: Path) -> None:
assert (get_common_ancestor([tmp_path]) == tmp_path)
def test_single_file(self, tmp_path: Path) -> None:
fn = (tmp_path / 'foo.py')
fn.touch()
        assert (get_common_ancestor([fn]) == tmp_path)

class S3():
def __init__(self, session):
self._session = session
self._s3 = session.client('s3')
def cp(self, target_path, bucket, key):
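        # Directories are packed into a .tar.gz in a temporary dir before
        # upload; plain files are uploaded as-is. Returns the s3:// URL.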
target_basename = os.path.basename(target_path)
if os.path.isdir(target_path):
tmpdir = tempfile.mkdtemp(prefix='petctl_')
tar_basename = f'{target_basename}.tar.gz'
tar_file = os.path.join(tmpdir, tar_basename)
log.info(f'Compressing {target_path} into {tar_basename}')
with tar.open(tar_file, 'x:gz') as f:
f.add(target_path, arcname='', recursive=True)
dest_key = f'{key}/{tar_basename}'
target_file = tar_file
else:
tmpdir = None
dest_key = f'{key}/{target_basename}'
target_file = target_path
log.info(f'Uploading {target_file} to s3://{bucket}/{dest_key}')
self._s3.upload_file(target_file, bucket, dest_key)
if tmpdir:
log.info(f'Deleting tmp dir: {tmpdir}')
shutil.rmtree(tmpdir)
        return f's3://{bucket}/{dest_key}'

class RotatedDecoder(LatticeDecoder):
encoder_type = XXZZQubit
syndrome_graph_keys = ['X', 'Z']
def _params_validation(self):
super()._params_validation()
if isinstance(self.params['d'], Number):
d = int(self.params['d'])
self.params['d'] = (d, d)
if (len(self.params['d']) != 2):
raise LatticeError('Please provide a code height and width in parameter d: e.g. (3,7).')
dh = self.params['d'][constants.DH]
dw = self.params['d'][constants.DW]
if ((dh % 2) != 1):
raise LatticeError('Surface code height must be odd!')
if ((dw % 2) != 1):
raise LatticeError('Surface code width must be odd!')
def _make_syndrome_graph(self) -> None:
start_nodes = {'Z': (0.5, 0.5), 'X': (0.5, 1.5)}
for syndrome_graph_key in self.syndrome_graph_keys:
for t in range(0, self.params['T']):
start_node = start_nodes[syndrome_graph_key]
node_label = ((t,) + start_node)
self.node_map[syndrome_graph_key][node_label] = self.S[syndrome_graph_key].add_node(node_label)
self._populate_syndrome_graph(((t,) + start_node), t, [], syndrome_graph_key, 1)
syndrome_nodes_t0 = [(t, x, y) for (t, x, y) in self.S[syndrome_graph_key].nodes() if (t == 0)]
for node in syndrome_nodes_t0:
space_label = (node[1], node[2])
for t in range(0, (self.params['T'] - 1)):
self.S[syndrome_graph_key].add_edge(self.node_map[syndrome_graph_key][((t,) + space_label)], self.node_map[syndrome_graph_key][(((t + 1),) + space_label)], 1)
def _populate_syndrome_graph(self, current_node: TQubit, t: int, visited_nodes: List[TQubit], syndrome_graph_key: str, edge_weight: int=1) -> None:
visited_nodes.append(current_node)
neighbors = []
i = current_node[1]
j = current_node[2]
neighbors.append(((i - 1), (j - 1)))
neighbors.append(((i + 1), (j - 1)))
neighbors.append(((i - 1), (j + 1)))
neighbors.append(((i + 1), (j + 1)))
normal_neighbors = [n for n in neighbors if (self._valid_syndrome(n, syndrome_graph_key) and ((t, n[0], n[1]) not in visited_nodes))]
virtual_neighbors = [n for n in neighbors if ((((- 1), n[0], n[1]) in self.virtual[syndrome_graph_key]) and (((- 1), n[0], n[1]) not in visited_nodes))]
if ((not normal_neighbors) and (not virtual_neighbors)):
return
for target in normal_neighbors:
target_node = ((t,) + target)
if (target_node not in self.S[syndrome_graph_key].nodes()):
self.node_map[syndrome_graph_key][target_node] = self.S[syndrome_graph_key].add_node(target_node)
self.S[syndrome_graph_key].add_edge(self.node_map[syndrome_graph_key][current_node], self.node_map[syndrome_graph_key][target_node], edge_weight)
for target in virtual_neighbors:
target_node = (((- 1),) + target)
if (target_node not in self.S[syndrome_graph_key].nodes()):
self.node_map[syndrome_graph_key][target_node] = self.S[syndrome_graph_key].add_node(target_node)
self.S[syndrome_graph_key].add_edge(self.node_map[syndrome_graph_key][current_node], self.node_map[syndrome_graph_key][target_node], edge_weight)
for target in normal_neighbors:
self._populate_syndrome_graph(((t,) + target), t, visited_nodes, syndrome_graph_key, 1)
for target in virtual_neighbors:
self._populate_syndrome_graph((((- 1),) + target), t, visited_nodes, syndrome_graph_key, 1)
def _valid_syndrome(self, node: TQubitLoc, syndrome_graph_key: str) -> bool:
i = node[0]
j = node[1]
dh = self.params['d'][constants.DH]
dw = self.params['d'][constants.DW]
if (syndrome_graph_key == 'Z'):
if ((i > 0) and (i < (dh - 1)) and (j < dw) and (j > (- 1))):
return True
else:
return False
elif (syndrome_graph_key == 'X'):
if ((j > 0) and (j < (dw - 1)) and (i < dh) and (i > (- 1))):
return True
else:
return False
else:
raise ValueError('Please enter a valid syndrome_graph_key: X or Z')
def _specify_virtual(self) -> Dict[(str, List[TQubit])]:
virtual: Dict[(str, List[TQubit])] = {}
virtual['X'] = []
virtual['Z'] = []
dh = self.params['d'][constants.DH]
dw = self.params['d'][constants.DW]
for j in range(0, dw, 2):
virtual['Z'].append(((- 1), (- 0.5), (j - 0.5)))
virtual['Z'].append(((- 1), (dh - 0.5), (j + 0.5)))
for j in range(0, dh, 2):
virtual['X'].append(((- 1), (j + 0.5), (- 0.5)))
virtual['X'].append(((- 1), (j - 0.5), (dw - 0.5)))
return virtual
def _is_crossing_readout_path(self, match: Tuple[(TQubit, TQubit)], logical_readout_type: str) -> bool:
(source, target) = match
if (logical_readout_type == 'Z'):
return (((source[0] == (- 1)) and (source[1] == (- 0.5))) or ((target[0] == (- 1)) and (target[1] == (- 0.5))))
elif (logical_readout_type == 'X'):
return (((source[0] == (- 1)) and (source[2] == (- 0.5))) or ((target[0] == (- 1)) and (target[2] == (- 0.5))))
else:
            raise ValueError('Please enter a valid logical_readout_type (X/Z).')

class TypeCheckSuite(DataSuite):
files = typecheck_files
def run_case(self, testcase: DataDrivenTestCase) -> None:
if ((lxml is None) and (os.path.basename(testcase.file) == 'check-reports.test')):
pytest.skip('Cannot import lxml. Is it installed?')
incremental = (('incremental' in testcase.name.lower()) or ('incremental' in testcase.file) or ('serialize' in testcase.file))
if incremental:
num_steps = max(([2] + list(testcase.output2.keys())))
for (dn, dirs, files) in os.walk(os.curdir):
for file in files:
m = re.search('\\.([2-9])$', file)
if (m and (int(m.group(1)) > num_steps)):
raise ValueError('Output file {} exists though test case only has {} runs'.format(file, num_steps))
steps = testcase.find_steps()
for step in range(1, (num_steps + 1)):
idx = (step - 2)
ops = (steps[idx] if ((idx < len(steps)) and (idx >= 0)) else [])
self.run_case_once(testcase, ops, step)
else:
self.run_case_once(testcase)
def _sort_output_if_needed(self, testcase: DataDrivenTestCase, a: list[str]) -> None:
idx = testcase.output_inline_start
if ((not testcase.files) or (idx == len(testcase.output))):
return
def _filename(_msg: str) -> str:
return _msg.partition(':')[0]
file_weights = {file: idx for (idx, file) in enumerate((_filename(msg) for msg in a))}
testcase.output[idx:] = sorted(testcase.output[idx:], key=(lambda msg: file_weights.get(_filename(msg), (- 1))))
def run_case_once(self, testcase: DataDrivenTestCase, operations: (list[FileOperation] | None)=None, incremental_step: int=0) -> None:
if (operations is None):
operations = []
original_program_text = '\n'.join(testcase.input)
module_data = self.parse_module(original_program_text, incremental_step)
for (file, _) in testcase.files:
module = module_from_path(file)
if (module.endswith('_plugin') and (module in sys.modules)):
del sys.modules[module]
if ((incremental_step == 0) or (incremental_step == 1)):
for (module_name, program_path, program_text) in module_data:
if (module_name == '__main__'):
with open(program_path, 'w', encoding='utf8') as f:
f.write(program_text)
break
elif (incremental_step > 1):
perform_file_operations(operations)
options = parse_options(original_program_text, testcase, incremental_step)
options.use_builtins_fixtures = True
options.show_traceback = True
if ('columns' in testcase.file):
options.show_column_numbers = True
if ('errorcodes' in testcase.file):
options.hide_error_codes = False
if ('abstract' not in testcase.file):
options.allow_empty_bodies = (not testcase.name.endswith('_no_empty'))
if ('lowercase' not in testcase.file):
options.force_uppercase_builtins = True
if ('union-error' not in testcase.file):
options.force_union_syntax = True
if (incremental_step and options.incremental):
options.incremental = True
else:
options.incremental = False
if (not testcase.writescache):
options.cache_dir = os.devnull
sources = []
for (module_name, program_path, program_text) in module_data:
sources.append(BuildSource(program_path, module_name, (None if incremental_step else program_text)))
plugin_dir = os.path.join(test_data_prefix, 'plugins')
sys.path.insert(0, plugin_dir)
res = None
try:
res = build.build(sources=sources, options=options, alt_lib_path=test_temp_dir)
a = res.errors
except CompileError as e:
a = e.messages
finally:
assert (sys.path[0] == plugin_dir)
del sys.path[0]
if testcase.normalize_output:
a = normalize_error_messages(a)
if (incremental_step < 2):
if (incremental_step == 1):
msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
else:
assert (incremental_step == 0)
msg = 'Unexpected type checker output ({}, line {})'
self._sort_output_if_needed(testcase, a)
output = testcase.output
else:
msg = (f'Unexpected type checker output in incremental, run {incremental_step}' + ' ({}, line {})')
output = testcase.output2.get(incremental_step, [])
if ((output != a) and testcase.config.getoption('--update-data', False)):
update_testcase_output(testcase, a, incremental_step=incremental_step)
assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
if res:
if (options.cache_dir != os.devnull):
self.verify_cache(module_data, res.errors, res.manager, res.graph)
name = 'targets'
if incremental_step:
name += str((incremental_step + 1))
expected = testcase.expected_fine_grained_targets.get((incremental_step + 1))
actual = [target for (module, target) in res.manager.processed_targets if (module in testcase.test_modules)]
if (expected is not None):
assert_target_equivalence(name, expected, actual)
if (incremental_step > 1):
suffix = ('' if (incremental_step == 2) else str((incremental_step - 1)))
expected_rechecked = testcase.expected_rechecked_modules.get((incremental_step - 1))
if (expected_rechecked is not None):
assert_module_equivalence(('rechecked' + suffix), expected_rechecked, res.manager.rechecked_modules)
expected_stale = testcase.expected_stale_modules.get((incremental_step - 1))
if (expected_stale is not None):
assert_module_equivalence(('stale' + suffix), expected_stale, res.manager.stale_modules)
if testcase.output_files:
check_test_output_files(testcase, incremental_step, strip_prefix='tmp/')
def verify_cache(self, module_data: list[tuple[(str, str, str)]], a: list[str], manager: build.BuildManager, graph: Graph) -> None:
error_paths = self.find_error_message_paths(a)
busted_paths = {m.path for (id, m) in manager.modules.items() if graph[id].transitive_error}
modules = self.find_module_files(manager)
modules.update({module_name: path for (module_name, path, text) in module_data})
missing_paths = self.find_missing_cache_files(modules, manager)
assert (error_paths or (not busted_paths)), 'Some modules reported error despite no errors'
if (not (missing_paths == busted_paths)):
raise AssertionError(f'cache data discrepancy {missing_paths} != {busted_paths}')
assert os.path.isfile(os.path.join(manager.options.cache_dir, '.gitignore'))
cachedir_tag = os.path.join(manager.options.cache_dir, 'CACHEDIR.TAG')
assert os.path.isfile(cachedir_tag)
with open(cachedir_tag) as f:
assert f.read().startswith('Signature: 8a477f597d28d172789fbc55')
def find_error_message_paths(self, a: list[str]) -> set[str]:
hits = set()
for line in a:
m = re.match('([^\\s:]+):(\\d+:)?(\\d+:)? (error|warning|note):', line)
if m:
p = m.group(1)
hits.add(p)
return hits
def find_module_files(self, manager: build.BuildManager) -> dict[(str, str)]:
return {id: module.path for (id, module) in manager.modules.items()}
def find_missing_cache_files(self, modules: dict[(str, str)], manager: build.BuildManager) -> set[str]:
ignore_errors = True
missing = {}
for (id, path) in modules.items():
meta = build.find_cache_meta(id, path, manager)
if (not build.validate_meta(meta, id, path, ignore_errors, manager)):
missing[id] = path
return set(missing.values())
def parse_module(self, program_text: str, incremental_step: int=0) -> list[tuple[(str, str, str)]]:
m = re.search('# cmd: mypy -m ([a-zA-Z0-9_. ]+)$', program_text, flags=re.MULTILINE)
if (incremental_step > 1):
alt_regex = f'# cmd{incremental_step}: mypy -m ([a-zA-Z0-9_. ]+)$'
alt_m = re.search(alt_regex, program_text, flags=re.MULTILINE)
if (alt_m is not None):
m = alt_m
if m:
module_names = m.group(1)
out = []
search_paths = SearchPaths((test_temp_dir,), (), (), ())
cache = FindModuleCache(search_paths, fscache=None, options=None)
for module_name in module_names.split(' '):
path = cache.find_module(module_name)
assert isinstance(path, str), f"Can't find ad hoc case file: {module_name}"
with open(path, encoding='utf8') as f:
program_text = f.read()
out.append((module_name, path, program_text))
return out
else:
            return [('__main__', 'main', program_text)]

def validate_sort_fields(sort_fields):
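    # A leading '-' marks a field for descending order. Returns a plain
    # attrgetter when no field is descending, otherwise a Key class suitable
    # for sorted(key=...).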
descending = set()
def sort_order_filter(name):
if name.startswith('-'):
name = name[1:]
descending.add(name)
return name
sort_fields = validate_field_list(sort_fields, name_filter=sort_order_filter)
log.debug(('Sorting order is: %s' % ', '.join([(('-' if (i in descending) else '') + i) for i in sort_fields])))
if (not descending):
return operator.attrgetter(*tuple(sort_fields))
class Key(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
for field in sort_fields:
(lhs, rhs) = (getattr(self.obj, field), getattr(other.obj, field))
if (lhs == rhs):
continue
return ((rhs < lhs) if (field in descending) else (lhs < rhs))
return False
    return Key

class TestHarness(Component):
def construct(s, Type, q, src_msgs, sink_msgs, src_interval, sink_interval):
s.src = TestSrcCL(Type, src_msgs, interval_delay=src_interval)
s.q = q
s.sink = TestSinkCL(Type, sink_msgs, interval_delay=sink_interval)
connect(s.src.send, s.q.enq)
connect(s.sink.recv, s.q.deq)
def done(s):
return (s.src.done() and s.sink.done())
def line_trace(s):
        return ((((s.src.line_trace() + ' ') + s.q.line_trace()) + ' ') + s.sink.line_trace())

def create_dataset(input_folder: str, output_folder: str, target_transform):
dataset = DSprites(root=input_folder, target_transform=target_transform)
mapper = DSpritesMapper(dataset, output_path=output_folder)
loader = DataLoader(mapper, num_workers=8, batch_size=1, collate_fn=(lambda x: x[0]))
with tqdm(total=len(dataset)) as progress_bar:
for _ in loader:
            progress_bar.update(1)

def _add_variable_to_netcdf_file(nc, var_name, var_info):
v = nc.createVariable(var_name, var_info['data'].dtype.str[1:], dimensions=var_info['dim_labels'], fill_value=var_info.get('fill_value'))
v[:] = var_info['data']
for (attr_key, attr_val) in var_info['attrs'].items():
if isinstance(attr_val, (int, float)):
attr_val = v.dtype.type(attr_val)
        setattr(v, attr_key, attr_val)

def groups_target(tmp_path):
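    # Create five files with strictly increasing mtimes: two sources older
    # than the target, the target itself, and two sources newer than it.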
filenames = ['older.c', 'older.h', 'target.o', 'newer.c', 'newer.h']
paths = [(tmp_path / name) for name in filenames]
for (mtime, path) in enumerate(paths):
path.write_text('', encoding='utf-8')
os.utime(path, (mtime, mtime))
    return types.SimpleNamespace(older=paths[:2], target=paths[2], newer=paths[3:])

class Continuation(Model):
project = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='+', verbose_name=_('Project'), help_text=_('The project for this continuation.'))
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='+', verbose_name=_('User'), help_text=_('The user for this continuation.'))
page = models.ForeignKey(Page, null=True, on_delete=models.CASCADE, related_name='+', verbose_name=_('Page'), help_text=_('The page for this continuation.'))
class Meta():
ordering = ('user', 'project')
verbose_name = _('Continuation')
verbose_name_plural = _('Continuations')
def __str__(self):
        return f'{self.project}/{self.user}/{self.page}'

class Tpattern(TestCase):
def test_empty(self):
self.assertEqual(util.pattern(''), '')
def test_basic(self):
self.assertEqual(util.pattern('<title>'), 'Title')
def test_basic_nocap(self):
self.assertEqual(util.pattern('<title>', False), 'title')
def test_internal(self):
self.assertEqual(util.pattern('<~plays>'), 'Plays')
def test_tied(self):
self.assertEqual(util.pattern('<~title~album>'), 'Title - Album')
def test_unknown(self):
self.assertEqual(util.pattern('<foobarbaz>'), 'Foobarbaz')
def test_condition(self):
self.assertEqual(util.pattern('<~year|<~year> - <album>|<album>>'), 'Year - Album')
def test_escape(self):
self.assertEqual(util.pattern('\\<i\\><&>\\</i\\>', esc=True), '<i>&</i>')
def test_invalid(self):
self.assertEqual(util.pattern('<date'), '')
util.pattern('<d\\')
def test_complex_condition(self):
self.assertEqual(util.pattern('<#(bitrate \\> 150)|HQ|LQ>'), 'LQ')
def test_escape_condition(self):
        self.assertEqual(util.pattern('<~filename=/\\/adsad\\/sadads/|BLA|BLU>'), 'BLU')

class KnownValues(unittest.TestCase):
def test_orth(self):
numpy.random.seed(10)
n = 100
a = numpy.random.random((n, n))
s = numpy.dot(a.T, a)
c = orth.lowdin(s)
self.assertTrue(numpy.allclose(reduce(numpy.dot, (c.T, s, c)), numpy.eye(n)))
x1 = numpy.dot(a, c)
x2 = orth.vec_lowdin(a)
d = numpy.dot(x1.T, x2)
d[numpy.diag_indices(n)] = 0
self.assertAlmostEqual(numpy.linalg.norm(d), 0, 9)
self.assertAlmostEqual(numpy.linalg.norm(c), 36., 9)
self.assertAlmostEqual(abs(c).sum(), 2655., 7)
def test_schmidt(self):
numpy.random.seed(10)
n = 100
a = numpy.random.random((n, n))
s = numpy.dot(a.T, a)
c = orth.schmidt(s)
self.assertTrue(numpy.allclose(reduce(numpy.dot, (c.T, s, c)), numpy.eye(n)))
x1 = numpy.dot(a, c)
x2 = orth.vec_schmidt(a)
d = numpy.dot(x1.T, x2)
d[numpy.diag_indices(n)] = 0
self.assertAlmostEqual(numpy.linalg.norm(d), 0, 9)
self.assertAlmostEqual(numpy.linalg.norm(c), 36., 9)
self.assertAlmostEqual(abs(c).sum(), 1123., 7)
def test_weight_orth(self):
numpy.random.seed(10)
n = 100
a = numpy.random.random((n, n))
s = numpy.dot(a.T, a)
weight = numpy.random.random(n)
c = orth.weight_orth(s, weight)
self.assertTrue(numpy.allclose(reduce(numpy.dot, (c.T, s, c)), numpy.eye(n)))
self.assertAlmostEqual(numpy.linalg.norm(c), 36., 8)
self.assertAlmostEqual(abs(c).sum(), 1908., 6)
def test_orth_ao(self):
c0 = orth.pre_orth_ao(mol, method='scf')
self.assertAlmostEqual(abs(c0).sum(), 33., 7)
c = orth.orth_ao(mol, 'lowdin', c0)
self.assertAlmostEqual(abs(c).sum(), 94., 7)
c = orth.orth_ao(mol, 'meta_lowdin', c0)
self.assertAlmostEqual(abs(c).sum(), 92., 7)
c = orth.orth_ao(mol, 'meta_lowdin', 'sto-3g')
self.assertAlmostEqual(abs(c).sum(), 90., 7)
c = orth.orth_ao(mol, 'meta_lowdin', None)
self.assertAlmostEqual(abs(c).sum(), 83., 7)
def test_ghost_atm_meta_lowdin(self):
mol = gto.Mole()
mol.atom = [['O', (0.0, 0.0, 0.0)], ['ghost', (0.0, (- 0.757), 0.587)], [1, (0.0, 0.757, 0.587)]]
mol.spin = 1
mol.basis = {'O': 'ccpvdz', 'H': 'ccpvdz', 'GHOST': gto.basis.load('631g', 'H')}
mol.build()
c = orth.orth_ao(mol, method='meta_lowdin')
self.assertAlmostEqual(numpy.linalg.norm(c), 7., 9)
def test_pre_orth_ao_with_ecp(self):
mol = gto.M(atom='Cu 0. 0. 0.; H 0. 0. -1.56; H 0. 0. 1.56', basis={'Cu': 'lanl2dz', 'H': 'ccpvdz'}, ecp={'cu': 'lanl2dz'}, charge=(- 1), verbose=0)
c0 = orth.pre_orth_ao(mol, method='ano')
        self.assertAlmostEqual(numpy.linalg.norm(c0), 5., 9)

@given('a paragraph format having {prop_name} set {setting}')
def given_a_paragraph_format_having_prop_set(context, prop_name, setting):
style_name = {'to inherit': 'Normal', 'On': 'Base', 'Off': 'Citation'}[setting]
document = Document(test_docx('sty-known-styles'))
    context.paragraph_format = document.styles[style_name].paragraph_format

def syllabify(language, word):
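    # Maximal-onset syllabification: consonant clusters between nuclei are
    # split so each syllable gets the longest onset that is legal for the
    # language; an explicit '.' forces the split point.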
if (type(word) == str):
word = word.split()
syllables = []
internuclei = []
for phoneme in word:
phoneme = phoneme.strip()
if (phoneme == ''):
continue
stress = None
if phoneme[(- 1)].isdigit():
stress = int(phoneme[(- 1)])
phoneme = phoneme[0:(- 1)]
if (phoneme in language['vowels']):
coda = None
onset = None
if ('.' in internuclei):
period = internuclei.index('.')
coda = internuclei[:period]
onset = internuclei[(period + 1):]
else:
for split in range(0, (len(internuclei) + 1)):
coda = internuclei[:split]
onset = internuclei[split:]
if ((' '.join(onset) in language['onsets']) or (len(syllables) == 0) or (len(onset) == 0)):
break
if (len(syllables) > 0):
syllables[(- 1)][3].extend(coda)
syllables.append((stress, onset, [phoneme], []))
internuclei = []
elif ((not (phoneme in language['consonants'])) and (phoneme != '.')):
raise ValueError(('Invalid phoneme: ' + phoneme))
else:
internuclei.append(phoneme)
if (len(internuclei) > 0):
if (len(syllables) == 0):
syllables.append((None, internuclei, [], []))
else:
syllables[(- 1)][3].extend(internuclei)
    return syllables

def test_run_with_fill_defaults_adds_required_field(run_line, tmp_path):
schemafile = (tmp_path / 'schema.json')
schemafile.write_text(json.dumps(SCHEMA))
doc = (tmp_path / 'instance.json')
doc.write_text(json.dumps(MISSING_FIELD_DOC))
result_without_fill_defaults = run_line(['check-jsonschema', '--schemafile', str(schemafile), str(doc)])
assert (result_without_fill_defaults.exit_code == 1)
result_with_fill_defaults = run_line(['check-jsonschema', '--fill-defaults', '--schemafile', str(schemafile), str(doc)])
    assert (result_with_fill_defaults.exit_code == 0)

def _update(input: torch.Tensor, target: torch.Tensor, from_logits: bool, weight: Optional[torch.Tensor]=None) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
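    # Per-sample binary cross entropy (summed over the last dim), plus the
    # weighted positive and total example counts used later for normalization.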
if from_logits:
cross_entropy = F.binary_cross_entropy_with_logits(input, target, weight, reduction='none').sum(dim=(- 1))
else:
cross_entropy = F.binary_cross_entropy(input, target, weight, reduction='none').sum(dim=(- 1))
weight = ((target.new_ones(target.size()) * 1.0) if (weight is None) else weight)
num_examples = torch.sum(weight, dim=(- 1)).double()
num_positive = torch.sum((weight * target), dim=(- 1)).double()
    return (cross_entropy, num_positive, num_examples)

class HotpotFullIterativeDataset(QuestionAndParagraphsDataset):
def __init__(self, questions: List[HotpotQuestion], batcher: ListBatcher, bridge_as_comparison=False):
self.questions = questions
self.batcher = batcher
self.bridge_as_comparison = bridge_as_comparison
self.samples = self._build_full_dataset()
def _get_labels(self, is_gold1: bool, is_gold2: bool, q_type: str, are_same: bool, is_first_higher: bool) -> Tuple[(int, int)]:
if (not is_gold1):
return (0, 0)
if ((q_type == 'comparison') or self.bridge_as_comparison):
return (int(is_gold1), int((is_gold1 and is_gold2 and (not are_same))))
else:
return (int((is_gold1 and is_first_higher)), int((is_gold1 and is_first_higher and is_gold2 and (not are_same))))
def _build_full_dataset(self):
samples = []
for question in self.questions:
pars_and_scores = list(zip((question.supporting_facts + question.distractors), (question.gold_scores + question.distractor_scores)))
higher_gold = (question.supporting_facts[0] if (question.gold_scores[0] >= question.gold_scores[1]) else question.supporting_facts[1])
for (p1, score1) in pars_and_scores:
for (p2, score2) in pars_and_scores:
(first_label, second_label) = self._get_labels(is_gold1=(p1 in question.supporting_facts), is_gold2=(p2 in question.supporting_facts), q_type=question.q_type, are_same=(p1 == p2), is_first_higher=(higher_gold == p1))
samples.append(IterativeQuestionAndParagraphs(question=question.question_tokens, paragraphs=[flatten_iterable(p1.sentences), flatten_iterable(p2.sentences)], first_label=first_label, second_label=second_label, question_id=question.question_id, q_type=question.q_type, sentence_segments=[get_segments_from_sentences(s) for s in [p1.sentences, p2.sentences]]))
return samples
def get_batches(self, n_batches):
if (len(self) < n_batches):
raise ValueError()
return itertools.islice(self.get_epoch(), n_batches)
def get_samples(self, n_samples: int):
n_batches = self.batcher.epoch_size(n_samples)
return (self.batcher.get_epoch(np.random.choice(self.samples, n_samples, replace=False)), n_batches)
def get_epoch(self):
return self.batcher.get_epoch(self.samples)
def get_spec(self):
batch_size = self.batcher.get_fixed_batch_size()
num_contexts = 2
max_q_words = max((len(q.question_tokens) for q in self.questions))
max_c_words = max((max((c.num_tokens for c in (q.distractors + q.supporting_facts))) for q in self.questions))
return QuestionAndParagraphsSpec(batch_size=batch_size, max_num_contexts=num_contexts, max_num_question_words=max_q_words, max_num_context_words=max_c_words)
def get_vocab(self):
voc = set()
for q in self.questions:
voc.update(q.question_tokens)
for para in (q.distractors + q.supporting_facts):
voc.update(flatten_iterable(para.sentences))
return voc
def get_word_counts(self):
count = Counter()
for q in self.questions:
count.update(q.question_tokens)
for para in (q.distractors + q.supporting_facts):
count.update(flatten_iterable(para.sentences))
return count
def __len__(self):
        return self.batcher.epoch_size(len(self.samples))

class TrainGenerator(Dataset):
def __init__(self, args_config, graph):
self.args_config = args_config
self.graph = graph
self.user_dict = graph.train_user_dict
self.exist_users = list(graph.exist_users)
self.low_item_index = graph.item_range[0]
self.high_item_index = graph.item_range[1]
def __len__(self):
return self.graph.n_train
def __getitem__(self, index):
out_dict = {}
user_dict = self.user_dict
u_id = random.sample(self.exist_users, 1)[0]
out_dict['u_id'] = u_id
pos_items = user_dict[u_id]
n_pos_items = len(user_dict[u_id])
pos_idx = np.random.randint(low=0, high=n_pos_items, size=1)[0]
pos_i_id = pos_items[pos_idx]
out_dict['pos_i_id'] = pos_i_id
neg_i_id = self.get_random_neg(pos_items, [])
out_dict['neg_i_id'] = neg_i_id
return out_dict
def get_random_neg(self, pos_items, selected_items):
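        # Rejection-sample an item id until it is neither a positive item for
        # this user nor already selected.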
while True:
neg_i_id = np.random.randint(low=self.low_item_index, high=self.high_item_index, size=1)[0]
if ((neg_i_id not in pos_items) and (neg_i_id not in selected_items)):
break
        return neg_i_id

def capture_regexes():
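    # Temporarily monkey-patch re.compile/re.search/re.sub so that every
    # pattern exercised by the two regex benchmarks is recorded, then restore
    # the real functions.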
regexes = []
real_compile = re.compile
real_search = re.search
real_sub = re.sub
def capture_compile(regex, flags=0):
regexes.append((regex, flags))
return real_compile(regex, flags)
def capture_search(regex, target, flags=0):
regexes.append((regex, flags))
return real_search(regex, target, flags)
def capture_sub(regex, *args):
regexes.append((regex, 0))
return real_sub(regex, *args)
re.compile = capture_compile
re.search = capture_search
re.sub = capture_sub
try:
import bm_regex_effbot
bm_regex_effbot.bench_regex_effbot(1)
import bm_regex_v8
bm_regex_v8.bench_regex_v8(1)
finally:
re.compile = real_compile
re.search = real_search
re.sub = real_sub
    return regexes

def treutler_ahlrichs(n, *args, **kwargs):
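    # Treutler-Ahlrichs radial grid [JCP 102, 346 (1995)]: maps Chebyshev
    # nodes x = cos((i+1)*step) onto (0, inf) and returns the radii with their
    # quadrature weights, ordered from smallest to largest r.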
r = numpy.empty(n)
dr = numpy.empty(n)
step = (numpy.pi / (n + 1))
ln2 = (1 / numpy.log(2))
for i in range(n):
x = numpy.cos(((i + 1) * step))
r[i] = (((- ln2) * ((1 + x) ** 0.6)) * numpy.log(((1 - x) / 2)))
dr[i] = ((((step * numpy.sin(((i + 1) * step))) * ln2) * ((1 + x) ** 0.6)) * ((((- 0.6) / (1 + x)) * numpy.log(((1 - x) / 2))) + (1 / (1 - x))))
    return (r[::(- 1)], dr[::(- 1)])

def load_question_set(qs_file_name, append_hat_for_LL=True, convert_svs_pattern=True):
with open(qs_file_name) as f:
lines = f.readlines()
binary_qs_index = 0
continuous_qs_index = 0
binary_dict = {}
numeric_dict = {}
LL = re.compile(re.escape('LL-'))
for line in lines:
line = line.replace('\n', '')
temp_list = line.split()
if ((len(line) <= 0) or line.startswith('#')):
continue
name = temp_list[1].replace('"', '').replace("'", '')
temp_list = line.split('{')
temp_line = temp_list[1]
temp_list = temp_line.split('}')
temp_line = temp_list[0]
temp_line = temp_line.strip()
question_list = temp_line.split(',')
temp_list = line.split(' ')
question_key = temp_list[1]
if (temp_list[0] == 'CQS'):
assert (len(question_list) == 1)
processed_question = wildcards2regex(question_list[0], convert_number_pattern=True, convert_svs_pattern=convert_svs_pattern)
numeric_dict[continuous_qs_index] = (name, re.compile(processed_question))
continuous_qs_index = (continuous_qs_index + 1)
elif (temp_list[0] == 'QS'):
re_list = []
for temp_question in question_list:
processed_question = wildcards2regex(temp_question)
if (append_hat_for_LL and LL.search(question_key) and (processed_question[0] != '^')):
processed_question = ('^' + processed_question)
re_list.append(re.compile(processed_question))
binary_dict[binary_qs_index] = (name, re_list)
binary_qs_index = (binary_qs_index + 1)
else:
raise RuntimeError('Not supported question format')
    return (binary_dict, numeric_dict)

def test_async_subproc_command_no_stdout_on_save():
with pytest.raises(ContextError) as err:
Command('arb', is_save=True, stdout='in')
assert (str(err.value) == "You can't set `stdout` or `stderr` when `save` is True.")
with pytest.raises(ContextError) as err:
Command('arb', is_save=True, stderr='out')
assert (str(err.value) == "You can't set `stdout` or `stderr` when `save` is True.")
with pytest.raises(ContextError) as err:
Command('arb', is_save=True, stdout='in', stderr='out')
    assert (str(err.value) == "You can't set `stdout` or `stderr` when `save` is True.")

@memoize_constructor
class W_Character(W_Object):
_attrs_ = _immutable_fields_ = ['value']
errorname = 'char'
def __init__(self, val):
assert isinstance(val, unicode)
self.value = val
def tostring(self):
from pypy.objspace.std.bytesobject import string_escape_encode
return ('#\\%s' % string_escape_encode(self.value.encode('utf-8'), ''))
def get_value_utf8(self):
return self.value.encode('utf-8')
def immutable(self):
return True
def eqv(self, other):
if (not isinstance(other, W_Character)):
return False
return (self.value == other.value)
def hash_eqv(self):
return ord(self.value)
def hash_equal(self, info=None):
        return self.hash_eqv()

class RDP(BaseDeepAD):
def __init__(self, epochs=100, batch_size=64, lr=0.001, rep_dim=128, hidden_dims='100,50', act='LeakyReLU', bias=False, epoch_steps=(- 1), prt_steps=10, device='cuda', verbose=2, random_state=42):
super(RDP, self).__init__(model_name='RDP', epochs=epochs, batch_size=batch_size, lr=lr, epoch_steps=epoch_steps, prt_steps=prt_steps, device=device, verbose=verbose, random_state=random_state)
self.hidden_dims = hidden_dims
self.rep_dim = rep_dim
self.act = act
self.bias = bias
return
def training_prepare(self, X, y):
train_loader = DataLoader(X, batch_size=self.batch_size, shuffle=True)
net = MLPnet(n_features=self.n_features, n_hidden=self.hidden_dims, n_output=self.rep_dim, activation=self.act, bias=self.bias, skip_connection=None).to(self.device)
rp_net = copy.deepcopy(net)
criterion = RDPLoss(rp_net)
if (self.verbose >= 2):
print(net)
return (train_loader, net, criterion)
def inference_prepare(self, X):
test_loader = DataLoader(X, batch_size=self.batch_size, drop_last=False, shuffle=False)
self.criterion.reduction = 'none'
return test_loader
def training_forward(self, batch_x, net, criterion):
batch_x1 = batch_x[torch.randperm(batch_x.shape[0])]
batch_x = batch_x.float().to(self.device)
batch_x1 = batch_x1.float().to(self.device)
(z, z1) = (net(batch_x), net(batch_x1))
loss = criterion(z, z1, batch_x, batch_x1)
return loss
def inference_forward(self, batch_x, net, criterion):
batch_x = batch_x.float().to(self.device)
batch_x1 = batch_x[torch.randperm(batch_x.shape[0])]
(batch_z, batch_z1) = (net(batch_x), net(batch_x1))
s = criterion(batch_z, batch_z1, batch_x, batch_x1)
        return (batch_z, s)

class IPS120_10(OxfordInstrumentsBase):
_SWITCH_HEATER_HEATING_DELAY = 20
_SWITCH_HEATER_COOLING_DELAY = 20
_SWITCH_HEATER_SET_VALUES = {False: 0, True: 1, 'Force': 2}
_SWITCH_HEATER_GET_VALUES = {0: False, 1: True, 2: False, 5: 'Heater fault, low heater current', 8: 'No switch fitted'}
def __init__(self, adapter, name='Oxford IPS', clear_buffer=True, switch_heater_heating_delay=None, switch_heater_cooling_delay=None, field_range=None, **kwargs):
super().__init__(adapter=adapter, name=name, **kwargs)
if (switch_heater_heating_delay is not None):
self._SWITCH_HEATER_HEATING_DELAY = switch_heater_heating_delay
if (switch_heater_cooling_delay is not None):
self._SWITCH_HEATER_COOLING_DELAY = switch_heater_cooling_delay
if (field_range is not None):
if isinstance(field_range, (float, int)):
self.field_setpoint_values = [(- field_range), (+ field_range)]
elif isinstance(field_range, (list, tuple)):
self.field_setpoint_values = field_range
if clear_buffer:
self.adapter.connection.clear()
version = Instrument.measurement('V', ' A string property that returns the version of the IPS. ')
    control_mode = Instrument.control('X', 'C%d', ' A string property that sets the IPS in `local` or `remote` and `locked`\n or `unlocked`, locking the LOC/REM button. Allowed values are:\n\n ===== ===================\n value state\n ===== ===================\n LL local & locked\n RL remote & locked\n LU local & unlocked\n RU remote & unlocked\n ===== ===================\n ', preprocess_reply=(lambda v: v[6]), cast=int, validator=strict_discrete_set, values={'LL': 0, 'RL': 1, 'LU': 2, 'RU': 3}, map_values=True)
current_measured = Instrument.measurement('R1', ' A floating point property that returns the measured magnet current of\n the IPS in amps. ', dynamic=True)
demand_current = Instrument.measurement('R0', ' A floating point property that returns the demand magnet current of\n the IPS in amps. ', dynamic=True)
demand_field = Instrument.measurement('R7', ' A floating point property that returns the demand magnetic field of\n the IPS in Tesla. ', dynamic=True)
persistent_field = Instrument.measurement('R18', ' A floating point property that returns the persistent magnetic field of\n the IPS in Tesla. ', dynamic=True)
switch_heater_status = Instrument.control('X', 'H%d', ' An integer property that returns the switch heater status of\n the IPS. Use the :py:attr:`~switch_heater_enabled` property for controlling\n and reading the switch heater. When using this property, the user\n is referred to the IPS120-10 manual for the meaning of the integer\n values. ', preprocess_reply=(lambda v: v[8]), cast=int)
    @property
    def switch_heater_enabled(self):
status_value = self.switch_heater_status
status = self._SWITCH_HEATER_GET_VALUES[status_value]
if isinstance(status, str):
raise SwitchHeaterError(('IPS 120-10: switch heater status reported issue with switch heater: %s' % status))
return status
    @switch_heater_enabled.setter
def switch_heater_enabled(self, value):
status_value = self._SWITCH_HEATER_SET_VALUES[value]
if (status_value == 2):
log.info('IPS 120-10: Turning on the switch heater without any safety checks.')
self.switch_heater_status = status_value
current_setpoint = Instrument.control('R0', 'I%f', ' A floating point property that controls the magnet current set-point of\n the IPS in ampere. ', validator=truncated_range, values=[0, 120], dynamic=True)
field_setpoint = Instrument.control('R8', 'J%f', ' A floating point property that controls the magnetic field set-point of\n the IPS in Tesla. ', validator=truncated_range, values=[(- 7), 7], dynamic=True)
sweep_rate = Instrument.control('R9', 'T%f', ' A floating point property that controls the sweep-rate of\n the IPS in Tesla/minute. ', dynamic=True)
activity = Instrument.control('X', 'A%d', ' A string property that controls the activity of the IPS. Valid values\n are "hold", "to setpoint", "to zero" and "clamp" ', preprocess_reply=(lambda v: v[4]), cast=int, values={'hold': 0, 'to setpoint': 1, 'to zero': 2, 'clamp': 4}, map_values=True)
sweep_status = Instrument.measurement('X', ' A string property that returns the current sweeping mode of the IPS. ', preprocess_reply=(lambda v: v[11]), cast=int, values={'at rest': 0, 'sweeping': 1, 'sweep limiting': 2, 'sweeping & sweep limiting': 3}, map_values=True)
    @property
    def field(self):
try:
heater_on = self.switch_heater_enabled
except SwitchHeaterError as e:
log.error(('IPS 120-10: Switch heater status reported issue: %s' % e))
field = self.demand_field
else:
if heater_on:
field = self.demand_field
else:
field = self.persistent_field
return field
def enable_control(self):
log.debug('start enabling control')
self.control_mode = 'RU'
if (self.activity == 'clamp'):
self.activity = 'hold'
if (self.field == 0):
log.debug('enabling switch heater')
self.switch_heater_enabled = True
def disable_control(self):
log.debug('start disabling control')
if (not (self.field == 0)):
raise MagnetError('IPS 120-10: field not at 0T; cannot disable the supply. ')
log.debug('disabling switch heater')
self.switch_heater_enabled = False
self.activity = 'clamp'
self.control_mode = 'LU'
def enable_persistent_mode(self):
log.debug('enabling persistent mode')
if (not (self.sweep_status == 'at rest')):
raise MagnetError('IPS 120-10: magnet not at rest; cannot enable persistent mode')
if (not self.switch_heater_enabled):
log.debug('magnet already in persistent mode')
return
else:
self.activity = 'hold'
self.switch_heater_enabled = False
            log.info('IPS 120-10: Wait for switch heater delay')
sleep(self._SWITCH_HEATER_COOLING_DELAY)
self.activity = 'to zero'
self.wait_for_idle()
def disable_persistent_mode(self):
log.debug('disabling persistent mode')
if (not (self.sweep_status == 'at rest')):
raise MagnetError('IPS 120-10: magnet not at rest; cannot disable persistent mode')
if (not (self.field == self.field_setpoint)):
log.warning('IPS 120-10: field setpoint and persistent field not identical; setting the setpoint to the persistent field.')
self.field_setpoint = self.field
if self.switch_heater_enabled:
log.debug('magnet already in demand mode or at 0 field')
return
else:
log.debug("set activity to 'to setpoint'")
self.activity = 'to setpoint'
self.wait_for_idle()
log.debug("set activity to 'hold'")
self.activity = 'hold'
log.debug('enable switch heater')
self.switch_heater_enabled = True
            log.info('IPS 120-10: Wait for switch heater delay')
sleep(self._SWITCH_HEATER_HEATING_DELAY)
def wait_for_idle(self, delay=1, max_wait_time=None, should_stop=(lambda : False)):
log.debug('waiting for magnet to be idle')
start_time = time()
while True:
log.debug('sleeping for %d s', delay)
sleep(delay)
log.debug('checking the status of the sweep')
status = self.sweep_status
if (status == 'at rest'):
log.debug("status is 'at rest', waiting is done")
break
if should_stop():
log.debug('external function signals to stop waiting')
break
if ((max_wait_time is not None) and ((time() - start_time) > max_wait_time)):
raise TimeoutError('IPS 120-10: Magnet not idle within max wait time.')
def set_field(self, field, sweep_rate=None, persistent_mode_control=True):
if (self.field == field):
return
if self.switch_heater_enabled:
log.debug('Magnet in demand mode, continuing')
else:
log.debug('Magnet in persistent mode')
if persistent_mode_control:
log.debug('trying to disable persistent mode')
self.disable_persistent_mode()
else:
raise MagnetError('IPS 120-10: magnet is in persistent mode but cannot turn off persistent mode because persistent_mode_control == False. ')
if (sweep_rate is not None):
log.debug('setting the sweep rate to %s', sweep_rate)
self.sweep_rate = sweep_rate
if (field == 0):
log.debug("setting activity to 'to zero' - running down the field")
self.activity = 'to zero'
else:
log.debug("setting activity to 'to setpoint'")
self.activity = 'to setpoint'
log.debug('setting the field_setpoint to %s', field)
self.field_setpoint = field
log.debug('waiting for magnet to be finished')
self.wait_for_idle()
log.debug('sleeping for additional 10s (whatever the reason)')
sleep(10)
if (persistent_mode_control and (field != 0)):
log.debug('persistent mode control is on, and setpoint_field !=0 - enabling persistent mode')
self.enable_persistent_mode()
def train_magnet(self, training_scheme):
for (field, rate) in training_scheme:
self.set_field(field, rate, persistent_mode_control=False)
self.set_field(0) |
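# Hedged usage sketch (not from the source): `ips` is assumed to be a connected
# instance of the IPS 120-10 driver above; connection setup is out of scope.
def example_persistent_ramp(ips):
ips.enable_control()  # unclamp and heat the switch heater at zero field
ips.set_field(1.5, sweep_rate=0.2)  # ramp to 1.5 T at 0.2 T/min, then enter persistent mode
ips.set_field(0.0)  # leave persistent mode and ramp back to zero
ips.disable_control()  # clamp the supply again |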
class FC6_Monitor(FC3_Monitor):
removedKeywords = FC3_Monitor.removedKeywords
removedAttrs = FC3_Monitor.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
FC3_Monitor.__init__(self, writePriority, *args, **kwargs)
self.probe = kwargs.get('probe', True)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.hsync:
retval += (' --hsync=%s' % self.hsync)
if self.monitor:
retval += (' --monitor="%s"' % self.monitor)
if (not self.probe):
retval += ' --noprobe'
if self.vsync:
retval += (' --vsync=%s' % self.vsync)
if retval:
retval = ('monitor%s\n' % retval)
return retval
def _getParser(self):
op = FC3_Monitor._getParser(self)
op.add_argument('--noprobe', dest='probe', action='store_false', default=True, version=FC6, help='\n Do not probe the monitor.')
return op |
def _create_isolated_env_virtualenv(path: str) -> tuple[(str, str)]:
import virtualenv
cmd = [str(path), '--no-setuptools', '--no-wheel', '--activators', '']
result = virtualenv.cli_run(cmd, setup_logging=False)
executable = str(result.creator.exe)
script_dir = str(result.creator.script_dir)
return (executable, script_dir) |
class BuildInstructions(object):
def __init__(self, build_instr_file=None):
self.instructions = {}
self.metadata = {}
if build_instr_file:
allheaders = get_inp_sections_details(build_instr_file)
instructions = {}
for (section, _) in allheaders.items():
change = INPSectionDiff(build_instr_file=build_instr_file, section=section)
instructions.update({section: change})
self.instructions = instructions
self.metadata = vc_utils.read_meta_data(build_instr_file)
def __add__(self, other):
bi = BuildInstructions()
for (section, change_obj) in self.instructions.items():
if (section in other.instructions):
new_change = (change_obj + other.instructions[section])
bi.instructions[section] = new_change
else:
bi.instructions[section] = change_obj
for (section, change_obj) in other.instructions.items():
if (section not in self.instructions):
bi.instructions[section] = change_obj
bi.metadata = deepcopy(self.metadata)
otherbaseline = other.metadata['Parent Models']['Baseline']
otheralternatives = other.metadata['Parent Models']['Alternatives']
bi.metadata['Parent Models']['Baseline'].update(otherbaseline)
bi.metadata['Parent Models']['Alternatives'].update(otheralternatives)
bi.metadata['Log'].update(other.metadata['Log'])
return bi
def __radd__(self, other):
if (other == 0):
return self
else:
return self.__add__(other)
def save(self, dir, filename):
if (not os.path.exists(dir)):
os.makedirs(dir)
filepath = os.path.join(dir, filename)
with open(filepath, 'w') as f:
vc_utils.write_meta_data(f, self.metadata)
for (section, change_obj) in self.instructions.items():
section_df = pd.concat([change_obj.removed, change_obj.altered, change_obj.added])
vc_utils.write_inp_section(f, allheaders=None, sectionheader=section, section_data=section_df, pad_top=False, na_fill='NaN')
def build(self, baseline_dir, target_path):
basemodel = swmmio.Model(baseline_dir)
allheaders = get_inp_sections_details(basemodel.inp.path)
with open(target_path, 'w') as f:
for (section, _) in allheaders.items():
if ((section not in problem_sections) and (allheaders[section]['columns'] != ['blob']) and (section in self.instructions)):
basedf = dataframe_from_bi(basemodel.inp.path, section)
basedf[';'] = ';'
changes = self.instructions[section]
remove_ids = (changes.removed.index | changes.altered.index)
new_section = basedf.drop(remove_ids)
new_section = pd.concat([new_section, changes.altered, changes.added])
else:
new_section = dataframe_from_bi(basemodel.inp.path, section=section)
new_section[';'] = ';'
vc_utils.write_inp_section(f, allheaders, section, new_section) |
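# Hedged usage sketch: the `other == 0` branch in __radd__ above exists so the
# builtin sum(), which starts from 0, can merge several instruction files; the
# paths are illustrative.
def example_merge_instructions(paths):
return sum(BuildInstructions(p) for p in paths)  # overlapping sections combine via INPSectionDiff.__add__ |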
class TestInputContactMessageContentWithoutRequest(TestInputContactMessageContentBase):
def test_slot_behaviour(self, input_contact_message_content):
inst = input_contact_message_content
for attr in inst.__slots__:
assert (getattr(inst, attr, 'err') != 'err'), f"got extra slot '{attr}'"
assert (len(mro_slots(inst)) == len(set(mro_slots(inst)))), 'duplicate slot'
def test_expected_values(self, input_contact_message_content):
assert (input_contact_message_content.first_name == self.first_name)
assert (input_contact_message_content.phone_number == self.phone_number)
assert (input_contact_message_content.last_name == self.last_name)
def test_to_dict(self, input_contact_message_content):
input_contact_message_content_dict = input_contact_message_content.to_dict()
assert isinstance(input_contact_message_content_dict, dict)
assert (input_contact_message_content_dict['phone_number'] == input_contact_message_content.phone_number)
assert (input_contact_message_content_dict['first_name'] == input_contact_message_content.first_name)
assert (input_contact_message_content_dict['last_name'] == input_contact_message_content.last_name)
def test_equality(self):
a = InputContactMessageContent('phone', 'first', last_name='last')
b = InputContactMessageContent('phone', 'first_name', vcard='vcard')
c = InputContactMessageContent('phone_number', 'first', vcard='vcard')
d = User(123, 'first', False)
assert (a == b)
assert (hash(a) == hash(b))
assert (a != c)
assert (hash(a) != hash(c))
assert (a != d)
assert (hash(a) != hash(d)) |
class LengthColumn(NumericColumn):
def __init__(self):
super().__init__('~#length')
def _get_min_width(self):
return self._cell_width(util.format_time_display(((60 * 82) + 22)))
def _fetch_value(self, model, iter_):
return model.get_value(iter_).get('~#length', 0)
def _apply_value(self, model, iter_, cell, value):
text = util.format_time_display(value)
cell.set_property('text', text)
self._recalc_width(model.get_path(iter_), text) |
@pytest.mark.pydicom
def test_require_dicom_patient_position():
test_ds_dict = {key: pydicom.dcmread(test_coords.get_data_file(key)) for key in ORIENTATIONS_SUPPORTED}
ds_no_orient = pydicom.dcmread(str(pymedphys.data_path('example_structures.dcm')), force=True)
test_ds_dict['no orient'] = ds_no_orient
test_orientations = ('HFS', 'HFP', 'FFS', 'FFP')
for (orient, ds) in test_ds_dict.items():
for test_orient in test_orientations:
if (orient == test_orient):
orientation.require_dicom_patient_position(ds, test_orient)
elif (orient == 'no orient'):
with pytest.raises(AttributeError):
orientation.require_dicom_patient_position(ds, test_orient)
else:
with pytest.raises(ValueError):
orientation.require_dicom_patient_position(ds, test_orient) |
def get_param_from_h5(sdf_h5_file, cat_id, obj):
h5_f = h5py.File(sdf_h5_file, 'r')
try:
if ('norm_params' in h5_f.keys()):
norm_params = h5_f['norm_params'][:]
else:
raise Exception(cat_id, obj, 'no sdf and sample')
finally:
h5_f.close()
return (norm_params[:3], norm_params[3]) |
@mock.patch('beeref.widgets.SceneToPixmapExporterDialog.exec', return_value=False)
@mock.patch('beeref.widgets.SceneToPixmapExporterDialog.value')
def test_scene_to_pixmap_exporter_get_user_input_when_canceled(value_mock, exec_mock, view):
exporter = SceneToPixmapExporter(view.scene)
value = exporter.get_user_input(None)
assert (value is False) |
class LossBuilder():
LOSS_DICT = {'edge': EdgeLoss, 'depth': DepthLoss}
def __init__(self, weight_name, weight, name, img, pil_target):
self.weight_name = weight_name
self.weight = weight
self.name = name
self.img = img
self.pil_target = pil_target
@property
def weight_category(self):
return self.weight_name.split('_')[0]
@property
def loss_factory(self):
weight_name = self.weight_category
if (weight_name == 'direct'):
Loss = type(self.img).get_preferred_loss()
else:
Loss = self.LOSS_DICT[weight_name]
return Loss
def build_loss(self):
Loss = self.loss_factory
out = Loss.TargetImage(f'{self.weight_category} {self.name}:{self.weight}', self.img.image_shape, self.pil_target)
out.set_enabled((self.pil_target is not None))
return out |
class PrideFacts(commands.Cog):
def __init__(self, bot: Bot):
self.bot = bot
self.daily_fact_task = self.bot.loop.create_task(self.send_pride_fact_daily())
@seasonal_task(Month.JUNE)
async def send_pride_fact_daily(self) -> None:
channel = self.bot.get_channel(Channels.sir_lancebot_playground)
(await self.send_select_fact(channel, datetime.now(tz=UTC).day))
async def send_select_fact(self, target: discord.abc.Messageable, day_num: int) -> None:
try:
(await target.send(embed=self.get_fact_embed(day_num)))
except IndexError:
(await target.send(f'Day {day_num} is not supported'))
return
@commands.command(name='pridefact', aliases=('pridefacts',))
async def pridefact(self, ctx: commands.Context, option: ((int | str) | None)=None) -> None:
if (not option):
(await self.send_select_fact(ctx, datetime.now(tz=UTC).day))
elif isinstance(option, int):
(await self.send_select_fact(ctx, option))
elif option.lower().startswith('rand'):
(await ctx.send(embed=self.get_fact_embed()))
else:
(await ctx.send(f'Could not parse option {option}'))
@staticmethod
def get_fact_embed(day_num: (int | None)=None) -> discord.Embed:
fact = (FACTS[(day_num - 1)] if day_num else random.choice(FACTS))
return discord.Embed(colour=Colours.pink, title=(f"Day {day_num}'s pride fact!" if day_num else 'Random pride fact!'), description=fact) |
@mock.patch('xarray.open_dataset')
def test_1258(fake_open_dataset):
from satpy import Scene
fake_open_dataset.side_effect = generate_fake_abi_xr_dataset
scene = Scene(abi_file_list, reader='abi_l1b')
scene.load(['true_color_nocorr', 'C04'], calibration='radiance')
resampled_scene = scene.resample(scene.coarsest_area(), resampler='native')
assert (len(resampled_scene.keys()) == 2) |
def _get_unit_vector_x(sat_sun_vec, unit_vector_z, angle_between_earth_and_sun):
beta = angle_between_earth_and_sun
sin_beta = np.sin(beta)
cos_beta = np.cos(beta)
cross1 = _get_uz_cross_satsun(unit_vector_z, sat_sun_vec)
cross2 = cross_product(cross1, unit_vector_z)
unit_vector_x = Vector3D(x=((sin_beta * cross1.x) + (cos_beta * cross2.x)), y=((sin_beta * cross1.y) + (cos_beta * cross2.y)), z=((sin_beta * cross1.z) + (cos_beta * cross2.z)))
return normalize_vector(unit_vector_x) |
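# Hedged note on the construction above: cross1 = uz x sat_sun and
# cross2 = cross1 x uz both lie in the plane normal to uz, so the combination
# sin(beta) * cross1 + cos(beta) * cross2 is cross2 rotated by beta about uz
# (up to orientation) before the final normalisation. |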
class rpt(SWMMIOFile):
def __init__(self, filePath):
SWMMIOFile.__init__(self, filePath)
meta_data = get_rpt_metadata(filePath)
self.swmm_version = meta_data['swmm_version']
self.simulationStart = meta_data['simulation_start']
self.simulationEnd = meta_data['simulation_end']
self.timeStepMin = meta_data['time_step_min']
self.dateOfAnalysis = meta_data['analysis_date']
self.elementByteLocations = {'Link Results': {}, 'Node Results': {}}
self._rpt_section_details = None
@property
def headers(self):
if (self._rpt_section_details is None):
self._rpt_section_details = get_rpt_sections_details(self.path)
return self._rpt_section_details
def _external_outflow_volume(self):
return float(swmmio.utils.text.get_rpt_value(self.path, 'External Outflow'))
def _flooding_loss_volume(self):
return float(swmmio.utils.text.get_rpt_value(self.path, 'Flooding Loss')) |
def bar(view_tmin, view_tmax, changes, tmin, tmax, sx):
delta = ((view_tmax - view_tmin) / (sx - 2))
out = [ansi_dim]
ic = 0
while ((ic < len(changes)) and (changes[ic][0] < view_tmin)):
ic += 1
if ((0 < ic) and (ic < len(changes))):
count = changes[(ic - 1)][1]
else:
count = 0
out.append((bar_right if ((ic == 0) and (view_tmin <= tmin)) else tri_left))
for i in range((sx - 2)):
block_tmin = (view_tmin + (i * delta))
block_tmax = (view_tmin + ((i + 1) * delta))
ic_start = ic
if (i < (sx - 3)):
while ((ic < len(changes)) and (changes[ic][0] < block_tmax)):
ic += 1
else:
while ((ic < len(changes)) and (changes[ic][0] <= block_tmax)):
ic += 1
nc_block = (ic - ic_start)
if (nc_block == 0):
if (count == 0):
out.append(blocks[(- 1)])
elif (count == 1):
out.append(blocks[0])
else:
out.append((vmulti % (('%i' % count) if (count <= 9) else 'N')))
elif (nc_block == 1):
(t, new_count) = changes[ic_start]
ib = int(round((((t - block_tmin) / delta) * (len(blocks) - 1))))
if ((new_count == 1) and (count == 0)):
out.append(('%s%s%s' % (ansi_reverse, blocks[((- ib) - 1)], ansi_reverse_reset)))
elif ((new_count == 0) and (count == 1)):
out.append(blocks[((- ib) - 1)])
elif (new_count > count):
out.append(('%s%s%s' % (ansi_reverse, '+', ansi_reverse_reset)))
elif (new_count < count):
out.append(('%s%s%s' % (ansi_reverse, '-', ansi_reverse_reset)))
elif ((count == 0) and (count == new_count)):
out.append(blocks[(- 1)])
elif ((count == 1) and (count == new_count)):
out.append(blocks[0])
else:
out.append('N')
count = new_count
elif (nc_block > 1):
(_, count) = changes[((ic_start + nc_block) - 1)]
out.append(hmulti)
else:
assert False
out.append((bar_left if ((ic == len(changes)) and (tmax <= view_tmax)) else tri_right))
out.append(ansi_dim_reset)
return ''.join(out) |
class _MemoryStreamCloser(_StreamCloser):
def __init__(self, write, close_on_exit, is_binary):
super().__init__(write, close_on_exit)
io_class = (io.BytesIO if is_binary else io.StringIO)
fp = self._wrap(io_class)()
assert (fp == self.fp)
def close(self, parent_close=None):
self.value = self.fp.getvalue()
super().close(parent_close)
def _success(self):
self._write_on_success(self.value) |
def test_parse_empty_string(parser):
line = ''
statement = parser.parse(line)
assert (statement == '')
assert (statement.args == statement)
assert (statement.raw == line)
assert (statement.command == '')
assert (statement.arg_list == [])
assert (statement.multiline_command == '')
assert (statement.terminator == '')
assert (statement.suffix == '')
assert (statement.pipe_to == '')
assert (statement.output == '')
assert (statement.output_to == '')
assert (statement.command_and_args == line)
assert (statement.argv == statement.arg_list) |
def make_env(scenario_name, benchmark=False):
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
scenario = scenarios.load((scenario_name + '.py')).Scenario()
world = scenario.make_world()
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env |
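# Hedged usage sketch: scenario names come from multiagent.scenarios;
# 'simple_spread' is a common one but is an assumption here.
def example_rollout(scenario_name='simple_spread'):
env = make_env(scenario_name)
env.reset()
act_n = [space.sample() for space in env.action_space]  # one random action per agent
return env.step(act_n)  # (obs_n, reward_n, done_n, info_n) |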
def test_h_constraints_offxml(methanol, tmpdir):
with tmpdir.as_cwd():
methanol.to_offxml(file_name='methanol.offxml', h_constraints=True)
methanol_ff = ForceField('methanol.offxml')
off_methanol = Molecule.from_rdkit(methanol.to_rdkit())
system = methanol_ff.create_openmm_system(topology=off_methanol.to_topology())
assert (system.getNumConstraints() == 4)
for i in range(4):
(a, b, constraint) = system.getConstraintParameters(i)
assert (unit.Quantity(methanol.BondForce[(a, b)].length, unit.nanometer) == constraint) |
class SawyerButtonPressV1Policy(Policy):
@staticmethod
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'button_start_pos': obs[3:6], 'unused_info': obs[6:]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self.desired_pos(o_d), p=4.0)
action['grab_effort'] = 0.0
return action.array
@staticmethod
def desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_button = (o_d['button_start_pos'] + np.array([0.0, 0.0, (- 0.07)]))
(hand_x, hand_y, hand_z) = pos_curr
(button_initial_x, button_initial_y, button_initial_z) = pos_button
if (not np.all(np.isclose(np.array([hand_x, hand_z]), np.array([button_initial_x, button_initial_z]), atol=0.02))):
pos_button[1] = (pos_curr[1] - 0.1)
return pos_button
pos_button[1] += 0.02
return pos_button |
def convert(x, dtype=None):
if isinstance(x, np.ma.MaskedArray):
raise NotImplementedError('MaskedArrays are not supported')
if (dtype is not None):
x_ = _asarray(x, dtype=dtype)
else:
x_ = None
if isinstance(x, int):
try:
x_ = autocast_int(x)
except OverflowError:
x_ = _asarray(x, dtype='uint64')
elif isinstance(x, builtins.float):
x_ = autocast_float(x)
elif isinstance(x, np.ndarray):
x_ = x
else:
x_ = np.asarray(x)
if ((x_.size == 0) and (not hasattr(x, 'dtype'))):
x_ = np.asarray(x, dtype=config.floatX)
assert issubclass(type(x_), (np.ndarray, np.memmap))
return x_ |
def _find_compound_unit(numerator_unit: str, denominator_unit: str, locale: ((Locale | str) | None)=LC_NUMERIC) -> (str | None):
locale = Locale.parse(locale)
resolved_numerator_unit = _find_unit_pattern(numerator_unit, locale=locale)
resolved_denominator_unit = _find_unit_pattern(denominator_unit, locale=locale)
if (not (resolved_numerator_unit and resolved_denominator_unit)):
return None
bare_numerator_unit = resolved_numerator_unit.split('-', 1)[(- 1)]
bare_denominator_unit = resolved_denominator_unit.split('-', 1)[(- 1)]
return _find_unit_pattern(f'{bare_numerator_unit}-per-{bare_denominator_unit}', locale=locale) |
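# Hedged usage sketch: the arguments are CLDR unit identifiers, so e.g.
# _find_compound_unit('kilometer', 'hour', locale='en') is expected to resolve
# to the CLDR key for kilometre-per-hour, and None is returned whenever no
# compound pattern exists for the pair. |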
@register_optimizer('lamb')
class FairseqLAMB(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
try:
from apex.optimizers import FusedLAMB
self._optimizer = FusedLAMB(params, **self.optimizer_config)
except ImportError:
raise ImportError('Please install apex to use LAMB optimizer')
@staticmethod
def add_args(parser):
parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', help='betas for LAMB optimizer')
parser.add_argument('--lamb-eps', type=float, default=1e-08, metavar='D', help='epsilon for LAMB optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
@property
def optimizer_config(self):
return {'lr': self.args.lr[0], 'betas': eval(self.args.lamb_betas), 'eps': self.args.lamb_eps, 'weight_decay': self.args.weight_decay}
@property
def supports_flat_params(self):
return False |
class TestOps():
def test_eq(self, dummy_memmap):
memmap = dummy_memmap
assert (memmap == memmap.clone()).all()
assert (memmap.clone() == memmap).all()
def test_fill_(self, dummy_memmap):
memmap = dummy_memmap.fill_(1.0)
assert (memmap == 1).all()
assert isinstance(memmap, MemoryMappedTensor)
def test_copy_(self, dummy_memmap):
memmap = dummy_memmap.copy_(torch.ones(10, 11))
assert (memmap == 1).all()
assert isinstance(memmap, MemoryMappedTensor)
assert (torch.ones(10, 11).copy_(memmap) == 1).all()
def test_or(self):
memmap = MemoryMappedTensor.from_tensor(torch.ones(10, 11, dtype=torch.bool))
assert (memmap | (~ memmap)).all()
def test_ne(self):
memmap = MemoryMappedTensor.from_tensor(torch.ones(10, 11, dtype=torch.bool))
assert (memmap != (~ memmap)).all() |
def get_files(**kwargs):
metadata_directory = kwargs.get('metadata_directory', '')
shared_data_directory = kwargs.get('shared_data_directory', '')
files = []
for f in get_template_files(**kwargs):
if (str(f.path) == 'LICENSE.txt'):
files.append(File(Path(metadata_directory, 'licenses', f.path), f.contents))
if (f.path.parts[0] != kwargs['package_name']):
continue
files.append(f)
files.extend((File(Path(shared_data_directory, 'data', 'foo.txt'), ''), File(Path(shared_data_directory, 'data', 'nested', 'bar.txt'), ''), File(Path(metadata_directory, 'WHEEL'), f'''Wheel-Version: 1.0
Generator: hatchling {__version__}
Root-Is-Purelib: true
Tag: py3-none-any
'''), File(Path(metadata_directory, 'METADATA'), f'''Metadata-Version: {DEFAULT_METADATA_VERSION}
Name: {kwargs['project_name']}
Version: 0.0.1
License-File: LICENSE.txt
Requires-Python: >3
''')))
record_file = File(Path(metadata_directory, 'RECORD'), '')
update_record_file_contents(record_file, files)
files.append(record_file)
return files |
def test_info(run_cli):
funcname = tests.utils.get_funcname()
argsprefix = ('data/mockargs/%s_' % funcname)
cliprefix = ('data/clioutput/%s_' % funcname)
prod_accessible = {'ids': [1, 7]}
prod_get = {'products': [{'id': 1, 'name': 'Prod 1 Test'}, {'id': 7, 'name': 'test-fake-product'}]}
fakebz = tests.mockbackend.make_bz(product_get_accessible_args=None, product_get_accessible_return=prod_accessible, product_get_args=(argsprefix + 'products.txt'), product_get_return=prod_get)
cmd = 'bugzilla info --products'
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, (cliprefix + 'products.txt'))
prod_get_ver = {'products': [{'id': 7, 'name': 'test-fake-product', 'versions': [{'id': 360, 'is_active': True, 'name': '7.1'}, {'id': 123, 'is_active': True, 'name': 'fooversion!'}]}]}
fakebz = tests.mockbackend.make_bz(product_get_args=(argsprefix + 'versions.txt'), product_get_return=prod_get_ver)
cmd = 'bugzilla info --versions test-fake-product'
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, (cliprefix + 'versions.txt'))
prod_get_comp_active = {'products': [{'id': 7, 'name': 'test-fake-product', 'components': [{'is_active': True, 'name': 'backend/kernel'}, {'is_active': True, 'name': 'client-interfaces'}]}]}
cmd = 'bugzilla info --components test-fake-product'
fakebz = tests.mockbackend.make_bz(product_get_args=(argsprefix + 'components.txt'), product_get_return=prod_get_comp_active)
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, (cliprefix + 'components.txt'))
cmd = 'bugzilla info --components test-fake-product --active-components'
fakebz = tests.mockbackend.make_bz(product_get_args=(argsprefix + 'components-active.txt'), product_get_return=prod_get_comp_active)
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, (cliprefix + 'components-active.txt'))
cmd = 'bugzilla info --component_owners test-fake-product'
prod_get_comp_owners = {'products': [{'id': 7, 'name': 'test-fake-product', 'components': [{'default_assigned_to': 'Fake Guy', 'name': 'client-interfaces'}, {'default_assigned_to': 'ANother fake dude!', 'name': 'configuration'}]}]}
fakebz = tests.mockbackend.make_bz(product_get_args=(argsprefix + 'components-owners.txt'), product_get_return=prod_get_comp_owners)
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, (cliprefix + 'components-owners.txt')) |
def test_on_success_exception(server_app):
custom = MagicMock(side_effect=RuntimeError('something happened'))
server_app.on('custom', custom)
test_client = server_app.sio.test_client(server_app.app)
result = test_client.emit('custom', callback=True)
custom.assert_called_once_with(server_app)
assert (result == error.ServerError().as_json) |
# decorator truncated in the source (leading characters lost): ..._keras_backend_version_to_v2
def convert_h5_model_to_pb_model(h5_model_path: AnyStr, custom_objects: Dict=None):
supported_file_types = ['h5', 'hdf5']
def validate_model_path() -> Tuple[(str, str)]:
if (not os.path.exists(h5_model_path)):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), h5_model_path)
model_name_split = os.path.basename(h5_model_path).split('.')
if (model_name_split[(- 1)] not in supported_file_types):
raise ValueError(f'File must be of types {supported_file_types}.')
model_name = (model_name_split[0] + '_converted.pb')
save_path = os.path.dirname(h5_model_path)
return (model_name, (save_path if save_path else os.getcwd()))
def freeze_session(session, output_names):
graph = session.graph
with graph.as_default():
output_names += [v.op.name for v in tf.compat.v1.global_variables()]
input_graph_def = graph.as_graph_def()
for node in input_graph_def.node:
node.device = ''
frozen_graph = convert_variables_to_constants_from_session_graph(session, input_graph_def, output_names)
frozen_graph = remove_training_nodes(frozen_graph)
return frozen_graph
(model_name, save_path) = validate_model_path()
with tf.compat.v1.Graph().as_default():
with tf.compat.v1.Session() as sess:
tf.compat.v1.keras.backend.set_session(sess)
tf.compat.v1.keras.backend.set_learning_phase(0)
try:
model = tf.keras.models.load_model(h5_model_path, custom_objects=custom_objects, compile=False)
except ValueError:
_logger.error("If using custom layers, pass a dict mapping them. For example, {'CustomLayer': CustomLayer}")
raise
frozen_graph = freeze_session(tf.compat.v1.keras.backend.get_session(), [out.op.name for out in model.outputs])
tf.io.write_graph(frozen_graph, save_path, model_name, as_text=False)
_logger.info('Success. The converted model is located at %s saved as %s', save_path, model_name) |
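# Hedged usage sketch: the path and the custom-layer mapping are illustrative.
# convert_h5_model_to_pb_model('model.h5')
# convert_h5_model_to_pb_model('model.h5', custom_objects={'MyLayer': MyLayer}) |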
class YolosFeatureExtractor(YolosImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use YolosImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs) |
class Solution():
def sumOddLengthSubarrays(self, arr: List[int]) -> int:
gap = 1
res = 0
n = len(arr)
while (gap < (n + 1)):
i = 0
while (i < ((n - gap) + 1)):
for j in range(i, (i + gap)):
res += arr[j]
i += 1
gap += 2
return res |
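# Hedged alternative sketch (not from the source): arr[i] occurs in
# ((i + 1) * (n - i) + 1) // 2 odd-length subarrays, so the quadratic loop above
# collapses to a single O(n) pass; e.g. [1, 4, 2, 5, 3] -> 58 either way.
def sum_odd_length_subarrays(arr):
n = len(arr)
return sum(((((i + 1) * (n - i)) + 1) // 2) * v for (i, v) in enumerate(arr)) |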
class CmdDrop(COMMAND_DEFAULT_CLASS):
key = 'drop'
locks = 'cmd:all()'
arg_regex = '\\s|$'
def func(self):
caller = self.caller
if (not self.args):
caller.msg('Drop what?')
return
obj = caller.search(self.args, location=caller, nofound_string=("You aren't carrying %s." % self.args), multimatch_string=('You carry more than one %s:' % self.args))
if (not obj):
return
if (not obj.at_before_drop(caller)):
return
obj.move_to(caller.location, quiet=True)
caller.msg(('You drop %s.' % (obj.name,)))
caller.location.msg_contents(('%s drops %s.' % (caller.name, obj.name)), exclude=caller)
obj.at_drop(caller) |
class AbstractChunkIO():
@classmethod
def calc_offset(cls, chunk_x: int, chunk_z: int) -> int:
raise NotImplementedError(cls.__name__)
@classmethod
def find_chunk(cls, location: int) -> tuple:
raise NotImplementedError(cls.__name__)
@classmethod
def fetch_chunk(cls, world_path: str, chunk_x: int, chunk_z: int):
raise NotImplementedError(cls.__name__)
@classmethod
async def fetch_chunk_async(cls, world_path: str, chunk_x: int, chunk_z: int):
raise NotImplementedError(cls.__name__) |
class FC6_TestCase(CommandTest):
command = 'iscsi'
def runTest(self):
self.assert_parse('iscsi --ipaddr=1.1.1.1', 'iscsi --ipaddr=1.1.1.1\n')
self.assert_parse('iscsi --ipaddr=1.1.1.1 --target=tar --port=1234 --user=name --password=secret', 'iscsi --target=tar --ipaddr=1.1.1.1 --port=1234 --user=name --password=secret\n')
self.assert_parse('iscsi --ipaddr=1.1.1.1 --target=tar', 'iscsi --target=tar --ipaddr=1.1.1.1\n')
self.assert_parse('iscsi --ipaddr=1.1.1.1 --port=4321', 'iscsi --ipaddr=1.1.1.1 --port=4321\n')
self.assert_parse('iscsi --ipaddr=1.1.1.1 --user=name', 'iscsi --ipaddr=1.1.1.1 --user=name\n')
self.assert_parse('iscsi --ipaddr=1.1.1.1 --password=secret', 'iscsi --ipaddr=1.1.1.1 --password=secret\n')
self.assert_parse_error('iscsi')
self.assert_parse_error('iscsi --target=tar --user=name --password=secret --port=1234')
self.assert_parse_error('iscsi --ipaddr')
self.assert_parse_error('iscsi --ipaddr=1.2.3.4 not expected')
self.assert_parse_error('iscsi --ipaddr=1.2.3.4 --unknown=value')
self.assert_parse_error('iscsi --target --ipaddr=1.2.3.4')
self.assert_parse_error('iscsi --ipaddr=1.2.3.4 --user')
self.assert_parse_error('iscsi --ipaddr=1.2.3.4 --password')
self.assert_parse_error('iscsi --ipaddr=1.2.3.4 --port')
self.assert_parse_error("iscsi --ipaddr=1.2.3.4 --port=''")
data = self.handler().IscsiData()
data.ipaddr = ''
self.assertEqual(data._getArgsAsStr(), '')
cmd = self.handler().commands[self.command]
cmd.iscsi = [data]
self.assertEqual(cmd.__str__(), 'iscsi\n')
self.assertEqual(cmd.dataList(), [data]) |
def setUpModule():
global cell, cell1
cell = gto.Cell()
cell.build(unit='B', a=(numpy.eye(3) * 4), mesh=([11] * 3), atom='H 0 0 0; H 0 0 1.8', verbose=0, basis='sto3g')
cell1 = gto.Cell()
cell1.atom = '\n He 1.3 .2 .3\n He .1 .1 1.1 '
cell1.basis = {'He': [[0, [0.8, 1]], [1, [0.6, 1]]]}
cell1.mesh = ([15] * 3)
cell1.a = numpy.array(([2.0, 0.9, 0.0], [0.1, 1.9, 0.4], [0.8, 0, 2.1]))
cell1.build() |
class SpringMassDataset(object):
def __init__(self, k, m, A0, c, v0=0, et=10):
super(SpringMassDataset, self).__init__()
self.k = k
self.m = m
self.A0 = A0
self.c = c
self.et = et
self.v0 = v0
self.Nt = int(1000)
self.omega_n = np.sqrt((k / m))
self.xi = ((c / 2) / np.sqrt((m * k)))
self.omega_d = (self.omega_n * np.sqrt((1 - (self.xi ** 2))))
self.A = np.sqrt(((A0 ** 2) + (((v0 + ((self.xi * self.omega_n) * A0)) / self.omega_d) ** 2)))
self.phi = np.arctan(((self.omega_d * A0) / (v0 + ((self.xi * self.omega_n) * A0))))
def solution(self):
t = np.linspace(0, self.et, self.Nt, endpoint=False)
x = ((self.A * np.exp((((- self.xi) * self.omega_n) * t))) * np.sin(((self.omega_d * t) + self.phi)))
fig = plt.figure()
plt.plot(t, x)
plt.savefig(f'../results/pde/pde_{self.k}_{self.m}_{self.c}_{self.A0}.jpg', dpi=300)
plt.close()
d = {'t': t, 'x': x, 'k': ([self.k] * self.Nt), 'm': ([self.m] * self.Nt), 'A0': ([self.A0] * self.Nt), 'c': ([self.c] * self.Nt), 't_new': (t / np.sqrt((self.k / self.m))), 'x_new': (x / self.A0)}
df = pd.DataFrame(d)
return df |
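# Hedged usage sketch: parameter values are illustrative; the closed form above
# assumes the underdamped case, i.e. c < 2 * sqrt(m * k) so omega_d is real,
# and solution() also writes a figure under ../results/pde/.
# df = SpringMassDataset(k=100.0, m=1.0, A0=0.1, c=0.5).solution() |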
def _get_command_line_arguments() -> Dict:
parser = argparse.ArgumentParser()
parser.add_argument(('--' + Args.FEATURES_DIR), required=True, help='Dataset directory for reading and writing features')
parser.add_argument(('--' + Args.PCA_PATH), required=True, help='Pickle containing the PCA transform')
parser.add_argument(('--' + Args.TAG), required=False, help='Tag to append to output file name', default=DEFAULT_TAG)
parser.add_argument(('--' + Args.FEATURES), required=False, help='What type of raw input features to use', default=EXTRACTOR_TYPE_RESNET_TF2, choices=[EXTRACTOR_TYPE_RESNET_TF2])
args_dict = vars(parser.parse_args())
return args_dict |
class LSTM_Parrallel(nn.Module):
def __init__(self):
super(LSTM_Parrallel, self).__init__()
self.encoder_1 = LSTM_encoder()
self.encoder_2 = LSTM_encoder()
self.classifier = nn.Linear(64, 14)
def forward(self, x1, x2, flag='unsupervised'):
if (flag == 'supervised'):
x1 = self.encoder_1(x1, flag=flag)
x2 = self.encoder_2(x2, flag=flag)
y1 = self.classifier(x1)
y2 = self.classifier(x2)
return (y1, y2)
x1 = self.encoder_1(x1)
x2 = self.encoder_2(x2)
return (x1, x2) |
class Mean(ScalarOp):
identity = 0
commutative = True
associative = False
nfunc_spec = ('mean', 2, 1)
nfunc_variadic = 'mean'
def impl(self, *inputs):
return (sum(inputs) / len(inputs))
def c_code(self, node, name, inputs, outputs, sub):
(z,) = outputs
if (not inputs):
return f'{z} = 0;'
else:
return f"{z} = ({' + '.join(inputs)}) / ((double) {len(inputs)});"
def L_op(self, inputs, outputs, gout):
(gz,) = gout
retval = ([(gz / len(inputs))] * len(inputs))
return retval |
def download_release(release_scans, out_dir, file_types, use_v1_sens):
if (len(release_scans) == 0):
return
print((((('Downloading ScanNet ' + RELEASE_NAME) + ' release to ') + out_dir) + '...'))
for scan_id in release_scans:
scan_out_dir = os.path.join(out_dir, scan_id)
download_scan(scan_id, scan_out_dir, file_types, use_v1_sens)
print((('Downloaded ScanNet ' + RELEASE_NAME) + ' release.')) |
def generate_sample(intensity, T, n):
Sequences = []
i = 0
while True:
seq = []
t = 0
while True:
intens1 = intensity.getUpperBound(t, T)
dt = np.random.exponential((1 / intens1))
new_t = (t + dt)
if (new_t > T):
break
intens2 = intensity.getValue(new_t)
u = np.random.uniform()
if ((intens2 / intens1) >= u):
seq.append(new_t)
t = new_t
if (len(seq) > 1):
Sequences.append(seq)
i += 1
if (i == n):
break
return Sequences |
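# Hedged usage sketch: this is Lewis/Ogata thinning for an inhomogeneous Poisson
# process - candidates arrive at the bounding rate getUpperBound(t, T) and are
# kept with probability getValue(t) / getUpperBound(t, T). The intensity class
# below is illustrative only.
class SineIntensity():
def getUpperBound(self, t, T):
return 2.0  # constant bound on the rate below
def getValue(self, t):
return 1.0 + np.sin(t)  # nonnegative and bounded by 2
# sequences = generate_sample(SineIntensity(), T=10.0, n=5) |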
def get_sequence(gr, path=None, pyfaidx_fasta=None):
try:
import pyfaidx
except ImportError:
print('pyfaidx must be installed to get fasta sequences. Use `conda install -c bioconda pyfaidx` or `pip install pyfaidx` to install it.')
sys.exit(1)
if (pyfaidx_fasta is None):
if (path is None):
raise Exception('ERROR get_sequence : you must provide a fasta path or pyfaidx_fasta object')
pyfaidx_fasta = pyfaidx.Fasta(path, read_ahead=int(100000.0))
seqs = []
for (k, df) in gr:
if (type(k) is tuple):
_fasta = pyfaidx_fasta[k[0]]
if (k[1] == '-'):
for (start, end) in zip(df.Start, df.End):
seqs.append((- _fasta[start:end]).seq)
else:
for (start, end) in zip(df.Start, df.End):
seqs.append(_fasta[start:end].seq)
else:
_fasta = pyfaidx_fasta[k]
for (start, end) in zip(df.Start, df.End):
seqs.append(_fasta[start:end].seq)
return pd.concat([pd.Series(s) for s in seqs]).reset_index(drop=True) |
class PublisherPlacementReportView(PublisherAccessMixin, BaseReportView):
export_view = 'publisher_placement_report_export'
impression_model = PlacementImpression
template_name = 'adserver/reports/publisher-placement.html'
fieldnames = ['index', 'views', 'clicks', 'ctr', 'ecpm', 'revenue', 'revenue_share']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
div_id = self.request.GET.get('div_id', '')
publisher_slug = kwargs.get('publisher_slug', '')
publisher = get_object_or_404(Publisher, slug=publisher_slug)
queryset = self.get_queryset(publisher=publisher, campaign_type=context['campaign_type'], start_date=context['start_date'], end_date=context['end_date'], div_id=div_id)
report = PublisherPlacementReport(queryset, index=('date' if div_id else None), order=('-date' if div_id else None), max_results=(None if div_id else self.LIMIT))
report.generate()
div_id_options = self.get_queryset(publisher=publisher, start_date=context['start_date'], end_date=context['end_date']).values_list('div_id', flat=True).annotate(total_views=models.Sum('views')).order_by('-total_views').distinct()[:self.LIMIT]
context.update({'publisher': publisher, 'report': report, 'campaign_types': CAMPAIGN_TYPES, 'div_id': div_id, 'div_id_options': div_id_options, 'export_url': self.get_export_url(publisher_slug=publisher.slug)})
return context
def get_queryset(self, **kwargs):
queryset = super().get_queryset(**kwargs)
if (('div_id' in kwargs) and kwargs['div_id']):
queryset = queryset.filter(div_id=kwargs['div_id'])
return queryset |
class DockerSchema2ManifestList(ManifestListInterface):
METASCHEMA = {'type': 'object', 'properties': {DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: {'type': 'number', 'description': 'The version of the manifest list. Must always be `2`.', 'minimum': 2, 'maximum': 2}, DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {'type': 'string', 'description': 'The media type of the manifest list.', 'enum': [DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE]}, DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: {'type': 'array', 'description': 'The manifests field contains a list of manifests for specific platforms', 'items': {'type': 'object', 'properties': {DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {'type': 'string', 'description': ((('The MIME type of the referenced object. This will generally be ' + 'application/vnd.docker.distribution.manifest.v2+json, but it ') + 'could also be application/vnd.docker.distribution.manifest.v1+json ') + 'if the manifest list references a legacy schema-1 manifest.'), 'enum': [DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE]}, DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: {'type': 'number', 'description': ((('The size in bytes of the object. This field exists so that a ' + 'client will have an expected size for the content before ') + 'validating. If the length of the retrieved content does not ') + 'match the specified length, the content should not be trusted.')}, DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: {'type': 'string', 'description': 'The content addressable digest of the manifest in the blob store'}, DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: {'type': 'object', 'description': ('The platform object describes the platform which the image in ' + 'the manifest runs on'), 'properties': {DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: {'type': 'string', 'description': 'Specifies the CPU architecture, for example amd64 or ppc64le.'}, DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: {'type': 'string', 'description': 'Specifies the operating system, for example linux or windows'}, DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY: {'type': 'string', 'description': 'Specifies the operating system version, for example 10.0.10586'}, DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY: {'type': 'array', 'description': ('specifies an array of strings, each listing a required OS ' + 'feature (for example on Windows win32k)'), 'items': {'type': 'string'}}, DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY: {'type': 'string', 'description': ('Specifies a variant of the CPU, for example armv6l to specify ' + 'a particular CPU variant of the ARM CPU')}, DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY: {'type': 'array', 'description': ('specifies an array of strings, each listing a required CPU ' + 'feature (for example sse4 or aes).'), 'items': {'type': 'string'}}}, 'required': [DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY, DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]}}, 'required': [DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY, DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY, DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY, DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]}}}, 'required': [DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY, DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY, DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]}
def __init__(self, manifest_bytes):
assert isinstance(manifest_bytes, Bytes)
self._layers = None
self._manifest_bytes = manifest_bytes
try:
self._parsed = json.loads(manifest_bytes.as_unicode())
except ValueError as ve:
raise MalformedSchema2ManifestList(('malformed manifest data: %s' % ve))
try:
validate_schema(self._parsed, DockerSchema2ManifestList.METASCHEMA)
except ValidationError as ve:
raise MalformedSchema2ManifestList(('manifest data does not match schema: %s' % ve))
def is_manifest_list(self):
return True
def schema_version(self):
return 2
def digest(self):
return digest_tools.sha256_digest(self._manifest_bytes.as_encoded_str())
@property
def media_type(self):
return self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]
def manifest_dict(self):
return self._parsed
def bytes(self):
return self._manifest_bytes
def filesystem_layers(self):
return None
def get_layers(self, content_retriever):
return None
@property
def blob_digests(self):
return []
def local_blob_digests(self):
return self.blob_digests
def get_blob_digests_for_translation(self):
return self.blob_digests
def layers_compressed_size(self):
return None
def config_media_type(self):
return None
def config(self):
return None
@lru_cache(maxsize=1)
def manifests(self, content_retriever):
manifests = self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]
supported_types = {}
supported_types[DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE] = DockerSchema1Manifest
supported_types[DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE] = DockerSchema2Manifest
return [LazyManifestLoader(m, content_retriever, supported_types, DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY, DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY, DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY) for m in manifests]
def amd64_linux_manifest_digest(self):
for manifest_ref in self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]:
platform = manifest_ref[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
if ((architecture == 'amd64') and (os == 'linux')):
return manifest_ref[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
return None
def validate(self, content_retriever):
for (index, m) in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]):
if (m[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY] == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE):
platform = m[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
parsed = self.manifests(content_retriever)[index].manifest_obj
assert isinstance(parsed, DockerSchema1Manifest)
if (parsed.architecture and (parsed.architecture != platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY])):
raise MismatchManifestException(('Mismatch in arch for manifest `%s`' % parsed.digest))
def child_manifests(self, content_retriever):
return self.manifests(content_retriever)
def child_manifest_digests(self):
return [m[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY] for m in self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]]
def get_manifest_labels(self, content_retriever):
return None
def get_leaf_layer_v1_image_id(self, content_retriever):
return None
def get_legacy_image_ids(self, content_retriever):
return None
def has_legacy_image(self):
return False
def get_requires_empty_layer_blob(self, content_retriever):
return False
def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
legacy_manifest = self._get_legacy_manifest(content_retriever)
if (legacy_manifest is None):
return None
return legacy_manifest.get_schema1_manifest(namespace_name, repo_name, tag_name, content_retriever)
def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever):
if (self.media_type in allowed_mediatypes):
return self
legacy_manifest = self._get_legacy_manifest(content_retriever)
if (legacy_manifest is None):
return None
return legacy_manifest.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever)
def _get_legacy_manifest(self, content_retriever):
for manifest_ref in self.manifests(content_retriever):
platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
if ((architecture != 'amd64') or (os != 'linux')):
continue
try:
return manifest_ref.manifest_obj
except (ManifestException, IOError):
logger.exception('Could not load child manifest')
return None
return None
def unsigned(self):
return self
def generate_legacy_layers(self, images_map, content_retriever):
return None |
def test_optional_columns(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
project_name = 'My.App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
project_path = (temp_dir / 'my-app')
data_path = (temp_dir / 'data')
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config['project']['dependencies'] = ['python___dateutil', 'bAr.Baz[TLS, EdDSA] >=1.2RC5', 'Foo;python_version<"3.8"']
project.save_config(config)
helpers.update_project_environment(project, 'default', {'dependencies': ['proj git+ 'bAr.Baz [TLS, EdDSA] >=1.2RC5;python_version<"3.8"']})
with project_path.as_cwd():
result = hatch('dep', 'show', 'table', '--ascii')
assert (result.exit_code == 0), result.output
assert (helpers.remove_trailing_spaces(result.output) == helpers.dedent("\n Project\n +++++\n | Name | Versions | Markers | Features |\n +++++\n | bar-baz | >=1.2rc5 | | eddsa, tls |\n | foo | | python_version < '3.8' | |\n | python-dateutil | | | |\n +++++\n Env: default\n ++++++\n | Name | URL | Versions | Markers | Features |\n ++++++\n | bar-baz | | >=1.2rc5 | python_version < '3.8' | eddsa, tls |\n | proj | git+ | | | |\n ++++++\n ")) |
class EnumSpecifier(object):
def __init__(self, tag, enumerators):
self.tag = tag
self.enumerators = enumerators
def __repr__(self):
s = 'enum'
if self.tag:
s += (' %s' % self.tag)
if self.enumerators:
s += (' {%s}' % ', '.join([repr(e) for e in self.enumerators]))
return s |
class OOTestCases(unittest.TestCase):
class TestClass():
field1: NonNull[int]
field2: str
class TestSingleton():
field1: str
def test_nonnull(self):
self.assertRaises(ValueError, self.TestClass, None, {'field1': None, 'field2': 'test'})
def test_to_string(self):
self.assertEqual(str(self.TestClass(field1=42, field2='test')), '[field1=42, field2=test]')
def test_eq(self):
t1 = self.TestClass(field1=42, field2='test')
t2 = self.TestClass(field1=42, field2='test')
self.assertEqual(t1, t2)
def test_hash(self):
t1 = self.TestClass(field1=42, field2='test')
t2 = self.TestClass(field1=42, field2='test')
self.assertEqual(hash(t1), hash(t2))
def test_singleton(self):
s1 = self.TestSingleton(field1='test')
s2 = self.TestSingleton()
self.assertEqual(s1, s2)
class TestPickledClass():
field1: Any
@given(st.binary(min_size=100))
@settings(max_examples=10)
def test_pickled(self, b):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file_path = os.path.join(tmp_dir, 'data.pickle')
c = self.TestPickledClass(b)
c.__dump__(tmp_file_path)
unpickled = self.TestPickledClass.__load__(tmp_file_path)
self.assertEqual(unpickled, c)
# class decorator truncated in the source (name lost): (protocol=3)
class TestPickledClassProtocol3():
field1: Any
@given(st.binary(min_size=100))
@settings(max_examples=10)
def test_pickled_protocol_4(self, b):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file_path = os.path.join(tmp_dir, 'data.pickle')
c = self.TestPickledClassProtocol3(b)
c.__dump__(tmp_file_path)
unpickled = self.TestPickledClassProtocol3.__load__(tmp_file_path)
self.assertEqual(unpickled, c) |
class TestBuiltinMethods(TestNameCheckVisitorBase):
@assert_passes()
def test_method_wrapper(self):
import collections.abc
def capybara():
r = range(10)
assert_is_value(r, KnownValue(range(10)))
assert_is_value(r.__iter__(), GenericValue(collections.abc.Iterator, [TypedValue(int)])) |
def train(model, dataset, optimizer, criterion, epoch, args, data_start_index):
model.train()
if (data_start_index == 0):
dataset.shuffle('train', seed=(epoch + args.seed))
if (args.epoch_max_len is not None):
data_end_index = min((data_start_index + args.epoch_max_len), len(dataset.splits['train']))
loader = dataset.loader('train', num_workers=args.num_workers, indices=list(range(data_start_index, data_end_index)))
data_start_index = (data_end_index if (data_end_index < len(dataset.splits['train'])) else 0)
else:
loader = dataset.loader('train', num_workers=args.num_workers)
loss_meter = AverageMeter('loss', ':6.4f')
total_length = len(loader)
progress = ProgressMeter(total_length, [loss_meter], prefix='Training: ')
for (batch_num, batch) in enumerate(tqdm(loader, total=len(loader))):
batch = [tensor.to(args.device) for tensor in batch]
(inputs, lengths, future_words, log_probs, labels, classification_targets, syllables_to_go, future_word_num_syllables, rhyme_group_index) = batch
if (args.task not in ['formality', 'iambic']):
if ((not args.debug) and (len(inputs) != args.batch_size)):
continue
scores = model(inputs, lengths, future_words, log_probs, syllables_to_go, future_word_num_syllables, rhyme_group_index, run_classifier=True)
if (args.task == 'formality'):
expanded_labels = classification_targets.unsqueeze(1).expand((- 1), scores.shape[1])
length_mask = pad_mask(lengths).permute(1, 0)
loss = criterion(scores.flatten()[(length_mask.flatten() == 1)], expanded_labels.flatten().float()[(length_mask.flatten() == 1)])
elif (args.task in ['iambic', 'newline']):
use_indices = (classification_targets.flatten() != (- 1))
loss = criterion(scores.flatten()[use_indices], classification_targets.flatten().float()[use_indices])
else:
loss = criterion(scores.flatten(), labels.flatten().float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_meter.update(loss.detach(), len(labels))
if ((batch_num % args.train_print_freq) == 0):
progress.display(batch_num)
progress.display(total_length)
return data_start_index |
def test_console_ansiformat():
f = console.ansiformat
c = console.codes
all_attrs = f('+*_blue_*+', 'text')
assert ((c['blue'] in all_attrs) and (c['blink'] in all_attrs))
assert ((c['bold'] in all_attrs) and (c['underline'] in all_attrs))
assert (c['reset'] in all_attrs)
assert raises(KeyError, f, '*mauve*', 'text') |
def split_data(df, window_size, test_size, val_size=0, use_ratio=True):
expected_type = (float if use_ratio else int)
if ((not isinstance(test_size, expected_type)) or (val_size and (not isinstance(val_size, expected_type)))):
raise ValueError('use_ratio={} while size args are of type {}'.format(use_ratio, expected_type))
df_size = len(df.index)
train_size = ((1 - test_size) if use_ratio else (df_size - test_size))
idx_mark = (int((train_size * df_size)) if use_ratio else train_size)
slice_idx = (idx_mark - window_size)
(df_train, df_test) = (df.iloc[:idx_mark], df.iloc[slice_idx:])
df_val = df_test
if (val_size > 0):
if use_ratio:
val_factor = (val_size / train_size)
train_factor = (1 - val_factor)
idx_mark = (int((train_factor * idx_mark)) if use_ratio else (idx_mark - val_size))
slice_idx = (idx_mark - window_size)
(df_train, df_val) = (df_train.iloc[:idx_mark], df_train.iloc[slice_idx:])
return (df_train, df_test, df_val) |
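# Hedged worked example: with len(df) == 100, window_size == 10, test_size == 0.2
# and use_ratio=True, train_size == 0.8, idx_mark == 80 and slice_idx == 70, so
# train covers rows 0-79 and test rows 70-99; the window_size overlap lets the
# first test window be built from in-sample history.
# df_train, df_test, df_val = split_data(df, window_size=10, test_size=0.2) |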
class ExprIf(GrammarSymbol):
def __init__(self):
GrammarSymbol.__init__(self)
self.lbp = 5
def led(self, parser, left):
cond_ = left
then_ = parser.expression(self.lbp)
parser.expect(ExprElse, ':')
else_ = parser.expression(self.lbp)
return parser.mgr.Ite(cond_, then_, else_) |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('directory')
parser.add_argument('--binary', required=True)
parser.add_argument('--version', required=True)
args = parser.parse_args()
directory = Path(args.directory).absolute()
staged_binary = Path(args.binary).absolute()
binary_name = staged_binary.stem
version = args.version
with TemporaryDirectory() as d:
temp_dir = Path(d)
resources_dir = (temp_dir / 'resources')
shutil.copytree(str((ASSETS_DIR / 'resources')), str(resources_dir))
resources_dir.joinpath('README.html').write_text(README.format(version=version), encoding='utf-8')
shutil.copy2((REPO_DIR / 'LICENSE.txt'), resources_dir)
root_dir = (temp_dir / 'root')
root_dir.mkdir()
relative_binary_dir = Path('usr', 'local', binary_name, 'bin')
binary_dir = (root_dir / relative_binary_dir)
binary_dir.mkdir(parents=True)
shutil.copy2(staged_binary, binary_dir)
path_file = (((root_dir / 'etc') / 'paths.d') / binary_name)
path_file.parent.mkdir(parents=True)
path_file.write_text(f'''/{relative_binary_dir}
''', encoding='utf-8')
components_dir = (temp_dir / 'components')
components_dir.mkdir()
run_command(['pkgbuild', '--root', str(root_dir), '--identifier', IDENTIFIER, '--version', version, '--install-location', '/', str((components_dir / COMPONENT_PACKAGE_NAME))])
build_dir = (temp_dir / 'build')
build_dir.mkdir()
product_archive = (build_dir / f'{binary_name}-{version}.pkg')
run_command(['productbuild', '--distribution', str((ASSETS_DIR / 'distribution.xml')), '--resources', str(resources_dir), '--package-path', str(components_dir), str(product_archive)])
directory.mkdir(parents=True, exist_ok=True)
shutil.copy2(product_archive, directory) |
class FakeWallet():
def __init__(self, fiat_value):
super().__init__()
self.fiat_value = fiat_value
self.db = WalletDB('{}', manual_upgrades=True)
self.db.transactions = self.db.verified_tx = {'abc': 'Tx'}
def get_tx_height(self, txid):
return TxMinedInfo(height=10, conf=10, timestamp=int(time.time()), header_hash='def')
default_fiat_value = Abstract_Wallet.default_fiat_value
price_at_timestamp = Abstract_Wallet.price_at_timestamp
class storage():
put = (lambda self, x: None) |
@python_2_unicode_compatible
class Proposal(TimeAuditModel):
conference = models.ForeignKey(Conference, on_delete=models.CASCADE)
proposal_section = models.ForeignKey(ProposalSection, verbose_name='Proposal Section', on_delete=models.CASCADE)
proposal_type = models.ForeignKey(ProposalType, verbose_name='Proposal Type', on_delete=models.CASCADE)
author = models.ForeignKey(User, verbose_name='Primary Speaker', on_delete=models.CASCADE)
title = models.CharField(max_length=255)
slug = AutoSlugField(max_length=255, populate_from=('title',))
description = models.TextField(default='')
target_audience = models.PositiveSmallIntegerField(choices=ProposalTargetAudience.CHOICES, default=ProposalTargetAudience.BEGINNER, verbose_name='Target Audience')
video_url = models.URLField(blank=True, default='', help_text='Short 1-2 min video describing your talk')
prerequisites = models.TextField(blank=True, default='')
content_urls = models.TextField(blank=True, default='')
private_content_urls = models.BooleanField(default=False, help_text='Check it if you want to make your content URLs private')
speaker_info = models.TextField(blank=True, default='')
speaker_links = models.TextField(blank=True, default='')
is_first_time_speaker = models.BooleanField(blank=True, default=False)
status = models.PositiveSmallIntegerField(choices=ProposalStatus.CHOICES, default=ProposalStatus.DRAFT)
review_status = models.PositiveSmallIntegerField(choices=ProposalReviewStatus.CHOICES, default=ProposalReviewStatus.YET_TO_BE_REVIEWED, verbose_name='Review Status')
deleted = models.BooleanField(default=False, verbose_name='Is Deleted?')
history = HistoricalRecords()
def __str__(self):
return '{}, {}'.format(self.title, self.proposal_type)
def is_public(self):
return (self.status == 2)
def get_slug(self):
return slugify(self.title)
def get_hashid(self):
hashids = Hashids(min_length=5)
return hashids.encode(self.id)
def get_absolute_url(self):
return reverse('proposal-detail', args=[self.conference.slug, self.get_slug(), self.get_hashid()])
def get_update_url(self):
return reverse('proposal-update', args=[self.conference.slug, self.slug])
def get_review_url(self):
return reverse('proposal-review', args=[self.conference.slug, self.slug])
def get_vote_url(self):
return reverse('proposal-reviewer-vote', args=[self.conference.slug, self.slug])
def get_secondary_vote_url(self):
return reverse('proposal-reviewer-secondary-vote', args=[self.conference.slug, self.slug])
def get_delete_url(self):
return reverse('proposal-delete', args=[self.conference.slug, self.slug])
def get_up_vote_url(self):
return reverse('proposal-vote-up', args=[self.conference.slug, self.slug])
def get_down_vote_url(self):
return reverse('proposal-vote-down', args=[self.conference.slug, self.slug])
def get_remove_vote_url(self):
return reverse('proposal-vote-remove', args=[self.conference.slug, self.slug])
def get_comments_count(self):
return ProposalComment.objects.filter(proposal=self, deleted=False, private=False, vote=False, reviewer=False).count()
def get_reviews_comments_count(self):
return ProposalComment.objects.filter(proposal=self, deleted=False, private=True, vote=False).count()
def get_reviewer_comments_count(self, reviewer):
return ProposalComment.objects.filter(proposal=self, deleted=False, private=True, commenter=reviewer, vote=False).count()
def get_votes_count(self):
votes = ProposalVote.objects.filter(proposal=self).values('up_vote').annotate(counts=models.Count('up_vote'))
votes = {item['up_vote']: item['counts'] for item in votes}
up_vote_count = votes.get(True, 0)
down_vote_count = votes.get(False, 0)
return (up_vote_count - down_vote_count)
def get_reviewer_votes_count(self):
return ProposalSectionReviewerVote.objects.filter(proposal=self).count()
def get_reviewer_votes_count_by_value(self, vote_value):
return ProposalSectionReviewerVote.objects.filter(proposal=self, vote_value__vote_value=vote_value).count()
def get_reviewer_votes_sum(self):
votes = ProposalSectionReviewerVote.objects.filter(proposal=self)
sum_of_votes = sum((v.vote_value.vote_value for v in votes))
return sum_of_votes
def get_reviewer_vote_value(self, reviewer):
try:
vote = ProposalSectionReviewerVote.objects.get(proposal=self, voter__conference_reviewer__reviewer=reviewer)
return vote.vote_value.vote_value
except ProposalSectionReviewerVote.DoesNotExist:
return 0
def get_reviewers_count(self):
return ProposalSectionReviewer.objects.filter(proposal_section=self.proposal_section).count()
def has_negative_votes(self):
return (ProposalSectionReviewerVote.objects.filter(proposal=self, vote_value__vote_value=ProposalReviewVote.NOT_ALLOWED).count() > 0)
def to_response(self, request):
author = '{} {}'.format(self.author.first_name, self.author.last_name)
data = {'id': self.id, 'author': author, 'title': self.title, 'description': self.description, 'target_audience': dict(ProposalTargetAudience.CHOICES)[self.target_audience], 'status': dict(ProposalStatus.CHOICES)[self.status], 'review_status': dict(ProposalReviewStatus.CHOICES)[self.review_status], 'proposal_type': self.proposal_type.name, 'proposal_section': self.proposal_section.name, 'votes_count': self.get_votes_count(), 'speaker_info': self.speaker_info, 'speaker_links': self.speaker_links, 'content_urls': self.content_urls, 'private_content_urls': self.private_content_urls, 'conference': rf_reverse('conference-detail', kwargs={'pk': self.conference_id}, request=request)}
return data
class Meta():
unique_together = ('conference', 'slug') |
class Transform(Operator):
grouping = Grouping.T(default=SensorGrouping.D())
translation = ReplaceComponentTranslation(suffix='T{system}')
def _out_codes(self, group):
return [self.translation.translate(group[0]).format(component=c, system=self.components.lower()) for c in self.components] |
def learned_context(quality, metric='mse', pretrained=False, progress=True, **kwargs):
if (metric not in ('mse', 'ms-ssim')):
raise ValueError(f'Invalid metric "{metric}"')
if ((quality < 1) or (quality > 8)):
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model('learned_context', metric, quality, pretrained, progress, **kwargs) |
class EAESolutionVerifier():
def __init__(self, gold_set: set, cur_event: Dict[(str, Any)], tokens: List[str]) -> None:
self.gold_set = gold_set
self.cur_event = cur_event
self.tokens = tokens
def verify(self, event_obj):
predicted_args = convert_event_obj_to_dict(event_obj, raise_error=True)
res = construct_pred_set_via_api(predicted_args, self.cur_event, self.tokens)
(predicted_set, not_matched_pred_args) = (res['predicted_set'], res['not_matched_pred_args'])
match_result = match_pred_gold_sets(predicted_set, self.gold_set, tokens=self.tokens, raise_error_when_not_matched=True) |
def remove_duplicates_from_file(infile_path, outfile_path='temp..bopscrk'):
lines_seen = set()
outfile = open(outfile_path, 'w')
infile = open(infile_path, 'r')
for line in infile:
if (line not in lines_seen):
outfile.write(line)
lines_seen.add(line)
outfile.close()
infile.close()
os.remove(infile_path)
os.rename(outfile_path, infile_path) |
class TestSimpleStubModuleNotPreferred():
@pytest.fixture(autouse=True, scope='class')
def built(self, builder):
builder('pyiexample2', warningiserror=True)
def test_integration(self, parse):
example_file = parse('_build/html/autoapi/example/index.html')
assert ('DoNotFindThis' not in example_file)
foo_sig = example_file.find(id='example.Foo')
assert foo_sig |
def do_LR(op, stack, state):
arg1_val = stack.pop()
size = SIZE
if z3.is_bv(arg1_val):
arg1 = arg1_val
size = arg1.size()
elif isinstance(arg1_val, str):
arg1 = state.registers[arg1_val]
size = arg1.size()
else:
arg1 = prepare(arg1_val)
(arg2,) = pop_values(stack, state, 1)
if (arg2.size() > size):
arg2 = z3.Extract((size - 1), 0, arg2)
stack.append(z3.RotateLeft(arg1, arg2)) |
@dataclass
class DataCollatorForMultipleChoice():
tokenizer: PreTrainedTokenizerBase
padding: Union[(bool, str, PaddingStrategy)] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features):
label_name = ('label' if ('label' in features[0].keys()) else 'labels')
labels = [feature.pop(label_name) for feature in features]
batch_size = len(features)
num_choices = len(features[0]['input_ids'])
flattened_features = [[{k: v[i] for (k, v) in feature.items()} for i in range(num_choices)] for feature in features]
flattened_features = list(chain(*flattened_features))
batch = self.tokenizer.pad(flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='tf')
batch = {k: tf.reshape(v, (batch_size, num_choices, (- 1))) for (k, v) in batch.items()}
batch['labels'] = tf.convert_to_tensor(labels, dtype=tf.int64)
return batch |
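# Hedged usage sketch: each feature dict is assumed to hold num_choices tokenized
# sequences per key, as produced by a multiple-choice preprocessing step.
# collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
# batch = collator(features)  # tensors shaped (batch_size, num_choices, seq_len) |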