code stringlengths 281 23.7M |
|---|
class StateCriticOutput():
    """Accumulates per-step critic outputs and exposes column-wise views.

    Each appended step output contributes its actor id, value tensor and
    detached value tensor to the corresponding list accessor below.
    """

    def __init__(self):
        # Step outputs collected so far, in insertion order.
        self._step_critic_outputs: List[StateCriticStepOutput] = []

    def append(self, value: StateCriticStepOutput) -> None:
        """Record one step's critic output."""
        self._step_critic_outputs.append(value)

    def actor_ids(self) -> List[ActorID]:
        """Actor ids of all recorded steps, in insertion order."""
        return [step.actor_id for step in self._step_critic_outputs]

    def values(self) -> List[torch.Tensor]:
        """Value tensors of all recorded steps, in insertion order."""
        return [step.values for step in self._step_critic_outputs]

    def detached_values(self) -> List[torch.Tensor]:
        """Detached value tensors of all recorded steps, in insertion order."""
        return [step.detached_values for step in self._step_critic_outputs]

    def reshape(self, shape: Sequence[int]) -> None:
        """Reshape every stored value and detached-value tensor in place."""
        for step in self._step_critic_outputs:
            step.values = step.values.reshape(shape)
            step.detached_values = step.detached_values.reshape(shape)
class FontEditorDemo(HasTraits):
    """TraitsUI demo showing the four FontEditor styles for one Font trait."""

    # The trait edited by every Item below.
    my_font_trait = Font()

    # One Item per editor style, separated by horizontal rules ('_').
    font_group = Group(
        Item('my_font_trait', style='simple', label='Simple'),
        Item('_'),
        Item('my_font_trait', style='custom', label='Custom'),
        Item('_'),
        Item('my_font_trait', style='text', label='Text'),
        Item('_'),
        Item('my_font_trait', style='readonly', label='ReadOnly'),
    )

    # Resizable view with a single OK button.
    traits_view = View(
        font_group,
        title='FontEditor',
        buttons=['OK'],
        resizable=True,
    )
class ImpalaRunner(TrainingRunner):
    """Runner wiring up IMPALA training: model, evaluator, rollout actors.

    NOTE(review): ``__post_init__`` plus the bare class-level annotation imply
    a dataclass-style runner; the ``@dataclass`` decorator is not visible in
    this chunk -- confirm upstream. The bare ``(TrainingRunner)`` line below
    also looks like a mangled ``@override(TrainingRunner)`` decorator.
    """

    # Number of concurrent evaluation environments; values <= 0 mean
    # "auto-detect from available CPUs" (see __post_init__).
    eval_concurrency: int

    def __post_init__(self):
        """Resolve a non-positive eval concurrency to the machine's CPU count."""
        if (self.eval_concurrency <= 0):
            self.eval_concurrency = query_cpu()

    (TrainingRunner)
    def setup(self, cfg: DictConfig) -> None:
        """Build model, optional evaluator, rollout actors and the trainer from *cfg*."""
        super().setup(cfg)
        model = TorchActorCritic(policy=self._model_composer.policy, critic=self._model_composer.critic, device=cfg.algorithm.device)
        self._model_selection = BestModelSelection(dump_file=self.state_dict_dump_file, model=model, dump_interval=self.dump_interval)
        # Rollout actors work on a CPU copy of the policy.
        rollout_policy = copy.deepcopy(model.policy)
        rollout_policy.to('cpu')
        evaluator = None
        if (cfg.algorithm.rollout_evaluator.n_episodes > 0):
            # Evaluation gets its own seeded, distributed environment set.
            eval_env = self.create_distributed_eval_env(self.env_factory, self.eval_concurrency, logging_prefix='eval')
            eval_env_instance_seeds = [self.maze_seeding.generate_env_instance_seed() for _ in range(self.eval_concurrency)]
            eval_env.seed(eval_env_instance_seeds)
            evaluator = Factory(base_type=RolloutEvaluator).instantiate(cfg.algorithm.rollout_evaluator, eval_env=eval_env, model_selection=self._model_selection)
        # One env/agent seed per training actor for reproducibility.
        train_env_instance_seeds = [self.maze_seeding.generate_env_instance_seed() for _ in range(cfg.algorithm.num_actors)]
        train_agent_instance_seeds = [self.maze_seeding.generate_agent_instance_seed() for _ in range(cfg.algorithm.num_actors)]
        rollout_actors = self.create_distributed_rollout_actors(self.env_factory, rollout_policy, cfg.algorithm.n_rollout_steps, cfg.algorithm.num_actors, cfg.algorithm.actors_batch_size, cfg.algorithm.queue_out_of_sync_factor, train_env_instance_seeds, train_agent_instance_seeds)
        self._trainer = IMPALA(algorithm_config=cfg.algorithm, rollout_generator=rollout_actors, evaluator=evaluator, model=model, model_selection=self._model_selection)
        # Optionally resume trainer state from a previous run's dump directory.
        self._init_trainer_from_input_dir(trainer=self._trainer, state_dict_dump_file=self.state_dict_dump_file, input_dir=cfg.input_dir)

    def create_distributed_eval_env(self, env_factory: Callable[([], Union[(StructuredEnv, StructuredEnvSpacesMixin)])], eval_concurrency: int, logging_prefix: str) -> VectorEnv:
        """Factory hook: build the distributed evaluation env (subclass responsibility)."""

    def create_distributed_rollout_actors(self, env_factory: Callable[([], Union[(StructuredEnv, StructuredEnvSpacesMixin, LogStatsEnv)])], policy: TorchPolicy, n_rollout_steps: int, n_actors: int, batch_size: int, queue_out_of_sync_factor: float, env_instance_seeds: List[int], agent_instance_seeds: List[int]) -> DistributedActors:
        """Factory hook: build the distributed rollout actors (subclass responsibility)."""
class Migration(migrations.Migration):
    """Squashed migration creating the EmailMessage and MailLog models.

    NOTE(review): the byte-string choices (``b'...'``) on ``event_type`` look
    like a py2->py3 conversion artifact -- confirm they match the model field.
    """

    # Original migrations this squashed migration replaces.
    replaces = [('frontend', '0017_emailmessage_maillog_squashed_0021_auto__1627'), ('frontend', '0018_auto__1533')]

    # Requires the (swappable) user model and migration 0016 of this app.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('frontend', '0016_remove_prescription_chemical')]

    # Schema operations: create the two models with their fields/indexes.
    operations = [migrations.CreateModel(name='EmailMessage', fields=[('message_id', models.CharField(max_length=998, primary_key=True, serialize=False)), ('pickled_message', models.BinaryField()), ('subject', models.CharField(max_length=200)), ('tags', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(db_index=True, max_length=100), null=True, size=None)), ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)), ('send_count', models.SmallIntegerField(default=0)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('to', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(db_index=True, max_length=254), size=None))]), migrations.CreateModel(name='MailLog', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('metadata', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)), ('recipient', models.CharField(db_index=True, max_length=254)), ('tags', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(db_index=True, max_length=100), null=True, size=None)), ('reject_reason', models.CharField(blank=True, max_length=15, null=True)), ('message_id', models.CharField(db_index=True, max_length=998)), ('event_type', models.CharField(choices=[(b'complained', b'complained'), (b'delivered', b'delivered'), (b'inbound_failed', b'inbound_failed'), (b'clicked', b'clicked'), (b'opened', b'opened'), (b'subscribed', b'subscribed'), (b'deferred', b'deferred'), (b'inbound', b'inbound'), (b'unknown', b'unknown'), (b'rejected', b'rejected'), (b'queued', b'queued'), (b'failed', b'failed'), (b'autoresponded', b'autoresponded'), (b'unsubscribed', b'unsubscribed'), (b'bounced', b'bounced'), (b'sent', b'sent')], db_index=True, max_length=15)), ('timestamp', models.DateTimeField(blank=True, null=True))])]
class TestOFPHello(unittest.TestCase):
    """Unit tests for OFPHello header parsing and serialization.

    Fix: the mid/max test cases had blank ``xid =`` assignments (invalid
    syntax); restored mid-range and maximum unsigned 32-bit values, matching
    the min/mid/max naming of the test methods.
    """

    def _test_parser(self, xid):
        """Pack an OpenFlow header with *xid*, parse it back, compare all fields."""
        version = ofproto.OFP_VERSION
        msg_type = ofproto.OFPT_HELLO
        msg_len = ofproto.OFP_HEADER_SIZE
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = pack(fmt, version, msg_type, msg_len, xid)
        res = OFPHello.parser(object, version, msg_type, msg_len, xid, bytearray(buf))
        eq_(version, res.version)
        eq_(msg_type, res.msg_type)
        eq_(msg_len, res.msg_len)
        eq_(xid, res.xid)
        eq_(six.binary_type(buf), six.binary_type(res.buf))

    def test_parser_xid_min(self):
        # Smallest value of the unsigned 32-bit xid header field.
        xid = 0
        self._test_parser(xid)

    def test_parser_xid_mid(self):
        # Mid-range value (2**31).
        xid = 2147483648
        self._test_parser(xid)

    def test_parser_xid_max(self):
        # Largest unsigned 32-bit value (2**32 - 1).
        xid = 4294967295
        self._test_parser(xid)

    def test_serialize(self):
        """serialize() must fill in the default header fields (version, type, xid)."""
        c = OFPHello(_Datapath)
        c.serialize()
        eq_(ofproto.OFP_VERSION, c.version)
        eq_(ofproto.OFPT_HELLO, c.msg_type)
        eq_(0, c.xid)
# Fix: the decorator line was corrupted to the bare ``.django_db``; restored
# the pytest-django marker that grants the test database access.
@pytest.mark.django_db
def test_award_count_success(client, monkeypatch, award_data, helpers, elasticsearch_award_index):
    """Happy path: the award-count endpoint returns HTTP 200 and one result."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
    helpers.mock_current_fiscal_year(monkeypatch)
    resp = client.get(url.format(filters=''))
    results = resp.data['results']
    assert (resp.status_code == status.HTTP_200_OK)
    assert (len(results) == 1)
class DocClassificationDataModule(pl.LightningDataModule):
    """Lightning data module for document classification over text datapipes.

    Fixes:
    - ``from_config`` constructed the module with ``test_dataset=val_dataset``
      (copy-paste bug); the test split is now passed through.
    - the bare ``_entry`` line above ``from_config`` was invalid (NameError at
      class creation); since ``from_config`` takes no ``self``/``cls`` it is
      now a ``@staticmethod``. NOTE(review): ``_entry`` looks like a stripped
      framework decorator -- confirm upstream.
    """

    def __init__(self, train_dataset: IterDataPipe[Tuple[(str, str)]], val_dataset: IterDataPipe[Tuple[(str, str)]], test_dataset: IterDataPipe[Tuple[(str, str)]], transform: nn.Module, label_transform: Optional[nn.Module], columns: List[str], label_column: str, batch_size: int, num_workers: int=0, drop_last: bool=False, pin_memory: bool=False) -> None:
        """Store dataset splits, transforms and DataLoader settings."""
        super().__init__()
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.test_dataset = test_dataset
        self.transform = transform  # applied to each columnar batch
        self.label_transform = label_transform  # optional label encoder
        self.columns = columns  # input columns fed to the transform
        self.label_column = label_column
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.drop_last = drop_last
        self.pin_memory = pin_memory

    @staticmethod
    def from_config(transform: DictConfig, dataset: DictConfig, columns: List[str], label_column: str, batch_size: int, num_workers: int=0, drop_last: bool=False, pin_memory: bool=False) -> 'DocClassificationDataModule':
        """Instantiate datasets and transforms from hydra configs and build the module."""
        (train_dataset, val_dataset, test_dataset) = hydra.utils.call(dataset)
        text_transform = hydra.utils.instantiate(transform.transform, _recursive_=False)
        label_transform = None
        if transform.label_transform:
            label_transform = hydra.utils.instantiate(transform.label_transform, _recursive_=False)
        # Bug fix: previously val_dataset was also passed as the test split.
        return DocClassificationDataModule(train_dataset=train_dataset, val_dataset=val_dataset, test_dataset=test_dataset, transform=text_transform, label_transform=label_transform, columns=columns, label_column=label_column, batch_size=batch_size, num_workers=num_workers, drop_last=drop_last, pin_memory=pin_memory)

    def _get_data_loader(self, dataset: IterDataPipe[Tuple[(str, str)]]) -> DataLoader:
        """Batch, columnarize, transform and index *dataset*, then wrap it in a DataLoader."""
        dataset = dataset.batch(self.batch_size).rows2columnar(self.columns)
        dataset = dataset.map(self.transform)
        # Encode labels to tensor ids alongside the transformed features.
        dataset = dataset.map((lambda x: {**x, 'label_ids': to_tensor(self.label_transform([str(label) for label in x[self.label_column]]))}))
        dataset = dataset.add_index()
        # Batching already happened in the datapipe, hence batch_size=None.
        return DataLoader(dataset, batch_size=None, shuffle=False, num_workers=self.num_workers, drop_last=self.drop_last, pin_memory=self.pin_memory, worker_init_fn=worker_init_fn)

    def train_dataloader(self) -> DataLoader:
        """DataLoader over the training split."""
        return self._get_data_loader(self.train_dataset)

    def val_dataloader(self) -> DataLoader:
        """DataLoader over the validation split."""
        return self._get_data_loader(self.val_dataset)

    def test_dataloader(self) -> DataLoader:
        """DataLoader over the test split."""
        return self._get_data_loader(self.test_dataset)
class TreeData():
    """Holder for per-tree visualization/session state.

    NOTE(review): annotated class attributes with defaults look like a
    stripped ``@dataclass``; as written these are class-level attributes
    shared until shadowed on an instance -- confirm upstream.
    """

    tree: Tree = None  # the tree object being rendered
    name: str = None  # display name of the tree
    style: TreeStyle = None  # whole-tree style
    nodestyles: dict = None  # per-node style overrides -- TODO confirm key type
    include_props: list = None  # node properties to show -- TODO confirm
    exclude_props: list = None  # node properties to hide -- TODO confirm
    layouts: list = None  # active layout functions
    timer: float = None  # timestamp for session bookkeeping -- TODO confirm
    ultrametric: bool = False  # whether the ultrametric view is active
    initialized: bool = False  # set once the tree has been prepared
    selected: dict = None  # current selections
    active: namedtuple = None  # active nodes/clades -- TODO confirm structure
    searches: dict = None  # stored searches
class Resnet():
    """Builds a ResNet-style TensorFlow graph from pretrained numpy weights.

    NOTE(review): the bare ``self.layer_zero`` / ``self.layer`` / ``self.Net``
    expressions in ``__init__`` are no-ops as written -- they look like
    corruption (perhaps a lost ``self.Net()`` call); confirm against the
    original source.
    """

    def __init__(self, input_x, phase, parameters=parameters):
        # NOTE(review): the default binds a module-level ``parameters`` object
        # at class-definition time -- confirm that global exists.
        self.input_x = input_x
        # Training/inference flag forwarded to the batch-norm helper.
        self.phase = phase
        self.parameters = parameters
        self.layer_zero
        self.layer
        self.Net

    def Net(self):
        """Assemble the network: stem (layer 0) followed by stages 1..4."""
        layer_zero_out = self.layer_zero(self.input_x)
        current_output = layer_zero_out
        for layer_idx in range(1, 5):
            layer_out = self.layer(current_output, layer_idx)
            current_output = layer_out
        return current_output

    def layer_zero(self, layer_input):
        """Stem: conv+batchnorm at stride 2, then 3x3/2 'SAME' max-pool."""
        layer_dict = self.parameters['layer0']
        bl_str = 'block_1'
        # Pretrained conv weight and batch-norm statistics for the stem.
        W = np.array(layer_dict[bl_str]['conv1']['weight'], dtype=np.float32)
        bn_mov_mean = np.array(layer_dict[bl_str]['bn1']['running_mean'], dtype=np.float32)
        bn_mov_var = np.array(layer_dict[bl_str]['bn1']['running_var'], dtype=np.float32)
        bn_gamma = np.array(layer_dict[bl_str]['bn1']['weight'], dtype=np.float32)
        bn_beta = np.array(layer_dict[bl_str]['bn1']['bias'], dtype=np.float32)
        W_conv = init_weights(W, '_0', False)
        out = conv2d_batchnorm(layer_input, W_conv, 'layer_0', self.phase, bn_beta, bn_gamma, bn_mov_mean, bn_mov_var, [1, 2, 2, 1], True)
        out = tf.nn.max_pool(out, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
        print('layer0', out.shape)
        return out

    def layer(self, layer_input, layer_no):
        """Residual stage *layer_no*: two blocks of (conv + batchnorm) x 2.

        NOTE(review): the shortcut ``res`` is only added back in the
        downsampling block of stages > 1; other blocks never re-add the
        residual -- verify this matches the intended architecture.
        """
        layer_dict = self.parameters[('layer%d' % layer_no)]
        cur = layer_input
        res = layer_input  # shortcut branch
        for b_no in range(1, 3):
            bl_str = ('block_%d' % b_no)
            stride = [0, 0]  # placeholder; always overwritten below
            if (b_no == 1):
                # First block of a stage strides its first conv by 2.
                stride = [2, 1]
            else:
                stride = [1, 1]
            # Pretrained weights/statistics for both convs of this block.
            W1 = np.array(layer_dict[bl_str]['conv1']['weight'], dtype=np.float32)
            bn_mov_mean1 = np.array(layer_dict[bl_str]['bn1']['running_mean'], dtype=np.float32)
            bn_mov_var1 = np.array(layer_dict[bl_str]['bn1']['running_var'], dtype=np.float32)
            bn_gamma1 = np.array(layer_dict[bl_str]['bn1']['weight'], dtype=np.float32)
            bn_beta1 = np.array(layer_dict[bl_str]['bn1']['bias'], dtype=np.float32)
            W2 = np.array(layer_dict[bl_str]['conv2']['weight'], dtype=np.float32)
            bn_mov_mean2 = np.array(layer_dict[bl_str]['bn2']['running_mean'], dtype=np.float32)
            bn_mov_var2 = np.array(layer_dict[bl_str]['bn2']['running_var'], dtype=np.float32)
            bn_gamma2 = np.array(layer_dict[bl_str]['bn2']['weight'], dtype=np.float32)
            bn_beta2 = np.array(layer_dict[bl_str]['bn2']['bias'], dtype=np.float32)
            W_conv1 = init_weights(W1, ('_l_%d_bl_%d_no_%d' % (layer_no, b_no, 1)), False)
            W_conv2 = init_weights(W2, ('_l_%d_bl_%d_no_%d' % (layer_no, b_no, 2)), False)
            out1 = conv2d_batchnorm(cur, W_conv1, ('layer_%d_%d_1' % (layer_no, b_no)), self.phase, bn_beta1, bn_gamma1, bn_mov_mean1, bn_mov_var1, [1, stride[0], stride[0], 1], False)
            print(('layer_%d_%d_1' % (layer_no, b_no)), out1.shape)
            if ((layer_no > 1) and (b_no == 1)):
                # Downsample the shortcut with a strided 1-conv + batchnorm,
                # then fuse it into the main branch before the relu.
                downsample_dict = self.parameters[('layer%d_downsample' % layer_no)]
                W_dn = np.array(downsample_dict['block_1']['conv']['weight'], dtype=np.float32)
                bn_mov_mean_dn = np.array(downsample_dict['block_1']['bn']['running_mean'], dtype=np.float32)
                bn_mov_var_dn = np.array(downsample_dict['block_1']['bn']['running_var'], dtype=np.float32)
                bn_gamma_dn = np.array(downsample_dict['block_1']['bn']['weight'], dtype=np.float32)
                bn_beta_dn = np.array(downsample_dict['block_1']['bn']['bias'], dtype=np.float32)
                W_conv_dn = init_weights(W_dn, ('downsample_%d' % layer_no), False)
                res = conv2d_batchnorm(res, W_conv_dn, ('layer_dn_%d' % layer_no), self.phase, bn_beta_dn, bn_gamma_dn, bn_mov_mean_dn, bn_mov_var_dn, [1, 2, 2, 1], False)
                print(('downsample_layer_%d_%d_1' % (layer_no, b_no)), res.shape)
                out1 = tf.nn.relu((out1 + res))
            else:
                out1 = tf.nn.relu(out1)
            out2 = conv2d_batchnorm(out1, W_conv2, ('layer_%d_%d_2' % (layer_no, b_no)), self.phase, bn_beta2, bn_gamma2, bn_mov_mean2, bn_mov_var2, [1, stride[1], stride[1], 1], True)
            print(('layer_%d_%d_2' % (layer_no, b_no)), out2.shape)
            cur = out2
        return cur
def test_worklist_class():
    """Traverse a 3-task dependency cycle (a -> c -> b -> a) via WorkList.

    Popping a task schedules its dependencies; already-processed tasks must
    not be revisited, so each traversal visits every task exactly once.
    """

    class _Task():
        def __init__(self, name, depends_on=None):
            self.name = name
            self.depends_on = depends_on if depends_on else []

    task_a = _Task('a')
    task_b = _Task('b', [task_a])
    task_c = _Task('c', [task_b])
    # Close the cycle: a depends on c.
    task_a.depends_on = [task_c]

    def _collect(first):
        """Drain a WorkList seeded with *first*, returning visited names in order."""
        worklist = WorkList([first])
        names = []
        while not worklist.empty:
            current = worklist.pop()
            names.append(current.name)
            for dependency in current.depends_on:
                worklist.schedule(dependency)
        return names

    assert _collect(task_a) == ['a', 'c', 'b']
    assert _collect(task_b) == ['b', 'a', 'c']
    assert _collect(task_c) == ['c', 'b', 'a']
class OptionPlotoptionsScatter3dStatesHoverHalo(Options):
    """Halo configuration for the hover state of 3D scatter points.

    Fix: each getter was immediately shadowed by its same-named setter
    (duplicate ``def``s); restored the ``@property`` / ``@<name>.setter``
    pairs so both read and write access work.
    """

    @property
    def attributes(self):
        """Halo attributes; default None."""
        return self._config_get(None)

    @attributes.setter
    def attributes(self, value: Any):
        self._config(value, js_type=False)

    @property
    def opacity(self):
        """Halo opacity; default 0.25."""
        return self._config_get(0.25)

    @opacity.setter
    def opacity(self, num: float):
        self._config(num, js_type=False)

    @property
    def size(self):
        """Halo size; default 10."""
        return self._config_get(10)

    @size.setter
    def size(self, num: float):
        self._config(num, js_type=False)
def parse_apk_output_list(packages_info):
    """Parse ``apk`` package-listing output into product/version dicts.

    Each line of the form ``<name>-<version>[-<release>]`` (version starting
    with a digit) yields ``{'product': name, 'version': version}``; the
    release suffix (e.g. ``-r0``) is stripped. Non-matching lines are skipped.

    Fix: the original evaluated the same regex twice per line (``re.search``
    as a guard, then ``re.match`` for extraction); the compiled pattern is now
    hoisted out of the loop and matched once.

    :param packages_info: raw multi-line ``apk`` output.
    :return: list of ``{'product': ..., 'version': ...}`` dicts, in line order.
    """
    pattern = re.compile(r'(.*)-([0-9].*)')
    products = []
    for line in packages_info.split('\n'):
        match = pattern.match(line)
        if match:
            # Greedy group(1) puts the last '-'-digit boundary between
            # product name and version, matching the original behavior.
            version = match.group(2)
            # Drop the release suffix: '1.2.2-r3' -> '1.2.2'.
            version = version.split('-', 1)[0]
            products.append({'product': match.group(1), 'version': version})
    return products
def can_post_topic(user, forum):
    """Build the permission that decides whether *user* may open a topic.

    *forum* may be a ``Forum`` instance or a plain forum id. Allowed when the
    user is at least a super moderator, moderates this forum, or holds the
    'posttopic' permission while the forum is not locked.
    """
    forum_kwargs = {}
    if isinstance(forum, int):
        forum_kwargs['forum_id'] = forum
    elif isinstance(forum, Forum):
        forum_kwargs['forum'] = forum
    requirement = Or(
        IsAtleastSuperModerator,
        IsModeratorInForum(**forum_kwargs),
        And(Has('posttopic'), ForumNotLocked(**forum_kwargs)),
    )
    return Permission(requirement, identity=user)
class OptionSeriesTilemapSonificationDefaultinstrumentoptionsMappingLowpassFrequency(Options):
    """Lowpass-frequency mapping options for the default sonification
    instrument of tilemap series.

    Fix: each getter was shadowed by its same-named setter (duplicate
    ``def``s); restored the ``@property`` / ``@<name>.setter`` pairs.
    """

    @property
    def mapFunction(self):
        """Mapping function; default None."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Mapping target; default None."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value; default None."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value; default None."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Mapping range constraint; default None."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def generate(modname):
    """Populate module *modname* with OXM constants and helper functions.

    For every entry in the module's ``oxm_types`` table of OpenFlow-basic
    class, this defines ``OFPXMT_OFB_<NAME>``, ``OXM_OF_<NAME>`` and
    ``OXM_OF_<NAME>_W`` constants, then binds the generic ``oxm_*`` helper
    functions to the module's lookup tables via :func:`functools.partial`.
    """
    import sys
    import functools
    mod = sys.modules[modname]

    def add_attr(k, v):
        # Shorthand: define attribute *k* on the target module.
        setattr(mod, k, v)
    for i in mod.oxm_types:
        # Tuple nums (vendor, field) are skipped here -- presumably
        # experimenter OXMs; TODO confirm against the oxm_types table.
        if isinstance(i.num, tuple):
            continue
        # Only OpenFlow-basic class fields get the OFB constant aliases.
        if (i._class != OFPXMC_OPENFLOW_BASIC):
            continue
        uk = i.name.upper()
        ofpxmt = i.oxm_field
        td = i.type
        add_attr(('OFPXMT_OFB_' + uk), ofpxmt)
        add_attr(('OXM_OF_' + uk), mod.oxm_tlv_header(ofpxmt, td.size))
        # '_W' variant built by oxm_tlv_header_w -- presumably the masked
        # (wildcarded) header form; confirm in the module helpers.
        add_attr((('OXM_OF_' + uk) + '_W'), mod.oxm_tlv_header_w(ofpxmt, td.size))
    oxx = 'oxm'
    # Lookup tables shared by the bound helpers below.
    name_to_field = dict(((f.name, f) for f in mod.oxm_types))
    num_to_field = dict(((f.num, f) for f in mod.oxm_types))
    # Bind the generic conversion/parse/serialize helpers to this module.
    add_attr('oxm_get_field_info_by_name', functools.partial(_get_field_info_by_name, oxx, name_to_field))
    add_attr('oxm_from_user', functools.partial(_from_user, oxx, name_to_field))
    add_attr('oxm_from_user_header', functools.partial(_from_user_header, oxx, name_to_field))
    add_attr('oxm_to_user', functools.partial(_to_user, oxx, num_to_field))
    add_attr('oxm_to_user_header', functools.partial(_to_user_header, oxx, num_to_field))
    add_attr('_oxm_field_desc', functools.partial(_field_desc, num_to_field))
    add_attr('oxm_normalize_user', functools.partial(_normalize_user, oxx, mod))
    add_attr('oxm_parse', functools.partial(_parse, mod))
    add_attr('oxm_parse_header', functools.partial(_parse_header, mod))
    add_attr('oxm_serialize', functools.partial(_serialize, oxx, mod))
    add_attr('oxm_serialize_header', functools.partial(_serialize_header, oxx, mod))
    add_attr('oxm_to_jsondict', _to_jsondict)
    add_attr('oxm_from_jsondict', _from_jsondict)
class OptionSeriesItemSonificationContexttracksMappingNoteduration(Options):
    """Note-duration mapping options for sonification context tracks of a
    series item.

    Fix: each getter was shadowed by its same-named setter (duplicate
    ``def``s); restored the ``@property`` / ``@<name>.setter`` pairs.
    """

    @property
    def mapFunction(self):
        """Mapping function; default None."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Mapping target; default None."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value; default None."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value; default None."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Mapping range constraint; default None."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsSolidgaugeSonificationContexttracksMappingVolume(Options):
    """Volume mapping options for sonification context tracks of solid-gauge
    plot options.

    Fix: each getter was shadowed by its same-named setter (duplicate
    ``def``s); restored the ``@property`` / ``@<name>.setter`` pairs.
    """

    @property
    def mapFunction(self):
        """Mapping function; default None."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Mapping target; default None."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value; default None."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value; default None."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Mapping range constraint; default None."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class FaceAccessories(BaseModel):
    """Per-accessory confidence scores for a detected face; ``None`` = unset.

    Fix: ``default`` was declared as an instance method but takes no
    ``self``/``cls`` parameter (calling it on an instance would raise); it is
    now a ``@staticmethod`` so both ``FaceAccessories.default()`` and
    instance-level calls work.
    """

    sunglasses: Optional[float]
    reading_glasses: Optional[float]
    swimming_goggles: Optional[float]
    face_mask: Optional[float]
    eyeglasses: Optional[float]
    headwear: Optional[float]

    @staticmethod
    def default() -> 'FaceAccessories':
        """Return an instance with every accessory score unset."""
        return FaceAccessories(sunglasses=None, reading_glasses=None, swimming_goggles=None, face_mask=None, eyeglasses=None, headwear=None)
class SignalWithStartWorkflowExecutionRequest(betterproto.Message):
    """Protobuf request: signal a workflow execution, starting it first if
    it is not already running.

    NOTE(review): betterproto-generated message classes are normally decorated
    with ``@dataclass(eq=False, repr=False)``; no decorator is visible in this
    chunk -- confirm upstream. Field numbers are the protobuf tags.
    """

    namespace: str = betterproto.string_field(1)
    workflow_id: str = betterproto.string_field(2)
    workflow_type: v1common.WorkflowType = betterproto.message_field(3)
    task_queue: v1taskqueue.TaskQueue = betterproto.message_field(4)
    # Input payload used when the workflow has to be started.
    input: v1common.Payloads = betterproto.message_field(5)
    workflow_execution_timeout: timedelta = betterproto.message_field(6)
    workflow_run_timeout: timedelta = betterproto.message_field(7)
    workflow_task_timeout: timedelta = betterproto.message_field(8)
    # Identity of the client issuing this request.
    identity: str = betterproto.string_field(9)
    # Deduplication id for the request.
    request_id: str = betterproto.string_field(10)
    workflow_id_reuse_policy: v1enums.WorkflowIdReusePolicy = betterproto.enum_field(11)
    # Name and payload of the signal delivered to the workflow.
    signal_name: str = betterproto.string_field(12)
    signal_input: v1common.Payloads = betterproto.message_field(13)
    control: str = betterproto.string_field(14)
    retry_policy: v1common.RetryPolicy = betterproto.message_field(15)
    cron_schedule: str = betterproto.string_field(16)
    memo: v1common.Memo = betterproto.message_field(17)
    search_attributes: v1common.SearchAttributes = betterproto.message_field(18)
    header: v1common.Header = betterproto.message_field(19)
class Qstat():
    """Convenience wrapper around a list of scheduler job records.

    When no explicit job list is supplied, jobs are fetched via the
    module-level ``qstat(user=...)`` helper.
    """

    def __init__(self, user=None, jobs=None):
        # Query the scheduler only when no job list is handed in.
        self.jobs = qstat(user=user) if jobs is None else jobs

    def queues(self):
        """Sorted unique queue names among jobs assigned to a queue."""
        names = []
        for job in self.jobs:
            if job.queue == '':
                continue
            if job.queue_name not in names:
                names.append(job.queue_name)
        return sorted(names)

    def nodes(self):
        """Sorted unique host names among jobs assigned to a queue."""
        hosts = []
        for job in self.jobs:
            if job.queue == '':
                continue
            if job.queue_host not in hosts:
                hosts.append(job.queue_host)
        return sorted(hosts)

    def users(self):
        """Sorted unique owning users across all jobs."""
        owners = []
        for job in self.jobs:
            if job.user not in owners:
                owners.append(job.user)
        return sorted(owners)

    def filter(self, attr, value):
        """Return a new Qstat holding jobs whose *attr* matches *value*.

        A leading/trailing ``'*'`` in *value* acts as a wildcard (suffix,
        prefix, or substring match). Jobs lacking *attr* yield an empty
        result.
        """
        try:
            leading = value.startswith('*')
            trailing = value.endswith('*')
            if leading and trailing:
                needle = value.strip('*')
                matched = [j for j in self.jobs if needle in j.__dict__[attr]]
            elif leading:
                suffix = value.lstrip('*')
                matched = [j for j in self.jobs if j.__dict__[attr].endswith(suffix)]
            elif trailing:
                prefix = value.rstrip('*')
                matched = [j for j in self.jobs if j.__dict__[attr].startswith(prefix)]
            else:
                matched = [j for j in self.jobs if j.__dict__[attr] == value]
        except KeyError:
            matched = []
        return Qstat(jobs=matched)

    def __len__(self):
        return len(self.jobs)
class Computer():
    """Gathers and exposes system information parsed from neofetch output.

    NOTE(review): many zero-argument methods below are read elsewhere in this
    class as plain attributes (``self.host``, ``self.deid``, ``self.wmid``,
    ``self.product``, ``self.version``, ``self.font``, ``self.hostappid``),
    which strongly suggests they were ``@property``-decorated originally and
    the decorators were stripped -- confirm upstream before relying on call
    style. Also depends on a module-level ``args`` options object and helpers
    (``get_cpu``, ``get_gpu``, ``get_mobo``, ``get_infos``, ``get_gpuid``,
    ``run_command``, ``exec_bash``, ``enableFlatpak``, ``get_default_config``).
    """

    # Maps a neofetch line prefix (e.g. 'CPU:') to the parser that consumes it.
    parseMap: Dict[(str, Callable)]
    # Parsed values per prefix. NOTE(review): class-level mutable default is
    # shared by all instances -- fine only if a single Computer ever exists.
    componentMap: Dict[(str, List)] = {}
    # Identifier lookup tables loaded via get_infos().
    idsMap: Dict[(str, Dict)]
    # One of 'linux' | 'macos' | 'windows' (set by detect_os()).
    os: str
    neofetchwin: bool = False  # Windows-native neofetch was used
    neofetch: bool = False  # regular neofetch was used
    values: str  # raw neofetch output
    laptop: bool = False  # battery detected (Linux heuristic)
    uptime: float  # boot time as epoch seconds (psutil.boot_time)

    def memory(self) -> str:
        """Memory usage line from neofetch."""
        return self.get_component_line('Memory:')

    def osinfo(self) -> str:
        """OS description line."""
        return self.get_component_line('OS:')

    def osinfoid(self) -> str:
        """Resolve the OS line to an id from the ids table."""
        component = self.get_component_line('OS:')
        # Join the first two whitespace-separated words, lowercased.
        component = (component.split()[0] + component.split()[1]).lower()
        component_list = self.idsMap[self.idsMap['map']['OS:']]
        for (comp, id) in component_list.items():
            if (component.lower().find(comp.lower()) >= 0):
                return id
        print('Unknown {}, contact us on github to resolve this.'.format(self.idsMap['map']['OS:']))
        return component_list['unknown']

    def motherboard(self) -> str:
        """Motherboard line; falls back to the host line when unavailable."""
        tmp = self.get_component_line('Motherboard:')
        if (tmp == 'Motherboard: N/A'):
            return self.host
        return tmp

    def motherboardid(self) -> str:
        """Matching ids-table key for the motherboard."""
        return self.get_component_idkey('Motherboard:')

    def motherboardappid(self) -> str:
        """Motherboard app id; falls back to the host app id when unknown."""
        tmp = self.get_component_id('Motherboard:')
        if (tmp == self.idsMap[self.idsMap['map']['Motherboard:']]['unknown']):
            return self.hostappid
        return tmp

    def host(self) -> str:
        """Host (machine model) line."""
        return self.get_component_line('Host:')

    def hostid(self) -> str:
        """Resolve the host line to an id: first word-wise, then by the
        second token after stripping digits."""
        hostsplit = self.host.split()
        host_list: Dict[(str, str)] = self.idsMap[self.idsMap['map']['Host:']]
        for line in hostsplit:
            if (line in host_list):
                return line
        hostid = []
        # NOTE(review): ' '.join over a string interleaves every character
        # with spaces -- looks suspicious; confirm intended behavior.
        hostjoin = ' '.join(self.host)
        for numsplit in range(len(hostjoin)):
            if (not hostjoin[numsplit].isdigit()):
                hostid.append(hostjoin[numsplit])
        hostid = ''.join(hostid)
        hostid = hostid.split()[1]
        if (hostid in host_list):
            return host_list[hostid]
        else:
            return host_list['unknown']

    def hostappid(self) -> str:
        """App id resolved from the host line."""
        return self.get_component_id('Host:')

    def cpu(self) -> str:
        """All CPU info lines, newline-joined ('CPU: N/A' when none)."""
        key = 'CPU:'
        cpus: List[Cpu_interface] = self.get_component(key)
        temp = []
        for cpu in cpus:
            temp.append(cpu.info)
        return ('\n'.join(temp) if (len(cpus) > 0) else '{} N/A'.format(key))

    def cpuid(self) -> str:
        """Id of the first detected CPU, or the table's 'unknown'."""
        temp: List[Cpu_interface] = self.get_component('CPU:')
        if (len(temp) == 0):
            return self.idsMap[self.idsMap['map']['CPU:']]['unknown']
        else:
            return temp[0].get_id(self.idsMap[self.idsMap['map']['CPU:']])

    def gpu(self) -> str:
        """All GPU model lines; AMD GPUs on Linux use the amdgpu render string."""
        key = 'GPU:'
        gpus: List[GpuType] = self.get_component(key)
        temp = []
        for gpu in gpus:
            if ((gpu.vendor == 'amd') and (self.os == 'linux')):
                temp.append(gpu.get_amdgpurender(gpus, self.laptop).rstrip().lstrip())
            else:
                temp.append(gpu.model.lstrip().rstrip())
        return ('\n'.join(temp) if (len(gpus) > 0) else '{} N/A'.format(key))

    def gpuid(self) -> str:
        """Id resolved from the detected GPU list."""
        return get_gpuid(self.idsMap[self.idsMap['map']['GPU:']], self.get_component('GPU:'))

    def disks(self) -> str:
        """Disk usage lines (key 'Disk' deliberately has no colon)."""
        return self.get_component_line('Disk')

    def resolution(self) -> str:
        """Display resolution line."""
        return self.get_component_line('Resolution:')

    def theme(self) -> str:
        """Desktop theme line."""
        return self.get_component_line('Theme:')

    def kernel(self) -> str:
        """Kernel version line."""
        return self.get_component_line('Kernel:')

    def packages(self) -> str:
        """Package count line."""
        return self.get_component_line('Packages:')

    def shell(self) -> str:
        """Shell line."""
        return self.get_component_line('Shell:')

    def shellid(self) -> str:
        """Id resolved from the shell line."""
        return self.get_component_id('Shell:')

    def terminal(self) -> str:
        """Terminal line."""
        return self.get_component_line('Terminal:')

    def terminalid(self) -> str:
        """Id resolved from the terminal line."""
        return self.get_component_id('Terminal:')

    def wm(self) -> str:
        """Window manager line."""
        return self.get_component_line('WM:')

    def wmid(self) -> str:
        """First word of the WM line."""
        return self.get_component_line('WM:').split()[0]

    def font(self) -> str:
        """Font line."""
        return self.get_component_line('Font:')

    def de(self) -> str:
        """Desktop environment line."""
        return self.get_component_line('DE:')

    def deid(self) -> str:
        """First word of the DE line; 'n/a' when only the bare 'DE:' prefix remains."""
        value = self.get_component_line('DE:').split()[0]
        return ('n/a' if (value == 'DE:') else value)

    def dewmid(self) -> str:
        """DE and WM lines joined by a newline (DE part empty when N/A)."""
        de = self.get_component_line('DE:')
        return '\n'.join([('' if (de == '{} N/A'.format('DE:')) else de), self.get_component_line('WM:')])

    def desktopid(self) -> str:
        """Resolve a desktop identifier from the DE and WM ids."""
        deid = self.deid.lower()
        wmid = self.wmid.lower()
        if (deid == 'unity'):
            # Unity with compiz reports 'unity'; otherwise trust the WM id.
            if (wmid == 'compiz'):
                return 'unity'
            else:
                return wmid
        if ((deid != 'n/a') and (deid in self.idsMap[self.idsMap['map']['DE:']])):
            return deid
        elif ((deid == 'n/a') and (wmid in self.idsMap[self.idsMap['map']['WM:']])):
            return wmid
        else:
            print('Unknown DE/WM, contact us on github to resolve this.')
            return 'unknown'

    def battery(self) -> str:
        """Battery line on laptops, 'Battery N/A' otherwise."""
        if self.laptop:
            return self.get_component_line('Battery')
        else:
            return '{} N/A'.format('Battery')

    def lapordesk(self) -> str:
        """'laptop' or 'desktop' (macOS always reports 'desktop' here)."""
        if (self.laptop and (self.os != 'macos')):
            return 'laptop'
        else:
            return 'desktop'

    def version(self) -> str:
        """macOS product version via sw_vers (raw output, trailing newline)."""
        return os.popen('sw_vers -productVersion').read()

    def product(self) -> str:
        """macOS hardware model via sysctl (raw output, trailing newline)."""
        return os.popen('sysctl -n hw.model').read()

    def devicetype(self) -> str:
        """'laptop' for MacBook* models, 'desktop' otherwise."""
        if (self.product[0:7] == 'MacBook'):
            return 'laptop'
        else:
            return 'desktop'

    def bigicon(self) -> str:
        """Icon key for the macOS version; 'bigslurp' when unsupported."""
        try:
            return self.idsMap[self.idsMap['map']['Version:']][self.version[0:5]]
        except KeyError:
            print('Unsupported MacOS version')
            return 'bigslurp'

    def __init__(self):
        super().__init__()
        # Wire each neofetch prefix to its parser function.
        self.parseMap = {'CPU:': get_cpu, 'GPU:': get_gpu, 'Disk': self.get_disk, 'Memory:': self.get_memory, 'OS:': self.get, 'Motherboard:': get_mobo, 'Host:': self.get, 'Resolution:': self.get, 'Theme:': self.get, 'Kernel:': self.get, 'Packages:': self.get, 'Shell:': self.get, 'Terminal:': self.get, 'Font:': self.get, 'DE:': self.get, 'WM:': self.get, 'Battery': self.get_battery}
        self.idsMap = get_infos()
        self.uptime = psutil.boot_time()
        self.detect_os()
        self.detect_laptop()
        self.fetch_values()

    def fetch_values(self):
        """Run neofetch, parse its output and apply CLI overrides."""
        (self.neofetchwin, self.neofetch, self.values) = self.detect_neofetch()
        self.neofetch_parser(self.values)
        if (not bool(self.componentMap)):
            # Nothing parsed: retry once with the default neofetch config.
            args.config_path = ''
            args.noconfig = False
            (self.neofetchwin, self.neofetch, self.values) = self.detect_neofetch()
            self.neofetch_parser(self.values)
        terminallist = self.idsMap[self.idsMap['map']['Terminal:']]
        if (args.terminal and (args.terminal.lower() in terminallist)):
            # CLI terminal override wins over the detected terminal.
            self.componentMap['Terminal:'] = [args.terminal.lower()]
        elif (args.terminal and (args.terminal.lower() not in terminallist)):
            # NOTE(review): the string literal below appears corrupted -- the
            # closing quote is missing and '% terminallist' ended up inside
            # the literal; restore from the original source.
            print(('\nInvalid terminal, only %s are supported.\nPlease make a github issue if you would like to have your terminal added.\n % terminallist))
            sys.exit(1)
        if (self.get_component('Font:', True) and args.termfont):
            print(('Custom terminal font not set because a terminal font already exists, %s' % self.font))
        elif ((not self.get_component('Font:', True)) and args.termfont):
            self.componentMap['Font:'] = [args.termfont]

    def updateMap(self):
        """Re-run neofetch and re-parse, replacing the current values."""
        self.clearMap()
        (self.neofetchwin, self.neofetch, self.values) = self.detect_neofetch()
        self.neofetch_parser(self.values)

    def clearMap(self):
        """Empty every per-key value list in place (the keys are kept)."""
        for key in self.componentMap.keys():
            del self.componentMap[key][:]

    def neofetch_parser(self, values: str):
        """Dispatch every neofetch output line to its registered parser(s)."""
        if args.debug:
            print(values)
        lines = values.split('\n')
        for i in range(len(lines)):
            line = lines[i]
            for (key, detectedFunction) in [(key, value) for (key, value) in self.parseMap.items() if (key in line)]:
                if (key not in self.componentMap):
                    self.componentMap[key] = []
                detectedFunction(self.os, self.componentMap[key], line.rstrip('\n'), key)

    def detect_os(self) -> str:
        """Map sys.platform to 'linux'/'macos'/'windows'; raise otherwise."""
        if ((platform == 'linux') or (platform == 'linux2')):
            self.os = 'linux'
        elif (platform == 'darwin'):
            self.os = 'macos'
        elif (platform == 'win32'):
            self.os = 'windows'
        else:
            raise Exception('Not a supported OS !')
        return self.os

    def detect_laptop(self) -> bool:
        """Linux-only: a /sys/class/power_supply BAT* entry marks a laptop."""
        if (self.os != 'linux'):
            self.laptop = False
        else:
            for i in os.listdir('/sys/class/power_supply'):
                if i.startswith('BAT'):
                    self.laptop = True
                    break
        return self.laptop

    def detect_neofetch(self):
        """Locate and run neofetch; return (neofetchwin, neofetch, raw output)."""
        neofetchwin = False
        neofetch = False
        values = None
        if (self.os == 'windows'):
            # Try the Windows-native neofetch first.
            try:
                values = run_command(['neofetch', '--noart'])
                if args.nfco:
                    # Debug override: read neofetch output from a file.
                    with open(args.nfco) as f:
                        values = '\n'.join(f.readlines())
            except Exception:
                pass
            else:
                neofetchwin = True
        if (not neofetchwin):
            if (self.os == 'linux'):
                enableFlatpak()
            default_config = get_default_config()
            try:
                if (self.os == 'windows'):
                    values = run_command(['neofetch', '--config {}'.format(('none' if args.noconfig else (args.config_path if args.config_path else default_config))), '--stdout'], shell=(self.os == 'windows'))
                else:
                    values = exec_bash('neofetch --config {} --stdout'.format(('none' if args.noconfig else (args.config_path if args.config_path else default_config))))
                if args.nfco:
                    with open(args.nfco) as f:
                        values = '\n'.join(f.readlines())
            except Exception:
                print('ERROR: Neofetch not found, please install it or check installation and that neofetch is in PATH.')
                exit(1)
            else:
                neofetch = True
        return (neofetchwin, neofetch, values)

    def get_battery(self, os: str, line: List, value: str, key: str):
        """Parser: append the text following *key* plus two characters."""
        line.append(value[((value.find(key) + len(key)) + 2):])

    def get_disk(self, os: str, line: List, value: str, key: str):
        """Parser: append the text following *key* plus two characters."""
        line.append(value[((value.find(key) + len(key)) + 2):])

    def get_memory(self, os: str, line: List, value: str, key: str):
        """Parser: memory line, optionally converted from MiB to GiB."""
        if (args.memtype == 'gb'):
            memgb = value.split()
            used = float(memgb[1].replace('MiB', ''))
            total = float(memgb[3].replace('MiB', ''))
            line.append(' '.join([str(round((used / 1024), 2)), 'GiB /', str(round((total / 1024), 2)), 'GiB']))
        else:
            line.append(value[((value.find(key) + len(key)) + 1):])

    def get(self, os: str, line: List, value: str, key: str, valueOffset: int=1):
        """Generic parser: append the text following *key* plus *valueOffset* chars."""
        line.append(value[((value.find(key) + len(key)) + valueOffset):])

    def get_component(self, key: str, quiet: bool=False):
        """Parsed value list for *key*, or [] when the key was never seen."""
        try:
            return self.componentMap[key]
        except KeyError as err:
            if args.debug:
                print('[KeyError]: {}'.format(err), end='')
            return []

    def get_component_line(self, key: str) -> str:
        """Newline-joined values for *key*, or '<key> N/A'."""
        try:
            values = self.componentMap[key]
            return ('\n'.join(values) if (len(values) > 0) else '{} N/A'.format(key))
        except KeyError as err:
            if args.debug:
                print('[KeyError]: ', end='')
                print(err)
            return '{} N/A'.format(key)

    def get_component_id(self, key: str) -> str:
        """Id whose table key occurs in the component line; 'unknown' otherwise."""
        component = self.get_component_line(key).lower()
        component_list = self.idsMap[self.idsMap['map'][key]]
        for (comp, id) in component_list.items():
            if (component.find(comp.lower()) >= 0):
                return id
        print('Unknown {}, contact us on github to resolve this.'.format(self.idsMap['map'][key]))
        if args.debug:
            print(f'Value: {component}')
        return component_list['unknown']

    def get_component_idkey(self, key: str) -> str:
        """Like get_component_id, but returns the matching table key itself."""
        component = self.get_component_line(key).lower()
        component_list = self.idsMap[self.idsMap['map'][key]]
        for (comp, _) in component_list.items():
            if (component.find(comp.lower()) >= 0):
                return comp
        print('Unknown {}, contact us on github to resolve this.'.format(self.idsMap['map'][key]))
        return component_list['unknown']
class CoreEnv(StructuredEnv, EventEnvMixin, SerializableEnvMixin, TimeEnvMixin, ABC):
    """Abstract base class for core environments.

    NOTE(review): the bare ``(StructuredEnv)`` / ``(TimeEnvMixin)`` /
    ``(EventEnvMixin)`` / ``(SerializableEnvMixin)`` expression lines look
    like mangled ``@override(...)`` decorators -- confirm against the
    original source.
    """

    def __init__(self):
        # Context shared across components (step ids, event service, ...).
        self.context = EnvironmentContext()
        # Set by subclasses; aggregates raw events into rewards.
        self.reward_aggregator: Optional[RewardAggregatorInterface] = None

    (StructuredEnv)
    def step(self, maze_action: MazeActionType) -> Tuple[(MazeStateType, Union[(float, np.ndarray, Any)], bool, Dict[(Any, Any)])]:
        """Advance the environment by one step for *maze_action* (abstract)."""

    (StructuredEnv)
    def reset(self) -> MazeStateType:
        """Reset the environment and return the initial maze state (abstract)."""

    (StructuredEnv)
    def seed(self, seed: int) -> None:
        """Seed the environment's random state (abstract)."""

    (StructuredEnv)
    def close(self) -> None:
        """Release environment resources (abstract)."""

    def get_maze_state(self) -> MazeStateType:
        """Return the current maze state (abstract)."""

    (TimeEnvMixin)
    def get_env_time(self) -> int:
        """Environment time is the context's step id."""
        return self.context.step_id

    (EventEnvMixin)
    def get_step_events(self) -> Iterable[EventRecord]:
        """Iterate the event records collected by the context's event service."""
        return self.context.event_service.iterate_event_records()

    (EventEnvMixin)
    def get_kpi_calculator(self) -> Optional[KpiCalculator]:
        """No KPI calculator by default; subclasses may override."""
        return None

    (SerializableEnvMixin)
    def get_serializable_components(self) -> Dict[(str, Any)]:
        """Return components to serialize along with the env (abstract)."""

    def get_renderer(self) -> Renderer:
        """Return the renderer for this environment (abstract)."""

    (StructuredEnv)
    def actor_id(self) -> ActorID:
        """Currently active actor id (abstract)."""

    (StructuredEnv)
    def is_actor_done(self) -> bool:
        """Whether the currently active actor is done (abstract)."""

    (StructuredEnv)
    def agent_counts_dict(self) -> Dict[(StepKeyType, int)]:
        """Number of agents per sub-step key (abstract)."""

    def clone_from(self, env: 'CoreEnv') -> None:
        """Clone the given env's state into this one; unsupported by default."""
        raise NotImplementedError
class Solution():
    """Single-pass, in-place sort of an array holding only 0s, 1s and 2s."""

    def sortColors(self, nums: List[int]) -> None:
        """Sort *nums* in place using two write pointers (Dutch national flag).

        ``red`` is the next slot for a 0, ``yellow`` the next slot for a 1;
        2s are left behind as the scan advances past them.
        """
        red = 0
        yellow = 0
        for idx, current in enumerate(nums):
            if current == 0:
                # Shift the earliest 1 (if any) right, then place the 0.
                nums[idx] = nums[yellow]
                nums[yellow] = nums[red]
                nums[red] = current
                red += 1
                yellow += 1
            elif current == 1:
                # Swap the 1 into the next 1-slot.
                nums[yellow], nums[idx] = current, nums[yellow]
                yellow += 1
class DockerImageNotAvailableError(ErsiliaError):
    """Raised when the Docker image for a model cannot be pulled from DockerHub.

    :param model: identifier of the model whose image pull failed.
    """

    def __init__(self, model):
        # Fixed typos in the user-facing text: 'occured' -> 'occurred',
        # 'trying pull' -> 'trying to pull', and the unclosed parenthesis
        # in the ARM64 hint.
        self.message = 'Error occurred while trying to pull docker image of model {0}'.format(model)
        self.hints = "Check that the model image ersiliaos/{0} is actually available in Ersilia's DockerHub.\nIf you are working with ARM64 (e.g. M1/M2 Apple chips), it is possible that pulling went wrong because no image with the ARM64 architecture is available".format(model)
        ErsiliaError.__init__(self, self.message, self.hints)
class OptionBoostDebug(Options):
    """Debug/timing switches for the Highcharts boost module.

    Each option is exposed as a getter/setter pair; the ``@property`` /
    ``@<name>.setter`` decorators appear to have been stripped from this
    view — TODO confirm against the generated source. All options default
    to False and are stored as plain (non-JS) config values.
    """

    def showSkipSummary(self):
        # Getter: whether to log a summary of skipped boost operations.
        return self._config_get(False)

    def showSkipSummary(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)

    def timeBufferCopy(self):
        # Getter: time the WebGL buffer copy step.
        return self._config_get(False)

    def timeBufferCopy(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)

    def timeKDTree(self):
        # Getter: time the k-d tree build.
        return self._config_get(False)

    def timeKDTree(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)

    def timeRendering(self):
        # Getter: time the rendering pass.
        return self._config_get(False)

    def timeRendering(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)

    def timeSeriesProcessing(self):
        # Getter: time series data processing.
        return self._config_get(False)

    def timeSeriesProcessing(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)

    def timeSetup(self):
        # Getter: time the boost setup phase.
        return self._config_get(False)

    def timeSetup(self, flag: bool):
        # Setter.
        self._config(flag, js_type=False)
class ParamValue(BaseModel):
    """A connector parameter value sourced from exactly one of: an identity
    field, dataset references, or a connector-level parameter.

    NOTE(review): the bare ``('references')`` and ``_validator`` lines below
    look like stripped pydantic ``@validator(...)`` / ``@root_validator``
    decorators — confirm against the original source.
    """

    name: str
    # Identity field to pull the value from, if any.
    identity: Optional[str]
    # Dataset references; only the 'from' direction is allowed (see validator).
    references: Optional[List[Union[(FidesDatasetReference, str)]]]
    # Name of a connector-level parameter to read instead.
    connector_param: Optional[str]
    # Whether to unpack the resolved value when substituting.
    unpack: Optional[bool] = False
    ('references')
    def check_reference_direction(cls, references: Optional[List[Union[(FidesDatasetReference, str)]]]) -> Optional[List[Union[(FidesDatasetReference, str)]]]:
        """Reject any dataset reference whose direction is 'to'."""
        for reference in (references or {}):
            if isinstance(reference, FidesDatasetReference):
                if (reference.direction == 'to'):
                    raise ValueError("References can only have a direction of 'from', found 'to'")
        return references
    _validator
    def check_exactly_one_value_field(cls, values: Dict[(str, Any)]) -> Dict[(str, Any)]:
        """Require exactly one of identity / references / connector_param to be set."""
        value_fields = [bool(values.get('identity')), bool(values.get('references')), bool(values.get('connector_param'))]
        if (sum(value_fields) != 1):
            raise ValueError("Must have exactly one of 'identity', 'references', or 'connector_param'")
        return values
def ajax_superuser_required(view_func):
    """Decorator: run *view_func* only for superusers; otherwise return a
    401 JSON response (``{"not_authenticated": true}``).

    NOTE(review): the bare ``(view_func)`` expression below looks like a
    stripped ``@functools.wraps(view_func)`` decorator — confirm upstream.
    """
    (view_func)
    def wrapper(request, *args, **kwargs):
        # Authorized: delegate to the wrapped view unchanged.
        if request.user.is_superuser:
            return view_func(request, *args, **kwargs)
        # Not a superuser: JSON body flags the failure; HTTP 401.
        resp = json.dumps({'not_authenticated': True})
        return HttpResponse(resp, content_type='application/json', status=401)
    return wrapper
class TestConfiguration(BaseConfiguration):
    """Flask configuration used by the test suite (local sqlite DB, CSRF off)."""

    DEBUG = False
    # Test database lives next to the application package.
    SQLALCHEMY_DATABASE_URI = ('sqlite:///' + os.path.join(basedir, 'test.db'))
    # CSRF disabled so test clients can POST forms without tokens.
    WTF_CSRF_ENABLED = False
    TESTING = True
    # Flask-Login session protection off; the login machinery itself stays enabled.
    SESSION_PROTECTION = None
    LOGIN_DISABLED = False
    # Fixed server name so url_for() works outside a request context.
    SERVER_NAME = 'localhost:5001'
    # Placeholder DB connection settings; intentionally empty in tests.
    config_data = {'db_username': '', 'db_port': '', 'db_password': '', 'db_name': '', 'db_url': ''}
def _create_saml_provider_config():
    """Create a SAML provider config with a randomized provider id.

    NOTE(review): the ``sso_url`` / ``callback_url`` string literals below
    appear mangled (URL contents stripped, quotes unbalanced) — restore them
    from the original source; kept byte-identical here.
    """
    provider_id = 'saml.{0}'.format(_random_string())
    return auth.create_saml_provider_config(provider_id=provider_id, idp_entity_id='IDP_ENTITY_ID', sso_url=' x509_certificates=[X509_CERTIFICATES[0]], rp_entity_id='RP_ENTITY_ID', callback_url=' display_name='SAML_DISPLAY_NAME', enabled=True)
('/', methods=['GET', 'POST'])
def home():
    """Upload endpoint: convert a POSTed file to PDF and stream it back inline.

    NOTE(review): the bare tuple above looks like a stripped
    ``@app.route('/', methods=['GET', 'POST'])`` decorator — confirm upstream.
    """
    resp = None
    if (request.method == 'POST'):
        # No 'file' part in the form at all -> bounce back to the same URL.
        if ('file' not in request.files):
            return redirect(request.url)
        file = request.files['file']
        # Browser submitted an empty file input.
        if (file.filename == ''):
            return redirect('/')
        if (file and allowed_file(file.filename)):
            # NOTE(review): these NamedTemporaryFiles are never explicitly
            # closed; they are deleted on garbage collection — confirm the
            # Response finishes streaming 'outfile' before that happens.
            outfile = tempfile.NamedTemporaryFile(dir=app.config['UPLOAD_FOLDER'])
            f = tempfile.NamedTemporaryFile(dir=app.config['UPLOAD_FOLDER'])
            f.write(file.read())
            f.seek(0)
            # Produces '<f.name>.pdf' inside UPLOAD_FOLDER.
            convert_and_nuke(f.name, app.config['UPLOAD_FOLDER'])
            with open((f.name + '.pdf'), 'rb') as converted_pdf:
                outfile.write(converted_pdf.read())
            outfile.seek(0)
            resp = Response(outfile)
            # Serve inline under a fixed generic filename.
            resp.headers['Content-Disposition'] = ('inline; filename=%s' % '1.pdf')
            resp.mimetype = 'application/pdf'
            return resp
    return render_template('index.html')
class OptionSeriesAreaTooltip(Options):
    """Tooltip options for Highcharts area series.

    Each option is a getter/setter pair; the ``@property`` /
    ``@<name>.setter`` decorators appear to have been stripped from this
    view — TODO confirm against the generated source. Getter arguments to
    ``_config_get`` are the Highcharts defaults.
    """

    def clusterFormat(self):
        return self._config_get('Clustered points: {point.clusterPointsAmount}')

    def clusterFormat(self, text: str):
        self._config(text, js_type=False)

    def dateTimeLabelFormats(self) -> 'OptionSeriesAreaTooltipDatetimelabelformats':
        # Nested sub-option container.
        return self._config_sub_data('dateTimeLabelFormats', OptionSeriesAreaTooltipDatetimelabelformats)

    def distance(self):
        # Default tooltip distance: 16 px.
        return self._config_get(16)

    def distance(self, num: float):
        self._config(num, js_type=False)

    def followPointer(self):
        return self._config_get(False)

    def followPointer(self, flag: bool):
        self._config(flag, js_type=False)

    def followTouchMove(self):
        return self._config_get(True)

    def followTouchMove(self, flag: bool):
        self._config(flag, js_type=False)

    def footerFormat(self):
        return self._config_get('')

    def footerFormat(self, text: str):
        self._config(text, js_type=False)

    def format(self):
        # NOTE(review): default is the literal string 'undefined', not None —
        # presumably mirroring the JS default; confirm.
        return self._config_get('undefined')

    def format(self, text: str):
        self._config(text, js_type=False)

    def headerFormat(self):
        return self._config_get(None)

    def headerFormat(self, text: str):
        self._config(text, js_type=False)

    def nullFormat(self):
        return self._config_get(None)

    def nullFormat(self, text: str):
        self._config(text, js_type=False)

    def nullFormatter(self):
        return self._config_get(None)

    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)

    def pointFormat(self):
        return self._config_get(None)

    def pointFormat(self, text: str):
        self._config(text, js_type=False)

    def pointFormatter(self):
        return self._config_get(None)

    def pointFormatter(self, value: Any):
        self._config(value, js_type=False)

    def valueDecimals(self):
        return self._config_get(None)

    def valueDecimals(self, num: float):
        self._config(num, js_type=False)

    def valuePrefix(self):
        return self._config_get(None)

    def valuePrefix(self, text: str):
        self._config(text, js_type=False)

    def valueSuffix(self):
        return self._config_get(None)

    def valueSuffix(self, text: str):
        self._config(text, js_type=False)

    def xDateFormat(self):
        return self._config_get(None)

    def xDateFormat(self, text: str):
        self._config(text, js_type=False)
def _parse_module(path: str, component_configs: Dict[(str, SkillComponentConfiguration)], skill_context: SkillContext, component_class: Type) -> Dict[(str, Any)]:
    """Load and instantiate skill components of one type from *path*.

    :param path: directory of the skill package to load the module from.
    :param component_configs: component id -> configuration mapping.
    :param skill_context: context shared by the skill's components.
    :param component_class: base class of the components (e.g. Handler).
    :return: mapping of component id -> instantiated component.
    :raises AEAComponentLoadException: when a component id is not a valid identifier.
    :raises AEAInstantiationException: when a component constructor raises.
    """
    components: Dict[(str, Any)] = {}
    component_type_name = component_class.__name__.lower()
    component_type_name_plural = (component_type_name + 's')
    if (component_configs == {}):
        return components
    component_names = set((config.class_name for (_, config) in component_configs.items()))
    component_module = load_module(component_type_name_plural, Path(path))
    classes = inspect.getmembers(component_module, inspect.isclass)
    # Keep only classes declared by this skill: name matches a configured class,
    # subclass of the requested base, and not re-exported from 'aea.*' or from
    # the skill's own installed 'packages.<author>.skills.<name>' path.
    component_classes = list(filter((lambda x: (any((re.match(component, x[0]) for component in component_names)) and issubclass(x[1], component_class) and (not str.startswith(x[1].__module__, 'aea.')) and (not str.startswith(x[1].__module__, f'packages.{skill_context.skill_id.author}.skills.{skill_context.skill_id.name}')))), classes))
    name_to_class = dict(component_classes)
    _print_warning_message_for_non_declared_skill_components(skill_context, set(name_to_class.keys()), {component_config.class_name for component_config in component_configs.values()}, component_type_name_plural, path)
    for (component_id, component_config) in component_configs.items():
        component_class_name = cast(str, component_config.class_name)
        skill_context.logger.debug(f'Processing {component_type_name} {component_class_name}')
        if (not component_id.isidentifier()):
            raise AEAComponentLoadException(f"'{component_id}' is not a valid identifier.")
        # Renamed from 'component_class' to stop shadowing the parameter of
        # the same name (the parameter is still needed conceptually above).
        declared_class = name_to_class.get(component_class_name, None)
        if (declared_class is None):
            skill_context.logger.warning(f"{component_type_name.capitalize()} '{component_class_name}' cannot be found.")
        else:
            try:
                component = declared_class(name=component_id, configuration=component_config, skill_context=skill_context, **dict(component_config.args))
            except Exception as e:
                e_str = parse_exception(e)
                # Chain the original exception (PEP 3134) so the root cause
                # is preserved in the traceback.
                raise AEAInstantiationException(f'''An error occurred during instantiation of component {skill_context.skill_id}/{component_config.class_name}:
{e_str}''') from e
            components[component_id] = component
    return components
class OptionPlotoptionsColumnSonificationContexttracksMappingHighpass(Options):
    """Highpass-filter mapping options (frequency, resonance) for column
    sonification context tracks.

    NOTE(review): these look like stripped ``@property`` getters — confirm
    against the generated source.
    """

    def frequency(self) -> 'OptionPlotoptionsColumnSonificationContexttracksMappingHighpassFrequency':
        # Sub-option container for the filter frequency mapping.
        return self._config_sub_data('frequency', OptionPlotoptionsColumnSonificationContexttracksMappingHighpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsColumnSonificationContexttracksMappingHighpassResonance':
        # Sub-option container for the filter resonance mapping.
        return self._config_sub_data('resonance', OptionPlotoptionsColumnSonificationContexttracksMappingHighpassResonance)
def test_scipy_gridder_extra_args():
    """ScipyGridder must forward extra_args (here rescale=True) to the
    underlying scipy interpolator and still reproduce the input data."""
    # Deterministic synthetic scattered data.
    data = CheckerBoard().scatter(random_state=100)
    coords = (data.easting, data.northing)
    grd = ScipyGridder(method='linear', extra_args=dict(rescale=True))
    grd.fit(coords, data.scalars)
    predicted = grd.predict(coords)
    # Linear interpolation evaluated at the data points should be exact
    # (within floating-point tolerance).
    npt.assert_allclose(predicted, data.scalars)
def test_dataset_drift_metric_value_error() -> None:
    """DatasetDriftMetric must raise ValueError when either the current or
    the reference dataset is missing."""
    # np.nan (lowercase): the np.NAN alias was removed in NumPy 2.0.
    test_data = pd.DataFrame({'category_feature': ['1', '2', '3'], 'numerical_feature': [3, 2, 1], 'target': [None, np.nan, 1], 'prediction': [1, np.nan, 1]})
    data_mapping = ColumnMapping()
    report = Report(metrics=[DatasetDriftMetric()])
    # Missing reference data -> ValueError.
    with pytest.raises(ValueError):
        report.run(current_data=test_data, reference_data=None, column_mapping=data_mapping)
        report.json()
    # Missing current data -> ValueError.
    with pytest.raises(ValueError):
        report.run(current_data=None, reference_data=test_data, column_mapping=data_mapping)
        report.json()
def extractDreamnovelsloverBlogspotCom(item):
    """Parse release info from a dreamnovelslover.blogspot.com feed item.

    Returns None for preview/non-chapter posts, a release message for
    recognized tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    is_preview = 'preview' in item['title'].lower()
    if is_preview or not (chp or vol):
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class IfExpr(ASTNode):
    """AST node for a conditional expression: If(condition, then, else)."""

    def __init__(self, ifexpr, thenexpr, elseexpr):
        self.ifexpr = ifexpr
        self.thenexpr = thenexpr
        self.elseexpr = elseexpr
        # Children listed in evaluation order: condition, then-branch, else-branch.
        self._children = [ifexpr, thenexpr, elseexpr]

    def __str__(self):
        return f'If({self.ifexpr}, {self.thenexpr}, {self.elseexpr})'
def extractNsiriblogWordpressCom(item):
    """Release extractor for nsiriblog.wordpress.com feed items.

    None for previews/non-chapters, a typed release message for known tags,
    False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    known_tags = {'PRC': ('PRC', 'translated'), 'Loiterous': ('Loiterous', 'oel')}
    for tagname, (name, tl_type) in known_tags.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_mass_units():
    """An Earth-mass planet must produce the same (small) RV signal whether
    mass is given via units.with_unit or via the m_planet_units argument."""
    # Earth's orbital period (days) and a reference periastron time (BJD).
    P_earth = 365.256
    Tper_earth = 2454115.5208333
    inclination_earth = np.radians(45.0)
    orbit1 = KeplerianOrbit(period=P_earth, t_periastron=Tper_earth, incl=inclination_earth, m_planet=units.with_unit(1.0, u.M_earth))
    orbit2 = KeplerianOrbit(period=P_earth, t_periastron=Tper_earth, incl=inclination_earth, m_planet=1.0, m_planet_units=u.M_earth)
    t = np.linspace(Tper_earth, (Tper_earth + 1000), 1000)
    rv1 = orbit1.get_radial_velocity(t).eval()
    rv_diff = (np.max(rv1) - np.min(rv1))
    # Earth-mass planet: peak-to-peak RV amplitude well below 1
    # (presumably m/s — confirm the library's RV units).
    assert (rv_diff < 1.0), 'with_unit'
    rv2 = orbit2.get_radial_velocity(t).eval()
    rv_diff = (np.max(rv2) - np.min(rv2))
    assert (rv_diff < 1.0), 'm_planet_units'
    # Both mass parameterizations must agree numerically.
    np.testing.assert_allclose(rv2, rv1)
class Model3PredictiveChecks(SimulatedPredictiveChecks, PlotMixin):
    """Prior/posterior predictive checks for 'Model 3'.

    Extracts p (success probability) and s (switch) samples from the xarray
    sample sets — with and without observations — and simulates Binomial
    outcomes per record.
    """

    MODEL_NAME = 'Model 3'

    def __init__(self, p_query: RVIdentifier, s_query_str: str, *args, **kwargs) -> None:
        """Extract p and s sample tensors from the stored xarray datasets.

        :param p_query: identifier whose call yields the data_vars key for p.
        :param s_query_str: substring selecting the s variables by key name.
        """
        super().__init__(*args, **kwargs)
        # Per-record trial totals and observed post-drug counts.
        self.t = self.data['total'].astype(int).values
        self.observed = self.data['postdrug'].astype(int).values
        if (self.samples_with_observations_xr is not None):
            data_vars = self.samples_with_observations_xr.data_vars
            p_with_observations = data_vars.get(p_query())
            p_with_observations = p_with_observations.values.flatten()
            self.p_with_observations = torch.tensor(p_with_observations)
            # Collapse (chain, sample) into one axis: shape (chains*samples, 2).
            s_with_observations = {key: torch.tensor(data_vars.get(key).values).reshape((self.n_chains_with_observations * self.n_samples_with_observations), 2) for (key, _) in self.samples_with_observations_xr.items() if (s_query_str in str(key))}
            # Sort by the RV's arguments so list order matches record order.
            s_with_observations = dict(sorted(s_with_observations.items(), key=(lambda item: item[0].arguments)))
            self.s_with_observations = list(s_with_observations.values())
        if (self.samples_without_observations_xr is not None):
            # Same extraction for the no-observation (prior) samples.
            data_vars = self.samples_without_observations_xr.data_vars
            p_without_observations = data_vars.get(p_query())
            p_without_observations = p_without_observations.values.flatten()
            self.p_without_observations = torch.tensor(p_without_observations)
            s_without_observations = {key: torch.tensor(data_vars.get(key).values).reshape((self.n_chains_without_observations * self.n_samples_without_observations), 2) for (key, _) in self.samples_without_observations_xr.items() if (s_query_str in str(key))}
            s_without_observations = dict(sorted(s_without_observations.items(), key=(lambda item: item[0].arguments)))
            self.s_without_observations = list(s_without_observations.values())

    def _model(self, i: int, p: Tensor, s: List[Tensor]) -> dist.Binomial:
        """Build the Binomial likelihood for record *i* from p and switch samples."""
        switch = s[i].clone()
        # Guard against all-zero switch rows, which Categorical cannot sample.
        switch[(switch.sum(dim=1) == 0.0).nonzero()] = torch.tensor([1.0, 1.0], dtype=torch.float64)
        s = dist.Categorical(probs=switch).sample((1,)).flatten()
        return dist.Binomial(torch.tensor(self.t[i]), (p * s))

    def _simulate_data(self, using: str='prior', N: int=1) -> List[Tensor]:
        """Simulate N datasets per record from either prior or posterior samples."""
        p = torch.zeros((0,))
        s = [torch.zeros((self.n_records, 0))]
        if (using == 'prior'):
            p = self.p_without_observations
            s = self.s_without_observations
        elif (using == 'posterior'):
            p = self.p_with_observations
            s = self.s_with_observations
        simulated_data = []
        for i in range(self.n_records):
            simulation = self._model(i, p, s)
            simulation = simulation.sample((N,)).flatten()
            simulated_data.append(simulation)
        return simulated_data

    def _plot_prior_predictive_checks(self) -> Figure:
        # Delegates to the PlotMixin implementation.
        return self.prior_predictive_plot()

    def _plot_posterior_predictive_checks(self) -> Figure:
        # Delegates to the PlotMixin implementation.
        return self.posterior_predictive_plot()
def _print_valid_commands(commands, out=sys.stdout):
if (not commands):
print('No commands found, did you specify the correct --cmd-path?', file=out)
return
print('The following are the available commands:\n', file=out)
max_cmd_len = max((len(name) for name in commands.keys()))
for name in sorted(commands.keys()):
docstring = (commands[name].__doc__ or '')
print(f'{name:{max_cmd_len}} {docstring}', file=out)
print(file=out) |
def chunker(lines, is_boundary):
    """Yield runs of consecutive non-boundary items from *lines*.

    Boundary items (per the *is_boundary* predicate) are dropped and act as
    separators; empty runs are not yielded. Yields nothing for empty input.
    """
    if not lines:
        return
    buffer = []
    for entry in lines:
        if is_boundary(entry):
            # Flush the run collected so far, if any.
            if buffer:
                yield buffer
                buffer = []
        else:
            buffer.append(entry)
    # Trailing run with no closing boundary.
    if buffer:
        yield buffer
class OptionSeriesSolidgaugeSonificationContexttracksActivewhen(Options):
    """'activeWhen' options for solid-gauge sonification context tracks:
    crossing thresholds, min/max bounds and the tracked property.

    NOTE(review): getter/setter pairs with apparently stripped ``@property``
    / setter decorators — confirm against the generated source.
    """

    def crossingDown(self):
        # Getter: threshold for activating when the value crosses downward.
        return self._config_get(None)

    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    def crossingUp(self):
        # Getter: threshold for activating when the value crosses upward.
        return self._config_get(None)

    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    def max(self):
        # Getter: upper bound of the active range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the active range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Getter: name of the point property being tracked.
        return self._config_get(None)

    def prop(self, text: str):
        self._config(text, js_type=False)
_frequency(timedelta(days=2))
def fetch_exchange_forecast(zone_key1: str='FR-COR', zone_key2: str='IT-SAR', session: (Session | None)=None, target_datetime: (datetime | None)=None, logger: Logger=getLogger(__name__)) -> list[dict]:
    """Fetch the FR-COR <-> IT-SAR exchange forecast via the shared fetch_data helper.

    NOTE(review): ``_frequency(timedelta(days=2))`` above looks like a
    stripped ``@refetch_frequency(...)`` decorator — confirm upstream.
    """
    return fetch_data(zone_key1=zone_key1, zone_key2=zone_key2, session=session, target_datetime=target_datetime, logger=logger, type='exchange_forecast')
def validate_model(model, val_loader):
    """Run one validation pass over *val_loader*.

    Computes cross-entropy loss and unweighted recall, appends per-sample
    '(fname pred target)' lines to the experiment's eval log, and logs the
    epoch metrics via log_value.

    :param model: torch model under evaluation (moved to eval mode here).
    :param val_loader: iterable yielding (x, mel, fname) batches.
    :return: tuple (recall, model).
    """
    print('Validating the model')
    model.eval()
    y_true = []
    y_pred = []
    fnames = []
    running_loss = 0.0
    criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        for (step, (x, mel, fname)) in enumerate(val_loader):
            (x, mel) = (Variable(x).cuda(), Variable(mel).cuda())
            logits = model(mel)
            loss = criterion(logits, x)
            running_loss += loss.item()
            targets = x.cpu().view((- 1)).numpy()
            y_true += targets.tolist()
            predictions = return_classes(logits)
            y_pred += predictions.tolist()
            fnames += fname
    assert (len(fnames) == len(y_pred))
    # Context manager guarantees the eval log is closed even if a write
    # fails (the original used a bare open()/close() pair, leaking the
    # handle on error).
    with open((exp_dir + '/eval'), 'a') as ff:
        for (f, yp, yt) in zip(fnames, y_pred, y_true):
            ff.write((((((f + ' ') + str(yp)) + ' ') + str(yt)) + '\n'))
    averaged_loss = (running_loss / len(val_loader))
    recall = get_metrics(y_pred, y_true)
    log_value('Unweighted Recall per epoch', recall, global_epoch)
    log_value('validation loss (per epoch)', averaged_loss, global_epoch)
    print('Validation Loss: ', averaged_loss)
    print('Unweighted Recall for the validation set: ', recall)
    print('\n')
    return (recall, model)
def create_therapy_plan(template=None, patient=None):
    """Create and save a Therapy Plan document, optionally from a template.

    :param template: name of a Therapy Plan Template; when given, details are
        copied from the template.
    :param patient: existing patient name; a new patient is created when None.
    :return: the saved Therapy Plan doc.
    """
    if (not patient):
        patient = create_patient()
    therapy_type = create_therapy_type()
    plan = frappe.new_doc('Therapy Plan')
    plan.patient = patient
    plan.start_date = getdate()
    if template:
        plan.therapy_plan_template = template
        plan = plan.set_therapy_details_from_template()
    else:
        # No template: a single therapy type with 2 sessions.
        plan.append('therapy_plan_details', {'therapy_type': therapy_type.name, 'no_of_sessions': 2})
    plan.save()
    return plan
class Limits():
    """Connection-pool limits: total connections, keep-alive connections,
    and keep-alive expiry time in seconds."""

    def __init__(self, *, max_connections: typing.Optional[int]=None, max_keepalive_connections: typing.Optional[int]=None, keepalive_expiry: typing.Optional[float]=5.0) -> None:
        self.max_connections = max_connections
        self.max_keepalive_connections = max_keepalive_connections
        self.keepalive_expiry = keepalive_expiry

    def __eq__(self, other: typing.Any) -> bool:
        # Equal only to another Limits instance with identical settings.
        if not isinstance(other, self.__class__):
            return False
        ours = (self.max_connections, self.max_keepalive_connections, self.keepalive_expiry)
        theirs = (other.max_connections, other.max_keepalive_connections, other.keepalive_expiry)
        return ours == theirs

    def __repr__(self) -> str:
        return (
            f'{self.__class__.__name__}(max_connections={self.max_connections}, '
            f'max_keepalive_connections={self.max_keepalive_connections}, '
            f'keepalive_expiry={self.keepalive_expiry})'
        )
class ModeSpec(Tidy3dBaseModel):
    """Specification for the mode solver: number of modes, target effective
    index, PML layers, polarization filtering, propagation angles, waveguide
    bends, mode tracking and group-index computation.

    NOTE(review): the bare ``('bend_axis', always=True)``-style lines and
    ``_validator(skip_on_failure=True)`` below look like stripped pydantic
    ``@validator(...)`` / ``@root_validator`` decorators — confirm against
    the original source.
    """

    num_modes: pd.PositiveInt = pd.Field(1, title='Number of modes', description='Number of modes returned by mode solver.')
    target_neff: pd.PositiveFloat = pd.Field(None, title='Target effective index', description='Guess for effective index of the mode.')
    num_pml: Tuple[(pd.NonNegativeInt, pd.NonNegativeInt)] = pd.Field((0, 0), title='Number of PML layers', description='Number of standard pml layers to add in the two tangential axes.')
    filter_pol: Literal[('te', 'tm')] = pd.Field(None, title='Polarization filtering', description='The solver always computes the ``num_modes`` modes closest to the given ``target_neff``. If ``filter_pol==None``, they are simply sorted in order of decresing effective index. If a polarization filter is selected, the modes are rearranged such that the first ``n_pol`` modes in the list are the ones with the selected polarization fraction larger than or equal to 0.5, while the next ``num_modes - n_pol`` modes are the ones where it is smaller than 0.5 (i.e. the opposite polarization fraction is larger than 0.5). Within each polarization subset, the modes are still ordered by decreasing effective index. ``te``-fraction is defined as the integrated intensity of the E-field component parallel to the first plane axis, normalized to the total in-plane E-field intensity. Conversely, ``tm``-fraction uses the E field component parallel to the second plane axis.')
    angle_theta: float = pd.Field(0.0, title='Polar Angle', description='Polar angle of the propagation axis from the injection axis.', units=RADIAN)
    angle_phi: float = pd.Field(0.0, title='Azimuth Angle', description='Azimuth angle of the propagation axis in the plane orthogonal to the injection axis.', units=RADIAN)
    precision: Literal[('single', 'double')] = pd.Field('single', title='single or double precision in mode solver', description='The solver will be faster and using less memory under single precision, but more accurate under double precision.')
    bend_radius: float = pd.Field(None, title='Bend radius', description='A curvature radius for simulation of waveguide bends. Can be negative, in which case the mode plane center has a smaller value than the curvature center along the tangential axis perpendicular to the bend axis.', units=MICROMETER)
    bend_axis: Axis2D = pd.Field(None, title='Bend axis', description='Index into the two tangential axes defining the normal to the plane in which the bend lies. This must be provided if ``bend_radius`` is not ``None``. For example, for a ring in the global xy-plane, and a mode plane in either the xz or the yz plane, the ``bend_axis`` is always 1 (the global z axis).')
    track_freq: Union[(TrackFreq, None)] = pd.Field('central', title='Mode Tracking Frequency', description="Parameter that turns on/off mode tracking based on their similarity. Can take values ``'lowest'``, ``'central'``, or ``'highest'``, which correspond to mode tracking based on the lowest, central, or highest frequency. If ``None`` no mode tracking is performed.")
    group_index_step: Union[(pd.PositiveFloat, bool)] = pd.Field(False, title='Frequency step for group index computation', description=f'Control the computation of the group index alongside the effective index. If set to a positive value, it sets the fractional frequency step used in the numerical differentiation of the effective index to compute the group index. If set to `True`, the default of {GROUP_INDEX_STEP} is used.')
    ('bend_axis', always=True)
    def bend_axis_given(cls, val, values):
        """Require 'bend_axis' whenever 'bend_radius' is set."""
        if ((val is None) and (values.get('bend_radius') is not None)):
            raise SetupError("'bend_axis' must also be defined if 'bend_radius' is defined.")
        return val
    ('bend_radius', always=True)
    def bend_radius_not_zero(cls, val, values):
        """Reject a zero-magnitude bend radius."""
        if (val and isclose(val, 0)):
            raise SetupError("The magnitude of 'bend_radius' must be larger than 0.")
        return val
    ('angle_theta', allow_reuse=True, always=True)
    def glancing_incidence(cls, val):
        """Reject propagation angles too close to pi/2 (glancing incidence)."""
        if (np.abs(((np.pi / 2) - val)) < GLANCING_CUTOFF):
            raise SetupError('Mode propagation axis too close to glancing angle for accurate injection. For best results, switch the injection axis.')
        return val
    ('group_index_step', pre=True)
    def assign_default_on_true(cls, val):
        """Replace the boolean True with the default fractional step size."""
        if (val is True):
            return GROUP_INDEX_STEP
        return val
    ('group_index_step')
    def check_group_step_size(cls, val):
        """The fractional frequency step must be strictly less than 1."""
        if (val >= 1):
            raise ValidationError("Parameter 'group_index_step' is a fractional value. It must be less than 1.")
        return val
    _validator(skip_on_failure=True)
    def check_precision(cls, values):
        """Warn about settings known to degrade group-index accuracy."""
        if (values['group_index_step'] > 0):
            # Mode tracking matters near mode crossings.
            if (values['track_freq'] is None):
                log.warning("Group index calculation without mode tracking can lead to incorrect results around mode crossings. Consider setting 'track_freq' to 'central'.")
            # Very small steps amplify single-precision round-off.
            if ((values['group_index_step'] < (5 * fp_eps)) and (values['precision'] == 'single')):
                log.warning("Group index step is too small! The results might be fully corrupted by numerical errors. For more accurate results, please consider using 'double' precision, or increasing the value of 'group_index_step'.")
        return values
.skipcomplex
def test_interpolate_to_function_space_cross_mesh():
    """Taylor-test a ReducedFunctional through cross-mesh interpolation
    (CG triangle source mesh -> DG quadrilateral destination mesh).

    NOTE(review): ``.skipcomplex`` above looks like a stripped pytest marker
    (e.g. ``@pytest.mark.skipcomplex``) — confirm upstream.
    """
    from firedrake.adjoint import ReducedFunctional, Control, taylor_test
    mesh_src = UnitSquareMesh(2, 2)
    mesh_dest = UnitSquareMesh(3, 3, quadrilateral=True)
    V = FunctionSpace(mesh_src, 'CG', 1)
    W = FunctionSpace(mesh_dest, 'DG', 1)
    u = Function(V)
    x = SpatialCoordinate(mesh_src)
    u.interpolate(x[0])
    # Control variable: a constant on the source mesh.
    c = Constant(1.0, domain=mesh_src)
    w = Function(W).interpolate(((u + c) * u))
    J = assemble(((w ** 2) * dx))
    rf = ReducedFunctional(J, Control(c))
    h = Constant(0.1, domain=mesh_src)
    # Convergence rate above 1.9 indicates a correct first-order adjoint.
    assert (taylor_test(rf, Constant(1.0, domain=mesh_src), h) > 1.9)
def test_create_user(db: DatabaseManager, User: Type[BaseModel], user_dao, user_req):
    """Creating a user through the DAO must persist all request fields."""
    user_dao.create(user_req)
    with db.session() as session:
        user = session.query(User).first()
        assert (user.name == user_req.name)
        assert (user.age == user_req.age)
        # NOTE(review): the stored password equals the raw request password,
        # i.e. no hashing happens in the DAO — confirm this is intended.
        assert (user.password == user_req.password)
class Timer():
    """Wall-clock timer with optional named, nestable scopes.

    ``start``/``end`` measure a single interval; ``start_scope``/``end_scope``
    accumulate elapsed seconds per scope name in ``runtimes``.
    """

    def __init__(self):
        # Timestamp of the last start(); 0 until started.
        self.t = 0
        # Stack of open scope names and their start timestamps.
        self.scopes = list()
        self.scope_starts = list()
        # Accumulated seconds per scope name.
        self.runtimes = dict()

    def start(self):
        """Begin a one-off interval measurement."""
        # perf_counter is monotonic and high-resolution, unlike time.time(),
        # so measured intervals cannot go negative on clock adjustments.
        self.t = time.perf_counter()

    def end(self):
        """Return seconds elapsed since the last start()."""
        return (time.perf_counter() - self.t)

    def start_scope(self, name):
        """Push a named scope onto the stack and record its start time."""
        self.scopes.append(name)
        self.scope_starts.append(time.perf_counter())

    def end_scope(self):
        """Pop the innermost scope and add its elapsed time to runtimes."""
        name = self.scopes.pop()
        start = self.scope_starts.pop()
        self.runtimes[name] = self.runtimes.get(name, 0) + (time.perf_counter() - start)

    def __str__(self):
        # One "name: seconds" line per scope, sorted by scope name.
        return '\n'.join('{}: {}'.format(name, self.runtimes[name]) for name in sorted(self.runtimes))
def test_get_all_valid_business_category_display_names():
    """Mapping every business-category field name must yield exactly the
    full list of display names, in the same order."""
    business_category_field_names_list = _get_all_business_category_field_names()
    business_category_display_names_list = _get_all_business_category_display_names()
    assert (get_business_category_display_names(business_category_field_names_list) == business_category_display_names_list)
class DEITP(DeltaE):
    """Delta E ITP color-difference metric, computed in ICtCp space."""

    NAME = 'itp'

    def __init__(self, scalar: float=720) -> None:
        self.scalar = scalar

    def distance(self, color: 'Color', sample: 'Color', scalar: Optional[float]=None, **kwargs: Any) -> float:
        """Scaled Euclidean distance in ICtCp (T channel weighted by 1/4).

        The instance's default scalar is used when *scalar* is None.
        """
        if scalar is None:
            scalar = self.scalar
        # Drop the alpha channel and replace NaNs before comparing.
        i1, t1, p1 = alg.no_nans(color.convert('ictcp')[:(- 1)])
        i2, t2, p2 = alg.no_nans(sample.convert('ictcp')[:(- 1)])
        di = i1 - i2
        dt = t1 - t2
        dp = p1 - p2
        return scalar * math.sqrt((di ** 2) + (0.25 * (dt ** 2)) + (dp ** 2))
_meta(characters.sp_aya.WindWalk)
class WindWalk():
    """UI metadata for the WindWalk skill of the SP Aya character.

    NOTE(review): ``_meta(...)`` above looks like a stripped decorator
    (e.g. ``@ui_meta(...)``), and the ``name``/``description``/message
    strings below appear to have lost their non-ASCII (CJK) content —
    restore them from the original source; kept byte-identical here.
    """

    name = ''
    description = ',,,(),'

    def clickable(self):
        # Usable only on the player's own turn, and only while the player
        # still holds any card (hand, shown, or equipped).
        if (not self.my_turn()):
            return False
        me = self.me
        return bool((me.cards or me.showncards or me.equips))

    def is_action_valid(self, sk, tl):
        # Exactly one associated card is required.
        acards = sk.associated_cards
        if ((not acards) or (len(acards) != 1)):
            return (False, '')
        card = acards[0]
        # The card must come from hand / shown cards / equipment.
        if (card.resides_in.type not in ('cards', 'showncards', 'equips')):
            return (False, '!')
        # Skill-derived cards cannot be used.
        if card.is_card(Skill):
            return (False, '')
        return (True, '')

    def effect_string(self, act):
        # In-game message describing the action (text partially garbled).
        return f'!{N.char(act.source)}{N.card(act.card)},!'

    def sound_effect(self, act):
        return 'thb-cv-sp_aya_windwalk'
def upgrade():
    """Alembic migration: copy legacy ctl_systems.system_dependencies entries
    into the egress JSON column, then drop the legacy column.

    :raises Exception: when writing the merged egress back fails with an
        IntegrityError (chained as the cause).
    """
    bind = op.get_bind()
    existing_system_dependencies = bind.execute(text('SELECT id, egress, system_dependencies FROM ctl_systems where system_dependencies is not null;'))
    for row in existing_system_dependencies:
        row_id = row['id']
        existing_egress = row['egress']
        system_dependencies = row['system_dependencies']
        updated_egress = []
        updated_egress_keys = []
        # Preserve any egress entries that already exist on the row.
        if existing_egress:
            for egress in existing_egress:
                updated_egress.append(egress)
                updated_egress_keys.append(egress['fides_key'])
        # Append a system-type egress entry for each dependency not already present.
        for system in system_dependencies:
            if (system not in updated_egress_keys):
                updated_egress.append({'fides_key': system, 'type': 'system', 'data_categories': None})
        update_query: TextClause = text('UPDATE ctl_systems SET egress = :updated_egress ::jsonb WHERE id = :row_id')
        # json.dumps already yields a str; the f-string wrapper was redundant.
        update_params = {'row_id': row_id, 'updated_egress': json.dumps(updated_egress)}
        try:
            bind.execute(update_query, update_params)
        except IntegrityError as exc:
            # Chain the original IntegrityError (PEP 3134) so the root cause
            # appears in the traceback.
            raise Exception(f'Fides attempted to copy ctl_systems.system_dependencies into ctl_systems.egress but got error: {exc}. ') from exc
    op.drop_column('ctl_systems', 'system_dependencies')
('/get_node_output_hist', methods=['POST'])
def get_node_output_hist_rq():
    """Return a histogram chart for one column of a workflow node's output.

    NOTE(review): the bare tuple above looks like a stripped
    ``@app.route('/get_node_output_hist', methods=['POST'])`` decorator —
    confirm upstream.
    """
    wf = request.get_json(True)
    # Reject payloads missing required keys; note only ONE of
    # 'col_1'/'num_bins' is required here.
    if ((not wf) or ('wf_unique_id' not in wf) or ('node_name' not in wf) or ('output_port_id' not in wf) or (('col_1' not in wf) and ('num_bins' not in wf))):
        abort(400)
    wf_unique_id = wf['wf_unique_id']
    node_name = wf['node_name']
    # Port 1 maps to the bottom-left output, anything else to bottom-right.
    output_port_id = ('BottomLeft' if (int(wf['output_port_id']) == 1) else 'BottomRight')
    # NOTE(review): validation above allows a request with only one of
    # 'col_1'/'num_bins', but both are accessed unconditionally here — a
    # KeyError (HTTP 500) is possible; confirm the intended contract.
    col_1 = wf['col_1']
    num_bins = int(wf['num_bins'])
    df = read_dataframe(wf_unique_id, node_name, output_port_id)
    chart = fsh.generate_histogram_plotly(df, col_1, num_bins, fsh.generate_path(wf_unique_id, node_name, 'hist.html'))
    return chart
.signal_handling
def test_hanging_cleanup_of_signaled_parent_fails_dispatcher_and_sends_to_dlq(fake_sqs_queue):
    """Killing the parent dispatcher while its exit handler hangs must fail
    the dispatcher (exit code 1, QueueWorkDispatcherError after 2 cleanup
    tries) and leave no message on the work queue.

    NOTE(review): ``.signal_handling`` above looks like a stripped pytest
    marker — confirm upstream.
    """
    logger = logging.getLogger(((__name__ + '.') + inspect.stack()[0][3]))
    logger.setLevel(logging.DEBUG)
    msg_body = randint(1111, 9998)
    queue = fake_sqs_queue
    queue.send_message(MessageBody=msg_body)
    worker_sleep_interval = 0.05

    def hanging_cleanup(task_id, termination_queue: mp.Queue, work_tracking_queue: mp.Queue, queue_message):
        # Exit handler that deliberately sleeps past the dispatcher's
        # exit_handling_timeout, so cleanup can never finish.
        work_tracking_queue.put('cleanup_start_{}'.format(queue_message.body), block=True, timeout=0.5)
        sleep(2.5)
        work_tracking_queue.put('cleanup_end_{}'.format(queue_message.body), block=True, timeout=0.5)
    cleanup_timeout = int((worker_sleep_interval + 1))
    dispatcher = SQSWorkDispatcher(queue, worker_process_name='Test Worker Process', long_poll_seconds=0, monitor_sleep_time=0.05, exit_handling_timeout=cleanup_timeout)
    wq = mp.Queue()  # work-tracking queue
    tq = mp.Queue()  # termination queue
    eq = mp.Queue()  # errors raised inside the parent dispatcher process

    def error_handling_dispatcher(work_dispatcher: SQSWorkDispatcher, error_queue: mp.Queue, **kwargs):
        # Run dispatch() and forward any raised exception back via a queue.
        try:
            work_dispatcher.dispatch(**kwargs)
        except Exception as exc:
            error_queue.put(exc, timeout=2)
            raise exc
    dispatch_kwargs = {'job': _work_to_be_terminated, 'termination_queue': tq, 'work_tracking_queue': wq, 'exit_handler': hanging_cleanup}
    parent_dispatcher = mp.Process(target=error_handling_dispatcher, args=(dispatcher, eq), kwargs=dispatch_kwargs)
    parent_dispatcher.start()
    # Wait until the worker reports it picked up the message.
    work_done = wq.get(True, 3)
    assert (work_done == msg_body)
    parent_pid = parent_dispatcher.pid
    parent_proc = ps.Process(parent_pid)
    worker_pid = tq.get(True, 1)
    worker_proc = ps.Process(worker_pid)
    # Simulate an external kill signal arriving at the parent dispatcher.
    parent_proc.terminate()
    try:
        # Wait for the worker to go away; guard against PID reuse.
        while (worker_proc.is_running() and ps.pid_exists(worker_pid) and (worker_proc == ps.Process(worker_pid))):
            wait_while = 5
            logger.debug(f'Waiting {wait_while}s for worker to complete after parent received kill signal. worker pid = {worker_pid}, worker status = {worker_proc.status()}')
            worker_proc.wait(timeout=wait_while)
    except TimeoutExpired as tex:
        pytest.fail(f'TimeoutExpired waiting for worker with pid {worker_proc.pid} to terminate (complete work).', tex)
    try:
        # Then wait for the parent dispatcher process itself to exit.
        while (parent_dispatcher.is_alive() and parent_proc.is_running() and ps.pid_exists(parent_pid) and (parent_proc == ps.Process(parent_pid))):
            wait_while = 3
            logger.debug(f'Waiting {wait_while}s for parent dispatcher to complete after kill signal. parent_dispatcher pid = {parent_pid}, parent_dispatcher status = {parent_proc.status()}')
            parent_dispatcher.join(timeout=wait_while)
    except TimeoutExpired as tex:
        pytest.fail(f'TimeoutExpired waiting for parent dispatcher with pid {parent_pid} to terminate (complete work).', tex)
    # The hanging cleanup must surface as a QueueWorkDispatcherError.
    assert (not eq.empty()), 'Should have been errors detected in parent dispatcher'
    exc = eq.get_nowait()
    assert isinstance(exc, QueueWorkDispatcherError), 'Error was not of type QueueWorkDispatcherError'
    timeout_error_fragment = 'Could not perform cleanup during exiting of job in allotted _exit_handling_timeout ({}s)'.format(cleanup_timeout)
    assert (timeout_error_fragment in str(exc)), 'QueueWorkDispatcherError did not mention exit_handling timeouts'
    assert ('after 2 tries' in str(exc)), "QueueWorkDispatcherError did not mention '2 tries'"
    try:
        assert (not parent_dispatcher.is_alive()), 'Parent dispatcher is still alive but should have been killed'
        assert (parent_dispatcher.exitcode is not None), 'Parent dispatcher is not alive but has no exitcode'
        assert (1 == parent_dispatcher.exitcode), 'Parent dispatcher exitcode was not 1'
        msgs = queue.receive_messages(WaitTimeSeconds=0)
        assert (msgs is not None)
        assert (len(msgs) == 0), 'Should be NO messages received from queue'
        assert (work_done == msg_body), 'Was expecting to find worker task_id (msg_body) tracked in the queue'
        # Both cleanup attempts re-enter the handler from the top, so both
        # traces are 'cleanup_start_*' (the sleep prevents 'cleanup_end_*').
        cleanup_attempt_1 = wq.get(True, 1)
        assert (cleanup_attempt_1 == 'cleanup_start_{}'.format(msg_body)), 'Was expecting to find a trace of cleanup attempt 1 tracked in the work queue'
        cleanup_attempt_2 = wq.get(True, 1)
        assert (cleanup_attempt_2 == 'cleanup_start_{}'.format(msg_body)), 'Was expecting to find a trace of cleanup attempt 2 tracked in the work queue'
        assert wq.empty(), 'There should be no more work tracked on the work Queue'
    finally:
        # Last-resort cleanup: kill any runaway worker, then fail the test.
        if (worker_proc and worker_proc.is_running()):
            logger.warning('Dispatched worker process with PID {} did not complete in timeout. Killing it.'.format(worker_proc.pid))
            os.kill(worker_proc.pid, signal.SIGKILL)
            pytest.fail('Worker did not complete in timeout as expected. Test fails.')
        _fail_runaway_processes(logger, dispatcher=parent_dispatcher)
.django_db
def test_no_intersection(client):
    """A filter containing a non-matching award_type_code must return zero
    results, and do so quickly (< 0.5 s).

    NOTE(review): ``.django_db`` above looks like a stripped
    ``@pytest.mark.django_db`` marker — confirm upstream.
    """
    request = {'filters': {'keyword': 'test', 'award_type_codes': ['A', 'B', 'C', 'D', 'no intersection']}, 'fields': ['Award ID', 'Recipient Name', 'Mod'], 'page': 1, 'limit': 5, 'sort': 'Award ID', 'order': 'desc'}
    api_start = perf_counter()
    resp = client.post(ENDPOINT, content_type='application/json', data=json.dumps(request))
    api_end = perf_counter()
    assert (resp.status_code == status.HTTP_200_OK)
    # Crude latency guard against performance regressions.
    assert ((api_end - api_start) < 0.5), 'Response took over 0.5s! Investigate why'
    assert (len(resp.data['results']) == 0), 'Results returned, there should be 0'
def _solve_bezier(target: float, a: float, b: float, c: float) -> float:
    """Invert the scalar cubic bezier: find t such that _bezier(t, a, b, c) == target.

    Tries Newton-Raphson first; if it stalls or fails to converge within
    MAX_ITER steps, falls back to bisection on [0, 1].
    """
    # --- Newton-Raphson phase ---
    t = 0.5
    prev = math.nan
    for _ in range(MAX_ITER):
        residual = _bezier(t, a, b, c) - target
        slope = _bezier_derivative(t, a, b, c)
        if slope == 0:
            # Flat derivative: Newton step undefined, switch strategies.
            break
        t -= residual / slope
        if t == prev:
            # Iterate stopped moving: converged to machine precision.
            return t
        prev = t
    if abs(_bezier(t, a, b, c) - target) < EPSILON:
        return t
    # --- Bisection fallback (assumes the curve is monotonic on [0, 1]) ---
    low, high = 0.0, 1.0
    t = target
    while abs(high - low) > EPSILON:
        value = _bezier(t, a, b, c)
        if abs(value - target) < EPSILON:
            return t
        if value > target:
            high = t
        else:
            low = t
        t = (high + low) * 0.5
    return t
def extractThewizardworldWordpressCom(item):
    """Map a 'thewizardworld.wordpress.com' feed item to a release message.

    Returns None for previews/unparseable titles, a release message when a
    known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    for tagname, name, tl_type in [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_simulation_load_export_pckl(tmp_path):
    """Round-trip the global SIM object through a pickle file and check equality."""
    target = str(tmp_path / 'simulation.pckl')
    with open(target, 'wb') as fh:
        pickle.dump(SIM, fh)
    with open(target, 'rb') as fh:
        restored = pickle.load(fh)
    assert SIM == restored, 'original and loaded simulations are not the same'
def test_lobatto_edge0():
    """Integrating a 0th-order polynomial over an edge must give the same
    value regardless of the Lobatto quadrature order."""
    print('0th Order Polynomial')
    print('Edge')
    integrals = []
    for order in (1, 2, 3):
        lobattoEdge.setOrder(order)
        value = dot(f0(lobattoEdge.points), lobattoEdge.weights)
        print(value)
        integrals.append(value)
    npt.assert_almost_equal(integrals[0], integrals[1])
    npt.assert_almost_equal(integrals[1], integrals[2])
class TestCarryFoward(util.ColorAssertsPyTest):
    """Carry-forward interpolation tests.

    Each case verifies that a channel value "carried forward" across inputs
    with undefined (`none`) channels is present in every interpolated step.
    (Class name typo preserved; renaming would change test collection.)
    """

    # Case layout: [interpolation space, step count, input colors, (channel, expected value)].
    CASES = [['oklab', 4, ['lch(none 30 270)', 'lab(none -2 12)', 'hsl(30 80 none)', 'oklab(0.5 0.3 -0.1)'], ('l', 0.5)], ['oklab', 3, ['lab(40 none 20)', 'oklab(0.2 none -0.2)', 'oklab(0.5 0.3 -0.1)'], ('a', 0.3)], ['oklab', 3, ['lab(40 20 none)', 'oklab(0.2 0.2 none)', 'oklab(0.5 0.3 -0.1)'], ('b', (- 0.1))], ['oklch', 5, ['lch(40 none 270)', 'hsl(30 none 75)', 'color(--hsv 300 none 0.5)', 'color(--hsi 90 none 0.2)', 'color(--oklch 0.5 0.2 30)'], ('c', 0.2)], ['hsl', 5, ['lch(40 none 270)', 'color(--hsv 300 none 0.5)', 'color(--hsi 90 none 0.2)', 'color(--oklch 0.5 none 30)', 'hsl(30 30 75)'], ('s', 0.3)], ['hsv', 5, ['lch(40 none 270)', 'hsl(30 none 75)', 'color(--hsi 90 none 0.2)', 'color(--oklch 0.5 none 30)', 'color(--hsv 300 0.3 0.5)'], ('s', 0.3)], ['hsl', 6, ['lch(40 10 none)', 'color(--hsv none 0.1 0.5)', 'hwb(none 30 75)', 'color(--hsi none 0.1 0.2)', 'color(--oklch 0.5 0.1 none)', 'hsl(30 30 75)'], ('h', 30)], ['hwb', 6, ['lch(40 10 none)', 'color(--hsv none 0.1 0.5)', 'hsl(none 30 75)', 'color(--hsi none 0.1 0.2)', 'color(--oklch 0.5 0.1 none)', 'hwb(30 30 75)'], ('h', 30)], ['oklab', 3, ['lch(40 10 270 / none)', 'rgb(220 0 47 / none)', 'hsl(30 30 75 / 0.5)'], ('alpha', 0.5)], ['srgb', 3, ['color(xyz-d65 0.24 0.34 none)', 'color(display-p3 0 1 none)', 'rgb(30 30 75)'], ('blue', (75 / 255))]]

    # NOTE(review): the bare `.parametrize(...)` looks like a stripped
    # `@pytest.mark.parametrize` decorator — confirm against upstream.
    .parametrize('space, steps, colors, cmp', CASES)
    def test_round_trip(self, space, steps, colors, cmp):
        """Every interpolated step must hold the carried-forward channel value."""
        results = Color.steps(colors, steps=steps, space=space, method='monotone', carryforward=True)
        assert all(((abs((r[cmp[0]] - cmp[1])) < 1e-12) for r in results)), '{} != {} : {}'.format(cmp[0], cmp[1], results)
def get_page_numbers_with_uncommon_page_dimension(layout_document: LayoutDocument) -> Sequence[int]:
    """Return sorted page numbers whose bounding box differs from the dominant page size.

    Pages without meta/coordinates are ignored. An empty list is returned
    when all (considered) pages share the same dimension.
    """
    dimension_counts = Counter(
        page.meta.coordinates.bounding_box
        for page in layout_document.pages
        if page.meta and page.meta.coordinates
    )
    LOGGER.debug('page_dimension_counter: %r', dimension_counts)
    if len(dimension_counts) < 2:
        return []
    (dominant_dimension, _), = dimension_counts.most_common(1)
    LOGGER.debug('most_common_page_dimension: %r', dominant_dimension)
    uncommon = {
        page.meta.page_number
        for page in layout_document.pages
        if page.meta
        and page.meta.coordinates
        and page.meta.coordinates.bounding_box != dominant_dimension
    }
    return sorted(uncommon)
def fake_beam_syncer(chain, event_bus):
    """Build a fake beam-sync service for tests.

    The returned `fake_beam_sync(removed_nodes)` is an async generator
    (presumably used via an async context manager — TODO confirm caller)
    that answers CollectMissingAccount/Bytecode/Storage events by restoring
    the requested node from `removed_nodes` into `chain.chaindb.db`.
    """
    async def fake_beam_sync(removed_nodes: Dict):
        def replace_missing_node(missing_node_hash):
            # Only nodes the test deliberately removed may be requested.
            if (missing_node_hash not in removed_nodes):
                raise Exception(f'An unexpected node was requested: {missing_node_hash}')
            # Restore the node; pop() ensures a repeated request for the
            # same hash fails loudly.
            chain.chaindb.db[missing_node_hash] = removed_nodes.pop(missing_node_hash)
        async def collect_accounts(event: CollectMissingAccount):
            replace_missing_node(event.missing_node_hash)
            (await event_bus.broadcast(MissingAccountResult(1), event.broadcast_config()))
        accounts_sub = event_bus.subscribe(CollectMissingAccount, collect_accounts)
        async def collect_bytecodes(event: CollectMissingBytecode):
            replace_missing_node(event.bytecode_hash)
            (await event_bus.broadcast(MissingBytecodeResult(), event.broadcast_config()))
        bytecode_sub = event_bus.subscribe(CollectMissingBytecode, collect_bytecodes)
        async def collect_storage(event: CollectMissingStorage):
            replace_missing_node(event.missing_node_hash)
            (await event_bus.broadcast(MissingStorageResult(1), event.broadcast_config()))
        storage_sub = event_bus.subscribe(CollectMissingStorage, collect_storage)
        # Don't yield control to the test until all three handlers are live.
        (await event_bus.wait_until_any_endpoint_subscribed_to(CollectMissingAccount))
        (await event_bus.wait_until_any_endpoint_subscribed_to(CollectMissingBytecode))
        (await event_bus.wait_until_any_endpoint_subscribed_to(CollectMissingStorage))
        try:
            (yield)
        finally:
            # Always detach handlers, even if the test body raised.
            accounts_sub.unsubscribe()
            bytecode_sub.unsubscribe()
            storage_sub.unsubscribe()
    return fake_beam_sync
def process_transaction(env: vm.Environment, tx: Transaction) -> Tuple[(Uint, Tuple[(Log, ...)])]:
    """Execute `tx` against `env`; return (gas used net of refunds, logs).

    Validates the transaction against the sender account, charges the full
    gas fee up front, runs the message call, then settles the gas refund,
    pays the coinbase, and deletes self-destructed accounts.

    Raises:
        InvalidBlock: if the transaction or the sender's state is invalid.
    """
    ensure(validate_transaction(tx), InvalidBlock)
    sender = env.origin
    sender_account = get_account(env.state, sender)
    # Maximum possible fee, debited before execution begins.
    gas_fee = (tx.gas * tx.gas_price)
    ensure((sender_account.nonce == tx.nonce), InvalidBlock)
    ensure((sender_account.balance >= (gas_fee + tx.value)), InvalidBlock)
    # Only externally-owned accounts (no code) may originate transactions.
    ensure((sender_account.code == bytearray()), InvalidBlock)
    # Gas left for execution after the intrinsic (base + data) cost.
    gas = (tx.gas - calculate_intrinsic_cost(tx))
    increment_nonce(env.state, sender)
    sender_balance_after_gas_fee = (sender_account.balance - gas_fee)
    set_account_balance(env.state, sender, sender_balance_after_gas_fee)
    message = prepare_message(sender, tx.to, tx.value, tx.data, gas, env)
    output = process_message_call(message, env)
    gas_used = (tx.gas - output.gas_left)
    # Refund is capped at half of the gas actually consumed.
    gas_refund = min((gas_used // 2), output.refund_counter)
    # Unspent gas plus the refund goes back to the sender (in wei)...
    gas_refund_amount = ((output.gas_left + gas_refund) * tx.gas_price)
    # ...and the remainder of the up-front fee is paid to the coinbase.
    transaction_fee = (((tx.gas - output.gas_left) - gas_refund) * tx.gas_price)
    total_gas_used = (gas_used - gas_refund)
    # Re-read the sender: execution may have altered its balance.
    sender_balance_after_refund = (get_account(env.state, sender).balance + gas_refund_amount)
    set_account_balance(env.state, sender, sender_balance_after_refund)
    coinbase_balance_after_mining_fee = (get_account(env.state, env.coinbase).balance + transaction_fee)
    set_account_balance(env.state, env.coinbase, coinbase_balance_after_mining_fee)
    # Accounts scheduled for deletion by SELFDESTRUCT are removed last.
    for address in output.accounts_to_delete:
        destroy_account(env.state, address)
    return (total_gas_used, output.logs)
def get_eip1085_schema() -> Dict[(str, Any)]:
    """Load the bundled EIP-1085 JSON schema from the trinity assets directory.

    Raises:
        RuntimeError: if this module is not located inside the `trinity` package.
    """
    base_trinity_dir = Path(__file__).parent.parent
    if base_trinity_dir.name != 'trinity':
        raise RuntimeError(f'Expected to be in root `trinity` module. Got: {str(base_trinity_dir)}')
    schema_path = base_trinity_dir / 'assets' / 'eip1085.schema.json'
    with open(schema_path) as schema_file:
        return json.load(schema_file)
def export_theta(tumor_segs, normal_cn):
    """Build a THetA2-style input table from tumor segments (and optional normal copy numbers).

    Returns a DataFrame with columns #ID/chrm/start/end/tumorCount/normalCount;
    an empty frame when `tumor_segs` is falsy.
    """
    out_columns = ['#ID', 'chrm', 'start', 'end', 'tumorCount', 'normalCount']
    if not tumor_segs:
        return pd.DataFrame(columns=out_columns)
    xy_names = []
    tumor_segs = tumor_segs.autosomes(also=xy_names)
    if normal_cn:
        normal_cn = normal_cn.autosomes(also=xy_names)
    table = tumor_segs.data.reindex(columns=['start', 'end'])
    # Map chromosome names to 1-based integer ids, in first-seen order.
    chrom_ids = {chrom: idx + 1 for idx, chrom in enumerate(tumor_segs.chromosome.drop_duplicates())}
    table['chrm'] = tumor_segs.chromosome.replace(chrom_ids)
    table['#ID'] = [
        f'start_{row.chrm}_{row.start}:end_{row.chrm}_{row.end}'
        for row in table.itertuples(index=False)
    ]
    ref_means, nbins = ref_means_nbins(tumor_segs, normal_cn)
    table['tumorCount'] = theta_read_counts(tumor_segs.log2, nbins)
    table['normalCount'] = theta_read_counts(ref_means, nbins)
    return table[out_columns]
def _print_type(t, env: PrintEnv) -> str:
    """Render a type object as its surface-syntax name.

    Scalars map to fixed names; tensors and windows are formatted
    recursively. Fails an assertion on unknown types.
    """
    # Ordered table mirrors the original elif chain — order matters because
    # the checks use isinstance and the type classes may be related.
    scalar_names = (
        (T.Num, 'R'),
        (T.F16, 'f16'),
        (T.F32, 'f32'),
        (T.F64, 'f64'),
        (T.INT8, 'i8'),
        (T.UINT8, 'ui8'),
        (T.UINT16, 'ui16'),
        (T.INT32, 'i32'),
        (T.Bool, 'bool'),
        (T.Int, 'int'),
        (T.Index, 'index'),
        (T.Size, 'size'),
        (T.Error, 'err'),
    )
    for type_cls, label in scalar_names:
        if isinstance(t, type_cls):
            return label
    if isinstance(t, T.Tensor):
        base = _print_type(t.basetype(), env)
        if t.is_window:
            base = f'[{base}]'
        ranges = ', '.join(_print_expr(r, env) for r in t.shape())
        return f'{base}[{ranges}]'
    if isinstance(t, T.Window):
        idx = ', '.join(_print_w_access(w, env) for w in t.idx)
        return f"Window(src_type={t.src_type},as_tensor={t.as_tensor},src_buf={t.src_buf},idx='[{idx}]')"
    if isinstance(t, T.Stride):
        return 'stride'
    assert False, f'impossible type {type(t)}'
def test_recall_values():
    """RecallTopKMetric(k=2) on a tiny ranked dataset: users 'a' and 'c'
    each have one relevant item, only one of which lands in the top 2."""
    current = pd.DataFrame(
        data=dict(
            user_id=['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
            prediction=[1, 2, 3, 1, 2, 3, 1, 2, 3],
            target=[1, 0, 0, 0, 0, 0, 0, 0, 1],
        )
    )
    metric = RecallTopKMetric(k=2)
    report = Report(metrics=[metric])
    mapping = ColumnMapping(recommendations_type=RecomType.RANK)
    report.run(reference_data=None, current_data=current, column_mapping=mapping)
    results = metric.get_result()
    assert len(results.current) == 3
    assert results.current[1] == 0.5
    assert results.current[2] == 0.5
    assert results.current[3] == 1
class MmapSource():
    """Line-oriented read access to a file through a read-only memory map.

    Usable as a context manager. Reads always return bytes; `mode` and
    `encoding` are recorded for callers but the map itself is binary.
    """

    def __init__(self, file_name, mode, encoding='utf-8'):
        self.file_name = file_name
        self.mode = mode
        self.encoding = encoding
        self.f = None
        self.mm = None

    def open(self):
        """Open the backing file, map it read-only, and return self."""
        self.f = open(self.file_name, mode='r+b')
        self.mm = mmap.mmap(self.f.fileno(), 0, access=mmap.ACCESS_READ)
        # Hint the kernel that access will be sequential.
        self.mm.madvise(mmap.MADV_SEQUENTIAL)
        return self

    def seek(self, offset):
        """Reposition the map's read cursor."""
        self.mm.seek(offset)

    def read(self):
        """Read from the cursor to the end of the map."""
        return self.mm.read()

    def readline(self):
        """Read one line (bytes, including the newline); b'' at EOF."""
        return self.mm.readline()

    def readlines(self, num_lines):
        """Read up to `num_lines` lines, stopping early at EOF."""
        collected = []
        for _ in range(num_lines):
            line = self.mm.readline()
            if not line:
                break
            collected.append(line)
        return collected

    def close(self):
        """Unmap and close the file; safe to reuse after a fresh open()."""
        self.mm.close()
        self.f.close()
        self.mm = None
        self.f = None

    def __enter__(self):
        return self.open()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def __str__(self, *args, **kwargs):
        return self.file_name
class KnowledgeEnvironment(Environment):
    """Environment that exposes the local knowledge base as an AI-callable function."""

    def __init__(self, env_id: str) -> None:
        super().__init__(env_id)
        query_param = {
            'tokens': 'key words to query',
            'types': 'prefered knowledge types, one or more of [text, image]',
            'index': 'index of query result',
        }
        self.add_ai_function(SimpleAIFunction('query_knowledge', 'vector query content from local knowledge base', self._query, query_param))

    async def _query(self, tokens: str, types: 'list[str] | None' = None, index: 'int | str' = 0):
        """Vector-query the knowledge base and return the `index`-th result.

        Args:
            tokens: query keywords.
            types: knowledge types to search; defaults to ['text'].
                (Fix: was a mutable default argument.)
            index: result index; accepts str or int (callers pass strings).
        """
        if types is None:
            types = ['text']
        index = int(index)
        object_ids = (await KnowledgeBase().query_objects(tokens, types, 4))
        if len(object_ids) <= index:
            return '*** I have no more information for your reference.\n'
        content = '*** I have provided the following known information for your reference with json format:\n'
        return content + KnowledgeBase().tokens_from_objects(object_ids[index:(index + 1)])
class LiteDRAMWishbone2Native(Module):
    """Wishbone-slave to LiteDRAM native-port bridge.

    Inserts a LiteDRAMNativePortConverter when the Wishbone data width
    differs from the port's, subtracts `base_address` from incoming
    addresses, and sequences CMD/WRITE/READ phases with a small FSM.
    """

    def __init__(self, wishbone, port, base_address=0):
        wishbone_data_width = len(wishbone.dat_w)
        # Round the port's data width down to a power of two.
        port_data_width = (2 ** int(log2(len(port.wdata.data))))
        # NOTE(review): true division — `ratio` is a float and is only used
        # in the `ratio <= 1` comparison below; confirm that is intended.
        ratio = (wishbone_data_width / port_data_width)
        if (wishbone_data_width != port_data_width):
            if (wishbone_data_width > port_data_width):
                addr_shift = (- log2_int((wishbone_data_width // port_data_width)))
            else:
                addr_shift = log2_int((port_data_width // wishbone_data_width))
            # Bridge through a converter port that matches the Wishbone width.
            new_port = LiteDRAMNativePort(mode=port.mode, address_width=(port.address_width + addr_shift), data_width=wishbone_data_width)
            self.submodules += LiteDRAMNativePortConverter(new_port, port)
            port = new_port
        # Latches a dropped wishbone.cyc mid-transaction so the final ack is
        # suppressed when the master aborted.
        aborted = Signal()
        # Byte base address converted to port-word units.
        offset = (base_address >> log2_int((port.data_width // 8)))
        self.submodules.fsm = fsm = FSM(reset_state='CMD')
        self.comb += [port.cmd.addr.eq((wishbone.adr - offset)), port.cmd.we.eq(wishbone.we), port.cmd.last.eq((~ wishbone.we)), port.flush.eq((~ wishbone.cyc))]
        # Issue the command, then branch on read/write; reset the abort latch.
        fsm.act('CMD', port.cmd.valid.eq((wishbone.cyc & wishbone.stb)), If(((port.cmd.valid & port.cmd.ready) & wishbone.we), NextState('WRITE')), If(((port.cmd.valid & port.cmd.ready) & (~ wishbone.we)), NextState('READ')), NextValue(aborted, 0))
        # Write data path; when not down-converting, only assert wdata.valid
        # while actually in the WRITE state.
        self.comb += [port.wdata.valid.eq((wishbone.stb & wishbone.we)), If((ratio <= 1), If((~ fsm.ongoing('WRITE')), port.wdata.valid.eq(0))), port.wdata.data.eq(wishbone.dat_w), port.wdata.we.eq(wishbone.sel)]
        fsm.act('WRITE', NextValue(aborted, ((~ wishbone.cyc) | aborted)), If((port.wdata.valid & port.wdata.ready), wishbone.ack.eq((wishbone.cyc & (~ aborted))), NextState('CMD')))
        # Always ready to accept read data.
        self.comb += port.rdata.ready.eq(1)
        fsm.act('READ', NextValue(aborted, ((~ wishbone.cyc) | aborted)), If(port.rdata.valid, wishbone.ack.eq((wishbone.cyc & (~ aborted))), wishbone.dat_r.eq(port.rdata.data), NextState('CMD')))
class OptionPlotoptionsFunnelSonificationTracksMappingFrequency(Options):
    """Accessors for the Highcharts `plotOptions.funnel.sonification.tracks.mapping.frequency` subtree.

    NOTE(review): every getter/setter pair below shares one name with no
    decorators, so as written the second `def` shadows the first and only
    the setter form survives at runtime. This strongly resembles stripped
    `@property` / `@<name>.setter` decorators — confirm against the code
    generator before relying on getter behavior.
    """

    def mapFunction(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter: store a plain (non-JS) string.
        self._config(text, js_type=False)

    def max(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def max(self, num: float):
        # Setter: store a plain (non-JS) number.
        self._config(num, js_type=False)

    def min(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def min(self, num: float):
        # Setter: store a plain (non-JS) number.
        self._config(num, js_type=False)

    def within(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def within(self, value: Any):
        # Setter: store a plain (non-JS) value.
        self._config(value, js_type=False)
def get_geth_binary():
    """Locate the geth executable to use.

    Resolution order:
      1. `GETH_BINARY` env var — used verbatim.
      2. `GETH_VERSION` env var — resolved via py-geth, installing that
         version if it is not present yet.
      3. fall back to `geth` on PATH.
    """
    if 'GETH_BINARY' in os.environ:
        return os.environ['GETH_BINARY']
    elif 'GETH_VERSION' in os.environ:
        # Import lazily: previously this import ran unconditionally at the
        # top of the function, so the GETH_BINARY and PATH fallbacks broke
        # with ImportError whenever the optional py-geth package was absent.
        from geth.install import get_executable_path, install_geth
        geth_version = os.environ['GETH_VERSION']
        _geth_binary = get_executable_path(geth_version)
        if not os.path.exists(_geth_binary):
            install_geth(geth_version)
        assert os.path.exists(_geth_binary)
        return _geth_binary
    else:
        return 'geth'
def _list_bots(cmd):
    """Return a styled table of all IRC-bot accounts, or a message when none exist."""
    bots = list(AccountDB.objects.filter(db_is_bot=True, username__startswith='ircbot-'))
    if not bots:
        return 'No irc bots found.'
    table = cmd.styled_table('|w#dbref|n', '|wbotname|n', '|wev-channel|n', '|wirc-channel|n', '|wSSL|n', maxwidth=_DEFAULT_WIDTH)
    for bot in bots:
        # Collapse channel/network/port into a single display column.
        ircinfo = '%s (%s:%s)' % (bot.db.irc_channel, bot.db.irc_network, bot.db.irc_port)
        table.add_row('#%i' % bot.id, bot.db.irc_botname, bot.db.ev_channel, ircinfo, bot.db.irc_ssl)
    return table
class JSONHandlerTest(HandlerBaseTest, unittest.TestCase):
    """Runs the shared HandlerBaseTest suite against JSONHandler."""

    def setUp(self):
        # Handler under test plus the fixture document and metadata the
        # base suite reads from `self.data`.
        self.handler = JSONHandler()
        self.data = {'filename': 'tests/json/hello-json.md', 'content': '\n\nTitle\n=====\n\ntitle2\n------\n\nHello.\n\nJust need three dashes\n---\n\nAnd this might break.\n', 'metadata': {'test': 'tester', 'author': 'bob', 'something': 'else'}}
# NOTE(review): the bare `.requires_roxar` looks like a stripped
# `@pytest.mark.requires_roxar` decorator — confirm against upstream.
.requires_roxar
def test_rox_surfaces(roxar_project):
    """Exercise surface round-trips against a live RMS/roxar project:
    read, clipboard export, isochore creation in a zone, and re-read."""
    srf = xtgeo.surface_from_roxar(roxar_project, 'TopReek', SURFCAT1)
    srf2 = xtgeo.surface_from_roxar(roxar_project, 'MidReek', SURFCAT1)
    assert (srf.ncol == 554)
    assert (srf.values.mean() == pytest.approx(1698.648, abs=0.01))
    # Write a copy to the project clipboard.
    srf.to_roxar(roxar_project, 'TopReek_copy', 'SomeFolder', stype='clipboard')
    rox = xtgeo.RoxUtils(roxar_project)
    prj = rox.project
    # Isochore = thickness between the two horizons.
    iso = (srf2 - srf)
    rox.create_zones_category('IS_isochore')
    prj.zones.create('UpperReek', prj.horizons['TopReek'], prj.horizons['MidReek'])
    iso.to_roxar(prj, 'UpperReek', 'IS_isochore', stype='zones')
    # Re-read the stored isochore and check its mean thickness.
    iso2 = xtgeo.surface_from_roxar(prj, 'UpperReek', 'IS_isochore', stype='zones')
    assert (iso2.values.mean() == pytest.approx(20.79, abs=0.01))
    prj.save()
    prj.close()
def simulate_async_global_model_update(global_model: IFLModel, local_model_before_training: IFLModel, local_model_after_training: IFLModel) -> None:
    """Apply one simulated asynchronous FL update to `global_model` in place.

    The delta between the local model before and after training is
    reconstructed as a gradient and applied with a lr=1.0 SGD step, i.e.
    the delta is added verbatim.
    """
    grad_holder = FLModelParamUtils.clone(global_model)
    FLModelParamUtils.reconstruct_gradient(
        old_model=local_model_before_training.fl_get_module(),
        new_model=local_model_after_training.fl_get_module(),
        grads=grad_holder.fl_get_module(),
    )
    FLModelParamUtils.set_gradient(
        model=global_model.fl_get_module(),
        reference_gradient=grad_holder.fl_get_module(),
    )
    optimizer = torch.optim.SGD(global_model.fl_get_module().parameters(), lr=1.0)
    optimizer.step()
class AbstractIRPattern(AbstractPattern, ABC):
    """Base class for patterns that are matched on the compiled IR level."""
    level = Level.IR

    def get_cfg(self):
        """Return the analysis context's control-flow graph.

        Raises:
            PatternMatchError: when IR compilation failed (chained from the
                stored exception) or produced no CFG.
        """
        cfg = self.analysis_context.cfg
        if isinstance(cfg, Exception):
            # The pipeline stores the compilation failure itself; chain it.
            raise PatternMatchError('Compiled IR is not available') from cfg
        if (cfg is None):
            # Fix: message previously read 'Compiled is not available',
            # inconsistent with the branch above.
            raise PatternMatchError('Compiled IR is not available')
        return cfg
class Settings():
    """All tunable game parameters, grouped by subsystem."""

    def __init__(self):
        # Screen
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230, 230, 230)
        # Ship
        self.ship_speed = 3.0
        self.ship_limit = 3
        # Bullets
        self.bullet_speed = 12.0
        self.bullet_width = 15
        self.bullet_height = 3
        self.bullet_color = (60, 60, 60)
        self.bullets_allowed = 3
        # Target
        self.target_width = 15
        self.target_height = 120
        self.target_color = (180, 60, 10)
        self.target_speed = 1.5
        # Game over after this many misses.
        self.miss_limit = 3
def check_config(config):
    """Validate config keys; print unknown options to stderr and exit(1)."""
    valid_config_options = {'kernel', 'cve', 'exclude', 'exploit', 'verbose', 'cwe', 'files', 'ignore_files', 'kernel_configcheck_strict', 'report', 'all_files', 'metadata'}
    unknown = set(config.keys()) - valid_config_options
    if unknown:
        print('Unknown config options: ' + ','.join(unknown), file=sys.stderr)
        sys.exit(1)
# NOTE(review): the bare `.skipif(...)` looks like a stripped
# `@pytest.mark.skipif` decorator — confirm against upstream.
.skipif((PYDANTIC_MAJOR_VERSION != 1), reason='Only implemented for pydantic 1')
def test_serialize_deserialize_equals() -> None:
    """A nested Object schema must round-trip through .json()/.parse_raw()
    and .dict() unchanged (pydantic 1 serialization)."""
    expected = Object(id='root', description='root-object', attributes=[Number(id='number', description='Number description', examples=[]), Text(id='text', description='text description', examples=[]), Bool(id='bool', description='bool description', examples=[])], examples=[])
    stringified = expected.json()
    # Round-trip equality and type of the serialized form.
    assert (Object.parse_raw(stringified) == expected)
    assert isinstance(stringified, str)
    # Exact dict form, including the '$type' discriminators.
    assert (expected.dict() == {'attributes': [{'description': 'Number description', 'examples': [], 'id': 'number', 'many': False, '$type': 'Number'}, {'description': 'text description', 'examples': [], 'id': 'text', 'many': False, '$type': 'Text'}, {'description': 'bool description', 'examples': [], 'id': 'bool', 'many': False, '$type': 'Bool'}], 'description': 'root-object', 'examples': [], 'id': 'root', 'many': False})
    assert (Object.parse_raw(stringified) == expected)
def extractBarolaideHomeBlog(item):
    """Map a 'barolaide.home.blog' feed item to a release message.

    Returns None for previews/unparseable titles, a release message when a
    known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'))
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def _specific_schedule_is_due_today(sched_str: str) -> bool:
    """True when the date encoded after '|' lies in
    [today - DUE_NOTES_BOUNDARY days, today]; False when there is no '|'."""
    if '|' not in sched_str:
        return False
    scheduled = _dt_from_date_str(sched_str.split('|')[1]).date()
    today = datetime.today().date()
    earliest = (datetime.today() - timedelta(days=DUE_NOTES_BOUNDARY)).date()
    return earliest <= scheduled <= today
class TestDBTRun():
    """Integration tests for the DBTRun task wrapper."""

    def test_simple_task(self):
        """A workflow invoking the task returns a DBTRunOutput."""
        task = DBTRun(name='test-task')

        def my_workflow() -> DBTRunOutput:
            return task(input=DBTRunInput(project_dir=DBT_PROJECT_DIR, profiles_dir=DBT_PROFILES_DIR, profile=DBT_PROFILE, select=['tag:something'], exclude=['tag:something-else']))

        assert isinstance(my_workflow(), DBTRunOutput)

    def test_incorrect_project_dir(self):
        """Pointing at a directory without a dbt project raises."""
        task = DBTRun(name='test-task')
        with pytest.raises(DBTUnhandledError):
            task(input=DBTRunInput(project_dir='.', profiles_dir=DBT_PROFILES_DIR, profile=DBT_PROFILE))

    def test_task_output(self):
        """execute() reports exit code, command line and raw dbt artefacts."""
        task = DBTRun(name='test-task')
        output = task.execute(input=DBTRunInput(project_dir=DBT_PROJECT_DIR, profiles_dir=DBT_PROFILES_DIR, profile=DBT_PROFILE))
        assert output.exit_code == 0
        assert output.command == f'dbt --log-format json run --project-dir {DBT_PROJECT_DIR} --profiles-dir {DBT_PROFILES_DIR} --profile {DBT_PROFILE}'
        # Raw artefacts must match the files dbt wrote to target/.
        expectations = (
            (f'{DBT_PROJECT_DIR}/target/run_results.json', output.raw_run_result),
            (f'{DBT_PROJECT_DIR}/target/manifest.json', output.raw_manifest),
        )
        for path, actual in expectations:
            with open(path, 'r') as fp:
                assert actual == fp.read()
def serve_attestation_duty(slashing_db: SlashingDB, attestation_duty: AttestationDuty) -> None:
    """Run the full attestation duty flow: reach consensus on the data,
    record it in the slashing DB, sign it, and broadcast the share."""
    attestation_data = consensus_on_attestation(slashing_db, attestation_duty)
    assert consensus_is_valid_attestation_data(slashing_db, attestation_data, attestation_duty)
    # Record before signing so a crash cannot produce a slashable vote.
    update_attestation_slashing_db(slashing_db, attestation_data, attestation_duty.pubkey)
    fork_version = bn_get_fork_version(compute_start_slot_at_epoch(attestation_data.target.epoch))
    signing_root = compute_attestation_signing_root(attestation_data)
    signature_share = rs_sign_attestation(attestation_data, signing_root, fork_version)
    broadcast_attestation_signature_share(Attestation(data=attestation_data, signature=signature_share))
def _compute_reaching_condition_of_unique_predecessors(sink_node: TransitionBlock, graph_slice: TransitionCFG, reaching_conditions: Dict[(TransitionBlock, LogicCondition)]) -> None:
    """Walk backwards from `sink_node` along chains of unique predecessors,
    marking each such predecessor's reaching condition as trivially true."""
    node = sink_node
    while True:
        predecessors = list(graph_slice.get_predecessors(node))
        if len(predecessors) != 1:
            # Stop at a join (multiple predecessors) or an entry (none).
            break
        node = predecessors[0]
        reaching_conditions[node] = graph_slice.condition_handler.get_true_value()
# NOTE(review): the bare `(trylast=True)` looks like a stripped
# `@pytest.hookimpl(trylast=True)` decorator — confirm against upstream.
(trylast=True)
def pytest_configure(config):
    """Replace pytest's terminal reporter with TLDRReporter unless cricket mode is active."""
    if (getattr(config.option, 'cricket_mode', 'off') == 'off'):
        # Swap in our reporter and force native-style tracebacks.
        config.pluginmanager.unregister(name='terminalreporter')
        reporter = TLDRReporter(config, sys.stdout)
        config.pluginmanager.register(reporter, 'terminalreporter')
        config.option.tbstyle = 'native'
# NOTE(review): the bare `.skipif(...)` / `.dict(...)` lines look like
# stripped `@pytest.mark.skipif` / `@mock.patch.dict` decorators (the
# latter sandboxing sys.modules and os.environ) — confirm upstream.
.skipif(('ROXENV' in os.environ), reason='Dismiss test in ROXENV')
.dict(sys.modules)
.dict(os.environ)
def test_that_mpl_dynamically_imports():
    """xtgeo must defer importing matplotlib until plotting is actually used."""
    _clear_state(sys, os)
    import xtgeo
    # Importing xtgeo alone must not pull in matplotlib.
    assert ('matplotlib' not in sys.modules)
    assert ('matplotlib.pyplot' not in sys.modules)
    from xtgeo.plot.baseplot import BasePlot
    # Nor must importing the plotting base class.
    assert ('matplotlib' not in sys.modules)
    assert ('matplotlib.pyplot' not in sys.modules)
    baseplot = BasePlot()
    # Instantiation is what finally triggers the matplotlib import.
    assert ('matplotlib' in sys.modules)
    import matplotlib as mpl
    if (versionparse(mpl.__version__) < versionparse('3.6')):
        # Older matplotlib loads pyplot eagerly with the top-level package.
        assert ('matplotlib.pyplot' in sys.modules)
    else:
        assert ('matplotlib.pyplot' not in sys.modules)
    baseplot.close()
    # close() uses pyplot, so it must be loaded afterwards.
    assert ('matplotlib.pyplot' in sys.modules)
def main_handler():
    """CLI entry point: register tracing-specific options and run the MH_Trace backend."""
    clp = command_line.create_basic_clp()
    output_options = clp['output_options']
    output_options.add_argument('--out-imp', default='mh_imp_trace.lobster', help='name of the implementation LOBSTER artefact (by default %(default)s)')
    output_options.add_argument('--out-act', default='mh_act_trace.lobster', help='name of the activity LOBSTER artefact (by default %(default)s)')
    output_options.add_argument('--only-tagged-blocks', action='store_true', default=False, help='Only emit traces for Simulink blocks with at least one tag')
    output_options.add_argument('--untagged-blocks-inherit-tags', action='store_true', default=False, help='Blocks without tags inherit all tags from their parent block')
    options = command_line.parse_args(clp)
    # Tracing wants plain diagnostics: no style/check output, no autofix.
    mh = Message_Handler('trace')
    mh.show_context = not options.brief
    mh.show_style = False
    mh.show_checks = False
    mh.autofix = False
    trace_backend = MH_Trace(options)
    command_line.execute(mh, options, {}, trace_backend, process_tests=True)
def main(page: ft.Page):
    """Flet demo: GitHub OAuth login with the token persisted (encrypted)
    in client storage, listing the user's repositories after login."""
    secret_key = MY_APP_SECRET_KEY
    # NOTE(review): this line appears truncated in this copy of the source —
    # the redirect_url string literal is unterminated; restore from upstream.
    provider = GitHubOAuthProvider(client_id=GITHUB_CLIENT_ID, client_secret=GITHUB_CLIENT_SECRET, redirect_url='
    AUTH_TOKEN_KEY = 'myapp.auth_token'
    def perform_login(e):
        # Reuse a previously stored (encrypted) token when available;
        # e is None on startup, an event object on button click.
        saved_token = None
        ejt = page.client_storage.get(AUTH_TOKEN_KEY)
        if ejt:
            saved_token = decrypt(ejt, secret_key)
        if ((e is not None) or (saved_token is not None)):
            page.login(provider, saved_token=saved_token, scope=['public_repo'])
    def on_login(e: ft.LoginEvent):
        if e.error:
            raise Exception(e.error)
        # Persist the fresh token encrypted in client storage.
        jt = page.auth.token.to_json()
        ejt = encrypt(jt, secret_key)
        page.client_storage.set(AUTH_TOKEN_KEY, ejt)
        logged_user.value = f"Hello, {page.auth.user['name']}!"
        toggle_login_buttons()
        list_github_repositories()
        page.update()
    def list_github_repositories():
        # Refresh the repo list; empty when logged out.
        repos_view.controls.clear()
        if page.auth:
            headers = {'User-Agent': 'Flet', 'Authorization': 'Bearer {}'.format(page.auth.token.access_token)}
            # NOTE(review): this line appears truncated — the HTTP request
            # call (likely requests.get of the GitHub repos URL) is missing;
            # restore from upstream.
            repos_resp = headers=headers)
            repos_resp.raise_for_status()
            user_repos = json.loads(repos_resp.text)
            for repo in user_repos:
                repos_view.controls.append(ft.ListTile(leading=ft.Icon(ft.icons.FOLDER_ROUNDED), title=ft.Text(repo['full_name'])))
    def logout_button_click(e):
        # Drop the stored token and end the session.
        page.client_storage.remove(AUTH_TOKEN_KEY)
        page.logout()
    def on_logout(e):
        toggle_login_buttons()
        list_github_repositories()
        page.update()
    def toggle_login_buttons():
        # Show login when signed out; greeting + logout when signed in.
        login_button.visible = (page.auth is None)
        logged_user.visible = logout_button.visible = (page.auth is not None)
    logged_user = ft.Text()
    login_button = ft.ElevatedButton('Login with GitHub', on_click=perform_login)
    logout_button = ft.ElevatedButton('Logout', on_click=logout_button_click)
    repos_view = ft.ListView(expand=True)
    page.on_login = on_login
    page.on_logout = on_logout
    toggle_login_buttons()
    # Attempt silent login from a stored token on startup.
    perform_login(None)
    page.add(ft.Row([logged_user, login_button, logout_button]), repos_view)
class PDFStream(PDFObject):
    """A PDF stream object: an attribute dictionary plus raw byte data that
    is deciphered/unfiltered lazily on first access via get_data()."""

    def __init__(self, attrs, rawdata, decipher=None):
        """attrs: the stream dictionary; rawdata: undecoded bytes;
        decipher: optional callable(objid, genno, data, attrs) for encrypted files."""
        assert isinstance(attrs, dict)
        self.attrs = attrs
        self.rawdata = rawdata
        self.decipher = decipher
        self.data = None  # decoded payload, populated by decode()
        self.objid = None  # set later via set_objid()
        self.genno = None
        return

    def set_objid(self, objid, genno):
        """Record the object/generation numbers (needed for deciphering)."""
        self.objid = objid
        self.genno = genno
        return

    def __repr__(self):
        if (self.data is None):
            assert (self.rawdata is not None)
            return ('<PDFStream(%r): raw=%d, %r>' % (self.objid, len(self.rawdata), self.attrs))
        else:
            assert (self.data is not None)
            return ('<PDFStream(%r): len=%d, %r>' % (self.objid, len(self.data), self.attrs))

    def __contains__(self, name):
        return (name in self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def get(self, name, default=None):
        return self.attrs.get(name, default)

    def get_any(self, names, default=None):
        """Return the value of the first attribute name present, else default."""
        for name in names:
            if (name in self.attrs):
                return self.attrs[name]
        return default

    def get_filters(self):
        """Return (filter, params) pairs for this stream (possibly empty).

        Accepts both abbreviated (F/DP) and full (Filter/DecodeParms) keys;
        a scalar filter/params entry is normalized to list form.
        """
        filters = self.get_any(('F', 'Filter'))
        params = self.get_any(('DP', 'DecodeParms', 'FDecodeParms'), {})
        if (not filters):
            return []
        if (not isinstance(filters, list)):
            filters = [filters]
        if (not isinstance(params, list)):
            # One params dict shared across all filters.
            params = ([params] * len(filters))
        if (STRICT and (len(params) != len(filters))):
            raise PDFException('Parameters len filter mismatch')
        return zip(filters, params)

    def decode(self):
        """Decipher and run the filter chain over rawdata, storing the
        result in self.data and releasing rawdata. Must run only once."""
        assert ((self.data is None) and (self.rawdata is not None))
        data = self.rawdata
        if self.decipher:
            # Decryption happens before any filter is applied.
            data = self.decipher(self.objid, self.genno, data, self.attrs)
        filters = self.get_filters()
        if (not filters):
            self.data = data
            self.rawdata = None
            return
        # Apply each filter in sequence.
        for (f, params) in filters:
            if (f in LITERALS_FLATE_DECODE):
                try:
                    data = zlib.decompress(data)
                except zlib.error as e:
                    if STRICT:
                        raise PDFException(('Invalid zlib bytes: %r, %r' % (e, data)))
                    # Lenient mode: treat undecodable data as empty.
                    data = b''
            elif (f in LITERALS_LZW_DECODE):
                data = lzwdecode(data)
            elif (f in LITERALS_ASCII85_DECODE):
                data = ascii85decode(data)
            elif (f in LITERALS_ASCIIHEX_DECODE):
                data = asciihexdecode(data)
            elif (f in LITERALS_RUNLENGTH_DECODE):
                data = rldecode(data)
            elif (f in LITERALS_CCITTFAX_DECODE):
                data = ccittfaxdecode(data, params)
            elif (f in LITERALS_DCT_DECODE):
                # JPEG data is left encoded; consumers handle it directly.
                pass
            elif (f == LITERAL_CRYPT):
                raise PDFNotImplementedError('/Crypt filter is unsupported')
            else:
                raise PDFNotImplementedError(('Unsupported filter: %r' % f))
            # Optional PNG-style predictor applied after the filter.
            if ('Predictor' in params):
                pred = int_value(params['Predictor'])
                if (pred == 1):
                    # Predictor 1 means "no prediction".
                    pass
                elif (10 <= pred):
                    colors = int_value(params.get('Colors', 1))
                    columns = int_value(params.get('Columns', 1))
                    bitspercomponent = int_value(params.get('BitsPerComponent', 8))
                    data = apply_png_predictor(pred, colors, columns, bitspercomponent, data)
                else:
                    raise PDFNotImplementedError(('Unsupported predictor: %r' % pred))
        self.data = data
        self.rawdata = None
        return

    def get_data(self):
        """Return the decoded stream payload, decoding on first call."""
        if (self.data is None):
            self.decode()
        return self.data

    def get_rawdata(self):
        """Return the raw (undecoded) bytes, or None after decode()."""
        return self.rawdata
def main(page: ft.Page):
    """Flet demo: an AnimatedSwitcher that toggles between two containers on click."""
    hello = ft.Container(ft.Text('Hello!', style=ft.TextThemeStyle.HEADLINE_MEDIUM), alignment=ft.alignment.center, width=200, height=200, bgcolor=ft.colors.GREEN)
    bye = ft.Container(ft.Text('Bye!', size=50), alignment=ft.alignment.center, width=200, height=200, bgcolor=ft.colors.YELLOW)
    switcher = ft.AnimatedSwitcher(hello, transition=ft.AnimatedSwitcherTransition.SCALE, duration=500, reverse_duration=100, switch_in_curve=ft.AnimationCurve.BOUNCE_OUT, switch_out_curve=ft.AnimationCurve.BOUNCE_IN)

    def animate(e):
        # Swap whichever container is currently shown.
        if switcher.content == hello:
            switcher.content = bye
        else:
            switcher.content = hello
        switcher.update()

    page.add(switcher, ft.ElevatedButton('Animate!', on_click=animate))
class RankingOps(gh.ObjectType):
    """GraphQL operations that adjust TrueSkill rankings after a game."""
    RkAdjustRanking = gh.Field(gh.List(gh.NonNull(RankingHistory), required=True), required=True, game=gh.Argument(GameInput, required=True, description=''))

    def resolve_RkAdjustRanking(root, info, game):
        """Recompute TrueSkill ratings for a game's winners vs losers and
        persist RankingHistory rows; retries the transaction up to 3 times
        on IntegrityError (e.g. concurrent ranking-row creation)."""
        ctx = info.context
        pids = game.players
        winners = game.winners
        # Everyone who played but did not win counts as a loser.
        losers = [i for i in pids if (i not in winners)]
        category = game.type
        require_perm(ctx, 'game.change_ranking')
        players = {v.id: v for v in models.Player.objects.filter(id__in=pids)}
        if (set(players) != set(pids)):
            # Some referenced player ids do not exist.
            raise Exception('')
        def get_ranking(p):
            # Existing ranking row for this category/season, or a fresh
            # unsaved one. NOTE(review): reads `season` from the enclosing
            # scope, which is only assigned inside the retry loop below.
            r = models.Ranking.objects.filter(player=p, category=category, season=season).first()
            return (r or models.Ranking(player=p, category=category, season=season))
        for _ in range(3):
            try:
                with transaction.atomic():
                    import system
                    season = int(system.models.Setting.objects.get(key='ranking-season').value)
                    wl = [get_ranking(players[i]) for i in winners]
                    ll = [get_ranking(players[i]) for i in losers]
                    # TrueSkill team update: winners ranked 0, losers ranked 1.
                    wrl = [TS.create_rating(mu=v.mu, sigma=v.sigma) for v in wl]
                    lrl = [TS.create_rating(mu=v.mu, sigma=v.sigma) for v in ll]
                    (nwrl, nlrl) = TS.rate([wrl, lrl], [0, 1])
                    rst = []
                    for (r, b4, af) in itertools.chain(zip(wl, wrl, nwrl), zip(ll, lrl, nlrl)):
                        # Persist the updated rating and a history record
                        # capturing the before/after scores.
                        r.mu = af.mu
                        r.sigma = af.sigma
                        r.changes += 1
                        r.save()
                        h = models.RankingHistory(game_id=game.game_id, player=players[r.player.id], season=season, category=category, score_before=models.Ranking.score_from_tsranking(b4), score_after=models.Ranking.score_from_tsranking(af), changes=r.changes)
                        h.save()
                        rst.append(h)
                    return rst
            except IntegrityError as e:
                exc = e
                continue
        # All retries exhausted: surface the last IntegrityError.
        raise exc
class OptionPlotoptionsTilemapSonificationDefaultinstrumentoptionsMappingFrequency(Options):
    """Accessors for the Highcharts `plotOptions.tilemap.sonification.defaultInstrumentOptions.mapping.frequency` subtree.

    NOTE(review): every getter/setter pair below shares one name with no
    decorators, so as written the second `def` shadows the first and only
    the setter form survives at runtime. This strongly resembles stripped
    `@property` / `@<name>.setter` decorators — confirm against the code
    generator before relying on getter behavior.
    """

    def mapFunction(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store a plain (non-JS) value.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter: store a plain (non-JS) string.
        self._config(text, js_type=False)

    def max(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def max(self, num: float):
        # Setter: store a plain (non-JS) number.
        self._config(num, js_type=False)

    def min(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def min(self, num: float):
        # Setter: store a plain (non-JS) number.
        self._config(num, js_type=False)

    def within(self):
        # Getter: configured value, or None when unset.
        return self._config_get(None)

    def within(self, value: Any):
        # Setter: store a plain (non-JS) value.
        self._config(value, js_type=False)
class _TestQuest(quests.EvAdventureQuest):
    """A three-step quest used for testing.

    Step A completes when the quester carries an object tagged 'QuestA'
    (category 'quests'); step B completes when ``progress`` is called with
    ``complete_quest_B=True``; step C completes once the quester's
    ``test_quest_counter`` attribute exceeds 5, awarding 10 XP.
    """

    key = 'testquest'
    desc = 'A test quest!'
    start_step = 'A'
    end_text = 'This task is completed.'
    help_A = 'You need to do A first.'
    help_B = 'Next, do B.'

    def step_A(self, *args, **kwargs):
        # Look for any carried object tagged as the step-A quest token.
        has_token = any(
            obj.tags.has('QuestA', category='quests')
            for obj in self.quester.contents
        )
        if not has_token:
            return
        self.quester.msg('Completed step A of quest!')
        self.current_step = 'B'
        self.progress()

    def step_B(self, *args, **kwargs):
        # Step B is driven purely by an explicit flag from the caller.
        if not kwargs.get('complete_quest_B', False):
            return
        self.quester.msg('Completed step B of quest!')
        self.quester.db.test_quest_counter = 0
        self.current_step = 'C'
        self.progress()

    def help_C(self):
        # Dynamic help text, personalized with the quester's key.
        return f'Only C left now, {self.quester.key}!'

    def step_C(self, *args, **kwargs):
        # Counter must exist (truthy) and have passed 5.
        if not (self.quester.db.test_quest_counter and (self.quester.db.test_quest_counter > 5)):
            return
        self.quester.msg('Quest complete! Get XP rewards!')
        self.quester.db.xp += 10
        self.complete()

    def cleanup(self):
        # Remove the scratch counter once the quest is done.
        del self.quester.db.test_quest_counter
class FlaskUserFilter(logging.Filter):
    """Logging filter that stamps each record with the acting user's name.

    Resolves, in order: 'SERVER' outside a request context, the logged-in
    user's name, a 'backend: <addr>' marker for authenticated backend calls,
    or 'ANON'.
    """

    def filter(self, record):
        # Always pass the record through; we only annotate it.
        record.user = self._user()
        return True

    def _user(self):
        """Return the display name for the current request, if any."""
        # No flask.g (or no user attribute on it) means we are not inside a
        # request — attribute the record to the server itself.
        if (not flask.g) or (not hasattr(flask.g, 'user')):
            return 'SERVER'
        if flask.g.user:
            return flask.g.user.name
        return self._backend() or 'ANON'

    def _backend(self):
        """Identify an authenticated backend caller, else None.

        BUG FIX: the original declared this method without ``self`` even
        though ``_user`` invokes it as ``self._backend()``, which would raise
        TypeError at runtime.
        """
        auth = flask.request.authorization
        if auth and (auth.password == app.config['BACKEND_PASSWORD']):
            return 'backend: {0}'.format(flask.request.remote_addr)
        return None
def generate_video(video_option: List[str], video_dir: Optional[str], images: List[np.ndarray], episode_id: Union[(int, str)], checkpoint_idx: int, metrics: Dict[(str, float)], fps: int=10, verbose: bool=True) -> None:
    """Assemble per-step frames into a video and save/log it.

    Depending on ``video_option``, writes an mp4 to ``video_dir`` ('disk')
    and/or logs a wandb.Video ('wandb').  Does nothing when no frames were
    collected.

    :param video_option: sinks to use; any of 'disk', 'wandb'.
    :param video_dir: output directory; required when 'disk' is selected.
    :param images: HxWxC frames, one per environment step.
    :param episode_id: identifier embedded in the video name.
    :param checkpoint_idx: checkpoint index embedded in the video name.
    :param metrics: scalar metrics appended to the video name as k=v pairs.
    :param fps: playback frame rate.
    :param verbose: forwarded to the disk writer.
    """
    if not images:
        return
    # Encode each metric as 'name=value' with two decimals for the filename.
    metric_strs = [f'{k}={v:.2f}' for (k, v) in metrics.items()]
    video_name = f'episode={episode_id}-ckpt={checkpoint_idx}-' + '-'.join(metric_strs)
    if 'disk' in video_option:
        assert video_dir is not None
        images_to_video(images, video_dir, video_name, verbose=verbose)
    if 'wandb' in video_option:
        # wandb.Video expects (time, channel, height, width).
        frames = np.array(images).transpose(0, 3, 1, 2)
        wandb.log({f'episode{episode_id}_{checkpoint_idx}': wandb.Video(frames, fps=fps)})
class CmdUnconnectedHelp(COMMAND_DEFAULT_CLASS):
    """Show help for the pre-login (unconnected) session state.

    Usage:
      help

    Lists the commands available before an account has logged in, and
    appends the staff contact address when one is configured.
    """

    key = 'help'
    aliases = ['h', '?']
    locks = 'cmd:all()'

    def func(self):
        text = '\nYou are not yet logged into the game. Commands available at this point:\n\n |wcreate|n - create a new account\n |wconnect|n - connect with an existing account\n |wlook|n - re-show the connection screen\n |whelp|n - show this help\n |wencoding|n - change the text encoding to match your client\n |wscreenreader|n - make the server more suitable for use with screen readers\n |wquit|n - abort the connection\n\nFirst create an account e.g. with |wcreate Anna c67jHL8p|n\n(If you have spaces in your name, use double quotes: |wcreate "Anna the Barbarian" c67jHL8p|n\nNext you can connect to the game: |wconnect Anna c67jHL8p|n\n\nYou can use the |wlook|n command if you want to see the connect screen again.\n\n'
        # Only advertise a support address when one is configured in settings.
        if settings.STAFF_CONTACT_EMAIL:
            text += 'For support, please contact: %s' % settings.STAFF_CONTACT_EMAIL
        self.msg(text)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.