id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
163971
|
import mock
import yaml
from boto3.session import Session
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
from botocore.exceptions import NoRegionError
from botocore.exceptions import EndpointConnectionError
from st2tests.base import BaseSensorTestCase
from sqs_sensor import AWSSQSSensor
class SQSSensorTestCase(BaseSensorTestCase):
    """Test suite for ``AWSSQSSensor``.

    All AWS interaction is faked by patching ``boto3.session.Session``'s
    ``client``/``resource`` factories with the mock classes below, so no
    network traffic occurs.  Fixture YAML files provide the various sensor
    configurations (full, blank, multi-account, mixed).
    """

    sensor_cls = AWSSQSSensor

    class MockResource(object):
        """Mock SQS service resource whose queues yield the given message bodies."""

        def __init__(self, msgs=None):
            # ``None`` default instead of a shared mutable list (bug fix:
            # mutable default arguments are shared across instances).
            self.msgs = msgs if msgs is not None else []

        def get_queue_by_name(self, **kwargs):
            return SQSSensorTestCase.MockQueue(self.msgs)

        def Queue(self, queue):
            return SQSSensorTestCase.MockQueue(self.msgs)

    class MockResourceNonExistentQueue(object):
        """Mock resource that reports a missing queue but allows creating one."""

        def __init__(self, msgs=None):
            # See MockResource: avoid a shared mutable default.
            self.msgs = msgs if msgs is not None else []

        def get_queue_by_name(self, **kwargs):
            raise ClientError({'Error': {'Code': 'AWS.SimpleQueueService.NonExistentQueue'}},
                              'sqs_test')

        def Queue(self, queue):
            raise ClientError({'Error': {'Code': 'AWS.SimpleQueueService.NonExistentQueue'}},
                              'sqs_test')

        def create_queue(self, **kwargs):
            return SQSSensorTestCase.MockQueue(self.msgs)

    class MockResourceRaiseClientError(object):
        """Mock resource that always fails with a configurable ClientError code."""

        def __init__(self, error_code=''):
            self.error_code = error_code

        def get_queue_by_name(self, **kwargs):
            raise ClientError({'Error': {'Code': self.error_code}}, 'sqs_test')

        def Queue(self, queue):
            raise ClientError({'Error': {'Code': self.error_code}}, 'sqs_test')

    class MockResourceRaiseNoCredentialsError(object):
        """Mock resource simulating missing AWS credentials."""

        def get_queue_by_name(self, **kwargs):
            raise NoCredentialsError()

        def Queue(self, queue):
            raise NoCredentialsError()

    class MockResourceRaiseEndpointConnectionError(object):
        """Mock resource simulating an unreachable endpoint (e.g. bad region)."""

        def get_queue_by_name(self, **kwargs):
            raise EndpointConnectionError(endpoint_url='')

        def Queue(self, queue):
            raise EndpointConnectionError(endpoint_url='')

    class MockStsClient(object):
        """Mock STS client returning canned caller identity and role credentials."""

        def __init__(self):
            self.meta = mock.Mock(service_model={})

        def get_caller_identity(self):
            ci = mock.Mock()
            # Only the 'Account' attribute has a value; everything else is None.
            ci.get = lambda attribute: '111222333444' if attribute == 'Account' else None
            return ci

        def assume_role(self, RoleArn, RoleSessionName):
            return {
                'Credentials': {
                    'AccessKeyId': 'access_key_id_example',
                    'SecretAccessKey': 'secret_access_key_example',
                    'SessionToken': '<PASSWORD>'
                }
            }

    class MockStsClientRaiseClientError(MockStsClient):
        """Mock STS client whose assume_role call is always denied."""

        def assume_role(self, RoleArn, RoleSessionName):
            raise ClientError({'Error': {'Code': 'AccessDenied'}}, 'sqs_test')

    class MockQueue(object):
        """Mock SQS queue returning one MockMessage per configured body."""

        def __init__(self, msgs=None):
            msgs = msgs if msgs is not None else []
            self.dummy_messages = [SQSSensorTestCase.MockMessage(x) for x in msgs]

        def receive_messages(self, **kwargs):
            return self.dummy_messages

    class MockMessage(object):
        """Mock SQS message with a no-op delete."""

        def __init__(self, body=None):
            self.body = body

        def delete(self):
            return mock.MagicMock(return_value=None)

    def setUp(self):
        super(SQSSensorTestCase, self).setUp()
        # Load the fixture configurations exercised by the tests below.
        self.full_config = self.load_yaml('full.yaml')
        self.blank_config = self.load_yaml('blank.yaml')
        self.multiaccount_config = self.load_yaml('multiaccount.yaml')
        self.mixed_config = self.load_yaml('mixed.yaml')

    def load_yaml(self, filename):
        """Return the parsed content of the named YAML fixture (safe_load only)."""
        return yaml.safe_load(self.get_fixture_content(filename))

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    def test_poll_with_blank_config(self):
        # With no queues configured, polling must dispatch nothing.
        sensor = self.get_sensor_instance(config=self.blank_config)
        sensor.setup()
        sensor.poll()
        self.assertEqual(self.get_dispatched_triggers(), [])

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource', mock.Mock(return_value=MockResource()))
    def _poll_without_message(self, config):
        # Helper: empty queues -> no triggers dispatched.
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        self.assertEqual(self.get_dispatched_triggers(), [])

    def test_poll_without_message_full_config(self):
        self._poll_without_message(self.full_config)

    def test_poll_without_message_multiaccount_config(self):
        self._poll_without_message(self.multiaccount_config)

    def test_poll_without_message_mixed_config(self):
        self._poll_without_message(self.mixed_config)

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource', mock.Mock(return_value=MockResource(['{"foo":"bar"}'])))
    def _poll_with_message(self, config):
        # Helper: one message on the queue -> the sqs_new_message trigger fires.
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')
        self.assertNotEqual(self.get_dispatched_triggers(), [])

    def test_poll_with_message_full_config(self):
        self._poll_with_message(self.full_config)

    def test_poll_with_message_multiaccount_config(self):
        self._poll_with_message(self.multiaccount_config)

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResourceNonExistentQueue(['{"foo":"bar"}'])))
    def _poll_with_non_existent_queue(self, config):
        # Helper: a missing queue is created on the fly, so the message is
        # still received and the trigger dispatched.
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        contexts = self.get_dispatched_triggers()
        self.assertNotEqual(contexts, [])
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')

    def test_poll_with_non_existent_queue_full_config(self):
        self._poll_with_non_existent_queue(self.full_config)

    def test_poll_with_non_existent_queue_multiaccount_config(self):
        self._poll_with_non_existent_queue(self.multiaccount_config)

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResource(['{"foo":"bar"}'])))
    def test_set_input_queues_config_dynamically(self):
        sensor = self.get_sensor_instance(config=self.blank_config)
        sensor._sensor_service.set_value('aws.roles',
                                         ['arn:aws:iam::123456789098:role/rolename1'],
                                         local=False)
        sensor.setup()
        # set credential mock to prevent sending request to AWS
        mock_credentials = mock.Mock()
        mock_credentials.access_key = sensor._get_config_entry('aws_access_key_id')
        mock_credentials.secret_key = sensor._get_config_entry('aws_secret_access_key')
        # NOTE(review): this rebinds the class attribute and is never restored,
        # so it leaks into any later test in the same process; consider
        # mock.patch.object(Session, 'get_credentials', ...) instead.
        Session.get_credentials = mock_credentials
        # set test value to datastore
        sensor._sensor_service.set_value('aws.input_queues', 'hoge', local=False)
        sensor.poll()
        # update input_queues to check this is reflected
        sensor._sensor_service.set_value('aws.input_queues', 'fuga,puyo', local=False)
        sensor.poll()
        # update input_queues to check this is reflected
        sensor._sensor_service.set_value(
            'aws.input_queues',
            'https://sqs.us-west-2.amazonaws.com/123456789098/queue_name_3',
            local=False
        )
        sensor.poll()
        contexts = self.get_dispatched_triggers()
        self.assertNotEqual(contexts, [])
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')
        # get message from queue 'hoge', 'fuga' then 'puyo'
        self.assertEqual([x['payload']['queue'] for x in contexts],
                         ['hoge', 'fuga', 'puyo',
                          'https://sqs.us-west-2.amazonaws.com/123456789098/queue_name_3'])

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResource(['{"foo":"bar"}'])))
    def test_set_input_queues_config_with_list(self):
        # set 'input_queues' config with list type
        config = self.full_config
        config['sqs_sensor']['input_queues'] = [
            'foo',
            'bar',
            'https://sqs.us-west-2.amazonaws.com/123456789098/queue_name_3'
        ]
        config['sqs_sensor']['roles'] = ['arn:aws:iam::123456789098:role/rolename1']
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        contexts = self.get_dispatched_triggers()
        self.assertNotEqual(contexts, [])
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')
        self.assertEqual([x['payload']['queue'] for x in contexts],
                         ['foo', 'bar',
                          'https://sqs.us-west-2.amazonaws.com/123456789098/queue_name_3'])

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(
                           return_value=MockResourceRaiseClientError('InvalidClientTokenId'))
                       )
    def _fails_with_invalid_token(self, config):
        # Helper: an invalid token error must be swallowed by the sensor
        # (no trigger dispatched, no exception propagated).
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        self.assertEqual(self.get_dispatched_triggers(), [])

    def test_fails_with_invalid_token_full_config(self):
        self._fails_with_invalid_token(self.full_config)

    def test_fails_with_invalid_token_multiaccount_config(self):
        self._fails_with_invalid_token(self.multiaccount_config)

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResourceRaiseNoCredentialsError()))
    def _fails_without_credentials(self, config):
        # Helper: missing credentials -> nothing dispatched, no crash.
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        self.assertEqual(self.get_dispatched_triggers(), [])

    def test_fails_without_credentials_full_config(self):
        self._fails_without_credentials(self.full_config)

    def test_fails_without_credentials_multiaccount_config(self):
        self._fails_without_credentials(self.multiaccount_config)

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResourceRaiseEndpointConnectionError()))
    def _fails_with_invalid_region(self, config):
        # Helper: unreachable endpoint (bad region) -> nothing dispatched.
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()
        self.assertEqual(self.get_dispatched_triggers(), [])

    def test_fails_with_invalid_region_full_config(self):
        self._fails_with_invalid_region(self.full_config)

    def test_fails_with_invalid_region_multiaccount_config(self):
        self._fails_with_invalid_region(self.multiaccount_config)

    @mock.patch.object(Session, 'client',
                       mock.Mock(return_value=MockStsClientRaiseClientError()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResource(['{"foo":"bar"}'])))
    def _fails_assuming_role(self, config):
        # Helper: assume_role is denied; the assertions differ per config
        # and therefore live in the individual test methods below.
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()

    def test_fails_assuming_role_full_config(self):
        # Full config falls back to static credentials, so the message
        # is still received.
        self._fails_assuming_role(self.full_config)
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')
        self.assertNotEqual(self.get_dispatched_triggers(), [])

    def test_fails_assuming_role_multiaccount_config(self):
        # Multi-account config depends on assumed roles, so nothing is
        # dispatched when assume_role fails.
        self._fails_assuming_role(self.multiaccount_config)
        self.assertEqual(self.get_dispatched_triggers(), [])

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(side_effect=NoRegionError(
                           service_name='sqs', region_name='us-east-1')))
    def test_fails_creating_sqs_resource(self):
        # Resource construction itself fails -> nothing dispatched.
        sensor = self.get_sensor_instance(config=self.mixed_config)
        sensor.setup()
        sensor.poll()
        self.assertEqual(self.get_dispatched_triggers(), [])

    @mock.patch.object(Session, 'client', mock.Mock(return_value=MockStsClient()))
    @mock.patch.object(Session, 'resource',
                       mock.Mock(return_value=MockResource(['{"foo":"bar"}'])))
    def _poll_with_missing_arn(self, config):
        # Helper: empty the roles list; per-config assertions live in the
        # individual test methods below.
        config['sqs_sensor']['roles'] = []
        sensor = self.get_sensor_instance(config=config)
        sensor.setup()
        sensor.poll()

    def test_poll_with_missing_arn_full_config(self):
        self._poll_with_missing_arn(self.full_config)
        self.assertNotEqual(self.get_dispatched_triggers(), [])
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')

    def test_poll_with_missing_arn_multiaccount_config(self):
        self._poll_with_missing_arn(self.multiaccount_config)
        self.assertEqual(self.get_dispatched_triggers(), [])

    def test_poll_with_missing_arn_mixed_config(self):
        self._poll_with_missing_arn(self.mixed_config)
        self.assertNotEqual(self.get_dispatched_triggers(), [])
        self.assertTriggerDispatched(trigger='aws.sqs_new_message')
|
163985
|
import os
import tqdm
import logging
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader, TensorDataset
from transformers import BertTokenizer
from agents.utils import Statistics
from agents.optim_schedule import ScheduledOptim
from .soft_masked_bert import SoftMaskedBert
from .data_utils import build_dataset, collate
# BERT_MODEL = 'bert-base-uncased'
BERT_MODEL = 'bert-base-chinese'
logger = logging.getLogger(__file__)
class SoftMaskedBertTrainer(object):
    """Training/evaluation/inference harness for ``SoftMaskedBert``.

    The model has two heads: a detection head scoring per-token error
    probability (trained with ``criterion_d``, element-wise BCE) and a
    correction head scoring the corrected-token distribution (trained with
    ``criterion_c``, NLL ignoring padding).  The two losses are mixed with
    weight ``gama``: ``loss = (1 - gama) * loss_d + gama * loss_c``.
    """

    @classmethod
    def add_cmdline_args(cls, argparser):
        """Register trainer and optimizer-schedule arguments on ``argparser``."""
        # super(SoftMaskedBertTrainer, cls).add_cmdline_args(argparser)
        ScheduledOptim.add_cmdline_args(argparser)
        agent = argparser.add_argument_group('SoftMaskedBertTrainer Arguments')
        # add_common_cmdline_args(agent)
        # memory and knowledge arguments
        agent.add_argument('--batch_size', default=8, type=int)
        agent.add_argument('--num_workers', default=8, type=int)
        agent.add_argument('--max_len', default=128, type=int)
        agent.add_argument('--vocab_path', type=str, default=None)
        agent.add_argument('--checkpoint', type=str, default=BERT_MODEL)
        agent.add_argument("--hidden_size", default=256, type=int)
        agent.add_argument("--rnn_layer", default=1, type=int)
        agent.add_argument("--learning_rate", default=2e-5, type=float)
        agent.add_argument("--gradient_accumulation_steps", type=int, default=1,
                           help="Accumulate gradients on several steps")
        agent.add_argument("--max_grad_norm", type=float, default=1.0,
                           help="Clipping gradient norm")
        agent.add_argument('--report_every', default=-1, type=int)
        # weight of the correction loss in the combined objective
        agent.add_argument('--gama', type=float, default=0.8)

    def __init__(self, opt, device):
        """Build tokenizer, model, optimizer schedule and loss criteria.

        :param opt: parsed command-line options (see ``add_cmdline_args``)
        :param device: torch device the model and batches are moved to
        """
        self.opt = opt
        self.device = device
        self._dataset = {}      # split name -> raw features built by build_dataset
        self._dataloader = {}   # split name -> DataLoader over the collated tensors
        # Prefer an explicit vocab path; otherwise load from the checkpoint name.
        self.tokenizer = BertTokenizer.from_pretrained(opt.vocab_path if opt.vocab_path else opt.checkpoint,
                                                       do_lower_case=True)
        self.model = SoftMaskedBert(opt, self.tokenizer, device).to(device)
        # if torch.cuda.device_count() > 1:
        # print("Using %d GPUS for train" % torch.cuda.device_count())
        # self.model = nn.DataParallel(self.model, device_ids=[0,1,2])
        # _optimizer = optim.Adam(self.model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
        _optimizer = _get_optimizer(self.model, opt)
        self.optim_schedule = ScheduledOptim(opt, _optimizer)
        # Correction loss: NLL over vocabulary, padding positions ignored.
        self.criterion_c = nn.NLLLoss(ignore_index=self.tokenizer.pad_token_id)
        # Detection loss: per-element BCE; masking/averaging is done manually
        # in ``iteration`` so that padded label positions are excluded.
        self.criterion_d = nn.BCELoss(reduction="none")
        self.gama = opt.gama

    def load_data(self, datasets):
        """Tokenize/collate each split in ``datasets`` and build its DataLoader.

        :param datasets: mapping of split name (e.g. "train", "valid") to raw data
        """
        for k, v in datasets.items():
            # self._dataset[type] = BertDataset(self.tokenizer, data, max_len=self.opt.max_len)
            self._dataset[k] = build_dataset(v, self.tokenizer)
            tensor_dataset = collate(self._dataset[k], self.tokenizer.pad_token_id)
            dataset = TensorDataset(*tensor_dataset)
            # Only the training split is shuffled.
            self._dataloader[k] = DataLoader(dataset,
                                             batch_size=self.opt.batch_size,
                                             num_workers=self.opt.num_workers,
                                             shuffle=(k == "train"))

    def train(self, epoch, data_type="train"):
        """Run one training epoch and return its average loss (xent)."""
        self.model.train()
        return self.iteration(epoch, self._dataloader[data_type])

    def evaluate(self, epoch, data_type="valid"):
        """Run one evaluation epoch (no optimizer steps) and return its loss."""
        self.model.eval()
        return self.iteration(epoch, self._dataloader[data_type], data_type=data_type)

    def infer(self, data_loader):
        """Run the model over ``data_loader`` and return decoded token strings.

        :param data_loader: iterable of dict batches with keys "input_ids",
            "input_mask" and "segment_ids" (unlike ``iteration``, which takes
            tuple batches — NOTE(review): the two call signatures of
            ``self.model`` differ here (3 args) vs. ``iteration`` (2 args);
            confirm against ``SoftMaskedBert.forward``).
        :return: list of strings, one per input sequence
        """
        self.model.eval()
        out_put = []
        data_loader = tqdm.tqdm(enumerate(data_loader),
                                desc="%s" % 'Inference:',
                                total=len(data_loader),
                                bar_format="{l_bar}{r_bar}")
        for i, data in data_loader:
            # 0. batch_data will be sent into the device(GPU or cpu)
            data = {key: value.to(self.device) for key, value in data.items()}
            out, prob = self.model(data["input_ids"], data["input_mask"],
                                   data["segment_ids"])  # prob [batch_size, seq_len, 1]
            out_put.extend(out.argmax(dim=-1).cpu().numpy().tolist())
        return [''.join(self.tokenizer.convert_ids_to_tokens(x)) for x in out_put]

    def save(self, file_path):
        """Serialize the whole model (not just the state dict) to ``file_path``.

        The model is moved to CPU for saving and then restored to the device.
        """
        torch.save(self.model.cpu(), file_path)
        self.model.to(self.device)
        logger.info('Model save {}'.format(file_path))

    def load(self, file_path):
        """Load a model previously written by ``save``; no-op if file missing."""
        if not os.path.exists(file_path):
            return
        self.model = torch.load(file_path)
        self.model.to(self.device)

    def iteration(self, epoch, data_loader, data_type="train"):
        """Iterate one epoch over ``data_loader``; step the optimizer when training.

        :param epoch: epoch index, used only for progress/log labels
        :param data_loader: DataLoader yielding (input_ids, input_mask,
            output_ids, labels) tensor tuples
        :param data_type: "train" enables backprop and optimizer steps
        :return: average loss per non-padding token (``stats.xent()``)
        """
        str_code = data_type
        # Setting the tqdm progress bar
        data_loader = tqdm.tqdm(enumerate(data_loader),
                                desc="Epoch_%s:%d" % (str_code, epoch),
                                total=len(data_loader),
                                bar_format="{l_bar}{r_bar}")
        stats = Statistics()
        for step, batch in data_loader:
            # 0. batch_data will be sent into the device(GPU or cpu)
            # data = {key: value.to(self.device) for key, value in data.items()}
            input_ids, input_mask, output_ids, labels = tuple(
                input_tensor.to(self.device) for input_tensor in batch)
            d_scores, c_scores = self.model(input_ids, input_mask)  # prob [batch_size, seq_len, 1]
            d_scores = d_scores.squeeze(dim=-1)
            # Detection loss, manually averaged over valid label positions.
            loss_d = self.criterion_d(d_scores, labels.float())
            # Positions labelled -1 are treated as padding and excluded.
            label_mask = labels.ne(-1)
            loss_d = (loss_d * label_mask).sum() / label_mask.float().sum()
            # Correction loss over the flattened vocabulary distribution.
            loss_c = self.criterion_c(c_scores.view(-1, c_scores.size(-1)), output_ids.view(-1))
            loss = (1 - self.gama) * loss_d + self.gama * loss_c
            if data_type == "train":
                # Scale for gradient accumulation; step only every N batches.
                loss = loss / self.opt.gradient_accumulation_steps
                # NOTE(review): retain_graph=True looks unnecessary here and
                # costs memory — confirm nothing reuses the graph.
                loss.backward(retain_graph=True)
                if step % self.opt.gradient_accumulation_steps == 0:
                    # torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt.max_grad_norm)
                    self.optim_schedule.step()
                    self.optim_schedule.zero_grad()
            # sta
            self._stats(stats, loss.item(), d_scores, labels, c_scores, input_ids, output_ids)
            # if data_type == "train" and self.opt.report_every > 0 and step % self.opt.report_every == 0:
            # post_fix = {
            # "epoch": epoch,
            # "iter": step,
            # "lr": self.optim_schedule.learning_rate()
            # }
            # post_fix.update(stats.report())
            # data_loader.write(
            # "\n" + str({k: (round(v, 5) if isinstance(v, float) else v) for k, v in post_fix.items()}))
            # sys.stdout.flush()
        logger.info("Epoch{}_{}, ".format(epoch, str_code))
        self._report(stats)
        return stats.xent()

    def _stats(self, stats, loss, d_scores, label, c_scores, inputs, target):
        """Accumulate confusion counts for the detection and correction heads.

        ``error`` marks positions where the input differs from the target
        (i.e. a real spelling error); padding positions (pad token in
        ``target``) are excluded from every count.
        """
        c_pred = c_scores.argmax(dim=-1)
        non_padding = target.ne(self.tokenizer.pad_token_id)
        num_non_padding = non_padding.sum().item()
        # Detection predictions: probability rounded to a 0/1 decision.
        d_pred = torch.round(d_scores).long()
        error = target.ne(inputs)
        metrics = {
            "d_tp": (d_pred.eq(1) & error.eq(True)).masked_select(non_padding).sum().item(),
            "d_tn": (d_pred.eq(0) & error.eq(False)).masked_select(non_padding).sum().item(),
            "d_fp": (d_pred.eq(1) & error.eq(False)).masked_select(non_padding).sum().item(),
            "d_fn": (d_pred.eq(0) & error.eq(True)).masked_select(non_padding).sum().item(),
            # "n_correct": c_pred.eq(target).masked_select(non_padding).sum().item(),
            "c_tp": (c_pred.eq(target) & error.eq(True)).masked_select(non_padding).sum().item(),
            "c_tn": (c_pred.eq(target) & error.eq(False)).masked_select(non_padding).sum().item(),
            "c_fp": (c_pred.ne(target) & error.eq(False)).masked_select(non_padding).sum().item(),
            "c_fn": (c_pred.ne(target) & error.eq(True)).masked_select(non_padding).sum().item(),
        }
        # Loss is re-weighted by token count so xent() averages per token.
        stats.update(loss * num_non_padding, num_non_padding, metrics)

    def _report(self, stats: Statistics):
        """Log average loss plus detection ("d_") and correction ("c_") metrics.

        NOTE(review): the two metric strings are concatenated without a
        separator, so the log line reads "...f1: X" immediately followed by
        "acc: ..." — cosmetic, but worth confirming/fixing upstream.
        """
        logger.info("avg_loss: {} ".format(round(stats.xent(), 5)) +
                    "acc: {}, prec: {}, recall: {}, f1: {}".format(
                        round(stats.aprf("d_")[0], 5), round(stats.aprf("d_")[1], 5),
                        round(stats.aprf("d_")[2], 5), round(stats.aprf("d_")[3], 5)) +
                    "acc: {}, prec: {}, recall: {}, f1: {}".format(
                        round(stats.aprf("c_")[0], 5), round(stats.aprf("c_")[1], 5),
                        round(stats.aprf("c_")[2], 5), round(stats.aprf("c_")[3], 5))
                    )
def _get_optimizer(model, opt):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if
not any(nd in n for nd in no_decay)], 'weight_decay': opt.weight_decay},
{'params': [p for n, p in param_optimizer if
any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
return optim.Adam(optimizer_grouped_parameters, lr=opt.learning_rate)
|
163992
|
import unittest
import os
from thunderbolt.client.local_directory_client import LocalDirectoryClient
class TestLocalDirectoryClient(unittest.TestCase):
    """Unit tests for ``LocalDirectoryClient`` path resolution."""

    def setUp(self):
        # Cache disabled so the test exercises only path logic.
        self.client = LocalDirectoryClient('.', None, None, use_cache=False)

    def test_to_absolute_path(self):
        relative_source = './hoge/hoge/piyo'
        self.client.workspace_directory = '../hoge/'
        expected = os.path.abspath('../hoge') + '/hoge/piyo'
        self.assertEqual(self.client.to_absolute_path(relative_source), expected)
|
164029
|
r"""
Manifold Subsets Defined as Pullbacks of Subsets under Continuous Maps
"""
# ****************************************************************************
# Copyright (C) 2021 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.categories.sets_cat import Sets, EmptySetError
from sage.categories.metric_spaces import MetricSpaces
from sage.modules.free_module import is_FreeModule
from sage.rings.infinity import infinity, minus_infinity
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.complex_double import CDF
from sage.rings.real_double import RDF
from sage.rings.real_lazy import CLF, RLF
from sage.symbolic.ring import SR
from sage.modules.free_module_element import vector
from sage.manifolds.subset import ManifoldSubset
from sage.manifolds.chart import Chart
from sage.manifolds.scalarfield import ScalarField
from sage.sets.real_set import RealSet
import sage.geometry.abc
from sage.geometry.relative_interior import RelativeInterior
class ManifoldSubsetPullback(ManifoldSubset):
"""
Manifold subset defined as a pullback of a subset under a continuous map.
INPUT:
- ``map`` - an instance of :class:`~sage.manifolds.continuous_map.ContinuousMap`,
:class:`ScalarField`, or :class:`Chart`
- ``codomain_subset`` - an instance of :class:`~sage.manifolds.subset.ManifoldSubset`,
:class:`RealSet`, or :class:`~sage.geometry.convex_set.ConvexSet_base`
EXAMPLES::
sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
sage: M = Manifold(2, 'R^2', structure='topological')
sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
Pulling back a real interval under a scalar field::
sage: r_squared = M.scalar_field(x^2+y^2)
sage: r_squared.set_immutable()
sage: cl_I = RealSet([1, 4]); cl_I
[1, 4]
sage: cl_O = ManifoldSubsetPullback(r_squared, cl_I); cl_O
Subset f_inv_[1, 4] of the 2-dimensional topological manifold R^2
sage: M.point((0, 0)) in cl_O
False
sage: M.point((0, 1)) in cl_O
True
Pulling back an open real interval gives an open subset::
sage: I = RealSet((1, 4)); I
(1, 4)
sage: O = ManifoldSubsetPullback(r_squared, I); O
Open subset f_inv_(1, 4) of the 2-dimensional topological manifold R^2
sage: M.point((1, 0)) in O
False
sage: M.point((1, 1)) in O
True
Pulling back a polytope under a chart::
sage: P = Polyhedron(vertices=[[0, 0], [1, 2], [2, 1]]); P
A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
sage: S = ManifoldSubsetPullback(c_cart, P); S
Subset x_y_inv_P of the 2-dimensional topological manifold R^2
sage: M((1, 2)) in S
True
sage: M((2, 0)) in S
False
Pulling back the interior of a polytope under a chart::
sage: int_P = P.interior(); int_P
Relative interior of a 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
sage: int_S = ManifoldSubsetPullback(c_cart, int_P, name='int_S'); int_S
Open subset int_S of the 2-dimensional topological manifold R^2
sage: M((0, 0)) in int_S
False
sage: M((1, 1)) in int_S
True
Using the embedding map of a submanifold::
sage: M = Manifold(3, 'M', structure="topological")
sage: N = Manifold(2, 'N', ambient=M, structure="topological")
sage: N
2-dimensional topological submanifold N immersed in the 3-dimensional topological manifold M
sage: CM.<x,y,z> = M.chart()
sage: CN.<u,v> = N.chart()
sage: t = var('t')
sage: phi = N.continuous_map(M, {(CN,CM): [u,v,t+u^2+v^2]})
sage: phi_inv = M.continuous_map(N, {(CM,CN): [x,y]})
sage: phi_inv_t = M.scalar_field({CM: z-x^2-y^2})
sage: N.set_immersion(phi, inverse=phi_inv, var=t,
....: t_inverse={t: phi_inv_t})
sage: N.declare_embedding()
sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
sage: S = M.open_subset('S', coord_def={CM: z<1})
sage: phi_without_t = N.continuous_map(M, {(CN, CM): [expr.subs(t=0) for expr in phi.expr()]}); phi_without_t
Continuous map
from the 2-dimensional topological submanifold N
embedded in the 3-dimensional topological manifold M
to the 3-dimensional topological manifold M
sage: phi_without_t.expr()
(u, v, u^2 + v^2)
sage: D = ManifoldSubsetPullback(phi_without_t, S); D
Subset f_inv_S of the 2-dimensional topological submanifold N embedded in the 3-dimensional topological manifold M
sage: N.point((2,0)) in D
False
"""
@staticmethod
def __classcall_private__(cls, map, codomain_subset, inverse=None,
                          name=None, latex_name=None):
    """
    Normalize arguments and delegate to other constructors.

    Mutable inputs are replaced by immutable copies (so the
    ``UniqueRepresentation``-style cache key is stable), display names are
    derived when not supplied, and — when the pullback can be recognized as
    an open subset with a coordinate description — construction is delegated
    to ``open_subset`` instead of building a generic pullback object.

    TESTS::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(2, 'R^2', structure='topological')
        sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
        sage: P = Polyhedron(vertices=[[0, 0], [1, 2], [3, 4]]); P
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
        sage: S = ManifoldSubsetPullback(c_cart, P); S
        Subset x_y_inv_P of the 2-dimensional topological manifold R^2
        sage: S is ManifoldSubsetPullback(c_cart, P)
        True
    """
    # Freeze a mutable ``map`` so caching by value is safe; objects without
    # a mutability protocol (e.g. a Chart) raise AttributeError and are
    # left alone.
    try:
        is_mutable = map.is_mutable()
    except AttributeError:
        pass
    else:
        if is_mutable:
            map = map.copy()
            map.set_immutable()
    # Same treatment for the optional inverse (AttributeError also covers
    # ``inverse is None``).
    try:
        is_mutable = inverse.is_mutable()
    except AttributeError:
        pass
    else:
        if is_mutable:
            inverse = inverse.copy()
            inverse.set_immutable()
    if inverse is None:
        # Derive display names for the (formal) inverse of ``map``.
        if isinstance(map, Chart):
            from sage.misc.latex import latex
            inverse_latex_name = '(' + ','.join(str(latex(x)) + '^{-1}' for x in map) + ')'
            inverse_name = '_'.join(repr(x) for x in map) + '_inv'
        else:
            map_name = map._name or 'f'
            map_latex_name = map._latex_name or map_name
            inverse_name = map_name + '_inv'
            inverse_latex_name = map_latex_name + r'^{-1}'
    else:
        inverse_name = inverse._name
        inverse_latex_name = inverse._latex_name
    # Name of the codomain subset: use its own names when available,
    # otherwise fall back to its LaTeX/repr (abbreviated to 'P' if long).
    try:
        codomain_subset_latex_name = codomain_subset._latex_name
        codomain_subset_name = codomain_subset._name
    except AttributeError:
        from sage.misc.latex import latex
        codomain_subset_latex_name = str(latex(codomain_subset))
        s = repr(codomain_subset)
        if len(s) > 10:
            codomain_subset_name = 'P'
        else:
            codomain_subset_name = s
    if latex_name is None:
        if name is None:
            latex_name = inverse_latex_name + '(' + codomain_subset_latex_name + ')'
        else:
            latex_name = name
    if name is None:
        name = inverse_name + '_' + codomain_subset_name
    if cls._is_open(codomain_subset):
        # An open codomain subset pulls back to an open subset; prefer the
        # coordinate-definition construction when one is available.
        try:
            coord_def = cls._coord_def(map, codomain_subset)
        except NotImplementedError:
            # No coordinate description known; fall through to the generic
            # pullback construction below.
            pass
        else:
            return map.domain().open_subset(name=name, latex_name=latex_name,
                                            coord_def=coord_def)
    self = super().__classcall__(cls, map, codomain_subset, inverse, name, latex_name)
    return self
@staticmethod
def _is_open(codomain_subset):
    """
    Return whether ``codomain_subset`` is (known to be) an open subset of its ambient space.

    This is a conservative, type-dispatched test: ``False`` is returned
    whenever openness cannot be established for the given type.

    EXAMPLES:

    Manifolds and subsets::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: R2 = Manifold(2, 'R^2', structure='topological'); R2
        2-dimensional topological manifold R^2
        sage: ManifoldSubsetPullback._is_open(R2)
        True
        sage: A = R2.subset('A'); A
        Subset A of the 2-dimensional topological manifold R^2
        sage: ManifoldSubsetPullback._is_open(A)
        False

    :class:`RealSet` instances::

        sage: I = RealSet.open(1, 2); I
        (1, 2)
        sage: ManifoldSubsetPullback._is_open(I)
        True
        sage: cl_I = RealSet.closed(1, 2); cl_I
        [1, 2]
        sage: ManifoldSubsetPullback._is_open(cl_I)
        False

    Polyhedra::

        sage: Empty = Polyhedron(ambient_dim=2); Empty
        The empty polyhedron in ZZ^2
        sage: ManifoldSubsetPullback._is_open(Empty)
        True
        sage: C = polytopes.cube(); C
        A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices
        sage: ManifoldSubsetPullback._is_open(C)
        False

    Interiors of polyhedra::

        sage: int_C = C.interior(); int_C
        Relative interior of a 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices
        sage: ManifoldSubsetPullback._is_open(int_C)
        True

    PPL polyhedra and not-necessarily-closed polyhedra::

        sage: from ppl import Variable, C_Polyhedron, NNC_Polyhedron, Constraint_System
        sage: u = Variable(0)
        sage: v = Variable(1)
        sage: CS = Constraint_System()
        sage: CS.insert(0 < u)
        sage: CS.insert(u < 1)
        sage: CS.insert(0 < v)
        sage: CS.insert(v < 1)
        sage: CS.insert(u + v <= 3) # redundant inequality
        sage: P = NNC_Polyhedron(CS); P
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 1 point, 4 closure_points
        sage: ManifoldSubsetPullback._is_open(P)
        True
        sage: CS.insert(u + v <= 1)
        sage: T = NNC_Polyhedron(CS); T
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 1 point, 3 closure_points
        sage: ManifoldSubsetPullback._is_open(T)
        False
    """
    # Types that know their own openness.
    if isinstance(codomain_subset, ManifoldSubset):
        return codomain_subset.is_open()
    if isinstance(codomain_subset, RealSet):
        return codomain_subset.is_open()
    if isinstance(codomain_subset, sage.geometry.abc.Polyhedron):
        # A (closed) polyhedron is open only in the degenerate cases:
        # the empty set or the whole ambient space.
        return codomain_subset.is_empty() or codomain_subset.is_universe()
    if isinstance(codomain_subset, RelativeInterior):
        # The relative interior is open in the ambient space iff its
        # closure is full-dimensional.
        return codomain_subset.closure().is_full_dimensional()
    if codomain_subset in Sets().Finite():
        # A finite set is open only if it is empty.
        return codomain_subset.cardinality() == 0
    # ppl polyhedra expose ``minimized_constraints``; open iff all
    # minimized constraints are strict inequalities.
    if hasattr(codomain_subset, 'minimized_constraints'):
        try:
            from ppl import NNC_Polyhedron, C_Polyhedron
        except ImportError:
            pass
        else:
            if isinstance(codomain_subset, (NNC_Polyhedron, C_Polyhedron)):
                cs = codomain_subset.minimized_constraints()
                if cs.has_equalities():
                    return False
                if any(constraint.is_nonstrict_inequality()
                       for constraint in cs):
                    return False
                return True
    # Unknown type: not known to be open.
    return False
@staticmethod
def _interval_restriction(expr, interval):
    """
    Return a restriction expressing that ``expr`` lies in ``interval``.

    INPUT:

    - ``expr`` -- a symbolic expression

    - ``interval`` -- an instance of :class:`~sage.sets.real_set.InternalRealInterval`

    OUTPUT:

    - A restriction suitable as input to :meth:`~sage.manifolds.chart.restrict`:
      lists are conjunctions, tuples are disjunctions.  The empty list ``[]``
      means "always true"; the empty tuple ``()`` means "always false".

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: _interval_restriction = ManifoldSubsetPullback._interval_restriction
        sage: var('t')
        t
        sage: assume(t >= -2)
        sage: assume(t <= 5)
        sage: _interval_restriction(t, RealSet(3, 4)[0])
        [t > 3, t < 4]
        sage: _interval_restriction(t, RealSet.unbounded_below_closed(2)[0])
        t <= 2
        sage: _interval_restriction(t, RealSet.closed(-5, 5)[0])
        []
        sage: _interval_restriction(t, RealSet.unbounded_below_closed(-5)[0])
        ()
        sage: _interval_restriction(t, RealSet.unbounded_above_closed(6)[0])
        ()
        sage: _interval_restriction(t^2, RealSet.unbounded_above_closed(0)[0])
        []
    """
    conjunction = []
    # Lower bound: skipped entirely when the interval is unbounded below.
    if interval.lower() != minus_infinity:
        if interval.lower_closed():
            condition = (expr >= interval.lower())
            negation = (expr < interval.lower())
        else:
            condition = (expr > interval.lower())
            negation = (expr <= interval.lower())
        # Symbolic truthiness: ``bool`` of a symbolic relation is True only
        # when it can be PROVED under the current assumptions; an undecided
        # relation is falsy.  Hence the separate ``condition``/``negation``
        # pair: a provable negation means the restriction is unsatisfiable,
        # while an unproved condition must be kept as a constraint.
        if negation:
            # known to be false
            return ()
        if not condition:
            # not known to be true
            conjunction.append(condition)
    # Upper bound: same scheme, skipped when unbounded above.
    if interval.upper() != infinity:
        if interval.upper_closed():
            condition = (expr <= interval.upper())
            negation = (expr > interval.upper())
        else:
            condition = (expr < interval.upper())
            negation = (expr >= interval.upper())
        if negation:
            # known to be false
            return ()
        if not condition:
            # not known to be true
            conjunction.append(condition)
    # A single condition is returned bare; otherwise as a list.
    if len(conjunction) == 1:
        return conjunction[0]
    else:
        # lists express 'and'
        return conjunction
@staticmethod
def _realset_restriction(expr, realset):
    """
    Return a restriction expressing that ``expr`` lies in ``realset``.

    The realset is a union of intervals, so the result is the disjunction
    (tuple) of the per-interval restrictions computed by
    :meth:`_interval_restriction`.

    INPUT:

    - ``expr`` -- a symbolic expression

    - ``realset`` -- an instance of :class:`~sage.sets.real_set.RealSet`

    OUTPUT:

    - A restriction suitable as input to :meth:`~sage.manifolds.chart.restrict`:
      lists are conjunctions, tuples are disjunctions

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: _realset_restriction = ManifoldSubsetPullback._realset_restriction
        sage: var('t')
        t
        sage: assume(t >= -2)
        sage: assume(t <= 5)
        sage: _realset_restriction(t, RealSet(-oo, oo))
        []
        sage: _realset_restriction(t, RealSet())
        ()
        sage: _realset_restriction(t, RealSet([-5, -4], (-1, 1), [3, 4], [6, 7]))
        ([t > -1, t < 1], [t >= 3, t <= 4])
    """
    alternatives = []
    for interval in realset:
        restriction = ManifoldSubsetPullback._interval_restriction(expr, interval)
        if restriction == []:
            # One interval is always satisfied, hence so is the union.
            return []
        if restriction != ():
            # Drop unsatisfiable intervals; keep the rest as alternatives.
            alternatives.append(restriction)
    if len(alternatives) == 1:
        return alternatives[0]
    # tuples express 'or'
    return tuple(alternatives)
@staticmethod
def _polyhedron_restriction(expr, polyhedron, relint=False):
    """
    Return a restriction expressing that ``expr`` lies in ``polyhedron``
    or its relative interior.

    INPUT:

    - ``expr`` -- a symbolic expression

    - ``polyhedron`` -- an instance of
      :class:`~sage.geometry.polyhedron.base.Polyhedron_base`

    - ``relint`` -- whether the restriction should use the relative interior.

    OUTPUT:

    - A restriction suitable as input to :meth:`~sage.manifolds.chart.restrict`:
      lists are conjunctions, tuples are disjunctions

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: _polyhedron_restriction = ManifoldSubsetPullback._polyhedron_restriction
        sage: var('x y z')
        (x, y, z)
        sage: c = polytopes.cube()
        sage: _polyhedron_restriction((x, y, z), c)
        [-x + 1 >= 0, -y + 1 >= 0, -z + 1 >= 0, x + 1 >= 0, z + 1 >= 0, y + 1 >= 0]
        sage: _polyhedron_restriction((x, y, z), c, relint=True)
        [-x + 1 > 0, -y + 1 > 0, -z + 1 > 0, x + 1 > 0, z + 1 > 0, y + 1 > 0]
    """
    conjunction = []
    expr = vector(SR, expr)
    # One symbolic (in)equality per element of the H-representation.
    for constraint in polyhedron.Hrepresentation():
        if constraint.is_inequality():
            if relint:
                # strict inequalities cut the set down to its relative interior
                condition = (constraint.eval(expr) > 0)
            else:
                condition = (constraint.eval(expr) >= 0)
        else:
            condition = (constraint.eval(expr) == 0)
        # bool() of a symbolic relation attempts a proof under the current
        # assumptions; only conditions not already known to hold are kept.
        if not condition:
            # not known to be true
            conjunction.append(condition)
    if len(conjunction) == 1:
        return conjunction[0]
    else:
        # lists express 'and'
        return conjunction
@staticmethod
def _coord_def(map, codomain_subset):
    r"""
    Return a coordinate definition of the open subset that is the pullback
    of ``codomain_subset``.

    INPUT:

    - ``map`` -- an instance of :class:`ScalarField` or :class:`Chart`

    - ``codomain_subset`` -- if ``map`` is a :class:`ScalarField`, an
      instance of :class:`RealSet`; if ``map`` is a :class:`Chart`, the
      relative interior of a polyhedron.  Any other combination raises
      a ``NotImplementedError``.

    OUTPUT:

    - an object suitable for the parameter ``coord_def`` of
      :meth:`sage.manifolds.manifold.TopologicalManifold.open_subset`.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: _coord_def = ManifoldSubsetPullback._coord_def
        sage: M = Manifold(2, 'R^2', structure='topological')
        sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
        sage: P = Polyhedron(vertices=[[0, 0], [1, 2], [3, 4]])
        sage: _coord_def(c_cart, P.relative_interior())
        {Chart (R^2, (x, y)): [2*x - y > 0, -4*x + 3*y > 0, x - y + 1 > 0]}
        sage: r_squared = M.scalar_field(x^2+y^2)
        sage: _coord_def(r_squared, RealSet((1, 4)))
        {Chart (R^2, (x, y)): [x^2 + y^2 > 1, x^2 + y^2 < 4]}
    """
    realset_restriction = ManifoldSubsetPullback._realset_restriction
    if isinstance(map, ScalarField) and isinstance(codomain_subset, RealSet):
        # One restriction per chart in which the scalar field has a
        # coordinate expression.
        coord_def = {}
        for chart, function in map._express.items():
            coord_def[chart] = realset_restriction(function.expr(), codomain_subset)
        return coord_def
    if isinstance(map, Chart):
        chart = map
        if isinstance(codomain_subset, RealSet):
            # A 1-dimensional chart pulled back through its only coordinate.
            return {chart: realset_restriction(chart[0], codomain_subset)}
        if isinstance(codomain_subset, RelativeInterior):
            closure = codomain_subset.closure()
            if isinstance(closure, sage.geometry.abc.Polyhedron):
                restriction = ManifoldSubsetPullback._polyhedron_restriction(
                    chart, closure, relint=True)
                return {chart: restriction}
    raise NotImplementedError
def __init__(self, map, codomain_subset, inverse, name, latex_name):
    r"""
    Construct a manifold subset that is a pullback.

    TESTS::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(2, 'R^2', structure='topological')
        sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
        sage: r_squared = M.scalar_field(x^2+y^2)
        sage: r_squared.set_immutable()
        sage: cl_I = RealSet([1, 4]); cl_I
        [1, 4]
        sage: cl_O = ManifoldSubsetPullback(r_squared, cl_I); cl_O
        Subset f_inv_[1, 4] of the 2-dimensional topological manifold R^2
        sage: TestSuite(cl_O).run(skip='_test_elements')
    """
    if inverse is None and isinstance(map, Chart):
        chart = map
        # The codomain holds scalars if it is a RealSet or coerces into one
        # of the real/complex floating-point or lazy fields.
        scalar_codomain = (isinstance(codomain_subset, RealSet)
                           or any(field.has_coerce_map_from(codomain_subset)
                                  for field in (CDF, RDF, CLF, RLF)))
        if scalar_codomain:
            if chart.domain().dimension() != 1:
                raise ValueError('to pull back a set of scalars by a chart, the manifold must be 1-dimensional')
            # Replace the chart by the scalar field given by its only coordinate.
            map = chart.domain().scalar_field({chart: chart[0]})

            def _inverse(coord):
                # a single scalar becomes a 1-tuple of chart coordinates
                return self.point((coord,), chart=chart)
        else:
            def _inverse(coords):
                return self.point(coords, chart=map)
        # Both closures capture self; they are only called after construction.
        inverse = _inverse
    self._map = map
    self._inverse = inverse
    self._codomain_subset = codomain_subset
    base_manifold = map.domain()
    ManifoldSubset.__init__(self, base_manifold, name, latex_name=latex_name)
def _an_element_(self):
    r"""
    Construct some point in ``self``.

    The first point produced by :meth:`some_elements` is returned; if no
    point can be produced, an
    :class:`~sage.categories.sets_cat.EmptySetError` is raised.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(3, 'R^3', structure='topological')
        sage: c_cart.<x,y,z> = M.chart() # Cartesian coordinates on R^3
        sage: Cube = polytopes.cube()
        sage: McCube = ManifoldSubsetPullback(c_cart, Cube, name='McCube')
        sage: p = McCube.an_element(); p
        Point on the 3-dimensional topological manifold R^3
        sage: p.coordinates(c_cart)
        (0, 0, 0)
        sage: Empty = Polyhedron(ambient_dim=3)
        sage: McEmpty = ManifoldSubsetPullback(c_cart, Empty, name='McEmpty')
        sage: McEmpty.an_element()
        Traceback (most recent call last):
        ...
        sage.categories.sets_cat.EmptySetError
    """
    for point in self.some_elements():
        return point
    raise EmptySetError
def some_elements(self):
    r"""
    Generate some points of ``self``.

    When an inverse map is available, points of the codomain subset are
    pulled back through it.  Otherwise, at most one candidate point of the
    ambient manifold is tried.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(3, 'R^3', structure='topological')
        sage: c_cart.<x,y,z> = M.chart() # Cartesian coordinates on R^3
        sage: Cube = polytopes.cube()
        sage: McCube = ManifoldSubsetPullback(c_cart, Cube, name='McCube')
        sage: L = list(McCube.some_elements())
        sage: list(p.coordinates(c_cart) for p in L)
        [(0, 0, 0),
         (1, -1, -1),
         (1, 0, -1),
         (1, 1/2, 0),
         (1, -1/4, 1/2),
         (0, -5/8, 3/4)]
        sage: Empty = Polyhedron(ambient_dim=3)
        sage: McEmpty = ManifoldSubsetPullback(c_cart, Empty, name='McEmpty')
        sage: list(McEmpty.some_elements())
        []
    """
    if self._inverse is not None:
        pullback = self._inverse
        for codomain_point in self._codomain_subset.some_elements():
            yield pullback(codomain_point)
        return
    if self.is_empty():
        return
    # Fallback: try a generic point of the ambient manifold.
    candidate = super()._an_element_()
    if candidate in self:
        yield candidate
def __contains__(self, point):
    r"""
    Check whether ``point`` is contained in ``self``.

    Membership holds if the generic subset bookkeeping already knows the
    point, or if the image of the point under the map lies in the codomain
    subset.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(3, 'R^3', structure='topological')
        sage: c_cart.<x,y,z> = M.chart() # Cartesian coordinates on R^3
        sage: Cube = polytopes.cube()
        sage: McCube = ManifoldSubsetPullback(c_cart, Cube, name='McCube')
        sage: M.point((0, 0, 0)) in McCube
        True
        sage: M.point((2, 3, 4)) in McCube
        False
    """
    if super().__contains__(point):
        # already known to belong, no need to evaluate the map
        return True
    image = self._map(point)
    if isinstance(image, (tuple, list)):
        # coordinate tuples must be converted before testing membership
        # in the codomain subset
        image = vector(image)
    return image in self._codomain_subset
def is_open(self):
    """
    Return if ``self`` is (known to be) an open set.

    This version of the method always returns ``False``.

    Because the map is continuous, the pullback is open if the
    ``codomain_subset`` is open.
    However, the design of :class:`~sage.manifolds.subset.ManifoldSubset` requires that open subsets
    are instances of the subclass :class:`sage.manifolds.manifold.TopologicalManifold`.
    The constructor of :class:`ManifoldSubsetPullback` delegates to a subclass
    of :class:`sage.manifolds.manifold.TopologicalManifold` for some open subsets.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(2, 'R^2', structure='topological')
        sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
        sage: P = Polyhedron(vertices=[[0, 0], [1, 2], [3, 4]]); P
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
        sage: P.is_open()
        False
        sage: McP = ManifoldSubsetPullback(c_cart, P, name='McP'); McP
        Subset McP of the 2-dimensional topological manifold R^2
        sage: McP.is_open()
        False
    """
    # Defer to the generic implementation, which reports only what has
    # been declared (see the docstring for why no openness is inferred).
    return super().is_open()
def is_closed(self):
    """
    Return if ``self`` is (known to be) a closed subset of the manifold.

    The decision is made from the type of the codomain subset: manifold
    subsets, real sets, polyhedra, free modules, finite sets, and ppl
    polyhedra are each handled with a specific criterion; anything else
    falls back to the generic implementation.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(2, 'R^2', structure='topological')
        sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2

    The pullback of a closed real interval under a scalar field is closed::

        sage: r_squared = M.scalar_field(x^2+y^2)
        sage: r_squared.set_immutable()
        sage: cl_I = RealSet([1, 2]); cl_I
        [1, 2]
        sage: cl_O = ManifoldSubsetPullback(r_squared, cl_I); cl_O
        Subset f_inv_[1, 2] of the 2-dimensional topological manifold R^2
        sage: cl_O.is_closed()
        True

    The pullback of a (closed convex) polyhedron under a chart is closed::

        sage: P = Polyhedron(vertices=[[0, 0], [1, 2], [3, 4]]); P
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
        sage: McP = ManifoldSubsetPullback(c_cart, P, name='McP'); McP
        Subset McP of the 2-dimensional topological manifold R^2
        sage: McP.is_closed()
        True

    The pullback of real vector subspaces under a chart is closed::

        sage: V = span([[1, 2]], RR); V
        Vector space of degree 2 and dimension 1 over Real Field with 53 bits of precision
        Basis matrix:
        [1.00000000000000 2.00000000000000]
        sage: McV = ManifoldSubsetPullback(c_cart, V, name='McV'); McV
        Subset McV of the 2-dimensional topological manifold R^2
        sage: McV.is_closed()
        True

    The pullback of point lattices under a chart is closed::

        sage: W = span([[1, 0], [3, 5]], ZZ); W
        Free module of degree 2 and rank 2 over Integer Ring
        Echelon basis matrix:
        [1 0]
        [0 5]
        sage: McW = ManifoldSubsetPullback(c_cart, W, name='McW'); McW
        Subset McW of the 2-dimensional topological manifold R^2
        sage: McW.is_closed()
        True

    The pullback of finite sets is closed::

        sage: F = Family([vector(QQ, [1, 2], immutable=True), vector(QQ, [2, 3], immutable=True)])
        sage: McF = ManifoldSubsetPullback(c_cart, F, name='McF'); McF
        Subset McF of the 2-dimensional topological manifold R^2
        sage: McF.is_closed()
        True
    """
    if self.manifold().dimension() == 0:
        # every subset of a 0-dimensional manifold is closed
        return True
    if isinstance(self._codomain_subset, ManifoldSubset):
        if self._codomain_subset.is_closed():
            # known closed
            return True
    elif isinstance(self._codomain_subset, RealSet):
        # RealSet can decide closedness authoritatively
        return self._codomain_subset.is_closed()
    elif isinstance(self._codomain_subset, sage.geometry.abc.Polyhedron):
        # Regardless of their base_ring, we treat polyhedra as closed
        # convex subsets of R^n
        return True
    elif is_FreeModule(self._codomain_subset) and self._codomain_subset.rank() != infinity:
        if self._codomain_subset.base_ring() in MetricSpaces().Complete():
            # Closed topological vector subspace
            return True
        if self._codomain_subset.base_ring() == ZZ:
            if self._codomain_subset.coordinate_ring().is_subring(QQ):
                # Discrete subgroup of R^n
                return True
            if self._codomain_subset.rank() == self._codomain_subset.base_extend(RR).dimension():
                # Discrete subgroup of R^n
                return True
    elif self._codomain_subset in Sets().Finite():
        # finite sets of points are closed
        return True
    else:
        if hasattr(self._codomain_subset, 'is_topologically_closed'):
            try:
                from ppl import NNC_Polyhedron, C_Polyhedron
            except ImportError:
                pass
            else:
                if isinstance(self._codomain_subset, (NNC_Polyhedron, C_Polyhedron)):
                    # ppl polyhedra can decide closedness authoritatively
                    return self._codomain_subset.is_topologically_closed()
    return super().is_closed()
def closure(self, name=None, latex_name=None):
    """
    Return the topological closure of ``self`` in the manifold.

    Because ``self`` is a pullback of some subset under a continuous map,
    the closure of ``self`` is the pullback of the closure.

    EXAMPLES::

        sage: from sage.manifolds.subsets.pullback import ManifoldSubsetPullback
        sage: M = Manifold(2, 'R^2', structure='topological')
        sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2
        sage: r_squared = M.scalar_field(x^2+y^2)
        sage: r_squared.set_immutable()
        sage: I = RealSet.open_closed(1, 2); I
        (1, 2]
        sage: O = ManifoldSubsetPullback(r_squared, I); O
        Subset f_inv_(1, 2] of the 2-dimensional topological manifold R^2
        sage: cl_O = O.closure(); cl_O
        Subset f_inv_[1, 2] of the 2-dimensional topological manifold R^2
        sage: cl_O.is_closed()
        True
    """
    if self.is_closed():
        # a closed set is its own closure
        return self
    try:
        closed_codomain = self._codomain_subset.closure()
    except AttributeError:
        # the codomain subset cannot compute its closure; fall back
        return super().closure()
    result = ManifoldSubsetPullback(self._map, closed_codomain,
                                    inverse=self._inverse,
                                    name=name, latex_name=latex_name)
    result.declare_superset(self)
    return result
|
164030
|
import functools
from btypes.big_endian import *
import gx
from j3d.material import *
import j3d.string_table
import logging
logger = logging.getLogger(__name__)
# Index fields in the MAT3 section use 0xFF / 0xFFFF as "no value"
# sentinels; NoneableConverter maps those sentinels to/from None.
index8 = NoneableConverter(uint8,0xFF)
index16 = NoneableConverter(uint16,0xFFFF)
class Header(Struct):
    """Header of the MAT3 (material) section.

    The ``*_offset`` fields locate the per-attribute data arrays; they are
    relative to the start of the section (see ``SectionPacker.seek``/``tell``).
    ``SectionPacker.pack_pool`` writes 0 for an absent pool, so presumably
    an offset of 0 means "not present" -- TODO confirm against the unpacker.
    """
    magic = ByteString(4)
    section_size = uint16 if False else uint32  # NOTE(review): placeholder? no -- see below
    material_count = uint16
    __padding__ = Padding(2)
    entry_offset = uint32
    entry_index_offset = uint32
    name_offset = uint32
    indirect_entry_offset = uint32
    cull_mode_offset = uint32
    material_color_offset = uint32
    channel_count_offset = uint32
    lighting_mode_offset = uint32
    ambient_color_offset = uint32
    light_offset = uint32
    texcoord_generator_count_offset = uint32
    texcoord_generator_offset = uint32
    unknown2_offset = uint32
    texture_matrix_offset = uint32
    unknown3_offset = uint32
    texture_index_offset = uint32
    tev_order_offset = uint32
    tev_color_offset = uint32
    kcolor_offset = uint32
    tev_stage_count_offset = uint32
    tev_combiner_offset = uint32
    swap_mode_offset = uint32
    swap_table_offset = uint32
    fog_offset = uint32
    alpha_test_offset = uint32
    blend_mode_offset = uint32
    depth_mode_offset = uint32
    depth_test_early_offset = uint32
    dither_offset = uint32
    unknown5_offset = uint32

    def __init__(self):
        self.magic = b'MAT3'
        # the unknown2/unknown3 arrays are never written, so their offsets
        # default to 0
        self.unknown2_offset = 0
        self.unknown3_offset = 0

    @classmethod
    def unpack(cls,stream):
        header = super().unpack(stream)
        if header.magic != b'MAT3':
            raise FormatError('invalid magic')
        if header.unknown2_offset != 0:
            logger.warning('unknown2_offset different from default')
        if header.unknown3_offset != 0:
            logger.warning('unknown3_offset different from default')
        return header
class ColorS16(Color,replace_fields=True):
    """Color with signed 16-bit components (used for the tev_color pool)."""
    r = sint16
    g = sint16
    b = sint16
    a = sint16
class TevCombiner(Struct):
    """Color and alpha combiner settings for one TEV stage."""
    unknown0 = uint8   # observed as 0xFF in known files
    color_mode = TevColorMode
    alpha_mode = TevAlphaMode
    unknown1 = uint8   # observed as 0xFF in known files

    @classmethod
    def unpack(cls,stream):
        tev_combiner = super().unpack(stream)
        # warn, but do not fail, on unexpected values of the unknown fields
        if tev_combiner.unknown0 != 0xFF:
            logger.warning('tev combiner unknown0 different from default')
        if tev_combiner.unknown1 != 0xFF:
            logger.warning('tev combiner unknown1 different from default')
        return tev_combiner
class TevOrder(Struct):
    """Arguments to GXSetTevOrder."""
    texcoord = EnumConverter(uint8,gx.TexCoord)
    texture = EnumConverter(uint8,gx.Texture)
    color = EnumConverter(uint8,gx.Channel)
    __padding__ = Padding(1)  # pad the record to 4 bytes
class SwapMode(Struct):
    """Arguments to GXSetTevSwapMode."""
    color_swap_table = EnumConverter(uint8,gx.SwapTable)
    texture_swap_table = EnumConverter(uint8,gx.SwapTable)
    __padding__ = Padding(2)  # pad the record to 4 bytes
class TevIndirect(Struct):
    """Arguments to GXSetTevIndirect."""
    indirect_stage = EnumConverter(uint8,gx.IndirectStage)
    indirect_format = EnumConverter(uint8,gx.IndirectFormat)
    indirect_bias_components = EnumConverter(uint8,gx.IndirectBiasComponents)
    indirect_matrix = EnumConverter(uint8,gx.IndirectMatrix)
    wrap_s = EnumConverter(uint8,gx.IndirectWrap)
    wrap_t = EnumConverter(uint8,gx.IndirectWrap)
    add_previous_texcoord = bool8
    use_original_lod = bool8
    bump_alpha = EnumConverter(uint8,gx.IndirectBumpAlpha)
    __padding__ = Padding(3)  # pad the record to 12 bytes
class IndirectOrder(Struct):
    """Arguments to GXSetIndTexOrder."""
    texcoord = EnumConverter(uint8,gx.TexCoord)
    texture = EnumConverter(uint8,gx.Texture)
    __padding__ = Padding(2)  # pad the record to 4 bytes
class IndirectTexCoordScale(Struct):
    """Arguments to GXSetIndTexCoordScale."""
    scale_s = EnumConverter(uint8,gx.IndirectScale)
    scale_t = EnumConverter(uint8,gx.IndirectScale)
    __padding__ = Padding(2)  # pad the record to 4 bytes
class ChannelEntry(Struct):
    """Pair of indices into the lighting mode pool for one color channel."""
    color_mode_index = index16
    alpha_mode_index = index16

    def __init__(self):
        # None packs as the 0xFFFF sentinel: no lighting mode assigned
        self.color_mode_index = None
        self.alpha_mode_index = None
class Entry(Struct):
    """One material record of the MAT3 section.

    Almost every field is an index (or array of indices) into one of the
    shared data pools built by ``SectionPacker``; ``None`` packs as the
    0xFF/0xFFFF sentinel (see ``index8``/``index16``).  Only the constant
    color/alpha selections and the unknown arrays are stored inline.
    """
    unknown0 = uint8
    cull_mode_index = index8
    channel_count_index = index8
    texcoord_generator_count_index = index8
    tev_stage_count_index = index8
    depth_test_early_index = index8
    depth_mode_index = index8
    dither_index = index8
    material_color_indices = Array(index16,2)
    channels = Array(ChannelEntry,2)
    ambient_color_indices = Array(index16,2)
    light_indices = Array(index16,8)
    texcoord_generator_indices = Array(index16,8)
    unknown2 = Array(uint16,8)
    texture_matrix_indices = Array(index16,10)
    unknown3 = Array(uint16,20)
    texture_index_indices = Array(index16,8)
    kcolor_indices = Array(index16,4)
    constant_colors = Array(EnumConverter(uint8,gx.ConstantColor),16)
    constant_alphas = Array(EnumConverter(uint8,gx.ConstantAlpha),16)
    tev_order_indices = Array(index16,16)
    tev_color_indices = Array(index16,3)
    tev_color_previous_index = index16
    tev_combiner_indices = Array(index16,16)
    swap_mode_indices = Array(index16,16)
    swap_table_indices = Array(index16,4)
    unknown4 = Array(uint16,12)
    fog_index = index16
    alpha_test_index = index16
    blend_mode_index = index16
    unknown5_index = index16

    def __init__(self):
        # All pool references start unset; the unknown arrays start at the
        # values observed as defaults (and warned about in unpack()).
        self.unknown0 = 1
        self.cull_mode_index = None
        self.channel_count_index = None
        self.texcoord_generator_count_index = None
        self.tev_stage_count_index = None
        self.depth_test_early_index = None
        self.depth_mode_index = None
        self.dither_index = None
        self.material_color_indices = [None]*2
        self.channels = [ChannelEntry() for _ in range(2)]
        self.ambient_color_indices = [None]*2
        self.light_indices = [None]*8
        self.texcoord_generator_indices = [None]*8
        self.unknown2 = [0xFFFF]*8
        self.texture_matrix_indices = [None]*10
        self.unknown3 = [0xFFFF]*20
        self.texture_index_indices = [None]*8
        self.kcolor_indices = [None]*4
        self.constant_colors = [gx.TEV_KCSEL_1]*16
        self.constant_alphas = [gx.TEV_KASEL_1]*16
        self.tev_order_indices = [None]*16
        self.tev_color_indices = [None]*3
        self.tev_color_previous_index = None
        self.tev_combiner_indices = [None]*16
        self.swap_mode_indices = [None]*16
        self.swap_table_indices = [None]*4
        self.unknown4 = [0xFFFF]*12
        self.fog_index = None
        self.alpha_test_index = None
        self.blend_mode_index = None
        self.unknown5_index = None

    @classmethod
    def unpack(cls,stream):
        entry = super().unpack(stream)
        if entry.unknown2 != [0xFFFF]*8:
            logger.warning('unknown2 different from default')
        return entry

    def load_constant_colors(self,material):
        # Constant color selections are stored inline in the entry,
        # one slot per TEV stage.
        for i,stage in enumerate(material.tev_stages):
            self.constant_colors[i] = stage.constant_color

    def load_constant_alphas(self,material):
        for i,stage in enumerate(material.tev_stages):
            self.constant_alphas[i] = stage.constant_alpha

    def unload_constant_colors(self,material):
        # zip stops at the shorter sequence: only as many slots as the
        # material has TEV stages are copied back
        for stage,constant_color in zip(material.tev_stages,self.constant_colors):
            stage.constant_color = constant_color

    def unload_constant_alphas(self,material):
        for stage,constant_alpha in zip(material.tev_stages,self.constant_alphas):
            stage.constant_alpha = constant_alpha
class IndirectEntry(Struct):
    """Per-material indirect texturing state.

    ``load``/``unload`` copy the state field by field between a material
    (and its TEV/indirect stages) and this packed record.
    """
    has_lookup = uint8 # enable or indirect stage count?
    indirect_stage_count = uint8 # enable or indirect stage count?
    __padding__ = Padding(2)
    indirect_orders = Array(IndirectOrder,4)
    indirect_matrices = Array(IndirectMatrix,3)
    indirect_texcoord_scales = Array(IndirectTexCoordScale,4)
    tev_indirects = Array(TevIndirect,16)

    def __init__(self):
        self.tev_indirects = [TevIndirect() for _ in range(16)]
        self.indirect_orders = [IndirectOrder() for _ in range(4)]
        self.indirect_texcoord_scales = [IndirectTexCoordScale() for _ in range(4)]
        self.indirect_matrices = [IndirectMatrix() for _ in range(3)]

    @classmethod
    def unpack(cls,stream):
        indirect_entry = super().unpack(stream)
        #if indirect_entry.unknown0 != indirect_entry.unknown1 or indirect_entry.unknown0 not in {0,1}:
        #    raise FormatError('unsuported indirect texture entry unknown0 and unknown1')
        return indirect_entry

    def load(self,material):
        """Copy indirect texturing state from ``material`` into this record."""
        self.has_lookup = material.has_indirect_lookup
        self.indirect_stage_count = material.indirect_stage_count
        # zip stops at the shorter sequence: only the material's stages
        # are copied, the remaining slots keep their defaults
        for stage,tev_indirect in zip(material.tev_stages,self.tev_indirects):
            tev_indirect.indirect_stage = stage.indirect_stage
            tev_indirect.indirect_format = stage.indirect_format
            tev_indirect.indirect_bias_components = stage.indirect_bias_components
            tev_indirect.indirect_matrix = stage.indirect_matrix
            tev_indirect.wrap_s = stage.wrap_s
            tev_indirect.wrap_t = stage.wrap_t
            tev_indirect.add_previous_texcoord = stage.add_previous_texcoord
            tev_indirect.use_original_lod = stage.use_original_lod
            tev_indirect.bump_alpha = stage.bump_alpha
        for stage,order in zip(material.indirect_stages,self.indirect_orders):
            order.texcoord = stage.texcoord
            order.texture = stage.texture
        for stage,texcoord_scale in zip(material.indirect_stages,self.indirect_texcoord_scales):
            texcoord_scale.scale_s = stage.scale_s
            texcoord_scale.scale_t = stage.scale_t
        self.indirect_matrices = material.indirect_matrices

    def unload(self,material):
        """Copy indirect texturing state from this record into ``material``."""
        material.indirect_stage_count = self.indirect_stage_count
        material.has_indirect_lookup = self.has_lookup
        for tev_stage,tev_indirect in zip(material.tev_stages,self.tev_indirects):
            tev_stage.indirect_stage = tev_indirect.indirect_stage
            tev_stage.indirect_format = tev_indirect.indirect_format
            tev_stage.indirect_bias_components = tev_indirect.indirect_bias_components
            tev_stage.indirect_matrix = tev_indirect.indirect_matrix
            tev_stage.wrap_s = tev_indirect.wrap_s
            tev_stage.wrap_t = tev_indirect.wrap_t
            tev_stage.add_previous_texcoord = tev_indirect.add_previous_texcoord
            tev_stage.use_original_lod = tev_indirect.use_original_lod
            tev_stage.bump_alpha = tev_indirect.bump_alpha
        for stage,order in zip(material.indirect_stages,self.indirect_orders):
            stage.texcoord = order.texcoord
            stage.texture = order.texture
        for stage,texcoord_scale in zip(material.indirect_stages,self.indirect_texcoord_scales):
            stage.scale_s = texcoord_scale.scale_s
            stage.scale_t = texcoord_scale.scale_t
        material.indirect_matrices = self.indirect_matrices
class Pool:
    """An order-preserving pool of deduplicated values.

    Indexing with a value (``pool[value]``) returns the index of the first
    stored value that compares equal under ``equal_predicate``; if none
    matches, the value is appended and its new index is returned.

    ``element_type`` is only stored for the benefit of callers (e.g.
    ``SectionPacker.pack_pool`` uses it to serialize the pooled values).
    """

    def __init__(self, element_type, values=tuple(), equal_predicate=None):
        self.element_type = element_type
        self.values = list(values)
        if equal_predicate is not None:
            # instance attribute shadows the default staticmethod below
            self.equal_predicate = equal_predicate

    def __getitem__(self, value):
        # Linear scan: equal_predicate can be arbitrary (and the values
        # unhashable), so a hash-based lookup is not possible in general.
        for index, existing in enumerate(self.values):
            if self.equal_predicate(value, existing):
                return index
        self.values.append(value)
        return len(self.values) - 1

    def __iter__(self):
        yield from self.values

    @staticmethod
    def equal_predicate(a, b):
        """Default equality test: plain ``==``."""
        return a == b
class ArrayUnpacker:
    """Lazy random-access reader for a packed array located at ``offset``
    in ``stream``.

    Indexing seeks to the element's position and unpacks a single
    ``element_type`` value; nothing is cached, so elements may be read in
    any order and repeatedly.
    """

    def __init__(self, stream, offset, element_type):
        self.stream = stream
        self.offset = offset
        self.element_type = element_type

    def __getitem__(self, index):
        element = self.element_type
        self.stream.seek(self.offset + index * element.sizeof())
        return element.unpack(self.stream)
def partial_call(func):
    """Decorator that splits a call to ``func`` into two stages.

    The decorated ``func(*args, **kwargs)`` returns
    ``functools.partial(func, *args, **kwargs)``; the partial is then
    called with the remaining arguments.  Used below to write
    parameterized decorators such as ``pool_loader``.
    """
    # functools.wraps preserves the factory's name/docstring (the original
    # anonymous wrapper hid them), matching the file's use of wraps elsewhere.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return functools.partial(func, *args, **kwargs)
    return wrapper
@partial_call
def pool_loader(element_type, load_function, **kwargs):
    """Build a ``pool_*`` method for ``SectionPacker``.

    The returned method creates one Pool of ``element_type`` (initialized
    with ``kwargs``), runs ``load_function(pool, material, entry)`` for
    every (material, entry) pair, and returns the filled pool.
    """
    @functools.wraps(load_function)
    def method(self, materials, entries):
        shared_pool = Pool(element_type, **kwargs)
        for material, entry in zip(materials, entries):
            load_function(shared_pool, material, entry)
        return shared_pool
    return method
@partial_call
def array_unloader(element_type, unload_function):
    """Build a section-unpacker method that reads pooled values back.

    The returned method wraps the array of ``element_type`` at ``offset``
    (via ``self.create_array`` -- defined on the unpacker class) and runs
    ``unload_function(array, material, entry)`` for every
    (material, entry) pair; results are written into the materials.
    """
    @functools.wraps(unload_function)
    def method(self, offset, materials, entries):
        source = self.create_array(offset, element_type)
        for material, entry in zip(materials, entries):
            unload_function(source, material, entry)
    return method
def equal_tev_combiner_and_swap_mode(a,b):
    # The tev_combiner and swap_mode pools are both filled with whole TEV
    # stage objects (see pool_tev_combiner / pool_swap_mode), so two stages
    # may only share a pool slot when BOTH their combiner fields and their
    # swap mode fields match.
    # NOTE(review): relies on TevCombiner.__eq__/SwapMode.__eq__ comparing
    # only their own fields and never returning NotImplemented -- confirm.
    return TevCombiner.__eq__(a,b) and SwapMode.__eq__(a,b)
class SectionPacker:
entry_type = Entry
def seek(self,offset):
self.stream.seek(self.base + offset)
def tell(self):
return self.stream.tell() - self.base
def pack(self,stream,materials):
self.stream = stream
self.base = stream.tell()
entries = [self.entry_type() for _ in range(len(materials))]
for material,entry in zip(materials,entries):
entry.unknown0 = material.unknown0
entry.unknown2 = material.unknown2
entry.unknown3 = material.unknown3
entry.unknown4 = material.unknown4
entry.load_constant_colors(material)
entry.load_constant_alphas(material)
cull_mode_pool = self.pool_cull_mode(materials,entries)
channel_count_pool = self.pool_channel_count(materials,entries)
material_color_pool = self.pool_material_color(materials,entries)
ambient_color_pool = self.pool_ambient_color(materials,entries)
lighting_mode_pool = self.pool_lighting_mode(materials,entries)
light_pool = self.pool_light(materials,entries)
texcoord_generator_count_pool = self.pool_texcoord_generator_count(materials,entries)
texcoord_generator_pool = self.pool_texcoord_generator(materials,entries)
texture_matrix_pool = self.pool_texture_matrix(materials,entries)
texture_index_pool = self.pool_texture_index(materials,entries)
tev_stage_count_pool = self.pool_tev_stage_count(materials,entries)
tev_order_pool = self.pool_tev_order(materials,entries)
tev_combiner_pool = self.pool_tev_combiner(materials,entries)
swap_mode_pool = self.pool_swap_mode(materials,entries)
tev_color_pool = self.pool_tev_color(materials,entries)
kcolor_pool = self.pool_kcolor(materials,entries)
swap_table_pool = self.pool_swap_table(materials,entries)
fog_pool = self.pool_fog(materials,entries)
alpha_test_pool = self.pool_alpha_test(materials,entries)
blend_mode_pool = self.pool_blend_mode(materials,entries)
depth_mode_pool = self.pool_depth_mode(materials,entries)
depth_test_early_pool = self.pool_depth_test_early(materials,entries)
dither_pool = self.pool_dither(materials,entries)
unknown5_pool = self.pool_unknown5(materials,entries)
entry_pool = Pool(self.entry_type)
entry_indices = [entry_pool[entry] for entry in entries]
header = Header()
header.material_count = len(materials)
stream.write(b'\x00'*Header.sizeof())
header.entry_offset = self.pack_pool(entry_pool)
header.entry_index_offset = self.tell()
for index in entry_indices:
uint16.pack(stream,index)
align(stream,4)
header.name_offset = self.tell()
j3d.string_table.pack(stream,(material.name for material in materials))
align(stream,4)
header.indirect_entry_offset = self.pack_indirect_entries(materials)
align(stream,4)
header.cull_mode_offset = self.pack_pool(cull_mode_pool)
header.material_color_offset = self.pack_pool(material_color_pool)
header.channel_count_offset = self.pack_pool(channel_count_pool)
align(stream,4)
header.lighting_mode_offset = self.pack_pool(lighting_mode_pool)
header.ambient_color_offset = self.pack_pool(ambient_color_pool)
header.light_offset = self.pack_pool(light_pool)
header.texcoord_generator_count_offset = self.pack_pool(texcoord_generator_count_pool)
align(stream,4)
header.texcoord_generator_offset = self.pack_pool(texcoord_generator_pool)
header.texture_matrix_offset = self.pack_pool(texture_matrix_pool)
header.texture_index_offset = self.pack_pool(texture_index_pool)
align(stream,4)
header.tev_order_offset = self.pack_pool(tev_order_pool)
header.tev_color_offset = self.pack_pool(tev_color_pool)
header.kcolor_offset = self.pack_pool(kcolor_pool)
header.tev_stage_count_offset = self.pack_pool(tev_stage_count_pool)
align(stream,4)
header.tev_combiner_offset = self.pack_pool(tev_combiner_pool)
header.swap_mode_offset = self.pack_pool(swap_mode_pool)
header.swap_table_offset = self.pack_pool(swap_table_pool)
header.fog_offset = self.pack_pool(fog_pool)
header.alpha_test_offset = self.pack_pool(alpha_test_pool)
header.blend_mode_offset = self.pack_pool(blend_mode_pool)
header.depth_mode_offset = self.pack_pool(depth_mode_pool)
header.depth_test_early_offset = self.pack_pool(depth_test_early_pool)
align(stream,4)
header.dither_offset = self.pack_pool(dither_pool)
align(stream,4)
header.unknown5_offset = self.pack_pool(unknown5_pool)
align(stream,0x20)
header.section_size = self.tell()
self.seek(0)
Header.pack(stream,header)
self.seek(header.section_size)
def pack_indirect_entries(self,materials):
offset = self.tell()
for material in materials:
indirect_entry = IndirectEntry()
indirect_entry.load(material)
IndirectEntry.pack(self.stream,indirect_entry)
return offset
def pack_pool(self,pool):
if pool is None: return 0
offset = self.tell()
for value in pool:
pool.element_type.pack(self.stream,value)
return offset
@pool_loader(EnumConverter(uint32,gx.CullMode),values=(gx.CULL_BACK,gx.CULL_FRONT,gx.CULL_NONE))
def pool_cull_mode(pool,material,entry):
entry.cull_mode_index = pool[material.cull_mode]
@pool_loader(uint8)
def pool_channel_count(pool,material,entry):
entry.channel_count_index = pool[material.channel_count]
@pool_loader(Color)
def pool_material_color(pool,material,entry):
for i,channel in enumerate(material.channels):
entry.material_color_indices[i] = pool[channel.material_color]
@pool_loader(Color)
def pool_ambient_color(pool,material,entry):
for i,channel in enumerate(material.channels):
entry.ambient_color_indices[i] = pool[channel.ambient_color]
@pool_loader(LightingMode)
def pool_lighting_mode(pool,material,entry):
for channel,channel_entry in zip(material.channels,entry.channels):
channel_entry.color_mode_index = pool[channel.color_mode]
channel_entry.alpha_mode_index = pool[channel.alpha_mode]
@pool_loader(Light)
def pool_light(pool,material,entry):
for i,light in enumerate(material.lights):
if light is None: continue
entry.light_indices[i] = pool[light]
@pool_loader(uint8)
def pool_texcoord_generator_count(pool,material,entry):
entry.texcoord_generator_count_index = pool[material.texcoord_generator_count]
@pool_loader(TexCoordGenerator)
def pool_texcoord_generator(pool,material,entry):
for i,generator in enumerate(material.enabled_texcoord_generators):
entry.texcoord_generator_indices[i] = pool[generator]
@pool_loader(TextureMatrix)
def pool_texture_matrix(pool,material,entry):
for i,matrix in enumerate(material.texture_matrices):
if matrix is None: continue
entry.texture_matrix_indices[i] = pool[matrix]
@pool_loader(uint16)
def pool_texture_index(pool,material,entry):
for i,index in enumerate(material.texture_indices):
if index is None: continue
entry.texture_index_indices[i] = pool[index]
@pool_loader(uint8)
def pool_tev_stage_count(pool,material,entry):
entry.tev_stage_count_index = pool[material.tev_stage_count]
@pool_loader(TevOrder,equal_predicate=TevOrder.__eq__)
def pool_tev_order(pool,material,entry):
for i,stage in enumerate(material.enabled_tev_stages):
entry.tev_order_indices[i] = pool[stage]
@pool_loader(TevCombiner,equal_predicate=equal_tev_combiner_and_swap_mode)
def pool_tev_combiner(pool,material,entry):
for i,stage in enumerate(material.enabled_tev_stages):
entry.tev_combiner_indices[i] = pool[stage]
    @pool_loader(SwapMode,equal_predicate=equal_tev_combiner_and_swap_mode)
    def pool_swap_mode(pool,material,entry):
        # See pool_tev_combiner: shared predicate keeps the two pools aligned.
        for i,stage in enumerate(material.enabled_tev_stages):
            entry.swap_mode_indices[i] = pool[stage]
    @pool_loader(ColorS16)
    def pool_tev_color(pool,material,entry):
        # The three TEV registers plus the "previous" register share one pool.
        for i,color in enumerate(material.tev_colors):
            entry.tev_color_indices[i] = pool[color]
        entry.tev_color_previous_index = pool[material.tev_color_previous]
    @pool_loader(Color)
    def pool_kcolor(pool,material,entry):
        # Konstant colors, one index per register.
        for i,color in enumerate(material.kcolors):
            entry.kcolor_indices[i] = pool[color]
    @pool_loader(SwapTable)
    def pool_swap_table(pool,material,entry):
        # Four channel-swap tables per material.
        for i,table in enumerate(material.swap_tables):
            entry.swap_table_indices[i] = pool[table]
    @pool_loader(Fog)
    def pool_fog(pool,material,entry):
        # Single fog block per material.
        entry.fog_index = pool[material.fog]
    @pool_loader(AlphaTest)
    def pool_alpha_test(pool,material,entry):
        # Single alpha-test block per material.
        entry.alpha_test_index = pool[material.alpha_test]
    @pool_loader(BlendMode)
    def pool_blend_mode(pool,material,entry):
        # Single blend-mode block per material.
        entry.blend_mode_index = pool[material.blend_mode]
    @pool_loader(DepthMode)
    def pool_depth_mode(pool,material,entry):
        # Single depth-mode block per material.
        entry.depth_mode_index = pool[material.depth_mode]
    @pool_loader(bool8,values=(False,True))
    def pool_depth_test_early(pool,material,entry):
        # Pool is pre-seeded with both boolean values, so lookup always hits.
        entry.depth_test_early_index = pool[material.depth_test_early]
    @pool_loader(bool8,values=(False,True))
    def pool_dither(pool,material,entry):
        # Pool is pre-seeded with both boolean values, so lookup always hits.
        entry.dither_index = pool[material.dither]
    @pool_loader(UnknownStruct5)
    def pool_unknown5(pool,material,entry):
        # Purpose of this structure is not yet known; pooled verbatim.
        entry.unknown5_index = pool[material.unknown5]
class SectionUnpacker:
    """Reads a material section: a header, a table of per-material entry
    indices, the deduplicated entry records, and a set of offset-addressed
    value arrays that the entries index into.

    The @array_unloader-decorated methods are written as
    (array, material, entry) bodies; the decorator presumably supplies the
    decoded array for the given offset and iterates material/entry pairs
    (see array_unloader) — TODO confirm against its definition.
    """
    entry_type = Entry
    def seek(self,offset):
        # All header offsets are relative to the start of this section.
        self.stream.seek(self.base + offset)
    def unpack(self,stream):
        """Unpack the whole section and return the list of Materials."""
        self.stream = stream
        self.base = stream.tell()
        header = Header.unpack(stream)
        materials = [Material() for _ in range(header.material_count)]
        # Entries are deduplicated on disk: materials map onto entries via
        # an index table, so the number of stored entries is max(index)+1.
        self.seek(header.entry_index_offset)
        entry_indices = [uint16.unpack(stream) for _ in range(header.material_count)]
        entry_count = max(entry_indices) + 1
        self.seek(header.entry_offset)
        entries = [self.entry_type.unpack(stream) for _ in range(entry_count)]
        # Expand back to one (possibly shared) entry per material.
        entries = [entries[i] for i in entry_indices]
        for material,entry in zip(materials,entries):
            # Fields stored directly on the entry rather than in an array.
            material.unknown0 = entry.unknown0
            material.unknown2 = entry.unknown2
            material.unknown3 = entry.unknown3
            material.unknown4 = entry.unknown4
            entry.unload_constant_colors(material)
            entry.unload_constant_alphas(material)
        self.seek(header.name_offset)
        names = j3d.string_table.unpack(stream)
        for material,name in zip(materials,names):
            material.name = name
        # Array-backed fields. NOTE: ordering matters at least for texcoord
        # generators: unpack_texcoord_generator reads
        # material.texcoord_generator_count, set two calls earlier.
        self.unpack_indirect_entries(header.indirect_entry_offset,materials)
        self.unpack_cull_mode(header.cull_mode_offset,materials,entries)
        self.unpack_channel_count(header.channel_count_offset,materials,entries)
        self.unpack_material_color(header.material_color_offset,materials,entries)
        self.unpack_ambient_color(header.ambient_color_offset,materials,entries)
        self.unpack_lighting_mode(header.lighting_mode_offset,materials,entries)
        self.unpack_light(header.light_offset,materials,entries)
        self.unpack_texcoord_generator_count(header.texcoord_generator_count_offset,materials,entries)
        self.unpack_texcoord_generator(header.texcoord_generator_offset,materials,entries)
        self.unpack_texture_matrix(header.texture_matrix_offset,materials,entries)
        self.unpack_texture_index(header.texture_index_offset,materials,entries)
        self.unpack_tev_stage_count(header.tev_stage_count_offset,materials,entries)
        self.unpack_tev_order(header.tev_order_offset,materials,entries)
        self.unpack_tev_combiner(header.tev_combiner_offset,materials,entries)
        self.unpack_swap_mode(header.swap_mode_offset,materials,entries)
        self.unpack_tev_color(header.tev_color_offset,materials,entries)
        self.unpack_kcolor(header.kcolor_offset,materials,entries)
        self.unpack_swap_table(header.swap_table_offset,materials,entries)
        self.unpack_fog(header.fog_offset,materials,entries)
        self.unpack_alpha_test(header.alpha_test_offset,materials,entries)
        self.unpack_blend_mode(header.blend_mode_offset,materials,entries)
        self.unpack_depth_mode(header.depth_mode_offset,materials,entries)
        self.unpack_depth_test_early(header.depth_test_early_offset,materials,entries)
        self.unpack_dither(header.dither_offset,materials,entries)
        self.unpack_unknown5(header.unknown5_offset,materials,entries)
        # Leave the stream positioned at the end of the section.
        self.seek(header.section_size)
        return materials
    def unpack_indirect_entries(self,offset,materials):
        # One indirect-texturing entry per material, stored consecutively.
        self.seek(offset)
        for material in materials:
            indirect_entry = IndirectEntry.unpack(self.stream)
            indirect_entry.unload(material)
    def create_array(self,offset,element_type):
        # A zero offset marks an absent array.
        if offset == 0: return None
        return ArrayUnpacker(self.stream,self.base + offset,element_type)
    @array_unloader(EnumConverter(uint32,gx.CullMode))
    def unpack_cull_mode(array,material,entry):
        material.cull_mode = array[entry.cull_mode_index]
    @array_unloader(uint8)
    def unpack_channel_count(array,material,entry):
        material.channel_count = array[entry.channel_count_index]
    @array_unloader(Color)
    def unpack_material_color(array,material,entry):
        # A None index means the channel keeps its default color.
        for channel,index in zip(material.channels,entry.material_color_indices):
            if index is None:
                continue
            channel.material_color = array[index]
    @array_unloader(Color)
    def unpack_ambient_color(array,material,entry):
        for channel,index in zip(material.channels,entry.ambient_color_indices):
            if index is None:
                continue
            channel.ambient_color = array[index]
    @array_unloader(LightingMode)
    def unpack_lighting_mode(array,material,entry):
        for channel,channel_entry in zip(material.channels,entry.channels):
            if channel_entry.color_mode_index is not None:
                channel.color_mode = array[channel_entry.color_mode_index]
            if channel_entry.alpha_mode_index is not None:
                channel.alpha_mode = array[channel_entry.alpha_mode_index]
    @array_unloader(Light)
    def unpack_light(array,material,entry):
        for i,index in enumerate(entry.light_indices):
            if index is None: continue
            material.lights[i] = array[index]
    @array_unloader(uint8)
    def unpack_texcoord_generator_count(array,material,entry):
        material.texcoord_generator_count = array[entry.texcoord_generator_count_index]
    @array_unloader(TexCoordGenerator)
    def unpack_texcoord_generator(array,material,entry):
        # Relies on texcoord_generator_count having been unpacked already.
        for i in range(material.texcoord_generator_count):
            material.texcoord_generators[i] = array[entry.texcoord_generator_indices[i]]
    @array_unloader(TextureMatrix)
    def unpack_texture_matrix(array,material,entry):
        for i,index in enumerate(entry.texture_matrix_indices):
            if index is None: continue
            material.texture_matrices[i] = array[index]
    @array_unloader(uint16)
    def unpack_texture_index(array,material,entry):
        for i,index in enumerate(entry.texture_index_indices):
            if index is None: continue
            material.texture_indices[i] = array[index]
    @array_unloader(uint8)
    def unpack_tev_stage_count(array,material,entry):
        material.tev_stage_count = array[entry.tev_stage_count_index]
    @array_unloader(TevOrder)
    def unpack_tev_order(array,material,entry):
        # TevOrder values are copied field-by-field onto each enabled stage.
        for stage,index in zip(material.enabled_tev_stages,entry.tev_order_indices):
            tev_order = array[index]
            stage.texcoord = tev_order.texcoord
            stage.texture = tev_order.texture
            stage.color = tev_order.color
    @array_unloader(TevCombiner)
    def unpack_tev_combiner(array,material,entry):
        for stage,index in zip(material.enabled_tev_stages,entry.tev_combiner_indices):
            tev_combiner = array[index]
            stage.unknown0 = tev_combiner.unknown0
            stage.color_mode = tev_combiner.color_mode
            stage.alpha_mode = tev_combiner.alpha_mode
            stage.unknown1 = tev_combiner.unknown1
    @array_unloader(SwapMode)
    def unpack_swap_mode(array,material,entry):
        for stage,index in zip(material.enabled_tev_stages,entry.swap_mode_indices):
            swap_mode = array[index]
            stage.color_swap_table = swap_mode.color_swap_table
            stage.texture_swap_table = swap_mode.texture_swap_table
    @array_unloader(ColorS16)
    def unpack_tev_color(array,material,entry):
        for i,index in enumerate(entry.tev_color_indices):
            if index is None:
                continue
            material.tev_colors[i] = array[index]
        if entry.tev_color_previous_index is not None:
            material.tev_color_previous = array[entry.tev_color_previous_index]
    @array_unloader(Color)
    def unpack_kcolor(array,material,entry):
        for i,index in enumerate(entry.kcolor_indices):
            if index is None:
                continue
            material.kcolors[i] = array[index]
    @array_unloader(SwapTable)
    def unpack_swap_table(array,material,entry):
        for i,index in enumerate(entry.swap_table_indices):
            if index is None:
                continue
            material.swap_tables[i] = array[index]
    @array_unloader(Fog)
    def unpack_fog(array,material,entry):
        material.fog = array[entry.fog_index]
    @array_unloader(AlphaTest)
    def unpack_alpha_test(array,material,entry):
        material.alpha_test = array[entry.alpha_test_index]
    @array_unloader(BlendMode)
    def unpack_blend_mode(array,material,entry):
        material.blend_mode = array[entry.blend_mode_index]
    @array_unloader(DepthMode)
    def unpack_depth_mode(array,material,entry):
        material.depth_mode = array[entry.depth_mode_index]
    @array_unloader(bool8)
    def unpack_depth_test_early(array,material,entry):
        material.depth_test_early = array[entry.depth_test_early_index]
    @array_unloader(bool8)
    def unpack_dither(array,material,entry):
        material.dither = array[entry.dither_index]
    @array_unloader(UnknownStruct5)
    def unpack_unknown5(array,material,entry):
        material.unknown5 = array[entry.unknown5_index]
class AmbientSourceSVR0:
    """Ambient-source field as stored by SVR0 files: always the 0xFF
    sentinel on disk, always gx.SRC_REG in memory."""
    @staticmethod
    def pack(stream,value):
        # The stored byte is constant; the in-memory value is ignored.
        uint8.pack(stream,0xFF)
    @staticmethod
    def unpack(stream):
        raw = uint8.unpack(stream)
        if raw == 0xFF:
            return gx.SRC_REG
        raise FormatError('invalid ambient source for SVR0')
    @staticmethod
    def sizeof():
        return uint8.sizeof()
class ConstantColorSVR0:
    """Konstant-color selector as stored by SVR0: 0xFF on disk encodes the
    absent value, which maps to gx.TEV_KCSEL_1 in memory."""
    @staticmethod
    def pack(stream,value):
        if value is None:
            value = 0xFF
        uint8.pack(stream,value)
    @staticmethod
    def unpack(stream):
        raw = uint8.unpack(stream)
        if raw == 0xFF:
            return gx.TEV_KCSEL_1
        return gx.ConstantColor(raw)
    @staticmethod
    def sizeof():
        return uint8.sizeof()
class ConstantAlphaSVR0:
    """Konstant-alpha selector as stored by SVR0: 0xFF on disk encodes the
    absent value, which maps to gx.TEV_KASEL_1 in memory."""
    @staticmethod
    def pack(stream,value):
        if value is None:
            value = 0xFF
        uint8.pack(stream,value)
    @staticmethod
    def unpack(stream):
        raw = uint8.unpack(stream)
        if raw == 0xFF:
            return gx.TEV_KASEL_1
        return gx.ConstantAlpha(raw)
    @staticmethod
    def sizeof():
        return uint8.sizeof()
class LightingModeSVR0(LightingMode,replace_fields=True):
    # SVR0 stores the ambient source as a constant 0xFF sentinel.
    ambient_source = AmbientSourceSVR0
class EntrySVR0(Entry,replace_fields=True):
    """Material entry for the SVR0 variant: several optional arrays are
    absent, so their indices must hold fixed values, and the constant
    color/alpha selectors use the SVR0 0xFF-sentinel encoding."""
    constant_colors = Array(ConstantColorSVR0,16)
    constant_alphas = Array(ConstantAlphaSVR0,16)
    def __init__(self):
        super().__init__()
        # SVR0 always references the four fixed kcolor slots in order.
        self.kcolor_indices = [0,1,2,3]
        self.constant_colors = [None]*16
        self.constant_alphas = [None]*16
    @classmethod
    def unpack(cls,stream):
        """Unpack an entry and validate the SVR0 fixed-field invariants."""
        entry = super().unpack(stream)
        # Fields backed by arrays that SVR0 omits must be "absent" (None),
        # otherwise the file is not a valid SVR0 section.
        if entry.ambient_color_indices != [None]*2:
            raise FormatError('invalid ambient color indices for SVR0')
        if entry.light_indices != [None]*8:
            raise FormatError('invalid light indices for SVR0')
        if entry.texture_matrix_indices != [None]*10:
            raise FormatError('invalid texture matrix indices for SVR0')
        if entry.swap_mode_indices != [None]*16:
            raise FormatError('invalid swap mode indices for SVR0')
        if entry.tev_color_indices != [None]*3:
            raise FormatError('invalid tev color indices for SVR0')
        if entry.tev_color_previous_index is not None:
            raise FormatError('invalid tev color previous index for SVR0')
        if entry.kcolor_indices != [0,1,2,3]:
            raise FormatError('invalid kcolor indices for SVR0')
        if entry.swap_table_indices != [None]*4:
            raise FormatError('invalid swap table indices for SVR0')
        if entry.fog_index is not None:
            raise FormatError('invalid fog index for SVR0')
        if entry.dither_index is not None:
            raise FormatError('invalid dither index for SVR0')
        if entry.unknown5_index is not None:
            raise FormatError('invalid unknown5 index for SVR0')
        # Non-default unknown3 is tolerated but flagged for investigation.
        if entry.unknown3 != [0xFFFF]*20:
            logger.warning('unknown3 different from default for SVR0')
        return entry
    def load_constant_colors(self,material):
        # Copy each enabled stage's selector; unused slots stay None (0xFF).
        for i,stage in enumerate(material.enabled_tev_stages):
            self.constant_colors[i] = stage.constant_color
    def load_constant_alphas(self,material):
        for i,stage in enumerate(material.enabled_tev_stages):
            self.constant_alphas[i] = stage.constant_alpha
class SectionPackerSVR0(SectionPacker):
    """Packer for the SVR0 material-section variant.

    Pools that SVR0 omits are overridden to return None (their offsets are
    then written as 0), and a few pools use SVR0-specific element types or
    iterate only the enabled channels/stages.
    NOTE(review): some overrides name their parameter `material` although
    the base class passes the full materials list — confirm and unify.
    """
    entry_type = EntrySVR0
    def pack_indirect_entries(self,materials):
        # SVR0 has no indirect-texturing entries; offset 0 marks absence.
        return 0
    def pool_ambient_color(self,materials,entries):
        return None
    def pool_light(self,materials,entries):
        return None
    def pool_texture_matrix(self,materials,entries):
        return None
    @pool_loader(TevCombiner,equal_predicate=TevCombiner.__eq__)
    def pool_tev_combiner(pool,material,entry):
        # Unlike the base packer, combiners are compared on their own
        # (no shared predicate with swap modes, which SVR0 omits).
        for i,stage in enumerate(material.enabled_tev_stages):
            entry.tev_combiner_indices[i] = pool[stage]
    def pool_swap_mode(self,material,entries):
        return None
    def pool_tev_color(self,material,entries):
        return None
    def pool_swap_table(self,material,entries):
        return None
    def pool_fog(self,material,entries):
        return None
    def pool_dither(self,material,entries):
        return None
    def pool_unknown5(self,material,entries):
        return None
    @pool_loader(EnumConverter(uint32,gx.CullMode))
    def pool_cull_mode(pool,material,entry):
        entry.cull_mode_index = pool[material.cull_mode]
    @pool_loader(Color)
    def pool_material_color(pool,material,entry):
        # SVR0 only stores colors for the enabled channels.
        for i,channel in enumerate(material.enabled_channels):
            entry.material_color_indices[i] = pool[channel.material_color]
    @pool_loader(LightingModeSVR0)
    def pool_lighting_mode(pool,material,entry):
        for channel,channel_entry in zip(material.enabled_channels,entry.channels):
            channel_entry.color_mode_index = pool[channel.color_mode]
            channel_entry.alpha_mode_index = pool[channel.alpha_mode]
    def pool_kcolor(self,materials,entries):
        # SVR0 always writes four opaque-white kcolors.
        return Pool(Color,[Color(0xFF,0xFF,0xFF,0xFF)]*4)
    @pool_loader(bool8)
    def pool_depth_test_early(pool,material,entry):
        entry.depth_test_early_index = pool[material.depth_test_early]
class SectionUnpackerSVR0(SectionUnpacker):
    """Unpacker for the SVR0 material-section variant.

    Arrays that SVR0 omits must have a zero offset in the header; the
    overrides below validate that and read nothing. Other overrides adapt
    the element types / channel iteration to the SVR0 encoding.
    NOTE(review): some overrides name their parameter `material` although
    the base class passes the full materials list — confirm and unify.
    """
    entry_type = EntrySVR0
    def unpack_indirect_entries(self,offset,materials):
        if offset != 0:
            raise FormatError('invalid indirect entry offset for SVR0')
    def unpack_ambient_color(self,offset,materials,entries):
        if offset != 0:
            raise FormatError('invalid ambient color offset for SVR0')
    def unpack_light(self,offset,materials,entries):
        if offset != 0:
            raise FormatError('invalid light offset for SVR0')
    def unpack_texture_matrix(self,offset,materials,entries):
        if offset != 0:
            raise FormatError('invalid texture matrix offset for SVR0')
        # (removed a redundant `assert offset == 0` — the raise above already
        # guarantees it, so the assert could never fail)
    def unpack_swap_mode(self,offset,material,entries):
        if offset != 0:
            raise FormatError('invalid swap mode offset for SVR0')
    def unpack_tev_color(self,offset,material,entries):
        if offset != 0:
            raise FormatError('invalid tev color offset for SVR0')
    def unpack_swap_table(self,offset,material,entries):
        if offset != 0:
            raise FormatError('invalid swap table offset for SVR0')
    def unpack_fog(self,offset,material,entries):
        if offset != 0:
            raise FormatError('invalid fog offset for SVR0')
    def unpack_dither(self,offset,material,entries):
        if offset != 0:
            raise FormatError('invalid dither offset for SVR0')
    def unpack_unknown5(self,offset,material,entries):
        if offset != 0:
            raise FormatError('invalid unknown5 offset for SVR0')
    @array_unloader(Color)
    def unpack_material_color(array,material,entry):
        # Only the enabled channels have stored colors in SVR0.
        for channel,index in zip(material.enabled_channels,entry.material_color_indices):
            channel.material_color = array[index]
    @array_unloader(LightingModeSVR0)
    def unpack_lighting_mode(array,material,entry):
        for channel,channel_entry in zip(material.enabled_channels,entry.channels):
            channel.color_mode = array[channel_entry.color_mode_index]
            channel.alpha_mode = array[channel_entry.alpha_mode_index]
    def unpack_kcolor(self,offset,materials,entries):
        # SVR0 requires exactly four opaque-white kcolors; nothing is
        # stored on the materials themselves.
        array = self.create_array(offset,Color)
        for i in range(4):
            if array[i] != Color(0xFF,0xFF,0xFF,0xFF):
                raise FormatError('invalid kcolor for SVR0')
def pack(stream,materials,subversion):
    """Pack *materials* as a material section, choosing the packer variant
    from the file's subversion tag.

    Unknown subversions fall back to the standard packer rather than
    raising (kept deliberately permissive).
    """
    # (removed a leftover debug `print(subversion)`)
    if subversion == b'SVR3':
        packer = SectionPacker()
    elif subversion == b'\xFF\xFF\xFF\xFF':
        packer = SectionPackerSVR0()
    else:
        #raise ValueError('invalid subversion')
        packer = SectionPacker()
    packer.pack(stream,materials)
def unpack(stream,subversion):
    """Unpack a material section, choosing the unpacker variant from the
    file's subversion tag and returning the list of Materials."""
    if subversion == b'SVR3':
        unpacker = SectionUnpacker()
    elif subversion == b'\xFF\xFF\xFF\xFF':
        unpacker = SectionUnpackerSVR0()
    else:
        # Unknown subversions fall back to the standard unpacker instead of
        # raising; kept deliberately permissive.
        #raise ValueError('invalid subversion')
        #print("Unusual subversion", subversion)
        unpacker = SectionUnpacker()
    return unpacker.unpack(stream)
|
164031
|
from itertools import chain
from uuid import uuid4
from django.db.models.signals import post_save, pre_save
from main import models as edd_models
from . import models
def campaign_check(sender, instance, raw, using, **kwargs):
    """pre_save handler for Campaign: fill in uuid, updated/created Update
    records, and slug before the row is written.

    *raw* is True when loading fixtures; database-touching helpers are
    skipped in that case.
    """
    # make sure there is a UUID set
    if instance.uuid is None:
        instance.uuid = uuid4()
    # log update, make sure created is set
    # .load_update() accesses database, do not run if raw is True
    if not raw:
        instance.updated = edd_models.Update.load_update()
        if instance.created_id is None:
            # first save: creation and update timestamps are the same record
            instance.created = instance.updated
    # make sure there is a slug
    # ._build_slug() accesses database, do not run if raw is True
    if instance.slug is None and not raw:
        instance.slug = instance._build_slug()
# ensure that signal is only connected once if this code runs again
dispatch = f"{campaign_check.__name__}:{models.Campaign.__name__}"
pre_save.connect(campaign_check, sender=models.Campaign, dispatch_uid=dispatch)
def campaign_update(sender, instance, raw, using, **kwargs):
    """post_save handler for Campaign: append the Update created in
    campaign_check to the campaign's update history."""
    # m2m relation must have IDs on both sides; has to be post_save
    instance.updates.add(instance.updated)
# ensure that signal is only connected once if this code runs again
dispatch = f"{campaign_update.__name__}:{models.Campaign.__name__}"
post_save.connect(campaign_update, sender=models.Campaign, dispatch_uid=dispatch)
def study_campaign_changed(sender, instance, raw, using, **kwargs):
    """post_save handler for CampaignMembership: when a study is linked to a
    campaign, copy every campaign-level permission onto that study."""
    # when change is made adding link campaign-study, apply all campaign permissions to study
    q = dict(campaign_id=instance.campaign_id)
    # all three permission scopes (user, group, everyone) are applied
    permissions = chain(
        models.UserPermission.objects.filter(**q),
        models.GroupPermission.objects.filter(**q),
        models.EveryonePermission.objects.filter(**q),
    )
    for p in permissions:
        p.apply_to_study(instance.study)
# ensure that signal is only connected once if this code runs again
dispatch = f"{study_campaign_changed.__name__}:{models.CampaignMembership}"
post_save.connect(
    study_campaign_changed, sender=models.CampaignMembership, dispatch_uid=dispatch
)
|
164035
|
import util as u
import re
import StringIO
def get_reg(invoke_param):
    """Yield each register token (v# or p#) found in the parameter string."""
    if invoke_param == '':
        return
    for token in re.finditer(r'(v|p)\d{1,3}', invoke_param):
        yield token.group()
def is_range(invoke_type):
    """Return True when the invoke type is a /range variant."""
    return re.search(r'range', invoke_type) is not None
def reg_range_count(reg_list):
    """Number of registers covered by an inclusive pair like ['v1', 'v5']."""
    low, high = (int(reg[1:]) for reg in reg_list[:2])
    return high - low + 1
def is_void(invoke_return):
    """True for the void return descriptor 'V'."""
    return invoke_return == 'V'
def is_wide(invoke_return):
    """True for 64-bit return descriptors: long ('J') or double ('D')."""
    return invoke_return in ('J', 'D')
def is_obj(invoke_return):
    """True for object ('Lpkg/Cls;') or array ('[') return descriptors."""
    return re.search(r'L([^;]*?);|\[|\[L([^;]*?);', invoke_return) is not None
def is_static(invoke_type):
    """True when the invoke type is a static call."""
    return re.search(r'static', invoke_type) is not None
def is_init(invoke_method):
    """True for constructors (<init>) and class initializers (<clinit>)."""
    return re.search(r'\<init\>|\<clinit\>', invoke_method) is not None
def change_match_line(smali_line, invoke_type, invoke_param, invoke_object, invoke_method, invoke_pass, invoke_return, class_name, new_method):
    """Change a method call.

    Python 2 code. Replaces an invoke-* instruction with a call to a new
    randomly named static wrapper method (printed to stdout in place of the
    original line) and appends that wrapper's smali source to *new_method*.
    """
    # Random name for the generated wrapper method.
    string_append = u.get_random(True, 15)
    is_range_value = is_range(invoke_type)
    is_static_value = is_static(invoke_type)
    reg_list = list(get_reg(invoke_param))
    # Register count: for /range invokes only the two endpoints are listed.
    if is_range_value:
        reg_count = reg_range_count(reg_list)
    else:
        reg_count = len(reg_list)
    is_void_value = is_void(invoke_return)
    is_wide_value = is_wide(invoke_return)
    is_obj_value = is_obj(invoke_return)
    # The wrapper needs one local for the result (two for wide, none for void).
    local_reg_count = 1
    if is_void_value:
        local_reg_count = 0
    if is_wide_value:
        local_reg_count = 2
    # Pick the return instruction matching the return descriptor.
    return_str = 'return v0'
    if is_void_value:
        return_str = 'return-void'
    if is_wide_value:
        return_str = 'return-wide v0'
    if is_obj_value:
        return_str = 'return-object v0'
    # Pick the move-result instruction matching the return descriptor.
    move_result_str = 'move-result v0'
    if is_void_value:
        move_result_str = ''
    if is_wide_value:
        move_result_str = 'move-result-wide v0'
    if is_obj_value:
        move_result_str = 'move-result-object v0'
    # Non-static originals receive the receiver as an extra first parameter.
    add_param = ''
    if not is_static_value:
        add_param = invoke_object
    invoke_new = 'invoke-static'
    if is_range_value:
        invoke_new += '/range'
    # Emit the rewritten call site (stdout is the rewritten smali file).
    print ' ' + invoke_new + ' {' + invoke_param + '}, ' + class_name + '->' + string_append + '(' + add_param + invoke_pass + ')' + invoke_return
    # Emit the wrapper method body into the buffer for later insertion.
    new_method.write('.method public static ' + string_append + '(' + add_param + invoke_pass + ')' + invoke_return + '\n')
    new_method.write(' .locals ' + str(local_reg_count) + '\n')
    new_method.write(' .prologue' + '\n')
    new_method.write('\n')
    new_method.write(' ' + invoke_type + ' {')
    if is_range_value:
        new_method.write('p0 .. p' + str(reg_count-1))
    else:
        for reg_index in range(0, reg_count):
            new_method.write('p' + str(reg_index))
            if reg_count != reg_index + 1:
                new_method.write(', ')
    new_method.write('}, ' + invoke_object + '->' + invoke_method + '(' + invoke_pass + ')' + invoke_return + '\n')
    new_method.write('\n')
    new_method.write(' ' + move_result_str + '\n')
    new_method.write('\n')
    new_method.write(' ' + return_str + '\n')
    new_method.write('.end method' + '\n')
def change_all_method(smali_file, new_method):
    """Redirect all the method calls.

    Python 2 code. Streams *smali_file* line by line (stdout is redirected
    into the file by u.open_file_input), rewriting every non-constructor
    invoke instruction through change_match_line.
    """
    for smali_line in u.open_file_input(smali_file):  # For each line
        class_match = re.search(r'^([ ]*?)\.class(.*?)(?P<className>L([^;]*?);)', smali_line)  # Match the class declaration
        if class_match is not None:
            # assumes the .class declaration precedes every invoke in the
            # file (standard smali layout) — class_name would otherwise be
            # unbound below; TODO confirm
            class_name = class_match.group('className')  # Find the class name
        invoke_match = re.search(r'^([ ]*?)(?P<invokeType>invoke\-([^ ]*?)) {(?P<invokeParam>([vp0-9,. ]*?))}, (?P<invokeObject>L(.*?);|\[L(.*?);)->(?P<invokeMethod>(.*?))\((?P<invokePass>(.*?))\)(?P<invokeReturn>(.*?))$', smali_line)  # Match a method invocation
        if invoke_match is not None:
            # Constructors cannot be wrapped in a static helper; leave them.
            if not is_init(invoke_match.group('invokeMethod')):
                change_match_line(smali_line, invoke_match.group('invokeType'), invoke_match.group('invokeParam'), invoke_match.group('invokeObject'), invoke_match.group('invokeMethod'), invoke_match.group('invokePass'), invoke_match.group('invokeReturn'), class_name, new_method)
            else:
                print smali_line,  # Print the line unchanged
        else:
            print smali_line,  # Print the line unchanged
def add_all_method(smali_file, new_method):
    """Add the indirection methods.

    Python 2 code. Re-streams *smali_file*, inserting the buffered wrapper
    methods right after the '# direct methods' marker.
    """
    for smali_line in u.open_file_input(smali_file):  # For each line
        if re.search(r'^([ ]*?)# direct methods', smali_line) is not None:  # Before the directs methods
            print smali_line,  # Print the line unchanged
            print new_method.getvalue()  # Print the generated wrapper methods
        else:
            print smali_line,  # Print the line unchanged
def change_all_file(smali_file_list):
    """Apply call indirection to every smali file.

    Each file is processed in two passes: first rewrite the call sites
    (collecting wrapper methods in a string buffer), then re-open the file
    and splice the wrappers in.
    """
    for smali_file in smali_file_list:  # For all smali file
        new_method = StringIO.StringIO()  # Initialize a string buffer
        change_all_method(smali_file, new_method)
        add_all_method(smali_file, new_method)
        new_method.close()  # Close the string buffer
def obfuscate():
    """The main obfuscator entry point: collect the app's smali files and
    run the call-indirection transform over all of them."""
    smali_file_list = u.load_smali_file()
    change_all_file(smali_file_list)
|
164053
|
from behave import given, when, then
from pages.dashboard import dashboard
@then(u'Dashboard Status shows correct values for row "{row}"')
def step_impl_dashboard_status(context, row):
    # Delegate verification of the given status-table row to the page object.
    dashboard.verify_status(row)
@then(u'Clicking on Status Refresh should refresh status component')
def step_impl_status_refresh(context):
    # Page object clicks refresh and checks the status component reloads.
    dashboard.verify_refresh()
@then(u'Dashboard Battery shows Battery or AC Power with correct icon.')
def step_impl_battery_status(context):
    # Page object validates the power-source label and its icon.
    dashboard.verify_battery_status()
@then(u'Clicking on Battery Refresh should refresh battery component')
def step_impl_battery_refresh(context):
    # Page object clicks refresh and checks the battery component reloads.
    dashboard.verify_battery_refresh()
@then(u'Dashboard Detector Settings shows correct values for row "{row}"')
def step_impl_detector_settings(context, row):
    # NOTE(review): 'veify_detector_setting' looks like a typo for
    # 'verify_detector_setting' — confirm against the dashboard page object
    # before renaming (the page object may carry the same typo).
    dashboard.veify_detector_setting(row)
@then(u'Dashboard GPS shows correct values for row "{row}"')
def step_impl_gps_settings(context, row):
    # Delegate verification of the given GPS-table row to the page object.
    dashboard.verify_gps_setting(row)
|
164054
|
from unittest import IsolatedAsyncioTestCase
events = []
class Test(IsolatedAsyncioTestCase):
    """Demonstrates the async test lifecycle order by appending to the
    module-level `events` list:
    setUp -> asyncSetUp -> test -> tearDown -> asyncTearDown -> cleanup.
    """
    def setUp(self):
        events.append("setUp")
    async def asyncSetUp(self):
        # NOTE(review): AsyncConnection is not defined or imported in this
        # file (this mirrors the unittest docs example) — confirm the real
        # import before running.
        self._async_connection = await AsyncConnection()
        events.append("asyncSetUp")
    async def test_response(self):
        events.append("test_response")
        response = await self._async_connection.get("https://example.com")
        self.assertEqual(response.status_code, 200)
        # Async cleanups run after asyncTearDown, in LIFO order.
        self.addAsyncCleanup(self.on_cleanup)
    def tearDown(self):
        events.append("tearDown")
    async def asyncTearDown(self):
        await self._async_connection.close()
        events.append("asyncTearDown")
    async def on_cleanup(self):
        events.append("cleanup")
if __name__ == "__main__":
    # Only IsolatedAsyncioTestCase was imported at the top of the file, so
    # the bare `unittest.main()` raised NameError; import the module here.
    import unittest

    unittest.main()
|
164062
|
from dask.distributed import Client
import dask.array as da
import dask_ml
import dask_bigquery
import numpy as np
# Connect to a Dask scheduler assumed to already be running on localhost:8786.
client = Client("localhost:8786")
# Build a tiny task graph and execute it on the cluster as a smoke test.
x = da.sum(np.ones(5))
x.compute()
|
164063
|
from datequarter import DateQuarter
if __name__ == "__main__":
    # quarter1 (2019Q1) comes after quarter2 (2018Q4); presumably
    # DateQuarter.between handles a reversed range — verify against the
    # datequarter documentation.
    quarter1 = DateQuarter(2019, 1)
    quarter2 = DateQuarter(2018, 4)
    print(list(DateQuarter.between(quarter1, quarter2)))
|
164074
|
import uuid
from djongo import models
from node.blockchain.inner_models import Block as PydanticBlock
from node.core.models import CustomModel
class PendingBlock(CustomModel):
    """A block received from a signer but not yet confirmed; at most one
    pending block per (number, signer) pair."""
    # Surrogate primary key, generated client-side.
    _id = models.UUIDField(primary_key=True, default=uuid.uuid4)
    # Block number within the chain.
    number = models.PositiveBigIntegerField()
    # Block hash (hex digest).
    hash = models.CharField(max_length=128)  # noqa: A003
    # Identifier of the node that signed the block.
    signer = models.CharField(max_length=64)
    # Raw serialized block payload; parsed on demand by get_block().
    body = models.BinaryField()
    def get_block(self) -> PydanticBlock:
        """Deserialize the stored body into a pydantic Block."""
        return PydanticBlock.parse_raw(self.body)
    class Meta:
        unique_together = ('number', 'signer')
        # Default ordering reuses the uniqueness key.
        ordering = unique_together
    def __str__(self):
        return f'block_number={self.number}, hash={self.hash}'
|
164118
|
from django.conf.urls import url
from . import views
# Routes for the editor helper endpoints.
# NOTE(review): django.conf.urls.url() is deprecated since Django 3.1 and
# removed in 4.0 — migrate to django.urls.re_path/path when upgrading.
urlpatterns = [
    url(r'^upload/$', views.ImageUploadView.as_view(), name='upload'),
    url(r'^md2html/$', views.MarkdownToHTML.as_view(), name='md2html'),
]
|
164120
|
from pylagrit import PyLaGriT
import numpy
# Build a 10x10x1 hex grid spanning [0, 10] in x/y and [0, 1] in z.
x = numpy.arange(0,10.1,1)
y = x
z = [0,1]
lg = PyLaGriT()
mqua = lg.gridder(x,y,z,elem_type='hex',connect=True)
# Rotate the mesh 25 degrees about a line along the x axis that extends
# just beyond the mesh bounds.
mqua.rotateln([mqua.xmin-0.1,0,0],[mqua.xmax+0.1,0,0],25)
# Export the Exodus mesh plus an ATS-compatible XML, then preview it.
mqua.dump_exo('rotated.exo')
mqua.dump_ats_xml('rotated.xml','rotated.exo')
mqua.paraview()
|
164128
|
import os
import datetime
from typing import Dict, Optional, Any, List
from markdown_subtemplate import caching as __caching
from markdown_subtemplate.infrastructure import markdown_transformer
from markdown_subtemplate.exceptions import ArgumentExpectedException, TemplateNotFoundException
from markdown_subtemplate import logging as __logging
import markdown_subtemplate.storage as __storage
from markdown_subtemplate.logging import SubtemplateLogger
from markdown_subtemplate.storage import SubtemplateStorage
# noinspection DuplicatedCode
# noinspection DuplicatedCode
def get_page(template_path: str, data: Dict[str, Any]) -> str:
    """Return the final HTML for *template_path*, with *data* substituted.

    The HTML (with inline [VARIABLE]s already applied) is cached; the
    per-request *data* substitution is always applied afterwards because it
    varies per call.

    Raises ArgumentExpectedException when template_path is blank.
    """
    if not template_path or not template_path.strip():
        raise ArgumentExpectedException('template_path')
    template_path = template_path.strip().lower()
    cache = __caching.get_cache()
    log = __logging.get_log()
    key = f'html: {template_path}'
    entry = cache.get_html(key)
    if entry:
        log.trace(f"CACHE HIT: Reusing {template_path} from HTML cache.")
        contents = entry.contents
        # Is there data that needs to be folded in? Process it.
        if data:
            contents = process_variables(contents, data)
        # Return the cached data, no need to transform for variables.
        return contents
    t0 = datetime.datetime.now()
    # Get the markdown with imports and substitutions
    markdown = get_markdown(template_path)
    # Inline [VARIABLE name="value"] definitions are extracted first so
    # their placeholders can be substituted into the rendered HTML.
    inline_variables = {}
    markdown = get_inline_variables(markdown, inline_variables, log)
    # Convert markdown to HTML
    html = get_html(markdown)
    # Cache inline variables, but not the passed in data as that varies per request (query string, etc).
    html = process_variables(html, inline_variables)
    cache.add_html(key, key, html)
    # Replace the passed variables each time.
    html = process_variables(html, data)
    dt = datetime.datetime.now() - t0
    msg = f"Created contents for {template_path}:{data} in {int(dt.total_seconds() * 1000):,} ms."
    log.info(f"GENERATING HTML: {msg}")
    return html
def get_html(markdown_text: str, unsafe_data=False) -> str:
    """Convert markdown text to HTML via the configured transformer."""
    return markdown_transformer.transform(markdown_text, unsafe_data)
# noinspection DuplicatedCode
def get_markdown(template_path: str, data: Dict[str, Any] = None) -> str:
if data is None:
data = {}
cache = __caching.get_cache()
log = __logging.get_log()
key = f'markdown: {template_path}'
entry = cache.get_markdown(key)
if entry:
log.trace(f"CACHE HIT: Reusing {template_path} from MARKDOWN cache.")
if not data:
return entry.contents
else:
return process_variables(entry.contents, data)
t0 = datetime.datetime.now()
text = load_markdown_contents(template_path)
cache.add_markdown(key, key, text)
if data:
text = process_variables(text, data)
dt = datetime.datetime.now() - t0
msg = f"Created contents for {template_path} in {int(dt.total_seconds() * 1000):,} ms."
log.trace(f"GENERATING MARKDOWN: {msg}")
return text
def load_markdown_contents(template_path: str) -> Optional[str]:
    """Load a template's markdown from storage, resolve its [IMPORT]
    directives, and return the joined, stripped text ('' when missing)."""
    if not template_path:
        return ''
    log = __logging.get_log()
    log.verbose(f"Loading markdown template: {template_path}")
    page_md = get_page_markdown(template_path)
    if not page_md:
        return ''
    resolved_lines = process_imports(page_md.split('\n'))
    return "\n".join(resolved_lines).strip()
def get_page_markdown(template_path: str) -> Optional[str]:
    """Fetch the raw markdown text for *template_path* from the configured
    storage backend. Raises TemplateNotFoundException for a blank path."""
    if not (template_path and template_path.strip()):
        raise TemplateNotFoundException("No template file specified: template_path=''.")
    store: SubtemplateStorage = __storage.get_storage()
    return store.get_markdown_text(template_path)
def get_shared_markdown(import_name: str) -> Optional[str]:
    """Fetch the markdown for a shared [IMPORT name] fragment from the
    configured storage backend. Raises ArgumentExpectedException for a
    blank name."""
    if not (import_name and import_name.strip()):
        raise ArgumentExpectedException('import_name')
    store: SubtemplateStorage = __storage.get_storage()
    return store.get_shared_markdown(import_name)
def process_imports(lines: List[str]) -> List[str]:
    """Recursively splice [IMPORT name] directives into *lines*.

    After splicing one import, processing restarts from the top via
    recursion so that imports contained in the imported fragment are also
    resolved. A missing import is replaced by an inline ERROR marker.
    """
    log = __logging.get_log()
    line_data = list(lines)
    for idx, line in enumerate(line_data):
        if not line.strip().startswith('[IMPORT '):
            continue
        import_statement = line.strip()
        import_name = import_statement \
            .replace('[IMPORT ', '') \
            .replace(']', '') \
            .strip()
        log.verbose(f"Loading import: {import_name}...")
        markdown = get_shared_markdown(import_name)
        if markdown is not None:
            markdown_lines = markdown.split('\n')
        else:
            # Keep rendering instead of failing: surface the error inline.
            markdown_lines = ['', f'ERROR: IMPORT {import_name} not found', '']
        # Replace the directive line with the imported lines, then rescan
        # the whole document (handles nested imports).
        line_data = line_data[:idx] + markdown_lines + line_data[idx + 1:]
        return process_imports(line_data)
    return line_data
def process_variables(raw_text: str, data: Dict[str, Any]) -> str:
    """Replace $KEY$ placeholders in *raw_text* with values from *data*.

    Only string keys are considered (keys are upper-cased to form the
    placeholder). Fix: the original built the placeholder map for string
    keys only but then iterated *all* keys, raising KeyError whenever
    *data* contained a non-string key — now only mapped keys are iterated.
    """
    if not raw_text:
        return raw_text
    log = __logging.get_log()
    key_placeholders = {
        key: f"${key.strip().upper()}$"
        for key in data
        if key and isinstance(key, str)
    }
    transformed_text = raw_text
    for key, placeholder in key_placeholders.items():
        if placeholder not in transformed_text:
            continue
        log.verbose(f"Replacing {placeholder}...")
        transformed_text = transformed_text.replace(placeholder, str(data[key]))
    return transformed_text
def get_inline_variables(markdown: str, new_vars: Dict[str, str], log: Optional[SubtemplateLogger]) -> str:
    """Extract [VARIABLE name="value"] definition lines from *markdown*.

    Valid definitions are added to *new_vars* (names upper-cased, quotes
    stripped) and their lines removed from the returned text. Malformed
    definitions are logged (when *log* is given) and dropped. If no valid
    definition is found the original markdown is returned unchanged.
    """
    marker = '[VARIABLE '
    if marker not in markdown and marker.lower() not in markdown:
        return markdown
    kept_lines: List[str] = []
    for line in markdown.split('\n'):
        is_definition = bool(line) and line.strip().upper().startswith(marker)
        if not is_definition:
            kept_lines.append(line)
            continue
        body = line.strip()[len(marker):].strip("]")
        pieces = body.split('=')
        if len(pieces) != 2:
            if log:
                log.error(f"Invalid variable definition in markdown: {line}.")
            continue
        name = pieces[0].strip().upper()
        value = pieces[1].strip()
        quoted = (
            (value.startswith('"') or value.startswith("'")) and
            (value.endswith('"') or value.endswith("'"))
        )
        if not quoted:
            if log:
                log.error(f"Invalid variable definition in markdown, missing quotes surrounding value: {line}.")
            continue
        new_vars[name] = value.strip('\'"').strip()
    if new_vars:
        return "\n".join(kept_lines)
    return markdown
|
164143
|
from .statistics_diff import (
SmoothFRStatistic, SmoothKNNStatistic, MMDStatistic, EnergyStatistic)
from .statistics_nondiff import FRStatistic, KNNStatistic
__all__ = ['SmoothFRStatistic', 'SmoothKNNStatistic', 'MMDStatistic',
'EnergyStatistic', 'FRStatistic', 'KNNStatistic']
|
164184
|
import os, argparse, cPickle
from shutil import copyfile
import numpy as np
import cPickle
from nibabel import load as load_nii
import nibabel as nib
from scipy import ndimage
from nolearn.lasagne import NeuralNet, BatchIterator, TrainSplit
from nolearn_utils.hooks import SaveTrainingHistory, PlotTrainingHistory, EarlyStopping
from lasagne import objectives, updates
from nolearn.lasagne.handlers import SaveWeights
import lasagne
import theano as T
from lasagne.layers import InputLayer, DenseLayer, DropoutLayer, FeaturePoolLayer, LocalResponseNormalization2DLayer, BatchNormLayer, prelu, ConcatLayer, ElemwiseSumLayer, ExpressionLayer, PadLayer, ScaleLayer
from lasagne.layers import Conv3DLayer, MaxPool3DLayer, Conv2DLayer, MaxPool2DLayer, Pool3DLayer, batch_norm
from lasagne.nonlinearities import softmax, rectify
nib.Nifti1Header.quaternion_threshold = -np.finfo(np.float32).eps * 10
from datetime import datetime
def float32(k):
    """Cast *k* (scalar or array-like) to a float32 ndarray.

    Uses np.asarray instead of the legacy ``np.cast['float32']`` mapping,
    which was deprecated and removed in NumPy 2.0; for older NumPy the two
    forms are equivalent.
    """
    return np.asarray(k, dtype=np.float32)
class AdjustVariable(object):
    """
    Callback that linearly anneals a NeuralNet attribute (e.g. the learning
    rate) from ``start`` to ``stop`` across the net's scheduled epochs.
    """
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start = start
        self.stop = stop
        self.ls = None
    def __call__(self, nn, train_history):
        # Build the per-epoch schedule lazily, once max_epochs is known.
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        current_epoch = train_history[-1]['epoch']
        target = getattr(nn, self.name)
        target.set_value(float32(self.ls[current_epoch - 1]))
class Rotate_batch_Iterator(BatchIterator):
    """
    Batch iterator performing on-the-fly data augmentation.

    For a random half of each batch, every input channel ('in1'/'in2'/'in3';
    presumably axial/coronal/sagittal patches) is replaced by one of:
    a 180-degree rotation, a horizontal flip, or a flipped 180-degree
    rotation, picked at random per sample.
    """

    @staticmethod
    def _augment(x):
        """Return randomly augmented patches for `x` (batch, channel, h, w).

        Picks, per sample, one of {rotate-180, flip, rotate-180 + flip}.
        """
        # The 90-degree rotation is only an intermediate step for the 180 one
        # (the original code computed but never stacked it either).
        rotate_90 = x[:, :, ::-1, :].transpose(0, 1, 3, 2)
        rotate_180 = rotate_90[:, :, ::-1, :].transpose(0, 1, 3, 2)
        rotate_0_flipped = x[:, :, :, ::-1]
        rotate_180_flipped = rotate_180[:, :, :, ::-1]
        augmented = np.stack([rotate_180,
                              rotate_0_flipped,
                              rotate_180_flipped],
                             axis=1)
        choices = np.random.randint(0, 3, size=augmented.shape[0])
        return np.stack([augmented[i, choices[i], :, :, :]
                         for i in range(augmented.shape[0])])

    def transform(self, Xb, yb):
        """Augment a random half of the batch, independently per channel."""
        Xb, yb = super(Rotate_batch_Iterator, self).transform(Xb, yb)
        bs = Xb['in1'].shape[0]
        # '//' keeps the count an int under Python 3 as well (the original
        # 'bs / 2' only worked on Python 2).
        indices = np.random.choice(bs, bs // 2, replace=False)
        if len(indices) > 0:
            # Same augmentation routine for each view; the random choice of
            # transform is drawn separately per channel, as before.
            for key in ('in1', 'in2', 'in3'):
                Xb[key][indices] = self._augment(Xb[key][indices])
        return Xb, yb
def build_model(weights_path, options):
    """
    Build the CNN model. Create the Neural Net object and return it back.
    Inputs:
    - weights_path: base folder where per-experiment weights/history are stored.
    - options: several hyper-parameters used to configure the net
      (expects keys: 'experiment', 'net_verbose', 'train_split',
      'max_epochs', 'patience', 'patch_size', 'load_weights').
    Output:
    - net: a NeuralNet object
    """
    net_model_name = options['experiment']
    # Create the per-experiment folder for weights/history.
    # NOTE(review): the bare except hides every mkdir failure (permissions,
    # bad path), not only "already exists" -- consider narrowing to OSError.
    try:
        os.mkdir(os.path.join(weights_path, net_model_name))
    except:
        pass
    net_weights = os.path.join(weights_path, net_model_name, net_model_name + '.pkl')
    net_history = os.path.join(weights_path, net_model_name, net_model_name + '_history.pkl')
    # select hyper-parameters
    t_verbose = options['net_verbose']
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']
    # Training callbacks: early stopping plus checkpointing of the best
    # weights and the training history after every epoch.
    early_stopping = EarlyStopping(patience=max_epochs_patience)
    save_weights = SaveWeights(net_weights, only_best=True, pickle=False)
    save_training_history = SaveTrainingHistory(net_history)
    # build the architecture: three identical 2D conv towers (one per view)
    ps = options['patch_size'][0]
    num_channels = 1
    fc_conv = 180
    fc_fc = 180  # NOTE(review): unused below -- confirm it can be removed
    dropout_conv = 0.5
    dropout_fc = 0.5
    # --------------------------------------------------
    # channel_1: axial
    # --------------------------------------------------
    axial_ch = InputLayer(name='in1', shape=(None, num_channels, ps, ps))
    axial_ch = prelu(batch_norm(Conv2DLayer(axial_ch, name='axial_ch_conv1', num_filters=20, filter_size=3)), name = 'axial_ch_prelu1')
    axial_ch = prelu(batch_norm(Conv2DLayer(axial_ch, name='axial_ch_conv2', num_filters=20, filter_size=3)), name = 'axial_ch_prelu2')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_1', pool_size=2)
    axial_ch = prelu(batch_norm(Conv2DLayer(axial_ch, name='axial_ch_conv3', num_filters=40, filter_size=3)), name = 'axial_ch_prelu3')
    axial_ch = prelu(batch_norm(Conv2DLayer(axial_ch, name='axial_ch_conv4', num_filters=40, filter_size=3)), name = 'axial_ch_prelu4')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_2', pool_size=2)
    axial_ch = prelu(batch_norm(Conv2DLayer(axial_ch, name='axial_ch_conv5', num_filters=60, filter_size=3)), name = 'axial_ch_prelu5')
    axial_ch = DropoutLayer(axial_ch, name = 'axial_l1drop', p = dropout_conv)
    axial_ch = DenseLayer(axial_ch, name='axial_d1', num_units = fc_conv)
    axial_ch = prelu(axial_ch, name = 'axial_prelu_d1')
    # --------------------------------------------------
    # channel_2: coronal (same topology as the axial tower)
    # --------------------------------------------------
    coronal_ch = InputLayer(name='in2', shape=(None, num_channels, ps, ps))
    coronal_ch = prelu(batch_norm(Conv2DLayer(coronal_ch, name='coronal_ch_conv1', num_filters=20, filter_size=3)), name = 'coronal_ch_prelu1')
    coronal_ch = prelu(batch_norm(Conv2DLayer(coronal_ch, name='coronal_ch_conv2', num_filters=20, filter_size=3)), name = 'coronal_ch_prelu2')
    coronal_ch = MaxPool2DLayer(coronal_ch, name='coronal_max_pool_1', pool_size=2)
    coronal_ch = prelu(batch_norm(Conv2DLayer(coronal_ch, name='coronal_ch_conv3', num_filters=40, filter_size=3)), name = 'coronal_ch_prelu3')
    coronal_ch = prelu(batch_norm(Conv2DLayer(coronal_ch, name='coronal_ch_conv4', num_filters=40, filter_size=3)), name = 'coronal_ch_prelu4')
    coronal_ch = MaxPool2DLayer(coronal_ch, name='coronal_max_pool_2', pool_size=2)
    coronal_ch = prelu(batch_norm(Conv2DLayer(coronal_ch, name='coronal_ch_conv5', num_filters=60, filter_size=3)), name = 'coronal_ch_prelu5')
    coronal_ch = DropoutLayer(coronal_ch, name = 'coronal_l1drop', p = dropout_conv)
    coronal_ch = DenseLayer(coronal_ch, name='coronal_d1', num_units = fc_conv)
    coronal_ch = prelu(coronal_ch, name = 'coronal_prelu_d1')
    # --------------------------------------------------
    # channel_3: saggital (same topology as the axial tower)
    # --------------------------------------------------
    saggital_ch = InputLayer(name='in3', shape=(None, num_channels, ps, ps))
    saggital_ch = prelu(batch_norm(Conv2DLayer(saggital_ch, name='saggital_ch_conv1', num_filters=20, filter_size=3)), name = 'saggital_ch_prelu1')
    saggital_ch = prelu(batch_norm(Conv2DLayer(saggital_ch, name='saggital_ch_conv2', num_filters=20, filter_size=3)), name = 'saggital_ch_prelu2')
    saggital_ch = MaxPool2DLayer(saggital_ch, name='saggital_max_pool_1', pool_size=2)
    saggital_ch = prelu(batch_norm(Conv2DLayer(saggital_ch, name='saggital_ch_conv3', num_filters=40, filter_size=3)), name = 'saggital_ch_prelu3')
    saggital_ch = prelu(batch_norm(Conv2DLayer(saggital_ch, name='saggital_ch_conv4', num_filters=40, filter_size=3)), name = 'saggital_ch_prelu4')
    saggital_ch = MaxPool2DLayer(saggital_ch, name='saggital_max_pool_2', pool_size=2)
    saggital_ch = prelu(batch_norm(Conv2DLayer(saggital_ch, name='saggital_ch_conv5', num_filters=60, filter_size=3)), name = 'saggital_ch_prelu5')
    saggital_ch = DropoutLayer(saggital_ch, name = 'saggital_l1drop', p = dropout_conv)
    saggital_ch = DenseLayer(saggital_ch, name='saggital_d1', num_units = fc_conv)
    saggital_ch = prelu(saggital_ch, name = 'saggital_prelu_d1')
    # FC layer 540: fuse the three view towers
    layer = ConcatLayer(name = 'elem_channels', incomings = [axial_ch, coronal_ch, saggital_ch])
    layer = DropoutLayer(layer, name = 'f1_drop', p = dropout_fc)
    layer = DenseLayer(layer, name='FC1', num_units =540)
    layer = prelu(layer, name = 'prelu_f1')
    # concatenate channels 540 + 15
    layer = DropoutLayer(layer, name = 'f2_drop', p = dropout_fc)
    atlas_layer = DropoutLayer(InputLayer(name='in4', shape=(None, 15)), name = 'Dropout_atlas', p = .2)
    # NOTE(review): the assignment below immediately overwrites the
    # dropout-wrapped atlas input above, so 'Dropout_atlas' is dead code and
    # the atlas features enter the network WITHOUT dropout -- confirm which
    # behaviour is intended.
    atlas_layer = InputLayer(name='in4', shape=(None, 15))
    layer = ConcatLayer(name = 'elem_channels2', incomings = [layer, atlas_layer])
    # FC layer 270
    layer = DenseLayer(layer, name='fc_2', num_units = 270)
    layer = prelu(layer, name = 'prelu_f2')
    # FC output 15 (softmax)
    net_layer = DenseLayer(layer, name='out_layer', num_units = 15, nonlinearity=softmax)
    net = NeuralNet(
        layers= net_layer,
        objective_loss_function=objectives.categorical_crossentropy,
        update = updates.adam,
        update_learning_rate=0.001,
        on_epoch_finished=[
            save_weights,
            save_training_history,
            early_stopping,
        ],
        verbose= t_verbose,
        max_epochs= num_epochs,
        train_split=TrainSplit(eval_size= train_split_perc),
    )
    # Optionally resume from previously saved weights.
    # NOTE(review): the bare except also swallows corrupt/incompatible weight
    # files, silently training from scratch -- consider logging the failure.
    if options['load_weights'] == 'True':
        try:
            print " --> loading weights from ", net_weights
            net.load_params_from(net_weights)
        except:
            pass
    return net
|
164214
|
import argparse
from unittest import TestCase
import pytest
from pytorch_lightning import Trainer
from pl_bolts.models.rl.double_dqn_model import DoubleDQN
from pl_bolts.models.rl.dqn_model import DQN
from pl_bolts.models.rl.dueling_dqn_model import DuelingDQN
from pl_bolts.models.rl.noisy_dqn_model import NoisyDQN
from pl_bolts.models.rl.per_dqn_model import PERDQN
class TestValueModels(TestCase):
    """Smoke tests covering the value-based (DQN-family) RL models."""

    def setUp(self) -> None:
        base_parser = argparse.ArgumentParser(add_help=False)
        base_parser = Trainer.add_argparse_args(base_parser)
        base_parser = DQN.add_model_specific_args(base_parser)
        cli_args = [
            "--warm_start_size",
            "100",
            "--gpus",
            "0",
            "--env",
            "PongNoFrameskip-v4",
        ]
        self.hparams = base_parser.parse_args(cli_args)
        # max_epochs mirrors max_steps so training cannot stop early;
        # val_check_interval just needs *some* value and does not affect
        # training at the moment.
        self.trainer = Trainer(
            gpus=self.hparams.gpus,
            max_steps=100,
            max_epochs=100,
            val_check_interval=1,
            fast_dev_run=True,
        )

    def test_dqn(self):
        """Smoke test that the DQN model runs."""
        self.trainer.fit(DQN(self.hparams.env, num_envs=5))

    def test_double_dqn(self):
        """Smoke test that the Double DQN model runs."""
        self.trainer.fit(DoubleDQN(self.hparams.env))

    def test_dueling_dqn(self):
        """Smoke test that the Dueling DQN model runs."""
        self.trainer.fit(DuelingDQN(self.hparams.env))

    def test_noisy_dqn(self):
        """Smoke test that the Noisy DQN model runs."""
        self.trainer.fit(NoisyDQN(self.hparams.env))

    @pytest.mark.skip(reason="CI is killing this test")
    def test_per_dqn(self):
        """Smoke test that the PER DQN model runs."""
        self.trainer.fit(PERDQN(self.hparams.env))

    # def test_n_step_dqn(self):
    #     """Smoke test that the N Step DQN model runs"""
    #     model = DQN(self.hparams.env, n_steps=self.hparams.n_steps)
    #     result = self.trainer.fit(model)
|
164219
|
from django.conf import settings
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import AuthenticationFailed
from datetime import timedelta
from django.utils import timezone
def expires_at(token):
    """Return the token's remaining lifetime as a timedelta.

    Negative once the configured EXPIRY_TOKEN_LIFETIME has elapsed since
    the token was created.
    """
    age = timezone.now() - token.created
    return getattr(settings, 'EXPIRY_TOKEN_LIFETIME') - age
def is_token_expired(token):
    """Return True when the token's configured lifetime has run out."""
    remaining = expires_at(token)
    return remaining < timedelta(seconds=0)
def token_expire_handler(token):
    """Replace an expired token with a freshly keyed one.

    Returns ``(is_expired, token)``: when the given token has expired it is
    deleted and a brand-new token (different key) for the same user takes
    its place; otherwise the original token is handed back unchanged.
    """
    expired = is_token_expired(token)
    if not expired:
        return expired, token
    owner = token.user
    token.delete()
    return expired, Token.objects.create(user=owner)
class ExpiryTokenAuthentication(TokenAuthentication):
    """
    DRF token authentication that enforces a token lifetime.

    If the presented token has expired it is deleted and the request is
    rejected with AuthenticationFailed; the client must obtain a new token
    (this class does not create one itself).
    """
    def authenticate_credentials(self, key):
        # Let DRF's TokenAuthentication resolve the key to a Token first;
        # it raises AuthenticationFailed for unknown/inactive credentials.
        _, token = super(ExpiryTokenAuthentication, self).authenticate_credentials(key)
        is_expired = is_token_expired(token)
        if is_expired:
            # Remove the stale token so its key can never be replayed.
            token.delete()
            raise AuthenticationFailed('The Token is expired')
        return token.user, token
|
164253
|
import numpy as np
class RunningScore(object):
    """Accumulates a confusion matrix and derives segmentation metrics."""

    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    @staticmethod
    def _fast_hist(label_true, label_pred, n_class):
        """Confusion-matrix contribution of one (truth, prediction) pair."""
        valid = (label_true >= 0) & (label_true < n_class)
        combined = n_class * label_true[valid].astype(int) + label_pred[valid]
        return np.bincount(combined,
                           minlength=n_class ** 2).reshape(n_class, n_class)

    def update(self, label_trues, label_preds):
        """Fold a batch of (truth, prediction) label maps into the matrix."""
        for truth, pred in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(
                truth.flatten(), pred.flatten(), self.n_classes)

    def get_scores(self):
        """Returns accuracy score evaluation result.
        - overall accuracy
        - mean accuracy
        - mean IU
        - fwavacc
        """
        hist = self.confusion_matrix
        eps = np.finfo(np.float32).eps  # keeps divisions by empty rows finite
        tp = np.diag(hist)
        row_sums = hist.sum(axis=1)
        overall_acc = tp.sum() / (hist.sum() + eps)
        mean_acc = np.nanmean(tp / (row_sums + eps))
        iu = tp / (row_sums + hist.sum(axis=0) - tp + eps)
        mean_iu = np.nanmean(iu)
        freq = row_sums / (hist.sum() + eps)
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return {'Overall_Acc': overall_acc,
                'Mean_Acc': mean_acc,
                'FreqW_Acc': fwavacc,
                'Mean_IoU': mean_iu}, cls_iu

    def reset(self):
        """Zero the accumulated confusion matrix."""
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
if __name__ == "__main__":
    # Tiny self-check: accumulate one binary truth/prediction pair and
    # print the resulting 2x2 confusion matrix.
    n_class = 2
    score = RunningScore(n_class)
    label_true = np.array([1, 0, 0, 1, 1, 0, 1, 0, 1, 0])
    label_pred = np.array([1, 1, 0, 1, 0, 0, 1, 1, 0, 0])
    # NOTE(review): update() is given bare 1-D arrays here, so zip iterates
    # scalar labels; each scalar is re-flattened to a length-1 array inside
    # _fast_hist, which still yields the full matrix.
    score.update(label_true, label_pred)
    print(score.confusion_matrix)
|
164259
|
import h2o
from h2o.exceptions import H2OResponseError
from tests import pyunit_utils
import tempfile
from collections import OrderedDict
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def test_frame_reload():
    """Round-trip an H2O frame through save/load and verify it is intact.

    Also checks that neither the file nor the frame is silently overwritten
    when force=False, and that a reloaded frame is usable for grid search.
    """
    work_dir = tempfile.mkdtemp()
    iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
    df_key = iris.key
    df_pd_orig = iris.as_data_frame()
    iris.save(work_dir)
    try:
        iris.save(work_dir, force=False)  # must fail because the file exists
        # Previously a non-raising call slipped through silently.
        raise AssertionError("expected save(force=False) to fail on existing file")
    except H2OResponseError as e:
        assert e.args[0].exception_msg.startswith("File already exists")
    try:
        h2o.load_frame(df_key, work_dir, force=False)  # must fail: frame exists
        raise AssertionError("expected load_frame(force=False) to fail on existing frame")
    except H2OResponseError as e:
        assert e.args[0].exception_msg == "Frame Key<Frame> iris_wheader.hex already exists."
    df_loaded_force = h2o.load_frame(df_key, work_dir)
    h2o.remove(iris)
    df_loaded = h2o.load_frame(df_key, work_dir, force=False)
    # Both load paths must reproduce the original data exactly.
    df_pd_loaded_force = df_loaded_force.as_data_frame()
    df_pd_loaded = df_loaded.as_data_frame()
    assert df_pd_orig.equals(df_pd_loaded_force)
    assert df_pd_orig.equals(df_pd_loaded)
    # try running grid search on the reloaded frame
    h2o.remove_all()
    df_loaded = h2o.load_frame(df_key, work_dir)
    hyper_parameters = OrderedDict()
    hyper_parameters["ntrees"] = [5, 10, 20, 30]
    grid_small = H2OGridSearch(
        H2OGradientBoostingEstimator,
        hyper_params=hyper_parameters
    )
    grid_small.train(x=list(range(4)), y=4, training_frame=df_loaded)
    assert len(grid_small.models) == 4
if __name__ == "__main__":
    # h2o pyunit convention: run under the standalone harness when executed
    # directly; otherwise (imported by the suite runner) call the test as-is.
    pyunit_utils.standalone_test(test_frame_reload)
else:
    test_frame_reload()
|
164288
|
from tests.util import match_object_snapshot
from tests.analyzer.util import analyze
# Fixture: a YAML document whose top-level mapping holds a five-item list of
# identical scalar values.
# NOTE(review): this module-level name shadows the built-in input(); consider
# renaming (e.g. INPUT_YAML) together with its use in the test below.
input = """
list:
- value
- value
- value
- value
- value
""".strip()
def test_list_item_analysis():
    """Analysis of a simple YAML list must match the stored snapshot."""
    result = analyze(input)
    snapshot_path = 'tests/analyzer/snapshots/list_item_analysis.snap.yaml'
    assert match_object_snapshot(result, snapshot_path)
|
164291
|
from contextlib import contextmanager
import logging
from pkg_resources import parse_version
import sys
import time
from pykafka.exceptions import RdKafkaStoppedException, ConsumerStoppedException
from pykafka.simpleconsumer import SimpleConsumer, OffsetType
from pykafka.utils.compat import get_bytes
from pykafka.utils.error_handlers import valid_int
from . import _rd_kafka
from . import helpers
log = logging.getLogger(__name__)
class RdKafkaSimpleConsumer(SimpleConsumer):
    """A pykafka.SimpleConsumer with librdkafka-based fetchers
    This aims to conform to the SimpleConsumer interface as closely as
    possible. There are some notable differences:
    1. rotating over partitions: while message ordering within partitions is
       conserved (of course!), the order in which partitions are visited will
       deviate. In particular, here we may emit more than one message from
       the same partition before visiting another
    2. ignores num_consumer_fetchers: librdkafka will typically spawn at least
       as many threads as there are kafka cluster nodes
    For an overview of how configuration keys are mapped to librdkafka's, see
    _mk_rdkafka_config_lists.
    The `broker_version` argument on `KafkaClient` must be set correctly to use the
    rdkafka consumer.
    """
    def __init__(self,
                 topic,
                 cluster,
                 consumer_group=None,
                 partitions=None,
                 fetch_message_max_bytes=1024 * 1024,
                 num_consumer_fetchers=1,
                 auto_commit_enable=False,
                 auto_commit_interval_ms=60 * 1000,
                 queued_max_messages=10**5,  # NB differs from SimpleConsumer
                 fetch_min_bytes=1,
                 fetch_error_backoff_ms=500,
                 fetch_wait_max_ms=100,
                 offsets_channel_backoff_ms=1000,
                 offsets_commit_max_retries=5,
                 auto_offset_reset=OffsetType.EARLIEST,
                 consumer_timeout_ms=-1,
                 auto_start=True,
                 reset_offset_on_start=False,
                 compacted_topic=False,
                 generation_id=-1,
                 consumer_id=b'',
                 deserializer=None,
                 reset_offset_on_fetch=True):
        """See SimpleConsumer.__init__ for argument semantics; all arguments
        are captured verbatim and forwarded to the superclass."""
        # Snapshot every constructor argument so it can be forwarded as-is.
        callargs = {k: v for k, v in vars().items()
                    if k not in ("self", "__class__")}
        self._rdk_consumer = None
        self._poller_thread = None
        self._stop_poller_thread = cluster.handler.Event()
        self._broker_version = cluster._broker_version
        self._fetch_error_backoff_ms = valid_int(fetch_error_backoff_ms)
        # super() must come last for the case where auto_start=True
        super(RdKafkaSimpleConsumer, self).__init__(**callargs)
    def _setup_fetch_workers(self):
        """Start the librdkafka consumer plus a log-draining poller thread.

        Replaces SimpleConsumer's Python fetch workers: the current offsets
        of all owned partitions are handed to _rd_kafka.Consumer, which
        fetches in native threads from here on.
        """
        brokers = b','.join(b.host + b":" + get_bytes(str(b.port))
                            for b in self._cluster.brokers.values())
        partition_ids = list(self._partitions_by_id.keys())
        start_offsets = [
            self._partitions_by_id[p].next_offset for p in partition_ids]
        conf, topic_conf = self._mk_rdkafka_config_lists()
        self._rdk_consumer = _rd_kafka.Consumer()
        log.debug("Configuring _rdk_consumer...")
        self._rdk_consumer.configure(conf=conf)
        self._rdk_consumer.configure(topic_conf=topic_conf)
        start_kwargs = {"brokers": brokers,
                        "topic_name": self._topic.name,
                        "partition_ids": partition_ids,
                        "start_offsets": start_offsets}
        log.debug("Starting _rdk_consumer with {}".format(start_kwargs))
        self._rdk_consumer.start(**start_kwargs)
        # Poll: for a consumer, the main reason to poll the handle is that
        # this de-queues log messages at error level that might otherwise be
        # held up in librdkafka
        def poll(rdk_handle, stop_event):
            while not stop_event.is_set():
                try:
                    rdk_handle.poll(timeout_ms=1000)
                except RdKafkaStoppedException:
                    break
                except:
                    # Deliberately broad: any worker failure is stashed and
                    # re-raised on the consuming thread via
                    # _raise_worker_exceptions (see _consume).
                    self._worker_exception = sys.exc_info()
            log.debug("Exiting RdKafkaSimpleConsumer poller thread cleanly.")
        self._stop_poller_thread.clear()
        self._poller_thread = self._cluster.handler.spawn(
            poll, args=(self._rdk_consumer, self._stop_poller_thread))
    def consume(self, block=True, unblock_event=None):
        """Return the next message, or None if none arrived in time.

        Mirrors SimpleConsumer.consume; raises ConsumerStoppedException if
        the consumer was stopped before or while waiting.
        """
        timeout_ms = self._consumer_timeout_ms if block else 1
        try:
            msg = self._consume(timeout_ms, unblock_event)
        # if _rdk_consumer is None we'll catch an AttributeError here
        except (RdKafkaStoppedException, AttributeError) as e:
            if not self._running:
                raise ConsumerStoppedException
            else:  # unexpected other reason
                raise
        else:
            if not self._running:
                # Even if we did get a msg back, we'll still want to abort
                # here, because the new offset wouldn't get committed anymore
                raise ConsumerStoppedException
            if msg is not None:
                # set offset in OwnedPartition so the autocommit_worker can find it
                self._partitions_by_id[msg.partition_id].set_offset(msg.offset)
            return msg
    def _consume(self, timeout_ms, unblock_event):
        """Helper to allow catching interrupts around rd_kafka_consume"""
        inner_timeout_ms = 500  # unblock at this interval at least
        if timeout_ms < 0:
            # Negative timeout means block indefinitely (in inner_timeout_ms
            # slices, so worker exceptions and unblock_event stay responsive).
            while True:
                self._raise_worker_exceptions()
                if unblock_event and unblock_event.is_set():
                    return
                msg = self._rdk_consumer.consume(inner_timeout_ms)
                if msg is not None:
                    return msg
        else:
            t_start = time.time()
            leftover_ms = timeout_ms
            while leftover_ms > 0:
                self._raise_worker_exceptions()
                if unblock_event and unblock_event.is_set():
                    return
                inner_timeout_ms = int(min(leftover_ms, inner_timeout_ms))
                msg = self._rdk_consumer.consume(inner_timeout_ms)
                if msg is not None:
                    return msg
                elapsed_ms = 1000 * (time.time() - t_start)
                leftover_ms = timeout_ms - elapsed_ms
    def stop(self):
        """Stop consuming: ship offsets out via super(), then halt librdkafka."""
        # NB we should always call super() first, because it takes care of
        # shipping all important state (ie stored offsets) out
        ret = super(RdKafkaSimpleConsumer, self).stop()
        if self._rdk_consumer is not None:
            self._stop_poller_thread.set()
            # Call _rd_kafka.Consumer.stop explicitly, so we may catch errors:
            self._rdk_consumer.stop()
            log.debug("Issued stop to _rdk_consumer.")
            self._rdk_consumer = None
        return ret
    def fetch_offsets(self):
        """Fetch committed offsets, restarting librdkafka to resync them."""
        # Restart, because _rdk_consumer needs its internal offsets resynced
        with self._stop_start_rdk_consumer():
            return super(RdKafkaSimpleConsumer, self).fetch_offsets()
    def reset_offsets(self, partition_offsets=None):
        """Reset offsets, restarting librdkafka to resync its internal state."""
        # Restart, because _rdk_consumer needs its internal offsets resynced
        with self._stop_start_rdk_consumer():
            return super(
                RdKafkaSimpleConsumer, self).reset_offsets(partition_offsets)
    @contextmanager
    def _stop_start_rdk_consumer(self):
        """Context manager for methods to temporarily stop _rdk_consumer
        We need this because we hold read-offsets both in pykafka and
        internally in _rdk_consumer. We'll hold the one in pykafka to be the
        ultimate source of truth. So whenever offsets are to be changed (other
        than through consume() that is), we need to clobber _rdk_consumer.
        """
        restart_required = self._running and self._rdk_consumer is not None
        if restart_required:
            # Note we must not call a full self.stop() as that would stop
            # SimpleConsumer threads too, and if we'd have to start() again
            # that could have other side effects (eg resetting offsets).
            self._rdk_consumer.stop()
            log.debug("Temporarily stopped _rdk_consumer.")
        yield
        if restart_required:
            self._setup_fetch_workers()
            log.debug("Restarted _rdk_consumer.")
    def _mk_rdkafka_config_lists(self):
        """Populate conf, topic_conf to configure the rdkafka consumer"""
        # For documentation purposes, all consumer-relevant settings (all those
        # marked 'C' or '*') that appear in librdkafka/CONFIGURATION.md should
        # be listed below, in either `conf` or `topic_conf`, even if we do not
        # set them and they are commented out.
        ver10 = parse_version(self._broker_version) >= parse_version("0.10.0")
        conf = {  # destination: rd_kafka_conf_set
            "client.id": "pykafka.rdkafka",
            # Handled via rd_kafka_brokers_add instead:
            ##"metadata.broker.list"
            # NB these refer not to payloads, but to wire messages
            ##"message.max.bytes"  # leave at default
            "receive.message.max.bytes": (  # ~ sum of PartitionFetchRequests
                self._fetch_message_max_bytes * (len(self.partitions) + 1)),
            # No direct equivalents:
            ##"metadata.request.timeout.ms"
            ##"topic.metadata.refresh.interval.ms"
            ##"topic.metadata.refresh.fast.cnt"
            ##"topic.metadata.refresh.fast.interval.ms"
            ##"topic.metadata.refresh.sparse"
            ##"debug": "all",
            "socket.timeout.ms": self._cluster._socket_timeout_ms,
            ##"socket.send.buffer.bytes"
            ##"socket.receive.buffer.bytes"
            ##"socket.keepalive.enable"
            ##"socket.max.fails"
            ##"broker.address.ttl"
            ##"broker.address.family"
            # None of these need to be hooked up
            ##"statistics.interval.ms"
            ##"error_cb"  # we let errors be reported via log_cb
            ##"stats_cb"
            ##"log_cb"  # gets set in _rd_kafka module
            ##"log_level": 7,
            ##"socket_cb"
            ##"open_cb"
            ##"opaque"
            ##"internal.termination.signal"
            # Although the names seem to disagree, SimpleConsumer currently
            # uses _queued_max_messages in a way analogous to
            # queued.min.messages; that is "keep trying to fetch until there's
            # this number of messages on the queue". There's no equivalent of
            # queued.max.messages.kbytes so for now we infer the implied
            # maximum (which, with default settings, is ~2GB per partition):
            "queued.min.messages": self._queued_max_messages,
            "queued.max.messages.kbytes": str(
                self._queued_max_messages
                * self._fetch_message_max_bytes // 1024),
            "fetch.wait.max.ms": self._fetch_wait_max_ms,
            "fetch.message.max.bytes": self._fetch_message_max_bytes,
            "fetch.min.bytes": self._fetch_min_bytes,
            "fetch.error.backoff.ms": self._fetch_error_backoff_ms,
            "api.version.request": ver10,
            # We're outsourcing message fetching, but not offset management or
            # consumer rebalancing to librdkafka. Thus, consumer-group id
            # *must* remain unset, or we would appear to be two consumer
            # instances to the kafka cluster:
            ##"group.id"
            }
        # broker.version.fallback is incompatible with >-0.10
        if not ver10:
            conf["broker.version.fallback"] = self._broker_version
        conf.update(helpers.rdk_ssl_config(self._cluster))
        map_offset_types = {
            OffsetType.EARLIEST: "smallest",
            OffsetType.LATEST: "largest",
            }
        topic_conf = {
            ##"opaque"
            ##"group.id"  # see note above re group.id
            # pykafka handles offset commits
            "auto.commit.enable": "false",
            ##"auto.commit.interval.ms"
            "auto.offset.reset": map_offset_types[self._auto_offset_reset],
            ##"offset.store.path"
            ##"offset.store.sync.interval.ms"
            ##"offset.store.method"
            }
        # librdkafka expects all config values as strings:
        conf = [(key, str(conf[key])) for key in conf]
        topic_conf = [(key, str(topic_conf[key])) for key in topic_conf]
        return conf, topic_conf
|
164314
|
import sys
import subprocess
import commands
import os
import six
import copy
import argparse
import time
from utils.stream import stream_by_running as get_stream_m
from utils.args import ArgumentGroup, print_arguments, inv_arguments
from finetune_args import parser as finetuning_parser
from extend_pos import extend_word_emb, extend_sent_emb
import subprocess
# yapf: disable
# CLI of the multi-process launcher: one training process is spawned per
# local worker, possibly across several nodes.  ArgumentGroup/add_arg are
# thin project wrappers around argparse (see utils.args).
parser = argparse.ArgumentParser(__doc__)
multip_g = ArgumentGroup(parser, "multiprocessing",
        "start paddle training using multi-processing mode.")
multip_g.add_arg("node_ips", str, None,
        "paddle trainer ips")
multip_g.add_arg("node_id", int, None,
        "the trainer id of the node for multi-node distributed training.")
multip_g.add_arg("print_config", bool, True,
        "print the config of multi-processing mode.")
multip_g.add_arg("current_node_ip", str, None,
        "the ip of current node.")
multip_g.add_arg("split_log_path", str, "log",
        "log path for each trainer.")
multip_g.add_arg("log_prefix", str, "",
        "the prefix name of job log.")
multip_g.add_arg("nproc_per_node", int, 4,
        "the number of process to use on each node.")
multip_g.add_arg("selected_gpus", str, "0,1,2,3",
        "the gpus selected to use.")
multip_g.add_arg("training_script", str, None, "the program/script to be lauched "
        "in parallel followed by all the arguments", positional_arg=True)
multip_g.add_arg("training_script_args", str, None,
        "training script args", positional_arg=True, nargs=argparse.REMAINDER)
# yapf: enable
def start_procs(args):
    """Spawn one training process per local worker and wait for them all.

    Builds the PADDLE_* distributed-training environment for each process on
    this node, optionally redirects each worker's stdout/stderr to its own
    log file, and raises subprocess.CalledProcessError when any worker exits
    non-zero.
    """
    procs = []
    log_fns = []
    default_env = os.environ.copy()
    node_id = args.node_id
    node_ips = [x.strip() for x in args.node_ips.split(',')]
    current_ip = args.current_node_ip
    num_nodes = len(node_ips)
    selected_gpus = [x.strip() for x in args.selected_gpus.split(',')]
    selected_gpu_num = len(selected_gpus)
    # Build the comma-separated endpoint list "<ip>:617<d>" for every trainer
    # across all nodes.  NOTE(review): ids >= 10 expand to more than one
    # digit in "617%d", so ports may collide -- confirm deployments keep
    # fewer than 10 workers/GPU ids per node.
    all_trainer_endpoints = ""
    if selected_gpu_num < args.nproc_per_node:
        for ip in node_ips:
            for i in range(selected_gpu_num):
                if all_trainer_endpoints != "":
                    all_trainer_endpoints += ","
                all_trainer_endpoints += "%s:617%d" % (ip, int(selected_gpus[i]))
        nranks = num_nodes * selected_gpu_num
    else:
        for ip in node_ips:
            for i in range(args.nproc_per_node):
                if all_trainer_endpoints != "":
                    all_trainer_endpoints += ","
                all_trainer_endpoints += "%s:617%d" % (ip, i)
        nranks = num_nodes * args.nproc_per_node
    # Decide how many GPUs each local process gets.
    # NOTE(review): this arithmetic relies on Python 2 integer division
    # ('/'); under Python 3 gpus_per_proc becomes a float and the slice step
    # below would raise -- use '//' if this file is ever ported.
    gpus_per_proc = args.nproc_per_node % selected_gpu_num
    if gpus_per_proc == 0:
        if selected_gpu_num < args.nproc_per_node:
            gpus_per_proc = 1
        else:
            gpus_per_proc = selected_gpu_num / args.nproc_per_node
    else:
        gpus_per_proc = selected_gpu_num / args.nproc_per_node + 1
    selected_gpus_per_proc = [selected_gpus[i:i + gpus_per_proc] for i in range(0, len(selected_gpus), gpus_per_proc)]
    if args.print_config:
        print("all_trainer_endpoints: ", all_trainer_endpoints,
              ", node_id: ", node_id,
              ", current_ip: ", current_ip,
              ", num_nodes: ", num_nodes,
              ", node_ips: ", node_ips,
              ", gpus_per_proc: ", gpus_per_proc,
              ", selected_gpus_per_proc: ", selected_gpus_per_proc,
              ", nranks: ", nranks)
    current_env = copy.copy(default_env)
    procs = []
    cmds = []
    log_fns = []
    # Launch one worker per local process slot with its own PADDLE_* env.
    for i in range(0, args.nproc_per_node):
        trainer_id = node_id * args.nproc_per_node + i
        current_env.update({
            "FLAGS_selected_gpus": "%s" % ",".join([str(s) for s in selected_gpus_per_proc[i]]),
            "PADDLE_TRAINER_ID" : "%d" % trainer_id,
            "PADDLE_CURRENT_ENDPOINT": "%s:617%d" % (current_ip, i),
            "PADDLE_TRAINERS_NUM": "%d" % nranks,
            "PADDLE_TRAINER_ENDPOINTS": all_trainer_endpoints,
            "PADDLE_NODES_NUM": "%d" % num_nodes
        })
        cmd = [sys.executable, "-u",
               args.training_script] + args.training_script_args
        cmds.append(cmd)
        if args.split_log_path:
            fn = open("%s/%sjob.log.%d" % (args.split_log_path, args.log_prefix, trainer_id), "a")
            log_fns.append(fn)
            process = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
        else:
            process = subprocess.Popen(cmd, env=current_env)
        procs.append(process)
    # Wait for every worker; fail fast with CalledProcessError on non-zero exit.
    for i in range(len(procs)):
        proc = procs[i]
        proc.wait()
        if len(log_fns) > 0:
            log_fns[i].close()
        if proc.returncode != 0:
            raise subprocess.CalledProcessError(returncode=procs[i].returncode,
                                                cmd=cmds[i])
        else:
            print("proc %d finsh" % i)
def stream(args, lanch_args):
    """Poll for streamed pretrained models and fine-tune on each new one.

    Blocks until the stream job publishes at least one model, runs main()
    with that model as --init_pretraining_params, then keeps watching and
    re-runs training whenever a newer model appears.  Uses the Python 2-only
    `commands` module; shell return codes are captured but never checked.
    """
    #stream model list
    #stream_m = get_stream_m(args.stream_job, args.stream_cluster)
    stream_m = get_stream_m(args.stream_job)
    # Wait (10-minute intervals) until the stream publishes a first model.
    while len(stream_m) == 0:
        time.sleep(600)
        stream_m = get_stream_m(args.stream_job)
    download, tar, init_path = stream_m[-1]
    retcode, ret = commands.getstatusoutput(download)
    if not os.path.exists(init_path):
        retcode, ret = commands.getstatusoutput(tar)
    if not args.use_fp16:
        # Strip the ".master" suffix from fp32 parameter files.
        retcode, ret = commands.getstatusoutput(
            'rename .master "" ' + init_path + '/*.master'
        )
    # Point --init_pretraining_params at the downloaded model.
    # NOTE(review): when the flag is appended below, val_index stays -1;
    # the later `[val_index] = init_path` then works only because init_path
    # is the LAST appended element -- fragile, consider setting val_index
    # explicitly after the append.
    arg_name = '--init_pretraining_params'
    val_index = -1
    if arg_name in lanch_args.training_script_args:
        val_index = lanch_args.training_script_args.index(arg_name) + 1
        lanch_args.training_script_args[val_index] = init_path
    else:
        lanch_args.training_script_args += [arg_name, init_path]
    main(lanch_args)
    # NOTE(review): this watch loop has no sleep, so it busy-polls
    # get_stream_m between model updates -- confirm that is acceptable.
    while True:
        #updated_m = get_stream_m(args.stream_job, args.stream_cluster)
        updated_m = get_stream_m(args.stream_job)
        download, tar, init_path = updated_m[-1]
        if len(updated_m) > len(stream_m):
            retcode, ret = commands.getstatusoutput(download)
            if not os.path.exists(init_path):
                retcode, ret = commands.getstatusoutput(tar)
            if not args.use_fp16:
                retcode, ret = commands.getstatusoutput(
                    'rename .master "" ' + init_path + '/*.master'
                )
            lanch_args.training_script_args[val_index] = init_path
            main(lanch_args)
            #update
            stream_m = updated_m
def main(args):
    """Optionally extend pretrained embeddings, then launch training.

    ``extend_vocab`` / ``extend_sent`` are hard-coded off, so the two
    extension branches below are currently dead code.
    """
    extend_vocab = False
    extend_sent = False
    if args.print_config:
        print_arguments(args)
    """
    zs = '*'*20
    print(zs+'\n')
    print(args.training_script_args)
    print(type(args.training_script_args))
    print(zs+'\n')
    """
    def get_param(name):
        # Return the value following "--<name>" in the training args,
        # or None when the flag is absent.
        key = "--" + name
        if key not in args.training_script_args:
            return None
        index = args.training_script_args.index(key) + 1
        return args.training_script_args[index]
    if extend_vocab:
        extend_word_emb(get_param("ernie_config_path"), get_param("init_pretraining_params"))
    if extend_sent:
        extend_sent_emb(get_param("ernie_config_path"), get_param("init_pretraining_params"))
    start_procs(args)
if __name__ == "__main__":
    fine_tune_rep = 0
    lanch_args = parser.parse_args()
    # Task-specific flags are re-parsed out of the training-script args.
    finetuning_args = finetuning_parser.parse_args(
        lanch_args.training_script_args)
    if finetuning_args.stream_job and finetuning_args.stream_job != "":
        # Streaming mode: keep re-finetuning from streamed snapshots.
        stream(finetuning_args, lanch_args)
    else:
        init_path = finetuning_args.init_pretraining_params
        print("init model: %s" % init_path)
        # Task name = last path component of the training data directory.
        finetuning_data = os.path.dirname(finetuning_args.train_set)
        task_name = finetuning_data.split('/')[-1]
        if not finetuning_args.use_fp16:
            # Strip the ".master" suffix from fp16-format checkpoints.
            retcode, ret = commands.getstatusoutput(
                'rename .master "" ' + init_path + '/*.master'
            )
        #while True:
        #"""
        # Repeat fine-tuning 20 times, re-splitting the data each round and
        # logging round timestamps to finetune_reprec.txt.
        while fine_tune_rep < 20:
            with open('finetune_reprec.txt','a') as f:
                f.write('finetune repeat {0} start @ {1}\n'.format(fine_tune_rep, time.strftime("%Y-%m-%d %X",time.localtime())))
            subprocess.call("python3 pt_scaffold_split1.py {0}".format(task_name), shell=True)
            #file_dir = './package/task_data/clintox_0'
            #subprocess.call("cp {0}/train_{1}.tsv {2}/train.tsv".format(file_dir, fine_tune_rep, file_dir), shell=True)
            #subprocess.call("cp {0}/test_{1}.tsv {2}/test.tsv".format(file_dir, fine_tune_rep, file_dir), shell=True)
            #subprocess.call("cp {0}/dev_{1}.tsv {2}/dev.tsv".format(file_dir, fine_tune_rep, file_dir), shell=True)
            #subprocess.call("python3 package/task_data/reactAf/reactA_split.py", shell=True)
            main(lanch_args)
            with open('finetune_reprec.txt','a') as f:
                f.write('finetune repeat {0} finished @ {1}\n\n'.format(fine_tune_rep, time.strftime("%Y-%m-%d %X",time.localtime())))
            fine_tune_rep += 1
        #"""
        #main(lanch_args)
|
164387
|
from setuptools import setup

# The long description shown on PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# py2app bundling configuration.
APP = ['src/clippy.py']
DATA_FILES = []
OPTIONS = {
    'iconfile': 'clipboard.icns'
}

setup(
    name="clippy",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Clipboard manager using tkinter",
    python_requires=">=3.6.0",
    long_description=long_description,
    # FIX: without this, PyPI renders the markdown README as plain text.
    long_description_content_type="text/markdown",
    url="https://github.com/prashantgupta24/clipboard-manager",
    keywords=['clipboard', 'clipboard-manager'],
    app=APP,
    data_files=DATA_FILES,
    install_requires=["pyperclip>=1.6.4", "py2app==0.13"],
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
    test_suite='test',
    license='MIT',
    # FIX: classifiers must be a list — setuptools deprecated (and newer
    # versions reject) tuple values for this field.
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
|
164432
|
from geode.vector import *
from numpy import *
def test_register():
    """Check rigid_register / affine_register recover a known transform.

    NOTE(review): uses ``xrange``, so this test targets Python 2 and the
    geode library.
    """
    random.seed(217301)  # fixed seed for reproducibility
    for _ in xrange(10):
        for d in 2,3:
            # Random point cloud, random translation t, random matrix A.
            X0 = random.randn(10,d)
            t = random.randn(d)
            A = random.randn(d,d)
            # QR gives an orthogonal r; flip a row if needed so det(r) > 0
            # (a proper rotation).
            r,_ = linalg.qr(A)
            if linalg.det(r)<0:
                r[0] = -r[0]
            X1 = t + Matrix(r)*X0  # rigid transform of X0
            X2 = t + Matrix(A)*X0  # general affine transform of X0
            f = rigid_register(X0,X1)
            B = affine_register(X0,X2)
            # Recovered translation/rotation must match to ~1e-5.
            assert relative_error(t,f.t) < 1e-5
            assert relative_error(r,f.r.matrix()) < 1e-5
            # B is a (d+1)x(d+1) homogeneous matrix: [A | t; 0 1].
            assert relative_error(t,B[:d,d]) < 1e-5
            assert relative_error(A,B[:d,:d]) < 1e-5
            assert all(B[d]==hstack([zeros(d),1]))
|
164451
|
def helper(N):
    """Partition 1..N into two sets of equal sum and print them.

    Prints "NO" when no partition exists (N <= 2, or the total sum is odd);
    otherwise prints "YES", then len(s1), s1, len(s2), s2 (both sorted).
    """
    if (N <= 2):
        print ("NO")
        return
    value = (N * (N + 1)) // 2
    if(value%2==1):
        # An odd total can never split into two equal halves.
        print ("NO")
        return
    s1 = []
    s2 = []
    if (N%2==0):
        # Even N (total even): each pair (start, last) has the same sum, so
        # alternating whole pairs between the two sets balances them.
        # BUG FIX: the original set a dead `turn` variable and never toggled
        # `shift`, so every pair landed in s1 and s2 stayed empty.
        shift = True
        start = 1
        last = N
        while (start < last):
            if (shift):
                s1.append(start)
                s1.append(last)
            else:
                s2.append(start)
                s2.append(last)
            shift = not shift
            start += 1
            last -= 1
    else:
        # Odd N: greedily take numbers from N downward into s1 until it
        # reaches half the total (the final remainder is still unvisited
        # because it is <= the current i); everything unvisited goes to s2.
        rem = value // 2
        vis = [False] * (N + 1)
        vis[0] = True
        for i in range (N , 0, -1):
            if (rem > i):
                s1.append(i)
                vis[i] = True
                rem -= i
            else:
                s1.append(rem)
                vis[rem] = True
                break
        for i in range (1, N + 1):
            if (not vis[i]):
                s2.append(i)
    s1.sort()
    s2.sort()
    print("YES")
    print (len(s1))
    print(s1)
    print(len(s2))
    print(s2)
# Script entry: read N from stdin and print an equal-sum partition of 1..N.
n = int(input())
helper(n)
|
164459
|
from collections import namedtuple
import xml.etree.ElementTree as ET
OPERA_TIMESTAMP_FORMAT = "%Y%m%dT%H:%M:%S"
def parse_receipts_xml(receipt_xml_data):
    """Parse receipts XML and lazily yield one namedtuple per <receipt>."""
    root = ET.fromstring(receipt_xml_data)
    receipts = root.findall('receipt')
    return map(receipt_to_namedtuple, receipts)
def receipt_element_to_dict(element):
    """
    Turn an ElementTree element '<data><el>1</el></data>' into {el: 1}.
    Not recursive!
    >>> data = ET.fromstring("<data><el>1</el></data>")
    >>> receipt_element_to_dict(data)
    {'el': '1'}
    >>>
    """
    # FIX: Element.getchildren() was deprecated since Python 3.2 and removed
    # in 3.9; iterating the element directly yields the same direct children.
    return {child.tag: child.text for child in element}
def receipt_to_namedtuple(element):
    """
    Turn an ElementTree element into an object with named params.
    Not recursive!
    >>> data = ET.fromstring("<data><el>1</el></data>")
    >>> receipt_to_namedtuple(data)
    data(el='1')
    """
    attrs = receipt_element_to_dict(element)
    # The tuple type is named after the element's tag; one field per child.
    klass = namedtuple(element.tag, attrs.keys())
    return klass(**attrs)
def parse_post_event_xml(post_event_xml_data):
    """Map each <field name="...">text</field> under the root to name -> text."""
    root = ET.fromstring(post_event_xml_data)
    return {node.attrib['name']: node.text for node in root.findall('field')}
|
164471
|
from typing import Any, Callable, List, Optional, Sequence, Type, TypeVar, Union
from visions.relations import IdentityRelation, InferenceRelation
from visions.types.type import VisionsBaseType
T = TypeVar("T")
def process_relation(items: Union[dict, Type[VisionsBaseType]]) -> IdentityRelation:
    """Build an IdentityRelation from either a kwargs dict or a related type."""
    if isinstance(items, dict):
        return IdentityRelation(**items)
    if issubclass(items, VisionsBaseType):
        return IdentityRelation(related_type=items)
    raise TypeError("identity should be a list, a dict of params or related_type.")
def create_type(
    name: str,
    contains: Callable[[Any, dict], bool],
    identity: Optional[
        Union[Type[VisionsBaseType], List[Union[dict, Type[VisionsBaseType]]], dict]
    ] = None,
    inference: Optional[Union[List[dict], dict]] = None,
):
    """Dynamically build a VisionsBaseType subclass named *name*.

    :param name: class name of the generated type.
    :param contains: membership predicate ``(series, state) -> bool``.
    :param identity: identity relation spec(s) — a related type, a kwargs
        dict, or a list of either; None means no identity relations.
    :param inference: inference relation kwargs dict, or a list of them.
    :return: the newly created type object (a VisionsBaseType subclass).
    """
    def get_relations():
        # Normalize `identity` (captured from the enclosing call) into a
        # list of IdentityRelation, then append any inference relations.
        if isinstance(identity, Sequence):
            relations = [process_relation(item) for item in identity]
        else:
            relations = [] if identity is None else [process_relation(identity)]
        if inference is not None:
            if isinstance(inference, dict):
                relations += [InferenceRelation(**inference)]
            elif isinstance(inference, list):
                relations += [InferenceRelation(**params) for params in inference]
            else:
                raise TypeError("inference should be a list or a dict of params.")
        return relations
    def contains_op(series, state):
        return contains(series, state)
    # Build the class with the two closures exposed as static methods.
    return type(
        name,
        (VisionsBaseType,),
        {
            "get_relations": staticmethod(get_relations),
            "contains_op": staticmethod(contains_op),
        },
    )
|
164476
|
from typing import Tuple, Union
from Crypto.Cipher.AES import MODE_ECB
from Crypto.Cipher.AES import new as new_aes_ctx
from Crypto.Cipher.PKCS1_OAEP import new as new_pkcs1_oaep_ctx
from Crypto.Hash import SHA256
from Crypto.PublicKey.RSA import import_key as import_rsa_key
from enum import IntEnum
from json import dumps as json_serialize
from json import loads as json_deserialize
from json import JSONDecodeError
from pathlib import Path
from random import randint
from zlib import compress as zlib_compress
from zlib import decompress as zlib_decompress
# Probe for a zstandard implementation: prefer the `zstandard` package and
# fall back to the legacy `zstd` bindings.
zstd_found = False
zstandard_found = False
try:
    from zstandard import ZstdCompressor
    from zstandard import ZstdDecompressor
    zstandard_found = True
except ImportError:
    pass
if not zstandard_found:
    try:
        from zstd import ZSTD_compress
        from zstd import ZSTD_uncompress
        zstd_found = True
    except ImportError:
        pass
# BUG FIX: the original tested only `zstd_found`, so a successful
# `zstandard` import still raised ImportError here.  (Also fixes the
# "compatiable" typo in the message.)
if not (zstd_found or zstandard_found):
    raise ImportError('Unable to find any compatible zstandard library!')
class CompressionFlag(IntEnum):
    """Low-nibble values of the tinfoil index flag byte (compression method)."""
    ZLIB_COMPRESSION = 0x0E
    ZSTD_COMPRESSION = 0x0D
    NO_COMPRESSION = 0x00
class EncryptionFlag(IntEnum):
    """High-nibble values of the tinfoil index flag byte (encryption on/off)."""
    ENCRYPT = 0xF0
    NO_ENCRYPT = 0x00
def create_tinfoil_index(
    index_to_write: dict,
    out_path: Path,
    compression_flag: int,
    rsa_pub_key_path: Path = None,
    vm_path: Path = None
):
    """Serialize *index_to_write* into a tinfoil index file at *out_path*.

    Written layout: b"TINFOIL" magic, 1 flag byte (compression | encryption
    nibbles), 0x100-byte session key block, 8-byte little-endian payload
    size, then the (optionally compressed and encrypted) payload.

    :param index_to_write: JSON-serializable index data.
    :param out_path: destination file; parent directories are created.
    :param compression_flag: one of the CompressionFlag values.
    :param rsa_pub_key_path: optional public key; when given, the payload is
        AES-ECB encrypted under a random key that is RSA-OAEP wrapped.
    :param vm_path: optional VM blob, prepended with a 13 37 B0 0B tag.
    :raises NotImplementedError: for an unknown compression_flag.
    """
    to_compress_buffer = b""
    if vm_path is not None and vm_path.is_file():
        # Optional VM payload: tag + 4-byte little-endian length + blob.
        to_compress_buffer += b"\x13\x37\xB0\x0B"
        vm_buffer = b""
        with open(vm_path, "rb") as vm_stream:
            vm_buffer += vm_stream.read()
        to_compress_buffer += len(vm_buffer).to_bytes(4, "little")
        to_compress_buffer += vm_buffer
    to_compress_buffer += bytes(json_serialize(index_to_write).encode())
    to_write_buffer = b""
    session_key = b""
    if compression_flag == CompressionFlag.ZSTD_COMPRESSION:
        if zstandard_found:
            to_write_buffer += ZstdCompressor(level=22).compress(
                to_compress_buffer
            )
        elif zstd_found:
            to_write_buffer += ZSTD_compress(to_compress_buffer, level=22)
    elif compression_flag == CompressionFlag.ZLIB_COMPRESSION:
        to_write_buffer += zlib_compress(to_compress_buffer, 9)
    elif compression_flag == CompressionFlag.NO_COMPRESSION:
        to_write_buffer += to_compress_buffer
    else:
        raise NotImplementedError(
            "Compression method supplied is not implemented yet."
        )
    data_size = len(to_write_buffer)
    flag = None
    # Pad up to the AES block size (always adds 1-16 bytes); readers use
    # data_size to strip the padding again.
    to_write_buffer += (b"\x00" * (0x10 - (data_size % 0x10)))
    if rsa_pub_key_path is not None and rsa_pub_key_path.is_file():
        def rand_aes_key_generator() -> bytes:
            # NOTE(review): `random.randint` is not cryptographically
            # secure; the `secrets` module would be the safer key source.
            return randint(0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF).to_bytes(
                0x10, byteorder="big"
            )
        rsa_pub_key = import_rsa_key(open(rsa_pub_key_path).read())
        rand_aes_key = rand_aes_key_generator()
        pkcs1_oaep_ctx = new_pkcs1_oaep_ctx(
            rsa_pub_key,
            hashAlgo=SHA256,
            label=b""
        )
        aes_ctx = new_aes_ctx(rand_aes_key, MODE_ECB)
        # Session key block holds the RSA-wrapped AES key (0x100 bytes).
        session_key += pkcs1_oaep_ctx.encrypt(rand_aes_key)
        to_write_buffer = aes_ctx.encrypt(to_write_buffer)
        flag = compression_flag | EncryptionFlag.ENCRYPT
    else:
        # Unencrypted: a zero-filled session key keeps the layout fixed.
        session_key += b"\x00" * 0x100
        flag = compression_flag | EncryptionFlag.NO_ENCRYPT
    Path(out_path.parent).mkdir(parents=True, exist_ok=True)
    with open(out_path, "wb") as out_stream:
        out_stream.write(b"TINFOIL")
        out_stream.write(flag.to_bytes(1, byteorder="little"))
        out_stream.write(session_key)
        out_stream.write(data_size.to_bytes(8, "little"))
        out_stream.write(to_write_buffer)
def read_index(index_path: Path, rsa_priv_key_path: Path = None) -> dict:
    """Read a tinfoil index file and return the deserialized JSON payload.

    File layout: b"TINFOIL" magic (7 bytes), one flag byte (high nibble =
    encryption, low nibble = compression), 0x100-byte session key, 8-byte
    little-endian payload size, then the payload.

    :param index_path: the index file to read.
    :param rsa_priv_key_path: private key file; required only when the
        index is encrypted.
    :raises RuntimeError: missing file, bad magic, unknown compression,
        missing key for an encrypted index, or undecodable JSON.
    """
    if index_path is None or not index_path.is_file():
        raise RuntimeError(
            f"Unable to read non-existant index file \"{index_path}\""
        )
    encryption_flag = None
    compression_flag = None
    session_key = None
    data_size = None
    to_read_buffer = None
    with open(index_path, "rb") as index_stream:
        # BUG FIX: the stream is binary, so read(7) yields bytes.  The old
        # code did str(index_stream.read(7)), which produces "b'TINFOIL'"
        # and therefore never equals "TINFOIL" — every valid index was
        # rejected.  Compare bytes against bytes instead.
        magic = index_stream.read(7)
        if magic != b"TINFOIL":
            raise RuntimeError(
                "Invalid tinfoil index magic.\n\nExpected Magic = " +
                f"\"TINFOIL\"\nMagic in index file = \"{magic}\""
            )
        flags = index_stream.read(1)[0]
        encryption_flag = flags & 0xF0
        key_available = rsa_priv_key_path is not None and \
            rsa_priv_key_path.is_file()
        if encryption_flag == EncryptionFlag.ENCRYPT and not key_available:
            raise RuntimeError(
                "Unable to decrypt encrypted index without private key."
            )
        compression_flag = flags & 0x0F
        # NOTE(review): `int in IntEnum` raises TypeError on Python < 3.12;
        # compare against member values explicitly if older interpreters
        # must be supported.
        if compression_flag not in CompressionFlag:
            raise RuntimeError(
                "Unimplemented compression method encountered while reading " +
                "index header."
            )
        session_key = index_stream.read(0x100)
        data_size = int.from_bytes(index_stream.read(8), byteorder="little")
        to_read_buffer = index_stream.read()
    if encryption_flag == EncryptionFlag.ENCRYPT:
        # Unwrap the AES session key with RSA-OAEP, then decrypt the payload.
        rsa_priv_key = import_rsa_key(open(rsa_priv_key_path).read())
        pkcs1_oaep_ctx = new_pkcs1_oaep_ctx(
            rsa_priv_key,
            hashAlgo=SHA256,
            label=b""
        )
        aes_key = pkcs1_oaep_ctx.decrypt(session_key)
        aes_ctx = new_aes_ctx(aes_key, MODE_ECB)
        to_read_buffer = aes_ctx.decrypt(to_read_buffer)
    # Slicing to data_size strips the zero padding added at write time.
    if compression_flag == CompressionFlag.ZSTD_COMPRESSION:
        if zstandard_found:
            to_read_buffer = ZstdDecompressor().decompress(
                to_read_buffer[:data_size]
            )
        elif zstd_found:
            to_read_buffer = ZSTD_uncompress(
                to_read_buffer[:data_size]
            )
    elif compression_flag == CompressionFlag.ZLIB_COMPRESSION:
        to_read_buffer = zlib_decompress(to_read_buffer[:data_size])
    elif compression_flag == CompressionFlag.NO_COMPRESSION:
        to_read_buffer = to_read_buffer[:data_size]
    try:
        return json_deserialize(to_read_buffer)
    except JSONDecodeError:
        raise RuntimeError("Unable to deserialize index data.")
def format_bytes(size: int, nround: int = 2) -> Tuple[Union[int, float], str]:
    """Convert a byte count into a human-readable (value, unit) pair.

    :param size: number of bytes.
    :param nround: decimal places for the rounded value.
    :return: scaled size and its unit label, e.g. ``(1.5, 'KB')``.
        (The original annotation claimed the second element was numeric;
        it has always been the unit string.)
    """
    power = 2**10
    n = 0
    power_labels = {0: '', 1: 'K', 2: 'M', 3: 'G', 4: 'T', 5: 'P'}
    # Stop at the largest known label so absurd sizes cannot raise KeyError.
    while size >= power and n < 5:
        size /= power
        n += 1
    return (round(size, nround), power_labels[n]+'B')
|
164533
|
import io
import re
from rich.console import Console, RenderableType
from rich.__main__ import make_test_card
from ._card_render import expected
re_link_ids = re.compile(r"id=[\d\.\-]*?;.*?\x1b")

def replace_link_ids(render: str) -> str:
    """Normalize terminal hyperlink IDs in *render*.

    Link IDs embed a random number and a system path, which would make
    rendered output differ between runs; substitute a fixed placeholder.
    """
    return re.sub(re_link_ids, "id=0;foo\x1b", render)
def render(renderable: RenderableType) -> str:
    """Render *renderable* on a 100-column truecolor console and return the
    captured output with link IDs normalized for reproducibility."""
    buffer = io.StringIO()
    console = Console(
        width=100, file=buffer, color_system="truecolor", legacy_windows=False
    )
    console.print(renderable)
    return replace_link_ids(buffer.getvalue())
def test_card_render():
    """The rich test card must render exactly as the recorded snapshot."""
    assert render(make_test_card()) == expected
if __name__ == "__main__":
    # Snapshot-regeneration mode: re-render the test card and overwrite the
    # recorded `expected` value in _card_render.py.
    card = make_test_card()
    with open("_card_render.py", "wt") as fh:
        card_render = render(card)
        print(card_render)
        fh.write(f"expected={card_render!r}")
|
164563
|
from setuptools import setup
from pppd import __version__ as version
def long_description():
    """Build the package long description from README.rst, appending
    CHANGELOG.rst when it exists.

    FIXES: use context managers so the file handles are closed, and catch
    only OSError instead of a bare ``except`` that swallowed everything
    (including KeyboardInterrupt).
    """
    with open('README.rst') as f:
        description = f.read()
    try:
        with open('CHANGELOG.rst') as f:
            description += '\n\n' + f.read()
    except OSError:
        # The changelog is optional; fall back to the README alone.
        pass
    return description
# Package metadata for python-pppd.  The version is imported from the
# module itself so it is defined in exactly one place.
setup(
    name='python-pppd',
    version=version,
    url='https://github.com/cour4g3/python-pppd',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Simple library for controlling PPP connections with pppd.',
    long_description=long_description(),
    py_modules=['pppd'],
    provides=['pppd'],
    platforms='any',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: System :: Networking',
    ]
)
|
164588
|
# Demo of reading and writing Excel workbooks with openpyxl.
# NOTE(review): hardcoded absolute Windows path — the script only runs on a
# machine where this folder exists.
from openpyxl import load_workbook
xlsx_file = "E:\\hello-git-sourcetree\\R_GO\\Python_RPA\\"
# read_only mode streams cells, which is cheaper for large files.
xlsx = load_workbook(xlsx_file+"result.xlsx", read_only =True)
sheet=xlsx.active
print(sheet['A25'].value)
print(sheet['B1'].value)
# Row 1 as a tuple of cells.
row = sheet['1']
for data in row:
    print(data.value)
# Reopen without read_only so column/range access below is available.
xlsx=load_workbook(xlsx_file+"result.xlsx")
sheet=xlsx.active
col = sheet['A']
'''
for data in col:
    print(data.value)
'''
print('-'*10,'multi row_data call','-'*10)
# Rows 1-2: a tuple of row tuples.
rows = sheet['1:2']
for row in rows:
    for rowdata in row:
        print(rowdata.value)
print('시트 일부의 셀 데이터 읽기')
# Rectangular range A3:B5.
rows = sheet['A3:B5']
for row in rows:
    for cel in row:
        print(cel.value)
# --- Writing a new workbook ---
from openpyxl import Workbook
xlsx=Workbook()
sheet=xlsx.active
sheet['A1'] = 'my input data'
#xlsx.save('other.xlsx')
# append() writes one row per call below existing content.
sheet.append(['A1-data','B1-data','C1-data'])
sheet.append(['A2-data','B2-data','C2-data'])
xlsx.save('other2.xlsx')
# Additional named sheet in the same workbook.
sheet = xlsx.create_sheet('new sheet')
sheet['A2'] = 'AIRIM'
xlsx.save('new_xlsx.xlsx')
|
164612
|
from rest_framework import serializers
from .models import Notification
from users.serializers import UserProfileSerializer
from feed.serializers import MumbleSerializer
from article.serializers import ArticleSerializer
from discussion.serializers import DiscussionSerializer
class NotificationSerializer(serializers.ModelSerializer):
    """Serializes Notification rows, embedding the related object that
    matches the notification's type (and None for the others)."""
    created_by = serializers.SerializerMethodField(read_only=True)
    followed_by = serializers.SerializerMethodField(read_only=True)
    mumble = serializers.SerializerMethodField(read_only=True)
    article = serializers.SerializerMethodField(read_only=True)
    discussion = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Notification
        fields = '__all__'

    def get_created_by(self, obj):
        # The creator profile is embedded regardless of notification type.
        return UserProfileSerializer(obj.created_by.userprofile, many=False).data

    def get_followed_by(self, obj):
        if obj.notification_type != 'follow':
            return None
        return UserProfileSerializer(obj.followed_by.userprofile, many=False).data

    def get_mumble(self, obj):
        if obj.notification_type != 'mumble':
            return None
        return MumbleSerializer(obj.mumble, many=False).data

    def get_article(self, obj):
        if obj.notification_type != 'article':
            return None
        return ArticleSerializer(obj.article, many=False).data

    def get_discussion(self, obj):
        if obj.notification_type != 'discussion':
            return None
        return DiscussionSerializer(obj.discussion, many=False).data
|
164623
|
import sqlite3

# Create the RandUser database with a Users table.
# FIX: IF NOT EXISTS makes the script idempotent — re-running it no longer
# raises sqlite3.OperationalError ("table Users already exists").
conexion = sqlite3.connect('RandUser.db')
cursor = conexion.cursor()
cursor.execute('''
    CREATE TABLE IF NOT EXISTS Users(
        Gender TEXT NOT NULL,
        First TEXT NOT NULL,
        Last TEXT NOT NULL,
        Location TEXT NOT NULL,
        Email TEXT NOT NULL)
''')
conexion.commit()  # persist before closing the connection
conexion.close()
|
164638
|
import os
import glob
import random
import numpy as np
import torchaudio as T
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
def create_dataloader(params, train, is_distributed=False):
    """Build a DataLoader over AudioDataset.

    In distributed mode a DistributedSampler shards the data and shuffling
    is delegated to it (DataLoader shuffle must then be off).
    """
    dataset = AudioDataset(params, train)
    sampler = DistributedSampler(dataset) if is_distributed else None
    return DataLoader(
        dataset=dataset,
        batch_size=params.batch_size,
        shuffle=sampler is None,
        sampler=sampler,
        num_workers=0,
        pin_memory=True,
        drop_last=True,
    )
class AudioDataset(Dataset):
    """Dataset of random fixed-length wav segments plus downsampled copies.

    Recursively collects ``*.wav`` under ``params.path``; each item is a
    dict with "audio" (original rate), "lr_audio" (resampled), and the
    file-stem "id".
    """
    def __init__(self, params, train):
        # `train` is stored but not otherwise used in this class.
        self.params = params
        self.train = train
        self.path = params.path
        self.wav_list = glob.glob(
            os.path.join(self.path, "**", "*.wav"), recursive=True
        )
        # Identity index mapping; only reshuffled via shuffle_mapping().
        self.mapping = [i for i in range(len(self.wav_list))]
        # NOTE(review): Resample(orig_freq=new_sample_rate,
        # new_freq=sample_rate) — i.e. this *down*samples from
        # params.new_sample_rate to params.sample_rate; confirm the two
        # config fields are meant this way round.  "sinc_interpolation"
        # is the deprecated spelling in newer torchaudio.
        self.downsample = T.transforms.Resample(
            params.new_sample_rate,
            params.sample_rate,
            resampling_method="sinc_interpolation",
        )
    def __len__(self):
        # One item per wav file found on disk.
        return len(self.wav_list)
    def __getitem__(self, idx):
        return self.my_getitem(idx)
    def shuffle_mapping(self):
        random.shuffle(self.mapping)
    def my_getitem(self, idx):
        # Load one file and cut a random params.n_segment-sample window.
        wavpath = self.wav_list[idx]
        id = os.path.basename(wavpath).split(".")[0]
        # NOTE(review): T.load_wav was removed from modern torchaudio;
        # this code requires an old release.
        audio, sr = T.load_wav(wavpath)
        if self.params.new_sample_rate != sr:
            raise ValueError(f"Invalid sample rate {sr}.")
        # assumes every file is longer than n_segment samples — TODO confirm
        start = np.random.randint(0, audio.shape[1] - self.params.n_segment - 1)
        if audio.shape[0] == 2:
            # Stereo input: keep only the first channel.
            audio = audio[0, :]
        audio = audio.squeeze(0)[start : start + self.params.n_segment]
        lr_audio = self.downsample(audio)
        # Scale 16-bit integer sample range to roughly [-1, 1].
        lr_audio = lr_audio / 32767.5
        audio = audio / 32767.5
        return {"audio": audio, "lr_audio": lr_audio, "id": id}
|
164759
|
import logging
import math
# Root-logger setup: DEBUG level, appending to logs.log as UTF-8, with a
# verbose format including function name, file, and line number.
logging.basicConfig(level=logging.DEBUG,
                    handlers=[logging.FileHandler('logs.log', 'a', 'utf-8')],
                    format="%(asctime)s %(levelname)-6s - %(funcName)-8s - %(filename)s - %(lineno)-3d - %(message)s",
                    datefmt="[%Y-%m-%d] %H:%M:%S - ",
                    )
logging.info("This is an info log")
def square_root(x):
    """Return the square root of *x*, or None (logging the traceback)
    when *x* is negative."""
    logging.debug(f"Getting the square root of {x}")
    try:
        root = math.sqrt(x)
    except ValueError:
        # math.sqrt raises ValueError for negative input.
        logging.exception("Cannot get square root of a negative number")
        # or
        # logging.error("Cannot get square root of a negative number", exc_info=True)
        return None
    else:
        logging.info(f"The square root of {x} is {root:.5f}")
        return root
# Demo calls: one valid input and one that exercises the error path.
square_root(5)
square_root(-5)
|
164770
|
from typing import Any, Mapping, Optional, Union
import copy
import os
import re
import yaml
from machinable.errors import ConfigurationError
from machinable.utils import sentinel, unflatten_dict, update_dict
class Loader(yaml.SafeLoader):
    """SafeLoader that resolves ``!include`` / ``$/path`` directives
    relative to the including file (or a given cwd for string input)."""
    def __init__(self, stream, cwd="./"):
        # Stash the base directory used to resolve relative includes.
        if isinstance(stream, str):
            self._root = cwd
        else:
            # from filestream
            self._root = os.path.split(stream.name)[0]
        super().__init__(stream)
    def include(self, node):
        # Resolve the scalar to a filesystem path (stripping the "$/"
        # prefix) and load that file as YAML via from_file().
        target = self.construct_scalar(node)
        if target.startswith("$/"):
            target = target[2:]
        filename = (
            target
            if os.path.isabs(target)
            else os.path.join(self._root, target)
        )
        return from_file(filename)
# Support $/ notation for includes
Loader.add_constructor("!include", Loader.include)
Loader.add_implicit_resolver(
"!include", re.compile(r"\$\/([^#^ ]*)"), first=None
)
# Support scientific number formats
Loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
def from_string(text: str, cwd="./") -> dict:
    """Parse YAML from *text* and merge any "+" includes into the result.

    The "+" key may hold one include or a list of them; string entries of
    the form "$/path" are loaded from disk relative to *cwd*.  Later
    includes override earlier keys via update_dict.
    """
    config = yaml.load(text, lambda stream: Loader(stream, cwd))
    if "+" not in config:
        return config
    includes = config.pop("+")
    if not isinstance(includes, list):
        includes = [includes]
    for include in includes:
        if isinstance(include, str) and include.startswith("$/"):
            relative = include[2:]
            if os.path.isabs(relative):
                path = relative
            else:
                path = os.path.join(cwd, relative)
            include = from_file(path)
        if not isinstance(include, Mapping):
            raise ConfigurationError(
                f"Include must be a mapping. {include} given."
            )
        config = update_dict(config, include)
    return config
def from_file(filename: str, default: Any = sentinel) -> Union[dict, Any]:
    """Load and parse a YAML file; return *default* (or None when no
    default was supplied) if the file does not exist."""
    if not os.path.isfile(filename):
        return default if default is not sentinel else None
    with open(filename) as f:
        text = f.read()
    base_dir = os.path.abspath(os.path.dirname(filename))
    return from_string(text, base_dir)
def parse(config: dict, components: Optional[dict] = None) -> dict:
    """Expand raw config sections into a module registry.

    Sections named "<kind>:<prefix>" map component keys (optionally with
    "=alias" and "^parent" inheritance markers) to config mappings; the
    result maps full module names to metadata dicts.

    :param config: raw configuration mapping as loaded from YAML.
    :param components: global scope used to resolve "^parent" references
        not found among the modules parsed so far.
    :raises ConfigurationError: on non-mapping sections/components,
        ambiguous aliases, or unresolvable parents.
    """
    if components is None:
        components = {}
    modules = {}
    for section, elements in config.items():
        # todo: use regex to validate section string
        # Skip sections without the "<kind>:<prefix>" form or without data.
        if ":" not in section or elements is None:
            continue
        if not isinstance(elements, Mapping):
            raise ConfigurationError(
                f"Invalid configuration under '{section}'. Expected mapping but found {type(elements).__name__}."
            )
        component_kind, prefix = section.split(":")
        for key, data in elements.items():
            if data is None:
                data = {}
            if not isinstance(data, Mapping):
                raise ConfigurationError(
                    f"Invalid configuration for component '{key}'. Expected mapping but found {type(data).__name__}."
                )
            # resolve dot notation
            if data.pop("_unflatten", True):
                data = unflatten_dict(data, copy=True)
            # todo: regex validation of key
            # "key=alias" declares an alias; aliases must be unique among
            # the modules parsed so far.
            alias = None
            if key.find("=") != -1:
                key, alias = key.split("=")
                if alias in modules:
                    raise ConfigurationError(
                        f"Alias '{alias}' for '{key.split('^')[0]}' is ambiguous"
                    )
            parent = None
            lineage = []
            if key.find("^") != -1:
                # resolve inheritance
                key, parent = key.split("^")
                # find in current scope, considering aliases
                inherited = None
                for candidate in modules.values():
                    if (
                        candidate["module"] == parent
                        or candidate["key"] == parent
                        or candidate["alias"] == parent
                    ):
                        inherited = candidate
                if inherited is None:
                    # search in global scope, using full module name only
                    inherited = components.get(parent, None)
                if inherited is None:
                    raise ConfigurationError(
                        f"Parent component '^{parent}' of '{key}' does not exist."
                    )
                # push standard name to lineage
                lineage += [inherited["name"]] + inherited["lineage"]
                # inherit the parent's configuration
                # todo: deepcopy might be too conservative here
                data = update_dict(
                    copy.deepcopy(inherited["config_data"]), data
                )
            module = key if prefix == "" else prefix + "." + key
            modules[module] = {
                # note that the key specifies the full module path that may
                # change when imported from a vendor project while name
                # always specifies the module path relative to the project
                # the config is defined in
                "name": module,
                "module": module,
                "kind": component_kind,
                "prefix": prefix,
                "key": key,
                "alias": alias,
                "parent": parent,
                "lineage": lineage,
                "config_data": data,
            }
    return modules
def prefix(config: dict, module_prefix: str) -> dict:
    """Return *config* re-keyed with ``module_prefix.`` prepended to every
    entry's module path (both as dict key and in the "module" field)."""
    prefixed = {}
    for data in config.values():
        qualified = module_prefix + "." + data["module"]
        prefixed[qualified] = {**data, "module": qualified}
    return prefixed
|
164790
|
import time
import numpy as np
from PIL import Image as pil_image
from keras.preprocessing.image import save_img
from keras import layers
from keras.applications import vgg16
from keras import backend as K
import matplotlib.pyplot as plt
def normalize(x):
    """utility function to normalize a tensor.
    # Arguments
        x: An input tensor.
    # Returns
        The input scaled to unit RMS (epsilon keeps the division safe).
    """
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + K.epsilon())
def deprocess_image(x):
    """utility function to convert a float array into a valid uint8 image.

    NOTE: mutates *x* in place (the `-=`, `/=`, `*=`, `+=` below write into
    the caller's array); `visualize_layer` later reads the mutated array's
    mean/std via `process_image`, so do not "fix" this to work on a copy.

    # Arguments
        x: A numpy-array representing the generated image.
    # Returns
        A processed numpy-array, which could be used in e.g. imshow.
    """
    # normalize tensor: center on 0., ensure std is 0.25
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.25
    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        # Move channels last so the result is a displayable HWC image.
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def process_image(x, former):
    """utility function to convert a valid uint8 image back into a float array.
    Reverses `deprocess_image`.
    # Arguments
        x: A numpy-array, which could be used in e.g. imshow.
        former: The former numpy-array.
                Need to determine the former mean and variance.
    # Returns
        A processed numpy-array representing the generated image.
    """
    if K.image_data_format() == 'channels_first':
        x = x.transpose((2, 0, 1))
    # Undo the [0, 255] mapping, then rescale to former's distribution.
    centered = x / 255 - 0.5
    return centered * 4 * former.std() + former.mean()
def visualize_layer(model,
                    layer_name,
                    step=0.5,
                    epochs=25,
                    upscaling_steps=10,
                    upscaling_factor=1.1,
                    output_dim=(128, 128),
                    filter_range=(0, None),
                    grid_columns=8,
                    show_filters=True,
                    image_size_multiplier=2):
    """Visualizes the most relevant filters of one conv-layer in a certain model.
    # Arguments
        model: The model containing layer_name.
        layer_name: The name of the layer to be visualized.
                    Has to be a part of model.
        step: step size for gradient ascent.
        epochs: Number of iterations for gradient ascent.
        upscaling_steps: Number of upscaling steps.
                         Starting image is in this case (80, 80).
        upscaling_factor: Factor to which to slowly upgrade
                          the image towards output_dim.
        output_dim: [img_width, img_height] The output image dimensions.
        filter_range: Tupel[lower, upper]
                      Determines the to be computed filter numbers.
                      If the second value is `None`,
                      the last filter will be inferred as the upper boundary.
        grid_columns: number of columns in the stitched output grid.
        show_filters: if True, also display the grid with matplotlib.
        image_size_multiplier: per-cell scale of the matplotlib figure.
    """
    def _generate_filter_image(input_img,
                               layer_output,
                               filter_index,
                               channels=3):
        """Generates image for one particular filter.
        # Arguments
            input_img: The input-image Tensor.
            layer_output: The output-image Tensor.
            filter_index: The to be processed filter number.
                          Assumed to be valid.
        #Returns
            Either None if no image could be generated.
            or a tuple of the image (array) itself and the last loss.
        """
        s_time = time.time()
        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])
        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]
        # normalization trick: we normalize the gradient
        grads = normalize(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])
        # we start from a gray image with some random noise
        intermediate_dim = tuple(
            int(x / (upscaling_factor ** upscaling_steps)) for x in output_dim)
        def _get_input_random_image():
            # Gray base image plus noise, in the backend's channel order.
            if K.image_data_format() == 'channels_first':
                input_img_data = np.random.random(
                    (1, channels, intermediate_dim[0], intermediate_dim[1]))
            else:
                input_img_data = np.random.random(
                    (1, intermediate_dim[0], intermediate_dim[1], channels))
            input_img_data = (input_img_data - 0.5) * 20 + 128
            return input_img_data
        def _get_random_noise(array):
            return np.random.randn(*array.shape) * 0.1
        input_img_data = _get_input_random_image()
        # Slowly upscaling towards the original size prevents
        # a dominating high-frequency of the to visualized structure
        # as it would occur if we directly compute the 412d-image.
        # Behaves as a better starting point for each following dimension
        # and therefore avoids poor local minima
        # NOTE(review): this `grads` list shadows the gradient tensor
        # computed above; harmless here (the tensor is no longer needed)
        # but easy to misread.
        losses, grads = [], []
        reinit_enabled = True
        for up in reversed(range(upscaling_steps)):
            # we run gradient ascent for e.g. 20 steps
            for epoch in range(epochs):
                loss_value, grads_value = iterate([input_img_data])
                losses.append(loss_value)
                grads.append(np.mean(np.abs(grads_value)))
                input_img_data += grads_value * step
            # One-time random perturbation if the ascent stalls (tiny total
            # loss, or a small (< 0.5) last loss delta).
            if reinit_enabled and (np.sum(losses) <= 1e-04 or (len(losses) > 1 and np.diff(losses)[-1] < 0.5)):
                input_img_data = input_img_data + _get_random_noise(input_img_data)
                reinit_enabled = False
            intermediate_dim = tuple(
                int(x / (upscaling_factor ** up)) for x in output_dim)
            # Upscale
            mode = "L" if channels == 1 else None
            img = deprocess_image(input_img_data[0])
            if channels == 1:
                # PIL "L" mode needs a 2-D array; restore the channel axis
                # after resizing.
                img = img.reshape((img.shape[0], img.shape[1]))
                img = np.array(pil_image.fromarray(img, mode).resize(intermediate_dim,
                                                                     pil_image.BICUBIC))
                img = img.reshape((img.shape[0], img.shape[1], 1))
            else:
                img = np.array(pil_image.fromarray(img).resize(intermediate_dim,
                                                               pil_image.BICUBIC))
            input_img_data = [process_image(img, input_img_data[0])]
        # decode the resulting input image
        img = deprocess_image(input_img_data[0])
        e_time = time.time()
        print('{:3}'.format(filter_index,),end =" ")
        return img, loss_value
    def _draw_filters(filters, columns=8, show_filters=True, channels=3):
        """Draw the best filters in a nxn grid.
        # Arguments
            filters: A List of generated images and their corresponding losses
                     for each processed filter.
            n: dimension of the grid.
               If none, the largest possible square will be used
        """
        rows = int(np.ceil(len(filters) / columns))
        output_dim = (filters[0][0].shape[0], filters[0][0].shape[1])
        # build a black picture with enough space for
        # e.g. our 8 x 8 filters of size 412 x 412, with a 5px margin in between
        MARGIN = 1
        width = rows * output_dim[0] + (rows - 1) * MARGIN
        height = columns * output_dim[1] + (columns - 1) * MARGIN
        stitched_filters = np.zeros((width, height, channels), dtype='uint8')
        # fill the picture with our saved filters
        for i in range(rows):
            for j in range(columns):
                idx = min(i * columns + j, len(filters) - 1)
                # Cells past the last filter are left black.
                if i * columns + j > len(filters) - 1:
                    img = np.zeros_like(filters[0][0])
                else:
                    img, _ = filters[idx]
                width_margin = (output_dim[0] + MARGIN) * i
                height_margin = (output_dim[1] + MARGIN) * j
                stitched_filters[
                    width_margin: width_margin + output_dim[0],
                    height_margin: height_margin + output_dim[1], :] = img
        if show_filters:
            fig_height = rows * image_size_multiplier
            fig_width = columns * image_size_multiplier
            fig = plt.figure(figsize=(fig_width, fig_height))
            plt.imshow(stitched_filters)
            plt.title('{0:}_{1:}x{2:}.png'.format(layer_name, rows, columns))
            plt.show()
        # save the result to disk
        save_img('{0:}_{1:}x{2:}.png'.format(layer_name, rows, columns), stitched_filters)
    # this is the placeholder for the input images
    assert len(model.inputs) == 1
    input_img = model.inputs[0]
    # assumes channels-last input tensors for the channel count — TODO confirm
    channels = K.int_shape(model.inputs[0])[-1]
    # get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers[0:]])
    output_layer = layer_dict[layer_name]
    assert isinstance(output_layer, layers.Conv2D)
    # Compute to be processed filter range
    filter_lower = filter_range[0]
    filter_upper = (min(filter_range[1],len(output_layer.get_weights()[1]))
                    if filter_range[1] is not None
                    else len(output_layer.get_weights()[1]))
    assert (filter_lower >= 0
            and filter_upper <= len(output_layer.get_weights()[1])
            and filter_upper > filter_lower)
    print('Compute filters {:} to {:}'.format(filter_lower, filter_upper))
    # iterate through each filter and generate its corresponding image
    processed_filters = []
    for f in range(filter_lower, filter_upper):
        img_loss = _generate_filter_image(input_img, output_layer.output, f, channels)
        if img_loss is not None:
            processed_filters.append(img_loss)
    print('{} filter processed.'.format(len(processed_filters)))
    # Finally draw and store the best filters to disk
    print("Filter Losses: ", [loss for f, loss in processed_filters])
    _draw_filters(processed_filters, grid_columns, show_filters)
if __name__ == '__main__':
    # the name of the layer we want to visualize
    # (see model definition at keras/applications/vgg16.py)
    LAYER_NAME = 'block5_conv1'
    # build the VGG16 network with ImageNet weights
    # (include_top=False drops the dense classifier head, which is not
    # needed for filter visualization)
    vgg = vgg16.VGG16(weights='imagenet', include_top=False)
    print('Model loaded.')
    vgg.summary()
    # example function call
    visualize_layer(vgg, LAYER_NAME, filter_range=(0, 4))
|
164869
|
# Mapping from supported task name to its numeric task id.
# NOTE(review): the gaps in the ids (3, 6, 7, 9) suggest more task types
# exist elsewhere in the project — confirm before relying on the numbering.
TASKS = {
    "binary_classification": 1,
    "multi_class_classification": 2,
    "entity_extraction": 4,
    "extractive_question_answering": 5,
    "summarization": 8,
    "single_column_regression": 10,
    "speech_recognition": 11,
}
# Dataset task tags accepted for dataset-backed runs.
DATASETS_TASKS = ["text-classification", "question-answering-extractive"]
|
164913
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *

# Script: render a grid poster of fictional flags from film & TV.
df = pd.read_csv("datasets/flagsfictional.csv")
# Group the records into rows of 3 cells for the grid chart.
data = pd.DataFrame(list(generate_batches([dict(row) for _,row in df.iterrows()], 3)))
# Foreground/background colours (tuple assignment, despite the odd spacing).
fg, bg="black", "#EEEEEE"
# Fallback still image for entries without their own 'image' URL.
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
# Dominant colour of the "Galactic" entry's flag, used below to pad that
# square flag out to the shared 318x198 cell size.
galactic_color = sorted(Image.from_url_with_cache(df.filter_rows("place ~ Galactic").iloc[0].flag).convert("RGBA").getcolors(65536), reverse=True)[0][1]
def process(d):
    # Build one grid cell: source title, place name, the flag (with per-flag
    # special-casing for aspect-ratio quirks), then a still from the work.
    return Image.from_column([
      Image.from_text(d['source'], arial(24, bold=True), fg=fg, bg=bg),
      Image.from_text(d['place'], arial(24, italics=True), max_width=320, align="center", fg=fg, bg=bg, padding=(0,0,0,4)),
      (Image.from_url_with_cache(d['flag']).resize((318,201)).crop((0,0,318,198)) if "Zamunda" in d['place'] else
       Image.from_url_with_cache(d['flag']).resize((318,198)) if "Galactic" not in d['place'] else
       Image.from_url_with_cache(printed(d['flag'])).resize_fixed_aspect(height=198).crop_to_aspect(1).pad_to_aspect(318,198,bg=galactic_color)).pad(1, "grey"),
      Image.from_url_with_cache(get_non(d, 'image', default_img)).crop_to_aspect(320,200).resize((320,200))
      ], padding=4, bg=bg)
title = Image.from_column([
    Image.from_text("Fictional flags from film & TV", arial(72, bold=True), fg=fg, bg=bg),
    Image.from_text(" ", arial(48, italics=True), fg=fg, bg=bg)
    ], bg=bg)
grid = grid_chart(data, process, padding=(10,20), bg=bg, yalign=0, title=title).pad(20, bg)
grid.save("output/flagsfictional.png")
|
164923
|
from a10sdk.common.A10BaseClass import A10BaseClass
class RoleList(A10BaseClass):
    """One entry of the ``role-list`` array.

    This class does not support CRUD operations; use the parent object.

    :param role: Role in a given partition (string, 1-32 chars,
        ``$ref: /axapi/v3/rba/role``).
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "role-list"
        self.DeviceProxy = ""
        self.role = ""
        # Any supplied keyword overrides the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class RuleList(A10BaseClass):
    """One entry of the ``rule-list`` array.

    This class does not support CRUD operations; use the parent object.

    :param operation: One of 'no-access', 'read' or 'write'.
    :param object: Lineage of the object class for the permitted operation
        (string, 1-128 chars). Stored on the instance as ``A10WW_object`` —
        presumably the SDK's mangling of the reserved name; confirm in the
        generator.
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "rule-list"
        self.DeviceProxy = ""
        self.operation = ""
        self.A10WW_object = ""
        # Any supplied keyword overrides the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Partition(A10BaseClass):
    """RBA configuration for a user's access privileges within one partition.

    Supports CRUD operations and is the "PARENT" class for this module;
    inherits from `common/A10BaseClass`.

    :param partition_name: Partition name (string, 1-14 chars, required).
    :param role_list: Array of role entries (see RoleList).
    :param uuid: UUID of the object (string, 1-64 chars; modify-not-allowed).
    :param rule_list: Array of operation/object rule entries (see RuleList).
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/rba/user/{name}/partition/{partition_name}`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["partition_name"]
        self.b_key = "partition"
        self.a10_url = "/axapi/v3/rba/user/{name}/partition/{partition_name}"
        self.DeviceProxy = ""
        self.partition_name = ""
        self.role_list = []
        self.uuid = ""
        self.rule_list = []
        # Any supplied keyword overrides the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
164941
|
from pathlib import Path

# User's home directory, used as the root for the default model cache.
HOME_DIR = str(Path.home())
# BUG fix: the original called os.path.join() without importing os (NameError
# at import time); build the same path with pathlib instead.
DEFAULT_MODEL_DIR = str(Path.home() / 'arabicnlp_models')
|
164954
|
from fabric.colors import green as _green, yellow as _yellow, red as _red
from settings import cloud_connections, DEFAULT_PROVIDER
from ghost_log import log
from ghost_tools import get_aws_connection_data
from libs.blue_green import get_blue_green_from_app
from libs.ec2 import create_ec2_instance
# Command metadata: UI description and the app-document fields this command
# reads (used when loading the related app).
COMMAND_DESCRIPTION = "Create a new instance"
RELATED_APP_FIELDS = ['environment_infos']
def is_available(app_context=None):
    """Return True when this command can run for the given app context.

    With no context the command is always offered; otherwise it requires the
    app to have a non-empty 'ami' value.
    """
    if not app_context:
        return True
    ami = app_context.get('ami', '')
    return ami != ''
class Createinstance:
    """Ghost command that creates one new EC2 instance for an app.

    The worker passed to __init__ supplies the app document, job document,
    DB handle, config and log file; execute() performs the creation and
    reports the result back through the worker.
    """
    _app = None
    _job = None
    _log_file = -1
    def __init__(self, worker):
        # Copy the worker's context so the command can operate on its own.
        self._app = worker.app
        self._job = worker.job
        self._db = worker._db
        self._config = worker._config
        self._worker = worker
        self._log_file = worker.log_file
        # Cross-account connection parameters taken from the app document.
        self._connection_data = get_aws_connection_data(
            self._app.get('assumed_account_id', ''),
            self._app.get('assumed_role_name', ''),
            self._app.get('assumed_region_name', '')
        )
        # Cloud connection factory selected by the app's provider.
        self._cloud_connection = cloud_connections.get(self._app.get('provider', DEFAULT_PROVIDER))(
            self._config,
            **self._connection_data
        )
        # Only the blue/green color is used by this command.
        blue_green, self._color = get_blue_green_from_app(self._app)
    def execute(self):
        """Create the instance; update the worker status with the outcome."""
        # Job options may override the subnet (options[0]) and pin a private
        # IP (options[1]); otherwise use the app's first configured subnet.
        subnet_id = self._job['options'][0] if 'options' in self._job and len(self._job['options']) > 0 else self._app['environment_infos']['subnet_ids'][0]
        private_ip_address = self._job['options'][1] if 'options' in self._job and len(self._job['options']) > 1 else None
        try:
            log(_green("STATE: Started"), self._log_file)
            instance = create_ec2_instance(self._cloud_connection, self._app, self._color, self._config,
                                           private_ip_address, subnet_id,
                                           self._log_file)
            self._worker.update_status("done", message="Creating Instance OK: [{0}]\n\nPublic IP: {1}".format(self._app['name'], str(instance.ip_address)))
            log(_green("STATE: End"), self._log_file)
        except Exception as e:
            # Any failure marks the job failed; the exception text is surfaced
            # to the user in the status message.
            self._worker.update_status("failed", message="Creating Instance Failed: [{0}]\n{1}".format(self._app['name'], e))
            log(_red("STATE: END"), self._log_file)
|
164968
|
import json
from shared import get_session_for_account, send_notification
from policyuniverse.policy import Policy
def audit(resource, remediate=False):
    """Check whether an SQS queue's access policy is internet-accessible.

    Args:
        resource: dict describing the queue; must have type == "sqs" plus
            "account", "region" and "id" (queue URL) keys.
        remediate: when True, attempt to blank a public policy.

    Returns:
        True when compliant (no policy, or policy not public); False otherwise.

    Raises:
        Exception: if resource["type"] is not "sqs".
    """
    is_compliant = True
    if resource["type"] != "sqs":
        raise Exception(
            "Mismatched type. Expected {} but received {}".format(
                "sqs", resource["type"]
            )
        )
    # Get a session in the account where this resource is
    sqs = get_session_for_account(resource["account"], resource["region"], "sqs")
    # Fetch the queue's access policy document.
    policy_response = sqs.get_queue_attributes(
        QueueUrl=resource["id"], AttributeNames=["Policy"]
    )
    if policy_response is None:
        return is_compliant
    # Fix: default to "" (a string), not {} — the value is a JSON string that
    # is later concatenated and passed to json.loads().
    policy_string = policy_response.get("Attributes", {}).get("Policy", "")
    if len(policy_string) == 0:
        # Policy is empty or not present
        return is_compliant
    policy = json.loads(policy_string)
    description = "Policy " + policy_string
    # Check if it is public
    policy = Policy(policy)
    if policy.is_internet_accessible():
        is_compliant = False
        issue = "SQS {} is public".format(resource["id"])
        if remediate:
            if not remediation_make_private(sqs, resource):
                issue += " - Not remediated"
        send_notification(issue, description, resource)
    # TODO Check for unknown accounts being allowed access
    return is_compliant
def remediation_make_private(sqs, resource):
    """Blank the queue's access policy so it is no longer public.

    Returns True to signal the remediation was applied.
    """
    queue_id = resource["id"]
    print("Remediating: Making {} private".format(queue_id))
    sqs.set_queue_attributes(QueueUrl=queue_id, Attributes={"Policy": ""})
    return True
|
164979
|
import numpy as np

# 2-D toy example: recover a regular stimulus-parameter grid (and the
# measurements made at each grid point) from shuffled rows via a lexical sort.

# Fake data: an N x 2 regular mesh of stimulus parameters.
stim_params = np.mgrid[10:25, 20:22].reshape(2, -1).T
# An N x 3 array of output values, one row per simulation run.
stimnum = 15 * 2
output_vals = np.arange(stimnum * 3).reshape(stimnum, 3)
# Shuffle both arrays with the same permutation for a bit of added realism.
shuf = np.random.permutation(stim_params.shape[0])
stim_params = stim_params[shuf]
output_vals = output_vals[shuf]

# Number of distinct values of each stimulus parameter.
# (With float data you may need to round first to dodge precision issues.)
params_shape = tuple(len(np.unique(col)) for col in stim_params.T)
# Row order that sorts the parameters ascending, first column as primary key.
indx = np.lexsort(stim_params[:, ::-1].T)
# Sort and reshape: parameters -> (n_params, p1, p2),
# outputs -> (n_outputs, p1, p2); the same indexing logic applies to both.
sorted_params = stim_params[indx].T.reshape((2,) + params_shape)
sorted_output = output_vals[indx].T.reshape((3,) + params_shape)

# Indexing examples:
#   sorted_params[0, :, 0]  — vary parameter 1 with parameter 2 held fixed
#   sorted_params[1, 0, :]  — vary parameter 2 with parameter 1 held fixed
#   sorted_output[0, 0, :]  — first output across parameter 2 at param1's
#                             lowest value
|
164988
|
from django.conf.urls import url
from . import views
# URL routes for the targets app. Order matters: the more specific patterns
# with captured groups must precede their catch-all counterparts.
# NOTE(review): django.conf.urls.url() was deprecated in Django 2.0 and
# removed in 4.0 — confirm the project's Django version before upgrading.
urlpatterns = [
    url(r'^list/$', views.targets_listing, name='crits-targets-views-targets_listing'),
    url(r'^list/(?P<option>\S+)/$', views.targets_listing, name='crits-targets-views-targets_listing'),
    url(r'^divisions/list/$', views.divisions_listing, name='crits-targets-views-divisions_listing'),
    url(r'^divisions/list/(?P<option>\S+)/$', views.divisions_listing, name='crits-targets-views-divisions_listing'),
    url(r'^add_target/$', views.add_update_target, name='crits-targets-views-add_update_target'),
    url(r'^details/(?P<email_address>[\S ]+)/$', views.target_details, name='crits-targets-views-target_details'),
    url(r'^details/$', views.target_details, name='crits-targets-views-target_details'),
    url(r'^info/(?P<email_address>[\S ]+)/$', views.target_info, name='crits-targets-views-target_info'),
]
|
164993
|
import sys
import os
import json
import copy
import multiprocessing
if len(sys.argv) < 5:
print "Usage: python grid_search.py [model_template_file] [parameters_config] [process_number] [run_dir]"
model_tpl_file = sys.argv[1]
param_conf_file = sys.argv[2]
proc_num = int(sys.argv[3])
run_dir = sys.argv[4]
if not os.path.isdir(run_dir):
os.mkdir(run_dir)
print 'Create Run Directory'
model_template = open(model_tpl_file).read()
param_config = {}
for line in open(sys.argv[2]):
line = line.strip().split()
param_config[line[0]] = line[1:]
print 'Read Template & Config over.'
def render_template(template, params):
    """Substitute every {{key}} placeholder in *template* with its value."""
    rendered = template
    for name, val in params.items():
        rendered = rendered.replace('{{%s}}' % name, val)
    return rendered
# One bookkeeping entry per parameter: [current_index, key, 0, n_choices].
_p = [ [0, k, 0, len(v)] for k, v in param_config.items() ]
def get_one_config(p, d):
    # Recursively enumerate the cartesian product of all parameter choices.
    # p: bookkeeping list above (mutated per branch — hence the deepcopy);
    # d: recursion depth, i.e. which parameter is varied at this level.
    rtn = []
    if d == len(p):
        # Leaf: materialize one {param_name: chosen_value} dict from the
        # current index stored in each entry's first slot.
        return [{k:param_config[k][idx] for idx, k, _, __ in p}]
    for i in range(p[d][3]):
        rtn += get_one_config(copy.deepcopy(p), d+1)
        # Advance this level's choice index for the next branch.
        p[d][0] += 1
    return rtn
# Render one concrete model file per configuration and record the mapping
# between generated model files and their parameter choices in run.conf.
models_list = []
config_out_file = open(run_dir + '/run.conf', 'w')
for idx, config in enumerate(get_one_config(_p, 0)):
    model = render_template(model_template, config)
    # The rendered template must be valid JSON; abort the whole run otherwise.
    try:
        obj = json.loads(model)
    except Exception as e:
        print e
        exit()
    model_file = run_dir + '/' + model_tpl_file.split('/')[-1] + '.run%d' % idx
    log_file = run_dir + '/' + model_tpl_file.split('/')[-1] + '.log%d' % idx
    print model_file
    print >>config_out_file, model_file, config
    open(model_file, 'w').write(model)
    models_list.append((model_file, log_file))
config_out_file.close()
def run_one_model(model_path, log_path):
    # Run the training binary on one model file, redirecting stdout and
    # stderr into its log file.
    # NOTE(review): BIN is a hard-coded per-user path — adjust per deployment.
    BIN = '/home/pangliang/matching/textnet_statistic/bin/textnet'
    command = '%s %s >%s 2>&1' % (BIN, model_path, log_path)
    print command
    os.system(command)
# Schedule
# Fan the generated models out over a fixed-size process pool and block until
# every run has finished.
pool = multiprocessing.Pool(processes = proc_num)
for model_path, log_path in models_list:
    pool.apply_async(run_one_model, (model_path, log_path))
pool.close()
pool.join()
|
165009
|
import jellyfish
import time
from difflib import SequenceMatcher
def getStringSimilarity(s1, s2):
    """Return the Jaro-Winkler similarity of s1 and s2 (via jellyfish), in [0, 1]."""
    return jellyfish.jaro_winkler(s1, s2)
def getStringsimilarity_difflib(s1, s2):
    """Return difflib's SequenceMatcher similarity ratio of s1 and s2, in [0, 1]."""
    matcher = SequenceMatcher(None, s1, s2)
    return matcher.ratio()
def getInputData(path):
    """Read the UTF-8 file at *path* and return a list of its stripped lines."""
    with open(path, "r", encoding="utf8") as fin:
        return [line.strip() for line in fin]
def printResults(path, data):
    """Write each entry of *data* as 'key -> ( name, score )' to a UTF-8 file.

    *data* maps a name to a [most_similar_name, similarity] pair.
    """
    with open(path, "w+", encoding="utf8") as fout:
        for key, (name, score) in data.items():
            fout.write("%s -> ( %s, %s )\n" % (key, name, score))
    return
def getMostSimilar(data):
    """For every name, find the most similar *other* name (Jaro-Winkler).

    Args:
        data: list of name strings.

    Returns:
        dict mapping each name to [most_similar_name, similarity]; names with
        no distinct counterpart keep the initial ["", -inf] sentinel.
    """
    mostSimilar = dict()  # key -> [most similar name, similarity]
    for i, name1 in enumerate(data):
        mostSimilar[name1] = ["", -float('inf')]
        for j, name2 in enumerate(data):
            # Skip self-comparison (and exact duplicates anywhere in the list).
            if i == j or name1 == name2:
                continue
            sim = getStringSimilarity(name1, name2)
            if sim > mostSimilar[name1][1]:
                # Fix: the original first did mostSimilar[name1][1] = sim and
                # then immediately replaced the whole entry — the first write
                # was a dead store and has been removed.
                mostSimilar[name1] = [name2, sim]
    return mostSimilar
if __name__ == '__main__':
    start = time.time()
    inPath = "../../resources/datasets/"
    outPath = "../../resources/results/"
    data = getInputData(inPath + "names.txt")
    mostSimilar = getMostSimilar(data)
    # BUG fix: dicts are not sliceable (mostSimilar[:10] raised TypeError);
    # preview the first 10 entries instead.
    print(dict(list(mostSimilar.items())[:10]))
    print("\nRun time: " + str(time.time() - start) + " seconds")
    printResults(outPath + "mostSimilarName.txt", mostSimilar)
|
165064
|
import os
import tensorflow as tf
from functools import partial
from random import shuffle
def _get_shard_dataset(record_path, split='train'):
    """Return a dataset of TFRecord shard files under *record_path* whose
    filenames start with *split* (e.g. 'train', 'val')."""
    glob_pattern = os.path.join(record_path, split + "*")
    return tf.data.Dataset.list_files(glob_pattern)
def _decode_jpeg(image_buffer, size, scope=None):
    # Decode a JPEG byte string to a 3-channel image resized to (size, size).
    # NOTE(review): this uses the TF1-style tf.name_scope(values=...,
    # default_name=...) signature while the rest of the module uses tf.io /
    # TF2 APIs — confirm which TF version is targeted.
    with tf.name_scope(
        values=[image_buffer], name=scope, default_name='decode_jpeg'):
        image = tf.io.decode_jpeg(image_buffer, channels=3)
        image = tf.image.resize(image, (size, size))
        # NOTE(review): resize already yields floats, and convert_image_dtype
        # on float input assumes values in [0, 1] — verify scaling is intended.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
def _decode_imagenet(proto, preprocess):
    """Parse one serialized Example into an (image, label) pair.

    Args:
        proto: scalar string tensor holding a serialized tf.train.Example
            with an 'image' (JPEG bytes) and 'label' (int64) feature.
        preprocess: optional callable applied to the decoded uint8 image.

    Returns:
        (features, labels): decoded 3-channel image and int32 label tensor.
    """
    feature_map = {
        'image': tf.io.FixedLenFeature([], dtype=tf.string, default_value=''),
        'label': tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
    }
    parsed_features = tf.io.parse_single_example(proto, feature_map)
    features = tf.io.decode_jpeg(parsed_features['image'], channels=3)
    labels = parsed_features['label']
    # Idiom fix: identity comparison against None ('is not'), not '!='.
    if preprocess is not None:
        features = preprocess(features)
    return features, tf.cast(labels, tf.int32)
def imagerecord_dataset(root,
                        batch_size,
                        is_training=True,
                        preprocess=None,
                        num_workers=4):
    """Build a batched tf.data pipeline from TFRecord shards.

    Args:
        root: directory containing 'train*' / 'val*' TFRecord shard files.
        batch_size: number of decoded examples per batch.
        is_training: selects the 'train' split and enables shuffling.
        preprocess: optional callable applied to each decoded image.
        num_workers: parallel shard readers and private threadpool size.

    Returns:
        A tf.data dataset yielding (image, label) batches.
    """
    if is_training:
        split = 'train'
    else:
        split = 'val'
    shard_ds = _get_shard_dataset(root, split=split)
    # NOTE(review): parallel_interleave and map_and_batch are deprecated
    # tf.data.experimental APIs — consider interleave()/map().batch() when
    # upgrading TF.
    imagenet_ds = shard_ds.apply(
        tf.data.experimental.parallel_interleave(
            tf.data.TFRecordDataset, cycle_length=num_workers, sloppy=True))
    # Prefetch a batch at a time to smooth time taken to load for shuffling and preprocessing.
    imagenet_ds = imagenet_ds.prefetch(buffer_size=batch_size)
    if is_training:
        imagenet_ds = imagenet_ds.shuffle(buffer_size=10000)
    decode_fn = partial(_decode_imagenet, preprocess=preprocess)
    imagenet_ds = imagenet_ds.apply(
        tf.data.experimental.map_and_batch(
            map_func=decode_fn, batch_size=batch_size, num_parallel_batches=4))
    imagenet_ds = imagenet_ds.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)
    # Set up extra threadpool resources
    options = tf.data.Options()
    options.experimental_threading.private_threadpool_size = num_workers
    imagenet_ds = imagenet_ds.with_options(options)
    return imagenet_ds
def _parse_imagefolder_samples(filename, label, preprocess=None):
    """Load and decode one (filename, label) sample from disk.

    Args:
        filename: string tensor holding the image path.
        label: integer class index.
        preprocess: optional callable applied to the decoded image.

    Returns:
        (image, label) with the image decoded to a 3-channel tensor.
    """
    # Fix: use the tf.io namespace like the rest of this module — tf.read_file
    # is the TF1 alias (removed in TF2) of tf.io.read_file.
    image_string = tf.io.read_file(filename)
    image_decoded = tf.image.decode_jpeg(image_string, channels=3)
    if preprocess is not None:
        image_decoded = preprocess(image_decoded)
    return image_decoded, label
def imagefolder_dataset(root,
                        batch_size,
                        is_training=True,
                        preprocess=None,
                        num_workers=4):
    """Build a batched tf.data pipeline from a class-per-folder image tree.

    Expects *root*/<split>/<class_name>/<image files>; the class index is the
    position of the class folder in os.listdir order.
    """
    valid_extensions = ('.png', '.jpg', '.jpeg')
    split = 'train' if is_training else 'val'
    split_dir = os.path.join(root, split)
    labels_list = os.listdir(split_dir)
    # Walk each class folder and collect (path, class_index) pairs, keeping
    # only files with a recognized image extension.
    samples = []
    for class_idx, class_name in enumerate(labels_list):
        class_dir = os.path.join(split_dir, class_name)
        for fname in os.listdir(class_dir):
            if fname.lower().endswith(valid_extensions):
                samples.append((os.path.join(class_dir, fname), class_idx))
    # Perform an initial shuffling of the dataset.
    shuffle(samples)
    # Turn the sample list into a tf dataset of (filename, label) pairs.
    decode_fn = partial(_parse_imagefolder_samples, preprocess=preprocess)
    files, labels = zip(*samples)
    imagenet_ds = tf.data.Dataset.from_tensor_slices((list(files),
                                                      list(labels)))
    if is_training:
        imagenet_ds = imagenet_ds.shuffle(buffer_size=10000)
    imagenet_ds = imagenet_ds.prefetch(buffer_size=None)
    imagenet_ds = imagenet_ds.apply(
        tf.data.experimental.map_and_batch(
            map_func=decode_fn,
            batch_size=batch_size,
            num_parallel_batches=num_workers))
    return imagenet_ds
|
165068
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_node(self, ServerName: str, NodeName: str, EngineAttributes: List) -> Dict:
"""
Associates a new node with the server. For more information about how to disassociate a node, see DisassociateNode .
On a Chef server: This command is an alternative to ``knife bootstrap`` .
Example (Chef): ``aws opsworks-cm associate-node --server-name *MyServer* --node-name *MyManagedNode* --engine-attributes "Name=*CHEF_ORGANIZATION* ,Value=default" "Name=*CHEF_NODE_PUBLIC_KEY* ,Value=*public-key-pem* "``
On a Puppet server, this command is an alternative to the ``puppet cert sign`` command that signs a Puppet node CSR.
Example (Chef): ``aws opsworks-cm associate-node --server-name *MyServer* --node-name *MyManagedNode* --engine-attributes "Name=*PUPPET_NODE_CSR* ,Value=*csr-pem* "``
A node can can only be associated with servers that are in a ``HEALTHY`` state. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid. The AssociateNode API call can be integrated into Auto Scaling configurations, AWS Cloudformation templates, or the user data of a server's instance.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/AssociateNode>`_
**Request Syntax**
::
response = client.associate_node(
ServerName='string',
NodeName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'NodeAssociationStatusToken': 'string'
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatusToken** *(string) --*
Contains a token which can be passed to the ``DescribeNodeAssociationStatus`` API call to get the status of the association request.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server with which to associate the node.
:type NodeName: string
:param NodeName: **[REQUIRED]**
The name of the node.
:type EngineAttributes: list
:param EngineAttributes: **[REQUIRED]**
Engine attributes used for associating the node.
**Attributes accepted in a AssociateNode request for Chef**
* ``CHEF_ORGANIZATION`` : The Chef organization with which the node is associated. By default only one organization named ``default`` can exist.
* ``CHEF_NODE_PUBLIC_KEY`` : A PEM-formatted public key. This key is required for the ``chef-client`` agent to access the Chef API.
**Attributes accepted in a AssociateNode request for Puppet**
* ``PUPPET_NODE_CSR`` : A PEM-formatted certificate-signing request (CSR) that is created by the node.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_backup(self, ServerName: str, Description: str = None) -> Dict:
"""
Creates an application-level backup of a server. While the server is in the ``BACKING_UP`` state, the server cannot be changed, and no additional backup can be created.
Backups can be created for servers in ``RUNNING`` , ``HEALTHY`` , and ``UNHEALTHY`` states. By default, you can create a maximum of 50 manual backups.
This operation is asynchronous.
A ``LimitExceededException`` is thrown when the maximum number of manual backups is reached. An ``InvalidStateException`` is thrown when the server is not in any of the following states: RUNNING, HEALTHY, or UNHEALTHY. A ``ResourceNotFoundException`` is thrown when the server is not found. A ``ValidationException`` is thrown when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/CreateBackup>`_
**Request Syntax**
::
response = client.create_backup(
ServerName='string',
Description='string'
)
**Response Syntax**
::
{
'Backup': {
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Backup** *(dict) --*
Backup created by request.
- **BackupArn** *(string) --*
The ARN of the backup.
- **BackupId** *(string) --*
The generated ID of the backup. Example: ``myServerName-yyyyMMddHHmmssSSS``
- **BackupType** *(string) --*
The backup type. Valid values are ``automated`` or ``manual`` .
- **CreatedAt** *(datetime) --*
The time stamp when the backup was created in the database. Example: ``2016-07-29T13:38:47.520Z``
- **Description** *(string) --*
A user-provided description for a manual backup. This field is empty for automated backups.
- **Engine** *(string) --*
The engine type that is obtained from the server when the backup is created.
- **EngineModel** *(string) --*
The engine model that is obtained from the server when the backup is created.
- **EngineVersion** *(string) --*
The engine version that is obtained from the server when the backup is created.
- **InstanceProfileArn** *(string) --*
The EC2 instance profile ARN that is obtained from the server when the backup is created. Because this value is stored, you are not required to provide the InstanceProfileArn again if you restore a backup.
- **InstanceType** *(string) --*
The instance type that is obtained from the server when the backup is created.
- **KeyPair** *(string) --*
The key pair that is obtained from the server when the backup is created.
- **PreferredBackupWindow** *(string) --*
The preferred backup period that is obtained from the server when the backup is created.
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period that is obtained from the server when the backup is created.
- **S3DataSize** *(integer) --*
This field is deprecated and is no longer used.
- **S3DataUrl** *(string) --*
This field is deprecated and is no longer used.
- **S3LogUrl** *(string) --*
The Amazon S3 URL of the backup's log file.
- **SecurityGroupIds** *(list) --*
The security group IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ServerName** *(string) --*
The name of the server from which the backup was made.
- **ServiceRoleArn** *(string) --*
The service role ARN that is obtained from the server when the backup is created.
- **Status** *(string) --*
The status of a backup while in progress.
- **StatusDescription** *(string) --*
An informational message about backup status.
- **SubnetIds** *(list) --*
The subnet IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ToolsVersion** *(string) --*
The version of AWS OpsWorks CM-specific tools that is obtained from the server when the backup is created.
- **UserArn** *(string) --*
The IAM user ARN of the requester for manual backups. This field is empty for automated backups.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server that you want to back up.
:type Description: string
:param Description:
A user-defined description of the backup.
:rtype: dict
:returns:
"""
pass
def create_server(self, ServerName: str, InstanceProfileArn: str, InstanceType: str, ServiceRoleArn: str, AssociatePublicIpAddress: bool = None, DisableAutomatedBackup: bool = None, Engine: str = None, EngineModel: str = None, EngineVersion: str = None, EngineAttributes: List = None, BackupRetentionCount: int = None, KeyPair: str = None, PreferredMaintenanceWindow: str = None, PreferredBackupWindow: str = None, SecurityGroupIds: List = None, SubnetIds: List = None, BackupId: str = None) -> Dict:
    """
    Creates and immediately starts a new server. The server is ready to use when it
    reaches the ``HEALTHY`` state. By default an account may create at most 10 servers.

    This operation is asynchronous.

    Error conditions:

    * ``LimitExceededException`` -- the maximum number of servers (10) already exists.
    * ``ResourceAlreadyExistsException`` -- a server with the same name already exists.
    * ``ResourceNotFoundException`` -- ``BackupId`` is invalid or refers to a backup
      that does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    If ``SecurityGroupIds`` is omitted, AWS OpsWorks creates a new security group.
    For Chef Automate the default group opens TCP 443 to the world; for Puppet
    Enterprise it opens TCP ports 22, 443, 4433, 8140, 8142, 8143, and 8170. When a
    key pair is present SSH access is enabled, open to the world on TCP 22. Because
    the server is then reachable from any IP address, restricting the security group
    rules to known address ranges is recommended.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/CreateServer>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** Unique server name within the account and
        region. Must start with a letter; letters, numbers, or hyphens are allowed,
        up to 40 characters.
    :type InstanceProfileArn: string
    :param InstanceProfileArn: **[REQUIRED]** ARN of the instance profile used by
        the server's EC2 instances.
    :type InstanceType: string
    :param InstanceType: **[REQUIRED]** EC2 instance type, for example ``m4.large``.
    :type ServiceRoleArn: string
    :param ServiceRoleArn: **[REQUIRED]** Service role that the AWS OpsWorks CM
        backend uses to work with the account.
    :type AssociatePublicIpAddress: boolean
    :param AssociatePublicIpAddress: Whether to attach a public IP address to the
        server. Default is ``true``.
    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Enable or disable scheduled backups. Default is
        ``true``.
    :type Engine: string
    :param Engine: Configuration management engine, ``Chef`` or ``Puppet``.
    :type EngineModel: string
    :param EngineModel: ``Monolithic`` for Puppet, ``Single`` for Chef.
    :type EngineVersion: string
    :param EngineVersion: Major engine release: currently ``12`` for Chef, ``2017``
        for Puppet.
    :type EngineAttributes: list
    :param EngineAttributes: Optional engine attributes (``Name``/``Value`` pairs).
        Chef accepts ``CHEF_PIVOTAL_KEY`` and ``CHEF_DELIVERY_ADMIN_PASSWORD``;
        Puppet accepts ``PUPPET_ADMIN_PASSWORD``, ``PUPPET_R10K_REMOTE``, and
        ``PUPPET_R10K_PRIVATE_KEY``.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Number of automated backups to keep; older backups
        beyond this count are deleted. Default is ``1``.
    :type KeyPair: string
    :param KeyPair: Optional EC2 key pair for SSH access to the instance.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: Weekly one-hour maintenance start time in
        ``DDD:HH:MM`` format (UTC), e.g. ``Mon:08:00``. Defaults to a random
        one-hour period on Tuesday, Wednesday, or Friday.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: Backup start time, ``HH:MM`` for daily or
        ``DDD:HH:MM`` for weekly backups (UTC). Defaults to a random daily time.
    :type SecurityGroupIds: list
    :param SecurityGroupIds: Security group IDs to attach; must belong to the VPC
        given by ``SubnetIds``. If omitted, one group open on TCP 22 and 443 to
        0.0.0.0/0 is created.
    :type SubnetIds: list
    :param SubnetIds: Subnet IDs in which to launch the server EC2 instance. The
        VPC must have "Auto Assign Public IP" enabled.
    :type BackupId: string
    :param BackupId: If set, the server is created from the identified backup.
    :rtype: dict
    :returns: ``{'Server': {...}}`` describing the created server, including the
        one-time ``EngineAttributes`` credentials (e.g. ``CHEF_STARTER_KIT`` or
        ``PUPPET_STARTER_KIT``) that AWS OpsWorks CM does not store.
    """
    pass
def delete_backup(self, BackupId: str) -> Dict:
    """
    Deletes a backup (manual or automated). This operation is asynchronous.

    Error conditions:

    * ``InvalidStateException`` -- a deletion of this backup is already in progress.
    * ``ResourceNotFoundException`` -- the backup does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteBackup>`_

    :type BackupId: string
    :param BackupId: **[REQUIRED]** ID of the backup to delete, in the format
        ``ServerName-yyyyMMddHHmmssSSS``. Run ``DescribeBackups`` to list backup IDs.
    :rtype: dict
    :returns: An empty dict ``{}``.
    """
    pass
def delete_server(self, ServerName: str) -> Dict:
    """
    Deletes the server and its underlying AWS CloudFormation stacks (including the
    server's EC2 instance). The server state moves to ``DELETING``; once deleted,
    the server is no longer returned by ``DescribeServer`` requests. If the
    CloudFormation stack cannot be deleted, neither can the server.

    This operation is asynchronous.

    Error conditions:

    * ``InvalidStateException`` -- a deletion of this server is already in progress.
    * ``ResourceNotFoundException`` -- the server does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteServer>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The ID of the server to delete.
    :rtype: dict
    :returns: An empty dict ``{}``.
    """
    pass
def describe_account_attributes(self) -> Dict:
    """
    Describes the account's attributes, and creates requests to raise limits before
    they are reached or exceeded.

    This operation is synchronous.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeAccountAttributes>`_

    :rtype: dict
    :returns: ``{'Attributes': [{'Name': ..., 'Maximum': ..., 'Used': ...}, ...]}``.
        Supported attribute names are *ServerLimit* (current/maximum servers;
        default maximum 10) and *ManualBackupLimit* (current/maximum manual
        backups; default maximum 50).
    """
    pass
def describe_backups(self, BackupId: str = None, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Describes backups, ordered by time with the newest first. When neither
    ``BackupId`` nor ``ServerName`` is given, all backups are returned.

    This operation is synchronous.

    Error conditions:

    * ``ResourceNotFoundException`` -- the backup does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeBackups>`_

    :type BackupId: string
    :param BackupId: Describes a single backup.
    :type ServerName: string
    :param ServerName: Returns backups for the server with the specified name.
    :type NextToken: string
    :param NextToken: Not currently implemented for ``DescribeBackups`` requests.
    :type MaxResults: integer
    :param MaxResults: Not currently implemented for ``DescribeBackups`` requests.
    :rtype: dict
    :returns: ``{'Backups': [...], 'NextToken': ...}``. Each backup dict carries
        identification (``BackupArn``, ``BackupId`` in the form
        ``ServerName-yyyyMMddHHmmssSSS``, ``BackupType`` of ``AUTOMATED`` or
        ``MANUAL``, ``CreatedAt``, ``Description``), the server configuration
        captured at backup time (``Engine``, ``EngineModel``, ``EngineVersion``,
        ``InstanceProfileArn``, ``InstanceType``, ``KeyPair``,
        ``PreferredBackupWindow``, ``PreferredMaintenanceWindow``,
        ``SecurityGroupIds``, ``ServerName``, ``ServiceRoleArn``, ``SubnetIds``,
        ``ToolsVersion``), progress fields (``Status`` of
        ``IN_PROGRESS``/``OK``/``FAILED``/``DELETING`` and ``StatusDescription``),
        the log location ``S3LogUrl``, the requester's ``UserArn`` for manual
        backups, and the deprecated ``S3DataSize``/``S3DataUrl`` fields.
    """
    pass
def describe_events(self, ServerName: str, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Describes events for a specified server, ordered by time with the newest first.

    This operation is synchronous.

    Error conditions:

    * ``ResourceNotFoundException`` -- the server does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeEvents>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server whose events to view.
    :type NextToken: string
    :param NextToken: Pagination token from a previous response. Pass it back to
        fetch the remaining items; a value that was never returned causes an
        ``InvalidNextTokenException``.
    :type MaxResults: integer
    :param MaxResults: Maximum number of results per call. When more results are
        available, the response includes a ``NextToken`` for the next page.
    :rtype: dict
    :returns: ``{'ServerEvents': [...], 'NextToken': ...}``. Each event dict has
        ``CreatedAt`` (when the event occurred), ``ServerName``, ``Message``
        (human-readable status), and ``LogUrl`` (Amazon S3 URL of the event's log
        file). ``NextToken`` is ``null`` when no further results remain.
    """
    pass
def describe_node_association_status(self, NodeAssociationStatusToken: str, ServerName: str) -> Dict:
    """
    Returns the current status of an existing association or disassociation request.

    Error conditions:

    * ``ResourceNotFoundException`` -- no recent association or disassociation
      request with the given token was found, or the server does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeNodeAssociationStatus>`_

    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: **[REQUIRED]** Token returned by either
        AssociateNodeResponse or DisassociateNodeResponse.
    :type ServerName: string
    :param ServerName: **[REQUIRED]** Name of the server from which to
        disassociate the node.
    :rtype: dict
    :returns: ``{'NodeAssociationStatus': ..., 'EngineAttributes': [...]}``.
        ``NodeAssociationStatus`` is ``SUCCESS``, ``FAILED``, or ``IN_PROGRESS``.
        ``EngineAttributes`` holds ``Name``/``Value`` pairs specific to the node
        association; in Puppet, the attribute ``PUPPET_NODE_CERT`` contains the
        signed certificate (the result of the CSR).
    """
    pass
def describe_servers(self, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Lists all configuration management servers identified with the account. Only
    stored results from Amazon DynamoDB are returned; AWS OpsWorks CM does not
    query other services.

    This operation is synchronous.

    Error conditions:

    * ``ResourceNotFoundException`` -- the server does not exist.
    * ``ValidationException`` -- request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeServers>`_

    :type ServerName: string
    :param ServerName: Describes the server with the specified name.
    :type NextToken: string
    :param NextToken: Not currently implemented for ``DescribeServers`` requests.
    :type MaxResults: integer
    :param MaxResults: Not currently implemented for ``DescribeServers`` requests.
    :rtype: dict
    :returns: ``{'Servers': [...], 'NextToken': ...}``. Each server dict carries
        identity and lifecycle fields (``ServerName``, ``ServerArn``,
        ``CreatedAt``, ``CloudFormationStackArn``, ``Status`` such as
        ``CREATING``/``HEALTHY``/``FAILED``/..., ``StatusReason``,
        ``MaintenanceStatus``), engine fields (``Engine``, ``EngineModel``,
        ``EngineVersion``, ``EngineAttributes`` of ``Name``/``Value`` pairs),
        network and instance configuration (``AssociatePublicIpAddress``,
        ``Endpoint``, ``InstanceProfileArn``, ``InstanceType``, ``KeyPair``,
        ``SecurityGroupIds``, ``SubnetIds``, ``ServiceRoleArn``), and backup
        settings (``BackupRetentionCount``, ``DisableAutomatedBackup``,
        ``PreferredBackupWindow``, ``PreferredMaintenanceWindow``). For Puppet
        servers, ``EngineAttributes`` contains PUPPET_API_CA_CERT, the PEM-encoded
        CA certificate used by the Puppet API over TCP port 8140 and also used to
        sign node certificates.
    """
    pass
def disassociate_node(self, ServerName: str, NodeName: str, EngineAttributes: List = None) -> Dict:
    """
    Disassociate a node from an AWS OpsWorks CM server and remove it from the
    server's managed nodes. After a node is disassociated, its key pair is no
    longer valid for accessing the configuration manager's API.

    A node can only be disassociated from a server in a ``HEALTHY`` state;
    otherwise an ``InvalidStateException`` is thrown. A
    ``ResourceNotFoundException`` is thrown when the server does not exist and
    a ``ValidationException`` is raised for invalid request parameters.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DisassociateNode>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server from which to
        disassociate the node.
    :type NodeName: string
    :param NodeName: **[REQUIRED]** The name of the client node.
    :type EngineAttributes: list
    :param EngineAttributes: Engine attributes (``Name``/``Value`` pairs) used
        for disassociating the node. For Chef, ``CHEF_ORGANIZATION`` is
        required (by default only one organization named ``default`` can
        exist); no attributes are required for Puppet.
    :rtype: dict
    :returns: A dict containing ``NodeAssociationStatusToken``, which can be
        passed to ``DescribeNodeAssociationStatus`` to get the status of the
        disassociation request.
    """
    pass
def export_server_engine_attribute(self, ExportAttributeName: str, ServerName: str, InputAttributes: List = None) -> Dict:
    """
    Export a specified server engine attribute as a base64-encoded string —
    for example, user data that you can use in EC2 to associate nodes with a
    server. This operation is synchronous.

    A ``ValidationException`` is raised for invalid request parameters, a
    ``ResourceNotFoundException`` is thrown when the server does not exist,
    and an ``InvalidStateException`` is thrown when the server is in the
    CREATING, TERMINATED, FAILED or DELETING state.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/ExportServerEngineAttribute>`_

    :type ExportAttributeName: string
    :param ExportAttributeName: **[REQUIRED]** The name of the export
        attribute. Currently, only ``Userdata`` is supported; it exports a
        user data script that includes the parameters and values provided in
        ``InputAttributes``.
    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server from which you
        are exporting the attribute.
    :type InputAttributes: list
    :param InputAttributes: A list of ``EngineAttribute`` name/value pairs.
        For the ``Userdata`` export attribute the supported names are
        ``RunList``, ``OrganizationName``, ``NodeEnvironment`` and
        ``NodeClientVersion`` (all Chef-specific; ignored for Puppet).
    :rtype: dict
    :returns: A dict containing ``EngineAttribute`` (the requested name/value
        pair) and ``ServerName`` (the server name used in the request).
    """
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL remains valid.
        By default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, whatever method is used in the method's model.
    :returns: The presigned URL.
    """
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name — the same name as the method
        on the client. For example, if the method name is ``create_foo`` and
        you'd normally invoke the operation as
        ``client.create_foo(**kwargs)``, then, provided ``create_foo`` can be
        paginated, you can use ``client.get_paginator("create_foo")``.
    :raise OperationNotPageableError: Raised if the operation is not
        pageable. Use ``client.can_paginate`` to check whether an operation
        is pageable.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
    :returns: The specified waiter object.
    :rtype: botocore.waiter.Waiter
    """
    pass
def restore_server(self, BackupId: str, ServerName: str, InstanceType: str = None, KeyPair: str = None) -> Dict:
    """
    Restore a backup to a server that is in a ``CONNECTION_LOST``,
    ``HEALTHY``, ``RUNNING``, ``UNHEALTHY`` or ``TERMINATED`` state. The
    server's EC2 instance is deleted and a new one is configured;
    RestoreServer maintains the existing server endpoint, so configuration
    management of the server's client devices (nodes) should continue to
    work. This operation is asynchronous.

    An ``InvalidStateException`` is thrown when the server is not in a valid
    state, a ``ResourceNotFoundException`` when the server does not exist,
    and a ``ValidationException`` when request parameters are not valid.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/RestoreServer>`_

    :type BackupId: string
    :param BackupId: **[REQUIRED]** The ID of the backup to restore from.
    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server to restore.
    :type InstanceType: string
    :param InstanceType: The type of instance to create, in the format
        ``^([cm][34]|t2).*`` — for example ``m4.large``. Valid values include
        ``t2.medium``, ``m4.large`` and ``m4.2xlarge``. If omitted, the
        instance type from the specified backup is used.
    :type KeyPair: string
    :param KeyPair: The name of the key pair to set on the new EC2 instance.
        Helpful when the administrator no longer has the SSH key.
    :rtype: dict
    :returns: An empty dict.
    """
    pass
def start_maintenance(self, ServerName: str, EngineAttributes: List = None) -> Dict:
    """
    Manually start server maintenance. Useful when an earlier maintenance
    attempt failed and the underlying cause has been resolved. The server is
    in an ``UNDER_MAINTENANCE`` state while maintenance is in progress.

    Maintenance can only be started on servers in ``HEALTHY`` and
    ``UNHEALTHY`` states; otherwise an ``InvalidStateException`` is thrown.
    A ``ResourceNotFoundException`` is thrown when the server does not exist
    and a ``ValidationException`` is raised for invalid request parameters.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/StartMaintenance>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server on which to run
        maintenance.
    :type EngineAttributes: list
    :param EngineAttributes: Engine attributes (``Name``/``Value`` pairs)
        specific to the server on which you want to run maintenance.
    :rtype: dict
    :returns: A dict with a ``Server`` key describing the server: name,
        creation time, engine details and attributes, instance settings,
        preferred maintenance/backup windows, security group and subnet IDs,
        status (e.g. ``UNDER_MAINTENANCE``), status reason, and ARNs.
    """
    pass
def update_server(self, ServerName: str, DisableAutomatedBackup: bool = None, BackupRetentionCount: int = None, PreferredMaintenanceWindow: str = None, PreferredBackupWindow: str = None) -> Dict:
    """
    Update settings for a server. This operation is synchronous.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServer>`_

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Setting this to ``true`` disables
        automated or scheduled backups. Automated backups are enabled by
        default.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: The number of automated backups to keep.
    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server to update.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: ``DDD:HH:MM`` (weekly start time) or
        ``HH:MM`` (daily start time). Time windows always use UTC; valid day
        strings are ``Mon``, ``Tue``, ``Wed``, ``Thr``, ``Fri``, ``Sat``,
        ``Sun``.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: ``DDD:HH:MM`` (weekly start time) or
        ``HH:MM`` (daily start time), in UTC, with the same day strings as
        ``PreferredMaintenanceWindow``.
    :rtype: dict
    :returns: A dict with a ``Server`` key describing the updated server:
        name, creation time, engine details and attributes, instance
        settings, preferred maintenance/backup windows, security group and
        subnet IDs, status, status reason, and ARNs.
    """
    pass
def update_server_engine_attributes(self, ServerName: str, AttributeName: str, AttributeValue: str = None) -> Dict:
    """
    Update engine-specific attributes on a specified server. The server
    enters the ``MODIFYING`` state while the operation is in progress; only
    one update can occur at a time. Use this to reset a Chef server's public
    key (``CHEF_PIVOTAL_KEY``) or a Puppet server's admin password
    (``PUPPET_ADMIN_PASSWORD``). This operation is asynchronous.

    It can only be called for servers in ``HEALTHY`` or ``UNHEALTHY``
    states; otherwise an ``InvalidStateException`` is raised. A
    ``ResourceNotFoundException`` is thrown when the server does not exist
    and a ``ValidationException`` is raised for invalid request parameters.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServerEngineAttributes>`_

    :type ServerName: string
    :param ServerName: **[REQUIRED]** The name of the server to update.
    :type AttributeName: string
    :param AttributeName: **[REQUIRED]** The name of the engine attribute to
        update.
    :type AttributeValue: string
    :param AttributeValue: The value to set for the attribute.
    :rtype: dict
    :returns: A dict with a ``Server`` key describing the server: name,
        creation time, engine details and attributes, instance settings,
        preferred maintenance/backup windows, security group and subnet IDs,
        status, status reason, and ARNs.
    """
    pass
|
165079
|
from django.core.management.base import BaseCommand
from django_q.models import Schedule
class Command(BaseCommand):
    """Management command that registers the recurring background tasks.

    Idempotently creates (via ``get_or_create``) one daily Django-Q schedule
    for each of the news, wikipedia and weather fetch tasks, writing a
    confirmation line for each.
    """
    # Fix: help text had a typo ("Setups up").
    help = "Sets up all background tasks"

    # (dotted task function path, confirmation message) for each schedule.
    # Deduplicates the three near-identical get_or_create blocks.
    TASKS = (
        ('news.tasks.get_news', 'Add task to fetch news'),
        ('wikipedia.tasks.fetch_wikipedia', 'Add task to fetch wikipedia articles'),
        ('weather.tasks.fetch_weather', 'Add task to fetch weather'),
    )

    def handle(self, *args, **options):
        """Register each schedule; safe to run repeatedly."""
        for func, message in self.TASKS:
            # schedule_type='D' -> run daily; repeats=-1 -> repeat forever.
            Schedule.objects.get_or_create(
                func=func,
                schedule_type='D',
                repeats=-1
            )
            self.stdout.write(message)
|
165094
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django application configuration for the ``api`` app."""
    name = 'series_tiempo_ar_api.apps.api'
|
165123
|
from datetime import datetime, date
from django.urls import reverse
from django.contrib.gis.geos import Point
from rest_framework.test import APITestCase
from rest_framework import status
from robber import expect
import pytz
from data.factories import PoliceUnitFactory, OfficerFactory, OfficerHistoryFactory, OfficerAllegationFactory
from email_service.constants import TRR_ATTACHMENT_REQUEST
from email_service.factories import EmailTemplateFactory
from trr.factories import TRRFactory, ActionResponseFactory
from trr.tests.mixins import TRRTestCaseMixin
class TRRViewSetTestCase(TRRTestCaseMixin, APITestCase):
def test_retrieve(self):
    """TRR detail endpoint returns the fully serialized TRR: officer summary
    with percentiles, force category/types, incident date, address and geo
    point."""
    unit = PoliceUnitFactory(unit_name='001', description='Unit 001')
    officer = OfficerFactory(
        first_name='Vinh',
        last_name='Vu',
        rank='Detective',
        race='White',
        gender='M',
        appointed_date=date(2000, 1, 1),
        birth_year=1980,
        complaint_percentile=44.4444,
        civilian_allegation_percentile=11.1111,
        internal_allegation_percentile=22.2222,
        trr_percentile=33.3333,
        last_unit=unit
    )
    OfficerHistoryFactory(officer=officer, unit=unit)
    trr = TRRFactory(
        taser=False,
        firearm_used=False,
        officer_assigned_beat='Beat 1',
        officer_in_uniform=True,
        officer_on_duty=False,
        trr_datetime=datetime(2001, 1, 1, tzinfo=pytz.utc),
        subject_gender='M',
        subject_age=37,
        officer=officer,
        location_recode='Factory',
        block='34XX',
        street='Douglas Blvd',
        beat=1021,
        point=Point(1.0, 1.0)
    )
    OfficerAllegationFactory(
        officer=officer,
        allegation__incident_date=datetime(2003, 1, 1, tzinfo=pytz.utc),
        start_date=date(2004, 1, 1),
        end_date=date(2005, 1, 1),
        final_finding='SU'
    )
    ActionResponseFactory(trr=trr, force_type='Verbal Commands', action_sub_category='1')
    self.refresh_index()
    response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': trr.id}))
    expect(response.status_code).to.eq(status.HTTP_200_OK)
    expect(response.data).to.eq({
        'id': trr.id,
        'officer_assigned_beat': 'Beat 1',
        'officer_in_uniform': True,
        'officer_on_duty': False,
        'officer': {
            'id': officer.id,
            'rank': 'Detective',
            'gender': 'Male',
            'race': 'White',
            # Fix: expected value was an anonymization placeholder ('<NAME>');
            # the factory creates first_name='Vinh', last_name='Vu'.
            'full_name': 'Vinh Vu',
            'appointed_date': '2000-01-01',
            'unit': {'unit_name': '001', 'description': 'Unit 001'},
            'birth_year': 1980,
            'percentile_trr': '33.3333',
            'percentile_allegation_internal': '22.2222',
            'percentile_allegation_civilian': '11.1111',
            'percentile_allegation': '44.4444',
        },
        # NOTE(review): subject_race is not set on the factory call — assumes
        # the TRRFactory default is 'White'; confirm against trr.factories.
        'subject_race': 'White',
        'subject_gender': 'Male',
        'subject_age': 37,
        'force_category': 'Other',
        'force_types': ['Verbal Commands'],
        'date_of_incident': '2001-01-01',
        'location_type': 'Factory',
        'address': '34XX Douglas Blvd',
        'beat': 1021,
        'point': {
            'lng': 1.0,
            'lat': 1.0,
        },
    })
    def test_retrieve_no_point(self):
        """When the TRR has no geographic point, the payload omits the 'point' key."""
        unit = PoliceUnitFactory(unit_name='001', description='Unit 001')
        officer = OfficerFactory(
            first_name='Vinh',
            last_name='Vu',
            race='White',
            gender='M',
            rank='Detective',
            appointed_date=date(2000, 1, 1),
            birth_year=1980,
            complaint_percentile=44.4444,
            civilian_allegation_percentile=11.1111,
            internal_allegation_percentile=22.2222,
            trr_percentile=33.3333,
            last_unit=unit
        )
        OfficerHistoryFactory(officer=officer, unit=unit)
        # Same TRR fixture as test_retrieve, but deliberately without point=.
        trr = TRRFactory(
            taser=False,
            firearm_used=False,
            officer_assigned_beat='Beat 1',
            officer_in_uniform=True,
            officer_on_duty=False,
            trr_datetime=datetime(2001, 1, 1, tzinfo=pytz.utc),
            subject_gender='M',
            subject_age=37,
            officer=officer,
            location_recode='Factory',
            block='34XX',
            street='Douglas Blvd',
            beat=1021,
        )
        OfficerAllegationFactory(
            officer=officer,
            allegation__incident_date=datetime(2003, 1, 1, tzinfo=pytz.utc),
            start_date=date(2004, 1, 1),
            end_date=date(2005, 1, 1), final_finding='SU')
        ActionResponseFactory(trr=trr, force_type='Verbal Commands', action_sub_category=1)
        self.refresh_index()
        response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': trr.id}))
        expect(response.status_code).to.eq(status.HTTP_200_OK)
        # Expected payload matches test_retrieve minus the 'point' entry.
        expect(response.data).to.eq({
            'id': trr.id,
            'officer_assigned_beat': 'Beat 1',
            'officer_in_uniform': True,
            'officer_on_duty': False,
            'officer': {
                'id': officer.id,
                'rank': 'Detective',
                'gender': 'Male',
                'race': 'White',
                'full_name': '<NAME>',
                'appointed_date': '2000-01-01',
                'unit': {'unit_name': '001', 'description': 'Unit 001'},
                'birth_year': 1980,
                'percentile_trr': '33.3333',
                'percentile_allegation_internal': '22.2222',
                'percentile_allegation_civilian': '11.1111',
                'percentile_allegation': '44.4444',
            },
            'subject_race': 'White',
            'subject_gender': 'Male',
            'subject_age': 37,
            'force_category': 'Other',
            'force_types': ['Verbal Commands'],
            'date_of_incident': '2001-01-01',
            'location_type': 'Factory',
            'address': '34XX Douglas Blvd',
            'beat': 1021,
        })
def test_retrieve_not_found(self):
response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': 123}))
expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)
def test_retrieve_missing_percentile(self):
officer = OfficerFactory(
civilian_allegation_percentile=None,
internal_allegation_percentile=None,
trr_percentile=None
)
trr = TRRFactory(officer=officer)
self.refresh_index()
response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': trr.id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
def test_request_document(self):
EmailTemplateFactory(type=TRR_ATTACHMENT_REQUEST)
TRRFactory(pk=112233)
response = self.client.post(
reverse('api-v2:trr-request-document', kwargs={'pk': 112233}),
{'email': '<EMAIL>'}
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'message': 'Thanks for subscribing',
'trr_id': 112233
})
def test_request_same_document_twice(self):
EmailTemplateFactory(type=TRR_ATTACHMENT_REQUEST)
trr = TRRFactory(pk=112233)
self.client.post(
reverse('api-v2:trr-request-document', kwargs={'pk': trr.id}),
{'email': '<EMAIL>'}
)
response2 = self.client.post(
reverse('api-v2:trr-request-document', kwargs={'pk': trr.id}),
{'email': '<EMAIL>'}
)
expect(response2.status_code).to.eq(status.HTTP_200_OK)
expect(response2.data).to.eq({
'message': 'Email already added',
'trr_id': 112233
})
def test_request_document_without_email(self):
TRRFactory(pk=321)
response = self.client.post(reverse('api-v2:trr-request-document', kwargs={'pk': 321}))
expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST)
expect(response.data).to.eq({
'message': 'Please enter a valid email'
})
def test_request_document_with_invalid_email(self):
TRRFactory(pk=321)
response = self.client.post(reverse('api-v2:trr-request-document', kwargs={'pk': 321}),
{'email': 'invalid@email'})
expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST)
expect(response.data).to.eq({
'message': 'Please enter a valid email'
})
def test_request_document_with_invalid_trr(self):
response = self.client.post(reverse('api-v2:trr-request-document', kwargs={'pk': 321}))
expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)
|
165134
|
from . import metadata
from .client import ServiceClient
from .constants import DEFAULT_HOSTNAME, DEFAULT_PROTOCOL
def login(token: str, hostname: str = DEFAULT_HOSTNAME, protocol: str = DEFAULT_PROTOCOL, verify: bool = True) -> str:
    """Make a login request to SaaS.

    Builds a service client for ``protocol://hostname`` and performs a
    CLI login, returning the username reported by the server.
    """
    base_url = f"{protocol}://{hostname}"
    service = ServiceClient(base_url, token, verify=verify)
    login_response = service.cli_login(metadata=metadata.Metadata())
    return login_response.username
|
165143
|
from odoo.tests.common import TransactionCase
class TestProductTemplate(TransactionCase):
    """Test product.template name search with a purchase-partner context."""
    def test_name_search(self):
        """_name_search on a partial supplier code should find the template.

        Searches for 'VOB2' with partner_id in context; the supplier info
        carries product_code 'VOB2a' and the template default_code is
        'VOB2A', so the match is expected regardless of case.
        """
        partner = self.env['res.partner'].create({
            'name': 'Azure Interior',
        })
        # Supplier info linking the partner to the product via product_code.
        seller = self.env['product.supplierinfo'].create({
            'name': partner.id,
            'price': 12.0,
            'delay': 1,
            'product_code': 'VOB2a',
        })
        # NOTE(review): the template name here is a redacted placeholder
        # ('<NAME>') while the assertion below expects 'Rubber Duck' in the
        # display name — confirm the intended fixture value.
        product_tmpl = self.env['product.template'].create({
            'name': '<NAME>',
            'type': 'product',
            'default_code': 'VOB2A',
            'seller_ids': [seller.id],
            'purchase_ok': True,
        })
        ns = self.env['product.template'].with_context(partner_id=partner.id)._name_search('VOB2', [['purchase_ok', '=', True]])
        self.assertEqual(len(ns), 1, "_name_search should have 1 item")
        # NOTE(review): relies on the private `_value` attribute of the
        # lazy name_get result — verify against the Odoo version in use.
        self.assertEqual(ns[0][1]._value, '[VOB2A] Rubber Duck', "_name_search should return the expected result")
|
165146
|
import os
import shutil
import subprocess
import time
from bw_plex import LOG
# EDL action types, mapped in both directions (name -> code and code -> name)
# so lookups work with either a string or an int key.
TYPES = {
    'cut': 0,
    'mute': 1,
    'scene marker': 2,
    'commercial break': 3,
}
TYPES.update({code: name for name, code in TYPES.items()})
def db_to_edl(item, type=3):
    """Build an EDL-style dict from a processed DB item.

    Manually corrected theme timings take precedence over detected ones;
    credits timings are always included when present. A timing of -1 (or
    any falsy value) means "not set".

    Args:
        item: DB row exposing (correct_)theme_start/end and credits_start/end.
        type (int): EDL action code for the main entries (default 3).

    Returns:
        dict: entry name -> [start, end, edl_type].
    """
    def _timings_set(start, end):
        # A pair of timings is usable only when both are set and not -1.
        return bool(start) and start != -1 and bool(end) and end != -1

    entries = {}
    if _timings_set(item.correct_theme_start, item.correct_theme_end):
        entries["manual intro"] = [item.correct_theme_start, item.correct_theme_end, TYPES[type]]
        entries["manual intro end"] = [item.correct_theme_end, item.correct_theme_end, 2]
    elif _timings_set(item.theme_start, item.theme_end):
        entries["intro"] = [item.theme_start, item.theme_end, TYPES[type]]
        entries["intro end"] = [item.theme_end, item.theme_end, 2]
    if _timings_set(item.credits_start, item.credits_end):
        entries["credits"] = [item.credits_start, item.credits_end, TYPES[type]]
        entries["credits end"] = [item.credits_end, item.credits_end, 2]
    return entries
def edl_dict_to_metadata_file(path, eld):
    """Write a ffmpeg metadata (chapters) file derived from an edl dict.

    Plex does not support chapters directly, so we emit a ffmpeg metadata
    file that can later be muxed into the video.

    Args:
        path (str): path of the file the metadata belongs to; the output
            file is created alongside it with a '.metadata' extension.
        eld (dict): chapter name -> [start_seconds, end_seconds, ...].

    Returns:
        str: path to the created metadata file.
    """
    # TODO: consider checking whether the file already has chapters so we
    # don't clobber existing metadata.
    title = os.path.splitext(os.path.basename(path))[0]
    header = ';FFMETADATA1\ntitle=%s\nartist=Made by bw_plex\n\n' % title
    chapter_template = """[CHAPTER]\nTIMEBASE=1/1000\nSTART=%s\nEND=%s\ntitle=%s\n\n"""
    meta_name = os.path.splitext(path)[0] + '.metadata'
    with open(meta_name, 'w') as mf:
        mf.write(header)
        for key, value in eld.items():
            # Timings are seconds; the metadata file uses milliseconds.
            start_ms = float(value[0]) * 1000
            end_ms = float(value[1]) * 1000
            mf.write(chapter_template % (start_ms, end_ms, key))
    LOG.debug('Created a metadatafile %s', meta_name)
    return meta_name
def write_chapters_to_file(path, input_edl=None, replace=True, cleanup=True):
    """Use ffmpeg to add chapters to a videofile.

    Args:
        path (str): path to the video file we should add chapters to.
        input_edl (dict): edl dict describing the chapters (see db_to_edl).
        replace (bool): Default True; overwrite *path* with the chaptered copy.
        cleanup (bool): Default True; remove the .metadata file after the
            chapters have been added.

    Return:
        path
    """
    # BUG FIX: the original condition was `if 'https://' or 'http://' in path:`
    # which is always true because the bare literal 'https://' is truthy.
    # Check both schemes properly and bail out — we can't rewrite a file we
    # only have access to over HTTP.
    if 'https://' in path or 'http://' in path:
        LOG.debug("Can't add chapters to as we dont have access to the file on the file system")
        return path
    mf_file = edl_dict_to_metadata_file(path, input_edl)
    mf_file = str(mf_file)
    outfile, ext = os.path.splitext(path)
    outfile = outfile + '__bw_plex_meta' + ext
    cmd = ['ffmpeg', '-i', path, '-i', mf_file, '-map_metadata', '1', '-codec', 'copy', outfile]
    # Log the command before running it so failures can be reproduced.
    LOG.debug('writing chapters to file using command %s', ' '.join(cmd))
    proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    code = proc.wait()
    if code != 0:
        LOG.debug('Failed to write_chapters_to_file %s', code)
    # Try to replace the orginal with the one with have added
    # chapters too. Retry a few times in case the file is briefly locked.
    if replace:
        for _ in range(3):
            try:
                shutil.move(outfile, path)
                break
            except OSError:
                time.sleep(1)
    if cleanup:
        os.remove(mf_file)
        LOG.debug('Deleted %s', mf_file)
    return path
|
165167
|
import imp
import sys
import types
def new_module(name):
    """
    Do all of the gruntwork associated with creating a new module.

    Creates an empty module named *name*, registers it in ``sys.modules``,
    and — when *name* is dotted — imports the parent package and attaches
    the new module to it as an attribute.

    Args:
        name (str): fully qualified module name, e.g. ``"pkg.sub"``.

    Returns:
        types.ModuleType: the freshly created module object.
    """
    parent = None
    child_name = None
    if '.' in name:
        parent_name, _, child_name = name.rpartition('.')
        parent = __import__(parent_name, fromlist=[''])
    # types.ModuleType replaces the deprecated imp.new_module, which was
    # removed entirely in Python 3.12; the semantics are identical.
    module = types.ModuleType(name)
    sys.modules[name] = module
    if parent:
        setattr(parent, child_name, module)
    return module
class SettingsImporter(object):
    """Meta-path importer that materializes a settings dict as a module.

    Values that are plain callables (and not flagged with
    ``is_callable_setting``) are invoked once at import time, so lazy
    settings get resolved when the module is loaded.
    """

    def __init__(self, module_name, settings):
        self.module_name = module_name
        self.settings = settings

    def find_module(self, name, path=None):
        # Only claim the single module we were configured to provide.
        return self if name == self.module_name else None

    def load_module(self, name):
        # Honor the import protocol: return the cached module if present.
        if name in sys.modules:
            return sys.modules[name]
        # Unroll the settings into a new module.
        module = new_module(self.module_name)
        for key, value in self.settings.items():
            if callable(value) and not getattr(value, 'is_callable_setting', False):
                value = value()
            setattr(module, key, value)
        return module
|
165201
|
import asyncio
import logging
from collections import deque
from contextlib import asynccontextmanager
from functools import wraps
logger = logging.getLogger(__name__)
class AcurlSessionWrapper:
    """Proxy around an acurl session.

    Adds a per-session response callback hook and an ``additional_metrics``
    dict, while transparently forwarding every other attribute access to
    the wrapped session object.
    """

    def __init__(self, session):
        self.__session = session
        self.__callback = None
        self.additional_metrics = {}

    def __getattr__(self, attrname):
        # __getattr__ is only invoked after normal attribute lookup has
        # already failed, so forward straight to the wrapped session.
        # (The original first probed object.__getattr__, but `object`
        # defines no __getattr__, so that call always raised AttributeError
        # and was dead code.)
        return getattr(self.__session, attrname)

    def set_response_callback(self, cb):
        """Register a callback invoked with (response, additional_metrics)."""
        self.__callback = cb

    @property
    def _response_callback(self):
        # Read-only access for the pool's response handler.
        return self.__callback
class SessionPool:
    """No longer actually goes pooling as this is built into acurl. API just left in place.
    Will need a refactor"""
    # A memoization cache for instances of this class per event loop
    _session_pools = {}
    def __init__(self, use_new_acurl_implementation=False):
        # Imports are local so only the selected acurl implementation needs
        # to be installed/importable.
        if use_new_acurl_implementation:
            import acurl_ng
            self._wrapper = acurl_ng.CurlWrapper(asyncio.get_event_loop())
        else:
            import acurl
            self._wrapper = acurl.EventLoop()
        # Retained from the old pooling implementation; not consulted by
        # _checkout/_checkin in this version.
        self._pool = deque()
    @asynccontextmanager
    async def session_context(self, context):
        # Attach an HTTP session to the context for the duration of the block.
        context.http = await self._checkout(context)
        yield
        await self._checkin(context.http)
        del context.http
    @classmethod
    def decorator(cls, func):
        """Wrap *func* so it runs with a per-event-loop pool's session on ctx.http."""
        @wraps(func)
        async def wrapper(ctx, *args, **kwargs):
            loop = asyncio.get_event_loop()
            # EAFP memoization: one SessionPool instance per event loop.
            try:
                instance = cls._session_pools[loop]
            except KeyError:
                use_new_acurl_implementation = ctx.config.get(
                    "enable_new_acurl_implementation", False
                )
                instance = cls(use_new_acurl_implementation)
                cls._session_pools[loop] = instance
            async with instance.session_context(ctx):
                return await func(ctx, *args, **kwargs)
        return wrapper
    async def _checkout(self, context):
        # Create a fresh wrapped session and install a callback that reports
        # per-request timing metrics through the context.
        session = self._wrapper.session()
        session_wrapper = AcurlSessionWrapper(session)
        def response_callback(r):
            # Give the user callback a chance to add to additional_metrics
            # before the metrics message is sent.
            if session_wrapper._response_callback is not None:
                session_wrapper._response_callback(r, session_wrapper.additional_metrics)
            context.send(
                "http_metrics",
                start_time=r.start_time,
                effective_url=r.url,
                response_code=r.status_code,
                dns_time=r.namelookup_time,
                connect_time=r.connect_time,
                tls_time=r.appconnect_time,
                transfer_start_time=r.pretransfer_time,
                first_byte_time=r.starttransfer_time,
                total_time=r.total_time,
                primary_ip=r.primary_ip,
                method=r.request.method,
                **session_wrapper.additional_metrics,
            )
        session.set_response_callback(response_callback)
        return session_wrapper
    async def _checkin(self, session):
        # No-op: sessions are not pooled any more (see class docstring).
        pass
def mite_http(func):
    """Decorator that provides *func* with an HTTP session via the per-loop SessionPool."""
    return SessionPool.decorator(func)
|
165241
|
from unittest import TestCase
from dexpy.simplex_centroid import build_simplex_centroid
from dexpy.eval import det_xtxi
from dexpy.model import make_quadratic_model
import numpy as np
import patsy
class TestSimplexCentroid(TestCase):
    """Checks D-optimality of simplex-centroid designs for 3..8 components."""
    # NOTE(review): @classmethod on a unittest test method is unusual —
    # confirm the runner still discovers and executes it as intended.
    @classmethod
    def test_d_optimality(cls):
        # Reference |X'X|^-1 determinants for 3 through 8 components.
        answer_d = [ 2.513455e3, 2.197654e6, 5.52777e9,
                     1.85905e13, 3.447727e16, 1.275709e19 ]
        actual_d = []
        for i in range(3, 9):
            design = build_simplex_centroid(i)
            # Quadratic mixture model without intercept or squared terms.
            model = "-1 + " + make_quadratic_model(design.columns,
                                                   include_squared=False)
            x_matrix = patsy.dmatrix(model,
                                     design,
                                     return_type="dataframe")
            actual_d.append(det_xtxi(x_matrix, use_log=False))
        np.testing.assert_allclose(answer_d, actual_d, rtol=1e-5)
|
165271
|
from django.test import TestCase, override_settings
from hunts.models import Hunt
from puzzles.models import Puzzle
from .models import ChatRoom
from .service import ChatService
from .fake_service import FakeChatService
@override_settings(
    CHAT_DEFAULT_SERVICE="DEFAULT",
    CHAT_SERVICES={
        "DEFAULT": ChatService,
        "FAKE": FakeChatService,
    },
)
class TestChatRoom(TestCase):
    """Exercises ChatRoom channel lifecycle against the FakeChatService."""
    def setUp(self):
        # One hunt with a meta puzzle and one feeder puzzle, each with a room.
        hunt = Hunt.objects.create(name="fake hunt", url="google.com")
        self.meta = Puzzle.objects.create(
            name="meta",
            hunt=hunt,
            url="meta.com",
            sheet="sheet.com",
            is_meta=True,
        )
        self.feeder = Puzzle.objects.create(
            name="puzzle",
            hunt=hunt,
            url="url.com",
            sheet="sheet2.com",
            is_meta=False,
        )
        self.room = ChatRoom.objects.create(
            puzzle=self.feeder, name="Test Room 🧩", service="FAKE"
        )
        self.meta_room = ChatRoom.objects.create(
            puzzle=self.meta, name="Meta Room", service="FAKE"
        )
        self.fake_service = FakeChatService.get_instance()
    def test_chat_room_str_uses_name(self):
        self.assertEqual(str(self.room), self.room.name)
    def test_chat_room_default_service(self):
        # Rooms created without a service fall back to CHAT_DEFAULT_SERVICE.
        self.room = ChatRoom.objects.create(name="Default Test Room 🧩")
        self.assertEqual(self.room.service, "DEFAULT")
    def test_chat_room_service(self):
        self.assertEqual(self.room.service, "FAKE")
    def test_chat_room_create_channels_based_on_name(self):
        self.room.create_channels()
        self.assertIn(self.room.text_channel_id, self.fake_service.text_channels)
        self.assertIn(self.room.audio_channel_id, self.fake_service.audio_channels)
    def test_chat_room_delete_channels(self):
        self.room.create_channels()
        self.room.delete_channels()
        self.assertNotIn(self.room.text_channel_id, self.fake_service.text_channels)
        self.assertNotIn(self.room.audio_channel_id, self.fake_service.audio_channels)
    def test_chat_room_object_delete_calls_delete_channels(self):
        # Deleting the model row should also tear down the remote channels.
        self.room.create_channels()
        self.room.delete()
        self.assertNotIn(self.room.text_channel_id, self.fake_service.text_channels)
        self.assertNotIn(self.room.audio_channel_id, self.fake_service.audio_channels)
    def test_chat_room_archive_and_unarchive(self):
        self.room.create_channels()
        self.room.archive_channels()
        self.assertIn(self.room.text_channel_id, self.fake_service.archived_channels)
        self.assertIn(self.room.audio_channel_id, self.fake_service.archived_channels)
        self.room.unarchive_channels()
        self.assertNotIn(self.room.text_channel_id, self.fake_service.archived_channels)
        self.assertNotIn(
            self.room.audio_channel_id, self.fake_service.archived_channels
        )
    def test_metas_category(self):
        # Meta rooms live in the hunt's metas category while is_meta is set.
        meta_category = self.meta_room.puzzle.hunt.settings.discord_metas_category
        self.meta_room.create_channels()
        self.meta_room.update_category()
        self.assertIn(
            self.meta_room.text_channel_id,
            self.fake_service.category_to_channel[meta_category],
        )
        self.assertIn(
            self.meta_room.audio_channel_id,
            self.fake_service.category_to_channel[meta_category],
        )
        # Clearing is_meta should move the channels out of the metas category.
        self.meta_room.puzzle.is_meta = False
        self.meta_room.puzzle.save()
        self.meta_room.update_category()
        self.assertNotIn(
            self.meta_room.text_channel_id,
            self.fake_service.category_to_channel[meta_category],
        )
        self.assertNotIn(
            self.meta_room.audio_channel_id,
            self.fake_service.category_to_channel[meta_category],
        )
    def test_unassigned_feeder_category(self):
        # Feeders with no meta go to the hunt's "unassigned" categories.
        text_category = (
            self.meta_room.puzzle.hunt.settings.discord_unassigned_text_category
        )
        voice_category = (
            self.meta_room.puzzle.hunt.settings.discord_unassigned_voice_category
        )
        self.room.create_channels()
        self.room.update_category()
        self.assertIn(
            self.room.text_channel_id,
            self.fake_service.category_to_channel[text_category],
        )
        self.assertIn(
            self.room.audio_channel_id,
            self.fake_service.category_to_channel[voice_category],
        )
    def test_meta_feeder_category(self):
        self.room.create_channels()
        self.room.update_category()
        # should be archived after answering
        self.feeder.set_answer("ANSWER")
        archive_category = self.meta_room.puzzle.hunt.settings.discord_archive_category
        self.room.update_category()
        self.assertIn(
            self.room.text_channel_id,
            self.fake_service.category_to_channel[archive_category],
        )
        self.assertIn(
            self.room.audio_channel_id,
            self.fake_service.category_to_channel[archive_category],
        )
        # assigning meta should not unarchive it
        self.feeder.metas.add(self.meta)
        self.room.update_category()
        self.assertIn(
            self.room.text_channel_id,
            self.fake_service.category_to_channel[archive_category],
        )
        self.assertIn(
            self.room.audio_channel_id,
            self.fake_service.category_to_channel[archive_category],
        )
        # should be in metas category after deleting the answer
        self.feeder.clear_answer("ANSWER")
        self.room.update_category()
        self.assertIn(
            self.room.text_channel_id,
            self.fake_service.category_to_channel[self.meta.name],
        )
        self.assertIn(
            self.room.audio_channel_id,
            self.fake_service.category_to_channel[self.meta.name],
        )
    def test_send_message_and_announce(self):
        self.room.create_channels()
        msg = self.room.name
        self.room.send_message(msg)
        self.assertIn(msg, self.fake_service.messages)
class TestChatService(TestCase):
    """Verifies the abstract ChatService contract raises NotImplementedError."""
    def test_base_chat_service_constructor_raises_error(self):
        with self.assertRaises(NotImplementedError):
            ChatService.get_instance()
    def test_base_chat_service_methods_raise_not_implemented_error(self):
        """Every public method of the base service must raise NotImplementedError."""
        class PartiallyImplementedChatService(ChatService):
            # Override __init__ only, so instantiation succeeds but every
            # other inherited method keeps the base (unimplemented) body.
            def __init__(self, django_settings):
                pass
        service = PartiallyImplementedChatService.get_instance()
        for f in dir(ChatService):
            # Filter for public interface methods.
            if f.startswith("_") or f == "get_instance":
                continue
            with self.assertRaises(NotImplementedError):
                func = service.__getattribute__(f)
                # Dispatch per-method dummy arguments matching each signature.
                if f == "send_message" or f == "announce":
                    func("channel-name-or-id", "msg")
                elif f == "handle_tag_added" or f == "handle_tag_removed":
                    func("channel-id", "puzzle", "tag")
                elif f == "handle_puzzle_rename":
                    func("channel", "name")
                elif f == "categorize_channel":
                    func("guild-id", "channel-name-or-id", "category-name")
                elif f == "get_text_channel_participants":
                    func("channel-id")
                else:
                    func("guild-id", "channel-name-or-id")
|
165291
|
import pytest
from simple_zpl2 import ZPLDocument
def test_comment():
    """A comment should render as ^FX...^FS inside the ^XA/^XZ envelope."""
    doc = ZPLDocument()
    doc.add_comment('Testing Comment')
    expected = b'^XA\n^FXTesting Comment^FS\n^XZ'
    assert doc.zpl_bytes == expected
|
165292
|
import numpy as np
import time
from multiprocessing import Pool, cpu_count
import sys
sys.path.append('..')
from numpyVectorization.motion_gauss import simulateParticles_loop
# How do we pass multiple arguments via pool? Use wrapper functions! Note:
# lambda functions will NOT work for this purpose
def wrapper_fun(args):
    """Unpack one argument tuple into simulateParticles_loop (Pool.map passes a single arg)."""
    return simulateParticles_loop(*args)
if __name__ == "__main__":
num_particles = 100000
num_steps = 100
num_cpus = cpu_count()
# Determine how many particles to run on each worker
num_particles_per_core = int(num_particles / num_cpus)
# # input arrays
# inArys = [np.zeros((num_particles_per_core, num_steps)) for i in range(num_cpus)]
# for ary in inArys: print ary.shape
# Set up our pool
p = Pool(num_cpus)
# Make our arguments
args = [(num_particles_per_core, num_steps, False)] * num_cpus
# from itertools import izip, repeat
# args = izip(repeat(num_particles_per_core, num_cpus), repeat(num_steps, num_cpus), repeat(False, num_cpus))
# Send the wrapper function and the iterable args to map
tic = time.time() # Start time of code running
result = p.map_async(wrapper_fun, args)
# Get the result when it's ready
poolresult = result.get()
toc = time.time()
p.close()
p.join()
print "%.5f sec to compute %s particles" %((toc-tic), num_particles)
|
165317
|
import dash_core_components as dcc
import dash_html_components as html
from dash_docs import styles
from dash_docs import reusable_components as rc
layout = html.Div(children=[
rc.Markdown('''
# Deploying Dash Apps
By default, Dash apps run on `localhost` - you can only access them on your
own machine. To share a Dash app, you need to "deploy" it to a server.
Our recommend method for securely deploying Dash applications is
[Dash Enterprise](https://plotly.com/dash).
> Dash Enterprise can be installed on the Kubernetes
> services of
> [AWS](https://go.plotly.com/dash-aws),
> [Azure](https://go.plotly.com/dash-azure),
> GCP,
> or an
> [on-premise Linux Server](https://plotly.com/dash/on-premises-linux/?utm_source=docs&utm_medium=workspace&utm_campaign=nov&utm_content=linux).
> [Find out if your company is using Dash Enterprise](https://go.plotly.com/company-lookup)
## Dash Enterprise Deployment
> If your company has licensed Dash Enterprise, then view the deployment
> documentation by visiting
>
> **`https://<your-dash-enterprise-platform>/Docs/dash-enterprise`**
>
> (Replace `<your-dash-enterprise-platform>` with the hostname of your
> licensed Dash Enterprise in your VPC).
>
> [Look up the hostname for your company's license](https://go.plotly.com/company-lookup)
[Dash Enterprise](https://plotly.com/dash/)
is Plotly's commercial product for developing & deploying
Dash Apps on your company's on-premises Linux servers or VPC
([AWS](https://plotly.com/dash/aws), [Google Cloud](https://plotly.com/dash), or [Azure](https://plotly.com/dash/azure)).
In addition to [easy, git-based deployment](https://plotly.com/dash/app-manager), the Dash Enterprise platform provides a complete Analytical App Stack.
This includes:
- [LDAP & SAML Authentication Middleware](https://plotly.com/dash/authentication)
- [Data Science Workspaces](https://plotly.com/dash/workspaces)
- [High Availability & Horizontal Scaling](https://plotly.com/dash/kubernetes)
- [Job Queue Support](https://plotly.com/dash/job-queue)
- [Enterprise-Wide Dash App Portal](https://plotly.com/dash/app-manager)
- [Design Kit](https://plotly.com/dash/design-kit)
- [Reporting, Alerting, Saved Views, and PDF Reports](https://plotly.com/dash/snapshot-engine)
- [Dashboard Toolkit](https://plotly.com/dash/toolkit)
- [Embedding Dash apps in Existing websites or Salesforce](https://plotly.com/dash/embedding)
- [AI App Catalog](https://plotly.com/dash/ai-and-ml-templates)
- [Big Data Best Practices](https://plotly.com/dash/big-data-for-python)
- [GPU support](https://plotly.com/dash/gpu-dask-acceleration)

## Heroku for Sharing Public Dash apps for Free
Heroku is one of the easiest platforms for deploying and managing public Flask
applications. The git & buildpack-based deployment of UIs of Heroku and Dash Enterprise
are nearly identical, enabling an easy transition to Dash Enterprise if you
are already using Heroku.
[View the official Heroku guide to Python](https://devcenter.heroku.com/articles/getting-started-with-python#introduction).
Here is a simple example. This example requires a Heroku account,
`git`, and `virtualenv`.
***
**Step 1. Create a new folder for your project:**
'''),
rc.Markdown('''
```shell
$ mkdir dash_app_example
$ cd dash_app_example
```
''', style=styles.code_container),
rc.Markdown('''
***
**Step 2. Initialize the folder with `git` and a `virtualenv`**
'''),
rc.Markdown('''
```shell
$ git init # initializes an empty git repo
$ virtualenv venv # creates a virtualenv called "venv"
$ source venv/bin/activate # uses the virtualenv
```
''',style=styles.code_container),
rc.Markdown('''
`virtualenv` creates a fresh Python instance. You will need to reinstall your
app's dependencies with this virtualenv:
'''),
rc.Markdown('''
```shell
$ pip install dash
$ pip install plotly
```
''', style=styles.code_container),
rc.Markdown('''
You will also need a new dependency, `gunicorn`, for deploying the app:
'''),
rc.Markdown('''
```shell
$ pip install gunicorn
```
''', style=styles.code_container),
rc.Markdown('''***
**Step 3. Initialize the folder with a sample app (`app.py`), a `.gitignore` file, `requirements.txt`, and a `Procfile` for deployment**
Create the following files in your project folder:
**`app.py`**
'''),
rc.Markdown('''
```python
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.H2('Hello World'),
dcc.Dropdown(
id='dropdown',
options=[{'label': i, 'value': i} for i in ['LA', 'NYC', 'MTL']],
value='LA'
),
html.Div(id='display-value')
])
@app.callback(dash.dependencies.Output('display-value', 'children'),
[dash.dependencies.Input('dropdown', 'value')])
def display_value(value):
return 'You have selected "{}"'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
```
''', style=styles.code_container),
rc.Markdown('''
***
**`.gitignore`**
'''),
rc.Markdown('''
```shell
venv
*.pyc
.DS_Store
.env
```
''', style=styles.code_container),
rc.Markdown('''
***
**`Procfile`**
'''),
rc.Markdown('''
```shell
web: gunicorn app:server
```
''', style=styles.code_container),
rc.Markdown('''
(Note that `app` refers to the filename `app.py`.
`server` refers to the variable `server` inside that file).
***
**`requirements.txt`**
`requirements.txt` describes your Python dependencies.
You can fill this file in automatically with:
'''),
rc.Markdown('''
```shell
$ pip freeze > requirements.txt
```
''', style=styles.code_container),
rc.Markdown('''
***
**4. Initialize Heroku, add files to Git, and deploy**
'''),
rc.Markdown('''
```shell
$ heroku create my-dash-app # change my-dash-app to a unique name
$ git add . # add all files to git
$ git commit -m 'Initial app boilerplate'
$ git push heroku master # deploy code to heroku
$ heroku ps:scale web=1 # run the app with a 1 heroku "dyno"
```
''', style=styles.code_container),
rc.Markdown('''
You should be able to view your app at `https://my-dash-app.herokuapp.com`
(changing `my-dash-app` to the name of your app).
**5. Update the code and redeploy**
When you modify `app.py` with your own code, you will need to add the changes
to git and push those changes to heroku.
'''),
rc.Markdown('''
```shell
$ git status # view the changes
$ git add . # add all the changes
$ git commit -m 'a description of the changes'
$ git push heroku master
```
''', style=styles.code_container),
rc.Markdown('''
***
This workflow for deploying apps on Heroku is very similar to how deployment
works with the Plotly Enterprise's Dash Enterprise.
[Learn more](https://plotly.com/dash/) or [get in touch](https://plotly.com/get-demo/).
''')
])
|
165354
|
import numpy as np
import pyCubbyFlow
from pytest import approx
from pytest_utils import *
cnt = 0
def test_grid2():
    """Basic 2D grid properties: resolution, origin, spacing, bbox, iteration."""
    global cnt
    a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
                                              gridSpacing=(1, 2),
                                              gridOrigin=(7, 5))
    assert a.resolution == (3, 4)
    assert_vector_similar(a.gridOrigin, (7, 5))
    assert_vector_similar(a.gridSpacing, (1, 2))
    # Bounding box spans origin to origin + resolution * spacing.
    assert_bounding_box_similar(
        a.boundingBox, pyCubbyFlow.BoundingBox2D((7, 5), (10, 13)))
    # Cell center of cell (0, 0) is origin + half a spacing step.
    f = a.cellCenterPosition
    assert_vector_similar(f(0, 0), (7.5, 6))
    b = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
                                              gridSpacing=(1, 2),
                                              gridOrigin=(7, 5))
    assert a.HasSameShape(b)
    def func(idx):
        global cnt
        # Each visited index must be a valid cell index.
        assert idx[0] >= 0 and idx[0] < 3
        assert idx[1] >= 0 and idx[1] < 4
        cnt += 1
    cnt = 0
    a.ForEachCellIndex(func)
    # 3 x 4 cells visited exactly once.
    assert cnt == 12
def test_scalar_grid2():
    """Resize, fill, sample, differentiate, and (de)serialize a 2D scalar grid."""
    global cnt
    a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
                                              gridSpacing=(1, 2),
                                              gridOrigin=(7, 5))
    a.Resize(resolution=(12, 7),
             gridSpacing=(3, 4),
             gridOrigin=(9, 2))
    assert a.resolution == (12, 7)
    assert_vector_similar(a.gridOrigin, (9, 2))
    assert_vector_similar(a.gridSpacing, (3, 4))
    # Freshly resized grid is zero-initialized.
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            assert a[i, j] == 0.0
    a[5, 6] = 17.0
    assert a[5, 6] == 17.0
    a.Fill(42.0)
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            assert a[i, j] == 42.0
    def func(pt):
        return pt.x ** 2 + pt.y ** 2
    # Fill from a position-based function, then cross-check sampling,
    # the raw data view, and finite-difference operators.
    a.Fill(func)
    pos = a.DataPosition()
    acc = np.array(a.DataView(), copy=False)
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            pt = pos(i, j)
            assert func(pt) == a[i, j]
            assert func(pt) == approx(a.Sample(pt))
            # Data view is indexed (j, i) — row-major with y first.
            assert acc[j, i] == a[i, j]
            # Can't compare to analytic solution because FDM with such a coarse
            # grid will return inaccurate results by design.
            assert_vector_similar(a.GradientAtDataPoint((i, j)), a.Gradient(pt))
            assert a.LaplacianAtDataPoint((i, j)) == a.Laplacian(pt)
    def func(idx):
        global cnt
        # Vertex-centered: data points extend one past the cell resolution.
        assert idx[0] >= 0 and idx[0] < a.resolution.x + 1
        assert idx[1] >= 0 and idx[1] < a.resolution.y + 1
        cnt += 1
    cnt = 0
    a.ForEachDataPointIndex(func)
    assert cnt == (a.resolution.x + 1) * (a.resolution.y + 1)
    # Round-trip through serialization preserves shape and contents.
    blob = a.Serialize()
    b = pyCubbyFlow.VertexCenteredScalarGrid2()
    b.Deserialize(blob)
    assert b.resolution == (12, 7)
    assert_vector_similar(b.gridOrigin, (9, 2))
    assert_vector_similar(b.gridSpacing, (3, 4))
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            assert a[i, j] == b[i, j]
def test_cell_centered_scalar_grid2():
    """Constructors, properties, Fill/Swap/Set/Clone of the 2D scalar grid.

    NOTE(review): despite the function name, this exercises
    VertexCenteredScalarGrid2 (dataSize is resolution + 1 below) — likely a
    copy/paste from a cell-centered suite; confirm the intended class.
    """
    # CTOR
    a = pyCubbyFlow.VertexCenteredScalarGrid2()
    assert a.resolution == (1, 1)
    assert_vector_similar(a.gridOrigin, (0.0, 0.0))
    assert_vector_similar(a.gridSpacing, (1.0, 1.0))
    a = pyCubbyFlow.VertexCenteredScalarGrid2((3, 4), (1, 2), (7, 5))
    assert a.resolution == (3, 4)
    assert_vector_similar(a.gridOrigin, (7, 5))
    assert_vector_similar(a.gridSpacing, (1, 2))
    a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
                                              gridSpacing=(1, 2),
                                              gridOrigin=(7, 5))
    assert a.resolution == (3, 4)
    assert_vector_similar(a.gridOrigin, (7, 5))
    assert_vector_similar(a.gridSpacing, (1, 2))
    # domainSizeX derives a uniform spacing: 12.0 / 3 cells = 4 per axis.
    a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
                                              domainSizeX=12.0,
                                              gridOrigin=(7, 5))
    assert a.resolution == (3, 4)
    assert_vector_similar(a.gridOrigin, (7, 5))
    assert_vector_similar(a.gridSpacing, (4, 4))
    # Properties
    a = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(3, 4),
                                              gridSpacing=(1, 2),
                                              gridOrigin=(7, 5))
    # Vertex-centered data size is resolution + 1 in each axis.
    assert_vector_similar(a.dataSize, (4, 5))
    assert_vector_similar(a.dataOrigin, (7, 5))
    # Modifiers
    b = pyCubbyFlow.VertexCenteredScalarGrid2(resolution=(6, 3),
                                              gridSpacing=(5, 9),
                                              gridOrigin=(1, 2))
    a.Fill(42.0)
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            assert a[i, j] == 42.0
    # Swap exchanges both shape and data between the two grids.
    a.Swap(b)
    assert a.resolution == (6, 3)
    assert_vector_similar(a.gridOrigin, (1, 2))
    assert_vector_similar(a.gridSpacing, (5, 9))
    assert b.resolution == (3, 4)
    assert_vector_similar(b.gridOrigin, (7, 5))
    assert_vector_similar(b.gridSpacing, (1, 2))
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            assert a[i, j] == 0.0
    for j in range(b.resolution.y):
        for i in range(b.resolution.x):
            assert b[i, j] == 42.0
    # Set copies shape and data from the argument grid.
    a.Set(b)
    assert a.resolution == (3, 4)
    assert_vector_similar(a.gridOrigin, (7, 5))
    assert_vector_similar(a.gridSpacing, (1, 2))
    for j in range(a.resolution.y):
        for i in range(a.resolution.x):
            assert a[i, j] == 42.0
    # Clone returns an independent copy with identical shape and data.
    c = a.Clone()
    assert c.resolution == (3, 4)
    assert_vector_similar(c.gridOrigin, (7, 5))
    assert_vector_similar(c.gridSpacing, (1, 2))
    for j in range(c.resolution.y):
        for i in range(c.resolution.x):
            assert c[i, j] == 42.0
# ------------------------------------------------------------------------------
def test_grid3():
    """Basic 3D grid properties: resolution, origin, spacing, bbox, iteration."""
    global cnt
    a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
                                            gridSpacing=(1, 2, 3),
                                            gridOrigin=(7, 5, 3))
    assert a.resolution == (3, 4, 5)
    assert_vector_similar(a.gridOrigin, (7, 5, 3))
    assert_vector_similar(a.gridSpacing, (1, 2, 3))
    # Bounding box spans origin to origin + resolution * spacing.
    assert_bounding_box_similar(
        a.boundingBox, pyCubbyFlow.BoundingBox3D((7, 5, 3), (10, 13, 18)))
    # Cell center of cell (0, 0, 0) is origin + half a spacing step.
    f = a.cellCenterPosition
    assert_vector_similar(f(0, 0, 0), (7.5, 6, 4.5))
    b = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
                                            gridSpacing=(1, 2, 3),
                                            gridOrigin=(7, 5, 3))
    assert a.HasSameShape(b)
    def func(idx):
        global cnt
        # Each visited index must be a valid cell index.
        assert idx[0] >= 0 and idx[0] < 3
        assert idx[1] >= 0 and idx[1] < 4
        assert idx[2] >= 0 and idx[2] < 5
        cnt += 1
    cnt = 0
    a.ForEachCellIndex(func)
    # 3 x 4 x 5 cells visited exactly once.
    assert cnt == 60
def test_scalar_grid3():
    """Exercise resize, fill, sampling, differential operators and
    (de)serialization of a 3-D cell-centered scalar grid."""
    global cnt
    a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
                                            gridSpacing=(1, 2, 3),
                                            gridOrigin=(7, 5, 3))
    # Resize replaces resolution, spacing and origin in one call.
    a.Resize(resolution=(12, 7, 2),
             gridSpacing=(3, 4, 5),
             gridOrigin=(9, 2, 5))
    assert a.resolution == (12, 7, 2)
    assert_vector_similar(a.gridOrigin, (9, 2, 5))
    assert_vector_similar(a.gridSpacing, (3, 4, 5))
    # Freshly resized grid is zero-initialized.
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                assert a[i, j, k] == 0.0
    a[5, 6, 1] = 17.0
    assert a[5, 6, 1] == 17.0
    a.Fill(42.0)
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                assert a[i, j, k] == 42.0
    def func(pt):
        # Quadratic field used to check sampling and FDM operators below.
        return pt.x ** 2 + pt.y ** 2 + pt.z ** 2
    a.Fill(func)
    pos = a.DataPosition()
    # NumPy view shares memory with the grid; note the (z, y, x) axis order.
    acc = np.array(a.DataView(), copy=False)
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                pt = pos(i, j, k)
                assert func(pt) == a[i, j, k]
                assert func(pt) == approx(a.Sample(pt))
                assert acc[k, j, i] == a[i, j, k]
                # Can't compare to analytic solution because FDM with such a
                # coarse grid will return inaccurate results by design.
                assert_vector_similar(
                    a.GradientAtDataPoint((i, j, k)), a.Gradient(pt))
                assert a.LaplacianAtDataPoint((i, j, k)) == a.Laplacian(pt)
    # Redefines func: now an index-visitor used to count data points.
    def func(idx):
        global cnt
        assert idx[0] >= 0 and idx[0] < a.resolution.x
        assert idx[1] >= 0 and idx[1] < a.resolution.y
        assert idx[2] >= 0 and idx[2] < a.resolution.z
        cnt += 1
    cnt = 0
    a.ForEachDataPointIndex(func)
    assert cnt == a.resolution.x * a.resolution.y * a.resolution.z
    # Round-trip through serialization must preserve shape and values.
    blob = a.Serialize()
    b = pyCubbyFlow.CellCenteredScalarGrid3()
    b.Deserialize(blob)
    assert b.resolution == (12, 7, 2)
    assert_vector_similar(b.gridOrigin, (9, 2, 5))
    assert_vector_similar(b.gridSpacing, (3, 4, 5))
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                assert a[i, j, k] == b[i, j, k]
def test_cell_centered_scalar_grid3():
    """Exercise all constructor overloads, data-layout properties and the
    Swap/Set/Clone modifiers of CellCenteredScalarGrid3."""
    # CTOR
    a = pyCubbyFlow.CellCenteredScalarGrid3()
    assert a.resolution == (1, 1, 1)
    assert_vector_similar(a.gridOrigin, (0.0, 0.0, 0.0))
    assert_vector_similar(a.gridSpacing, (1.0, 1.0, 1.0))
    # Positional-argument constructor.
    a = pyCubbyFlow.CellCenteredScalarGrid3((3, 4, 5), (1, 2, 3), (7, 5, 2))
    assert a.resolution == (3, 4, 5)
    assert_vector_similar(a.gridOrigin, (7, 5, 2))
    assert_vector_similar(a.gridSpacing, (1, 2, 3))
    # Keyword-argument constructor.
    a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
                                            gridSpacing=(1, 2, 3),
                                            gridOrigin=(7, 5, 2))
    assert a.resolution == (3, 4, 5)
    assert_vector_similar(a.gridOrigin, (7, 5, 2))
    assert_vector_similar(a.gridSpacing, (1, 2, 3))
    # domainSizeX fixes the x-extent (12 / 3 cells => spacing 4, isotropic).
    a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
                                            domainSizeX=12.0,
                                            gridOrigin=(7, 5, 2))
    assert a.resolution == (3, 4, 5)
    assert_vector_similar(a.gridOrigin, (7, 5, 2))
    assert_vector_similar(a.gridSpacing, (4, 4, 4))
    # Properties
    a = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(3, 4, 5),
                                            gridSpacing=(1, 2, 3),
                                            gridOrigin=(7, 5, 2))
    assert_vector_similar(a.dataSize, (3, 4, 5))
    # Cell-centered data starts half a spacing inside the grid origin.
    assert_vector_similar(a.dataOrigin, (7.5, 6, 3.5))
    # Modifiers
    b = pyCubbyFlow.CellCenteredScalarGrid3(resolution=(6, 3, 7),
                                            gridSpacing=(5, 9, 3),
                                            gridOrigin=(1, 2, 8))
    a.Fill(42.0)
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                assert a[i, j, k] == 42.0
    # Swap exchanges shape, geometry and contents of both grids.
    a.Swap(b)
    assert a.resolution == (6, 3, 7)
    assert_vector_similar(a.gridOrigin, (1, 2, 8))
    assert_vector_similar(a.gridSpacing, (5, 9, 3))
    assert b.resolution == (3, 4, 5)
    assert_vector_similar(b.gridOrigin, (7, 5, 2))
    assert_vector_similar(b.gridSpacing, (1, 2, 3))
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                assert a[i, j, k] == 0.0
    for k in range(b.resolution.z):
        for j in range(b.resolution.y):
            for i in range(b.resolution.x):
                assert b[i, j, k] == 42.0
    # Set copies b into a (deep copy of shape and values).
    a.Set(b)
    assert a.resolution == (3, 4, 5)
    assert_vector_similar(a.gridOrigin, (7, 5, 2))
    assert_vector_similar(a.gridSpacing, (1, 2, 3))
    for k in range(a.resolution.z):
        for j in range(a.resolution.y):
            for i in range(a.resolution.x):
                assert a[i, j, k] == 42.0
    # Clone returns an independent copy.
    c = a.Clone()
    assert c.resolution == (3, 4, 5)
    assert_vector_similar(c.gridOrigin, (7, 5, 2))
    assert_vector_similar(c.gridSpacing, (1, 2, 3))
    for k in range(c.resolution.z):
        for j in range(c.resolution.y):
            for i in range(c.resolution.x):
                assert c[i, j, k] == 42.0
|
165379
|
import random
import scipy
import numpy as np
import h5py
class DataLoader(object):
    """Load the MNIST-background classification data and serve mini-batches.

    NOTE(review): only ``x_test``/``y_test`` are populated in ``get_data``;
    ``next_batch`` and ``count_num_batch`` also reference ``self.mnist``,
    ``self.x_valid``/``self.y_valid`` and ``self.y_train``/``self.x_train``,
    which are never assigned in this class as shown — presumably set by a
    fuller version of ``get_data`` or externally. Confirm before relying on
    the 'train'/'valid' modes.
    """
    def __init__(self, cfg):
        # cfg must expose: data_augment, batch_size, height, width, channel,
        # max_angle (used by next_batch / random_rotation_2d).
        self.cfg = cfg
        self.augment = cfg.data_augment
    def get_data(self, mode='train'):
        """Load the test split: 12000 28x28 grayscale images, NHWC layout.

        The ``mode`` argument is currently ignored — TODO confirm intended.
        """
        h5f = h5py.File('./classification/DataLoaders/mnist_background.h5', 'r')
        self.x_test = np.reshape(h5f['X'][:], [12000, 28, 28, 1])
        self.y_test = h5f['Y'][:]
        h5f.close()
        print()
    def next_batch(self, start=None, end=None, mode='train'):
        """Return one (x, y) batch; 'train' samples randomly (with optional
        rotation augmentation), 'valid'/'test' slice [start:end]."""
        if mode == 'train':
            x, y = self.mnist.train.next_batch(self.cfg.batch_size)
            x = x.reshape((-1, self.cfg.height, self.cfg.width, self.cfg.channel))
            if self.augment:
                x = random_rotation_2d(x, self.cfg.max_angle)
        elif mode == 'valid':
            x = self.x_valid[start:end]
            y = self.y_valid[start:end]
        elif mode == 'test':
            x = self.x_test[start:end]
            y = self.y_test[start:end]
        return x, y
    def count_num_batch(self, batch_size, mode='train'):
        """Number of whole batches in the chosen split (remainder dropped)."""
        if mode == 'train':
            num_batch = int(self.y_train.shape[0] / batch_size)
        elif mode == 'valid':
            num_batch = int(self.y_valid.shape[0] / batch_size)
        elif mode == 'test':
            num_batch = int(self.y_test.shape[0] / batch_size)
        return num_batch
    def randomize(self):
        """ Randomizes the order of data samples and their corresponding labels"""
        # Returns shuffled copies; does not mutate self.x_train/self.y_train.
        permutation = np.random.permutation(self.y_train.shape[0])
        shuffled_x = self.x_train[permutation, :, :, :]
        shuffled_y = self.y_train[permutation]
        return shuffled_x, shuffled_y
def random_rotation_2d(batch, max_angle):
    """Randomly rotate each image in a batch by an angle in (-max_angle, max_angle).

    Each image is rotated with probability 0.5; otherwise it is passed through
    unchanged. The input shape (e.g. NHWC with a trailing channel of 1) is
    restored on return.

    Arguments:
        batch: array of 2D images; squeezable to shape (N, H, W).
        max_angle: `float`. The maximum rotation angle in degrees.
    Returns:
        batch of rotated 2D images with the same shape as the input.
    """
    # scipy.ndimage.interpolation.rotate is deprecated (and removed in recent
    # SciPy); the public location is scipy.ndimage.rotate.
    from scipy.ndimage import rotate
    size = batch.shape
    batch = np.squeeze(batch)
    batch_rot = np.zeros(batch.shape)
    for i in range(batch.shape[0]):
        # Coin flip: rotate roughly half of the images.
        if bool(random.getrandbits(1)):
            image = np.squeeze(batch[i])
            angle = random.uniform(-max_angle, max_angle)
            # reshape=False keeps the output the same size as the input image.
            batch_rot[i] = rotate(image, angle, mode='nearest', reshape=False)
        else:
            batch_rot[i] = batch[i]
    return batch_rot.reshape(size)
|
165403
|
from django.views import View
from django import http
from uxhelpers.utils import json_response
import json
import logging
logger = logging.getLogger(__name__)
# Numeric values for client-supplied level names; mirrors the stdlib
# ``logging`` level constants so ``LEVELS[name]`` can be passed to
# ``logger.log()``.
LEVELS = {
    'CRITICAL': 50,
    'ERROR': 40,
    'WARNING': 30,
    'INFO': 20,
    'DEBUG': 10,
    'NOTSET': 0
}
class LogPostView(View):
    """Accept client-side log records via POST and forward them to the
    server-side logger at the requested level."""

    def post(self, request):
        try:
            payload = json.loads(request.body)
            level = payload['level']
            logger.log(LEVELS[level], payload['message'])
        except (ValueError, KeyError):
            # Bad JSON, unknown level name, or missing field.
            logger.warning('Malformed client log received')
        return json_response(request, {'status': 200})
|
165419
|
from django.contrib import admin
from .models import Article
from .models import Tag
from .models import Author
from .models import Affiliation
# Register your models here.
for _model in (Article, Tag, Author, Affiliation):
    admin.site.register(_model)
|
165433
|
import base64
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from regcore_write.views import security
def _wrapped_fn(request):
    """Minimal view used as the wrapped target in these tests; always 204."""
    return HttpResponse(status=204)
def _encode(username, password):
as_unicode = '{0}:{1}'.format(username, password).encode()
encoded = base64.b64encode(as_unicode).decode('utf-8')
return 'Basic ' + encoded
class SecurityTest(TestCase):
    """Tests for the secure_write Basic Auth decorator."""

    # Fix: the configured password must be "a_pass" — the success case below
    # authenticates with ('a_user', 'a_pass') and expects 204. The previous
    # "<PASSWORD>" placeholder made that request fail with 401.
    @override_settings(HTTP_AUTH_USER="a_user", HTTP_AUTH_PASSWORD="a_pass")
    def test_secure_write(self):
        """Basic Auth must match the configuration"""
        fn = security.secure_write(_wrapped_fn)
        # No credentials at all.
        request = RequestFactory().get('/')
        self.assertEqual(fn(request).status_code, 401)
        # Wrong user and wrong password.
        request = RequestFactory().get(
            '/', HTTP_AUTHORIZATION=_encode('wrong', 'pass'))
        self.assertEqual(fn(request).status_code, 401)
        # Right user, wrong password.
        request = RequestFactory().get(
            '/', HTTP_AUTHORIZATION=_encode('a_user', 'pass'))
        self.assertEqual(fn(request).status_code, 401)
        # Wrong user, right password.
        request = RequestFactory().get(
            '/', HTTP_AUTHORIZATION=_encode('wrong', 'a_pass'))
        self.assertEqual(fn(request).status_code, 401)
        # Correct credentials reach the wrapped view (204).
        request = RequestFactory().get(
            '/', HTTP_AUTHORIZATION=_encode('a_user', 'a_pass'))
        self.assertEqual(fn(request).status_code, 204)

    @override_settings(HTTP_AUTH_USER=None, HTTP_AUTH_PASSWORD=None)
    def test_secure_write_unset(self):
        """Basic Auth should not be required when the environment isn't set"""
        fn = security.secure_write(_wrapped_fn)
        request = RequestFactory().get('/')
        self.assertEqual(fn(request).status_code, 204)

    @override_settings(HTTP_AUTH_USER="", HTTP_AUTH_PASSWORD="")
    def test_secure_write_empty(self):
        """Basic Auth should not be required when the environment is empty"""
        fn = security.secure_write(_wrapped_fn)
        request = RequestFactory().get('/')
        self.assertEqual(fn(request).status_code, 204)
|
165455
|
from kango.buildsteps import BuildStepBase
class BuildStep(BuildStepBase):
    """Build-step hook for the packaging pipeline; currently a no-op."""
    def pre_pack(self, output_path, project_path, info, args):
        # Intentionally does nothing before packaging.
        pass
|
165495
|
import itertools
from typing import Any
from typing import Dict
from typing import Optional
from typing import Union
import dask.dataframe as dd
import holoviews as hv
import numpy as np
import pandas as pd
from bokeh.models import HoverTool
from sid.colors import get_colors
from sid.policies import compute_pseudo_effect_sizes_of_policies
# Default styling for the policy Gantt chart; user-supplied ``fig_kwargs``
# entries override these.
DEFAULT_FIGURE_KWARGS = {
    "height": 400,
    "width": 600,
    "line_width": 12,
    "title": "Gantt Chart of Policies",
}
def plot_policy_gantt_chart(
    policies,
    effects=False,
    colors="categorical",
    fig_kwargs=None,
):
    """Plot a Gantt chart of the policies.

    Parameters
    ----------
    policies : Union[dict, pandas.DataFrame]
        Policies either as the dictionary passed to the simulation or as a
        DataFrame with one row per policy.
    effects : Union[bool, dict], optional
        If truthy, pseudo effect sizes are computed and encoded as segment
        transparency; a dict is forwarded as keyword arguments to
        :func:`compute_pseudo_effect_sizes_of_policies`.
    colors : str, optional
        Color palette name passed to ``get_colors``.
    fig_kwargs : Optional[dict], optional
        Keyword arguments merged over :data:`DEFAULT_FIGURE_KWARGS`.

    Returns
    -------
    gantt : hv.Segments
        The styled Gantt chart.
    """
    if fig_kwargs is None:
        fig_kwargs = {}
    fig_kwargs = {**DEFAULT_FIGURE_KWARGS, **fig_kwargs}
    if isinstance(policies, dict):
        df = (
            pd.DataFrame(policies)
            .T.reset_index()
            .rename(columns={"index": "name"})
            # Fix: "datetime64" without a unit is rejected by pandas >= 2.0;
            # spell out nanosecond precision explicitly.
            .astype({"start": "datetime64[ns]", "end": "datetime64[ns]"})
            .drop(columns="policy")
        )
    elif isinstance(policies, pd.DataFrame):
        df = policies
    else:
        raise ValueError("'policies' should be either a dict or pandas.DataFrame.")
    if effects:
        effect_kwargs = effects if isinstance(effects, dict) else {}
        effects = compute_pseudo_effect_sizes_of_policies(
            policies=policies, **effect_kwargs
        )
        effects_s = pd.DataFrame(
            [{"policy": name, "effect": effects[name]["mean"]} for name in effects]
        ).set_index("policy")["effect"]
        df = df.merge(effects_s, left_on="name", right_index=True)
        # Stronger effects -> more opaque segments; +0.1 keeps a minimum alpha.
        df["alpha"] = (1 - df["effect"] + 0.1) / 1.1
    else:
        df["alpha"] = 1
    df = df.reset_index()
    df = _complete_dates(df)
    df = _add_color_to_gantt_groups(df, colors)
    df = _add_positions(df)
    hv.extension("bokeh", logo=False)
    segments = hv.Segments(
        df,
        [
            hv.Dimension("start", label="Date"),
            hv.Dimension("position", label="Affected contact model"),
            "end",
            "position",
        ],
    )
    y_ticks_and_labels = list(zip(*_create_y_ticks_and_labels(df)))
    tooltips = [("Name", "@name")]
    if effects:
        tooltips.append(("Effect", "@effect"))
    hover = HoverTool(tooltips=tooltips)
    gantt = segments.opts(
        color="color",
        alpha="alpha",
        tools=[hover],
        yticks=y_ticks_and_labels,
        **fig_kwargs,
    )
    return gantt
def _complete_dates(df):
"""Complete dates."""
for column in ("start", "end"):
df[column] = pd.to_datetime(df[column])
df["start"] = df["start"].fillna(df["start"].min())
df["end"] = df["end"].fillna(df["end"].max())
return df
def _add_color_to_gantt_groups(df, colors):
    """Assign one color per affected contact model, cycling the palette."""
    palette = itertools.cycle(get_colors(colors, 4))
    models = df["affected_contact_model"].unique()
    model_to_color = dict(zip(models, palette))
    df["color"] = df["affected_contact_model"].replace(model_to_color)
    return df
def _add_positions(df):
    """Add positions.

    This functions computes the positions of policies, displayed as segments on the time
    line. For example, if two policies affecting the same contact model have an
    overlapping time windows, the segments are stacked and drawn onto different
    horizontal lines.
    """
    # Running offset so positions of different contact-model groups never
    # collide; advanced by the inner function after each group.
    min_position = 0
    def _add_within_group_positions(df):
        """Add within group positions."""
        nonlocal min_position
        # Every policy starts on the group's base line ...
        position = pd.Series(data=min_position, index=df.index)
        for i in range(1, len(df)):
            start = df.iloc[i]["start"]
            end = df.iloc[i]["end"]
            # ... and is bumped to a free line if its window overlaps any
            # earlier policy in the group (either endpoint falls inside one).
            is_overlapping = (
                (df.iloc[:i]["start"] <= start) & (start <= df.iloc[:i]["end"])
            ) | ((df.iloc[:i]["start"] <= end) & (end <= df.iloc[:i]["end"]))
            if is_overlapping.any():
                # Smallest line index not taken by an overlapping policy.
                possible_positions = set(range(min_position, i + min_position + 1))
                positions_of_overlapping = set(position.iloc[:i][is_overlapping])
                position.iloc[i] = min(possible_positions - positions_of_overlapping)
        min_position = max(position) + 1
        return position
    positions = df.groupby("affected_contact_model", group_keys=False).apply(
        _add_within_group_positions
    )
    df["position_local"] = positions
    # Re-number positions globally so they form a dense 0..n range for the
    # y axis.
    df["position"] = df.groupby(
        ["affected_contact_model", "position_local"], sort=True
    ).ngroup()
    return df
def _create_y_ticks_and_labels(df):
"""Create the positions and their related labels for the y axis."""
pos_per_group = df.groupby("position", as_index=False).first()
mean_pos_per_group = (
pos_per_group.groupby("affected_contact_model")["position"].mean().reset_index()
)
return mean_pos_per_group["position"], mean_pos_per_group["affected_contact_model"]
# Message raised when the simulation was run without saving which contact
# model caused each infection (see prepare_data_for_infection_rates_by_
# contact_models).
ERROR_MISSING_CHANNEL = (
    "'channel_infected_by_contact' is necessary to plot infection rates by contact "
    "models. Re-run the simulation and pass `saved_columns={'channels': "
    "'channel_infected_by_contact'}` to `sid.get_simulate_func`."
)
# Default styling for the infection-rates heatmap; user-supplied
# ``fig_kwargs`` entries override these.
DEFAULT_IR_PER_CM_KWARGS = {
    "width": 600,
    "height": 400,
    "tools": ["hover"],
    "title": "Contribution of Contact Models to Infections",
    "xlabel": "Date",
    "ylabel": "Contact Model",
    "invert_yaxis": True,
    "colorbar": True,
    "cmap": "YlOrBr",
}
def plot_infection_rates_by_contact_models(
    df_or_time_series: Union[pd.DataFrame, dd.core.DataFrame],
    show_reported_cases: bool = False,
    unit: str = "share",
    fig_kwargs: Optional[Dict[str, Any]] = None,
) -> hv.HeatMap:
    """Plot infection rates by contact models as a heatmap.

    Parameters
    ----------
    df_or_time_series : Union[pandas.DataFrame, dask.dataframe.core.DataFrame]
        Either the raw simulation time series (a dask DataFrame) or a
        :class:`pandas.DataFrame` already produced by
        :func:`prepare_data_for_infection_rates_by_contact_models`, which
        allows averaging prepared data over several seeded runs.
    show_reported_cases : bool, optional
        Select between reported (test-identified) or real infections.
    unit : str
        Unit shown in the figure: ``"share"`` (share of that day's
        infections), ``"population_share"`` (share of the population), or
        ``"incidence"`` (weekly incidence per 100,000).
    fig_kwargs : Optional[Dict[str, Any]], optional
        Keyword arguments merged over :data:`DEFAULT_IR_PER_CM_KWARGS` and
        passed to ``heatmap.opts``.

    Returns
    -------
    heatmap : hv.HeatMap
        The styled heatmap object.
    """
    kwargs = dict(DEFAULT_IR_PER_CM_KWARGS)
    if fig_kwargs is not None:
        kwargs.update(fig_kwargs)
    # Accept pre-aggregated data as-is; otherwise aggregate the time series.
    if _is_data_prepared_for_heatmap(df_or_time_series):
        df = df_or_time_series
    else:
        df = prepare_data_for_infection_rates_by_contact_models(
            df_or_time_series, show_reported_cases, unit
        )
    hv.extension("bokeh", logo=False)
    return hv.HeatMap(df).opts(**kwargs)
def _is_data_prepared_for_heatmap(df):
"""Is the data prepared for the heatmap plot."""
return (
isinstance(df, pd.DataFrame)
and df.columns.isin(["date", "channel_infected_by_contact", "share"]).all()
and not df["channel_infected_by_contact"]
.isin(["not_infected_by_contact"])
.any()
)
def prepare_data_for_infection_rates_by_contact_models(
    time_series: dd.core.DataFrame,
    show_reported_cases: bool = False,  # noqa: U100
    unit: str = "share",
) -> pd.DataFrame:
    """Prepare the data for the heatmap plot.

    Parameters
    ----------
    time_series : dask.dataframe.core.DataFrame
        The time series of a simulation.
    show_reported_cases : bool, optional
        A boolean to select between reported or real cases of infections. Reported cases
        are identified via testing mechanisms.
    unit : str
        The arguments specifies the unit shown in the figure.
        - ``"share"`` means that daily units represent the share of infection caused
          by a contact model among all infections on the same day.
        - ``"population_share"`` means that daily units represent the share of
          infection caused by a contact model among all people on the same day.
        - ``"incidence"`` means that the daily units represent incidence levels per
          100,000 individuals.

    Returns
    -------
    time_series : pandas.DataFrame
        The time series with the prepared data for the plot.
    """
    # Normalize the input to a dask frame so the pipeline below is uniform.
    if isinstance(time_series, pd.DataFrame):
        time_series = dd.from_pandas(time_series, npartitions=1)
    elif not isinstance(time_series, dd.core.DataFrame):
        raise ValueError("'time_series' must be either pd.DataFrame or dask.dataframe.")
    if "channel_infected_by_contact" not in time_series:
        raise ValueError(ERROR_MISSING_CHANNEL)
    if show_reported_cases:
        # Shift infection channels to the date the case became known.
        time_series = _adjust_channel_infected_by_contact_to_new_known_cases(
            time_series
        )
    # Daily counts per infection channel (including 'not_infected_by_contact').
    counts = (
        time_series[["date", "channel_infected_by_contact"]]
        .groupby(["date", "channel_infected_by_contact"])
        .size()
        .reset_index()
        .rename(columns={0: "n"})
    )
    if unit == "share":
        # Share among that day's infections: drop the uninfected before
        # normalizing. The ``meta`` argument tells dask the transform's dtype.
        out = counts.query(
            "channel_infected_by_contact != 'not_infected_by_contact'"
        ).assign(
            share=lambda x: x["n"]
            / x.groupby("date")["n"].transform("sum", meta=("n", "f8")),
        )
    elif unit == "population_share":
        # Share among the whole population: normalize first, then drop the
        # uninfected.
        out = counts.assign(
            share=lambda x: x["n"]
            / x.groupby("date")["n"].transform("sum", meta=("n", "f8")),
        ).query("channel_infected_by_contact != 'not_infected_by_contact'")
    elif unit == "incidence":
        # Weekly incidence per 100,000 individuals.
        out = counts.query(
            "channel_infected_by_contact != 'not_infected_by_contact'"
        ).assign(share=lambda x: x["n"] * 7 / 100_000)
    else:
        raise ValueError(
            "'unit' should be one of 'share', 'population_share' or 'incidence'"
        )
    # Materialize the lazy dask graph into a pandas DataFrame.
    out = out.drop(columns="n").compute()
    return out
def _adjust_channel_infected_by_contact_to_new_known_cases(df):
    """Adjust channel of infections by contacts to new known cases.

    Channels are recorded on the infection date, which is not the date the
    individual tests positive. This shifts ``"channel_infected_by_contact"``
    to the date the case becomes known.
    """
    channels = _find_channel_of_infection_for_individuals(df)
    return _patch_channel_infected_by_contact(df, channels)
def _find_channel_of_infection_for_individuals(df):
    """Return the infection channel per infected individual as a pandas Series.

    Rows whose channel is 'not_infected_by_contact' are dropped; the lazy
    frame is materialized at the end.
    """
    known = df["channel_infected_by_contact"].cat.as_known()
    df["channel_infected_by_contact"] = known.cat.remove_categories(
        ["not_infected_by_contact"]
    )
    df = df.dropna(subset=["channel_infected_by_contact"])
    return df["channel_infected_by_contact"].compute()
def _patch_channel_infected_by_contact(df, s):
    """Patch channel of infections by contact to only show channels for known cases."""
    # Replace the original channel column with the per-individual channels
    # from ``s``. NOTE(review): ``merge`` is called without ``on=``, so it
    # joins on whatever columns the two frames share — confirm ``s``'s frame
    # carries the intended join key(s) when this runs on the dask frame.
    df = df.drop(columns="channel_infected_by_contact")
    df = df.merge(s.to_frame(name="channel_infected_by_contact"), how="left")
    # Hide the channel for individuals whose infection is not (yet) a known
    # case.
    df["channel_infected_by_contact"] = df["channel_infected_by_contact"].mask(
        ~df["new_known_case"], np.nan
    )
    # Re-introduce the 'not_infected_by_contact' marker for the masked rows.
    df["channel_infected_by_contact"] = (
        df["channel_infected_by_contact"]
        .cat.add_categories("not_infected_by_contact")
        .fillna("not_infected_by_contact")
    )
    return df
|
165519
|
import os.path as op
import inspect
__all__ = ["get_fname", "with_name"]
def get_fname(subses_dict, suffix,
              tracking_params=None, segmentation_params=None):
    """Assemble an output filename in the subject/session results directory.

    The DWI file's base name (without extension) is the stem; tracking and
    segmentation descriptors are appended when the corresponding parameter
    dicts provide them, followed by ``suffix``.
    """
    dwi_basename = op.split(subses_dict["dwi_file"])[1]
    stem = dwi_basename.split('.')[0]
    fname = op.join(subses_dict["results_dir"], stem)
    if tracking_params is not None and 'odf_model' in tracking_params:
        fname += (
            f"_space-RASMM_model-{tracking_params['odf_model']}"
            f"_desc-{tracking_params['directions']}"
        )
    if segmentation_params is not None and 'seg_algo' in segmentation_params:
        fname += f"-{segmentation_params['seg_algo']}"
    return fname + suffix
def with_name(task_list):
    """Turn a list of tasks into a dict keyed by '<function name>_res'."""
    return {task.function.__name__ + "_res": task for task in task_list}
def get_default_args(func):
    """Return a mapping of parameter name -> default value for every
    parameter of ``func`` that declares a default."""
    defaults = {}
    for name, param in inspect.signature(func).parameters.items():
        if param.default is not inspect.Parameter.empty:
            defaults[name] = param.default
    return defaults
|
165528
|
from django.conf.urls.defaults import *
# URL routes for the quiz game: index, category listing, and a single quiz.
# NOTE(review): uses the string-based ``patterns(...)`` helper from
# ``django.conf.urls.defaults``, which only exists in old Django versions
# (< 1.6) — confirm the project's Django version before modernizing.
urlpatterns = patterns('wouso.games.quiz.views',
    url(r'^$', 'index', name='quiz_index_view'),
    url(r'^cat/(?P<id>\d+)/$', 'category', name='quiz_category_view'),
    url(r'^(?P<id>\d+)/$', 'quiz', name='quiz_view'),
)
|
165545
|
from settings_local import (
BITCOIND_TEST_MODE,
BITCOIND_RPC_USERNAME,
BITCOIND_RPC_PASSWORD,
BITCOIND_RPC_PORT,
BITCOIND_RPC_HOST,
BITCOIND_TEST_RPC_USERNAME,
BITCOIND_TEST_RPC_PASSWORD,
BITCOIND_TEST_RPC_HOST,
BITCOIND_TEST_RPC_PORT,
ORACLE_ADDRESS,
ORGANIZATION_ADDRESS,
ORACLE_FEE,
ORGANIZATION_FEE
)
from shared.liburl_wrapper import safe_blockchain_multiaddress, safe_nonbitcoind_blockchain_getblock, safe_get_raw_transaction
import json
import jsonrpclib
from bitcoinrpc.authproxy import AuthServiceProxy
import time
from xmlrpclib import ProtocolError
from decimal import Decimal
import socket
import logging
# Silence the noisy per-request logging emitted by the "requests" library.
logging.getLogger("requests").setLevel(logging.CRITICAL)
# Module-level alias of the deployment flag from settings_local.
TEST_MODE = BITCOIND_TEST_MODE
class UnknownServerError(Exception):
    """Raised when a ``keep_alive`` decorator names an unknown server."""
    pass
def slice_list(list, chunk):
    """Split *list* into consecutive chunks of at most *chunk* elements.

    Fix: the previous arithmetic produced a trailing empty chunk whenever
    ``len(list)`` was a multiple of ``chunk`` (callers had to skip empty
    chunks); stepping the range by ``chunk`` avoids that.

    Note: the parameter is named ``list`` (shadowing the builtin) to keep
    the existing keyword interface.
    """
    return [list[i:i + chunk] for i in range(0, len(list), chunk)]
class BitcoinClient:
def __init__(self, account=None):
self.account = account
self.connect()
self.blockchain_connect()
def _connect(self, connection_function):
try_factor = 1
while 1:
try:
connection_function()
return
except:
try_factor *= 2
if try_factor > 512:
logging.critical('can\'t connect to bitcoind server')
return
logging.info('can\'t connect to bitcoind server, waiting {}'.format(try_factor))
time.sleep(try_factor)
def connect(self):
def server_connection():
self.server = jsonrpclib.Server('http://{0}:{1}@{2}:{3}'.format(
BITCOIND_RPC_USERNAME,
BITCOIND_RPC_PASSWORD,
BITCOIND_RPC_HOST,
BITCOIND_RPC_PORT))
socket.setdefaulttimeout(None)
self.server.help()
self._connect(server_connection)
def blockchain_connect(self):
"""
If your Oracle is in test mode, then blockchain server is different than default server
"""
def server_connection():
if TEST_MODE:
self.blockchain_server = jsonrpclib.Server('http://{0}:{1}@{2}:{3}'.format(
BITCOIND_TEST_RPC_USERNAME,
BITCOIND_TEST_RPC_PASSWORD,
BITCOIND_TEST_RPC_HOST,
BITCOIND_TEST_RPC_PORT))
else:
self.blockchain_server = jsonrpclib.Server('http://{0}:{1}@{2}:{3}'.format(
BITCOIND_RPC_USERNAME,
BITCOIND_RPC_PASSWORD,
BITCOIND_RPC_HOST,
BITCOIND_RPC_PORT))
socket.setdefaulttimeout(None)
self.server.help()
self._connect(server_connection)
def keep_alive(server):
def wrapper(fun):
def ping_and_reconnect(self, *args, **kwargs):
if server == 'server':
server_instance = self.server
connection_function = self.connect
elif server == 'blockchain_server':
return fun(self, *args, **kwargs)
else:
raise UnknownServerError()
try:
# Cheap API call that checks wether we're connected
server_instance.help()
response = fun(self, *args, **kwargs)
return response
except:
connection_function()
return fun(self, *args, **kwargs)
return ping_and_reconnect
return wrapper
@keep_alive('server')
def decode_raw_transaction(self, hex_transaction):
return self.server.decoderawtransaction(hex_transaction)
@keep_alive('server')
def get_json_transaction(self, hex_transaction):
return self.server.decoderawtransaction(hex_transaction)
@keep_alive('server')
def sign_transaction(self, raw_transaction, prevtx = [], priv_keys=None):
if priv_keys:
result = self.server.signrawtransaction(raw_transaction, prevtx, priv_keys)
else:
result = self.server.signrawtransaction(raw_transaction, prevtx)
return result['hex']
@keep_alive('server')
def get_txid(self, raw_transaction):
transaction_dict = self.server.decoderawtransaction(raw_transaction)
return transaction_dict['txid']
@keep_alive('server')
def signatures_count(self, raw_transaction, prevtx):
transaction_dict = self.server.decoderawtransaction(raw_transaction)
prevtx_dict = {}
for tx in prevtx:
prevtx_dict["{}#{}".format(tx['txid'], tx['vout'])] = tx['redeemScript']
has_signatures = 999
for vin in transaction_dict['vin']:
redeem_script = prevtx_dict["{}#{}".format(tx['txid'], tx['vout'])]
try:
asm = vin['scriptSig']['asm']
except KeyError:
logging.error('transaction doesn\'t have scriptSig asm')
continue
asm_elements = asm.split()
try:
asm_script_dict = self.server.decodescript(redeem_script)
int(asm_script_dict['reqSigs'])
except KeyError:
logging.error('script is missing reqSigs field')
continue
# first elements is op_zero, last is script, rest is signatuers
asm_signatures = asm_elements[1:-1]
# if tried to sign a tx with the same signature again, the sig will equal '0', and we should ignore it
current_signatures = 0
for a in asm_signatures:
if a != '0':
current_signatures += 1
has_signatures = min(has_signatures, current_signatures)
return has_signatures
@keep_alive('server')
def signatures(self, raw_transaction, prevtx):
transaction_dict = self.server.decoderawtransaction(raw_transaction)
prevtx_dict = {}
for tx in prevtx:
prevtx_dict[str((tx['txid'], tx['vout']))] = tx['redeemScript']
has_signatures = 999
for vin in transaction_dict['vin']:
redeem_script = prevtx_dict[str((vin['txid'], vin['vout']))]
try:
asm = vin['scriptSig']['asm']
except KeyError:
logging.error('transaction doesn\'t have scriptSig asm')
continue
asm_elements = asm.split()
try:
asm_script_dict = self.server.decodescript(redeem_script)
int(asm_script_dict['reqSigs'])
except KeyError:
logging.error('script is missing reqSigs field')
continue
# first elements is op_zero, last is script, rest is signatuers
return asm_elements
current_signatures = len(asm_elements) - 2
current_signatures = max(current_signatures, 0)
has_signatures = min(has_signatures, current_signatures)
return has_signatures
@keep_alive('server')
def is_valid_transaction(self, raw_transaction):
# Is raw transaction valid and decodable?
try:
self.server.decoderawtransaction(raw_transaction)
except ProtocolError:
logging.exception('tx invalid')
return False
return True
@keep_alive('server')
def address_is_mine(self, address):
result = self.server.validateaddress(address)
return result['ismine']
@keep_alive('server')
def decode_script(self, script):
return self.server.decodescript(script)
@keep_alive('server')
def get_inputs_outputs(self, raw_transaction):
transaction_dict = self.server.decoderawtransaction(raw_transaction)
vin = transaction_dict["vin"]
vouts = transaction_dict["vout"]
result = (
sorted([json.dumps({'txid': tx_input['txid'], 'vout':tx_input['vout']}) for tx_input in vin]),
json.dumps(
{
'vout': sorted([
{
"value": vout["value"],
"scriptPubKey": vout["scriptPubKey"]["hex"]
} for vout in vouts
])
}
)
)
return result
@keep_alive('server')
def transaction_already_signed(self, raw_transaction, prevtx):
signed_transaction = self.sign_transaction(raw_transaction, prevtx)
if signed_transaction == raw_transaction:
return True
return False
@keep_alive('server')
def transaction_need_signature(self, raw_transaction):
"""
This is shameful ugly function. It tries to send transaction to network
(even though we're working locally) and if it fails we know it still needs
some signatures.
"""
try:
self.server.sendrawtransaction(raw_transaction)
return False
except ProtocolError:
return True
@keep_alive('server')
def transaction_contains_output(self, raw_transaction, address, fee):
transaction_dict = self.server.decoderawtransaction(raw_transaction)
if not 'vout' in transaction_dict:
return False
for vout in transaction_dict['vout']:
# Sanity checks
if not 'value' in vout:
continue
if not 'scriptPubKey' in vout:
continue
if not 'addresses' in vout['scriptPubKey']:
continue
for address in vout['scriptPubKey']['addresses']:
if address == address:
value = Decimal(vout['value'])
if value >= Decimal(fee):
return True
return False
@keep_alive('server')
def transaction_contains_oracle_fee(self, raw_transaction):
return self.transaction_contains_output(raw_transaction, ORACLE_ADDRESS, ORACLE_FEE)
@keep_alive('server')
def transaction_contains_org_fee(self, raw_transaction):
return self.transaction_contains_output(raw_transaction, ORGANIZATION_ADDRESS, ORGANIZATION_FEE)
@keep_alive('server')
def create_multisig_address(self, min_sigs, keys):
keys = sorted(keys)
return self.server.createmultisig(min_sigs, keys)
@keep_alive('server')
def add_multisig_address(self, min_sigs, keys):
keys = sorted(keys)
if self.account:
return self.server.addmultisigaddress(min_sigs, keys, self.account)
return self.server.addmultisigaddress(min_sigs, keys)
@keep_alive('server')
def create_raw_transaction(self, tx_inputs, outputs):
return self.server.createrawtransaction(tx_inputs, outputs)
@keep_alive('server')
def get_new_address(self):
if self.account:
return self.server.getnewaddress(self.account)
return self.server.getnewaddress()
@keep_alive('server')
def get_addresses_for_account(self, account):
all_addresses = self.server.listreceivedbyaddress(0,True)
addresses = [elt['address'] for elt in all_addresses if elt['account'] == account]
return addresses
@keep_alive('server')
def validate_address(self, address):
return self.server.validateaddress(address)
@keep_alive('blockchain_server')
def get_block_hash(self, block_number):
try:
return self.blockchain_server.getblockhash(block_number)
except:
return None
@keep_alive('blockchain_server')
def bitcoind_get_block(self, block_hash):
return self.blockchain_server.getblock(block_hash)
def get_block(self, block_hash):
if not TEST_MODE:
return self.bitcoind_get_block(block_hash)
else:
# Temporary solution before blockchain.info will fix their API
not_proper_data = safe_nonbitcoind_blockchain_getblock(block_hash)
max_height = self.blockchain_server.getblockcount()
proper_data = {}
try:
proper_data['hash'] = not_proper_data['hash']
except:
logging.exception('problematic blockchain data: %r' % not_proper_data)
proper_data['height'] = not_proper_data['height']
proper_data['size'] = not_proper_data['size']
proper_data['merkleroot'] = not_proper_data['mrkl_root']
proper_data['confirmations'] = max_height - proper_data['height'] + 1
proper_data['version'] = not_proper_data['ver']
proper_data['time'] = not_proper_data['time']
proper_data['nonce'] = not_proper_data['nonce']
proper_data['bits'] = not_proper_data['bits']
proper_data['previousblockhash'] = not_proper_data['prev_block']
proper_data['tx'] = [tx['hash'] for tx in not_proper_data['tx']]
return proper_data
@keep_alive('blockchain_server')
def get_block_count(self):
return self.blockchain_server.getblockcount()
@keep_alive('blockchain_server')
def send_transaction(self, tx):
try:
if not TEST_MODE:
self.blockchain_server.sendrawtransaction(tx)
return True
except ProtocolError:
return False
def get_raw_transaction(self, txid):
if not TEST_MODE:
return self.server.getrawtransaction(txid)
else:
return safe_get_raw_transaction(txid)
def get_transactions_from_block(self, block, addresses):
if not TEST_MODE:
return self.bitcoind_get_transactions_from_block(block, addresses)
else:
return self.blockchain_get_transactions_from_block(block, addresses)
def blockchain_get_transactions_from_block(self, block, addresses):
    """TEST_MODE scanner: find transactions in `block` paying to any of `addresses`.

    Queries the blockchain.info multiaddress API in chunks of 5 addresses to
    collect candidate txids belonging to this block, then decodes each
    candidate locally and matches its outputs against `addresses`.

    :return: dict mapping each address to a list of decoded transactions
        (a transaction appears once per matching output, as before).
    """
    transactions_per_address = {addr: [] for addr in addresses}
    transaction_ids = block['tx']
    transactions_on_addresses = []
    for chunk in slice_list(addresses, 5):
        if not chunk:
            continue
        data = safe_blockchain_multiaddress(chunk)
        if data:
            for tx in data['txs']:
                # Only keep txids that actually belong to this block.
                if tx['hash'] in transaction_ids:
                    transactions_on_addresses.append(tx['hash'])
    logging.info(transactions_on_addresses)
    for tx in transactions_on_addresses:
        try:
            logging.debug('getting tx from address')
            raw_transaction = self.get_raw_transaction(tx)
        except ProtocolError:
            # Best-effort: skip transactions the backend cannot serve.
            continue
        transaction = self.decode_raw_transaction(raw_transaction)
        for vout in transaction['vout']:
            if 'addresses' not in vout['scriptPubKey']:
                continue
            addresses_in_vout = set(vout['scriptPubKey']['addresses'])
            for addr in addresses:
                if addr in addresses_in_vout:
                    transactions_per_address[addr].append(transaction)
    logging.info(transactions_per_address)
    return transactions_per_address
def bitcoind_get_transactions_from_block(self, block, addresses):
    """Scan every transaction in `block` via bitcoind for outputs to `addresses`.

    :return: dict mapping each address to a list of decoded transactions
        (a transaction appears once per matching output, as before).
    """
    logging.info(addresses)
    transactions_per_address = {addr: [] for addr in addresses}
    for tx in block['tx']:
        try:
            raw_transaction = self.get_raw_transaction(tx)
        except ProtocolError:
            # Best-effort: skip transactions the RPC server cannot serve.
            continue
        transaction = self.decode_raw_transaction(raw_transaction)
        for vout in transaction['vout']:
            if 'addresses' not in vout['scriptPubKey']:
                continue
            addresses_in_vout = set(vout['scriptPubKey']['addresses'])
            for addr in addresses:
                if addr in addresses_in_vout:
                    transactions_per_address[addr].append(transaction)
    logging.info(transactions_per_address)
    return transactions_per_address
|
165571
|
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from AirSimClient import *
import sys
import time
import random
import msvcrt
# threshold=np.nan is rejected by modern NumPy (ValueError: threshold must be
# non-NAN); np.inf gives the intended "never summarize arrays" behavior.
np.set_printoptions(threshold=np.inf)
# if true, use Q-learning. Else, use SARSA
qlearning = True
readWeights = True  # read in saved weights to resume progress
# drone boundaries
goal_limit = 0.5  # radius (m) of the goal sphere around the target point
max_radius = 3    # episode aborts when the drone strays this far from origin
# Set learning parameters
y = 0.1  # discount rate
e = 0.2  # epsilon (decayed during training)
target_z = -2  # target height in NED coordinate system
num_episodes = 50000
episode_length = 100  # number of actions per episode
# ANN parameters
step_size = 2.5  # action space in increments of degrees
num_increments = 5  # grid resolution per axis of the (roll, pitch) action space
translate_scale = -5  # shifts the action grid so deltas are centered on zero
num_outputs = num_increments**2
num_inputs = 6
learning_rate = 0.001
num_hidden = 10
def reward(state):
    """Reward signal: +10 inside the goal sphere, -50 everywhere else."""
    return 10 if is_in_bounds_3d(state[:3], goal_limit) else -50
def did_reach_goal(state):
    """True when the drone's position (state[0:3]) is within goal_limit of the goal."""
    return is_in_bounds_3d(state[:3], goal_limit)
def get_action(index):
    """Map a flat action-space index (argmax of Q) to a (roll, pitch) pair.

    The index is decoded as a row/column in a num_increments x num_increments
    grid, scaled by step_size, shifted by translate_scale, and normalized.
    """
    row, col = divmod(index, num_increments)
    roll = normalize_deg(row * step_size + translate_scale)
    pitch = normalize_deg(col * step_size + translate_scale)
    return (roll, pitch)
def normalize_deg(x):
    """Scale an angle in degrees to the [-1, 1] range used by the controller."""
    return x / 90
def scale_pos(s):
    """Wrap the 6-element state as a 1xN batch, scaling the position components.

    The first three entries (x, y, z) are divided by a fixed scaler; the last
    three (roll, pitch, yaw) pass through untouched.
    """
    pos_scaler = 5
    scaled_position = [component / pos_scaler for component in s[:3]]
    return [scaled_position + list(s[3:6])]
def distance(x, y):
    """Euclidean distance of (x, y) from the fixed (0, 0) reference point."""
    ref_x, ref_y = 0, 0
    return np.sqrt((x - ref_x) ** 2 + (y - ref_y) ** 2)
def distance_3d(pos):
    """Distance from `pos` (x, y, z) to the goal point (0, 0, target_z)."""
    goal = (0, 0, target_z)
    return np.sqrt(sum((g - p) ** 2 for g, p in zip(goal, pos)))
def is_in_bounds(x, y):
    """True while the drone's horizontal distance from the origin is under max_radius."""
    return distance(x, y) < max_radius
def is_in_bounds_3d(pos, limit):
    """True if `pos` lies strictly within `limit` of the goal point (0, 0, target_z)."""
    goal = (0, 0, target_z)
    dist = np.sqrt(sum((g - p) ** 2 for g, p in zip(goal, pos)))
    return dist < limit
def loadweights(type):
    """Load saved ANN weights from disk.

    :param type: 1 selects 'weights_output.txt' (hidden->output weights);
        any other value selects 'weights_hidden.txt' (input->hidden weights).
        (Parameter name kept for call-site compatibility even though it
        shadows the builtin `type`.)
    :return: 2-D numpy float32 array, one row per line of the file.
    """
    filename = 'weights_output.txt' if type == 1 else 'weights_hidden.txt'
    # Context manager closes the handle; the original leaked the open file.
    with open(filename) as f:
        return np.array([list(map(np.float32, line.split())) for line in f])
def draw_rewards(reward_list, qlearning, block):
    """Plot per-episode reward (top) and 100-episode average reward (bottom).

    :param reward_list: sequence of average rewards, one entry per episode
    :param qlearning: True labels the plots as Q-Learning, else SARSA
    :param block: passed to plt.show(); True blocks until the window is closed
    """
    plt.close()
    plt.subplot(2, 1, 1)  # top plot: raw per-episode reward
    if qlearning:
        plt.title("Average Reward per Episode (Q-Learning)")
    else:
        plt.title("Average Reward per Episode (SARSA)")
    plt.xlabel("Episode number")
    plt.ylabel("Reward")
    plt.plot(reward_list, label="Reward")
    plt.legend()
    plt.subplot(2, 1, 2)  # bottom plot: 100-episode bucket averages
    if qlearning:
        plt.title("Average Reward per 100 Episodes (Q-Learning)")
    else:
        plt.title("Average Reward per 100 Episodes (SARSA)")
    plt.xlabel("Episode number (100's)")
    plt.ylabel("Reward")
    avg = np.zeros(len(reward_list) // 100 + 1)
    for index, val in enumerate(reward_list):
        avg[index // 100] += val
    for i in range(len(avg) - 1):
        avg[i] /= 100
    # The final bucket is partial: divide by its actual size.  The original
    # divided by `len(reward_list) % 100`, which is 0 whenever the episode
    # count is an exact multiple of 100 (division by zero).
    remainder = len(reward_list) % 100
    avg[-1] /= remainder if remainder else 100
    plt.plot(avg, label="Reward")
    plt.legend()
    plt.tight_layout()
    plt.show(block=block)
# init drone
client = MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
# hidden layer
inputs1 = tf.placeholder(shape=[1,num_inputs], dtype=tf.float32)
if readWeights:
    weights_hidden = tf.Variable(loadweights(0))
else:
    weights_hidden = tf.Variable(tf.random_normal([num_inputs, num_hidden]))
# NOTE(review): bias_hidden/bias_output are created but unused -- the bias-add
# lines below are commented out. Confirm whether biases should be applied.
bias_hidden = tf.Variable(tf.random_normal([num_hidden]))
# preactivations_hidden = tf.add(tf.matmul(inputs1, weights_hidden), bias_hidden)
preactivations_hidden = tf.matmul(inputs1, weights_hidden)
# activations_hidden = tf.nn.sigmoid(preactivations_hidden)
activations_hidden = tf.tanh(preactivations_hidden)
# output layer
if readWeights:
    weights_output = tf.Variable(loadweights(1))
else:
    weights_output = tf.Variable(tf.random_normal([num_hidden, num_outputs]))
bias_output = tf.Variable(tf.random_normal([num_outputs]))
# Qout = tf.add(tf.matmul(activations_hidden, weights_output), bias_output)
Qout = tf.matmul(activations_hidden, weights_output)
predict = tf.argmax(Qout,1)
# training
nextQ = tf.placeholder(shape=[1,num_outputs], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(nextQ - Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
updateModel = trainer.minimize(loss)
init = tf.global_variables_initializer()
#create lists to contain total rewards and steps per episode
total_reward_list = np.zeros(num_episodes)
steps_to_success = np.zeros(num_episodes)
percent_success_actions = np.zeros(num_episodes)
num_to_graph = 0
with tf.Session() as sess:
    sess.run(init)
    # episode loop
    for i in range(num_episodes):
        if msvcrt.kbhit():
            # script must be run from cmd.exe in order to register keypresses
            print("You pressed ", msvcrt.getch(), " so now i will quit.")
            break
        print("\n\n\nEPISODE " + str(i) + "\n\n\n")
        #Reset drone and get state
        init_orient = (0, 0, 0)
        print("===== Initial Orientation " + str(init_orient))
        client.simSetPose(Pose(Vector3r(0,0,target_z),
            AirSimClientBase.toQuaternion(init_orient[0], init_orient[1], init_orient[2])), True)
        success_counter = 0
        num_success_actions = 0
        num_actions_taken = 0
        # action loop
        for j in range(episode_length):
            # get current state
            print("===== Action " + str(j))
            curr_pos = client.getPosition()
            curr_orient = client.getRollPitchYaw()
            curr_s = [curr_pos.x_val, curr_pos.y_val, curr_pos.z_val,
                curr_orient[0], curr_orient[1], curr_orient[2]]
            scaled_curr_s = scale_pos(curr_s)
            print("  STATE " + str(curr_s))
            print("  ====== scaled s " + str(scaled_curr_s))
            if not is_in_bounds(curr_s[0], curr_s[1]):
                # drone has gone too far -- reset
                print("===== OUT OF BOUNDS")
                break
            # a_index index of max action, allQ all Q-vals for current state
            a_index,allQ = sess.run([predict,Qout],feed_dict={inputs1:scaled_curr_s})
            if j == 0:
                # SARSA bootstraps its first action from the greedy choice
                sarsa_index = a_index
            if(qlearning):
                # decide next action (angle change relative to previous roll and pitch)
                if np.random.rand(1) < e:
                    # epsilon-greedy - random option
                    print("  !!!!!!!! EPSILON")
                    next_action = get_action(np.random.randint(0, num_outputs, dtype="int64"))
                else:
                    next_action = get_action(a_index[0])
            else:
                # SARSA
                next_action = get_action(sarsa_index[0])
            # calculate action input to AirSim
            roll_diff = np.asscalar(next_action[0])
            pitch_diff = np.asscalar(next_action[1])
            print("  ====== next action " + str(next_action))
            rpy = client.getRollPitchYaw()
            roll = rpy[0] + roll_diff
            pitch = rpy[1] + pitch_diff
            yaw = 0; duration = 0.5; sleep_time = 0.1
            print("  ====== moving to (" + str(roll*90) + " " + str(pitch*90) + ")")
            # take action
            client.moveByAngle(pitch, roll, target_z, yaw, 0.1)
            # time.sleep(sleep_time)  # wait for action to occur
            # get next state and reward as result of action
            s1Position = client.getPosition()
            s1Orientation = client.getRollPitchYaw()
            s1 = [s1Position.x_val, s1Position.y_val, s1Position.z_val, s1Orientation[0], s1Orientation[1], s1Orientation[2]]
            scaled_s1 = scale_pos(s1)
            r = reward(s1)
            total_reward_list[i] += r
            print("  ==== Reward " + str(r))
            # evaluate goal criteria
            if did_reach_goal(s1):
                print("  ******* reached goal " )
                num_success_actions += 1
                success_counter += 1
                if success_counter >= 30:
                    print("\n\n SUCCESS " + str(i) + "\n\n")
                    # record number of steps to success
                    steps_to_success[i] = j
                    # break
            else:
                # make sure successful actions are consecutive
                success_counter = 0
            # Obtain the Q' values by feeding the new state through our network
            Q1 = sess.run(Qout,feed_dict={inputs1:scaled_s1})
            if qlearning:
                # Obtain maxQ' and set our target value for chosen action.
                maxQ1 = np.max(Q1)  # from neural net
                print("  ===== MAX Q1 " + str(maxQ1))
                targetQ = allQ
                targetQ[0,a_index[0]] = r + y*maxQ1
                print("  ===== TARGET " + str(r + y*maxQ1))
            else:
                # SARSA
                if np.random.rand(1) < e:
                    sarsa_index[0] = np.random.randint(0, num_outputs)
                    # epsilon-greedy - random option
                    print("  !!!!!!!! EPSILON IN SARSA")
                else:
                    sarsa_index[0] = np.asscalar(np.argmax(Q1))
                actual_q = Q1[0][sarsa_index[0]]
                targetQ = allQ
                targetQ[0,sarsa_index[0]] = r + y*actual_q
                print("  ===== TARGET " + str(r + y*actual_q))
            # train ANN using target Q
            _,W1,W0 = sess.run([updateModel,weights_output, weights_hidden ], feed_dict={inputs1:scaled_curr_s,nextQ:targetQ})
            # NOTE(review): str() of a numpy array includes brackets and may
            # elide values, which loadweights() (split + float32) cannot parse
            # back -- confirm the save/restore roundtrip actually works.
            with open("weights_output.txt", "w") as weights_file:
                weights_file.write(str(W1))
            with open("weights_hidden.txt", "w") as weights_file:
                weights_file.write(str(W0))
            num_actions_taken += 1
        # episode done
        print("\n\n\nTotal Reward")
        print(total_reward_list[i])
        print(num_actions_taken)
        print("\n\n\n")
        total_reward_list[i] = total_reward_list[i]/num_actions_taken
        percent_success_actions[i] = num_success_actions/num_actions_taken
        # decay epsilon over time (overwrites the module-level initial e = 0.2)
        e = 2./((i/1000) + 10)
        num_to_graph += 1
        if i % 50 == 0:
            draw_rewards(total_reward_list[:num_to_graph], qlearning, False)
        print("Epsilon " + str(e))
        # print("WEIGHTS\n" + str(W1))
    # training finished: summary plots
    plt.close()
    plt.title("Number of Actions Taken to Reach Goal")
    plt.xlabel("Episode number")
    plt.ylabel("Actions")
    plt.plot(steps_to_success[:num_to_graph], label="Actions")
    plt.legend()
    plt.show()
    plt.title("Percentage of Successful Actions Per Episode")
    plt.xlabel("Episode number")
    plt.ylabel("Percentage")
    plt.plot(np.multiply(percent_success_actions[:num_to_graph],100.0), label="Percent")
    plt.legend()
    plt.show()
    draw_rewards(total_reward_list[:num_to_graph], qlearning, True)
|
165578
|
class HtmlWriter():
    """Incrementally writes an HTML page containing a single full-width table.

    Tag helper methods (image_tag, video_tag, ...) only return markup strings;
    add_element() writes one table row.  close() must be called to emit the
    closing tags -- or use the instance as a context manager, which was added
    so the file handle is released even when an exception occurs mid-write.
    """
    def __init__(self, filename):
        self.filename = filename
        self.html_file = open(self.filename, 'w')
        self.html_file.write(
            """<!DOCTYPE html>\n""" + \
            """<html>\n<body>\n<table border="1" style="width:100%"> \n""")

    def __enter__(self):
        # Context-manager support: `with HtmlWriter(path) as w: ...`
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False  # never suppress exceptions

    def add_element(self, col_dict):
        """Write one table row; col_dict maps column index 0..n-1 to cell content."""
        self.html_file.write('        <tr>\n')
        for key in range(len(col_dict)):
            self.html_file.write('            <td>{}</td>\n'.format(col_dict[key]))
        self.html_file.write('        </tr>\n')

    def image_tag(self, image_path, height=240, width=320):
        """Return an <img> tag with the path reused as alt text."""
        return """<img src="{}" alt="{}" height={} width={}>""".format(
            image_path,image_path,height,width)

    def video_tag(self, video_path, height=240, width=320, autoplay=True):
        """Return a <video> tag for an mp4 source; autoplay also loops."""
        if autoplay:
            autoplay_str = 'autoplay loop'
        else:
            autoplay_str = ''
        tag = \
            """<video width="{}" height="{}" controls {}>""".format(width,height,autoplay_str) + \
            """  <source src="{}" type="video/mp4">""".format(video_path) + \
            """  Your browser does not support the video tag.""" + \
            """</video>"""
        return tag

    def colored_text(self,text,color):
        """Wrap text in a <span> with the given CSS foreground color."""
        return '<span style=\"color:' + color + '\">' + text + '</span>'

    def bg_colored_text(self,text,bg_color,text_color='rgb(0,0,0)'):
        """Wrap text in a <span> with the given background and text colors."""
        return f'<span style=\"background-color:{bg_color}; color:{text_color}\">' + text + '</span>'

    def editable_content(self,content):
        """Wrap content in a browser-editable <div>."""
        return """<div contenteditable="True">{}</div>""".format(content)

    def close(self):
        """Emit the closing table/body/html tags and close the file."""
        self.html_file.write('</table>\n</body>\n</html>')
        self.html_file.close()
|
165591
|
from .ini_reader import INIReader
from .. import config_validator
from pagewalker.utilities import error_utils
from pagewalker.config import config
class CookiesConfigParser(INIReader):
    """Parses the custom-cookies INI file and loads it into the global config."""

    def __init__(self):
        super(CookiesConfigParser, self).__init__(config.custom_cookies_file)
        self.config_types = config_validator.ConfigValidatorCustomCookie()

    def apply(self):
        """Parse every INI section as one cookie; update config only if any found."""
        cookies_data = [self._single_cookie(section) for section in self._get_sections()]
        if cookies_data:
            config.custom_cookies_data = cookies_data

    def _single_cookie(self, section):
        """Build one cookie dict from an INI section, validating its options."""
        print("[INFO] Custom cookie: %s" % section)
        cookie = {}
        for name, value in self._get_non_empty_values(section).items():
            self._validate_allowed_option(name)
            # Boolean flags are converted/validated; everything else is kept verbatim.
            if name in ("secure", "httponly"):
                value = self.config_types.boolean(value, name)
            cookie[name] = value
        self._validate_required_options(cookie, section)
        return cookie

    def _validate_allowed_option(self, option_name):
        """Abort with an error message when the option is not recognized."""
        allowed = ("name", "value", "domain", "path", "secure", "httponly")
        if option_name not in allowed:
            msg = "Unknown option '%s' in config file '%s'" % (option_name, config.custom_cookies_file)
            error_utils.exit_with_message(msg)

    def _validate_required_options(self, cookie, section):
        """Abort with an error message when a mandatory option is absent."""
        for option in ("name", "value"):
            if option not in cookie:
                msg = "Missing '%s' option in [%s] cookie in config file '%s'" \
                    % (option, section, config.custom_cookies_file)
                error_utils.exit_with_message(msg)
|
165601
|
from torch.cuda.amp import GradScaler
from utils.loss_accumulator import LossAccumulator
from torch.nn import Module
import logging
from trainer.losses import create_loss
import torch
from collections import OrderedDict
from trainer.inject import create_injector
from utils.util import recursively_detach, opt_get
logger = logging.getLogger('base')
# Defines the expected API for a single training step
class ConfigurableStep(Module):
    """One configurable training step: injectors produce tensors, weighted
    losses consume them, and the step's optimizers update the trained network.
    Driven externally (by an ExtensibleTrainer-style loop -- TODO confirm) via
    do_forward_backward() per gradient-accumulation chunk and do_step() once
    per optimizer step."""
    def __init__(self, opt_step, env):
        super(ConfigurableStep, self).__init__()
        self.step_opt = opt_step
        self.env = env
        self.opt = env['opt']
        self.gen_outputs = opt_step['generator_outputs']
        self.loss_accumulator = LossAccumulator()
        self.optimizers = None
        # AMP gradient scaler; a no-op when fp16 is disabled in the options.
        self.scaler = GradScaler(enabled=self.opt['fp16'])
        self.grads_generated = False
        # Steps with total loss below this threshold have their grads zeroed (see below).
        self.min_total_loss = opt_step['min_total_loss'] if 'min_total_loss' in opt_step.keys() else -999999999
        self.clip_grad_eps = opt_get(opt_step, ['clip_grad_eps'], None)

        # This is a half-measure that can be used between anomaly_detection and running a potentially problematic
        # trainer bare. With this turned on, the optimizer will not step() if a nan grad is detected. If a model trips
        # this warning 10 times in a row, the training session is aborted and the model state is saved. This has a
        # noticeable affect on training speed, but nowhere near as bad as anomaly_detection.
        self.check_grads_for_nan = opt_get(opt_step, ['check_grads_for_nan'], False)
        self.nan_counter = 0

        self.injectors = []
        if 'injectors' in self.step_opt.keys():
            injector_names = []
            for inj_name, injector in self.step_opt['injectors'].items():
                assert inj_name not in injector_names  # Repeated names are always an error case.
                injector_names.append(inj_name)
                self.injectors.append(create_injector(injector, env))

        losses = []
        self.weights = {}
        if 'losses' in self.step_opt.keys():
            for loss_name, loss in self.step_opt['losses'].items():
                assert loss_name not in self.weights.keys()  # Repeated names are always an error case.
                losses.append((loss_name, create_loss(loss, env)))
                self.weights[loss_name] = loss['weight']
        self.losses = OrderedDict(losses)

    def get_network_for_name(self, name):
        """Look up a network by name, checking generators first, then discriminators."""
        return self.env['generators'][name] if name in self.env['generators'].keys() \
                else self.env['discriminators'][name]

    # Subclasses should override this to define individual optimizers. They should all go into self.optimizers.
    #  This default implementation defines a single optimizer for all Generator parameters.
    # Must be called after networks are initialized and wrapped.
    def define_optimizers(self):
        opt_configs = [opt_get(self.step_opt, ['optimizer_params'], None)]
        self.optimizers = []
        if opt_configs[0] is None:
            return
        training = self.step_opt['training']
        training_net = self.get_network_for_name(training)
        nets = [training_net]
        training = [training]
        for net_name, net, opt_config in zip(training, nets, opt_configs):
            # Configs can organize parameters by-group and specify different learning rates for each group. This only
            # works in the model specifically annotates which parameters belong in which group using PARAM_GROUP.
            optim_params = {'default': {'params': [], 'lr': opt_config['lr']}}
            # NOTE(review): opt_config['lr'] is read above before this None check;
            # unreachable-None in practice due to the early return, but the check is dead.
            if opt_config is not None and 'param_groups' in opt_config.keys():
                for k, pg in opt_config['param_groups'].items():
                    optim_params[k] = {'params': [], 'lr': pg['lr']}

            import torch.nn as nn
            norm_modules = (nn.BatchNorm2d, nn.InstanceNorm2d, nn.BatchNorm1d, nn.InstanceNorm1d,
                            nn.BatchNorm3d, nn.InstanceNorm3d, nn.GroupNorm, nn.LayerNorm)
            emb_modules = (nn.Embedding, nn.EmbeddingBag)
            param_names_notweights = set()
            all_param_names = set()
            param_map = {}
            for mn, m in net.named_modules():
                for k, v in m.named_parameters():
                    # Tag each parameter so downstream code can apply weight-decay
                    # exclusions for biases, norm layers, and embeddings.
                    v.is_bias = k.endswith(".bias")
                    v.is_weight = k.endswith(".weight")
                    v.is_norm = isinstance(m, norm_modules)
                    v.is_emb = isinstance(m, emb_modules)
                    fpn = '%s.%s' % (mn, k) if mn else k  # full param name
                    all_param_names.add(fpn)
                    param_map[fpn] = v
                    if v.is_bias or v.is_norm or v.is_emb:
                        param_names_notweights.add(fpn)

                    # Some models can specify some parameters to be in different groups.
                    param_group = "default"
                    if hasattr(v, 'PARAM_GROUP'):
                        if v.PARAM_GROUP in optim_params.keys():
                            param_group = v.PARAM_GROUP
                        else:
                            logger.warning(f'Model specifies a custom param group {v.PARAM_GROUP} which is not configured. '
                                           f'The same LR will be used for all parameters.')

                    if v.requires_grad:
                        optim_params[param_group]['params'].append(v)
                    else:
                        if self.env['rank'] <= 0:
                            logger.warning('Params [{:s}] will not optimize.'.format(k))
            params_notweights = [param_map[k] for k in sorted(list(param_names_notweights))]
            # Symmetric difference here equals "all minus notweights" since
            # param_names_notweights is a subset of all_param_names.
            params_weights = [param_map[k] for k in sorted(list(all_param_names ^ param_names_notweights))]

            if 'optimizer' not in self.step_opt.keys() or self.step_opt['optimizer'] == 'adam':
                opt = torch.optim.Adam(list(optim_params.values()), lr=opt_config['lr'],
                                       weight_decay=opt_config['weight_decay'],
                                       betas=(opt_config['beta1'], opt_config['beta2']))
            elif self.step_opt['optimizer'] == 'adamw':
                # AdamW gets weight decay only on true weights, not biases/norm/embeddings.
                groups = [
                    { 'params': params_weights, 'weight_decay': opt_get(opt_config, ['weight_decay'], 0) },
                    { 'params': params_notweights, 'weight_decay': 0 }
                ]
                opt = torch.optim.AdamW(groups, lr=opt_config['lr'],
                                       weight_decay=opt_get(opt_config, ['weight_decay'], 1e-2),
                                       betas=(opt_get(opt_config, ['beta1'], .9), opt_get(opt_config, ['beta2'], .999)))
            elif self.step_opt['optimizer'] == 'lars':
                from trainer.optimizers.larc import LARC
                from trainer.optimizers.sgd import SGDNoBiasMomentum
                optSGD = SGDNoBiasMomentum(list(optim_params.values()), lr=opt_config['lr'], momentum=opt_config['momentum'],
                                           weight_decay=opt_config['weight_decay'])
                opt = LARC(optSGD, trust_coefficient=opt_config['lars_coefficient'])
            elif self.step_opt['optimizer'] == 'sgd':
                from torch.optim import SGD
                opt = SGD(list(optim_params.values()), lr=opt_config['lr'], momentum=opt_config['momentum'], weight_decay=opt_config['weight_decay'])
            opt._config = opt_config  # This is a bit seedy, but we will need these configs later.
            opt._config['network'] = net_name
            self.optimizers.append(opt)

    # Returns all optimizers used in this step.
    def get_optimizers(self):
        assert self.optimizers is not None
        return self.optimizers

    # Returns optimizers which are opting in for default LR scheduling.
    def get_optimizers_with_default_scheduler(self):
        assert self.optimizers is not None
        return self.optimizers

    # Returns the names of the networks this step will train. Other networks will be frozen.
    def get_networks_trained(self):
        if isinstance(self.step_opt['training'], list):
            return self.step_opt['training']
        else:
            return [self.step_opt['training']]

    def get_training_network_name(self):
        """Return the (first) trained network's name, whether configured as str or list."""
        if isinstance(self.step_opt['training'], list):
            return self.step_opt['training'][0]
        else:
            return self.step_opt['training']

    # Performs all forward and backward passes for this step given an input state. All input states are lists of
    # chunked tensors. Use grad_accum_step to dereference these steps. Should return a dict of tensors that later
    # steps might use. These tensors are automatically detached and accumulated into chunks.
    def do_forward_backward(self, state, grad_accum_step, amp_loss_id, train=True, no_ddp_sync=False, loss_accumulator=None):
        local_state = {}  # <-- Will store the entire local state to be passed to injectors & losses.
        new_state = {}  # <-- Will store state values created by this step for returning to ExtensibleTrainer.
        for k, v in state.items():
            local_state[k] = v[grad_accum_step]
        local_state['train_nets'] = str(self.get_networks_trained())
        loss_accumulator = self.loss_accumulator if loss_accumulator is None else loss_accumulator

        # Some losses compute backward() internally. Accommodate this by stashing the amp_loss_id in env.
        self.env['amp_loss_id'] = amp_loss_id
        self.env['current_step_optimizers'] = self.optimizers
        self.env['training'] = train

        # Inject in any extra dependencies.
        for inj in self.injectors:
            # Don't do injections tagged with eval unless we are not in train mode.
            if train and 'eval' in inj.opt.keys() and inj.opt['eval']:
                continue
            # Likewise, don't do injections tagged with train unless we are not in eval.
            if not train and 'train' in inj.opt.keys() and inj.opt['train']:
                continue
            # Don't do injections tagged with 'after' or 'before' when we are out of spec.
            if 'after' in inj.opt.keys() and self.env['step'] < inj.opt['after'] or \
               'before' in inj.opt.keys() and self.env['step'] > inj.opt['before'] or \
               'every' in inj.opt.keys() and self.env['step'] % inj.opt['every'] != 0:
                continue
            # 'no_accum' injectors only run on the first gradient-accumulation chunk.
            if 'no_accum' in inj.opt.keys() and grad_accum_step > 0:
                continue
            training_net = self.get_network_for_name(self.step_opt['training'])
            if no_ddp_sync and hasattr(training_net, 'no_sync'):
                with training_net.no_sync():
                    injected = inj(local_state)
            elif opt_get(inj.opt, ['no_grad'], False):
                with torch.no_grad():
                    injected = inj(local_state)
            else:
                injected = inj(local_state)
            local_state.update(injected)
            new_state.update(injected)

        if len(self.losses) > 0:
            # Finally, compute the losses.
            total_loss = 0
            for loss_name, loss in self.losses.items():
                # Some losses only activate after a set number of steps. For example, proto-discriminator losses can
                # be very disruptive to a generator.
                if 'after' in loss.opt.keys() and loss.opt['after'] > self.env['step'] or \
                   'before' in loss.opt.keys() and self.env['step'] > loss.opt['before'] or \
                   'every' in loss.opt.keys() and self.env['step'] % loss.opt['every'] != 0:
                    continue
                if loss.is_stateful():
                    l, lstate = loss(self.get_network_for_name(self.step_opt['training']), local_state)
                    local_state.update(lstate)
                    new_state.update(lstate)
                else:
                    l = loss(self.get_network_for_name(self.step_opt['training']), local_state)
                total_loss += l * self.weights[loss_name]
                # Record metrics.
                if isinstance(l, torch.Tensor):
                    loss_accumulator.add_loss(loss_name, l)
                for n, v in loss.extra_metrics():
                    loss_accumulator.add_loss("%s_%s" % (loss_name, n), v)
                loss.clear_metrics()

            # In some cases, the loss could not be set (e.g. all losses have 'after')
            if train and isinstance(total_loss, torch.Tensor):
                loss_accumulator.add_loss("%s_total" % (self.get_training_network_name(),), total_loss)
                reset_required = total_loss < self.min_total_loss

                # Scale the loss down by the accumulation factor.
                total_loss = total_loss / self.env['mega_batch_factor']

                # Get dem grads!
                self.scaler.scale(total_loss).backward()

                if reset_required:
                    # You might be scratching your head at this. Why would you zero grad as opposed to not doing a
                    # backwards? Because DDP uses the backward() pass as a synchronization point and there is not a good
                    # way to simply bypass backward. If you want a more efficient way to specify a min_loss, use or
                    # implement it at the loss level.
                    self.get_network_for_name(self.step_opt['training']).zero_grad()
                    loss_accumulator.increment_metric("%s_skipped_steps" % (self.get_training_network_name(),))

                self.grads_generated = True

        # Detach all state variables. Within the step, gradients can flow. Once these variables leave the step
        # we must release the gradients.
        new_state = recursively_detach(new_state)
        return new_state

    # Performs the optimizer step after all gradient accumulation is completed. Default implementation simply steps()
    # all self.optimizers.
    def do_step(self, step):
        if not self.grads_generated:
            return
        self.grads_generated = False
        for opt in self.optimizers:
            # Optimizers can be opted out in the early stages of training.
            after = opt._config['after'] if 'after' in opt._config.keys() else 0
            after_network = self.opt['networks'][opt._config['network']]['after'] if 'after' in self.opt['networks'][opt._config['network']].keys() else 0
            after = max(after, after_network)
            if self.env['step'] < after:
                continue
            before = opt._config['before'] if 'before' in opt._config.keys() else -1
            if before != -1 and self.env['step'] > before:
                continue

            nan_found = False
            if self.check_grads_for_nan:
                for pg in opt.param_groups:
                    for p in pg['params']:
                        # NOTE(review): `not isfinite(...).any()` only trips when
                        # EVERY element of the grad is non-finite; detecting any
                        # NaN/Inf element would be `not isfinite(...).all()`.
                        # Also assumes p.grad is not None. Confirm intent.
                        if not torch.isfinite(p.grad).any():
                            nan_found = True
                            break
                    if nan_found:
                        break
                if nan_found:
                    print("NaN found in grads. Throwing this step out.")
                    self.nan_counter += 1
                else:
                    self.nan_counter = 0

            if self.clip_grad_eps is not None:
                for pg in opt.param_groups:
                    grad_norm = torch.nn.utils.clip_grad_norm_(pg['params'], self.clip_grad_eps)
                    if torch.isnan(grad_norm):
                        nan_found = True
                        self.nan_counter += 1

            if not nan_found:
                self.scaler.step(opt)
                self.scaler.update()

    def get_metrics(self):
        """Return the accumulated per-loss metrics as a plain dict."""
        return self.loss_accumulator.as_dict()
|
165629
|
import os
from platform import node
from app.project_type.project_type import ProjectType
from app.util.conf.configuration import Configuration
from app.util.log import get_logger
class Directory(ProjectType):
    """
    Example API call to invoke a directory-type build.
    {
        "type": "directory",
        "project_directory": "examples/directory job",
    }
    """
    def __init__(self, project_directory, config=None, job_name=None, build_project_directory=None,
                 remote_files=None):
        """
        Note: the first line of each parameter docstring will be exposed as command line argument documentation for the
        clusterrunner build client.

        :param project_directory: path to the directory that contains the project and clusterrunner.yaml
        :type project_directory: string
        :param config: a yaml string to be used in place of a clusterrunner.yaml
        :type config: string|None
        :param job_name: a list of job names we intend to run
        :type job_name: list [str] | None
        :param remote_files: dictionary mapping of output file to URL
        :type remote_files: dict[str, str] | None
        """
        super().__init__(config, job_name, remote_files)
        self._logger = get_logger(__name__)
        # Resolve to an absolute path so behavior is independent of the process cwd.
        self.project_directory = os.path.abspath(project_directory)
        self._logger.debug('Project directory is {}'.format(project_directory))

    def _fetch_project(self):
        # Nothing to download for a "directory" project; just verify it exists locally.
        dir_exists = os.path.isdir(self.project_directory)
        if not dir_exists:
            raise RuntimeError('Could not find the directory "{}" on {}. Note that if you are running ClusterRunner '
                               'on multiple hosts, "directory" type builds are not supported.'
                               .format(self.project_directory, node()))

    def execute_command_in_project(self, *args, **kwargs):
        """
        Execute a command inside the directory. See superclass for parameter documentation.
        """
        # Default the working directory to the project directory; callers may override.
        if 'cwd' not in kwargs:
            kwargs['cwd'] = self.project_directory
        return super().execute_command_in_project(*args, **kwargs)

    def timing_file_path(self, job_name):
        """
        Construct the sys path of the directory where the timing file should reside based on the project_directory.
        project_directory is the sys path of the project which contains the clusterrunner.yaml file.
        e.g.:
        Configuration['timings_directory'] = '/var/timings_directory'
        project_directory = '/Users/me/project'
        The final timing file sys path should be:
        '/var/timings_directory/Users/me/project'

        :type job_name: str
        :return: the absolute path to where the timing file for job_name SHOULD be. This method does not guarantee
            that the timing file exists.
        :rtype: string
        """
        # cut off mount point and leading separator (e.g. '/' on POSIX or '\\' on Windows)
        # e.g. '/var/bar' would become 'var/bar' on POSIX and 'c:\\temp\\foo' would become 'temp\\foo'
        timings_subdirectory = os.path.splitdrive(self.project_directory)[1][len(os.sep):]
        return os.path.join(
            Configuration['timings_directory'],
            timings_subdirectory,
            '{}.timing.json'.format(job_name)
        )

    def project_id(self):
        # The absolute project directory uniquely identifies a directory-type project.
        return self.project_directory
|
165639
|
import logging.handlers
import os.path
from popupdict.gtk import GLib
# Application logger: console output plus a rotating file under the user
# cache directory.  Handlers are attached once at import time.
logger = logging.getLogger('popupdict')
formatter = logging.Formatter('[%(asctime)s.%(msecs)03d] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S')

stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

cache_dir = os.path.join(GLib.get_user_cache_dir(), 'popup-dict')
# makedirs(exist_ok=True) avoids both the missing-parent failure and the
# check-then-create race of the original os.path.exists()/os.mkdir() pair.
os.makedirs(cache_dir, mode=0o755, exist_ok=True)
filename = os.path.join(cache_dir, 'popup-dict.log')
file_handler = logging.handlers.RotatingFileHandler(filename,
                                                    mode='a',
                                                    maxBytes=50 * 1024 * 1024,
                                                    backupCount=5,
                                                    encoding='utf-8')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
|
165667
|
from django.contrib.auth import models as auth_models
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class BaseUserManager(auth_models.BaseUserManager):
    """Manager for email-keyed users (no username field)."""

    def create_user(self, email, password=None):
        """Create and persist a regular user; raise ValueError when email is empty."""
        if not email:
            raise ValueError("Email missing")
        normalized = self.normalize_email(email)
        user = self.model(email=normalized)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password=None):
        """Create a regular user and promote it to staff + superuser."""
        user = self.create_user(email, password)
        user.is_staff = user.is_superuser = True
        user.save(using=self._db)
        return user
class BaseUser(auth_models.AbstractBaseUser, auth_models.PermissionsMixin):
    """Abstract user model that authenticates by email address instead of username."""
    EMAIL_FIELD = "email"
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = []  # email + password are already collected by createsuperuser

    email = models.EmailField(_("email"), max_length=254, unique=True)
    is_active = models.BooleanField(_("is active"), default=True)
    is_staff = models.BooleanField(_("is staff"), default=False)
    date_joined = models.DateTimeField(_("date joined"), default=timezone.now)

    objects = BaseUserManager()

    class Meta:
        abstract = True
        verbose_name = _("user")
        verbose_name_plural = _("users")

    def __str__(self):
        return self.email

    def get_full_name(self):
        # No separate name fields exist; the email doubles as the display name.
        return self.email

    def get_short_name(self):
        return self.email
|
165701
|
import tensorflow as tf
import numpy as np
from keras import backend as K
from keras.models import Sequential, model_from_json
from keras.layers import Lambda
from tensorflow.python.framework import ops
from scipy.ndimage.interpolation import zoom
import keras
import tempfile
import os
def loss_calculation(x, category_index, nb_classes):
    # Mask the prediction vector with a one-hot multiply so only the target
    # category's score survives; used to build a category-specific Grad-CAM loss.
    return tf.multiply(x, K.one_hot((category_index), nb_classes))
def loss_calculation_shape(input_shape):
    # The one-hot masking in loss_calculation is elementwise, so the output
    # shape equals the input shape (required by Keras Lambda's output_shape).
    return input_shape
def normalize(x):
    # Normalize a gradient tensor by its RMS magnitude; 1e-5 guards against
    # division by zero for all-zero gradients.
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def prepareGradCAM(input_model, conv_layer_index, nb_classes):
    """Build the Keras backend function used by GradCAM().

    Appends a one-hot masking Lambda to the model and returns a function that
    maps (input batch, learning_phase) -> (conv feature maps, their gradients
    w.r.t. the masked class score).

    NOTE(review): `conv_layer_index` is unused -- the conv layer is hard-coded
    as layers[-6] below; likewise the class is hard-coded to 1 (the
    `explanation_catagory` local is also unused). Confirm intent.
    """
    model = input_model
    # because non-manufacturability is 1
    explanation_catagory = 1
    loss_function = lambda x: loss_calculation(x, 1, nb_classes)
    model.add(Lambda(loss_function,
                     output_shape=loss_calculation_shape))
    # use the loss from the layer before softmax. As best practices
    loss = K.sum(model.layers[-1].output)
    # last fully Convolutional layer to use for computing GradCAM
    conv_output = model.layers[-6].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()], [conv_output, grads])
    return gradient_function
def registerGradient():
    # Register the "GuidedBackProp" gradient exactly once.
    # NOTE(review): _gradient_registry is a private TensorFlow API and may
    # change between TF versions — confirm against the pinned TF release.
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, grad):
            # Guided backpropagation: pass gradient only where both the
            # incoming gradient and the forward activation are positive.
            dtype = op.inputs[0].dtype
            return grad * tf.cast(grad > 0., dtype) * \
                tf.cast(op.inputs[0] > 0., dtype)
def compileSaliencyFunction(model, ld_model_fn, model_no, channels, activation, voxelCount, nbClasses, activation_layer=-5):
    """Build a K.function computing guided-backprop saliency w.r.t. the input."""
    guided = modifyBackprop(model, 'GuidedBackProp', ld_model_fn, model_no,
                            channels, activation, voxelCount, nbClasses)
    model_input = guided.input
    activation_sum = K.sum(guided.layers[activation_layer].output)
    saliency = K.gradients(activation_sum, model_input)[0]
    return K.function([model_input, K.learning_phase()], [saliency])
def modifyBackprop(model, name, ld_model_fn, model_no, channels, activation, voxelCount, nbClasses):
    """Rebuild the model inside a gradient-override scope so ReLU uses `name`.

    Returns a fresh model (loaded weights, softmax popped) whose ReLU gradient
    is replaced by the registered custom gradient (e.g. GuidedBackProp).
    """
    registerGradient()
    g = tf.get_default_graph()
    # All ops created inside this scope use the overridden Relu gradient.
    with g.gradient_override_map({'Relu': name}):
        # get layers that have an activation
        layer_dict = [layer for layer in model.layers[1:]
                      if hasattr(layer, 'activation')]
        # replace keras relu activation with tf.nn.relu so the override applies
        for layer in layer_dict:
            if layer.activation == keras.activations.relu:
                layer.activation = tf.nn.relu
        # Re-instantiate the model inside the override scope and reload weights;
        # NOTE(review): the mutated `model` above is discarded here — confirm
        # that mutating it first is actually required.
        model = ld_model_fn(model_no, channels, activation, voxelCount, nbClasses)
        model.load_weights('log/weights/model%s_%schannel_%sactivation_%svoxel_count_%sclasses.h5' % (model_no, channels, activation, voxelCount, nbClasses))
        # Popping the softmax layer as it creates ambiguity in the explanation
        model.pop()
    return model
def GradCAM(gradient_function, input_file):
    """Compute a GradCAM attention map and blend it with the input volume.

    gradient_function: K.function returning (conv_output, gradients) for a
        batched input (as built by prepareGradCAM).
    input_file: input array, batch-first with a trailing channel axis —
        assumed shape (1, d1, d2, d3, 1); TODO confirm against callers.
    Returns the attention map added to the input, rescaled to [0, 1].

    Fixes: `weights.flatten()` previously discarded its result (no-op);
    unused locals (`f`, the misspelled explanation category) and debug
    prints removed.
    """
    output, grads_val = gradient_function([input_file, 0])
    grads_val = grads_val / (np.max(grads_val) + K.epsilon())
    # Channel weights: average gradient over the three spatial axes.
    weights = np.mean(grads_val, axis=(1, 2, 3))
    weights = weights.flatten()
    if K.image_data_format() == "channels_last":
        grad_cam = np.ones(output.shape[1:-1], dtype=K.floatx())
    else:
        grad_cam = np.ones(output.shape[2:], dtype=K.floatx())
    # Weighted sum of the conv feature maps.
    for i, w in enumerate(np.transpose(weights)):
        if K.image_data_format() == "channels_last":
            grad_cam += w * output[0, ..., i]
        else:
            grad_cam += w * output[0, i, ...]
    # ReLU: keep only positive influence, then normalize to [0, 1].
    grad_cam = np.maximum(grad_cam, 0)
    grad_cam = grad_cam / np.max(grad_cam)
    # Upsample the CAM to the input resolution and blend with the input.
    # NOTE(review): zip truncates to grad_cam's rank, so the factors come from
    # input_file's leading dims — confirm the intended shape correspondence.
    attMap = np.zeros_like(input_file)
    zoom_factor = [i / (j * 1.0) for i, j in zip(input_file.shape, grad_cam.shape)]
    attMap[..., 0] = zoom(grad_cam, zoom_factor)
    attMap = np.float32(attMap) + np.float32(input_file)
    attMap = attMap / np.max(attMap)
    return attMap
|
165727
|
from pyunicorn.timeseries import RecurrencePlot
import numpy as np
from statistics import median
import time
# Measure the times (in ms) of evaluating an expression n times
def measuretime(f, n, *args):
    """Measure wall-clock durations (ms) of calling f(*args) n times.

    Returns (times_ms, result): a numpy array of n per-call durations in
    milliseconds and the value from one warm-up call made before timing.

    Fix: the loop previously did `for n in range(n)`, shadowing the
    parameter; perf_counter replaces time.time for monotonic timing.
    """
    timings = [0.0] * n
    result = f(*args)  # warm-up call; also captures the return value
    for i in range(n):
        t0 = time.perf_counter()
        f(*args)
        timings[i] = time.perf_counter() - t0
    return (1000 * np.array(timings), result)
# Function that will be measured
def fun_rqa(v, metric):
    """Compute an RQA summary (plus Lmax, ENT, TT) of series v.

    Fix: the supremum check used `is`, which tests string identity (an
    implementation detail of interning), not equality — replaced with `==`.
    """
    # Use sparse RQA only for the supremum metric.
    metric_sup = (metric == "supremum")
    rp = RecurrencePlot(v, metric=metric, sparse_rqa=metric_sup,
                        threshold=1.2, dim=3, tau=6)
    rqa = rp.rqa_summary()
    rqa["Lmax"] = rp.max_diaglength()
    rqa["ENT"] = rp.diag_entropy()
    rqa["TT"] = rp.trapping_time()
    return rqa
# Analyse 12 series from 250 to 3000 points
# (With variable metric)
def benchmark(metric):
    """Benchmark fun_rqa on 12 Rossler series (250..3000 points) and append
    the median timing plus the RQA measures to a per-metric results file."""
    series_matrix = np.loadtxt("rossler.txt")
    for run in range(12):
        segment = series_matrix[:250 * (run + 1), 2 * run]
        timings, summary = measuretime(fun_rqa, 5, segment, metric)
        median_ms = median(timings)
        with open("benchmark_rqa_python_%s.txt" % metric, "a") as out:
            out.write("%d\t%f\t" % (run, median_ms))
            for key in ["RR", "DET", "L", "Lmax", "ENT", "LAM", "TT"]:
                out.write("%s\t" % (summary[key]))
            out.write("\n")
# Run the benchmark for both the Euclidean and supremum (maximum) norms.
benchmark("euclidean")
benchmark("supremum")
|
165753
|
class MyClass():
    """Minimal example class with one class attribute and one method."""
    # class attribute shared by all instances
    a = 10
    def func(self):
        print('Hello')
# create a new MyClass instance (used by the bound-method demo below)
ob = MyClass()
class Model(BaseModel):
    """Pydantic-style example model.

    Fixes: `age: int,` had a trailing comma (SyntaxError) and `Options`
    was a typo for typing.Optional. BaseModel/NoneStr are assumed to be
    imported elsewhere (pydantic) — TODO confirm.
    """
    age: int
    first_name = 'John'
    last_name: NoneStr = None
    signup_ts: Optional[datetime] = None
    list_of_ints: List[int]
class Addition:
    """Adds the two numbers supplied at construction time."""
    # class-level defaults (overridden per instance in __init__)
    first = 0
    second = 0
    answer = 0
    def __init__(self, f, s):
        # parameterized constructor
        self.first, self.second = f, s
    def display(self):
        """Print the operands and the computed sum."""
        for label, value in (
            ("First number", self.first),
            ("Second number", self.second),
            ("Addition of two numbers", self.answer),
        ):
            print(label + " = " + str(value))
    def calculate(self):
        """Store first + second in self.answer."""
        self.answer = self.first + self.second
# creating object of the class
# this will invoke parameterized constructor
obj = Addition(1000, 2000)
# perform Addition
obj.calculate()
# display result
obj.display()
# The addresses in the sample outputs below vary per run.
# Output: <function MyClass.func at 0x000000000335B0D0>
print(MyClass.func)
# Output: <bound method MyClass.func of <__main__.MyClass object at 0x000000000332DEF0>>
print(ob.func)
# Calling function func()
# Output: Hello
ob.func()
# NOTE(review): copy_local, foo and the argument names below are not defined
# in this file — these calls raise NameError if executed; presumably
# illustrative snippets of keyword-argument call styles.
copy_local(
    src_path = src_path,
    dst_path = dst_path,
    data = _data,
    include = include,
    pretend = pretend,
    force = force,
    skip = skip,
    quiet = quiet
)
foo('one','two', c='three', d='four')
def ask_ok(prompt, retries = 4, reminder = 'Please try again!'):
    """Prompt the user for a yes/no answer, re-asking up to `retries` times.

    Returns True for a yes-like answer, False for a no-like one; raises
    ValueError once the retry budget is exhausted.
    """
    yes_answers = ('y', 'ye', 'yes')
    no_answers = ('n', 'no', 'nop', 'nope')
    while True:
        answer = input(prompt)
        if answer in yes_answers:
            return True
        if answer in no_answers:
            return False
        retries -= 1
        if retries < 0:
            raise ValueError('invalid user response')
        print(reminder)
def copy(
    src_path: str,
    dst_path: str,
    data: Optional[dict] = None,
    *,
    exclude: Optional[List[str]] = None,
    include: Optional[List[str]] = None,
    pretend: bool = False,
    force: bool = False,
    skip: bool = False,
    quiet: bool = False
) -> None:
    """Stub of a template-copy API (keyword-only flags); not implemented."""
    pass
|
165825
|
import re
from flask import abort
from .validation import get_validation_errors
def validate_framework_agreement_details_data(framework_agreement_details, enforce_required=True, required_fields=None):
    """Validate framework agreement details; abort the request with HTTP 400
    (carrying the error details) when validation fails."""
    validation_errors = get_validation_errors(
        'framework-agreement-details',
        framework_agreement_details,
        enforce_required=enforce_required,
        required_fields=required_fields,
    )
    if validation_errors:
        abort(400, validation_errors)
def format_framework_integrity_error_message(error, json_framework):
    """Translate a database integrity error into a human-readable message."""
    error_text = str(error)
    if 'violates check constraint "ck_framework_has_direct_award_or_further_competition"' in error_text:
        return "At least one of `hasDirectAward` or `hasFurtherCompetition` must be True"
    if 'duplicate key value violates unique constraint "ix_frameworks_slug"' in error_text:
        return "Slug '{}' already in use".format(json_framework.get('slug', '<unknown slug>'))
    if re.search('Not a [a-z]+? value:', error_text):
        return 'Invalid framework'
    # Fallback: expose the raw error text.
    return format(error)
|
165874
|
import torch
import ipdb
from copy import deepcopy
import random
def modify_sentence(ids, min_change=2, prob=0.1, k=2):
    """Generate k perturbed copies of a token-id sequence by applying random
    deletion, swap and duplication in turn to each copy.

    NOTE(review): random.sample raises ValueError if the requested change
    count exceeds the sequence length — assumes len(ids) comfortably exceeds
    min_change; confirm against callers.
    """
    def _random_deletion(rids):
        # Remove at least min_change tokens (or prob fraction, if larger).
        num_deletion = max(min_change, int(prob*len(rids)))
        delete_idx = random.sample(range(len(rids)), num_deletion)
        n_ids = [rids[i] for i in range(len(rids)) if i not in delete_idx]
        return n_ids
    def _random_swap(rids):
        # Swap random position pairs; a position may be swapped more than once.
        num_swap = max(min_change, int(prob*len(rids)))
        swap_idx = [random.sample(range(len(rids)), 2) for _ in range(num_swap)]
        n_ids = deepcopy(rids)
        for i, j in swap_idx:
            n_ids[i], n_ids[j] = n_ids[j], n_ids[i]
        return n_ids
    def _random_duplicate(rids):
        # 1-gram or 2-gram
        # Indices are sampled from range(len-1) so rids[idx+1] is always valid.
        num_duplicate = max(min_change, int(prob*len(rids)))
        duplicate_idx = random.sample(range(len(rids)-1), num_duplicate)
        n_rids = []
        for idx, i in enumerate(rids):
            if idx in duplicate_idx:
                if random.random() > 0.5:
                    # 2-gram: repeat the (idx, idx+1) bigram
                    n_rids.extend([rids[idx], rids[idx+1], rids[idx], rids[idx+1]])
                else:
                    # 1-gram: repeat the single token
                    n_rids.extend([rids[idx], rids[idx]])
            else:
                n_rids.append(i)
        return n_rids
    rest = []
    for _ in range(k):
        # Apply the three perturbations in sequence to a fresh copy.
        rids = _random_deletion(ids)
        rids = _random_swap(rids)
        rids = _random_duplicate(rids)
        rest.append(rids)
    return rest
def truncate_pair_with_other_ids(cids, rids, tcids, trids, scids, srids, max_length):
    """Trim cids/rids in place (with their parallel token/speaker-id lists)
    until the pair fits the budget; drops oldest context tokens first when
    the context dominates, otherwise trailing response tokens."""
    budget = max_length - 3  # reserve room for [CLS], [SEP], [SEP]
    while len(cids) + len(rids) > budget:
        if len(cids) > 2 * len(rids):
            # context much longer than response: drop its oldest token
            cids.pop(0)
            tcids.pop(0)
            scids.pop(0)
        else:
            # otherwise shorten the response from the end
            rids.pop()
            trids.pop()
            srids.pop()
def truncate_pair_with_labels(cids, cids_labels, rids, max_length, rids_labels=None):
    """Trim cids/rids in place, keeping cids_labels (and optional rids_labels)
    aligned with the surviving tokens."""
    budget = max_length - 3  # reserve room for [CLS], [SEP], [SEP]
    while len(cids) + len(rids) > budget:
        if len(cids) > 2 * len(rids):
            # drop the oldest context token together with its label
            cids.pop(0)
            cids_labels.pop(0)
        else:
            rids.pop()
            if rids_labels:
                rids_labels.pop()
def truncate_pair(cids, rids, max_length):
    """Trim cids/rids in place until they fit max_length minus the three
    special tokens ([CLS], [SEP], [SEP])."""
    budget = max_length - 3
    while len(cids) + len(rids) > budget:
        if len(cids) > 2 * len(rids):
            cids.pop(0)   # drop oldest context token
        else:
            rids.pop()    # drop trailing response token
def truncate_pair_two_candidates(cids, rids1, rids2, max_length, sids=None):
    """Trim a context and two candidate responses in place to fit the budget
    ([CLS] ctx [SEP] rids1 [SEP] rids2 [SEP] = 4 special tokens)."""
    budget = max_length - 4
    while len(cids) + len(rids1) + len(rids2) > budget:
        if len(cids) > len(rids1) + len(rids2):
            cids.pop(0)
            if sids:
                sids.pop(0)   # keep the speaker ids aligned with cids
        elif len(rids1) > len(rids2):
            rids1.pop()
        else:
            rids2.pop()
def generate_mask(ids):
    '''generate the 0/1 attention mask for a 2-D batch of ids ([PAD] is 0)'''
    nonzero_positions = ids.nonzero().tolist()
    rows = [position[0] for position in nonzero_positions]
    cols = [position[1] for position in nonzero_positions]
    mask = torch.zeros_like(ids)
    mask[rows, cols] = 1
    return mask
def to_cuda(*args):
    '''Move every tensor argument onto the CUDA device; returns the original
    args tuple unchanged when CUDA is unavailable.'''
    if not torch.cuda.is_available():
        return args
    return [tensor.cuda() for tensor in args]
def mask_sentence(
    ids, min_mask_num, max_mask_num, masked_lm_prob,
    special_tokens=[], mask=-1, vocab_size=21128,
):
    '''change the ids, and return the mask_label

    BERT-style masking in place: ~80% of chosen positions become `mask`,
    ~10% a random vocabulary id, ~10% stay unchanged; mask_label holds the
    original id at chosen positions and -1 elsewhere.
    NOTE(review): random.sample raises ValueError if num_mask exceeds the
    number of non-special positions — confirm callers guarantee this.
    '''
    num_valid = len([i for i in ids if i not in special_tokens])
    # Clamp the mask count to [min_mask_num, max_mask_num] around the target rate.
    num_mask = max(
        min_mask_num,
        min(
            int(masked_lm_prob * num_valid),
            max_mask_num,
        )
    )
    mask_pos = [idx for idx, i in enumerate(ids) if i not in special_tokens]
    mask_idx = random.sample(mask_pos, num_mask)
    mask_label = []
    for idx, i in enumerate(ids):
        if idx in mask_idx:
            ratio = random.random()
            if ratio < 0.8:
                ids[idx] = mask
            elif ratio < 0.9:
                # random change
                ids[idx] = random.choice(list(range(vocab_size)))
            # else (ratio >= 0.9): keep the original token
            mask_label.append(i)
        else:
            mask_label.append(-1)
    return mask_label
# ========== dual-bert ========== #
def length_limit(ids, max_len):
    '''Truncate from the left but always keep the first token ([CLS]).'''
    if len(ids) <= max_len:
        return ids
    # keep [CLS] plus the most recent max_len-1 tokens
    return [ids[0]] + ids[-(max_len - 1):]
def length_limit_res(ids, max_len, sep=0):
    '''Truncate from the right, keeping [CLS] at the front and forcing the
    last token to be [SEP].'''
    if len(ids) <= max_len:
        return ids
    return ids[:max_len - 1] + [sep]
# ======== Evaluation Perturbation ========== #
def delete(ids, tids, delete_ratio=0.15, min_delete_num=2, special_tokens=[]):
    """Randomly delete non-special tokens from ids.

    Returns (new_ids, delete_label, pert_label): delete_label holds, at each
    original position, -1 for kept tokens or the index in new_ids where the
    deletion occurred; pert_label mirrors it with perturbation code 0.

    Fixes: positions were previously compared against token VALUES
    (`for i in ids: if i not in delete_idx`) instead of indices; the sample
    size is now clamped so random.sample cannot raise; the unused new_tids
    list was dropped. `tids` is kept for signature compatibility but unused.
    """
    candidates = [idx for idx in range(len(ids)) if ids[idx] not in special_tokens]
    delete_num = max(
        min_delete_num,
        min(
            len(ids),
            int(len(ids) * delete_ratio),
        )
    )
    # Clamp: never ask random.sample for more positions than exist.
    delete_num = min(delete_num, len(candidates))
    delete_idx = set(random.sample(candidates, delete_num))
    new_ids, delete_label = [], []
    for idx, token in enumerate(ids):
        if idx in delete_idx:
            # record where in new_ids the deletion happened
            delete_label.append(len(new_ids))
        else:
            new_ids.append(token)
            delete_label.append(-1)
    pert_label = [-1 if lbl == -1 else 0 for lbl in delete_label]
    return new_ids, delete_label, pert_label
def duplicate(ids, duplicate_ratio=0.15, min_duplicate_num=2, special_tokens=[]):
duplicate_num = max(
min_duplicate_num,
min(
len(ids),
int(len(ids) * duplicate_ratio),
)
)
duplicate_idx = [i for i in range(len(ids)) if ids[i] not in special_tokens]
duplicate_idx = random.sample(duplicate_idx, duplicate_num)
new_ids, duplicate_label = [], []
for i in ids:
if i not in duplicate_idx:
new_ids.append(i)
duplicate_label.append(-1)
else:
num = random.choice([2, 3, 4])
new_ids.extend([i] * num)
duplicate_label.extend([len(new_ids)-i_ for i_ in range(num)])
pert_label = [-1 if i == -1 else 1 for i in duplicate_label]
return new_ids, duplicate_label, pert_label
def replacement(ids, replace_ratio=0.15, min_replace_num=2, vocab_size=0, special_tokens=[]):
replace_num = max(
min_replace_num,
min(
len(ids),
int(len(ids) * replace_ratio),
)
)
replace_idx = [i for i in range(len(ids)) if ids[i] not in special_tokens]
replace_idx = random.sample(replace_idx, replace_num)
new_ids, replace_label = [], []
for i in ids:
if i not in replace_idx:
new_ids.append(i)
replace_label.append(-1)
else:
# random replace
new_ids.append(random.choice(range(vocab_size)))
replace_label.append(i)
pert_label = [-1 if i == -1 else 2 for i in replace_label]
return new_ids, replace_label, pert_label
def mask_sentence_only_mask(
    ids, min_mask_num, max_mask_num, masked_lm_prob,
    special_tokens=[], mask=-1, vocab_size=21128,
):
    '''change the ids, and return the mask_label

    Like mask_sentence but every chosen position becomes `mask` (no random
    replacement / keep-original branches). vocab_size is unused here.
    NOTE(review): random.sample raises ValueError if num_mask exceeds the
    number of non-special positions — confirm callers guarantee this.
    '''
    num_valid = len([i for i in ids if i not in special_tokens])
    # Clamp the mask count to [min_mask_num, max_mask_num] around the target rate.
    num_mask = max(
        min_mask_num,
        min(
            int(masked_lm_prob * num_valid),
            max_mask_num,
        )
    )
    mask_pos = [idx for idx, i in enumerate(ids) if i not in special_tokens]
    mask_idx = random.sample(mask_pos, num_mask)
    mask_label = []
    for idx, i in enumerate(ids):
        if idx in mask_idx:
            ids[idx] = mask
            mask_label.append(i)
        else:
            mask_label.append(-1)
    return mask_label
# ========== context augmentation ========== #
def sentence_shuffle(context_utterances):
if len(context_utterances) == 1:
return context_utterances
else:
random_idx = list(range(len(context_utterances)))
while True:
random.shuffle(random_idx)
if random_idx[-1] != len(context_utterances) - 1:
break
context_utterances = [context_utterances[i] for i in random_idx]
return context_utterances
def token_shuffle(context_utterances):
for i in range(len(context_utterances)):
random.shuffle(context_utterances[i])
return context_utterances
def sentence_deletion(context_utterances):
if len(context_utterances) == 1:
return context_utterances
else:
random_idx = random.choice(range(len(context_utterances)-1))
context_utterances = [context_utterances[i] for i in range(len(context_utterances)) if i != random_idx]
return context_utterances
def replace_last_utterance(context_utterances, pool):
response = random.choice(pool)['rids']
response = response[1:-1]
context_utterances[-1] = response
return context_utterances
def random_insert_before_context(context_utterances, pool):
u = random.choice(random.choice(pool)['cids'])
context_utterances.insert(0, u)
return context_utterances
def random_insert_context(context_utterances, pool):
u = random.choice(random.choice(pool)['cids'])
idx = random.choice(range(len(context_utterances)))
context_utterances.insert(idx, u)
return context_utterances
|
165893
|
DOCUMENT_MAPPING = [
{
'name': 'title',
'pattern': '<{}> dc:title|rdfs:label ?title .',
'type': 'string'
},
{
'name': 'journalTitle',
'pattern': """?journal vivo:publicationVenueFor <{}> ;
rdfs:label ?journalTitle .""",
'type': 'string'
},
{
'name': 'doi',
'pattern': '<{}> bibo:doi ?doi .',
'type': 'string'
},
{
'name': 'issn',
'pattern': """?journal vivo:publicationVenueFor <{}> ;
bibo:issn ?issn .""",
'type': 'string'
},
{
'name': 'isbn',
'pattern': """?journal vivo:publicationVenueFor <{}> ;
bibo:isbn ?isbn .""",
'type': 'string'
},
{
'name': 'date',
'pattern': """<{}> vivo:dateTimeValue ?dateURI .
?dateURI vivo:dateTime ?date .""",
'type': 'string'
},
{
'name': 'volume',
'pattern': '<{}> bibo:volume ?volume .',
'type': 'string'
},
{
'name': 'number',
'pattern': '<{}> bibo:number ?number .',
'type': 'string'
},
{
'name': 'startPage',
'pattern': '<{}> bibo:pageStart ?startPage .',
'type': 'string'
},
{
'name': 'endPage',
'pattern': '<{}> bibo:pageEnd ?endPage .',
'type': 'string'
},
{
'name': 'pmid',
'pattern': '<{}> bibo:pmid ?pmid .',
'type': 'string'
},
{
'name': 'publisher',
'pattern': """?journal vivo:publicationVenueFor <{}> .
?publisherURI vivo:publisherOf ?journal ;
rdfs:label ?publisher .""",
'type': 'string'
},
{
'name': 'subjects',
'fields': ['subject'],
'pattern': """?subjectURI vivo:subjectAreaOf <{}> ;
rdfs:label ?subject .""",
'type': 'array'
},
{
'name': 'abstract',
'pattern': '<{}> bibo:abstract ?abstract .',
'type': 'string'
},
{
'name': 'keywords',
'fields': ['keyword'],
'pattern': '<{}> vivo:freetextKeyword ?keyword .',
'type': 'array'
},
{
'name': 'types',
'fields': ['type'],
'pattern': """<{}> vitro:mostSpecificType ?typeURI .
?typeURI rdfs:label ?type .""",
'type': 'array'
},
{
'name': 'authors',
'fields': ['sameAs', 'name', 'givenName', 'familyName'],
'pattern': """?authorship a vivo:Authorship .
?authorship vivo:relates <{}> .
?sameAs vivo:relatedBy ?authorship .
?sameAs a foaf:Person .
?sameAs rdfs:label ?name .
?sameAs obo:ARG_2000028 ?vcard .
?vcard vcard:hasName ?nameUri .
?nameUri vcard:familyName ?familyName .
?nameUri vcard:givenName ?givenName .""",
'type': 'dict'
}
]
AUTHOR_MAPPING = {
'email': {
'name': 'email',
'pattern': """<{}> obo:ARG_2000028 ?vcard .
?vcard vcard:hasEmail ?emailUri .
?emailUri vcard:email ?email .""",
'type': 'string'
},
'affiliation': {
'name': 'affiliation',
'fields': ['name'],
'pattern': """?role obo:RO_0000052 <{}> .
?role vivo:roleContributesTo ?organization .
?organization rdfs:label ?name .
?role vivo:dateTimeInterval ?interval .
FILTER NOT EXISTS {{ ?interval vivo:end ?end }}""",
'type': 'dict'
},
'orcidId': {
'name': 'orcidId',
'pattern': """<{}> vivo:orcidId ?orcidId .
FILTER isURI(?orcidId)""",
'type': 'string'
}
}
|
165917
|
import os
def prepare_videos(
    videos, extension, start, duration, kinect_mask=True, width=1920, height=1080
):
    """Dump frames, segment them and extract a background still for each video.

    videos: base paths (without extension) used both as input names and as
    per-video output directories. start is an offset in seconds; duration is
    a string, "-1" meaning "to the end". Exits the process on any tool failure.
    NOTE(review): paths are interpolated directly into shell commands via
    os.system — unsafe for untrusted file names.
    """
    video_start_secs = start % 60
    video_start_mins = start // 60
    print(f"Dumping frames and segmenting {len(videos)} input videos")
    for i, video in enumerate(videos):
        try:
            os.makedirs(video)
        except FileExistsError:
            # Existing directory is treated as "already processed" — skip it.
            continue
        print(f"Dumping frames from {video} ({i+1}/{len(videos)})...")
        ffmpeg_duration = ""
        if duration != "-1":
            ffmpeg_duration = f"-t {duration}"
        # Extract scaled PNG frames starting at the requested offset.
        code = os.system(
            f"ffmpeg -y -ss 00:{video_start_mins:02}:{video_start_secs:02}.000 "
            f"-vsync 0 "
            f"-i {video}{extension} -vf scale={width}:{height} "
            f"-map 0:0 {ffmpeg_duration} {video}/%04d_img.png -hide_banner"
            f" > bg_matting_logs.txt 2>&1"
        )
        if code != 0:
            exit(code)
        print(f"Segmenting frames...")
        if kinect_mask:
            # Use the Kinect depth-based mask generator (Windows executable).
            code = os.system(
                f"KinectMaskGenerator.exe {video}{extension} {video} {start} {duration}"
                f" > segmentation_logs_{i}.txt 2>&1"
            )
            if code != 0:
                exit(code)
        else:
            # Fall back to DeepLab-based segmentation.
            code = os.system(
                f"python segmentation_deeplab.py -i {video}"
                f" > segmentation_logs_{i}.txt 2>&1"
            )
            if code != 0:
                exit(code)
        print(f"Extracting background...")
        # Grab a single frame at t=2s as the background plate.
        code = os.system(
            f"ffmpeg -y -i {video}{extension} -vf scale={width}:{height} "
            f"-map 0:0 -ss 00:00:02.000 -vframes 1 {video}.png -hide_banner"
            " > bg_matting_logs.txt 2>&1"
        )
        if code != 0:
            exit(code)
|
165925
|
from collections import defaultdict
class Graph:
    """Adjacency-list graph with a greedy vertex-coloring routine."""
    def __init__(self, V, directed=False):
        # V: number of vertices, labelled 0..V-1
        self.V = V
        self.directed = directed
        self.graph = defaultdict(list)
    def add_edge(self, a, b):
        """Add edge a->b (and b->a when the graph is undirected)."""
        self.graph[a].append(b)
        if not self.directed:
            self.graph[b].append(a)
    def color_greedy(self):
        """Greedily color vertices; returns (colors, highest_color_index).

        Fixes: previously only vertices present in the adjacency dict were
        visited, so isolated vertices kept the sentinel color -1; and the
        neighbor-color list was rebuilt for every candidate color (O(n^2)).
        Now every vertex 0..V-1 is colored and neighbor colors are collected
        into a set once per vertex.
        """
        result = [-1] * self.V
        max_color = 0
        for v in range(self.V):  # visit every vertex, including isolated ones
            neighbor_colors = {result[u] for u in self.graph[v]}
            color = 0
            while color in neighbor_colors:
                color += 1
            max_color = max(max_color, color)
            result[v] = color
        return result, max_color
if __name__ == "__main__":
    # Demo: greedily color a small undirected graph and report the result.
    g = Graph(5)
    g.add_edge(0,1)
    g.add_edge(0,2)
    g.add_edge(1,2)
    g.add_edge(1,3)
    g.add_edge(2,3)
    g.add_edge(3,4)
    res,m = g.color_greedy()
    print("max colors: {} list: {}".format(m,res))
|
165947
|
import contextlib
from typing import Any, Dict, Iterable, Optional
import discord
import iso8601
import validators
from redbot.core import commands
from redbot.vendored.discord.ext import menus
class GenericMenu(menus.MenuPages, inherit_buttons=False):
    """Reaction-paginated menu with wrap-around arrows, first/last jump
    buttons and a close (cross-mark) button."""
    def __init__(
        self,
        source: menus.PageSource,
        cog: Optional[commands.Cog] = None,
        ctx=None,
        clear_reactions_after: bool = True,
        delete_message_after: bool = False,
        add_reactions: bool = True,
        using_custom_emoji: bool = False,
        using_embeds: bool = False,
        # NOTE(review): add_reactions, using_custom_emoji and
        # keyword_to_reaction_mapping are accepted but not used in this
        # class — presumably consumed by subclasses; confirm.
        keyword_to_reaction_mapping: Optional[Dict[str, str]] = None,
        timeout: int = 180,
        message: Optional[discord.Message] = None,
        **kwargs: Any,
    ) -> None:
        self.cog = cog
        self.ctx = ctx
        super().__init__(
            source,
            clear_reactions_after=clear_reactions_after,
            delete_message_after=delete_message_after,
            check_embeds=using_embeds,
            timeout=timeout,
            message=message,
            **kwargs,
        )
    def reaction_check(self, payload):
        """The function that is used to check whether the payload should be processed.
        This is passed to :meth:`discord.ext.commands.Bot.wait_for <Bot.wait_for>`.
        There should be no reason to override this function for most users.
        Parameters
        ------------
        payload: :class:`discord.RawReactionActionEvent`
            The payload to check.
        Returns
        ---------
        :class:`bool`
            Whether the payload should be processed.
        """
        if payload.message_id != self.message.id:
            return False
        # Only the invoking author or a bot owner may drive the menu.
        if payload.user_id not in (*self.bot.owner_ids, self._author_id):
            return False
        return payload.emoji in self.buttons
    def _skip_single_arrows(self):
        # Hide prev/next arrows for single-page (or unknown-length) sources.
        max_pages = self._source.get_max_pages()
        if max_pages is None:
            return True
        return max_pages == 1
    def _skip_double_triangle_buttons(self):
        # Hide first/last jump buttons unless there are at least 3 pages.
        max_pages = self._source.get_max_pages()
        if max_pages is None:
            return True
        return max_pages <= 2
    # left
    @menus.button(
        "\N{BLACK LEFT-POINTING TRIANGLE}", position=menus.First(1), skip_if=_skip_single_arrows
    )
    async def prev(self, payload: discord.RawReactionActionEvent):
        """Go to the previous page, wrapping around to the last page."""
        if self.current_page == 0:
            await self.show_page(self._source.get_max_pages() - 1)
        else:
            await self.show_checked_page(self.current_page - 1)
    @menus.button("\N{CROSS MARK}", position=menus.First(2))
    async def stop_pages_default(self, payload: discord.RawReactionActionEvent) -> None:
        """Stop the menu and delete its message (if still present)."""
        self.stop()
        with contextlib.suppress(discord.NotFound):
            await self.message.delete()
    @menus.button(
        "\N{BLACK RIGHT-POINTING TRIANGLE}", position=menus.First(2), skip_if=_skip_single_arrows
    )
    async def next(self, payload: discord.RawReactionActionEvent):
        """Go to the next page, wrapping around to the first page."""
        if self.current_page == self._source.get_max_pages() - 1:
            await self.show_page(0)
        else:
            await self.show_checked_page(self.current_page + 1)
    @menus.button(
        "\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
        position=menus.First(0),
        skip_if=_skip_double_triangle_buttons,
    )
    async def go_to_first_page(self, payload: discord.RawReactionActionEvent):
        """go to the first page"""
        await self.show_page(0)
    @menus.button(
        "\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
        position=menus.Last(1),
        skip_if=_skip_double_triangle_buttons,
    )
    async def go_to_last_page(self, payload: discord.RawReactionActionEvent):
        """go to the last page"""
        # The call here is safe because it's guarded by skip_if
        await self.show_page(self._source.get_max_pages() - 1)
class ArticleFormat(menus.ListPageSource):
    """Page source rendering one news article per page as an embed."""
    def __init__(self, entries: Iterable[str]):
        super().__init__(entries, per_page=1)
    # annotation fixed: this returns an Embed, not a str
    async def format_page(self, menu: GenericMenu, article) -> discord.Embed:
        # article is a dict in the NewsAPI shape (title, description,
        # publishedAt, url, urlToImage, author, source.name).
        embed = discord.Embed(
            title=article["title"],
            color=await menu.ctx.embed_colour(),
            description=f"\n{article['description']}",
            timestamp=iso8601.parse_date(article["publishedAt"]),
            url=article["url"],
        )
        # Only attach the image when the URL is present and well-formed.
        if article["urlToImage"] is not None and validators.url(article["urlToImage"]):
            embed.set_image(url=article["urlToImage"])
        embed.set_author(name=f"{article['author']} - {article['source']['name']}")
        embed.set_footer(text=f"Article {menu.current_page + 1 }/{menu._source.get_max_pages()}")
        return embed
|
165975
|
from inspect import getfullargspec
from typing import Union, List, Optional
import operator
from collections import namedtuple
# Patch wraps a Var so it can be unpacked via iteration (see Var.__iter__).
Patch = namedtuple('Patch', ['var'])
# Unique sentinel returned by Pattern.match implementations to signal "no match".
match_err = object()
class Pattern:
    """Abstract base for matchable patterns."""
    def match(self, expr):
        """Return a tuple of captures on success, or `match_err` on failure.

        Fix: previously `raise NotImplemented` — NotImplemented is a constant,
        not an exception, so raising it produced a TypeError.
        """
        raise NotImplementedError
    def __repr__(self):
        # delegate to subclasses' __str__ implementations
        return self.__str__()
class Type(Pattern):
    """Marker base class for type-level patterns."""
    pass
class TypeVar(Type):
    """Type pattern constrained by lower bounds (inf), upper bounds (sup),
    excluded types (negative_types) and trait predicates.

    The comparison operators build NEW constrained TypeVars (a small DSL)
    rather than returning booleans. NOTE(review): overriding __eq__ without
    __hash__ makes instances unhashable — confirm they are never stored in
    sets/dict keys.
    """
    def __init__(self,
                 u_types: set,
                 inf: set,
                 sup: set,
                 traits: set,
                 yield_out: bool = True):
        # yield_out: whether a successful match captures the matched type
        self.negative_types = u_types
        self.inf = inf
        self.sup = sup
        self.traits = traits
        self.yield_out = yield_out
    def __str__(self):
        return f'Type[{self.inf}<= this <={self.sup}' \
               f'| this /= {self.negative_types}, traits:{{{self.traits}}}]'
    def __le__(self, other: type):
        # add `other` as an upper bound (and stop excluding it)
        return TypeVar(self.negative_types - {other}, self.inf,
                       self.sup | {other}, self.traits, self.yield_out)
    def __ge__(self, other: type):
        # add `other` as a lower bound (and stop excluding it)
        return TypeVar(self.negative_types - {other}, self.inf | {other},
                       self.sup, self.traits, self.yield_out)
    def __lt__(self, other: type):
        # strict upper bound: `other` bounds from above but is itself excluded
        return TypeVar(self.negative_types | {other}, self.inf,
                       self.sup | {other}, self.traits, self.yield_out)
    def __gt__(self, other: type):
        # strict lower bound: `other` bounds from below but is itself excluded
        return TypeVar(self.negative_types | {other}, self.inf | {other},
                       self.sup, self.traits, self.yield_out)
    def __eq__(self, other: type):
        # exact type: both an upper and a lower bound
        return TypeVar(self.negative_types - {other}, self.inf | {other},
                       self.sup | {other}, self.traits, self.yield_out)
    def __ne__(self, other: type):
        # exclusion: `other` must not be the matched type
        return TypeVar(self.negative_types | {other}, self.inf, self.sup,
                       self.traits, self.yield_out)
    def __and__(self, other: Type):
        # non-Type operands are coerced into an exact, non-capturing TypeVar
        if not isinstance(other, Type):
            other = TypeVar(set(), {other}, {other}, set(), yield_out=False)
        return IntersectionType([self, other])
    def __or__(self, other: Type):
        if not isinstance(other, Type):
            other = TypeVar(set(), {other}, {other}, set(), yield_out=False)
        return UnionType([self, other])
    def __invert__(self):
        return DifferenceType(self)
    def __mod__(self, **kwargs):
        # NOTE(review): `%` is a binary operator and never supplies **kwargs,
        # so this method cannot be invoked via `%` — likely dead/broken code.
        return TypeVar(self.negative_types, self.inf, self.sup,
                       set(kwargs.items()) | self.traits)
    def when(self, trait):
        # attach an extra predicate the matched type must satisfy
        return TypeVar(self.negative_types, self.inf, self.sup,
                       self.traits | {trait}, self.yield_out)
    def match(self, expr: type):
        def isn(u_type):
            return u_type is not expr
        def is_inf(u_type):
            return issubclass(expr, u_type)
        def is_sup(u_type):
            return issubclass(u_type, expr)
        # all exclusions, bounds and traits must hold for a match
        if all(map(isn, self.negative_types)) and \
                all(map(is_inf, self.inf)) and \
                all(map(is_sup, self.sup)) and \
                all(trait(expr) for trait in self.traits):
            if self.yield_out:
                return expr,
            return ()
        else:
            return match_err
class UnionType(Type):
    """Pattern that matches when ANY member pattern matches; returns the
    first successful member's captures."""
    def __init__(self, types: List[Type]):
        self.types = types
    def __str__(self):
        members = ', '.join(f'<{member}>' for member in self.types)
        return 'Union[{}]'.format(members)
    def match(self, expr):
        for member in self.types:
            captures = member.match(expr)
            if captures is not match_err:
                return captures
        return match_err
    def __and__(self, other):
        return IntersectionType([self, other])
    def __or__(self, other):
        # flatten: extend this union rather than nesting unions
        return UnionType([*self.types, other])
    def __invert__(self):
        return DifferenceType(self)
class IntersectionType(Type):
    """Pattern that matches when ALL member patterns match; captures from
    every member are concatenated in order."""
    def __init__(self, types: List[Type]):
        self.types = types
    def __str__(self):
        members = ', '.join(f'<{member}>' for member in self.types)
        return 'Intersection[{}]'.format(members)
    def match(self, expr):
        captures = []
        for member in self.types:
            result = member.match(expr)
            if result is match_err:
                return match_err
            captures.extend(result)
        return tuple(captures)
    def __and__(self, other):
        # flatten: extend this intersection rather than nesting
        return IntersectionType([*self.types, other])
    def __or__(self, other):
        return UnionType([self, other])
    def __invert__(self):
        return DifferenceType(self)
class DifferenceType(Type):
    """Negation pattern: matches (with no captures) only when the wrapped
    pattern does NOT match."""
    def __str__(self):
        return f'Difference[{self.type}]'
    def __init__(self, type):
        self.type = type
    def match(self, expr):
        e = self.type.match(expr)
        if e is not match_err:
            return match_err
        return ()
    def __and__(self, other):
        return IntersectionType([self, other])
    def __or__(self, other):
        return UnionType([self, other])
    def __invert__(self):
        # Fix: this was named __not__, which is not a special method Python
        # ever invokes (`not` cannot be overloaded); the sibling classes all
        # use __invert__ (~). ~DifferenceType(p) now undoes the negation.
        return self.type
    __not__ = __invert__  # kept for any code calling __not__() directly
class Var(Pattern):
    """Value pattern: optional type constraint, predicate list, and (for
    callables) an arity constraint.

    Fixes: `self.arg_nums is not -1` and `len(...) is 1` compared small ints
    by identity (`is`), which is an implementation detail and a SyntaxWarning
    on modern Python — replaced with `!=` / `==`.
    """
    def __init__(self,
                 match_fns: list,
                 type: Optional[Type],
                 arg_nums: int = -1,
                 yield_out: bool = True):
        # match_fns: predicates the matched value must satisfy
        self.match_fns = match_fns
        # coerce a plain class into an exact, non-capturing TypeVar
        if not isinstance(type, Type) and type is not None:
            self.type = TypeVar(set(), {type}, {type}, set(), False)
        else:
            self.type = type
        # -1 means "no arity constraint"; an int means exact arity; a tuple
        # means (min,) or (min, max) inclusive range
        self.arg_nums = arg_nums
        self.yield_out = yield_out
    def __str__(self):
        type = self.type if self.type is not None else 'any'
        if self.arg_nums == -1:
            return str(type)
        else:
            return f'{type}/{self.arg_nums}'
    def __call__(self, *args, **kwargs):
        # calling a Var clones it (args are ignored by design)
        return Var(self.match_fns, self.type, self.arg_nums, self.yield_out)
    def __truediv__(self, other: Union[int, tuple]):
        # var/2 constrains the matched callable's arity
        return Var(self.match_fns, self.type, other, self.yield_out)
    def __getitem__(self, item: Union[type, TypeVar]):
        # var[T] constrains the matched value's type
        return Var(
            self.match_fns, item if isinstance(item, Type) else TypeVar(
                set(),
                {item},
                {item}, set(), False), self.arg_nums, self.yield_out)
    def compare_with(self, other, by):
        """Add a predicate comparing the matched value to `other` with `by`."""
        def match_it(v):
            return by(v, other)
        return Var(self.match_fns + [match_it], self.type, self.arg_nums,
                   self.yield_out)
    def __ge__(self, other):
        return self.compare_with(other, operator.ge)
    def __le__(self, other):
        return self.compare_with(other, operator.le)
    def __eq__(self, other):
        return self.compare_with(other, operator.eq)
    def __gt__(self, other):
        return self.compare_with(other, operator.gt)
    def __lt__(self, other):
        return self.compare_with(other, operator.lt)
    def when(self, condition):
        """Add an arbitrary predicate the matched value must satisfy."""
        return Var(self.match_fns + [condition], self.type, self.arg_nums,
                   self.yield_out)
    def match(self, expr: object):
        """Return captured values on success or `match_err` on failure."""
        if self.type is not None:
            now = self.type.match(expr.__class__)
        else:
            now = ()
        if now is match_err:
            return match_err
        # check param nums
        if self.arg_nums != -1:
            if not callable(expr):
                return match_err
            arg_info = getfullargspec(expr)
            arg_least_num = len(arg_info.args) + len(arg_info.kwonlyargs)
            if hasattr(expr, '__self__'):  # instance bound method
                arg_least_num -= 1
            has_var_arg = arg_info.varkw or arg_info.varargs
            if isinstance(self.arg_nums, tuple):
                if len(self.arg_nums) == 1:
                    # (min,): at least this many parameters
                    if self.arg_nums[0] < arg_least_num:
                        return match_err
                else:
                    # (min, max): inclusive range, and no *args/**kwargs
                    if has_var_arg or not (self.arg_nums[0] <= arg_least_num <=
                                           self.arg_nums[1]):
                        return match_err
            else:
                assert isinstance(self.arg_nums, int)
                if has_var_arg or arg_least_num != self.arg_nums:
                    return match_err
        if self.match_fns:
            def check_if_match(f):
                return f(expr)
            if not all(map(check_if_match, self.match_fns)):
                return match_err
        if self.yield_out:
            return (expr, ) + now
        else:
            return now
    def __iter__(self):
        # allow `x, = var` style unpacking by yielding a Patch wrapper
        yield Patch(self)
# Pre-built pattern atoms.
# NOTE(review): all four TypeVar slots share the SAME set object `es`;
# safe only because TypeVar never mutates its sets in place — confirm.
es = set()
T = TypeVar(es, es, es, es, yield_out=True)    # matches any type, capturing it
t = TypeVar(es, es, es, es, yield_out=False)   # matches any type, no capture
var = Var([], None, yield_out=True)            # matches any value, capturing it
_ = Var([], None, yield_out=False)             # anonymous wildcard, no capture
|
165977
|
import logging
from ledfxcontroller.devices import Device
import voluptuous as vol
import numpy as np
import sacn
import time
_LOGGER = logging.getLogger(__name__)
class E131Device(Device):
"""E1.31 device support"""
CONFIG_SCHEMA = vol.Schema({
vol.Required('host'): str,
vol.Required('universe', default=1): int,
vol.Required('universe_size', default=512): int,
vol.Required('channel_offset', default=1): int,
vol.Required(vol.Any('pixel_count', 'channel_count')): vol.Coerce(int)
})
def __init__(self, config):
self._config = config
# Allow for configuring in terms of "pixels" or "channels"
if 'pixel_count' in self._config:
self._config['channel_count'] = self._config['pixel_count'] * 3
else:
self._config['pixel_count'] = self._config['channel_count'] // 3
span = self._config['channel_offset'] + self._config['channel_count'] - 1
self._config['universe_end'] = self._config['universe'] + int(span / self._config['universe_size'])
if span % self._config['universe_size'] == 0:
self._config['universe_end'] -= 1
self._sacn = None
@property
def pixel_count(self):
return int(self._config['pixel_count'])
def activate(self):
if self._sacn:
raise Exception('sACN sender already started.')
# Configure sACN and start the dedicated thread to flush the buffer
self._sacn = sacn.sACNsender()
for universe in range(self._config['universe'], self._config['universe_end'] + 1):
_LOGGER.info("sACN activating universe {}".format(universe))
self._sacn.activate_output(universe)
if (self._config['host'] == None):
self._sacn[universe].multicast = True
else:
self._sacn[universe].destination = self._config['host']
self._sacn[universe].multicast = False
#self._sacn.fps = 60
self._sacn.start()
_LOGGER.info("sACN sender started.")
super().activate()
def deactivate(self):
super().deactivate()
if not self._sacn:
raise Exception('sACN sender not started.')
# Turn off all the LEDs when deactivating. With how the sender
# works currently we need to sleep to ensure the pixels actually
# get updated. Need to replace the sACN sender such that flush
# directly writes the pixels.
self.flush(np.zeros(self._config['channel_count']))
time.sleep(1.5)
self._sacn.stop()
self._sacn = None
_LOGGER.info("sACN sender stopped.")
    def flush(self, data):
        """Flush the data to all the E1.31 channels account for spanning universes"""
        if not self._sacn:
            raise Exception('sACN sender not started.')
        # The input buffer must cover exactly the configured channel window.
        if data.size != self._config['channel_count']:
            raise Exception('Invalid buffer size.')
        data = data.flatten()
        current_index = 0
        for universe in range(self._config['universe'], self._config['universe_end'] + 1):
            # Calculate offset into the provide input buffer for the channel. There are some
            # cleaner ways this can be done... This is just the quick and dirty
            # Absolute channel range [universe_start, universe_end) covered by
            # this universe, counted from the start of the first universe.
            universe_start = (universe - self._config['universe']) * self._config['universe_size']
            universe_end = (universe - self._config['universe'] + 1) * self._config['universe_size']
            # Clamp to the configured channel window, then wrap into this
            # universe's local DMX index space.
            dmx_start = max(universe_start, self._config['channel_offset']) % self._config['universe_size']
            dmx_end = min(universe_end, self._config['channel_offset'] + self._config['channel_count']) % self._config['universe_size']
            # A modulo of 0 means the window ends exactly on the universe
            # boundary, i.e. the rest of the universe is fully used.
            if dmx_end == 0:
                dmx_end = self._config['universe_size']
            # Consume the matching slice of the flattened input buffer.
            input_start = current_index
            input_end = current_index + dmx_end - dmx_start
            current_index = input_end
            # Read-modify-write this universe's DMX frame.
            dmx_data = np.array(self._sacn[universe].dmx_data)
            dmx_data[dmx_start:dmx_end] = data[input_start:input_end]
            self._sacn[universe].dmx_data = dmx_data
|
166010
|
# Python 2 script: reads n and n integers, sorts them, then walks the sorted
# list two at a time printing values that break the "comes in equal pairs"
# pattern.
n = int(raw_input())
data = [int(i) for i in raw_input().split()]
data = sorted(data)
ans = 0
for i in xrange(0, n, 2):
    if data[i] != data[i + 1]:
        print str(data[i]),
        # NOTE(review): reassigning the loop variable does not affect the
        # next iteration of a Python for-loop — confirm this decrement is
        # intended (it currently has no effect).
        i -= 1
        ans += 1
# Fewer than two unpaired values found so far: the last element is also
# unmatched (presumably n is odd in that case — TODO confirm).
if ans != 2:
    print str(data[n-1]) + " ",
|
166026
|
from datetime import datetime, timezone
import json
import falcon
from sikr import settings
class APIInfo(object):
    """Show the main information about the API like endpoints, version, etc.

    GET returns a JSON payload with version metadata and the current UTC
    time, OPTIONS answers 200 with no body, and every mutating verb is
    rejected with a 405 pointing at the documentation.
    """

    def on_get(self, req, res):
        """Return API version information and the current UTC timestamp."""
        payload = {
            "version": {
                "api_version": settings.__version__,
                "api_codename": settings.__codename__,
                "api_status": settings.__status__,
                "documentation": settings.__docs__
            },
            # Build an aware UTC datetime directly; datetime.utcnow() returns
            # a naive value (and is deprecated in newer Pythons) — the old
            # utcnow().replace(tzinfo=utc) dance produced the same string.
            "date": str(datetime.now(timezone.utc)),
        }
        res.status = falcon.HTTP_200
        res.body = json.dumps(payload)

    def on_options(self, req, res):
        """Answer pre-flight/OPTIONS requests with a plain 200."""
        res.status = falcon.HTTP_200

    def _method_not_allowed(self, req):
        """Build the 405 error shared by every unsupported verb."""
        return falcon.HTTPError(falcon.HTTP_405,
                                title="Client error",
                                description="{0} method not allowed.".format(req.method),
                                href=settings.__docs__)

    def on_post(self, req, res):
        raise self._method_not_allowed(req)

    def on_put(self, req, res):
        raise self._method_not_allowed(req)

    def on_update(self, req, res):
        raise self._method_not_allowed(req)

    def on_delete(self, req, res):
        raise self._method_not_allowed(req)
|
166028
|
import os
import time
import datetime
import tensorflow as tf
from PIL import Image
import numpy as np
import argparse
import json
from util import *
# ---------------------------------------------------------------------------
# Python 2 validation script: restores the latest (or requested) snapshot,
# runs optical-flow estimation over a validation list, reports the running
# average endpoint error (EPE), and can visualize the flow, the error map
# and the warped frame.
# ---------------------------------------------------------------------------
# parse args
argParser = argparse.ArgumentParser(description="runs validation and visualizations")
argParser.add_argument("-f",dest="showFlow",action="store_true")
argParser.add_argument("-e",dest="showError",action="store_true")
argParser.add_argument("-w",dest="showWarp",action="store_true")
argParser.add_argument("-a",dest="testAll",action="store_true")
argParser.add_argument("-t",dest="use2015",action="store_true")
argParser.add_argument("-i","--iteration",dest="iteration",action="store",default=0,type=int)
argParser.add_argument("-g","--gpu",dest="gpu",action="store",default=0,type=int)
cmdArgs = argParser.parse_args()
# multi gpu management
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(cmdArgs.gpu)
# load instance params
with open("hyperParams.json") as f:
    instanceParams = json.load(f)
# find latest snapshot
snapshotFiles = os.listdir("snapshots")
snapshotFiles = [filename for filename in snapshotFiles if filename[-11:] == ".ckpt.index"]
snapshotFiles.sort()
if cmdArgs.iteration > 0:
    # explicit iteration requested on the command line
    iter = cmdArgs.iteration
    print "testing "+str(iter)
elif len(snapshotFiles) > 0:
    # snapshot names look like "iter_<number>.ckpt.index"; strip the
    # "iter_" prefix and ".ckpt.index" suffix to recover the iteration
    iter = int(snapshotFiles[-1][5:-11])
    print "testing "+ str(iter)
else:
    print "No snapshots found"
    exit()
# kitti2015 override
if cmdArgs.use2015:
    instanceParams["dataset"] = "kitti2015"
# import data: pick datalists and the padded evaluation resolution per dataset
if instanceParams["dataset"] == "kitti2012":
    datasetRoot = "../example_data/"
    frame0Path = datasetRoot+"datalists/valPath1.txt";
    frame1Path = datasetRoot+"datalists/valPath2.txt";
    if cmdArgs.testAll:
        flowPath = datasetRoot+"datalists/valPathFloAll.txt";
    else:
        flowPath = datasetRoot+"datalists/valPathFlo.txt";
    desiredHeight = 384
    desiredWidth = 1280
elif instanceParams["dataset"] == "kitti2015":
    datasetRoot = "/home/jjyu/datasets/KITTI2015/"
    frame0Path = datasetRoot+"datalists/val_im1.txt";
    frame1Path = datasetRoot+"datalists/val_im2.txt";
    if cmdArgs.testAll:
        flowPath = datasetRoot+"datalists/val_flo_all.txt";
    else:
        flowPath = datasetRoot+"datalists/val_flo.txt";
    desiredHeight = 384
    desiredWidth = 1280
elif instanceParams["dataset"] == "sintel":
    datasetRoot = "/home/jjyu/datasets/Sintel/"
    frame0Path = datasetRoot+"datalists/val_im1.txt";
    frame1Path = datasetRoot+"datalists/val_im2.txt";
    flowPath = datasetRoot+"datalists/val_flow.txt";
    desiredHeight = 448
    desiredWidth = 1024
else:
    print "unknown dataset"
    exit()
# datalist entries are relative paths with a trailing newline ([:-1] strips it)
with open(frame0Path) as f:
    imagePairs0 = [datasetRoot+x[:-1] for x in f.readlines()]
with open(frame1Path) as f:
    imagePairs1 = [datasetRoot+x[:-1] for x in f.readlines()]
with open(flowPath) as f:
    imageFlows = [datasetRoot+x[:-1] for x in f.readlines()]
iterations = len(imageFlows)
testData = TestData(imagePairs0,imagePairs1,imageFlows,1,desiredHeight,desiredWidth)
# build graph (TestData/NetworkBody/epeEval etc. come from util's wildcard import)
with tf.device("/gpu:"+str(cmdArgs.gpu)):
    with tf.variable_scope("netShare"):
        networkBody = NetworkBody(testData,instanceParams)
    flowFinal = networkBody.flows[0]
    epe, errorMap = epeEval(flowFinal,testData.flow,testData.flowMask)
    # visualize
    flowViz = flowToRgb(flowFinal)
    errorMap = errorMap/20
    transformGrid = flowTransformGrid(flowFinal)
    # add the dataset mean back before warping so the warped frame is viewable
    mean = [0.448553, 0.431021, 0.410602]
    mean = tf.expand_dims(tf.expand_dims(tf.expand_dims(mean,0),0),0)
    warped = flowWarp(testData.frame1["rgb"]+mean,flowFinal)
# saver
saver = tf.train.Saver()
# config tensorflow
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
# start testing
epes = []
with tf.Session(config=config) as sess:
    saver.restore(sess,"snapshots/iter_"+str(iter).zfill(16)+".ckpt")
    # run one validation example per step, feeding file paths directly
    lastPrint = time.time()
    for i in range(iterations):
        feed_dict = {
            testData.im0File: imagePairs0[i],
            testData.im1File: imagePairs1[i],
            testData.flowFile: imageFlows[i],
        }
        result = sess.run([epe,flowViz,errorMap,warped,testData.height,testData.width],feed_dict=feed_dict)
        h = result[4]
        w = result[5]
        flattened = [result[0][0]]
        epes += flattened
        # running average EPE over all examples seen so far
        print sum(epes)/float(len(epes))
        # optional visualizations; images are clamped to [0,1], scaled to
        # uint8, and cropped to the true (unpadded) h x w before display
        if cmdArgs.showFlow:
            arr = np.minimum(np.asarray(result[1]),1)
            arr = np.maximum(arr,0)
            arr = np.squeeze(np.asarray(arr*255,np.uint8))
            im = Image.fromarray(arr[:h,:w,:])
            im.show()
            raw_input("press to continue")
        if cmdArgs.showError:
            arr = np.minimum(np.asarray(result[2]),1)
            arr = np.maximum(arr,0)
            arr = np.squeeze(np.asarray(arr*255,np.uint8))
            im = Image.fromarray(arr[:h,:w])
            im.show()
            raw_input("press to continue")
        if cmdArgs.showWarp:
            arr = np.minimum(np.asarray(result[3]),1)
            arr = np.maximum(arr,0)
            arr = np.squeeze(np.asarray(arr*255,np.uint8))
            im = Image.fromarray(arr[:h,:w,:])
            im.show()
            raw_input("press to continue")
|
166034
|
from os import path
from queue import Queue
from sys import stderr
from threading import Thread
from time import sleep
from typing import Callable, List, NamedTuple, Optional, Sequence, Text
from psutil import AccessDenied, NoSuchProcess, Popen, Process
from psutil._pslinux import popenfile
from .progress import Output, Progress
Argv = Optional[Sequence[Text]]
def run_main(main: Callable[[], None]):
    """
    Runs the main function. Add try/catch wrappers or whatever you need here.
    That's useful in case you want to have several points to call main().

    Parameters
    ----------
    main
        Main function
    """
    result = main()
    return result
class FileInfo(NamedTuple):
    """
    Current information about the file
    """

    # Path of the open file as reported by psutil
    path: Text
    # Total file size in bytes (cached when the file is first seen)
    size: int
    # Current read offset within the file, in bytes
    position: int
class SpyProcess:
    """
    Spying process to detect the currently open files and their current reading
    advancement.

    Notes
    -----
    There is three threads at play here:

    - The main thread, which handles the display
    - The ticking thread which drives refreshing
    - The process watching thread, which waits for the process to be done

    Both the ticking and the process threads communicate their ticks to the
    main thread through a queue. This way the main thread can easily display
    an update every second and close instantly when the process is done.
    """

    def __init__(
        self, args: Sequence[Text], period: float, output: Output, attach: Optional[int] = None
    ):
        """
        Parameters
        ----------
        args
            Command line (program and arguments) to spawn when not attaching
        period
            Refresh period of the display, in seconds
        output
            Output configuration forwarded to the progress display
        attach
            Optional PID of an already-running process to attach to instead
            of spawning ``args``.

        Notes
        -----
        Bug fix: the signature previously read ``attach=Optional[int]``,
        which made the typing construct itself the default value — so
        ``self.attach is None`` was never true and :meth:`start` always took
        the attach branch and failed. The default is now a real ``None``.
        """
        self.args = args
        self.attach = attach
        self.proc: Optional[Popen] = None
        # path -> cached file size, for files currently open by the process
        self.files_cache = {}
        # ticks from both helper threads; a True tick means "process done"
        self.display_ticks = Queue()
        self.process_thread = Thread(target=self.watch_process)
        self.ticks_thread = Thread(
            target=self.generate_ticks, args=(period,), daemon=True
        )
        self.progress = Progress(output)
        self.counters = {}

    def start(self):
        """
        If a PID was supplied to attach in the CLI options then use it.
        Otherwise use the provided arguments to start the command.
        """
        if self.attach is None:
            try:
                self.proc = Popen(self.args)
            except FileNotFoundError:
                stderr.write(f'Could not find command "{self.args[0]}"\n')
                exit(1)
        else:
            try:
                self.proc = Process(self.attach)
            except (AccessDenied, NoSuchProcess):
                stderr.write(
                    f"Could not attach process {self.attach}. Does it exist? "
                    f"Do you have the rights?\n"
                )
                exit(1)

    def open_files(self) -> List[popenfile]:
        """
        Returns the list of open files
        """
        return self.proc.open_files()

    def list_files(self) -> Sequence[FileInfo]:
        """
        Generates the FileInfo object of all interesting files.

        Only files open read-only are considered; sizes are cached on first
        sight, and cache entries for files no longer open are evicted.
        """
        files_in_use = set()
        out = []

        try:
            for f in self.open_files():
                if f.mode != "r":
                    continue

                files_in_use.add(f.path)

                if f.path not in self.files_cache:
                    self.files_cache[f.path] = path.getsize(f.path)

                # position is platform-dependent; only report when available
                if hasattr(f, "position"):
                    out.append(
                        FileInfo(
                            path=f.path,
                            size=self.files_cache[f.path],
                            position=f.position,
                        )
                    )
        except (AccessDenied, NoSuchProcess):
            # best-effort: the process may have exited or be restricted
            pass

        # drop cached sizes for files the process has closed
        for k in set(self.files_cache.keys()) - files_in_use:
            del self.files_cache[k]

        return out

    def print_progress(self):
        """
        UI display thread, looping around until the thing is done
        """
        stop = False

        try:
            while not stop:
                files = self.list_files()
                self.progress.update(files)
                # blocks until a tick arrives; a True tick stops the loop
                stop = self.display_ticks.get()
        finally:
            self.progress.close()

    def generate_ticks(self, period: float):
        """
        Ticks into the queue every "period" second

        Parameters
        ----------
        period
            Number of seconds between two ticks
        """
        while True:
            self.display_ticks.put(False)
            sleep(period)

    def start_display(self):
        """
        Starts the threads that will tick the display
        """
        self.ticks_thread.start()
        self.process_thread.start()
        self.print_progress()

    def watch_process(self):
        """
        Waits until the process finishes and raises the tick
        """
        self.return_code()
        self.display_ticks.put(True)

    def return_code(self) -> int:
        """
        Waits for the process to finish and returns its return code
        """
        return self.proc.wait()

    def send_signal(self, sig):
        """
        Sends a signal to the child process

        Parameters
        ----------
        sig
            Unix signal
        """
        try:
            self.proc.send_signal(sig)
        except NoSuchProcess:
            # process already gone; nothing to signal
            pass
def positive_int(x) -> int:
    """
    Checks that the provided input is a positive integer. Used for PID
    validation in the CLI arguments.

    Parameters
    ----------
    x
        A positive integer (zero is accepted)

    Returns
    -------
    The validated value as an int
    """
    value = int(x)

    if value < 0:
        raise ValueError("A positive integer is expected")

    return value
|
166073
|
from flask_login import current_user
from depc.controllers.teams import TeamController
class TeamPermission:
    """Role-membership checks for the currently logged-in user on a team."""

    @classmethod
    def _get_team(cls, team_id):
        """Fetch a team as a dict, augmented with member/editor/manager names."""
        obj = TeamController._get(filters={"Team": {"id": team_id}})

        # Add the members of the team
        team = TeamController.resource_to_dict(obj)
        roles = {
            "members": [m.name for m in obj.members],
            "editors": [m.name for m in obj.editors],
            "managers": [m.name for m in obj.managers],
        }
        team.update(roles)
        return team

    @classmethod
    def _has_role(cls, team_id, roles):
        """Return True when the current user holds any of the given roles."""
        team = cls._get_team(team_id)
        names = []
        for role in roles:
            names += team[role]
        return current_user.name in names

    @classmethod
    def is_user(cls, team_id):
        return cls._has_role(team_id, ("members", "editors", "managers"))

    @classmethod
    def is_manager_or_editor(cls, team_id):
        return cls._has_role(team_id, ("editors", "managers"))

    @classmethod
    def is_manager(cls, team_id):
        return cls._has_role(team_id, ("managers",))

    @classmethod
    def is_editor(cls, team_id):
        return cls._has_role(team_id, ("editors",))

    @classmethod
    def is_member(cls, team_id):
        return cls._has_role(team_id, ("members",))
|
166103
|
# Packaging script for PyGnuplot.
# Prefer setuptools; fall back to plain distutils on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# NOTE(review): opened without an explicit encoding, so the README is decoded
# with the locale default. The classifiers declare Python 2.7 support, which
# rules out the py3-only encoding= kwarg; if non-ASCII ever lands in
# README.rst, consider io.open(..., encoding='utf-8') instead.
with open('README.rst', 'r') as f:
    long_description = f.read()

setup(name='PyGnuplot',
      py_modules=['PyGnuplot'],
      version='0.11.16',
      license='MIT',
      description='Python Gnuplot wrapper',
      long_description=long_description,
      author='<NAME>',
      author_email=' ',
      url='https://github.com/benschneider/PyGnuplot',
      download_url='https://github.com/benschneider/PyGnuplot/archive/0.11.16.tar.gz',
      keywords=['gnuplot', 'plot'],
      # install_requires=['numpy'],
      classifiers=["Topic :: Scientific/Engineering",
                   "License :: OSI Approved :: MIT License",
                   "Programming Language :: Python :: 2.7",
                   "Programming Language :: Python :: 3.6",
                   "Development Status :: 4 - Beta"],
      )
|
166214
|
import unittest
import qnet
import qnetu
import numpy
import mytime
import netutils
import estimation
import yaml
import StringIO
import pwfun
import distributions
import sampling
import arrivals
import qstats
import queues
import test_qnet
from scipy import integrate
from numpy import random
import arrivals
import sys
from math import sqrt
class TestPS (unittest.TestCase):
    """Python 2 tests for processor-sharing (PS) queueing networks.

    Uses the small one- and two-queue networks built in setUp from the YAML
    texts at module level. Several tests are statistical: they compare sample
    means against analytic values within ~3 standard deviations, so they can
    fail rarely by chance.
    """

    def test_sample_small (self):
        # Smoke test: a 5-task sample from the two-queue net validates.
        sampling.set_seed (2341243)
        net = self.twoq
        arrv = net.sample (5)
        print arrv
        arrv.validate()

    def test_sample_validate (self):
        # Repeated sampling: arrivals validate and mean service times match
        # the configured rates (1.0, 0.5, 0.5) within 3 SDs.
        sampling.set_seed (2341243)
        net = self.twoq
        nr = 10
        nt = 100
        for ri in range(nr):
            arrv = net.sample (nt)
            arrv.validate()
            mu = qstats.mean_service (arrv)
            expected = [ 1.0, 0.5, 0.5 ]
            for mu0, exp0 in zip(mu,expected):
                sd = 1 / (exp0 * sqrt(nt))
                self.assertTrue (abs (mu0 - exp0) < 3*sd, "Mismatch (SD: %.5f)\nTRU %s\nEXP %s" % (sd,mu,expected))

    def test_read_multif (self):
        # Round-trip a full arrival set through the multi-file format.
        sampling.set_seed (2341243)
        net = self.twoq
        nr = 10
        nt = 100
        for ri in range(nr):
            arrv = net.sample (nt)
            print "ORIG", arrv
            arrv.validate()
            qnetu.write_multif_to_prefix ("ps_test_sample_validate", arrv)
            arrv2 = qnetu.read_multif_of_prefix ("ps_test_sample_validate", net)
            # print "COPY", arrv2
            arrv2.validate()

    def test_read_multif2 (self):
        # Same round-trip but with only a 50% observed subset of tasks.
        sampling.set_seed (2341243)
        net = self.twoq
        nr = 10
        nt = 100
        for ri in range(nr):
            arrv = net.sample (nt)
            arrv.validate()
            obs = arrv.subset_by_task (0.5)
            # print "ORIG", arrv
            qnetu.write_multif_to_prefix ("ps_test_sample_validate2", obs)
            arrv2 = qnetu.read_multif_of_prefix ("ps_test_sample_validate2", net)
            # print "COPY", arrv2
            arrv2.validate()

    def test_initialize (self):
        # Gibbs initialization from partial observations yields a valid state.
        sampling.set_seed (2341243)
        net = self.twoq
        nr = 10
        nt = 100
        for ri in range(nr):
            arrv = net.sample (nt)
            obs = arrv.subset_by_task (0.5)
            ini = net.gibbs_initialize (obs)
            print "TRUE", arrv
            print "INI", ini
            ini.validate()

    def test_initialize_for_ps (self):
        # PS-specific Gibbs initialization also yields a valid state.
        sampling.set_seed (2341243)
        net = self.twoq
        nr = 10
        nt = 100
        for ri in range(nr):
            arrv = net.sample (nt)
            obs = arrv.subset_by_task (0.5)
            ini = qnet.gibbs_initialize_for_ps (net, obs)
            ini.validate()

    def test_sem (self):
        # Stochastic EM on the one-queue net; prints estimated vs true params.
        sampling.set_seed (67826)
        # net = self.twoq
        net = self.oneq
        nr = 1
        nt = 50
        theta0 = net.parameters[:]
        for ri in range(nr):
            arrv = net.sample (nt)
            obs = arrv.subset_by_task (0.25)
            ini = net.gibbs_initialize (obs)
            estimation.sem (net, ini, 0, 100)
            print "MU ", net.parameters
            net.parameters = theta0[:]
        print "TRU ", theta0

    def test_bayes (self):
        # Bayesian estimation; the reporter cross-checks the running log
        # probability against a from-scratch recomputation each iteration.
        sampling.set_seed (67826)
        # net = self.twoq
        net = self.oneq
        nr = 1
        nt = 100
        theta0 = net.parameters[:]

        def reporter (net, arrv, iter, lp):
            lp_scratch = net.log_prob(arrv)
            assert abs(lp - lp_scratch) < 1e-10, \
                "Mismatch LP. Running total %.10f from scratch %.10f" % (lp, lp_scratch)
            # dump the sampled arrivals every 10 iterations
            if 0 == (iter % 10):
                f = open ("ps_test_bayes_%d.txt" % iter, "w")
                arrv.write_csv(f)
                f.close()

        for ri in range(nr):
            arrv = net.sample (nt)
            obs = arrv.subset_by_task (0.25)
            ini = net.gibbs_initialize (obs)
            estimation.bayes (net, ini, 100, report_fn=reporter)
            print "MU ", net.parameters
            net.parameters = theta0[:]
        print "TRUE ", theta0

    def test_ps_stationary (self):
        # Slice resampling with everything observed should leave the sampler
        # at stationarity; compare averaged service stats to true parameters.
        nr = 50
        nt = 50
        net = self.twoq
        allmu = numpy.zeros (len(net.parameters))
        allmax = numpy.zeros (len(net.parameters))
        for i in range (nr):
            arrv = net.sample (nt)
            obs = arrv.subset_by_task (0.0)
            net.slice_resample (arrv, 10)
            mu = numpy.array (qstats.mean_service (arrv))
            this_max = numpy.array (qstats.max_service (arrv))
            print "MU", mu
            allmu += mu
            allmax += this_max
        avg = allmu / nr
        print "AVG", avg
        print "TRU", net.parameters
        print "MAX", allmax / nr

    def test_ps_likelihood (self):
        # Inspect the Gibbs inner density for one hidden departure against
        # the exponential log-pdf it should track (printed, not asserted).
        sampling.set_seed (23134)
        net = self.oneq
        net.parameters = [ 10.0, 0.1 ]
        nt = 10
        arrv = net.sample(nt)
        tid = 3
        evts = arrv.events_of_task (tid)
        e1 = evts[1]
        e1.obs_d = 0
        lp0 = net.log_prob (arrv)
        print arrv
        print "LP0", lp0
        dexp = distributions.Exponential (net.parameters[1])
        gibbs = qnet.GGkGibbs (net, arrv, e1, lp0)
        l = e1.a
        u = e1.a + 3.0
        diff = gibbs.inner_dfun(l) - dexp.lpdf(0)
        for i in range(10):
            x = l + 0.1*i*(u-l)
            gval = gibbs.inner_dfun(x)
            print "%.10f %.10f %.10f %.10f" % (x, gval, gval - diff, dexp.lpdf(x-l))

    def test_zero_s (self):
        # A zero service time must be handled; the likelihood delta for
        # moving the departure must agree with the direct log-prob.
        sampling.set_seed (23134)
        net = self.oneq
        arrv = net.sample (1)
        e1 = arrv.event (1)
        q1 = e1.queue()
        print net.parameters
        print arrv
        e1.d = e1.a
        e1.s = 0
        lp0 = net.log_prob (arrv)
        # NOTE(review): assertAlmostEquals is a deprecated unittest alias.
        self.assertAlmostEquals (-1.47313356106, lp0, 5)
        e1.d = e1.a + 1.
        e1.s = 1.
        dl = q1.pyDiffListForDeparture (e1, e1.a)
        lp1 = net.log_prob (arrv)
        dlik = q1.likelihoodDelta(arrv, dl)
        print arrv
        print lp1, dlik
        self.assertAlmostEquals (-1.47313356106, lp1 + dlik, 5)

    def test_likelihood_delta (self):
        # Incremental likelihoodDelta must match recomputing log_prob on a
        # duplicated arrival set with the diff applied, for several shifts.
        sampling.set_seed (23134)
        net = self.oneq
        net.parameters = [ 10.0, 5.0 ]
        nt = 10
        tid = 3
        arrv = net.sample(nt)
        evts = arrv.events_of_task (tid)
        e1 = evts[1]
        e1.obs_d = 0
        q1 = e1.queue()
        lp0 = net.log_prob (arrv)
        print arrv
        d0 = e1.d
        deltas = [-0.005, 0.0, 0.1, 0.5, 1.0]
        for delta in deltas:
            d_new = d0 + delta
            dl = q1.pyDiffListForDeparture (e1, d_new)
            dlik = q1.likelihoodDelta(arrv, dl)
            lik_a = lp0 + dlik
            a2 = arrv.duplicate()
            dl = q1.pyDiffListForDeparture (e1, d_new)
            a2.applyDiffList (dl)
            lik_b = net.log_prob (a2)
            print a2
            print lik_a, lik_b
            self.assertTrue (abs(lik_b - lik_a) < 1e-5)

    # mainly for profiling atm
    def test_ps_em (self):
        nt = 600
        niter = 2
        net = self.twoq
        arrv = net.sample (nt)
        obs = arrv.subset_by_task (0.5)
        estimation.sem (net, obs, 0, niter)
        print net.parameters

    def setUp (self):
        # Build the fixture networks from the module-level YAML descriptions.
        self.oneq = qnetu.qnet_from_text (oneq_text)
        self.twoq = qnetu.qnet_from_text (twoq_text)
oneq_text = """
states:
- name: I0
queues: [I0]
successors: [S1]
- name: S1
queues: [Q0]
queues:
- { name: I0, service: [M, 1.0] }
- { name: Q0, service: [M, 0.5], type: PS }
"""
twoq_text = """
states:
- name: I0
queues: [I0]
successors: [S1]
- name: S1
queues: [Q1]
successors: [S2]
- name: S2
queues: [Q2]
queues:
- { name: I0, service: [M, 1.0] }
- { name: Q1, service: [M, 0.5], type: PS }
- { name: Q2, service: [M, 0.5], type: PS }
"""
def main():
    """Run named TestPS tests given on the command line, else run them all."""
    requested = sys.argv[1:]
    if not requested:
        unittest.main()
        return
    for test_name in requested:
        suite = unittest.TestLoader().loadTestsFromName("test_ps.TestPS.%s" % (test_name,))
        unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == "__main__":
    main()
|
166230
|
import torch
from torch import nn
from torch.nn import functional as F
import math
class Network(nn.Module):
    """Q-network head combining CNN image features with a vector input.

    The image passes through `cnn_module` and a linear projection; the vector
    input gets its own linear layer; both are concatenated, ReLU-activated
    and fed to an advantage head (and, when `dueling`, a value head combined
    via the standard dueling aggregation).
    """

    def __init__(self, num_actions, image_channels, vec_size, cnn_module, hidden_size=256,
                 dueling=True, double_channels=False):
        super().__init__()
        self.num_actions = num_actions
        self.dueling = dueling
        # Image branch: CNN backbone plus projection to hidden_size.
        # (Submodule creation order is kept stable for RNG reproducibility.)
        self.cnn = cnn_module(image_channels)
        self.conv_output_size = self.cnn.output_size
        self.fc_im = nn.Linear(self.conv_output_size, hidden_size)
        # Vector branch; its width doubles when requested.
        vec_channel_size = 256 if double_channels else 128
        self.fc_vec = nn.Linear(vec_size, vec_channel_size)
        joint_size = hidden_size + vec_channel_size
        self.fc_h_a = nn.Linear(joint_size, hidden_size)
        self.fc_a = nn.Linear(hidden_size, num_actions)
        if self.dueling:
            self.fc_h_v = nn.Linear(joint_size, hidden_size)
            self.fc_v = nn.Linear(hidden_size, 1)

    def forward(self, x, vec):
        feats = self.cnn(x)
        feats = self.fc_im(feats.view(-1, self.conv_output_size))
        joint = F.relu(torch.cat((feats, self.fc_vec(vec)), 1))
        advantage = self.fc_a(F.relu(self.fc_h_a(joint)))
        if not self.dueling:
            return advantage
        # Dueling aggregation: Q = V + A - mean(A)
        value = self.fc_v(F.relu(self.fc_h_v(joint)))
        return value + advantage - advantage.mean(1, keepdim=True)
class AtariCNN(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.conv_layers = nn.Sequential(nn.Conv2d(input_channels, 32, 8, stride=4, padding=0),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=0),
nn.ReLU())
self.output_size = 64 * 4 * 4
def forward(self, x):
return self.conv_layers(x)
class ImpalaResNetCNN(nn.Module):
class _ImpalaResidual(nn.Module):
def __init__(self, depth):
super().__init__()
self.conv1 = nn.Conv2d(depth, depth, 3, padding=1)
self.conv2 = nn.Conv2d(depth, depth, 3, padding=1)
def forward(self, x):
out = F.relu(x)
out = self.conv1(out)
out = F.relu(out)
out = self.conv2(out)
return out + x
def __init__(self, input_channels):
super().__init__()
depth_in = input_channels
layers = []
for depth_out in [32, 64, 64]:
layers.extend([
nn.Conv2d(depth_in, depth_out, 3, padding=1),
nn.MaxPool2d(3, stride=2, padding=1),
self._ImpalaResidual(depth_out),
self._ImpalaResidual(depth_out),
])
depth_in = depth_out
self.conv_layers = nn.Sequential(*layers, nn.ReLU())
self.output_size = math.ceil(64 / 8) ** 2 * depth_in
def forward(self, x):
return self.conv_layers(x)
class FixupResNetCNN(nn.Module):
    """source: https://github.com/unixpickle/obs-tower2/blob/master/obs_tower2/model.py"""

    class _FixupResidual(nn.Module):
        # Residual block with Fixup initialization: no normalization layers;
        # instead the first conv's weights are scaled by 1/sqrt(num_residual)
        # and the second conv starts at zero, with learned per-channel biases
        # inserted around each conv and a learned output scale.
        def __init__(self, depth, num_residual):
            super().__init__()
            self.conv1 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)
            self.conv2 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)
            # Fixup init: downscale conv1, zero conv2 so the block starts as
            # the identity mapping.
            for p in self.conv1.parameters():
                p.data.mul_(1 / math.sqrt(num_residual))
            for p in self.conv2.parameters():
                p.data.zero_()
            self.bias1 = nn.Parameter(torch.zeros([depth, 1, 1]))
            self.bias2 = nn.Parameter(torch.zeros([depth, 1, 1]))
            self.bias3 = nn.Parameter(torch.zeros([depth, 1, 1]))
            self.bias4 = nn.Parameter(torch.zeros([depth, 1, 1]))
            self.scale = nn.Parameter(torch.ones([depth, 1, 1]))

        def forward(self, x):
            x = F.relu(x)
            out = x + self.bias1
            out = self.conv1(out)
            out = out + self.bias2
            out = F.relu(out)
            out = out + self.bias3
            out = self.conv2(out)
            out = out * self.scale
            out = out + self.bias4
            return out + x

    def __init__(self, input_channels, double_channels=False):
        super().__init__()
        depth_in = input_channels
        layers = []
        # Three pooled stages (each halves spatial size) of two residual
        # blocks; widths double when double_channels is set.
        if not double_channels:
            channel_sizes = [32, 64, 64]
        else:
            channel_sizes = [64, 128, 128]
        for depth_out in channel_sizes:
            layers.extend([
                nn.Conv2d(depth_in, depth_out, 3, padding=1),
                nn.MaxPool2d(3, stride=2, padding=1),
                self._FixupResidual(depth_out, 8),
                self._FixupResidual(depth_out, 8),
            ])
            depth_in = depth_out
        # Two extra residual blocks at the final depth.
        layers.extend([
            self._FixupResidual(depth_in, 8),
            self._FixupResidual(depth_in, 8),
        ])
        self.conv_layers = nn.Sequential(*layers, nn.ReLU())
        # Assumes 64x64 input (three stride-2 pools -> 8x8 map) — TODO confirm
        self.output_size = math.ceil(64 / 8) ** 2 * depth_in

    def forward(self, x):
        return self.conv_layers(x)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.