| id | content |
|---|---|
198084
|
import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
class SegmentationBase(Dataset):
def __init__(self,
data_csv, data_root, segmentation_root,
size=None, random_crop=False, interpolation="bicubic",
n_labels=182, shift_segmentation=False,
):
self.n_labels = n_labels
self.shift_segmentation = shift_segmentation
self.data_csv = data_csv
self.data_root = data_root
self.segmentation_root = segmentation_root
with open(self.data_csv, "r") as f:
self.image_paths = f.read().splitlines()
self._length = len(self.image_paths)
self.labels = {
"relative_file_path_": [l for l in self.image_paths],
"file_path_": [os.path.join(self.data_root, l)
for l in self.image_paths],
"segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png"))
for l in self.image_paths]
}
        size = None if size is not None and size <= 0 else size
self.size = size
if self.size is not None:
            self.interpolation = {
                "nearest": cv2.INTER_NEAREST,
                "bilinear": cv2.INTER_LINEAR,
                "bicubic": cv2.INTER_CUBIC,
                "area": cv2.INTER_AREA,
                "lanczos": cv2.INTER_LANCZOS4}[interpolation]
self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=self.interpolation)
self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=cv2.INTER_NEAREST)
self.center_crop = not random_crop
if self.center_crop:
self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
else:
self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
self.preprocessor = self.cropper
def __len__(self):
return self._length
def __getitem__(self, i):
example = dict((k, self.labels[k][i]) for k in self.labels)
image = Image.open(example["file_path_"])
if not image.mode == "RGB":
image = image.convert("RGB")
image = np.array(image).astype(np.uint8)
if self.size is not None:
image = self.image_rescaler(image=image)["image"]
segmentation = Image.open(example["segmentation_path_"])
assert segmentation.mode == "L", segmentation.mode
segmentation = np.array(segmentation).astype(np.uint8)
if self.shift_segmentation:
# used to support segmentations containing unlabeled==255 label
segmentation = segmentation+1
if self.size is not None:
segmentation = self.segmentation_rescaler(image=segmentation)["image"]
if self.size is not None:
processed = self.preprocessor(image=image,
mask=segmentation
)
else:
processed = {"image": image,
"mask": segmentation
}
example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
segmentation = processed["mask"]
onehot = np.eye(self.n_labels)[segmentation]
example["segmentation"] = onehot
return example
class Examples(SegmentationBase):
def __init__(self, size=None, random_crop=False, interpolation="bicubic"):
super().__init__(data_csv="data/sflckr_examples.txt",
data_root="data/sflckr_images",
segmentation_root="data/sflckr_segmentations",
size=size, random_crop=random_crop, interpolation=interpolation)
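# Usage sketch (assumes the default data files listed above exist on disk):
# dataset = Examples(size=256)
# ex = dataset[0]
# ex["image"]        -> float32 array of shape (256, 256, 3), scaled to [-1, 1]
# ex["segmentation"] -> one-hot label map of shape (256, 256, 182)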
|
198085
|
from sparcur.utils import want_prefixes
from pyontutils.core import OntId, OntTerm  # assumed import; OntId/OntTerm are used below but were not imported
import rdflib
import requests
mis = rdflib.Graph().parse(data=requests.get('https://cassava.ucsd.edu/sparc/exports/curation-export.ttl').content, format='turtle')
graph = mis
def reformat(ot):
return [ot.label if hasattr(ot, 'label') and ot.label else '', ot.curie]
objects = set()
skipped_prefixes = set()
for t in graph:
for e in t:
if isinstance(e, rdflib.URIRef):
oid = OntId(e)
if oid.prefix in want_prefixes + ('tech', 'unit'):
objects.add(oid)
else:
skipped_prefixes.add(oid.prefix)
objs = [OntTerm(o) if o.prefix not in ('TEMP', 'sparc') or
o.prefix == 'TEMP' and o.suffix.isdigit() else
o for o in objects]
term_sets = {title:[o for o in objs if o.prefix == prefix]
for prefix, title in
(('NCBITaxon', 'Species'),
('UBERON', 'Anatomy and age category'), # FIXME
('FMA', 'Anatomy (FMA)'),
('PATO', 'Qualities'),
('tech', 'Techniques'),
('unit', 'Units'),
('sparc', 'MIS terms'),
('TEMP', 'Suggested terms'),
)}
term_sets['Other'] = set(objs) - set(ot for v in term_sets.values() for ot in v)
trows = []
for title, terms in term_sets.items():
rows = [reformat(ot) for ot in
sorted(terms,
key=lambda ot: (ot.prefix, ot.label.lower()
if hasattr(ot, 'label') and ot.label else ''))]
    trows.extend(rows)
# organize by string putting nulls in the back
rows = sorted(trows, key=lambda x: (str(x[0]).strip() in ['None', ''], x[0].lower()))
for label, curie in rows:
    # some labels will be empty strings ''
    print(label + '\t' + curie)
|
198097
|
from django.contrib import admin
from nonrelated_inlines.admin import NonrelatedStackedInline
from .models import Customer, Invoice
class CustomerInvoiceStackedInline(NonrelatedStackedInline):
model = Invoice
fields = [
'id',
'amount'
]
def get_form_queryset(self, obj):
return self.model.objects.filter(email=obj.email)
def save_new_instance(self, parent, instance):
instance.email = parent.email
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
list_display = ['email', 'name']
inlines = [
CustomerInvoiceStackedInline
]
@admin.register(Invoice)
class InvoiceAdmin(admin.ModelAdmin):
list_display = ['id', 'email', 'amount']
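# Sketch of the models this admin assumes (hypothetical field definitions; the real
# ones live in .models). Invoice has no ForeignKey to Customer, only a shared email
# field, which is why NonrelatedStackedInline is needed here:
# class Customer(models.Model):
#     email = models.EmailField()
#     name = models.CharField(max_length=255)
# class Invoice(models.Model):
#     email = models.EmailField()
#     amount = models.DecimalField(max_digits=10, decimal_places=2)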
|
198120
|
from __future__ import absolute_import
from maya import cmds
import logging
from . import maya
class Importer(maya.Importer):
display_name = 'Arnold'
plugin_name = 'mtoa.mll'
def __init__(self):
super(Importer, self).__init__()
@property
def attributes(self):
'''
material_node = cmds.shadingNode('aiStandardSurface', asShader=True)
attrs = cmds.listAttr(material_node, write=True, connectable=True)
attrs = [attr for attr in attrs if attr[-1] not in ['R', 'G', 'B', 'X', 'Y', 'Z']]
print(attrs)
cmds.delete(material_node)
'''
attrs = [
'normalCamera',
# 'aiEnableMatte',
# 'aiMatteColor',
# 'aiMatteColorA',
# 'base',
'baseColor',
# 'diffuseRoughness',
# 'specular',
'specularColor',
'specularRoughness',
# 'specularAnisotropy',
# 'specularRotation',
'metalness',
# 'transmission',
'transmissionColor',
# 'transmissionDepth',
# 'transmissionScatter',
# 'transmissionScatterAnisotropy',
# 'transmissionDispersion',
# 'transmissionExtraRoughness',
# 'transmitAovs',
# 'subsurface',
'subsurfaceColor',
# 'subsurfaceRadius',
# 'subsurfaceScale',
# 'subsurfaceAnisotropy',
# 'subsurfaceType',
# 'sheen',
# 'sheenColor',
# 'sheenRoughness',
# 'thinWalled',
# 'tangent',
# 'coat',
# 'coatColor',
# 'coatRoughness',
# 'coatAnisotropy',
# 'coatRotation',
# 'coatNormal',
# 'thinFilmThickness',
# 'emission',
'emissionColor',
'opacity',
# 'caustics',
# 'internalReflections',
# 'exitToBackground',
# 'indirectDiffuse',
# 'indirectSpecular',
# 'aovId1',
# 'id1',
# 'aovId2',
# 'id2',
# 'aovId3',
# 'id3',
# 'aovId4',
# 'id4',
# 'aovId5',
# 'id5',
# 'aovId6',
# 'id6',
# 'aovId7',
# 'id7',
# 'aovId8',
# 'id8'
]
# extra attributes:
attrs.extend([
'bump',
'displacement'
])
return attrs
def create_material(self, material_node_name, shadingengine_node_name):
material_node = self.create_node('aiStandardSurface', name=material_node_name, asShader=True)
shadingengine_node = self.create_node(
'shadingEngine',
name=shadingengine_node_name,
empty=True,
renderable=True,
noSurfaceShader=True)
out_connection = '{}.outColor'.format(material_node)
in_connection = '{}.surfaceShader'.format(shadingengine_node)
cmds.connectAttr(out_connection, in_connection, force=True)
return material_node, shadingengine_node
def connect_file(self, file_node, material_node, material_attribute):
if material_attribute == 'normalCamera':
normal_node_name = self.resolve_name('normal_node_pattern', self.current_network.material_name)
            # some users prefer the generic Maya bump2d node over the renderer-specific aiNormalMap node
if self.settings.value('maya/use_bump2d', False):
normal_node = self.create_node('bump2d', name=normal_node_name, asUtility=True)
cmds.setAttr('{}.bumpInterp'.format(normal_node), 1)
cmds.setAttr('{}.aiFlipR'.format(normal_node), 0)
cmds.setAttr('{}.aiFlipG'.format(normal_node), 0)
out_connection = '{}.outAlpha'.format(file_node)
in_connection = '{}.bumpValue'.format(normal_node)
cmds.connectAttr(out_connection, in_connection, force=True)
out_connection = '{}.outNormal'.format(normal_node)
in_connection = '{}.{}'.format(material_node, material_attribute)
cmds.connectAttr(out_connection, in_connection, force=True)
else:
normal_node = self.create_node('aiNormalMap', name=normal_node_name, asUtility=True)
out_connection = '{}.outColor'.format(file_node)
in_connection = '{}.input'.format(normal_node)
cmds.connectAttr(out_connection, in_connection, force=True)
out_connection = '{}.outValue'.format(normal_node)
in_connection = '{}.{}'.format(material_node, material_attribute)
cmds.connectAttr(out_connection, in_connection, force=True)
elif material_attribute == 'bump':
bump_node = self.create_node('bump2d', asUtility=True)
material_attribute = 'normalCamera'
out_connection = '{}.outAlpha'.format(file_node)
in_connection = '{}.bumpValue'.format(bump_node)
cmds.connectAttr(out_connection, in_connection, force=True)
out_connection = '{}.outNormal'.format(bump_node)
in_connection = '{}.{}'.format(material_node, material_attribute)
cmds.connectAttr(out_connection, in_connection, force=True)
elif material_attribute == 'displacement':
# instead of getting the shadingengine, this should actually use the
# shadingengine from the create_material function
outputs = cmds.listConnections(
'{}.outColor'.format(material_node), destination=True, source=False, type='shadingEngine')
if outputs:
shadingengine_node = outputs[-1]
out_connection = '{}.outColor'.format(file_node)
in_connection = '{}.displacementShader'.format(shadingengine_node)
cmds.connectAttr(out_connection, in_connection, force=True)
else:
if cmds.getAttr('{}.{}'.format(material_node, material_attribute), type=True) == 'float':
cmds.setAttr('{}.alphaIsLuminance'.format(file_node), True)
file_attribute = 'outAlpha'
else:
file_attribute = 'outColor'
out_connection = '{}.{}'.format(file_node, file_attribute)
in_connection = '{}.{}'.format(material_node, material_attribute)
cmds.connectAttr(out_connection, in_connection, force=True)
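# Usage sketch (node names are hypothetical; create_node and resolve_name come from
# the maya.Importer base class):
# importer = Importer()
# material, sg = importer.create_material('mySurface', 'mySurfaceSG')
# file_node = cmds.shadingNode('file', asTexture=True)
# importer.connect_file(file_node, material, 'baseColor')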
|
198124
|
import time
from django.core.validators import MinValueValidator
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from model_utils.models import SoftDeletableModel
from jobboard.helpers import BaseAction
from users.models import Member
class ActionInterview(BaseAction, models.Model):
action = models.OneToOneField('pipeline.Action',
on_delete=models.CASCADE,
null=False,
related_name='interview')
    start_date = models.DateField(default=now,
                                  help_text=_('Date from which you want to interview candidates'))
    end_date = models.DateField(blank=True,
                                null=True,
                                help_text=_('Date until which you want to interview candidates'))
    start_time = models.TimeField(default='08:00',
                                  help_text=_('Time from which you want to interview'))
    end_time = models.TimeField(blank=True,
                                null=True,
                                default='18:00',
                                help_text=_('Time until which you want to interview'))
duration = models.IntegerField(help_text=_('Interview duration'),
default=10,
validators=[
MinValueValidator(10, 'Interview duration cannot be less than 10 minutes'), ])
recruiters = models.ManyToManyField('users.Member')
def get_result_url(self, **kwargs):
pass
def get_candidate_url(self):
return reverse('candidate_interviewing', kwargs={'pk': self.id})
@property
def vacancy(self):
return self.action.pipeline.vacancy
class Meta:
abstract = False
class ScheduledMeeting(SoftDeletableModel):
# all_objects = models.Manager()
action_interview = models.ForeignKey(ActionInterview,
on_delete=models.CASCADE,
related_name='scheduled_meetings')
recruiter = models.ForeignKey(Member,
on_delete=models.CASCADE,
related_name='recruiter_scheduled_meetings')
candidate = models.ForeignKey(Member,
on_delete=models.CASCADE,
related_name='candidate_scheduled_meetings')
uuid = models.CharField(max_length=32,
blank=False,
null=False)
conf_id = models.CharField(max_length=32,
blank=False,
null=False)
link_start = models.URLField(max_length=768,
blank=False,
null=False)
link_join = models.URLField(blank=False,
null=False)
date = models.DateField(blank=False,
null=False)
time = models.TimeField(blank=False,
null=False)
@property
def vacancy(self):
return self.action_interview.action.pipeline.vacancy
def __str__(self):
return '{} {}'.format(self.date, self.time)
class Meta:
unique_together = (('action_interview', 'candidate', 'is_removed'),)
class InterviewPassed(models.Model):
interview = models.ForeignKey(ActionInterview,
on_delete=models.CASCADE,
related_name='passes')
recruiter = models.ForeignKey(Member,
on_delete=models.CASCADE,
related_name='recruiter_passed_interviews')
candidate = models.ForeignKey(Member,
on_delete=models.CASCADE,
related_name='candidate_passed_interviews')
data = JSONField(blank=True,
null=True)
date_created = models.DateTimeField(auto_now_add=True)
duration = models.DurationField(blank=True,
null=True)
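# Sketch: deriving bookable slots from an ActionInterview (assumed usage, not part of
# the models above):
# from datetime import datetime, timedelta
# def iter_slots(interview, day):
#     start = datetime.combine(day, interview.start_time)
#     end = datetime.combine(day, interview.end_time)
#     step = timedelta(minutes=interview.duration)
#     while start + step <= end:
#         yield start.time()
#         start += step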
|
198145
|
import os
import numpy
from chainer_chemistry.dataset.preprocessors import preprocess_method_dict
from chainer_chemistry import datasets as D
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
from rdkit import Chem
from tqdm import tqdm
import utils
class _CacheNamePolicy(object):
train_file_name = 'train.npz'
val_file_name = 'val.npz'
test_file_name = 'test.npz'
smiles_file_name = 'smiles.npz'
def _get_cache_directory_path(self, method, labels, prefix):
if labels:
return os.path.join(prefix, '{}_{}'.format(method, labels))
else:
return os.path.join(prefix, '{}_all'.format(method))
def __init__(self, method, labels, prefix='input'):
self.method = method
self.labels = labels
self.prefix = prefix
self.cache_dir = self._get_cache_directory_path(method, labels, prefix)
def get_train_file_path(self):
return os.path.join(self.cache_dir, self.train_file_name)
def get_val_file_path(self):
return os.path.join(self.cache_dir, self.val_file_name)
def get_test_file_path(self):
return os.path.join(self.cache_dir, self.test_file_name)
def get_smiles_path(self):
return os.path.join(self.cache_dir, self.smiles_file_name)
def create_cache_directory(self):
try:
os.makedirs(self.cache_dir)
except OSError:
if not os.path.isdir(self.cache_dir):
raise
PYRIDINE_SMILES = 'c1ccncc1'
def hassubst(mol, smart=PYRIDINE_SMILES):
return numpy.array(int(mol.HasSubstructMatch(Chem.MolFromSmarts(smart)))).astype('int32')
def load_dataset(method, labels, prefix='input'):
method = 'nfp' if 'nfp' in method else method # to deal with nfpdrop
method = 'ggnn' if 'ggnn' in method else method # to deal with ggnndrop
policy = _CacheNamePolicy(method, labels, prefix)
train_path = policy.get_train_file_path()
val_path = policy.get_val_file_path()
test_path = policy.get_test_file_path()
smiles_path = policy.get_smiles_path()
train, val, test = None, None, None
train_smiles, val_smiles, test_smiles = None, None, None
print()
if os.path.exists(policy.cache_dir):
print('load from cache {}'.format(policy.cache_dir))
train = NumpyTupleDataset.load(train_path)
val = NumpyTupleDataset.load(val_path)
test = NumpyTupleDataset.load(test_path)
train_smiles, val_smiles, test_smiles = utils.load_npz(smiles_path)
if train is None or val is None or test is None:
print('preprocessing dataset...')
preprocessor = preprocess_method_dict[method]()
if labels == 'pyridine':
train, val, test, train_smiles, val_smiles, test_smiles = D.get_tox21(
preprocessor, labels=None, return_smiles=True)
print('converting label into pyridine...')
# --- Pyridine = 1 ---
train_pyridine_label = [
hassubst(Chem.MolFromSmiles(smi), smart=PYRIDINE_SMILES) for smi in tqdm(train_smiles)]
val_pyridine_label = [
hassubst(Chem.MolFromSmiles(smi), smart=PYRIDINE_SMILES) for smi in tqdm(val_smiles)]
test_pyridine_label = [
hassubst(Chem.MolFromSmiles(smi), smart=PYRIDINE_SMILES) for smi in tqdm(test_smiles)]
train_pyridine_label = numpy.array(train_pyridine_label)[:, None]
val_pyridine_label = numpy.array(val_pyridine_label)[:, None]
test_pyridine_label = numpy.array(test_pyridine_label)[:, None]
print('train positive/negative', numpy.sum(train_pyridine_label == 1), numpy.sum(train_pyridine_label == 0))
train = NumpyTupleDataset(*train.features[:, :-1], train_pyridine_label)
val = NumpyTupleDataset(*val.features[:, :-1], val_pyridine_label)
test = NumpyTupleDataset(*test.features[:, :-1], test_pyridine_label)
else:
train, val, test, train_smiles, val_smiles, test_smiles = D.get_tox21(
preprocessor, labels=labels, return_smiles=True)
# Cache dataset
policy.create_cache_directory()
NumpyTupleDataset.save(train_path, train)
NumpyTupleDataset.save(val_path, val)
NumpyTupleDataset.save(test_path, test)
train_smiles = numpy.array(train_smiles)
val_smiles = numpy.array(val_smiles)
test_smiles = numpy.array(test_smiles)
utils.save_npz(smiles_path, (train_smiles, val_smiles, test_smiles))
return train, val, test, train_smiles, val_smiles, test_smiles
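# Usage sketch (follows the conventions above; 'pyridine' relabels tox21 molecules by
# pyridine-substructure presence, any other label is passed through to get_tox21):
# train, val, test, train_smiles, val_smiles, test_smiles = load_dataset(
#     'nfp', 'pyridine', prefix='input')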
|
198150
|
import stripe.checkout
import anvil.server
import anvil.google.auth, anvil.google.drive
from anvil.google.drive import app_files
import anvil.tables as tables
import anvil.tables.query as q
from anvil.tables import app_tables
import anvil.users
__measurements = []
__user = None
__my_ave = None
def my_measurements():
global __measurements
if __measurements:
# print("Using {} cached measurements".format(len(__measurements)))
return __measurements
__measurements = list(anvil.server.call('my_measurements'))
return __measurements
def add_measurement(record_date, weight_in_pounds, resting_heart_rate):
global __measurements
__measurements = []
anvil.server.call('add_measurement', record_date, weight_in_pounds, resting_heart_rate)
def set_details(height, gender):
global __user
anvil.server.call('set_details', height, gender)
__user = None
def the_user():
global __user
if __user:
# print("Using cached user: {}".format(__user['email']))
return __user
__user = anvil.users.get_user()
return __user
def logout():
global __user, __my_ave, __measurements
__measurements = []
__my_ave = None
__user = None
def average_for_me():
    global __my_ave
    if __my_ave:
        return __my_ave
    # NOTE: the original called 'averge_for_me'; assuming that was a typo and the
    # server function is registered as 'average_for_me'
    __my_ave = anvil.server.call('average_for_me')
    return __my_ave
def go_pro():
global __user
__user = None
anvil.server.call('go_pro')
|
198192
|
import asyncio
import tempfile
import unittest
from electrum_zcash import constants
from electrum_zcash.simple_config import SimpleConfig
from electrum_zcash import blockchain
from electrum_zcash.interface import Interface, ServerAddr
from electrum_zcash.crypto import sha256
from electrum_zcash.util import bh2u
from . import ElectrumTestCase
class MockTaskGroup:
async def spawn(self, x): return
class MockNetwork:
taskgroup = MockTaskGroup()
asyncio_loop = asyncio.get_event_loop()
class MockInterface(Interface):
def __init__(self, config):
self.config = config
network = MockNetwork()
network.config = config
super().__init__(network=network, server=ServerAddr.from_str('mock-server:50000:t'), proxy=None)
self.q = asyncio.Queue()
self.blockchain = blockchain.Blockchain(config=self.config, forkpoint=0,
parent=None, forkpoint_hash=constants.net.GENESIS, prev_hash=None)
self.tip = 12
self.blockchain._size = self.tip + 1
async def get_block_header(self, height, assert_mode):
assert self.q.qsize() > 0, (height, assert_mode)
item = await self.q.get()
print("step with height", height, item)
assert item['block_height'] == height, (item['block_height'], height)
assert assert_mode in item['mock'], (assert_mode, item)
return item
class TestNetwork(ElectrumTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_regtest()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
def setUp(self):
super().setUp()
self.config = SimpleConfig({'electrum_path': self.electrum_path})
self.interface = MockInterface(self.config)
def test_fork_noconflict(self):
blockchain.blockchains = {}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
def mock_connect(height):
return height == 6
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1,'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1,'check':lambda x: True, 'connect': lambda x: False}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 5, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 6, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('fork', 8), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=7)))
self.assertEqual(self.interface.q.qsize(), 0)
def test_fork_conflict(self):
blockchain.blockchains = {7: {'check': lambda bad_header: False}}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
def mock_connect(height):
return height == 6
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1,'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1,'check':lambda x: True, 'connect': lambda x: False}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 5, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 6, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('fork', 8), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=7)))
self.assertEqual(self.interface.q.qsize(), 0)
def test_can_connect_during_backward(self):
blockchain.blockchains = {}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
def mock_connect(height):
return height == 2
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1, 'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1, 'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 3, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('catchup', 5), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=4)))
self.assertEqual(self.interface.q.qsize(), 0)
def mock_fork(self, bad_header):
forkpoint = bad_header['block_height']
b = blockchain.Blockchain(config=self.config, forkpoint=forkpoint, parent=None,
forkpoint_hash=bh2u(sha256(str(forkpoint))), prev_hash=bh2u(sha256(str(forkpoint-1))))
return b
def test_chain_false_during_binary(self):
blockchain.blockchains = {}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
mock_connect = lambda height: height == 3
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1, 'check': lambda x: False, 'connect': mock_connect}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1, 'check': lambda x: True, 'connect': mock_connect}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary':1, 'check': lambda x: False, 'fork': self.mock_fork, 'connect': mock_connect}})
self.interface.q.put_nowait({'block_height': 3, 'mock': {'binary':1, 'check': lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 5, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 6, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('catchup', 7), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=6)))
self.assertEqual(self.interface.q.qsize(), 0)
if __name__=="__main__":
constants.set_regtest()
unittest.main()
|
198233
|
from tensorflow.keras.layers import Layer
# from keras.layers import Layer
import tensorflow as tf
from util_graphs import trim_padding_boxes, normalize_boxes, shrink_and_project_boxes
from losses import focal, iou
class MetaSelectInput(Layer):
def __init__(self, strides=(8, 16, 32, 64, 128), pool_size=7, **kwargs):
self.strides = strides
self.pool_size = pool_size
super(MetaSelectInput, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
batch_gt_boxes = inputs[0][..., :4]
list_batch_fms = inputs[1: 1 + len(self.strides)]
batch_size = tf.shape(batch_gt_boxes)[0]
max_gt_boxes = tf.shape(batch_gt_boxes)[1]
gt_boxes_batch_ids = tf.tile(tf.expand_dims(tf.range(batch_size), axis=-1), (1, max_gt_boxes))
gt_boxes_batch_ids = tf.reshape(gt_boxes_batch_ids, (-1,))
batch_gt_boxes = tf.reshape(batch_gt_boxes, (-1, tf.shape(batch_gt_boxes)[-1]))
# (total_num_gt_boxes, )
gt_boxes, non_zeros = trim_padding_boxes(batch_gt_boxes)
gt_boxes_batch_ids = tf.boolean_mask(gt_boxes_batch_ids, non_zeros)
rois_from_fms = []
for i, batch_fm in enumerate(list_batch_fms):
stride = tf.constant(self.strides[i], dtype=tf.float32)
fm_height = tf.cast(tf.shape(batch_fm)[1], dtype=tf.float32)
fm_width = tf.cast(tf.shape(batch_fm)[2], dtype=tf.float32)
normed_gt_boxes = normalize_boxes(gt_boxes, width=fm_width, height=fm_height, stride=stride)
rois = tf.image.crop_and_resize(batch_fm, normed_gt_boxes, gt_boxes_batch_ids,
(self.pool_size, self.pool_size))
rois_from_fms.append(rois)
rois = tf.concat(rois_from_fms, axis=-1)
return rois, gt_boxes_batch_ids
def compute_output_shape(self, input_shape):
return [[None, self.pool_size, self.pool_size, None], [None, ]]
def get_config(self):
"""
Gets the configuration of this layer.
Returns
Dictionary containing the parameters of this layer.
"""
config = super(MetaSelectInput, self).get_config()
config.update(strides=self.strides, pool_size=self.pool_size)
return config
def build_meta_select_target(cls_pred, regr_pred, gt_boxes, feature_shapes, strides, shrink_ratio=0.2):
gt_labels = tf.cast(gt_boxes[:, 4], tf.int32)
gt_boxes = gt_boxes[:, :4]
max_gt_boxes = tf.shape(gt_boxes)[0]
focal_loss = focal()
iou_loss = iou()
gt_boxes, non_zeros = trim_padding_boxes(gt_boxes)
num_gt_boxes = tf.shape(gt_boxes)[0]
gt_labels = tf.boolean_mask(gt_labels, non_zeros)
level_losses = []
for level_id in range(len(strides)):
stride = strides[level_id]
fh = feature_shapes[level_id][0]
fw = feature_shapes[level_id][1]
fa = tf.reduce_prod(feature_shapes, axis=-1)
start_idx = tf.reduce_sum(fa[:level_id])
end_idx = start_idx + fh * fw
cls_pred_i = tf.reshape(cls_pred[start_idx:end_idx], (fh, fw, tf.shape(cls_pred)[-1]))
regr_pred_i = tf.reshape(regr_pred[start_idx:end_idx], (fh, fw, tf.shape(regr_pred)[-1]))
# (num_gt_boxes, )
x1, y1, x2, y2 = shrink_and_project_boxes(gt_boxes, fw, fh, stride, shrink_ratio=shrink_ratio)
def compute_gt_box_loss(args):
x1_ = args[0]
y1_ = args[1]
x2_ = args[2]
y2_ = args[3]
gt_box = args[4]
gt_label = args[5]
def do_match_pixels_in_level():
locs_cls_pred_i = cls_pred_i[y1_:y2_, x1_:x2_, :]
locs_cls_pred_i = tf.reshape(locs_cls_pred_i, (-1, tf.shape(locs_cls_pred_i)[-1]))
locs_cls_true_i = tf.zeros_like(locs_cls_pred_i)
gt_label_col = tf.ones_like(locs_cls_true_i[:, 0:1])
locs_cls_true_i = tf.concat([locs_cls_true_i[:, :gt_label],
gt_label_col,
locs_cls_true_i[:, gt_label + 1:],
], axis=-1)
loss_cls = focal_loss(tf.expand_dims(locs_cls_true_i, axis=0), tf.expand_dims(locs_cls_pred_i, axis=0))
locs_regr_pred_i = regr_pred_i[y1_:y2_, x1_:x2_, :]
locs_regr_pred_i = tf.reshape(locs_regr_pred_i, (-1, tf.shape(locs_regr_pred_i)[-1]))
locs_x = tf.cast(tf.range(x1_, x2_), dtype=tf.float32)
locs_y = tf.cast(tf.range(y1_, y2_), dtype=tf.float32)
shift_x = (locs_x + 0.5) * stride
shift_y = (locs_y + 0.5) * stride
shift_xx, shift_yy = tf.meshgrid(shift_x, shift_y)
shift_xx = tf.reshape(shift_xx, (-1,))
shift_yy = tf.reshape(shift_yy, (-1,))
shifts = tf.stack((shift_xx, shift_yy, shift_xx, shift_yy), axis=-1)
l = tf.maximum(shifts[:, 0] - gt_box[0], 0)
t = tf.maximum(shifts[:, 1] - gt_box[1], 0)
r = tf.maximum(gt_box[2] - shifts[:, 2], 0)
b = tf.maximum(gt_box[3] - shifts[:, 3], 0)
locs_regr_true_i = tf.stack([l, t, r, b], axis=-1)
locs_regr_true_i = locs_regr_true_i / 4.0 / stride
loss_regr = iou_loss(tf.expand_dims(locs_regr_true_i, axis=0), tf.expand_dims(locs_regr_pred_i, axis=0))
return loss_cls + loss_regr
def do_not_match_pixels_in_level():
box_loss = tf.constant(1e7, dtype=tf.float32)
return box_loss
level_box_loss = tf.cond(
tf.equal(tf.cast(x1_, tf.int32), tf.cast(x2_, tf.int32)) |
tf.equal(tf.cast(y1_, tf.int32), tf.cast(y2_, tf.int32)),
do_not_match_pixels_in_level,
do_match_pixels_in_level
)
return level_box_loss
level_loss = tf.map_fn(
compute_gt_box_loss,
elems=[x1, y1, x2, y2, gt_boxes, gt_labels],
dtype=tf.float32
)
level_losses.append(level_loss)
losses = tf.stack(level_losses, axis=-1)
gt_box_levels = tf.argmin(losses, axis=-1, output_type=tf.int32)
padding_gt_box_levels = tf.ones((max_gt_boxes - num_gt_boxes), dtype=tf.int32) * -1
gt_box_levels = tf.concat([gt_box_levels, padding_gt_box_levels], axis=0)
return gt_box_levels
class MetaSelectTarget(Layer):
def __init__(self, strides=(8, 16, 32, 64, 128), shrink_ratio=0.2, **kwargs):
self.strides = strides
self.shrink_ratio = shrink_ratio
super(MetaSelectTarget, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
batch_cls_pred = inputs[0]
batch_regr_pred = inputs[1]
feature_shapes = inputs[2][0]
batch_gt_boxes = inputs[3]
def _build_meta_select_target(args):
cls_pred = args[0]
regr_pred = args[1]
gt_boxes = args[2]
return build_meta_select_target(
cls_pred,
regr_pred,
gt_boxes,
feature_shapes=feature_shapes,
strides=self.strides,
shrink_ratio=self.shrink_ratio,
)
# (b, MAX_GT_BOXES)
batch_box_levels = tf.map_fn(
_build_meta_select_target,
elems=[batch_cls_pred, batch_regr_pred, batch_gt_boxes],
dtype=tf.int32,
)
batch_box_levels = tf.reshape(batch_box_levels, (-1,))
mask = tf.not_equal(batch_box_levels, -1)
valid_box_levels = tf.boolean_mask(batch_box_levels, mask)
return valid_box_levels
def compute_output_shape(self, input_shape):
return None,
def get_config(self):
"""
Gets the configuration of this layer.
Returns
Dictionary containing the parameters of this layer.
"""
config = super(MetaSelectTarget, self).get_config()
config.update(strides=self.strides, shrink_ratio=self.shrink_ratio)
return config
class MetaSelectWeight(Layer):
def __init__(self, max_gt_boxes=100, soft_select=True, batch_size=32, **kwargs):
self.max_gt_boxes = max_gt_boxes
self.soft_select = soft_select
self.batch_size = batch_size
super(MetaSelectWeight, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
if self.soft_select:
gt_boxes_select_weight = inputs[0]
else:
gt_boxes_select_weight = tf.one_hot(inputs[0], 5)
gt_boxes_batch_ids = inputs[1]
# (b, 1) --> (b, )
batch_num_gt_boxes = inputs[2][:, 0]
batch_select_weight = []
for i in range(self.batch_size):
batch_item_select_weight = tf.boolean_mask(gt_boxes_select_weight, tf.equal(gt_boxes_batch_ids, i))
pad_top_bot = tf.stack([tf.constant(0), self.max_gt_boxes - batch_num_gt_boxes[i]], axis=0)
pad = tf.stack([pad_top_bot, tf.constant([0, 0])], axis=0)
batch_select_weight.append(tf.pad(batch_item_select_weight, pad, constant_values=-1))
batch_select_weight = tf.stack(batch_select_weight, axis=0)
return batch_select_weight
def compute_output_shape(self, input_shapes):
return input_shapes[1][0], self.max_gt_boxes, 5
def get_config(self):
base_config = super(MetaSelectWeight, self).get_config()
        base_config.update(max_gt_boxes=self.max_gt_boxes, soft_select=self.soft_select,
                           batch_size=self.batch_size)
return base_config
def build_sapd_target(gt_boxes, meta_select_weight, fm_shapes, num_classes, strides, shrink_ratio=0.2):
gt_labels = tf.cast(gt_boxes[:, 4], tf.int32)
gt_boxes = gt_boxes[:, :4]
gt_boxes, non_zeros = trim_padding_boxes(gt_boxes)
gt_labels = tf.boolean_mask(gt_labels, non_zeros)
meta_select_weight = tf.boolean_mask(meta_select_weight, non_zeros)
def do_have_gt_boxes():
cls_target = tf.zeros((0, num_classes + 1 + 1), dtype=tf.float32)
regr_target = tf.zeros((0, 4 + 1 + 1), dtype=tf.float32)
for level_id in range(len(strides)):
level_meta_select_weight = meta_select_weight[:, level_id]
fm_shape = fm_shapes[level_id]
stride = strides[level_id]
fh = fm_shape[0]
fw = fm_shape[1]
pos_x1, pos_y1, pos_x2, pos_y2 = shrink_and_project_boxes(gt_boxes, fw, fh, stride, shrink_ratio)
def build_single_gt_box_sapd_target(args):
pos_x1_ = args[0]
pos_y1_ = args[1]
pos_x2_ = args[2]
pos_y2_ = args[3]
gt_box = args[4]
gt_label = args[5]
level_box_meta_select_weight = args[6]
level_pos_box_cls_target = tf.zeros((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, num_classes),
dtype=tf.float32)
level_pos_box_gt_label_col = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, 1),
dtype=tf.float32)
level_pos_box_cls_target = tf.concat((level_pos_box_cls_target[..., :gt_label],
level_pos_box_gt_label_col,
level_pos_box_cls_target[..., gt_label + 1:]), axis=-1)
neg_top_bot = tf.stack((pos_y1_, fh - pos_y2_), axis=0)
neg_lef_rit = tf.stack((pos_x1_, fw - pos_x2_), axis=0)
neg_pad = tf.stack([neg_top_bot, neg_lef_rit], axis=0)
level_box_cls_target = tf.pad(level_pos_box_cls_target,
tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0))
pos_locs_x = tf.cast(tf.range(pos_x1_, pos_x2_), dtype=tf.float32)
pos_locs_y = tf.cast(tf.range(pos_y1_, pos_y2_), dtype=tf.float32)
pos_shift_x = (pos_locs_x + 0.5) * stride
pos_shift_y = (pos_locs_y + 0.5) * stride
pos_shift_xx, pos_shift_yy = tf.meshgrid(pos_shift_x, pos_shift_y)
pos_shifts = tf.stack((pos_shift_xx, pos_shift_yy, pos_shift_xx, pos_shift_yy), axis=-1)
dl = tf.maximum(pos_shifts[:, :, 0] - gt_box[0], 0)
dt = tf.maximum(pos_shifts[:, :, 1] - gt_box[1], 0)
dr = tf.maximum(gt_box[2] - pos_shifts[:, :, 2], 0)
db = tf.maximum(gt_box[3] - pos_shifts[:, :, 3], 0)
deltas = tf.stack((dl, dt, dr, db), axis=-1)
level_box_regr_pos_target = deltas / 4.0 / stride
                level_pos_box_ap_weight = (tf.minimum(dl, dr) * tf.minimum(dt, db) /
                                           tf.maximum(dl, dr) / tf.maximum(dt, db))
level_pos_box_soft_weight = level_pos_box_ap_weight * level_box_meta_select_weight
level_box_soft_weight = tf.pad(level_pos_box_soft_weight, neg_pad, constant_values=1.)
level_pos_box_regr_mask = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_))
level_box_regr_mask = tf.pad(level_pos_box_regr_mask, neg_pad)
level_box_regr_target = tf.pad(level_box_regr_pos_target,
tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0))
level_box_cls_target = tf.concat([level_box_cls_target, level_box_soft_weight[..., None],
level_box_regr_mask[..., None]], axis=-1)
level_box_regr_target = tf.concat([level_box_regr_target, level_box_soft_weight[..., None],
level_box_regr_mask[..., None]], axis=-1)
level_box_pos_area = (dl + dr) * (dt + db)
level_box_area = tf.pad(level_box_pos_area, neg_pad, constant_values=1e7)
return level_box_cls_target, level_box_regr_target, level_box_area
level_cls_target, level_regr_target, level_area = tf.map_fn(
build_single_gt_box_sapd_target,
elems=[pos_x1, pos_y1, pos_x2, pos_y2, gt_boxes, gt_labels, level_meta_select_weight],
dtype=(tf.float32, tf.float32, tf.float32)
)
level_min_area_box_indices = tf.argmin(level_area, axis=0, output_type=tf.int32)
level_min_area_box_indices = tf.reshape(level_min_area_box_indices, (-1,))
# (fw, )
locs_x = tf.range(0, fw)
# (fh, )
locs_y = tf.range(0, fh)
# (fh, fw), (fh, fw)
locs_xx, locs_yy = tf.meshgrid(locs_x, locs_y)
locs_xx = tf.reshape(locs_xx, (-1,))
locs_yy = tf.reshape(locs_yy, (-1,))
# (fh * fw, 3)
level_indices = tf.stack((level_min_area_box_indices, locs_yy, locs_xx), axis=-1)
level_cls_target = tf.gather_nd(level_cls_target, level_indices)
level_regr_target = tf.gather_nd(level_regr_target, level_indices)
cls_target = tf.concat([cls_target, level_cls_target], axis=0)
regr_target = tf.concat([regr_target, level_regr_target], axis=0)
return [cls_target, regr_target]
def do_not_have_gt_boxes():
fa = tf.reduce_prod(fm_shapes, axis=-1)
fa_sum = tf.reduce_sum(fa)
cls_target = tf.zeros((fa_sum, num_classes))
regr_target = tf.zeros((fa_sum, 4))
weight = tf.ones((fa_sum, 1))
mask = tf.zeros((fa_sum, 1))
cls_target = tf.concat([cls_target, weight, mask], axis=-1)
regr_target = tf.concat([regr_target, weight, mask], axis=-1)
return [cls_target, regr_target]
cls_target, regr_target = tf.cond(
tf.not_equal(tf.size(gt_boxes), 0),
do_have_gt_boxes,
do_not_have_gt_boxes
)
return [cls_target, regr_target]
class SAPDTarget(Layer):
def __init__(self, num_classes, strides=(8, 16, 32, 64, 128), shrink_ratio=0.2, **kwargs):
self.num_classes = num_classes
self.strides = strides
self.shrink_ratio = shrink_ratio
super(SAPDTarget, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
fm_shapes = inputs[0][0]
batch_gt_boxes = inputs[1]
batch_meta_select_weight = inputs[2]
def _build_sapd_target(args):
gt_boxes = args[0]
meta_select_weight = args[1]
return build_sapd_target(
gt_boxes,
meta_select_weight,
fm_shapes=fm_shapes,
num_classes=self.num_classes,
strides=self.strides,
shrink_ratio=self.shrink_ratio,
)
outputs = tf.map_fn(
_build_sapd_target,
elems=[batch_gt_boxes, batch_meta_select_weight],
dtype=[tf.float32, tf.float32],
)
return outputs
def compute_output_shape(self, input_shape):
batch_size = input_shape[0][0]
return [[batch_size, None, self.num_classes + 1 + 1],
[batch_size, None, 4 + 1 + 1]]
def get_config(self):
"""
Gets the configuration of this layer.
Returns
Dictionary containing the parameters of this layer.
"""
config = super(SAPDTarget, self).get_config()
        config.update({'num_classes': self.num_classes,
                       'strides': self.strides,
                       'shrink_ratio': self.shrink_ratio})
return config
class Locations(Layer):
"""
Keras layer for generating anchors for a given shape.
"""
def __init__(self, strides=(8, 16, 32, 64, 128), **kwargs):
"""
Initializer for an Anchors layer.
Args
strides: The strides mapping to the feature maps.
"""
self.strides = strides
super(Locations, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
pyramid_features = inputs
feature_shapes = [tf.shape(feature)[1:3] for feature in pyramid_features]
locations_per_feature = []
strides_per_feature = []
for feature_shape, stride in zip(feature_shapes, self.strides):
fh = feature_shape[0]
fw = feature_shape[1]
shifts_x = tf.cast(tf.range(0, fw * stride, delta=stride), dtype=tf.float32)
shifts_y = tf.cast(tf.range(0, fh * stride, delta=stride), dtype=tf.float32)
shift_x, shift_y = tf.meshgrid(shifts_x, shifts_y)
# (h * w, )
shift_x = tf.reshape(shift_x, (-1,))
# (h * w, )
shift_y = tf.reshape(shift_y, (-1,))
locations = tf.stack((shift_x, shift_y), axis=1) + stride // 2
locations_per_feature.append(locations)
strides = tf.ones((fh, fw)) * stride
strides = tf.reshape(strides, (-1,))
strides_per_feature.append(strides)
# (sum(h * w), 2)
locations = tf.concat(locations_per_feature, axis=0)
# (batch, sum(h * w), 2)
locations = tf.tile(tf.expand_dims(locations, axis=0), (tf.shape(inputs[0])[0], 1, 1))
strides = tf.concat(strides_per_feature, axis=0)
strides = tf.tile(tf.expand_dims(strides, axis=0), (tf.shape(inputs[0])[0], 1))
return [locations, strides]
def compute_output_shape(self, input_shapes):
feature_shapes = [feature_shape[1:3] for feature_shape in input_shapes]
total = 1
for feature_shape in feature_shapes:
if None not in feature_shape:
total = total * feature_shape[0] * feature_shape[1]
else:
return [[input_shapes[0][0], None, 2], [input_shapes[0][0], None]]
return [[input_shapes[0][0], total, 2], [input_shapes[0][0], total]]
def get_config(self):
base_config = super(Locations, self).get_config()
base_config.update({'strides': self.strides})
return base_config
class RegressBoxes(Layer):
"""
Keras layer for applying regression values to boxes.
"""
def __init__(self, *args, **kwargs):
"""
Initializer for the RegressBoxes layer.
"""
super(RegressBoxes, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
locations, strides, regression = inputs
x1 = locations[:, :, 0] - regression[:, :, 0] * 4.0 * strides[:, :]
y1 = locations[:, :, 1] - regression[:, :, 1] * 4.0 * strides[:, :]
x2 = locations[:, :, 0] + regression[:, :, 2] * 4.0 * strides[:, :]
y2 = locations[:, :, 1] + regression[:, :, 3] * 4.0 * strides[:, :]
bboxes = tf.stack([x1, y1, x2, y2], axis=-1)
return bboxes
def compute_output_shape(self, input_shape):
return input_shape[2]
def get_config(self):
base_config = super(RegressBoxes, self).get_config()
return base_config
class ClipBoxes(Layer):
"""
Keras layer to clip box values to lie inside a given shape.
"""
def call(self, inputs, **kwargs):
image, boxes = inputs
shape = tf.cast(tf.shape(image), tf.float32)
height = shape[1]
width = shape[2]
x1 = tf.clip_by_value(boxes[:, :, 0], 0, width - 1)
y1 = tf.clip_by_value(boxes[:, :, 1], 0, height - 1)
x2 = tf.clip_by_value(boxes[:, :, 2], 0, width - 1)
y2 = tf.clip_by_value(boxes[:, :, 3], 0, height - 1)
return tf.stack([x1, y1, x2, y2], axis=2)
def compute_output_shape(self, input_shape):
return input_shape[1]
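# Inference-time wiring sketch (tensor names are hypothetical; pyramid_features,
# regression and image_input come from the surrounding detection model):
# locations, strides = Locations(strides=(8, 16, 32, 64, 128))(pyramid_features)
# boxes = RegressBoxes()([locations, strides, regression])
# boxes = ClipBoxes()([image_input, boxes])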
|
198314
|
from mock import MagicMock
import pytest
from six import string_types
from sqlalchemy.exc import OperationalError
from chainerui.database import db
from tests.helpers import assert_json_api
@pytest.fixture(autouse=True, scope='function')
def setup_mock_db():
    # do not set up a real database; stub the session so queries fail predictably
db._initialized = True
mock_session = MagicMock()
mock_session.query = MagicMock(
side_effect=OperationalError(None, None, None))
db._session = mock_session
# GET /
def test_get_index(app):
resp = app.get('/')
assert resp.status_code == 200
assert '<title>ChainerUI</title>' in resp.data.decode()
# GET /favicon.ico
def test_get_favicon(app):
resp = app.get('/favicon.ico')
assert resp.status_code == 200
# raise an exception when GET /api/v1/projects
def test_handle_invalid_usage(app):
resp = app.get('/api/v1/projects')
data = assert_json_api(resp, 400)
assert len(data) == 1
assert len(data['error']) == 2
assert isinstance(data['error']['message'], string_types)
assert 'DBOperationalError' == data['error']['type']
|
198322
|
from typing import List

class Solution:
    def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:
        # A vertex is reachable from another vertex iff its indegree is > 0, so the
        # minimal set of starting vertices is exactly those with indegree 0.
        degree = [0] * n
        for _, v in edges:
            degree[v] = 1
        return [i for i, d in enumerate(degree) if d == 0]
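# Example (LeetCode 1557): only vertices 0 and 3 have no incoming edge, so every
# other vertex is reachable from them:
# Solution().findSmallestSetOfVertices(6, [[0, 1], [0, 2], [2, 5], [3, 4], [4, 2]])
# -> [0, 3]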
|
198358
|
from pathlib import Path
import click
from lhotse.bin.modes.cli_base import cli
from lhotse.utils import Pathlike
@cli.command(name="validate")
@click.argument("manifest", type=click.Path(exists=True, dir_okay=False))
@click.option(
"--read-data/--dont-read-data",
default=False,
help="Should the audio/features data be read from disk to perform additional checks "
"(could be extremely slow for large manifests).",
)
def validate_(manifest: Pathlike, read_data: bool):
"""Validate a Lhotse manifest file."""
from lhotse import load_manifest, validate
data = load_manifest(manifest)
validate(data, read_data=read_data)
@cli.command(name="validate-pair")
@click.argument("recordings", type=click.Path(exists=True, dir_okay=False))
@click.argument("supervisions", type=click.Path(exists=True, dir_okay=False))
@click.option(
"--read-data/--dont-read-data",
default=False,
help="Should the audio/features data be read from disk to perform additional checks "
"(could be extremely slow for large manifests).",
)
def validate_pair_(recordings: Pathlike, supervisions: Pathlike, read_data: bool):
"""
Validate a pair of Lhotse RECORDINGS and SUPERVISIONS manifest files.
Checks whether the two manifests are consistent with each other.
"""
from lhotse import load_manifest, validate_recordings_and_supervisions
recs = load_manifest(recordings)
sups = load_manifest(supervisions)
validate_recordings_and_supervisions(
recordings=recs, supervisions=sups, read_data=read_data
)
@cli.command(name="fix")
@click.argument("recordings", type=click.Path(exists=True, dir_okay=False))
@click.argument("supervisions", type=click.Path(exists=True, dir_okay=False))
@click.argument("output_dir", type=click.Path())
def fix_(recordings: Pathlike, supervisions: Pathlike, output_dir: Pathlike):
"""
Fix a pair of Lhotse RECORDINGS and SUPERVISIONS manifests.
It removes supervisions without corresponding recordings and vice versa,
trims the supervisions that exceed the recording, etc.
Stores the output files in OUTPUT_DIR under the same names as the input
files.
"""
from lhotse import RecordingSet, SupervisionSet, fix_manifests
output_dir = Path(output_dir)
recordings = Path(recordings)
supervisions = Path(supervisions)
output_dir.mkdir(parents=True, exist_ok=True)
recs = RecordingSet.from_file(recordings)
sups = SupervisionSet.from_file(supervisions)
recs, sups = fix_manifests(recordings=recs, supervisions=sups)
recs.to_file(output_dir / recordings.name)
sups.to_file(output_dir / supervisions.name)
|
198366
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.contrib.rnn import LSTMStateTuple
class LSTMCell(tf.contrib.rnn.BasicRNNCell):
def __call__(self, x, state, scope="LSTM"):
with tf.variable_scope(scope):
s_old, h_old = state
gates = layers.fully_connected(
tf.concat([x, s_old], 1),
num_outputs=4 * self._num_units,
activation_fn=None)
r1, g1, g2, g3 = tf.split(gates, 4, 1)
r1, g1, g3 = tf.nn.sigmoid(r1), tf.nn.sigmoid(g1), tf.nn.sigmoid(g3)
g2 = tf.nn.tanh(g2)
h_new = h_old * r1 + g1 * g2
s_new = tf.nn.tanh(h_new) * g3
return s_new, LSTMStateTuple(s_new, h_new)
@property
def state_size(self):
return LSTMStateTuple(self._num_units, self._num_units)
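# Usage sketch (TF 1.x-era API, matching the contrib imports above; `inputs` is a
# hypothetical (batch, time, features) tensor):
# cell = LSTMCell(num_units=128)
# outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)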
|
198382
|
import requests
url = "https://h5api.m.taobao.com/h5/mtop.alimama.union.sem.landing.pc.items/1.0/?jsv=2.4.0&appKey=12574478&t=1582716745850&sign=1b91fff529136fed287df8f0056cecd6&api=mtop.alimama.union.sem.landing.pc.items&v=1.0&AntiCreep=true&dataType=jsonp&type=jsonp&ecode=0&callback=mtopjsonp2&data=%7B%22keyword%22%3A%22%E5%8D%8E%E4%B8%BA%E6%89%8B%E6%9C%BA%22%2C%22ppath%22%3A%22%22%2C%22loc%22%3A%22%22%2C%22minPrice%22%3A%22%22%2C%22maxPrice%22%3A%22%22%2C%22ismall%22%3A%22%22%2C%22ship%22%3A%22%22%2C%22itemAssurance%22%3A%22%22%2C%22exchange7%22%3A%22%22%2C%22custAssurance%22%3A%22%22%2C%22b%22%3A%22%22%2C%22clk1%22%3A%22%22%2C%22pvoff%22%3A%22%22%2C%22pageSize%22%3A%22100%22%2C%22page%22%3A%22%22%2C%22elemtid%22%3A%221%22%2C%22refpid%22%3A%22%22%2C%22pid%22%3A%22430673_1006%22%2C%22featureNames%22%3A%22spGoldMedal%2CdsrDescribe%2CdsrDescribeGap%2CdsrService%2CdsrServiceGap%2CdsrDeliver%2C%20dsrDeliverGap%22%2C%22ac%22%3A%22%22%2C%22wangwangid%22%3A%22%22%2C%22catId%22%3A%22%22%7D"
headers = {
    'cookie': '_m_h5_tk=e0c7d67a1c53c77c6b99713095604dd2_1582728834870; _m_h5_tk_enc=4dd920929127292a2f2249db13ad10c4'
}
response = requests.get(url, headers=headers)
print(response.text)
|
198417
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math
import tqdm
from .temporal_transformer_windowed import tcn_unit_attention_block
from .temporal_transformer import tcn_unit_attention
from .gcn_attention import gcn_unit_attention
from .net import Unit2D, conv_init, import_class
from .unit_gcn import unit_gcn
from .unit_agcn import unit_agcn
default_backbone_all_layers = [(3, 64, 1), (64, 64, 1), (64, 64, 1), (64, 64, 1),
                               (64, 128, 2), (128, 128, 1), (128, 128, 1),
                               (128, 256, 2), (256, 256, 1), (256, 256, 1)]
default_backbone = [(64, 64, 1), (64, 64, 1), (64, 64, 1),
                    (64, 128, 2), (128, 128, 1), (128, 128, 1),
                    (128, 256, 2), (256, 256, 1), (256, 256, 1)]
class Model(nn.Module):
""" Spatial temporal graph convolutional networks
for skeleton-based action recognition.
Input shape:
Input shape should be (N, C, T, V, M)
where N is the number of samples,
C is the number of input channels,
T is the length of the sequence,
V is the number of joints or graph nodes,
and M is the number of people.
Arguments:
About shape:
channel (int): Number of channels in the input data
num_class (int): Number of classes for classification
window_size (int): Length of input sequence
num_point (int): Number of joints or graph nodes
num_person (int): Number of people
About net:
            use_data_bn: If true, the input data is first passed through a batch normalization layer
            backbone_config: The structure of the backbone networks
        About graph convolution:
            graph: The skeleton graph, represented by an adjacency matrix
            graph_args: The arguments of the graph
            mask_learning: If true, use mask matrices to reweight the adjacency matrices
            use_local_bn: If true, each node in the graph has its own batch normalization parameters
        About temporal convolution:
            multiscale: If true, use multi-scale temporal convolution
            temporal_kernel_size: The kernel size of the temporal convolution
            dropout: The dropout rate of the dropout layer preceding each temporal convolution layer
"""
def __init__(self,
channel,
num_class,
window_size,
num_point,
attention,
only_attention,
tcn_attention,
only_temporal_attention,
attention_3,
relative,
kernel_temporal,
double_channel,
drop_connect,
concat_original,
dv,
dk,
Nh,
dim_block1,
dim_block2,
dim_block3,
all_layers,
data_normalization,
visualization,
skip_conn,
adjacency,
bn_flag,
weight_matrix,
device,
n,
more_channels,
num_person=1,
use_data_bn=False,
backbone_config=None,
graph=None,
graph_args=dict(),
mask_learning=False,
use_local_bn=False,
multiscale=False,
temporal_kernel_size=9,
dropout=0.5,
agcn = True):
super(Model, self).__init__()
if graph is None:
raise ValueError()
else:
Graph = import_class(graph)
self.graph = Graph(**graph_args)
# self.A = torch.from_numpy(self.graph.A).float().cuda(0)
# self.A = torch.from_numpy(self.graph.A).float()
#self.A = self.graph.A
self.A = torch.from_numpy(self.graph.A.astype(np.float32))
self.num_class = num_class
self.use_data_bn = use_data_bn
self.multiscale = multiscale
self.attention = attention
self.tcn_attention = tcn_attention
self.drop_connect = drop_connect
self.more_channels = more_channels
self.concat_original = concat_original
self.all_layers = all_layers
self.dv = dv
self.num = n
self.Nh = Nh
self.dk = dk
self.data_normalization = data_normalization
self.skip_conn = skip_conn
self.visualization = visualization
self.double_channel = double_channel
self.adjacency = adjacency
# Different bodies share batchNorm parameters or not
self.M_dim_bn = True
if self.M_dim_bn:
self.data_bn = nn.BatchNorm1d(channel * num_point * num_person)
else:
self.data_bn = nn.BatchNorm1d(channel * num_point)
if self.all_layers:
if not self.double_channel:
self.starting_ch = 64
else:
self.starting_ch = 128
else:
if not self.double_channel:
self.starting_ch = 128
else:
self.starting_ch = 256
kwargs = dict(
A=self.A,
mask_learning=mask_learning,
use_local_bn=use_local_bn,
dropout=dropout,
kernel_size=temporal_kernel_size,
attention=attention,
only_attention=only_attention,
tcn_attention=tcn_attention,
only_temporal_attention=only_temporal_attention,
attention_3=attention_3,
relative=relative,
weight_matrix=weight_matrix,
device=device,
more_channels=self.more_channels,
drop_connect=self.drop_connect,
data_normalization=self.data_normalization,
skip_conn=self.skip_conn,
adjacency=self.adjacency,
starting_ch=self.starting_ch,
visualization=self.visualization,
all_layers=self.all_layers,
dv=self.dv,
dk=self.dk,
Nh=self.Nh,
num=n,
dim_block1=dim_block1,
dim_block2=dim_block2,
dim_block3=dim_block3,
num_point=num_point,
agcn = agcn
)
if self.multiscale:
unit = TCN_GCN_unit_multiscale
else:
unit = TCN_GCN_unit
# backbone
if backbone_config is None:
if self.all_layers:
backbone_config = default_backbone_all_layers
else:
backbone_config = default_backbone
if self.double_channel:
backbone_in_c = backbone_config[0][0] * 2
backbone_out_c = backbone_config[-1][1] * 2
else:
backbone_in_c = backbone_config[0][0]
backbone_out_c = backbone_config[-1][1]
backbone_out_t = window_size
backbone = []
for i, (in_c, out_c, stride) in enumerate(backbone_config):
if self.double_channel:
in_c = in_c * 2
out_c = out_c * 2
if i == 3 and concat_original:
backbone.append(unit(in_c + channel, out_c, stride=stride, last=i == len(default_backbone) - 1,
last_graph=(i == len(default_backbone) - 1), layer=i, **kwargs))
else:
backbone.append(unit(in_c, out_c, stride=stride, last=i == len(default_backbone) - 1,
last_graph=(i == len(default_backbone) - 1), layer=i, **kwargs))
if backbone_out_t % stride == 0:
backbone_out_t = backbone_out_t // stride
else:
backbone_out_t = backbone_out_t // stride + 1
self.backbone = nn.ModuleList(backbone)
print("self.backbone: ", self.backbone)
for i in range(0, len(backbone)):
pytorch_total_params = sum(p.numel() for p in self.backbone[i].parameters() if p.requires_grad)
print(pytorch_total_params)
# head
if not all_layers:
if not agcn:
self.gcn0 = unit_gcn(
channel,
backbone_in_c,
self.A,
mask_learning=mask_learning,
use_local_bn=use_local_bn)
else:
self.gcn0 = unit_agcn(
channel,
backbone_in_c,
self.A,
mask_learning=mask_learning,
use_local_bn=use_local_bn)
self.tcn0 = Unit2D(backbone_in_c, backbone_in_c, kernel_size=9)
# tail
self.person_bn = nn.BatchNorm1d(backbone_out_c)
self.gap_size = backbone_out_t
self.fcn = nn.Conv1d(backbone_out_c, num_class, kernel_size=1)
conv_init(self.fcn)
def forward(self, x, label, name):
N, C, T, V, M = x.size()
if (self.concat_original):
x_coord = x
x_coord = x_coord.permute(0, 4, 1, 2, 3).reshape(N * M, C, T, V)
# data bn
if self.use_data_bn:
if self.M_dim_bn:
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
else:
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N * M, V * C, T)
x = self.data_bn(x)
# to (N*M, C, T, V)
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(
N * M, C, T, V)
else:
# from (N, C, T, V, M) to (N*M, C, T, V)
x = x.permute(0, 4, 1, 2, 3).contiguous().view(N * M, C, T, V)
# model
if not self.all_layers:
x = self.gcn0(x, label, name)
x = self.tcn0(x)
for i, m in enumerate(self.backbone):
if i == 3 and self.concat_original:
x = m(torch.cat((x, x_coord), dim=1), label, name)
else:
x = m(x, label, name)
# V pooling
x = F.avg_pool2d(x, kernel_size=(1, V))
# M pooling
c = x.size(1)
t = x.size(2)
x = x.view(N, M, c, t).mean(dim=1).view(N, c, t)
# T pooling
x = F.avg_pool1d(x, kernel_size=x.size()[2])
# C fcn
x = self.fcn(x)
x = F.avg_pool1d(x, x.size()[2:])
x = x.view(N, self.num_class)
return x
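# Shape walkthrough (illustrative numbers, e.g. N=8 clips, C=3 coords, T=300 frames,
# V=25 joints, M=2 people): the backbone outputs (N*M, C', T', V); V-pooling averages
# joints, M-pooling averages the people dimension back to (N, C', T'), T-pooling
# collapses time, and the 1x1 conv maps C' to num_class logits of shape (N, num_class).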
class TCN_GCN_unit(nn.Module):
def __init__(self,
in_channel,
out_channel,
A,
attention,
only_attention,
tcn_attention,
only_temporal_attention,
relative,
device,
attention_3,
dv,
dk,
Nh,
num,
dim_block1,
dim_block2,
dim_block3,
num_point,
weight_matrix,
more_channels,
drop_connect,
starting_ch,
all_layers,
adjacency,
data_normalization,
visualization,
skip_conn,
layer=0,
kernel_size=9,
stride=1,
dropout=0.5,
use_local_bn=False,
mask_learning=False,
last=False,
last_graph=False,
agcn = False
):
super(TCN_GCN_unit, self).__init__()
self.A = A
self.V = A.shape[-1]
self.C = in_channel
self.last = last
self.data_normalization = data_normalization
self.skip_conn = skip_conn
self.num_point = num_point
self.adjacency = adjacency
self.last_graph = last_graph
self.layer = layer
self.stride = stride
self.drop_connect = drop_connect
self.visualization = visualization
self.device = device
self.all_layers = all_layers
self.more_channels = more_channels
if (out_channel >= starting_ch and attention or (self.all_layers and attention)):
self.gcn1 = gcn_unit_attention(in_channel, out_channel, dv_factor=dv, dk_factor=dk, Nh=Nh,
complete=True,
relative=relative, only_attention=only_attention, layer=layer, incidence=A,
bn_flag=True, last_graph=self.last_graph, more_channels=self.more_channels,
drop_connect=self.drop_connect, adjacency=self.adjacency, num=num,
data_normalization=self.data_normalization, skip_conn=self.skip_conn,
visualization=self.visualization, num_point=self.num_point)
else:
if not agcn:
self.gcn1 = unit_gcn(
in_channel,
out_channel,
A,
use_local_bn=use_local_bn,
mask_learning=mask_learning)
else:
self.gcn1 = unit_agcn(
in_channel,
out_channel,
A,
use_local_bn=use_local_bn,
mask_learning=mask_learning)
if (out_channel >= starting_ch and tcn_attention or (self.all_layers and tcn_attention)):
if out_channel <= starting_ch and self.all_layers:
self.tcn1 = tcn_unit_attention_block(out_channel, out_channel, dv_factor=dv,
dk_factor=dk, Nh=Nh,
relative=relative, only_temporal_attention=only_temporal_attention,
dropout=dropout,
kernel_size_temporal=9, stride=stride,
weight_matrix=weight_matrix, bn_flag=True, last=self.last,
layer=layer,
device=self.device, more_channels=self.more_channels,
drop_connect=self.drop_connect, n=num,
data_normalization=self.data_normalization,
skip_conn=self.skip_conn,
visualization=self.visualization, dim_block1=dim_block1,
dim_block2=dim_block2, dim_block3=dim_block3, num_point=self.num_point)
else:
self.tcn1 = tcn_unit_attention(out_channel, out_channel, dv_factor=dv,
dk_factor=dk, Nh=Nh,
relative=relative, only_temporal_attention=only_temporal_attention,
dropout=dropout,
kernel_size_temporal=9, stride=stride,
weight_matrix=weight_matrix, bn_flag=True, last=self.last,
layer=layer,
device=self.device, more_channels=self.more_channels,
drop_connect=self.drop_connect, n=num,
data_normalization=self.data_normalization, skip_conn=self.skip_conn,
visualization=self.visualization, num_point=self.num_point)
else:
self.tcn1 = Unit2D(
out_channel,
out_channel,
kernel_size=kernel_size,
dropout=dropout,
stride=stride)
if ((in_channel != out_channel) or (stride != 1)):
self.down1 = Unit2D(
in_channel, out_channel, kernel_size=1, stride=stride)
else:
self.down1 = None
def forward(self, x, label, name):
# N, C, T, V = x.size()
x = self.tcn1(self.gcn1(x, label, name)) + (x if
(self.down1 is None) else self.down1(x))
return x
class TCN_GCN_unit_multiscale(nn.Module):
def __init__(self,
in_channels,
out_channels,
A,
kernel_size=9,
stride=1,
**kwargs):
super(TCN_GCN_unit_multiscale, self).__init__()
self.unit_1 = TCN_GCN_unit(
in_channels,
            out_channels // 2,
A,
kernel_size=kernel_size,
stride=stride,
**kwargs)
self.unit_2 = TCN_GCN_unit(
in_channels,
            out_channels - out_channels // 2,
A,
kernel_size=kernel_size * 2 - 1,
stride=stride,
**kwargs)
def forward(self, x):
return torch.cat((self.unit_1(x), self.unit_2(x)), dim=1)
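# --- Hedged usage sketch (not part of the original module) ---
# Minimal check of the (N, C, T, V, M) -> (N*M, C, T, V) reshape that the
# forward pass above applies before the backbone when use_data_bn is False;
# the shapes below are illustrative placeholders, not values from a dataset.
def _demo_person_flatten():
    import torch
    N, C, T, V, M = 2, 3, 16, 25, 2  # batch, channels, frames, joints, persons
    x = torch.randn(N, C, T, V, M)
    # same permute/view used in the forward pass to fold persons into the batch
    x_flat = x.permute(0, 4, 1, 2, 3).contiguous().view(N * M, C, T, V)
    assert x_flat.shape == (N * M, C, T, V)
    return x_flat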
|
198419
|
import numpy as np
import sys
import os
sys.path.append(os.path.expanduser('~/darts/cnn'))
#from train_class import Train
OPS = ['max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NUM_VERTICES = 4
INPUT_1 = 'c_k-2'
INPUT_2 = 'c_k-1'
class Arch:
def __init__(self, arch):
self.arch = arch
def serialize(self):
return self.arch
def query(self, epochs=50):
        trainer = Train()  # requires the DARTS Train class (uncomment the train_class import above)
val_losses, test_losses = trainer.main(self.arch, epochs=epochs)
val_loss = 100 - np.mean(val_losses)
test_loss = 100 - test_losses[-1]
return val_loss, test_loss
@classmethod
def random_arch(cls):
# output a uniformly random architecture spec
# from the DARTS repository
# https://github.com/quark0/darts
normal = []
reduction = []
for i in range(NUM_VERTICES):
ops = np.random.choice(range(len(OPS)), NUM_VERTICES)
#input nodes for conv
nodes_in_normal = np.random.choice(range(i+2), 2, replace=False)
#input nodes for reduce
nodes_in_reduce = np.random.choice(range(i+2), 2, replace=False)
normal.extend([(nodes_in_normal[0], ops[0]), (nodes_in_normal[1], ops[1])])
reduction.extend([(nodes_in_reduce[0], ops[2]), (nodes_in_reduce[1], ops[3])])
return (normal, reduction)
def mutate(self, edits):
""" mutate a single arch """
# first convert tuple to array so that it is mutable
mutation = []
for cell in self.arch:
mutation.append([])
for pair in cell:
mutation[-1].append([])
for num in pair:
mutation[-1][-1].append(num)
#make mutations
for _ in range(edits):
cell = np.random.choice(2)
            pair = np.random.choice(2 * NUM_VERTICES)  # each cell holds 2*NUM_VERTICES (node, op) pairs
num = np.random.choice(2)
if num == 1:
mutation[cell][pair][num] = np.random.choice(len(OPS))
else:
inputs = pair // 2 + 2
choice = np.random.choice(inputs)
if pair % 2 == 0 and mutation[cell][pair+1][num] != choice:
mutation[cell][pair][num] = choice
elif pair % 2 != 0 and mutation[cell][pair-1][num] != choice:
mutation[cell][pair][num] = choice
return mutation
def get_paths(self):
""" return all paths from input to output """
        path_builder = [[[] for _ in range(NUM_VERTICES)] for _ in range(2)]
paths = [[], []]
for i, cell in enumerate(self.arch):
            for j in range(2 * NUM_VERTICES):  # one entry per (node, op) pair in the cell
if cell[j][0] == 0:
path = [INPUT_1, OPS[cell[j][1]]]
path_builder[i][j//2].append(path)
paths[i].append(path)
elif cell[j][0] == 1:
path = [INPUT_2, OPS[cell[j][1]]]
path_builder[i][j//2].append(path)
paths[i].append(path)
else:
for path in path_builder[i][cell[j][0] - 2]:
path = [*path, OPS[cell[j][1]]]
path_builder[i][j//2].append(path)
paths[i].append(path)
# check if there are paths of length >=5
contains_long_path = [False, False]
if max([len(path) for path in paths[0]]) >= 5:
contains_long_path[0] = True
if max([len(path) for path in paths[1]]) >= 5:
contains_long_path[1] = True
return paths, contains_long_path
def get_path_indices(self, long_paths=True):
"""
compute the index of each path
There are 4 * (8^0 + ... + 8^4) paths total
If long_paths = False, we give a single boolean to all paths of
size 4, so there are only 4 * (1 + 8^0 + ... + 8^3) paths
"""
paths, contains_long_path = self.get_paths()
normal_paths, reduce_paths = paths
num_ops = len(OPS)
"""
Compute the max number of paths per input per cell.
Since there are two cells and two inputs per cell,
total paths = 4 * max_paths
"""
if not long_paths:
max_paths = 1 + sum([num_ops ** i for i in range(NUM_VERTICES)])
else:
max_paths = sum([num_ops ** i for i in range(NUM_VERTICES + 1)])
path_indices = []
# set the base index based on the cell and the input
for i, paths in enumerate((normal_paths, reduce_paths)):
for path in paths:
index = i * 2 * max_paths
if path[0] == INPUT_2:
index += max_paths
# recursively compute the index of the path
for j in range(NUM_VERTICES + 1):
if j == len(path) - 1:
path_indices.append(index)
break
elif j == (NUM_VERTICES - 1) and not long_paths:
path_indices.append(2 * (i + 1) * max_paths - 1)
break
else:
index += num_ops ** j * (OPS.index(path[j + 1]) + 1)
return (tuple(path_indices), contains_long_path)
def encode_paths(self, long_paths=True):
# output one-hot encoding of paths
path_indices, _ = self.get_path_indices(long_paths=long_paths)
num_ops = len(OPS)
if not long_paths:
max_paths = 1 + sum([num_ops ** i for i in range(NUM_VERTICES)])
else:
max_paths = sum([num_ops ** i for i in range(NUM_VERTICES + 1)])
path_encoding = np.zeros(4 * max_paths)
for index in path_indices:
path_encoding[index] = 1
return path_encoding
def path_distance(self, other):
# compute the distance between two architectures
# by comparing their path encodings
        return np.sum(self.encode_paths() != other.encode_paths())
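# --- Hedged usage sketch (not part of the original module) ---
# Exercises the encoding utilities with numpy only; Arch.query additionally
# needs the DARTS Train class imported above.
def _demo_arch_encoding():
    np.random.seed(0)
    a = Arch(Arch.random_arch())
    b = Arch(Arch.random_arch())
    enc = a.encode_paths()  # one-hot path encoding
    print('encoding length:', len(enc))  # 4 * sum(7**i for i in range(5)) = 11204
    print('path distance:', a.path_distance(b))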
|
198443
|
import copy
import json
import unittest
from metagrok.pkmn.engine import core
update_with_request = core._update_with_request
get_side = core._get_side
postproc = core._postprocess_engine_state
class UpdateWithRequestTest(unittest.TestCase):
def test_begin(self):
state = copy.deepcopy(state_begin)
postproc(state)
update_with_request(state, req_begin)
side = get_side(state, state['whoami'])
poke = side['pokemon'][0]
self.assertEqual('p2: Primeape', poke['ident'])
self.assertEqual(244., poke['maxhp'])
self.assertEqual(244., poke['hp'])
self.assertEqual(
[('icepunch', 0), ('uturn', 0), ('encore', 0), ('closecombat', 0)],
list(map(tuple, poke['moveTrack'])))
self.assertEqual('lifeorb', poke['item'])
self.assertEqual('vitalspirit', poke['ability'])
self.assertEqual('vitalspirit', poke['baseAbility'])
self.assertEqual(True, poke['active'])
self.assertEqual(False, poke['fainted'])
poke = side['pokemon'][1]
self.assertEqual('p2: Zoroark', poke['ident'])
self.assertEqual(222., poke['maxhp'])
self.assertEqual(222., poke['hp'])
self.assertEqual(
[('flamethrower', 0), ('nastyplot', 0), ('suckerpunch', 0), ('darkpulse', 0)],
list(map(tuple, poke['moveTrack'])))
self.assertEqual('lifeorb', poke['item'])
self.assertEqual('illusion', poke['ability'])
self.assertEqual('illusion', poke['baseAbility'])
self.assertEqual(False, poke['active'])
self.assertEqual(False, poke['fainted'])
    def test_zoroark_switchin(self):
        # smoke test: only verifies that applying the request does not raise
        state = copy.deepcopy(state_zoroark_switch)
        postproc(state)
        update_with_request(state, req_zoroark_switch)
state_begin = json.loads(r'''{
"turn": 1,
"ended": false,
"usesUpkeep": false,
"weather": "",
"pseudoWeather": [],
"weatherTimeLeft": 0,
"weatherMinTimeLeft": 0,
"mySide": {
"battle": {
"$ref": "$"
},
"name": "metagrok-random",
"id": "metagrokrandom",
"initialized": true,
"n": 0,
"foe": {
"battle": {
"$ref": "$"
},
"name": "borrel-ahorse",
"id": "borrelahorse",
"initialized": true,
"n": 1,
"foe": {
"$ref": "$[\"mySide\"]"
},
"totalPokemon": 6,
"sideConditions": {},
"wisher": null,
"active": [
{
"name": "Primeape",
"species": "Primeape",
"searchid": "p2: Primeape|Primeape, L83, M",
"side": {
"$ref": "$[\"mySide\"][\"foe\"]"
},
"fainted": false,
"hp": 244,
"maxhp": 244,
"ability": "",
"baseAbility": "",
"item": "",
"itemEffect": "",
"prevItem": "",
"prevItemEffect": "",
"boosts": {},
"status": "",
"volatiles": {},
"turnstatuses": {},
"movestatuses": {},
"lastmove": "",
"moveTrack": [],
"statusData": {
"sleepTurns": 0,
"toxicTurns": 0
},
"num": 57,
"types": [
"Fighting"
],
"baseStats": {
"hp": 65,
"atk": 105,
"def": 60,
"spa": 60,
"spd": 70,
"spe": 95
},
"abilities": {
"0": "Vital Spirit",
"1": "Anger Point",
"H": "Defiant"
},
"heightm": 1,
"weightkg": 32,
"color": "Brown",
"prevo": "mankey",
"evoLevel": 28,
"eggGroups": [
"Field"
],
"exists": true,
"id": "primeape",
"speciesid": "primeape",
"baseSpecies": "Primeape",
"forme": "",
"formeLetter": "",
"formeid": "",
"spriteid": "primeape",
"effectType": "Template",
"gen": 1,
"slot": 0,
"details": "Primeape, L83, M",
"ident": "p2: Primeape",
"level": 83,
"gender": "M",
"shiny": false
}
],
"lastPokemon": null,
"pokemon": [
{
"$ref": "$[\"mySide\"][\"foe\"][\"active\"][0]"
}
]
},
"totalPokemon": 6,
"sideConditions": {},
"wisher": null,
"active": [
{
"name": "Reshiram",
"species": "Reshiram",
"searchid": "p1: Reshiram|Reshiram, L73",
"side": {
"$ref": "$[\"mySide\"]"
},
"fainted": false,
"hp": 100,
"maxhp": 100,
"ability": "Turboblaze",
"baseAbility": "Turboblaze",
"item": "",
"itemEffect": "",
"prevItem": "",
"prevItemEffect": "",
"boosts": {},
"status": "",
"volatiles": {},
"turnstatuses": {},
"movestatuses": {},
"lastmove": "",
"moveTrack": [],
"statusData": {
"sleepTurns": 0,
"toxicTurns": 0
},
"num": 643,
"types": [
"Dragon",
"Fire"
],
"gender": "",
"baseStats": {
"hp": 100,
"atk": 120,
"def": 100,
"spa": 150,
"spd": 120,
"spe": 90
},
"abilities": {
"0": "Turboblaze"
},
"heightm": 3.2,
"weightkg": 330,
"color": "White",
"eggGroups": [
"Undiscovered"
],
"exists": true,
"id": "reshiram",
"speciesid": "reshiram",
"baseSpecies": "Reshiram",
"forme": "",
"formeLetter": "",
"formeid": "",
"spriteid": "reshiram",
"effectType": "Template",
"gen": 5,
"slot": 0,
"details": "Reshiram, L73",
"ident": "p1: Reshiram",
"level": 73,
"shiny": false
}
],
"lastPokemon": null,
"pokemon": [
{
"$ref": "$[\"mySide\"][\"active\"][0]"
}
]
},
"yourSide": {
"$ref": "$[\"mySide\"][\"foe\"]"
},
"p1": {
"$ref": "$[\"mySide\"]"
},
"p2": {
"$ref": "$[\"mySide\"][\"foe\"]"
},
"sides": [
{
"$ref": "$[\"mySide\"]"
},
{
"$ref": "$[\"mySide\"][\"foe\"]"
}
],
"lastMove": "",
"gen": 7,
"speciesClause": true,
"gameType": "singles",
"tier": "[Gen 7] Random Battle",
"lastmove": "switch-in"
}''')
req_begin = json.loads('''{
"active": [
{
"moves": [
{
"move": "Ice Punch",
"id": "icepunch",
"pp": 24,
"maxpp": 24,
"target": "normal",
"disabled": false
},
{
"move": "U-turn",
"id": "uturn",
"pp": 32,
"maxpp": 32,
"target": "normal",
"disabled": false
},
{
"move": "Encore",
"id": "encore",
"pp": 8,
"maxpp": 8,
"target": "normal",
"disabled": false
},
{
"move": "Close Combat",
"id": "closecombat",
"pp": 8,
"maxpp": 8,
"target": "normal",
"disabled": false
}
]
}
],
"side": {
"name": "borrel-ahorse",
"id": "p2",
"pokemon": [
{
"ident": "p2: Primeape",
"details": "Primeape, L83, M",
"condition": "244/244",
"active": true,
"stats": {
"atk": 222,
"def": 147,
"spa": 147,
"spd": 164,
"spe": 205
},
"moves": [
"icepunch",
"uturn",
"encore",
"closecombat"
],
"baseAbility": "vitalspirit",
"item": "lifeorb",
"pokeball": "pokeball",
"ability": "vitalspirit"
},
{
"ident": "p2: Zoroark",
"details": "Zoroark, L78, F",
"condition": "222/222",
"active": false,
"stats": {
"atk": 209,
"def": 139,
"spa": 232,
"spd": 139,
"spe": 209
},
"moves": [
"flamethrower",
"nastyplot",
"suckerpunch",
"darkpulse"
],
"baseAbility": "illusion",
"item": "lifeorb",
"pokeball": "pokeball",
"ability": "illusion"
},
{
"ident": "p2: Shiftry",
"details": "Shiftry, L83, M",
"condition": "285/285",
"active": false,
"stats": {
"atk": 214,
"def": 147,
"spa": 197,
"spd": 147,
"spe": 180
},
"moves": [
"swordsdance",
"leafblade",
"lowkick",
"suckerpunch"
],
"baseAbility": "earlybird",
"item": "lifeorb",
"pokeball": "pokeball",
"ability": "earlybird"
},
{
"ident": "p2: Tornadus",
"details": "Tornadus, L78, M",
"condition": "251/251",
"active": false,
"stats": {
"atk": 184,
"def": 154,
"spa": 240,
"spd": 170,
"spe": 218
},
"moves": [
"tailwind",
"heatwave",
"taunt",
"hurricane"
],
"baseAbility": "prankster",
"item": "leftovers",
"pokeball": "pokeball",
"ability": "prankster"
},
{
"ident": "p2: Steelix",
"details": "Steelix, L79, F",
"condition": "248/248",
"active": false,
"stats": {
"atk": 180,
"def": 362,
"spa": 132,
"spd": 148,
"spe": 93
},
"moves": [
"stealthrock",
"earthquake",
"toxic",
"dragontail"
],
"baseAbility": "sturdy",
"item": "steelixite",
"pokeball": "pokeball",
"ability": "sturdy"
},
{
"ident": "p2: Scrafty",
"details": "Scrafty, L81, F",
"condition": "238/238",
"active": false,
"stats": {
"atk": 192,
"def": 233,
"spa": 120,
"spd": 233,
"spe": 141
},
"moves": [
"rest",
"highjumpkick",
"dragondance",
"icepunch"
],
"baseAbility": "intimidate",
"item": "chestoberry",
"pokeball": "pokeball",
"ability": "intimidate"
}
]
},
"rqid": 3
}''')
state_zoroark_switch = json.loads(r'''{"turn":9,"ended":false,"usesUpkeep":true,"weather":"","p
seudoWeather":[],"weatherTimeLeft":0,"weatherMinTimeLeft":0,"mySide":{"battle":{"$ref":"$"},"na
me":"metagrok-random","id":"metagrokrandom","initialized":true,"n":0,"foe":{"battle":{"$ref":"$
"},"name":"borrel-ahorse","id":"borrelahorse","initialized":true,"n":1,"foe":{"$ref":"$[\"mySid
e\"]"},"totalPokemon":6,"sideConditions":{},"wisher":null,"active":[{"name":"Steelix","species"
:"Steelix","searchid":"p2: Steelix|Steelix, L79, F","side":{"$ref":"$[\"mySide\"][\"foe\"]"},"f
ainted":false,"hp":222,"maxhp":222,"ability":"","baseAbility":"","item":"","itemEffect":"","pre
vItem":"","prevItemEffect":"","boosts":{},"status":"","volatiles":{},"turnstatuses":{},"movesta
tuses":{},"lastmove":"","moveTrack":[],"statusData":{"sleepTurns":0,"toxicTurns":0},"num":208,"
types":["Steel","Ground"],"baseStats":{"hp":75,"atk":85,"def":200,"spa":55,"spd":65,"spe":30},"
abilities":{"0":"Rock Head","1":"Sturdy","H":"Sheer Force"},"heightm":9.2,"weightkg":400,"color
":"Gray","prevo":"onix","evoLevel":1,"eggGroups":["Mineral"],"otherFormes":["steelixmega"],"exi
sts":true,"id":"steelix","speciesid":"steelix","baseSpecies":"Steelix","forme":"","formeLetter"
:"","formeid":"","spriteid":"steelix","effectType":"Template","gen":2,"slot":0,"details":"Steel
ix, L79, F","ident":"p2: Steelix","level":79,"gender":"F","shiny":false}],"lastPokemon":{"name"
:"Scrafty","species":"Scrafty","searchid":"p2: Scrafty|Scrafty, L81, F","side":{"$ref":"$[\"myS
ide\"][\"foe\"]"},"fainted":true,"hp":0,"maxhp":238,"ability":"","baseAbility":"","item":"","it
emEffect":"","prevItem":"","prevItemEffect":"","boosts":{},"status":"","volatiles":{},"turnstat
uses":{},"movestatuses":{},"lastmove":"highjumpkick","moveTrack":[["Rest",2],["Dragon Dance",2]
,["Ice Punch",1],["High Jump Kick",1]],"statusData":{"sleepTurns":0,"toxicTurns":0},"num":560,"
types":["Dark","Fighting"],"baseStats":{"hp":65,"atk":90,"def":115,"spa":45,"spd":115,"spe":58}
,"abilities":{"0":"Shed Skin","1":"Moxie","H":"Intimidate"},"heightm":1.1,"weightkg":30,"color"
:"Red","prevo":"scraggy","evoLevel":39,"eggGroups":["Field","Dragon"],"exists":true,"id":"scraf
ty","speciesid":"scrafty","baseSpecies":"Scrafty","forme":"","formeLetter":"","formeid":"","spr
iteid":"scrafty","effectType":"Template","gen":5,"slot":0,"details":"Scrafty, L81, F","ident":"
p2: Scrafty","level":81,"gender":"F","shiny":false},"pokemon":[{"name":"Primeape","species":"Pr
imeape","searchid":"p2: Primeape|Primeape, L83, M","side":{"$ref":"$[\"mySide\"][\"foe\"]"},"fa
inted":true,"hp":0,"maxhp":244,"ability":"","baseAbility":"","item":"Life Orb","itemEffect":"",
"prevItem":"","prevItemEffect":"","boosts":{},"status":"","volatiles":{},"turnstatuses":{},"mov
estatuses":{},"lastmove":"closecombat","moveTrack":[["Ice Punch",1],["Close Combat",1]],"status
Data":{"sleepTurns":0,"toxicTurns":0},"num":57,"types":["Fighting"],"baseStats":{"hp":65,"atk":
105,"def":60,"spa":60,"spd":70,"spe":95},"abilities":{"0":"Vital Spirit","1":"Anger Point","H":
"Defiant"},"heightm":1,"weightkg":32,"color":"Brown","prevo":"mankey","evoLevel":28,"eggGroups"
:["Field"],"exists":true,"id":"primeape","speciesid":"primeape","baseSpecies":"Primeape","forme
":"","formeLetter":"","formeid":"","spriteid":"primeape","effectType":"Template","gen":1,"slot"
:0,"details":"Primeape, L83, M","ident":"p2: Primeape","level":83,"gender":"M","shiny":false},{
"$ref":"$[\"mySide\"][\"foe\"][\"lastPokemon\"]"},{"$ref":"$[\"mySide\"][\"foe\"][\"active\"][0
]"}]},"totalPokemon":6,"sideConditions":{},"wisher":null,"active":[{"name":"Huntail","species":
"Huntail","searchid":"p1: Huntail|Huntail, L83, F","side":{"$ref":"$[\"mySide\"]"},"fainted":fa
lse,"hp":5,"maxhp":100,"ability":"","baseAbility":"","item":"","itemEffect":"","prevItem":"","p
revItemEffect":"","boosts":{},"status":"","volatiles":{},"turnstatuses":{},"movestatuses":{},"l
astmove":"waterfall","moveTrack":[["Waterfall",1]],"statusData":{"sleepTurns":0,"toxicTurns":0}
,"num":367,"types":["Water"],"baseStats":{"hp":55,"atk":104,"def":105,"spa":94,"spd":75,"spe":5
2},"abilities":{"0":"Swift Swim","H":"Water Veil"},"heightm":1.7,"weightkg":27,"color":"Blue","
prevo":"clamperl","evoLevel":1,"eggGroups":["Water 1"],"exists":true,"id":"huntail","speciesid"
:"huntail","baseSpecies":"Huntail","forme":"","formeLetter":"","formeid":"","spriteid":"huntail
","effectType":"Template","gen":3,"slot":0,"details":"Huntail, L83, F","ident":"p1: Huntail","l
evel":83,"gender":"F","shiny":false}],"lastPokemon":{"name":"Krookodile","species":"Krookodile"
,"searchid":"p1: Krookodile|Krookodile, L77, M","side":{"$ref":"$[\"mySide\"]"},"fainted":false
,"hp":91,"maxhp":100,"ability":"","baseAbility":"","item":"Life Orb","itemEffect":"","prevItem"
:"","prevItemEffect":"","boosts":{},"status":"","volatiles":{},"turnstatuses":{},"movestatuses"
:{},"lastmove":"superpower","moveTrack":[["Superpower",1]],"statusData":{"sleepTurns":0,"toxicT
urns":0},"num":553,"types":["Ground","Dark"],"baseStats":{"hp":95,"atk":117,"def":80,"spa":65,"
spd":70,"spe":92},"abilities":{"0":"Intimidate","1":"Moxie","H":"Anger Point"},"heightm":1.5,"w
eightkg":96.3,"color":"Red","prevo":"krokorok","evoLevel":40,"eggGroups":["Field"],"exists":tru
e,"id":"krookodile","speciesid":"krookodile","baseSpecies":"Krookodile","forme":"","formeLetter
":"","formeid":"","spriteid":"krookodile","effectType":"Template","gen":5,"slot":0,"details":"K
rookodile, L77, M","ident":"p1: Krookodile","level":77,"gender":"M","shiny":false},"pokemon":[{
"name":"Reshiram","species":"Reshiram","searchid":"p1: Reshiram|Reshiram, L73","side":{"$ref":"
$[\"mySide\"]"},"fainted":true,"hp":0,"maxhp":100,"ability":"Turboblaze","baseAbility":"Turbobl
aze","item":"Leftovers","itemEffect":"","prevItem":"","prevItemEffect":"","boosts":{},"status":
"","volatiles":{},"turnstatuses":{},"movestatuses":{},"lastmove":"","moveTrack":[["Blue Flare",
1],["Flame Charge",1]],"statusData":{"sleepTurns":0,"toxicTurns":0},"num":643,"types":["Dragon"
,"Fire"],"gender":"","baseStats":{"hp":100,"atk":120,"def":100,"spa":150,"spd":120,"spe":90},"a
bilities":{"0":"Turboblaze"},"heightm":3.2,"weightkg":330,"color":"White","eggGroups":["Undisco
vered"],"exists":true,"id":"reshiram","speciesid":"reshiram","baseSpecies":"Reshiram","forme":"
","formeLetter":"","formeid":"","spriteid":"reshiram","effectType":"Template","gen":5,"slot":0,
"details":"Reshiram, L73","ident":"p1: Reshiram","level":73,"shiny":false},{"name":"Grumpig","s
pecies":"Grumpig","searchid":"p1: Grumpig|Grumpig, L83, F","side":{"$ref":"$[\"mySide\"]"},"fai
nted":false,"hp":100,"maxhp":100,"ability":"","baseAbility":"","item":"","itemEffect":"","prevI
tem":"","prevItemEffect":"","boosts":{},"status":"","volatiles":{},"turnstatuses":{},"movestatu
ses":{},"lastmove":"","moveTrack":[],"statusData":{"sleepTurns":0,"toxicTurns":0},"num":326,"ty
pes":["Psychic"],"baseStats":{"hp":80,"atk":45,"def":65,"spa":90,"spd":110,"spe":80},"abilities
":{"0":"Thick Fat","1":"Own Tempo","H":"Gluttony"},"heightm":0.9,"weightkg":71.5,"color":"Purpl
e","prevo":"spoink","evoLevel":32,"eggGroups":["Field"],"exists":true,"id":"grumpig","speciesid
":"grumpig","baseSpecies":"Grumpig","forme":"","formeLetter":"","formeid":"","spriteid":"grumpi
g","effectType":"Template","gen":3,"slot":0,"details":"Grumpig, L83, F","ident":"p1: Grumpig","
level":83,"gender":"F","shiny":false},{"$ref":"$[\"mySide\"][\"lastPokemon\"]"},{"$ref":"$[\"my
Side\"][\"active\"][0]"}]},"yourSide":{"$ref":"$[\"mySide\"][\"foe\"]"},"p1":{"$ref":"$[\"mySid
e\"]"},"p2":{"$ref":"$[\"mySide\"][\"foe\"]"},"sides":[{"$ref":"$[\"mySide\"]"},{"$ref":"$[\"my
Side\"][\"foe\"]"}],"lastMove":"","gen":7,"speciesClause":true,"gameType":"singles","tier":"[Ge
n 7] Random Battle","lastmove":"switch-in"}
'''.replace('\n', '').replace('\r', ''))
req_zoroark_switch = json.loads(r'''{"active":[{"moves":[{"move":"Flamethrower","id":"flamethro
wer","pp":24,"maxpp":24,"target":"normal","disabled":false},{"move":"Nasty Plot","id":"nastyplo
t","pp":32,"maxpp":32,"target":"self","disabled":false},{"move":"Sucker Punch","id":"suckerpunc
h","pp":8,"maxpp":8,"target":"normal","disabled":false},{"move":"Dark Pulse","id":"darkpulse","
pp":24,"maxpp":24,"target":"any","disabled":false}]}],"side":{"name":"borrel-ahorse","id":"p2",
"pokemon":[{"ident":"p2: Zoroark","details":"Zoroark, L78, F","condition":"222/222","active":tr
ue,"stats":{"atk":209,"def":139,"spa":232,"spd":139,"spe":209},"moves":["flamethrower","nastypl
ot","suckerpunch","darkpulse"],"baseAbility":"illusion","item":"lifeorb","pokeball":"pokeball",
"ability":"illusion"},{"ident":"p2: Scrafty","details":"Scrafty, L81, F","condition":"0 fnt","a
ctive":false,"stats":{"atk":192,"def":233,"spa":120,"spd":233,"spe":141},"moves":["rest","highj
umpkick","dragondance","icepunch"],"baseAbility":"intimidate","item":"chestoberry","pokeball":"
pokeball","ability":"intimidate"},{"ident":"p2: Shiftry","details":"Shiftry, L83, M","condition
":"285/285","active":false,"stats":{"atk":214,"def":147,"spa":197,"spd":147,"spe":180},"moves":
["swordsdance","leafblade","lowkick","suckerpunch"],"baseAbility":"earlybird","item":"lifeorb",
"pokeball":"pokeball","ability":"earlybird"},{"ident":"p2: Tornadus","details":"Tornadus, L78,
M","condition":"251/251","active":false,"stats":{"atk":184,"def":154,"spa":240,"spd":170,"spe":
218},"moves":["tailwind","heatwave","taunt","hurricane"],"baseAbility":"prankster","item":"left
overs","pokeball":"pokeball","ability":"prankster"},{"ident":"p2: Steelix","details":"Steelix,
L79, F","condition":"248/248","active":false,"stats":{"atk":180,"def":362,"spa":132,"spd":148,"
spe":93},"moves":["stealthrock","earthquake","toxic","dragontail"],"baseAbility":"sturdy","item
":"steelixite","pokeball":"pokeball","ability":"sturdy"},{"ident":"p2: Primeape","details":"Pri
meape, L83, M","condition":"0 fnt","active":false,"stats":{"atk":222,"def":147,"spa":147,"spd":
164,"spe":205},"moves":["icepunch","uturn","encore","closecombat"],"baseAbility":"vitalspirit",
"item":"lifeorb","pokeball":"pokeball","ability":"vitalspirit"}]},"rqid":25}
'''.replace('\n', '').replace('\r', ''))
if __name__ == '__main__':
unittest.main()
|
198457
|
from isitfit.utils import logger
import click
from isitfit.cli.click_descendents import IsitfitCliError
import pandas as pd
class MigMan:
"""
Class that manages a local sqlite database to keep track of migrations that were already run
https://www.pythoncentral.io/introduction-to-sqlite-in-python/
"""
def __init__(self):
self.quiet = False
self.not_dry_run = False
from isitfit.dotMan import DotMan
import os
self.db_p = os.path.join(DotMan().get_dotisitfit(), "migrations.sqlite")
import datetime as dt
self.dt_now = dt.datetime.now().date()
def connect(self):
import sqlite3
self.db_h = sqlite3.connect(self.db_p)
def _current(self):
# Append new migrations here after implementing them as a function with a docstring in miglist.py
from isitfit.migrations import miglist
return [
('mig20191203a', miglist.mig20191203a),
('mig20191203b', miglist.mig20191203b),
('mig20191203c', miglist.mig20191203c),
]
    def __exit__(self, exc_type, exc_value, traceback):
        # context-manager exit signature; __enter__ is not defined, so callers
        # currently call connect() explicitly
        self.db_h.close()
def _create(self):
# Note: using migname instead of name because "name" is used in pandas as the "name of the row"
cursor = self.db_h.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS migrations(
migname TEXT PRIMARY KEY,
executed DATE
)
''')
self.db_h.commit()
def read(self):
# in case of first run
self._create()
# insert "new" migrations
df_mer = self._insertNew()
# append docstrings
df_mer['description'] = df_mer.func.apply(lambda x: x.__doc__.strip() if x.__doc__ is not None else None)
logger.debug("Migrations")
logger.debug(df_mer[['migname', 'executed', 'description']])
# subset for those that don't have an executed date yet
df_mer = df_mer[df_mer.executed.isna()]
# save
self.df_mig = df_mer
def _insertNew(self):
# migrations in local database
df_db = pd.read_sql_query("select * from migrations", self.db_h)
# full list of migrations
df_py = pd.DataFrame(self._current(), columns=['migname', 'func'])
# join. Note the left join will drop from the database any migrations that are deleted from self._current
df_mer = df_py.merge(df_db, on='migname', how='left')
# save to db
        # index=False keeps the pandas index from becoming a spurious column
        df_mer[['migname', 'executed']].to_sql('migrations', self.db_h, if_exists='replace', index=False)
# done
return df_mer
def migrate_all(self):
if self.df_mig.shape[0]==0:
if self.quiet: return
raise IsitfitCliError("No migrations to execute")
for i in range(self.df_mig.shape[0]):
self._migrate_single(i)
def _migrate_single(self, i):
mig_i = self.df_mig.iloc[i]
prefix = "" if self.not_dry_run else "[Dry run] "
if not self.quiet:
click.echo("%sExecuting migration: %s: %s"%(prefix, mig_i.migname, mig_i.description))
if self.not_dry_run:
mig_i.func()
# no need to update local dataframe
# self.df_mig.iloc[i, self.df_mig.columns == 'executed'] = self.dt_now
# update row in database (per migration in case of error halfway)
cursor = self.db_h.cursor()
cursor.execute('''
UPDATE migrations
SET executed = ?
where migname = ?
''', (self.dt_now, mig_i.migname, ))
self.db_h.commit()
#-----------------------------
# utility functions
#def prompt_migrate():
# migman = MigMan()
# migman.connect()
# migman.read()
# if migman.df_mig.shape[0]==0:
# return
#
# raise IsitfitCliError("There are %i migrations that need to be executed. Please use `isitfit migrations show` to list them"%migman.df_mig.shape[0])
def silent_migrate():
migman = MigMan()
migman.quiet = True
migman.not_dry_run = True
migman.connect()
migman.read()
migman.migrate_all()
return migman.df_mig.migname.tolist()
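# --- Hedged usage sketch (not part of the original module) ---
# The interactive counterpart of silent_migrate(): with the default flags it
# only echoes "[Dry run] Executing migration: ..." without running anything.
# Note it still touches the real ~/.isitfit/migrations.sqlite database and
# raises IsitfitCliError when no migrations are pending.
def dry_run_migrate():
    migman = MigMan()  # quiet=False, not_dry_run=False by default
    migman.connect()
    migman.read()
    migman.migrate_all()
    return migman.df_mig.migname.tolist()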
|
198465
|
from . import app
from . import command
from . import config
from . import models
from . import operations
__version__ = app.__version__
|
198469
|
import numpy as np
import torch
import torch.nn.functional as F
def cross_entropy_2d(predict, target):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), f"{predict.size(0)} vs {target.size(0)}"
assert predict.size(2) == target.size(1), f"{predict.size(2)} vs {target.size(1)}"
    assert predict.size(3) == target.size(2), f"{predict.size(3)} vs {target.size(2)}"
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target < 200)
target = target[target_mask]
    if target.numel() == 0:
        # nothing left to score after masking; return a zero loss
        return torch.zeros(1, device=predict.device)
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
    loss = F.cross_entropy(predict, target, reduction='mean')  # size_average is deprecated
return loss
def entropy_loss(v):
"""
Entropy loss for probabilistic prediction vectors
input: batch_size x channels x h x w
output: batch_size x 1 x h x w
"""
assert v.dim() == 4
n, c, h, w = v.size()
return -torch.sum(torch.mul(v, torch.log2(v + 1e-30))) / (n * h * w * np.log2(c))
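# --- Hedged usage sketch (not part of the original module) ---
# entropy_loss expects per-pixel probability vectors (e.g. softmax outputs);
# the n*h*w*log2(c) normalization keeps the value in [0, 1]. The shapes below
# are illustrative only.
def _demo_losses():
    n, c, h, w = 2, 19, 8, 8
    probs = F.softmax(torch.randn(n, c, h, w), dim=1)
    assert 0.0 <= entropy_loss(probs).item() <= 1.0
    logits = torch.randn(n, c, h, w)
    labels = torch.randint(0, c, (n, h, w))
    return cross_entropy_2d(logits, labels)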
|
198481
|
class ITypeHintingFactory(object):
def make_param_provider(self):
"""
:rtype: rope.base.oi.type_hinting.providers.interfaces.IParamProvider
"""
raise NotImplementedError
def make_return_provider(self):
"""
:rtype: rope.base.oi.type_hinting.providers.interfaces.IReturnProvider
"""
raise NotImplementedError
def make_assignment_provider(self):
"""
:rtype: rope.base.oi.type_hinting.providers.interfaces.IAssignmentProvider
"""
raise NotImplementedError
def make_resolver(self):
"""
:rtype: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
"""
raise NotImplementedError
|
198593
|
from bson import json_util
from flask.json import JSONEncoder
from mongoengine.base import BaseDocument
from mongoengine.queryset import QuerySet
def _make_encoder(superclass):
class MongoEngineJSONEncoder(superclass):
"""
A JSONEncoder which provides serialization of MongoEngine
documents and queryset objects.
"""
def default(self, obj):
if isinstance(obj, BaseDocument):
return json_util._json_convert(obj.to_mongo())
elif isinstance(obj, QuerySet):
return json_util._json_convert(obj.as_pymongo())
return superclass.default(self, obj)
return MongoEngineJSONEncoder
MongoEngineJSONEncoder = _make_encoder(JSONEncoder)
def override_json_encoder(app):
"""
A function to dynamically create a new MongoEngineJSONEncoder class
based upon a custom base class.
This function allows us to combine MongoEngine serialization with
any changes to Flask's JSONEncoder which a user may have made
prior to calling init_app.
NOTE: This does not cover situations where users override
an instance's json_encoder after calling init_app.
"""
app.json_encoder = _make_encoder(app.json_encoder)
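# --- Hedged usage sketch (not part of the original module) ---
# Assumes a mongoengine Document class can be defined without an open
# connection (no save/query is performed); `Note` is a placeholder, not a
# class from this project:
def _demo_encoder():
    import json
    import mongoengine

    class Note(mongoengine.Document):
        text = mongoengine.StringField()

    # the encoder routes the document through json_util instead of raising
    return json.dumps(Note(text='hello'), cls=MongoEngineJSONEncoder)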
|
198599
|
from collections import Counter
from hippybot.decorators import botcmd
class Plugin(object):
    r"""HippyBot plugin that makes the bot complete a wave once 3 people
    in a row do the action "\o/".
    """
global_commands = ['\o/', 'wave']
command_aliases = {'\o/': 'wave'}
counts = Counter()
def __init__(self, config):
pass
@botcmd
def wave(self, mess, args):
"""
If enough people \o/, techbot will too.
Everyone loves a follower, well, techbot is here to fulfill that need
"""
channel = unicode(mess.getFrom()).split('/')[0]
self.bot.log.info("\o/ %s" %self.counts[channel])
if not self.bot.from_bot(mess):
self.counts[channel] += 1
if self.counts[channel] == 3:
self.counts[channel] = 0
return r'\o/'
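# --- Hedged sketch (not part of the original plugin) ---
# The per-channel "3 in a row" counting above, isolated from the HipChat
# plumbing; 'room@example' is a hypothetical channel id:
def _demo_wave_counter():
    from collections import Counter
    counts = Counter()
    channel = 'room@example'
    for _ in range(3):
        counts[channel] += 1
        if counts[channel] == 3:
            counts[channel] = 0
            return r'\o/'  # the bot completes the wave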
|
198603
|
import pytest
import rasa.shared.nlu.training_data.lookup_tables_parser as lookup_tables_parser
def test_add_item_to_lookup_tables():
lookup_item_title = "additional_currencies"
lookup_examples = ["Peso", "Euro", "Dollar"]
lookup_tables = []
for example in lookup_examples:
lookup_tables_parser.add_item_to_lookup_tables(
lookup_item_title, example, lookup_tables
)
assert lookup_tables == [{"name": lookup_item_title, "elements": lookup_examples}]
def test_add_item_to_lookup_tables_unloaded_file():
lookup_item_title = "additional_currencies"
lookup_tables = [{"name": lookup_item_title, "elements": "lookup.txt"}]
with pytest.raises(TypeError):
lookup_tables_parser.add_item_to_lookup_tables(
lookup_item_title, "Pound", lookup_tables
)
|
198610
|
from pathlib import Path
from typing import Union
import os
import shutil
import requests
from fastprogress.fastprogress import progress_bar
import zipfile
PathOrStr = Union[Path,str]
LOCAL_PATH = Path.cwd()
DATA_PATH = Path.home()/'tmp'
UCR_LINK = 'http://www.timeseriesclassification.com/Downloads/Archives/Univariate2018_arff.zip'
## From fastai
def ifnone(a, b): return b if a is None else a
def url2name(url): return url.split('/')[-1].split('.')[0]
def url2path(url, ext='.zip'):
"Change `url` to a path."
name = url2name(url)
return DATA_PATH/(name+ext)
def datapath4file(filename, ext:str='.zip'):
local_path = LOCAL_PATH/'data'/filename
return local_path
def download_data(url:str, fname:PathOrStr, data:bool=True, ext:str='.tgz') -> Path:
"Download `url` to destination `fname`."
fname = Path(fname)
os.makedirs(fname.parent, exist_ok=True)
if not fname.exists():
print(f'Downloading {url}')
download_url(url, fname)
return fname
def unzip_data(url:str=UCR_LINK, fname:PathOrStr=None, dest:PathOrStr=None, force_download=False) -> Path:
"Download `url` to `fname` if `dest` doesn't exist, and un-zip to folder `dest`."
fname = Path(ifnone(fname, url2path(url)))
dest = LOCAL_PATH/'Univariate_arff' if dest is None else Path(dest)
if force_download:
print(f"A new version of the dataset is available.")
if fname.exists(): os.remove(fname)
if dest.exists(): shutil.rmtree(dest)
if not dest.exists():
fname = download_data(url, fname=fname)
with zipfile.ZipFile(fname, 'r') as zip_ref:
zip_ref.extractall(dest.parent)
else: print(f'Files present in : {dest}')
return dest
def download_url(url:str, dest:str, overwrite:bool=False,
show_progress=True, chunk_size=1024*1024, timeout=4, retries=5)->None:
"Download `url` to `dest` unless it exists and not `overwrite`."
if os.path.exists(dest) and not overwrite: return
s = requests.Session()
s.mount('http://',requests.adapters.HTTPAdapter(max_retries=retries))
u = s.get(url, stream=True, timeout=timeout)
    try: file_size = int(u.headers["Content-Length"])
    except (KeyError, ValueError): show_progress = False  # unknown size: skip the progress bar
with open(dest, 'wb') as f:
nbytes = 0
if show_progress: pbar = progress_bar(range(file_size), auto_update=False, leave=False)
try:
for chunk in u.iter_content(chunk_size=chunk_size):
nbytes += len(chunk)
if show_progress: pbar.update(nbytes)
f.write(chunk)
        except requests.exceptions.ConnectionError:
            print(f'Try downloading your file manually from {url}')
            import sys
            sys.exit(1)
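# --- Hedged usage sketch (not part of the original module) ---
# Downloads the (large) UCR univariate archive into DATA_PATH and unpacks it
# under ./Univariate_arff; requires network access, hence the __main__ guard:
if __name__ == '__main__':
    dest = unzip_data()  # defaults to UCR_LINK
    print('UCR datasets under:', dest)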
|
198621
|
import setuptools
# Makes it easy for contributors to install user-facing dependencies.
reqs = []
with open('requirements.txt') as f:
for line in f:
if not line.strip().startswith('#'):
line = line.rstrip('\n')
reqs.append(line)
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="aiqc",
version="3.0.0",#start using double digits.
author="<NAME>",
author_email="<EMAIL>",
description="End-to-end machine learning on your desktop or server.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://aiqc.readthedocs.io/",
packages=setuptools.find_packages(),
include_package_data=True,# Triggers `MANIFEST.in` file.
python_requires='>=3.5, <=3.8.7', # (tf req Py3.5-3.8)
license='BSD 3-Clause',
    # Version operands: https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers
    # Despite wheel-related warnings when installing aiqc on a fresh Python env,
    # users do not need to preinstall 'wheel'.
install_requires=reqs,
classifiers=[
"Programming Language :: Python :: 3",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Development Status :: 1 - Planning",
"Framework :: Jupyter",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
)
|
198653
|
from snips_nlu.dataset.dataset import Dataset
from snips_nlu.dataset.entity import Entity
from snips_nlu.dataset.intent import Intent
from snips_nlu.dataset.utils import (
extract_intent_entities, extract_utterance_entities,
get_dataset_gazetteer_entities, get_text_from_chunks)
from snips_nlu.dataset.validation import validate_and_format_dataset
|
198664
|
from typing import Iterable
class InfiniteIterator:
"""Infinitely repeat the iterable."""
def __init__(self, iterable: Iterable):
self._iterable = iterable
self.iterator = iter(self._iterable)
def __iter__(self):
return self
    def __next__(self):
        for _ in range(2):
            try:
                return next(self.iterator)
            except StopIteration:
                # underlying iterator exhausted: restart from the beginning
                self.iterator = iter(self._iterable)
        # still nothing after a reset, i.e. the iterable is empty
        raise StopIteration
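# --- Hedged usage sketch (not part of the original module) ---
# InfiniteIterator cycles its iterable forever, so consumers must bound it
# themselves (here with itertools.islice):
def _demo_infinite_iterator():
    from itertools import islice
    inf = InfiniteIterator([1, 2, 3])
    assert list(islice(inf, 7)) == [1, 2, 3, 1, 2, 3, 1]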
|
198667
|
from celery import (
group,
signature,
)
from django.core.management.base import BaseCommand
from pontoon.base.models import Translation
from pontoon.checks import DB_FORMATS
from pontoon.checks.tasks import check_translations
class Command(BaseCommand):
help = "Run checks on all translations"
def add_arguments(self, parser):
parser.add_argument(
"--batch-size",
action="store",
dest="batch_size",
default=10000,
help="Number of translations to check in a single batch/Celery task",
)
parser.add_argument(
"--with-disabled-projects",
action="store_true",
dest="disabled_projects",
default=False,
help="Include disabled projects",
)
parser.add_argument(
"--with-obsolete-entities",
action="store_true",
dest="obsolete_entities",
default=False,
help="Include obsolete entities",
)
def handle(self, *args, **options):
filter_qs = {}
# Don't include disabled projects by default
if not options["disabled_projects"]:
filter_qs["entity__resource__project__disabled"] = False
# Don't include obsolete by default
if not options["obsolete_entities"]:
filter_qs["entity__obsolete"] = False
translations_pks = Translation.objects.filter(
entity__resource__format__in=DB_FORMATS, **filter_qs
).values_list("pk", flat=True)
# Split translations into even batches and send them to Celery workers
batch_size = int(options["batch_size"])
group(
signature(check_translations, args=(translations_pks[i : i + batch_size],))
for i in range(0, len(translations_pks), batch_size)
).apply_async()
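# --- Hedged usage note (not part of the original command) ---
# The CLI name depends on this file's path under management/commands/; if it
# is installed as e.g. `run_checks` (hypothetical name), a call looks like:
#   ./manage.py run_checks --batch-size 5000 --with-disabled-projects
# The batching itself is plain slicing:
def _demo_batching(pks, batch_size=3):
    return [list(pks[i:i + batch_size]) for i in range(0, len(pks), batch_size)]
# _demo_batching(range(7), 3) -> [[0, 1, 2], [3, 4, 5], [6]]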
|
198693
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Dhcpv6Relay(Base):
__slots__ = ()
_SDM_NAME = 'dhcpv6Relay'
_SDM_ATT_MAP = {
'HeaderMessageType': 'dhcpv6Relay.header.messageType-1',
'HeaderHopCount': 'dhcpv6Relay.header.hopCount-2',
'HeaderLinkAddress': 'dhcpv6Relay.header.linkAddress-3',
'HeaderPeerAddress': 'dhcpv6Relay.header.peerAddress-4',
'ClientIdCode': 'dhcpv6Relay.header.nextOption.option.clientId.code-5',
'ClientIdLength': 'dhcpv6Relay.header.nextOption.option.clientId.length-6',
'DuidLLTCode': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLLT.code-7',
'DuidLLTHwType': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLLT.hwType-8',
'DuidLLTTime': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLLT.time-9',
'LinkLayerAddressLength': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLLT.linkLayerAddress.length-10',
'LinkLayerAddressData': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLLT.linkLayerAddress.data-11',
'DuidENCode': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidEN.code-12',
'DuidENNumber': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidEN.number-13',
'UniqueIdLength': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidEN.uniqueId.length-14',
'UniqueIdData': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidEN.uniqueId.data-15',
'DuidLLCode': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLL.code-16',
'DuidLLHwType': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLL.hwType-17',
'DuidllLinkLayerAddressLength': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLL.linkLayerAddress.length-18',
'DuidllLinkLayerAddressData': 'dhcpv6Relay.header.nextOption.option.clientId.data.duid.duidLL.linkLayerAddress.data-19',
'ServerIdCode': 'dhcpv6Relay.header.nextOption.option.serverId.code-20',
'ServerIdLength': 'dhcpv6Relay.header.nextOption.option.serverId.length-21',
'DuidDuidLLTCode': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLLT.code-22',
'DuidDuidLLTHwType': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLLT.hwType-23',
'DuidDuidLLTTime': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLLT.time-24',
'DuidlltLinkLayerAddressLength': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLLT.linkLayerAddress.length-25',
'DuidlltLinkLayerAddressData': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLLT.linkLayerAddress.data-26',
'DuidDuidENCode': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidEN.code-27',
'DuidDuidENNumber': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidEN.number-28',
'DuidenUniqueIdLength': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidEN.uniqueId.length-29',
'DuidenUniqueIdData': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidEN.uniqueId.data-30',
'DuidDuidLLCode': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLL.code-31',
'DuidDuidLLHwType': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLL.hwType-32',
'LinkLayerAddress1Length': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLL.linkLayerAddress1.length-33',
'LinkLayerAddress1Data': 'dhcpv6Relay.header.nextOption.option.serverId.data.duid.duidLL.linkLayerAddress1.data-34',
'IdAssociationCode': 'dhcpv6Relay.header.nextOption.option.idAssociation.code-35',
'IdAssociationLength': 'dhcpv6Relay.header.nextOption.option.idAssociation.length-36',
'IdAssociationIaid': 'dhcpv6Relay.header.nextOption.option.idAssociation.iaid-37',
'IdAssociationT1': 'dhcpv6Relay.header.nextOption.option.idAssociation.t1-38',
'IdAssociationT2': 'dhcpv6Relay.header.nextOption.option.idAssociation.t2-39',
'OptionsLength': 'dhcpv6Relay.header.nextOption.option.idAssociation.options.length-40',
'OptionsData': 'dhcpv6Relay.header.nextOption.option.idAssociation.options.data-41',
'IaForTmpAddressCode': 'dhcpv6Relay.header.nextOption.option.iaForTmpAddress.code-42',
'IaForTmpAddressLength': 'dhcpv6Relay.header.nextOption.option.iaForTmpAddress.length-43',
'IaForTmpAddressId': 'dhcpv6Relay.header.nextOption.option.iaForTmpAddress.id-44',
'IafortmpaddressOptionsLength': 'dhcpv6Relay.header.nextOption.option.iaForTmpAddress.options.length-45',
'IafortmpaddressOptionsData': 'dhcpv6Relay.header.nextOption.option.iaForTmpAddress.options.data-46',
'IaAddressCode': 'dhcpv6Relay.header.nextOption.option.iaAddress.code-47',
'IaAddressLength': 'dhcpv6Relay.header.nextOption.option.iaAddress.length-48',
'IaAddressIpv6address': 'dhcpv6Relay.header.nextOption.option.iaAddress.ipv6address-49',
'IaAddressPreferredLifetime': 'dhcpv6Relay.header.nextOption.option.iaAddress.preferredLifetime-50',
'IaAddressValidLifetime': 'dhcpv6Relay.header.nextOption.option.iaAddress.validLifetime-51',
'IaaddressOptionsLength': 'dhcpv6Relay.header.nextOption.option.iaAddress.options.length-52',
'IaaddressOptionsData': 'dhcpv6Relay.header.nextOption.option.iaAddress.options.data-53',
'OptionRequestCode': 'dhcpv6Relay.header.nextOption.option.optionRequest.code-54',
'OptionRequestLength': 'dhcpv6Relay.header.nextOption.option.optionRequest.length-55',
'ReqOptionCode': 'dhcpv6Relay.header.nextOption.option.optionRequest.reqOption.code-56',
'PreferenceCode': 'dhcpv6Relay.header.nextOption.option.preference.code-57',
'PreferenceLength': 'dhcpv6Relay.header.nextOption.option.preference.length-58',
'PreferenceValue': 'dhcpv6Relay.header.nextOption.option.preference.value-59',
'ElapsedTimeCode': 'dhcpv6Relay.header.nextOption.option.elapsedTime.code-60',
'ElapsedTimeLength': 'dhcpv6Relay.header.nextOption.option.elapsedTime.length-61',
'ElapsedTimeValue': 'dhcpv6Relay.header.nextOption.option.elapsedTime.value-62',
'RelayMessageCode': 'dhcpv6Relay.header.nextOption.option.relayMessage.code-63',
'RelayMessageLength': 'dhcpv6Relay.header.nextOption.option.relayMessage.length-64',
'MessageLength': 'dhcpv6Relay.header.nextOption.option.relayMessage.message.length-65',
'MessageData': 'dhcpv6Relay.header.nextOption.option.relayMessage.message.data-66',
'AuthenticationCode': 'dhcpv6Relay.header.nextOption.option.authentication.code-67',
'AuthenticationLength': 'dhcpv6Relay.header.nextOption.option.authentication.length-68',
'AuthenticationProtocol': 'dhcpv6Relay.header.nextOption.option.authentication.protocol-69',
'AuthenticationAlgorithm': 'dhcpv6Relay.header.nextOption.option.authentication.algorithm-70',
'AuthenticationRdm': 'dhcpv6Relay.header.nextOption.option.authentication.rdm-71',
'AuthenticationReplayDetection': 'dhcpv6Relay.header.nextOption.option.authentication.replayDetection-72',
'AuthenticationInformationLength': 'dhcpv6Relay.header.nextOption.option.authentication.authenticationInformation.length-73',
'AuthenticationInformationData': 'dhcpv6Relay.header.nextOption.option.authentication.authenticationInformation.data-74',
'ServerUnicastCode': 'dhcpv6Relay.header.nextOption.option.serverUnicast.code-75',
'ServerUnicastLength': 'dhcpv6Relay.header.nextOption.option.serverUnicast.length-76',
'ServerUnicastAddress': 'dhcpv6Relay.header.nextOption.option.serverUnicast.address-77',
'StatusCodeCode': 'dhcpv6Relay.header.nextOption.option.statusCode.code-78',
'StatusCodeLength': 'dhcpv6Relay.header.nextOption.option.statusCode.length-79',
'StatusCodeValue': 'dhcpv6Relay.header.nextOption.option.statusCode.value-80',
'StatusMessageLength': 'dhcpv6Relay.header.nextOption.option.statusCode.statusMessage.length-81',
'StatusMessageData': 'dhcpv6Relay.header.nextOption.option.statusCode.statusMessage.data-82',
'RapidCommitCode': 'dhcpv6Relay.header.nextOption.option.rapidCommit.code-83',
'RapidCommitLength': 'dhcpv6Relay.header.nextOption.option.rapidCommit.length-84',
'UserClassCode': 'dhcpv6Relay.header.nextOption.option.userClass.code-85',
'UserClassLength': 'dhcpv6Relay.header.nextOption.option.userClass.length-86',
'DataLength': 'dhcpv6Relay.header.nextOption.option.userClass.data.length-87',
'DataData': 'dhcpv6Relay.header.nextOption.option.userClass.data.data-88',
'VendorClassCode': 'dhcpv6Relay.header.nextOption.option.vendorClass.code-89',
'VendorClassLength': 'dhcpv6Relay.header.nextOption.option.vendorClass.length-90',
'VendorClassEnterpriseNumber': 'dhcpv6Relay.header.nextOption.option.vendorClass.enterpriseNumber-91',
'VendorclassDataLength': 'dhcpv6Relay.header.nextOption.option.vendorClass.data.length-92',
'VendorclassDataData': 'dhcpv6Relay.header.nextOption.option.vendorClass.data.data-93',
'VendorInformationCode': 'dhcpv6Relay.header.nextOption.option.vendorInformation.code-94',
'VendorInformationLength': 'dhcpv6Relay.header.nextOption.option.vendorInformation.length-95',
'VendorInformationEnterpriseNumber': 'dhcpv6Relay.header.nextOption.option.vendorInformation.enterpriseNumber-96',
'VendorinformationDataLength': 'dhcpv6Relay.header.nextOption.option.vendorInformation.data.length-97',
'VendorinformationDataData': 'dhcpv6Relay.header.nextOption.option.vendorInformation.data.data-98',
'InterfaceIdCode': 'dhcpv6Relay.header.nextOption.option.interfaceId.code-99',
'InterfaceIdLength': 'dhcpv6Relay.header.nextOption.option.interfaceId.length-100',
'IdLength': 'dhcpv6Relay.header.nextOption.option.interfaceId.id.length-101',
'IdData': 'dhcpv6Relay.header.nextOption.option.interfaceId.id.data-102',
'ReconfigureMessageCode': 'dhcpv6Relay.header.nextOption.option.reconfigureMessage.code-103',
'ReconfigureMessageLength': 'dhcpv6Relay.header.nextOption.option.reconfigureMessage.length-104',
'ReconfigureMessageMsgType': 'dhcpv6Relay.header.nextOption.option.reconfigureMessage.msgType-105',
'ReconfigureAcceptCode': 'dhcpv6Relay.header.nextOption.option.reconfigureAccept.code-106',
'ReconfigureAcceptLength': 'dhcpv6Relay.header.nextOption.option.reconfigureAccept.length-107',
'DnsRecursiveNameServerCode': 'dhcpv6Relay.header.nextOption.option.dnsRecursiveNameServer.code-108',
'DnsRecursiveNameServerLength': 'dhcpv6Relay.header.nextOption.option.dnsRecursiveNameServer.length-109',
'DnsRecursiveNameServerAddress': 'dhcpv6Relay.header.nextOption.option.dnsRecursiveNameServer.address-110',
'DomainSearchListCode': 'dhcpv6Relay.header.nextOption.option.domainSearchList.code-111',
'DomainSearchListLength': 'dhcpv6Relay.header.nextOption.option.domainSearchList.length-112',
'NextDomainDomain': 'dhcpv6Relay.header.nextOption.option.domainSearchList.nextDomain.domain-113',
'DomainSearchListNull': 'dhcpv6Relay.header.nextOption.option.domainSearchList.null-114',
}
def __init__(self, parent, list_op=False):
super(Dhcpv6Relay, self).__init__(parent, list_op)
@property
def HeaderMessageType(self):
"""
Display Name: Message Type
Default Value: 12
Value Format: decimal
Available enum values: Relay-forw, 12, Relay-repl, 13
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMessageType']))
@property
def HeaderHopCount(self):
"""
Display Name: Hop Count
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderHopCount']))
@property
def HeaderLinkAddress(self):
"""
Display Name: Link Address
Default Value: 0::0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLinkAddress']))
@property
def HeaderPeerAddress(self):
"""
Display Name: Peer Address
Default Value: 0::0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderPeerAddress']))
@property
def ClientIdCode(self):
"""
Display Name: Code
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientIdCode']))
@property
def ClientIdLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientIdLength']))
@property
def DuidLLTCode(self):
"""
Display Name: LLT Code
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidLLTCode']))
@property
def DuidLLTHwType(self):
"""
Display Name: Hardware type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidLLTHwType']))
@property
def DuidLLTTime(self):
"""
Display Name: Time
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidLLTTime']))
@property
def LinkLayerAddressLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkLayerAddressLength']))
@property
def LinkLayerAddressData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkLayerAddressData']))
@property
def DuidENCode(self):
"""
Display Name: EN Code
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidENCode']))
@property
def DuidENNumber(self):
"""
Display Name: Enterprise Number
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidENNumber']))
@property
def UniqueIdLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UniqueIdLength']))
@property
def UniqueIdData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UniqueIdData']))
@property
def DuidLLCode(self):
"""
Display Name: LL Code
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidLLCode']))
@property
def DuidLLHwType(self):
"""
Display Name: Hardware type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidLLHwType']))
@property
def DuidllLinkLayerAddressLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidllLinkLayerAddressLength']))
@property
def DuidllLinkLayerAddressData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidllLinkLayerAddressData']))
@property
def ServerIdCode(self):
"""
Display Name: Code
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServerIdCode']))
@property
def ServerIdLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServerIdLength']))
@property
def DuidDuidLLTCode(self):
"""
Display Name: LLT Code
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidLLTCode']))
@property
def DuidDuidLLTHwType(self):
"""
Display Name: Hardware type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidLLTHwType']))
@property
def DuidDuidLLTTime(self):
"""
Display Name: Time
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidLLTTime']))
@property
def DuidlltLinkLayerAddressLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidlltLinkLayerAddressLength']))
@property
def DuidlltLinkLayerAddressData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidlltLinkLayerAddressData']))
@property
def DuidDuidENCode(self):
"""
Display Name: EN Code
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidENCode']))
@property
def DuidDuidENNumber(self):
"""
Display Name: Enterprise Number
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidENNumber']))
@property
def DuidenUniqueIdLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidenUniqueIdLength']))
@property
def DuidenUniqueIdData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidenUniqueIdData']))
@property
def DuidDuidLLCode(self):
"""
Display Name: LL Code
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidLLCode']))
@property
def DuidDuidLLHwType(self):
"""
Display Name: Hardware type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DuidDuidLLHwType']))
@property
def LinkLayerAddress1Length(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkLayerAddress1Length']))
@property
def LinkLayerAddress1Data(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkLayerAddress1Data']))
@property
def IdAssociationCode(self):
"""
Display Name: Code
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdAssociationCode']))
@property
def IdAssociationLength(self):
"""
Display Name: Length
Default Value: 12
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdAssociationLength']))
@property
def IdAssociationIaid(self):
"""
Display Name: IAID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdAssociationIaid']))
@property
def IdAssociationT1(self):
"""
Display Name: T1
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdAssociationT1']))
@property
def IdAssociationT2(self):
"""
Display Name: T2
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdAssociationT2']))
@property
def OptionsLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OptionsLength']))
@property
def OptionsData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OptionsData']))
@property
def IaForTmpAddressCode(self):
"""
Display Name: Code
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaForTmpAddressCode']))
@property
def IaForTmpAddressLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaForTmpAddressLength']))
@property
def IaForTmpAddressId(self):
"""
Display Name: IAID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaForTmpAddressId']))
@property
def IafortmpaddressOptionsLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IafortmpaddressOptionsLength']))
@property
def IafortmpaddressOptionsData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IafortmpaddressOptionsData']))
@property
def IaAddressCode(self):
"""
Display Name: Code
Default Value: 5
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaAddressCode']))
@property
def IaAddressLength(self):
"""
Display Name: Length
Default Value: 24
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaAddressLength']))
@property
def IaAddressIpv6address(self):
"""
Display Name: IPv6 Address
Default Value: 0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaAddressIpv6address']))
@property
def IaAddressPreferredLifetime(self):
"""
Display Name: Preferred Lifetime
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaAddressPreferredLifetime']))
@property
def IaAddressValidLifetime(self):
"""
Display Name: Valid Lifetime
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaAddressValidLifetime']))
@property
def IaaddressOptionsLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaaddressOptionsLength']))
@property
def IaaddressOptionsData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IaaddressOptionsData']))
@property
def OptionRequestCode(self):
"""
Display Name: Code
Default Value: 6
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OptionRequestCode']))
@property
def OptionRequestLength(self):
"""
Display Name: Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OptionRequestLength']))
@property
def ReqOptionCode(self):
"""
Display Name: Requested option code
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReqOptionCode']))
@property
def PreferenceCode(self):
"""
Display Name: Code
Default Value: 7
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PreferenceCode']))
@property
def PreferenceLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PreferenceLength']))
@property
def PreferenceValue(self):
"""
Display Name: Preference value
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PreferenceValue']))
@property
def ElapsedTimeCode(self):
"""
Display Name: Code
Default Value: 8
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ElapsedTimeCode']))
@property
def ElapsedTimeLength(self):
"""
Display Name: Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ElapsedTimeLength']))
@property
def ElapsedTimeValue(self):
"""
Display Name: Elapsed Time
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ElapsedTimeValue']))
@property
def RelayMessageCode(self):
"""
Display Name: Code
Default Value: 9
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RelayMessageCode']))
@property
def RelayMessageLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RelayMessageLength']))
@property
def MessageLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MessageLength']))
@property
def MessageData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MessageData']))
@property
def AuthenticationCode(self):
"""
Display Name: Code
Default Value: 11
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationCode']))
@property
def AuthenticationLength(self):
"""
Display Name: Length
Default Value: 11
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationLength']))
@property
def AuthenticationProtocol(self):
"""
Display Name: Authentication Protocol
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationProtocol']))
@property
def AuthenticationAlgorithm(self):
"""
Display Name: Authentication Algorithm
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationAlgorithm']))
@property
def AuthenticationRdm(self):
"""
Display Name: Replay Detection Mechanism
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationRdm']))
@property
def AuthenticationReplayDetection(self):
"""
Display Name: Replay Detection
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationReplayDetection']))
@property
def AuthenticationInformationLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationInformationLength']))
@property
def AuthenticationInformationData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthenticationInformationData']))
@property
def ServerUnicastCode(self):
"""
Display Name: Code
Default Value: 12
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServerUnicastCode']))
@property
def ServerUnicastLength(self):
"""
Display Name: Length
Default Value: 16
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServerUnicastLength']))
@property
def ServerUnicastAddress(self):
"""
Display Name: Server Address
Default Value: 0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServerUnicastAddress']))
@property
def StatusCodeCode(self):
"""
Display Name: Code
Default Value: 13
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusCodeCode']))
@property
def StatusCodeLength(self):
"""
Display Name: Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusCodeLength']))
@property
def StatusCodeValue(self):
"""
Display Name: Status Code
Default Value: 0
Value Format: decimal
Available enum values: Success, 0, UnspecFail, 1, NoAddrsAvail, 2, NoBinding, 3, NotOnLink, 4, UseMulticast, 5
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusCodeValue']))
@property
def StatusMessageLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusMessageLength']))
@property
def StatusMessageData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusMessageData']))
@property
def RapidCommitCode(self):
"""
Display Name: Code
Default Value: 14
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RapidCommitCode']))
@property
def RapidCommitLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RapidCommitLength']))
@property
def UserClassCode(self):
"""
Display Name: Code
Default Value: 15
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UserClassCode']))
@property
def UserClassLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UserClassLength']))
@property
def DataLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DataLength']))
@property
def DataData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DataData']))
@property
def VendorClassCode(self):
"""
Display Name: Code
Default Value: 16
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorClassCode']))
@property
def VendorClassLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorClassLength']))
@property
def VendorClassEnterpriseNumber(self):
"""
Display Name: Enterprise Number
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorClassEnterpriseNumber']))
@property
def VendorclassDataLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorclassDataLength']))
@property
def VendorclassDataData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorclassDataData']))
@property
def VendorInformationCode(self):
"""
Display Name: Code
Default Value: 17
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorInformationCode']))
@property
def VendorInformationLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorInformationLength']))
@property
def VendorInformationEnterpriseNumber(self):
"""
Display Name: Enterprise Number
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorInformationEnterpriseNumber']))
@property
def VendorinformationDataLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorinformationDataLength']))
@property
def VendorinformationDataData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VendorinformationDataData']))
@property
def InterfaceIdCode(self):
"""
Display Name: Code
Default Value: 18
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InterfaceIdCode']))
@property
def InterfaceIdLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InterfaceIdLength']))
@property
def IdLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdLength']))
@property
def IdData(self):
"""
Display Name: Data
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IdData']))
@property
def ReconfigureMessageCode(self):
"""
Display Name: Code
Default Value: 19
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReconfigureMessageCode']))
@property
def ReconfigureMessageLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReconfigureMessageLength']))
@property
def ReconfigureMessageMsgType(self):
"""
Display Name: Message Type
Default Value: 5
Value Format: decimal
Available enum values: Renew Message, 5, Information Request Message, 11
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReconfigureMessageMsgType']))
@property
def ReconfigureAcceptCode(self):
"""
Display Name: Code
Default Value: 20
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReconfigureAcceptCode']))
@property
def ReconfigureAcceptLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReconfigureAcceptLength']))
@property
def DnsRecursiveNameServerCode(self):
"""
Display Name: Code
Default Value: 23
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DnsRecursiveNameServerCode']))
@property
def DnsRecursiveNameServerLength(self):
"""
Display Name: Length
Default Value: 16
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DnsRecursiveNameServerLength']))
@property
def DnsRecursiveNameServerAddress(self):
"""
Display Name: DNS Server Address
Default Value: 0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DnsRecursiveNameServerAddress']))
@property
def DomainSearchListCode(self):
"""
Display Name: Code
Default Value: 24
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DomainSearchListCode']))
@property
def DomainSearchListLength(self):
"""
Display Name: Length
Default Value: 5
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DomainSearchListLength']))
@property
def NextDomainDomain(self):
"""
Display Name: Domain
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NextDomainDomain']))
@property
def DomainSearchListNull(self):
"""
Display Name: Null
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DomainSearchListNull']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
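# Illustrative usage only (assumes a live IxNetwork session in which this
# options template has already been added; the parent object names below are
# placeholders):
#
#   options = dhcpv6_message.Dhcpv6ClientOptions.add()
#   options.ElapsedTimeValue.Single(100)          # fixed value on every packet
#   options.IdAssociationIaid.Increment(start_value='0x1', step_value='0x1')
#
# Each property above returns a Multivalue; its pattern methods such as
# Single(), Increment() and ValueList() control how the field varies across
# generated traffic.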
|
198713
|
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils.formats import localize
from django.utils.translation import activate
class SitemapTests(TestCase):
urls = 'django.contrib.sitemaps.tests.urls'
def setUp(self):
self.old_USE_L10N = settings.USE_L10N
# Create a user that will double as sitemap content
User.objects.create_user('testuser', '<EMAIL>', 's3krit')
def tearDown(self):
settings.USE_L10N = self.old_USE_L10N
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://example.com/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
# Localization should be active
settings.USE_L10N = True
activate('fr')
self.assertEqual(u'0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today().strftime('%Y-%m-%d'))
def test_generic_sitemap(self):
"A minimal generic sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/generic/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://example.com/users/testuser/</loc></url>
</urlset>
""")
|
198739
|
import sys
from loguru import logger
from flexget import options
from flexget.event import event
from flexget.plugin import plugins
from flexget.terminal import console
logger = logger.bind(name='doc')
def trim(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
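# A quick worked example of trim() (values follow the PEP 257 algorithm above):
#
#   trim("First line.\n    Indented body.\n")
#   # -> "First line.\nIndented body."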
def print_doc(manager, options):
plugin_name = options.doc
plugin = plugins.get(plugin_name, None)
if plugin:
if not plugin.instance.__doc__:
console('Plugin %s does not have documentation' % plugin_name)
else:
console('')
console(trim(plugin.instance.__doc__))
console('')
else:
console('Could not find plugin %s' % plugin_name)
@event('options.register')
def register_parser_arguments():
parser = options.register_command('doc', print_doc, help='display plugin documentation')
parser.add_argument('doc', metavar='<plugin name>', help='name of plugin to show docs for')
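# Registered as a CLI sub-command; an illustrative invocation (the plugin
# name is an example):
#
#   $ flexget doc regexp
#
# prints the trimmed docstring of the `regexp` plugin, or a not-found message
# if no such plugin is registered.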
|
198804
|
from django import forms
from django.core.exceptions import ValidationError
from django.core import validators
class BasicForm(forms.Form):
title = forms.CharField(validators=[
validators.MinLengthValidator(2, "Please enter 2 or more characters")
])
mileage = forms.IntegerField()
purchase_date = forms.DateField()
# References
# https://docs.djangoproject.com/en/3.0/ref/forms/api/
# https://docs.djangoproject.com/en/3.0/ref/forms/fields/#datefield
# https://docs.djangoproject.com/en/3.0/ref/forms/validation/#using-validation-in-practice
# https://docs.djangoproject.com/en/3.0/ref/validators/
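# Illustrative binding and validation (field values are examples):
#
#   form = BasicForm(data={'title': 'Go', 'mileage': '12000',
#                          'purchase_date': '2020-01-31'})
#   form.is_valid()  # True
#
#   bad = BasicForm(data={'title': 'X'})
#   bad.errors['title']  # ['Please enter 2 or more characters']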
|
198805
|
from django import forms
from django.conf import settings
from crits.campaigns.campaign import Campaign
from crits.core.forms import (
add_bucketlist_to_form,
add_ticket_to_form,
SourceInForm)
from crits.core import form_consts
from crits.core.widgets import CalWidget
from crits.core.handlers import get_item_names
from crits.core.handlers import get_source_names
from crits.core.user_tools import get_user_organization
from crits.vocabulary.indicators import (
IndicatorTypes,
IndicatorThreatTypes,
IndicatorAttackTypes
)
from crits.vocabulary.acls import Common, IndicatorACL
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.status import Status
relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
class IndicatorActivityForm(forms.Form):
"""
Django form for adding activity.
"""
error_css_class = 'error'
required_css_class = 'required'
description = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
start_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_activity_start_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
end_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_activity_end_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
date = forms.CharField(
widget=forms.HiddenInput(attrs={'size': '50',
'readonly': 'readonly',
'id': 'id_activity_date'}))
class UploadIndicatorCSVForm(SourceInForm):
"""
Django form for uploading Indicators via a CSV file.
"""
error_css_class = 'error'
required_css_class = 'required'
filedata = forms.FileField()
related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
relationship_type = forms.ChoiceField(required=False,
label=form_consts.Common.RELATIONSHIP_TYPE,
widget=forms.Select(attrs={'id':'relationship_type'}))
def __init__(self, user, *args, **kwargs):
super(UploadIndicatorCSVForm, self).__init__(user, *args, **kwargs)
self.fields['relationship_type'].choices = relationship_choices
self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO
class UploadIndicatorTextForm(SourceInForm):
"""
Django form for uploading Indicators via a CSV blob.
"""
error_css_class = 'error'
required_css_class = 'required'
data = forms.CharField(
widget=forms.Textarea(attrs={'cols': '80', 'rows': '20'}),
required=True)
related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
relationship_type = forms.ChoiceField(required=False,
label=form_consts.Common.RELATIONSHIP_TYPE,
widget=forms.Select(attrs={'id':'relationship_type'}))
def __init__(self, user, *args, **kwargs):
super(UploadIndicatorTextForm, self).__init__(user, *args, **kwargs)
dt = "Indicator, Type, Threat Type, Attack Type, Description, Campaign, Campaign Confidence, Confidence, Impact, Bucket List, Ticket, Action, Status\n"
self.fields['data'].initial = dt
self.fields['relationship_type'].choices = relationship_choices
self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO
class UploadIndicatorForm(SourceInForm):
"""
Django form for uploading a single Indicator.
"""
error_css_class = 'error'
required_css_class = 'required'
indicator_type = forms.ChoiceField(widget=forms.Select, required=True)
threat_type = forms.ChoiceField(widget=forms.Select, required=True)
attack_type = forms.ChoiceField(widget=forms.Select, required=True)
value = forms.CharField(
widget=forms.Textarea(attrs={'rows': '5', 'cols': '28'}),
required=True)
description = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
status = forms.ChoiceField(widget=forms.Select, required=False)
confidence = forms.ChoiceField(widget=forms.Select, required=True)
impact = forms.ChoiceField(widget=forms.Select, required=True)
campaign = forms.ChoiceField(widget=forms.Select, required=False)
campaign_confidence = forms.ChoiceField(widget=forms.Select, required=False)
related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
relationship_type = forms.ChoiceField(required=False,
label=form_consts.Common.RELATIONSHIP_TYPE,
widget=forms.Select(attrs={'id':'relationship_type'}))
def __init__(self, user, *args, **kwargs):
super(UploadIndicatorForm, self).__init__(user, *args, **kwargs)
self.fields['status'].choices = [
(c,c) for c in Status.values()
]
self.fields['indicator_type'].choices = [
(c,c) for c in IndicatorTypes.values(sort=True)
]
self.fields['threat_type'].choices = [
(c,c) for c in IndicatorThreatTypes.values(sort=True)
]
self.fields['threat_type'].initial = IndicatorThreatTypes.UNKNOWN
self.fields['attack_type'].choices = [
(c,c) for c in IndicatorAttackTypes.values(sort=True)
]
self.fields['attack_type'].initial = IndicatorAttackTypes.UNKNOWN
self.fields['indicator_type'].widget.attrs = {'class': 'object-types'}
if user.has_access_to(Common.CAMPAIGN_READ):
self.fields['campaign'].choices = [('', '')] + [
(c.name, c.name) for c in get_item_names(Campaign, True)]
self.fields['campaign_confidence'].choices = [
("", ""),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['confidence'].choices = [
("unknown", "unknown"),
("benign", "benign"),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['impact'].choices = [
("unknown", "unknown"),
("benign", "benign"),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['relationship_type'].choices = relationship_choices
self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO
add_bucketlist_to_form(self)
add_ticket_to_form(self)
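# Typical binding in a CRITs view (illustrative; the first positional argument
# is the request user, which must expose has_access_to() for the ACL check):
#
#   form = UploadIndicatorForm(request.user, request.POST)
#   if form.is_valid():
#       handle_indicator(form.cleaned_data)  # handle_indicator is a placeholder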
|
198814
|
r"""
Use this script to visualize the output of a trained speech-model.
Usage: python visualize.py /path/to/audio /path/to/training/json.json \
/path/to/model
"""
from __future__ import absolute_import, division, print_function
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from data_generator import DataGenerator
from model import compile_output_fn
from utils import argmax_decode, load_model
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
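# Worked example for softmax() above: softmax(np.array([0., 0.])) yields
# array([0.5, 0.5]). This direct form can overflow for large inputs; the
# usual numerically stable variant subtracts x.max() before exponentiating.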
def visualize(model, test_file, train_desc_file):
""" Get the prediction using the model, and visualize softmax outputs
Params:
model (keras.models.Model): Trained speech model
test_file (str): Path to an audio clip
train_desc_file(str): Path to the training file used to train this
model
"""
datagen = DataGenerator()
datagen.load_train_data(train_desc_file)
datagen.fit_train(100)
print ("Compiling test function...")
test_fn = compile_output_fn(model)
inputs = [datagen.featurize(test_file)]
prediction = np.squeeze(test_fn([inputs, True]))
softmax_file = "softmax.npy".format(test_file)
softmax_img_file = "softmax.png".format(test_file)
print ("Prediction: {}"
.format(argmax_decode(prediction)))
print ("Saving network output to: {}".format(softmax_file))
print ("As image: {}".format(softmax_img_file))
np.save(softmax_file, prediction)
sm = softmax(prediction.T)
sm = np.vstack((sm[0], sm[2], sm[3:][::-1]))
fig, ax = plt.subplots()
ax.pcolor(sm, cmap=plt.cm.Greys_r)
column_labels = [chr(i) for i in range(97, 97 + 26)] + ['space', 'blank']
ax.set_yticks(np.arange(sm.shape[0]) + 0.5, minor=False)
ax.set_yticklabels(column_labels[::-1], minor=False)
plt.savefig(softmax_img_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('test_file', type=str,
help='Path to an audio file')
parser.add_argument('train_desc_file', type=str,
help='Path to the training JSON-line file. This will '
'be used to extract feature means/variance')
parser.add_argument('load_dir', type=str,
help='Directory where a trained model is stored.')
parser.add_argument('--weights_file', type=str, default=None,
help='Path to a model weights file')
args = parser.parse_args()
print ("Loading model")
model = load_model(args.load_dir, args.weights_file)
visualize(model, args.test_file, args.train_desc_file)
if __name__ == '__main__':
main()
|
198851
|
from pathlib import Path
from PyFlow.Core.Common import *
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from common import DeviceNode
class PedestrianDetectionAdas2Node(DeviceNode):
def __init__(self, name):
super(PedestrianDetectionAdas2Node, self).__init__(name)
self.frame = self.createInputPin('frame', 'FramePin')
self.out_tensor = self.createOutputPin('out_tensor', 'NeuralTensorPin')
self.frame.enableOptions(PinOptions.AllowMultipleConnections)
self.out_tensor.enableOptions(PinOptions.AllowMultipleConnections)
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
helper.addInputDataType('FramePin')
helper.addOutputDataType('NeuralTensorPin')
helper.addInputStruct(StructureType.Multi)
helper.addOutputStruct(StructureType.Multi)
return helper
@staticmethod
def category():
return 'Model Zoo'
@staticmethod
def keywords():
return []
@staticmethod
def description():
return "Description in rst format."
def build_pipeline(self, pipeline):
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(str((Path(__file__).parent / 'models/pedestrian-detection-adas-0002.blob').resolve()))
self.connection_map["out_tensor"] = detection_nn.out
self.connection_map["frame"] = detection_nn.input
|
198863
|
import json
import requests
octopus_server_uri = 'https://your.octopus.app/api'
octopus_api_key = 'API-YOURAPIKEY'
headers = {'X-Octopus-ApiKey': octopus_api_key}
def get_octopus_resource(uri):
response = requests.get(uri, headers=headers)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def get_by_name(uri, name):
resources = get_octopus_resource(uri)
return next((x for x in resources if x['Name'] == name), None)
space_name = 'Default'
project_name = 'Your Project'
runbook_name = 'Your Runbook'
environment_names = ['Development', 'Test']
environments = []
# Optional tenant Name
tenant_name = ''
tenantId = None
space = get_by_name('{0}/spaces/all'.format(octopus_server_uri), space_name)
project = get_by_name('{0}/{1}/projects/all'.format(octopus_server_uri, space['Id']), project_name)
runbook = get_by_name('{0}/{1}/runbooks/all'.format(octopus_server_uri, space['Id']), runbook_name)
if tenant_name:
tenant = get_by_name('{0}/{1}/tenants/all'.format(octopus_server_uri, space['Id']), tenant_name)
tenantId = tenant['Id']
environments = get_octopus_resource(
'{0}/{1}/environments/all'.format(octopus_server_uri, space['Id']))
environments = [e['Id']
for e in environments if e['Name'] in environment_names]
for environmentId in environments:
print('Running runbook {0} in {1}'.format(runbook_name, environmentId))
uri = '{0}/{1}/runbookRuns'.format(octopus_server_uri, space['Id'])
runbook_run = {
'RunbookId': runbook['Id'],
'RunbookSnapshotId': runbook['PublishedRunbookSnapshotId'],
'EnvironmentId': environmentId,
'TenantId': tenantId,
'SkipActions': None,
'SpecificMachineIds': None,
'ExcludedMachineIds': None
}
response = requests.post(uri, headers=headers, json=runbook_run)
response.raise_for_status()
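# The POST returns the created runbook run; an illustrative way to confirm it
# (field names follow the Octopus runbookRuns resource; TaskId is assumed):
#
#   run = json.loads(response.content.decode('utf-8'))
#   print('Created run {0} (server task {1})'.format(run['Id'], run['TaskId']))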
|
198876
|
import json
import boto3
from datetime import datetime, timedelta
from botocore.exceptions import ClientError
import os
import time
def get_message_for_slack(event_details, event_type, affected_accounts, affected_entities, slack_webhook):
message = ""
summary = ""
if slack_webhook == "webhook":
if len(affected_entities) >= 1:
affected_entities = "\n".join(affected_entities)
if affected_entities == "UNKNOWN":
affected_entities = "All resources\nin region"
else:
affected_entities = "All resources\nin region"
if len(affected_accounts) >= 1:
affected_accounts = "\n".join(affected_accounts)
else:
affected_accounts = "All accounts\nin region"
if event_type == "create":
summary += (
f":rotating_light:*[NEW] AWS Health reported an issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region.*"
)
message = {
"text": summary,
"attachments": [
{
"color": "danger",
"fields": [
{ "title": "Account(s)", "value": affected_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
elif event_type == "resolve":
summary += (
f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region is now resolved.*"
)
message = {
"text": summary,
"attachments": [
{
"color": "00ff00",
"fields": [
{ "title": "Account(s)", "value": affected_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
else:
if len(affected_entities) >= 1:
affected_entities = "\n".join(affected_entities)
if affected_entities == "UNKNOWN":
affected_entities = "All resources\nin region"
else:
affected_entities = "All resources in region"
if len(affected_accounts) >= 1:
affected_accounts = "\n".join(affected_accounts)
else:
affected_accounts = "All accounts in region"
if event_type == "create":
summary += (
f":rotating_light:*[NEW] AWS Health reported an issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region.*"
)
message = {
"text": summary,
"accounts": affected_accounts,
"resources": affected_entities,
"service": event_details['successfulSet'][0]['event']['service'],
"region": event_details['successfulSet'][0]['event']['region'],
"start_time": cleanup_time(event_details['successfulSet'][0]['event']['startTime']),
"status": event_details['successfulSet'][0]['event']['statusCode'],
"event_arn": event_details['successfulSet'][0]['event']['arn'],
"updates": get_last_aws_update(event_details)
}
elif event_type == "resolve":
summary += (
f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region is now resolved.*"
)
message = {
"text": summary,
"accounts": affected_accounts,
"resources": affected_entities,
"service": event_details['successfulSet'][0]['event']['service'],
"region": event_details['successfulSet'][0]['event']['region'],
"start_time": cleanup_time(event_details['successfulSet'][0]['event']['startTime']),
"status": event_details['successfulSet'][0]['event']['statusCode'],
"event_arn": event_details['successfulSet'][0]['event']['arn'],
"updates": get_last_aws_update(event_details)
}
print("Message sent to Slack: ", message)
return message
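# Delivery happens elsewhere in the module; a minimal sketch of posting the
# payload built above to a Slack-style webhook (urllib.request is standard
# library; the URL below is a placeholder):
#
#   from urllib.request import Request, urlopen
#   req = Request('https://hooks.slack.com/services/XXX/YYY/ZZZ',
#                 data=json.dumps(message).encode('utf-8'),
#                 headers={'Content-Type': 'application/json'})
#   urlopen(req)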
def get_message_for_eventbridge(event_details, event_type, affected_accounts, affected_entities):
message = ""
if len(affected_entities) >= 1:
affected_entities = "\n".join(affected_entities)
if affected_entities == "UNKNOWN":
affected_entities = "All resources\nin region"
else:
affected_entities = "All resources\nin region"
if len(affected_accounts) >= 1:
affected_accounts = "\n".join(affected_accounts)
else:
affected_accounts = "All accounts\nin region"
if event_type == "create":
message = {
"attachments": [
{
"fields": [
{ "title": "Account(s)", "value": affected_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
elif event_type == "resolve":
message = {
"attachments": [
{
"fields": [
{ "title": "Account(s)", "value": affected_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
print("SHD Message generated for EventBridge : ", message)
return message
def get_org_message_for_eventbridge(event_details, event_type, affected_org_accounts, affected_org_entities):
message = ""
if len(affected_org_entities) >= 1:
affected_org_entities = "\n".join(affected_org_entities)
else:
affected_org_entities = "All resources\nin region"
if len(affected_org_accounts) >= 1:
affected_org_accounts = "\n".join(affected_org_accounts)
else:
affected_org_accounts = "All accounts\nin region"
if event_type == "create":
message = {
"attachments": [
{
"fields": [
{ "title": "Account(s)", "value": affected_org_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_org_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
elif event_type == "resolve":
message = {
"attachments": [
{
"fields": [
{ "title": "Account(s)", "value": affected_org_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_org_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
print("PHD/SHD Message generated for Event Bridge: ", message)
return message
def get_org_message_for_slack(event_details, event_type, affected_org_accounts, affected_org_entities, slack_webhook):
message = ""
summary = ""
if slack_webhook == "webhook":
if len(affected_org_entities) >= 1:
affected_org_entities = "\n".join(affected_org_entities)
else:
affected_org_entities = "All resources\nin region"
if len(affected_org_accounts) >= 1:
affected_org_accounts = "\n".join(affected_org_accounts)
else:
affected_org_accounts = "All accounts\nin region"
if event_type == "create":
summary += (
f":rotating_light:*[NEW] AWS Health reported an issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region.*"
)
message = {
"text": summary,
"attachments": [
{
"color": "danger",
"fields": [
{ "title": "Account(s)", "value": affected_org_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_org_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
elif event_type == "resolve":
summary += (
f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region is now resolved.*"
)
message = {
"text": summary,
"attachments": [
{
"color": "00ff00",
"fields": [
{ "title": "Account(s)", "value": affected_org_accounts, "short": True },
{ "title": "Resource(s)", "value": affected_org_entities, "short": True },
{ "title": "Service", "value": event_details['successfulSet'][0]['event']['service'], "short": True },
{ "title": "Region", "value": event_details['successfulSet'][0]['event']['region'], "short": True },
{ "title": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime']), "short": True },
{ "title": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime']), "short": True },
{ "title": "Status", "value": event_details['successfulSet'][0]['event']['statusCode'], "short": True },
{ "title": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn'], "short": False },
{ "title": "Updates", "value": get_last_aws_update(event_details), "short": False }
],
}
]
}
else:
if len(affected_org_entities) >= 1:
affected_org_entities = "\n".join(affected_org_entities)
else:
affected_org_entities = "All resources in region"
if len(affected_org_accounts) >= 1:
affected_org_accounts = "\n".join(affected_org_accounts)
else:
affected_org_accounts = "All accounts in region"
if event_type == "create":
summary += (
f":rotating_light:*[NEW] AWS Health reported an issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region.*"
)
message = {
"text": summary,
"accounts": affected_org_accounts,
"resources": affected_org_entities,
"service": event_details['successfulSet'][0]['event']['service'],
"region": event_details['successfulSet'][0]['event']['region'],
"start_time": cleanup_time(event_details['successfulSet'][0]['event']['startTime']),
"status": event_details['successfulSet'][0]['event']['statusCode'],
"event_arn": event_details['successfulSet'][0]['event']['arn'],
"updates": get_last_aws_update(event_details)
}
elif event_type == "resolve":
summary += (
f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event_details['successfulSet'][0]['event']['service'].upper()} service in "
f"the {event_details['successfulSet'][0]['event']['region'].upper()} region is now resolved.*"
)
message = {
"text": summary,
"accounts": affected_org_accounts,
"resources": affected_org_entities,
"service": event_details['successfulSet'][0]['event']['service'],
"region": event_details['successfulSet'][0]['event']['region'],
"start_time": cleanup_time(event_details['successfulSet'][0]['event']['startTime']),
"status": event_details['successfulSet'][0]['event']['statusCode'],
"event_arn": event_details['successfulSet'][0]['event']['arn'],
"updates": get_last_aws_update(event_details)
}
print("Message sent to Slack: ", message)
return message
def get_message_for_chime(event_details, event_type, affected_accounts, affected_entities):
message = ""
if len(affected_entities) >= 1:
affected_entities = "\n".join(affected_entities)
if affected_entities == "UNKNOWN":
affected_entities = "All resources\nin region"
else:
affected_entities = "All resources\nin region"
if len(affected_accounts) >= 1:
affected_accounts = "\n".join(affected_accounts)
else:
affected_accounts = "All accounts\nin region"
summary = ""
if event_type == "create":
message = str("/md" + "\n" + "**:rotating_light:\[NEW\] AWS Health reported an issue with the " + event_details['successfulSet'][0]['event']['service'].upper() + " service in " + event_details['successfulSet'][0]['event']['region'].upper() + " region.**" + "\n"
"---" + "\n"
"**Account(s)**: " + affected_accounts + "\n"
"**Resource(s)**: " + affected_entities + "\n"
"**Service**: " + event_details['successfulSet'][0]['event']['service'] + "\n"
"**Region**: " + event_details['successfulSet'][0]['event']['region'] + "\n"
"**Start Time (UTC)**: " + cleanup_time(event_details['successfulSet'][0]['event']['startTime']) + "\n"
"**Status**: " + event_details['successfulSet'][0]['event']['statusCode'] + "\n"
"**Event ARN**: " + event_details['successfulSet'][0]['event']['arn'] + "\n"
"**Updates:**" + "\n" + get_last_aws_update(event_details)
)
elif event_type == "resolve":
message = str("/md" + "\n" + "**:heavy_check_mark:\[RESOLVED\] The AWS Health issue with the " + event_details['successfulSet'][0]['event']['service'].upper() + " service in " + event_details['successfulSet'][0]['event']['region'].upper() + " region is now resolved.**" + "\n"
"---" + "\n"
"**Account(s)**: " + affected_accounts + "\n"
"**Resource(s)**: " + affected_entities + "\n"
"**Service**: " + event_details['successfulSet'][0]['event']['service'] + "\n"
"**Region**: " + event_details['successfulSet'][0]['event']['region'] + "\n"
"**Start Time (UTC)**: " + cleanup_time(event_details['successfulSet'][0]['event']['startTime']) + "\n"
"**End Time (UTC)**: " + cleanup_time(event_details['successfulSet'][0]['event']['endTime']) + "\n"
"**Status**: " + event_details['successfulSet'][0]['event']['statusCode'] + "\n"
"**Event ARN**: " + event_details['successfulSet'][0]['event']['arn'] + "\n"
"**Updates:**" + "\n" + get_last_aws_update(event_details)
)
print("Message sent to Chime: ", message)
return message
def get_org_message_for_chime(event_details, event_type, affected_org_accounts, affected_org_entities):
message = ""
summary = ""
if len(affected_org_entities) >= 1:
affected_org_entities = "\n".join(affected_org_entities)
else:
affected_org_entities = "All resources in region"
if len(affected_org_accounts) >= 1:
affected_org_accounts = "\n".join(affected_org_accounts)
else:
affected_org_accounts = "All accounts in region"
if event_type == "create":
message = str("/md" + "\n" + "**:rotating_light:\[NEW\] AWS Health reported an issue with the " + event_details['successfulSet'][0]['event']['service'].upper()) + " service in " + str(event_details['successfulSet'][0]['event']['region'].upper() + " region**" + "\n"
"---" + "\n"
"**Account(s)**: " + affected_org_accounts + "\n"
"**Resource(s)**: " + affected_org_entities + "\n"
"**Service**: " + event_details['successfulSet'][0]['event']['service'] + "\n"
"**Region**: " + event_details['successfulSet'][0]['event']['region'] + "\n"
"**Start Time (UTC)**: " + cleanup_time(event_details['successfulSet'][0]['event']['startTime']) + "\n"
"**Status**: " + event_details['successfulSet'][0]['event']['statusCode'] + "\n"
"**Event ARN**: " + event_details['successfulSet'][0]['event']['arn'] + "\n"
"**Updates:**" + "\n" + get_last_aws_update(event_details)
)
elif event_type == "resolve":
message = str("/md" + "\n" + "**:heavy_check_mark:\[RESOLVED\] The AWS Health issue with the " + event_details['successfulSet'][0]['event']['service'].upper()) + " service in " + str(event_details['successfulSet'][0]['event']['region'].upper() + " region is now resolved.**" + "\n"
"---" + "\n"
"**Account(s)**: " + affected_org_accounts + "\n"
"**Resource(s)**: " + affected_org_entities + "\n"
"**Service**: " + event_details['successfulSet'][0]['event']['service'] + "\n"
"**Region**: " + event_details['successfulSet'][0]['event']['region'] + "\n"
"**Start Time (UTC)**: " + cleanup_time(event_details['successfulSet'][0]['event']['startTime']) + "\n"
"**End Time (UTC)**: " + cleanup_time(event_details['successfulSet'][0]['event']['endTime']) + "\n"
"**Status**: " + event_details['successfulSet'][0]['event']['statusCode'] + "\n"
"**Event ARN**: " + event_details['successfulSet'][0]['event']['arn'] + "\n"
"**Updates:**" + "\n" + get_last_aws_update(event_details)
)
print("Message sent to Chime: ", message)
return message
def get_message_for_teams(event_details, event_type, affected_accounts, affected_entities):
message = ""
if len(affected_entities) >= 1:
affected_entities = "\n".join(affected_entities)
if affected_entities == "UNKNOWN":
affected_entities = "All resources\nin region"
else:
affected_entities = "All resources\nin region"
if len(affected_accounts) >= 1:
affected_accounts = "\n".join(affected_accounts)
else:
affected_accounts = "All accounts\nin region"
summary = ""
if event_type == "create":
title = "🚨 [NEW] AWS Health reported an issue with the " + event_details['successfulSet'][0]['event'][
'service'].upper() + " service in the " + event_details['successfulSet'][0]['event'][
'region'].upper() + " region."
message = {
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "FF0000",
"summary": "AWS Health Aware Alert",
"sections": [
{
"activityTitle": str(title),
"markdown": False,
"facts": [
{"name": "Account(s)", "value": affected_accounts},
{"name": "Resource(s)", "value": affected_entities},
{"name": "Service", "value": event_details['successfulSet'][0]['event']['service']},
{"name": "Region", "value": event_details['successfulSet'][0]['event']['region']},
{"name": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime'])},
{"name": "Status", "value": event_details['successfulSet'][0]['event']['statusCode']},
{"name": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn']},
{"name": "Updates", "value": get_last_aws_update(event_details)}
],
}
]
}
elif event_type == "resolve":
title = "✅ [RESOLVED] The AWS Health issue with the " + event_details['successfulSet'][0]['event'][
'service'].upper() + " service in the " + event_details['successfulSet'][0]['event'][
'region'].upper() + " region is now resolved."
message = {
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "00ff00",
"summary": "AWS Health Aware Alert",
"sections": [
{
"activityTitle": str(title),
"markdown": False,
"facts": [
{"name": "Account(s)", "value": affected_accounts},
{"name": "Resource(s)", "value": affected_entities},
{"name": "Service", "value": event_details['successfulSet'][0]['event']['service']},
{"name": "Region", "value": event_details['successfulSet'][0]['event']['region']},
{"name": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime'])},
{"name": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime'])},
{"name": "Status", "value": event_details['successfulSet'][0]['event']['statusCode']},
{"name": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn']},
{"name": "Updates", "value": get_last_aws_update(event_details)}
],
}
]
}
print("Message sent to Teams: ", message)
return message
def get_org_message_for_teams(event_details, event_type, affected_org_accounts, affected_org_entities):
message = ""
summary = ""
if len(affected_org_entities) >= 1:
affected_org_entities = "\n".join(affected_org_entities)
else:
affected_org_entities = "All resources in region"
if len(affected_org_accounts) >= 1:
affected_org_accounts = "\n".join(affected_org_accounts)
else:
affected_org_accounts = "All accounts in region"
if event_type == "create":
title = "🚨 [NEW] AWS Health reported an issue with the " + event_details['successfulSet'][0]['event'][
'service'].upper() + " service in the " + event_details['successfulSet'][0]['event'][
'region'].upper() + " region."
message = {
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "FF0000",
"summary": "AWS Health Aware Alert",
"sections": [
{
"activityTitle": title,
"markdown": False,
"facts": [
{"name": "Account(s)", "value": affected_org_accounts},
{"name": "Resource(s)", "value": affected_org_entities},
{"name": "Service", "value": event_details['successfulSet'][0]['event']['service']},
{"name": "Region", "value": event_details['successfulSet'][0]['event']['region']},
{"name": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime'])},
{"name": "Status", "value": event_details['successfulSet'][0]['event']['statusCode']},
{"name": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn']},
{"name": "Updates", "value": event_details['successfulSet'][0]['eventDescription']['latestDescription']}
],
}
]
}
elif event_type == "resolve":
title = "✅ [RESOLVED] The AWS Health issue with the " + event_details['successfulSet'][0]['event'][
'service'].upper() + " service in the " + event_details['successfulSet'][0]['event'][
'region'].upper() + " region is now resolved."
message = {
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "00ff00",
"summary": "AWS Health Aware Alert",
"sections": [
{
"activityTitle": title,
"markdown": False,
"facts": [
{"name": "Account(s)", "value": affected_org_accounts},
{"name": "Resource(s)", "value": affected_org_entities},
{"name": "Service", "value": event_details['successfulSet'][0]['event']['service']},
{"name": "Region", "value": event_details['successfulSet'][0]['event']['region']},
{"name": "Start Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['startTime'])},
{"name": "End Time (UTC)", "value": cleanup_time(event_details['successfulSet'][0]['event']['endTime'])},
{"name": "Status", "value": event_details['successfulSet'][0]['event']['statusCode']},
{"name": "Event ARN", "value": event_details['successfulSet'][0]['event']['arn']},
{"name": "Updates", "value": event_details['successfulSet'][0]['eventDescription']['latestDescription']}
],
}
]
}
    print("Message sent to Teams: ", message)
    return message
def get_message_for_email(event_details, event_type, affected_accounts, affected_entities):
if len(affected_entities) >= 1:
affected_entities = "\n".join(affected_entities)
if affected_entities == "UNKNOWN":
affected_entities = "All resources\nin region"
else:
affected_entities = "All resources\nin region"
if len(affected_accounts) >= 1:
affected_accounts = "\n".join(affected_accounts)
else:
affected_accounts = "All accounts\nin region"
if event_type == "create":
BODY_HTML = f"""
<html>
<body>
<h>Greetings from AWS Health Aware,</h><br>
<p>There is an AWS incident that is in effect which may likely impact your resources. Here are the details:<br><br>
<b>Account(s):</b> {affected_accounts}<br>
<b>Resource(s):</b> {affected_entities}<br>
<b>Service:</b> {event_details['successfulSet'][0]['event']['service']}<br>
<b>Region:</b> {event_details['successfulSet'][0]['event']['region']}<br>
<b>Start Time (UTC):</b> {cleanup_time(event_details['successfulSet'][0]['event']['startTime'])}<br>
<b>Status:</b> {event_details['successfulSet'][0]['event']['statusCode']}<br>
<b>Event ARN:</b> {event_details['successfulSet'][0]['event']['arn']}<br>
<b>Updates:</b> {event_details['successfulSet'][0]['eventDescription']['latestDescription']}<br><br>
For updates, please visit the <a href=https://status.aws.amazon.com>AWS Service Health Dashboard</a><br>
If you are experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
Thanks, <br><br>AHA: AWS Health Aware
</p>
</body>
</html>
"""
else:
BODY_HTML = f"""
<html>
<body>
<h>Greetings again from AWS Health Aware,</h><br>
<p>Good news! The AWS Health incident from earlier has now been marked as resolved.<br><br>
<b>Account(s):</b> {affected_accounts}<br>
<b>Resource(s):</b> {affected_entities}<br>
<b>Service:</b> {event_details['successfulSet'][0]['event']['service']}<br>
<b>Region:</b> {event_details['successfulSet'][0]['event']['region']}<br>
<b>Start Time (UTC):</b> {cleanup_time(event_details['successfulSet'][0]['event']['startTime'])}<br>
<b>End Time (UTC):</b> {cleanup_time(event_details['successfulSet'][0]['event']['endTime'])}<br>
<b>Status:</b> {event_details['successfulSet'][0]['event']['statusCode']}<br>
<b>Event ARN:</b> {event_details['successfulSet'][0]['event']['arn']}<br>
<b>Updates:</b> {event_details['successfulSet'][0]['eventDescription']['latestDescription']}<br><br>
If you are still experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
<br><br>
Thanks, <br><br>AHA: AWS Health Aware
</p>
</body>
</html>
"""
print("Message sent to Email: ", BODY_HTML)
return BODY_HTML
def get_org_message_for_email(event_details, event_type, affected_org_accounts, affected_org_entities):
if len(affected_org_entities) >= 1:
affected_org_entities = "\n".join(affected_org_entities)
else:
affected_org_entities = "All services related resources in region"
if len(affected_org_accounts) >= 1:
affected_org_accounts = "\n".join(affected_org_accounts)
else:
affected_org_accounts = "All accounts in region"
if event_type == "create":
BODY_HTML = f"""
<html>
<body>
<h>Greetings from AWS Health Aware,</h><br>
<p>There is an AWS incident that is in effect which may likely impact your resources. Here are the details:<br><br>
<b>Account(s):</b> {affected_org_accounts}<br>
<b>Resource(s):</b> {affected_org_entities}<br>
<b>Service:</b> {event_details['successfulSet'][0]['event']['service']}<br>
<b>Region:</b> {event_details['successfulSet'][0]['event']['region']}<br>
<b>Start Time (UTC):</b> {cleanup_time(event_details['successfulSet'][0]['event']['startTime'])}<br>
<b>Status:</b> {event_details['successfulSet'][0]['event']['statusCode']}<br>
<b>Event ARN:</b> {event_details['successfulSet'][0]['event']['arn']}<br>
<b>Updates:</b> {event_details['successfulSet'][0]['eventDescription']['latestDescription']}<br><br>
For updates, please visit the <a href=https://status.aws.amazon.com>AWS Service Health Dashboard</a><br>
If you are experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
Thanks, <br><br>AHA: AWS Health Aware
</p>
</body>
</html>
"""
else:
BODY_HTML = f"""
<html>
<body>
<h>Greetings again from AWS Health Aware,</h><br>
<p>Good news! The AWS Health incident from earlier has now been marked as resolved.<br><br>
<b>Account(s):</b> {affected_org_accounts}<br>
<b>Resource(s):</b> {affected_org_entities}<br>
<b>Service:</b> {event_details['successfulSet'][0]['event']['service']}<br>
<b>Region:</b> {event_details['successfulSet'][0]['event']['region']}<br>
<b>Start Time (UTC):</b> {cleanup_time(event_details['successfulSet'][0]['event']['startTime'])}<br>
<b>End Time (UTC):</b> {cleanup_time(event_details['successfulSet'][0]['event']['endTime'])}<br>
<b>Status:</b> {event_details['successfulSet'][0]['event']['statusCode']}<br>
<b>Event ARN:</b> {event_details['successfulSet'][0]['event']['arn']}<br>
<b>Updates:</b> {event_details['successfulSet'][0]['eventDescription']['latestDescription']}<br><br>
If you are still experiencing issues related to this event, please open an <a href=https://console.aws.amazon.com/support/home>AWS Support</a> case within your account.<br><br>
Thanks, <br><br>AHA: AWS Health Aware
</p>
</body>
</html>
"""
print("Message sent to Email: ", BODY_HTML)
return BODY_HTML
def cleanup_time(event_time):
"""
Takes as input a datetime string as received from The AWS Health event_detail call. It converts this string to a
datetime object, changes the timezone to EST and then formats it into a readable string to display in Slack.
:param event_time: datetime string
:type event_time: str
:return: A formatted string that includes the month, date, year and 12-hour time.
:rtype: str
"""
event_time = datetime.strptime(event_time[:16], '%Y-%m-%d %H:%M')
return event_time.strftime("%Y-%m-%d %H:%M:%S")
def get_last_aws_update(event_details):
"""
Takes as input the event_details and returns the last update from AWS (instead of the entire timeline)
:param event_details: Detailed information about a specific AWS health event.
:type event_details: dict
:return: the last update message from AWS
:rtype: str
"""
aws_message = event_details['successfulSet'][0]['eventDescription']['latestDescription']
return aws_message
def format_date(event_time):
"""
Takes as input a datetime string as received from The AWS Health event_detail call. It converts this string to a
datetime object, changes the timezone to EST and then formats it into a readable string to display in Slack.
:param event_time: datetime string
:type event_time: str
:return: A formatted string that includes the month, date, year and 12-hour time.
:rtype: str
"""
event_time = datetime.strptime(event_time[:16], '%Y-%m-%d %H:%M')
return event_time.strftime('%B %d, %Y at %I:%M %p')
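# Worked example with the same truncation behaviour:
#   format_date("2021-05-03 14:22:17.000Z") -> "May 03, 2021 at 02:22 PM"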
|
198900
|
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.template.response import TemplateResponse
from wagtail.contrib.routable_page.models import RoutablePageMixin, route
from flags.state import flag_enabled
from v1.documents import FilterablePagesDocumentSearch
from v1.feeds import FilterableFeed
from v1.forms import FilterableListForm
from v1.models.learn_page import AbstractFilterPage
from v1.util.ref import get_category_children
from v1.util.util import get_secondary_nav_items
class FilterableListMixin(RoutablePageMixin):
"""Wagtail Page mixin that allows for filtering of other pages."""
filterable_per_page_limit = 25
"""Number of results to return per page."""
do_not_index = False
"""Determines whether we tell crawlers to index the page or not."""
filterable_categories = None
"""Used for activity-log and newsroom to determine
which pages to render when sitewide"""
@staticmethod
def get_model_class():
return AbstractFilterPage
@staticmethod
def get_form_class():
return FilterableListForm
@staticmethod
def get_search_class():
return FilterablePagesDocumentSearch
def get_filterable_list_wagtail_block(self):
return next(
(b for b in self.content if b.block_type == 'filter_controls'),
None
)
def get_filterable_root(self):
filterable_list_block = self.get_filterable_list_wagtail_block()
if filterable_list_block is None:
return '/'
if filterable_list_block.value['filter_children']:
return self.get_url()
return '/'
def get_filterable_search(self):
"""Return a FilterablePagesDocumentSearch object"""
site = self.get_site()
if not site:
return None
return self.get_search_class()(
prefix=self.get_filterable_root()
)
def get_cache_key_prefix(self):
return self.url
def get_context(self, request, *args, **kwargs):
context = super().get_context(
request, *args, **kwargs
)
form_data, has_active_filters = self.get_form_data(request.GET)
filterable_search = self.get_filterable_search()
has_unfiltered_results = filterable_search.count() > 0
form = self.get_form_class()(
form_data,
wagtail_block=self.get_filterable_list_wagtail_block(),
filterable_categories=self.filterable_categories,
filterable_search=filterable_search,
cache_key_prefix=self.get_cache_key_prefix(),
)
filter_data = self.process_form(request, form)
# flag check to enable or disable archive filter options
if flag_enabled('HIDE_ARCHIVE_FILTER_OPTIONS', request=request):
has_archived_posts = False
else:
has_archived_posts = any(
result for result in form.all_filterable_results
if result.is_archived == 'yes'
)
context.update({
'filter_data': filter_data,
'get_secondary_nav_items': get_secondary_nav_items,
'has_active_filters': has_active_filters,
'has_archived_posts': has_archived_posts,
'has_unfiltered_results': has_unfiltered_results,
})
return context
def process_form(self, request, form):
filter_data = {}
if form.is_valid():
paginator = Paginator(form.get_page_set(),
self.filterable_per_page_limit)
page = request.GET.get('page')
# Get the page number in the request and get the page from the
# paginator to serve.
try:
pages = paginator.page(page)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
filter_data['page_set'] = pages
else:
paginator = Paginator([], self.filterable_per_page_limit)
filter_data['page_set'] = paginator.page(1)
filter_data['form'] = form
return filter_data
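    # Pagination behaviour: a non-integer ?page= value falls back to page 1, an
    # out-of-range page serves the last page, and an invalid form serves a single
    # empty page.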
def set_do_not_index(self, field, value):
"""Do not index queries unless they consist of a single topic field."""
if field != 'topics' or len(value) > 1:
self.do_not_index = True
# Set up the form's data either with values from the GET request
# or with defaults based on whether it's a dropdown/list or a text field
def get_form_data(self, request_dict):
form_data = {'archived': 'include'}
has_active_filters = False
for field in self.get_form_class().declared_fields:
if field in ['categories', 'topics', 'language', 'statuses', 'products']: # noqa: E501
value = request_dict.getlist(field, [])
else:
value = request_dict.get(field, '')
if value:
form_data[field] = value
has_active_filters = True
self.set_do_not_index(field, value)
return form_data, has_active_filters
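    # Illustrative (hypothetical) example: assuming 'topics' and 'language' are declared
    # form fields, a querystring like ?topics=mortgages&topics=loans&language=es yields
    # ({'archived': 'include', 'topics': ['mortgages', 'loans'], 'language': ['es']}, True)
    # and sets do_not_index, since the query is not a single-topic filter.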
def render(self, request, *args, context_overrides=None, **kwargs):
"""Render with optional context overrides."""
# TODO: the context-overriding and template rendering can be replaced
# with super().render() in Wagtail 2.11, where RoutablePageMixin gains
# the context_overrides functionality built-in.
context = self.get_context(request, *args, **kwargs)
context.update(context_overrides or {})
response = TemplateResponse(
request,
self.get_template(request, *args, **kwargs),
context
)
# Set noindex for crawlers if needed
if self.do_not_index:
response['X-Robots-Tag'] = 'noindex'
return response
@route(r'^$')
def index_route(self, request):
return self.render(request)
@route(r'^feed/$')
def feed_route(self, request, *args, **kwargs):
context = self.get_context(request)
return FilterableFeed(self, context)(request)
class CategoryFilterableMixin:
filterable_categories = []
"""Determines page categories to be filtered; see filterable_pages."""
def get_filterable_search(self):
"""Return the queryset of pages to be filtered by this page.
The class property filterable_categories can be set to a list of page
categories from the set in v1.util.ref.categories. If set, this page
will only filter pages that are tagged with a tag in those categories.
By default this is an empty list and all page tags are eligible.
"""
category_names = get_category_children(self.filterable_categories)
filterable_search = self.get_search_class()(
prefix=self.get_filterable_root()
)
filterable_search.filter_categories(categories=category_names)
return filterable_search
|
198931
|
from pprint import pprint
from copy import deepcopy
import torch
import json
from tqdm import tqdm
# from nltk.tokenize import sent_tokenize
from collections import Counter, defaultdict
from .data import SlotFeatures
def sent_tokenize(text, start_pos):
"""TODO"""
import spacy
nlp = spacy.load("en_core_web_sm")
for sent in nlp(text).sents:
yield [sent[0].idx + start_pos, sent.text]
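# Rough usage sketch (exact splits depend on the spaCy model's sentence segmentation):
#   list(sent_tokenize("Hello there. General Kenobi.", 100))
#   -> [[100, "Hello there."], [113, "General Kenobi."]]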
def find_subsentence(offset, sentences):
sentences_ = sentences.copy() + [(sentences[-1][0] + len(sentences[-1][1]) + 1, "")]
# if len(sentences) == 1:
# return 0 if offset >= sentences[0][0] and offset < sentences[0][0] + len(sentences[0][1]) else -1
return next((i - 1 for i, (idx, sent) in enumerate(sentences_) if offset < idx), -1)
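# Worked example: with sentences = [[0, "Hello there."], [13, "General Kenobi."]],
# find_subsentence(5, sentences) -> 0 and find_subsentence(14, sentences) -> 1;
# offsets beyond the appended sentinel (or before the first sentence) give -1.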
# def get_cluster_id(entity_id, clusters):
# return next((i for i, c in enumerate(clusters) if entity_id in c), None)
# def create_sent_from_tokens(tokens, start_idx=None, end_idx=None):
# if not start_idx:
# start_idx = tokens[0][1]
# if not end_idx:
# end_idx = tokens[-1][-1]
# sent = ""
# for token, start, end in tokens:
# if start < start_idx:
# continue
# sent += token if len(sent) + start_idx == start else " " + token
# if len(sent) >= end_idx:
# return sent.rstrip()
# return sent.rstrip()
# def create_windowed_sentence(tokens, trigger_tokens, window):
# positions = []
# for trigger in trigger_tokens:
# trigger_pos = next( (i for i, token in enumerate(tokens) if token[0] == trigger), None)
# if trigger_pos is None:
# raise IndexError(f"Trigger {trigger} not in {tokens}\n{trigger}")
# positions.append(trigger_pos)
# trigger_pos = positions[len(positions)//2]
# sent = create_sent_from_tokens(tokens[max(0, trigger_pos-window):trigger_pos+window])
# min_pos, max_pos = tokens[max(0, trigger_pos-window)][1], tokens[min(trigger_pos+window, len(tokens)-1)][-1]
# return sent, min_pos, max_pos
class WikiEventsArgumentDataset(torch.utils.data.Dataset):
def __init__(
self,
data_path,
filter_events=None,
create_negatives=True,
max_sentence_distance=3,
mark_trigger=False,
force_preprocess=False,
**kwargs,
):
super().__init__()
self.data_path = data_path
self.max_sentence_distance = max_sentence_distance
self.create_negatives = create_negatives
self.filter_events = filter_events
self.mark_trigger = mark_trigger
path_name = data_path.replace(".jsonl", "")
path_name = f"{path_name}.prepro.{max_sentence_distance}.{create_negatives}.jsonl"
if self.mark_trigger:
path_name = path_name.replace(".jsonl", ".trigger.jsonl")
try:
if force_preprocess:
raise Exception()
self._from_preprocessed(path_name)
        except Exception:  # fall back to re-running preprocessing if the cache is missing or force_preprocess is set
self._load()
self._save_preprocessed(path_name)
print(path_name)
self.labels = list(set(inst.role for inst in self.instances))
self.label2id = {label: i for i, label in enumerate(self.labels)}
self.id2label = self.labels.copy()
def __getitem__(self, idx):
return self.instances[idx]
def __len__(self):
return len(self.instances)
def _save_preprocessed(self, path):
with open(path, "wt") as f:
for instance in self.instances:
f.write(f"{json.dumps(instance.__dict__)}\n")
def _from_preprocessed(self, path):
with open(path) as f:
self.instances = [SlotFeatures(**json.loads(line)) for line in f]
def _load(self):
self.instances = []
with open(self.data_path) as data_f:
for i, data_line in tqdm(enumerate(data_f)):
instance = json.loads(data_line)
entities = {entity["id"]: entity for entity in instance["entity_mentions"]}
tokens = [token for sentence in instance["sentences"] for token in sentence[0]]
if self.max_sentence_distance is not None:
all_sub_sentences = [
list(sent_tokenize(text, tokens[0][1])) for tokens, text in instance["sentences"]
]
for event in instance["event_mentions"]:
if self.filter_events:
event_types = event["event_type"].split(".")
if all(
[
event_types[0] not in self.filter_events,
".".join(event_types[:2]) not in self.filter_events,
".".join(event_types) not in self.filter_events,
]
):
continue
sentence = instance["sentences"][event["trigger"]["sent_idx"]][-1]
sent_entities = {
key: deepcopy(entity)
for key, entity in entities.items()
if entity["sent_idx"] == event["trigger"]["sent_idx"]
}
# sub_sentences = sent_tokenize(sentence, instance['sentences'][event['trigger']['sent_idx']][0][0][1])
# sub_sentences = list(sub_sentences)
if self.max_sentence_distance is not None:
sub_sentences = deepcopy(all_sub_sentences[event["trigger"]["sent_idx"]])
trigger_sub_sentence = find_subsentence(tokens[event["trigger"]["start"]][1], sub_sentences)
if self.mark_trigger:
start_pos, trigger_sub_sentence_ = sub_sentences[trigger_sub_sentence]
marked_sentence = (
trigger_sub_sentence_[: tokens[event["trigger"]["start"]][1] - start_pos]
+ "<trg> "
+ trigger_sub_sentence_[
tokens[event["trigger"]["start"]][1]
- start_pos : tokens[event["trigger"]["end"] - 1][-1]
- start_pos
]
+ " <trg>"
+ trigger_sub_sentence_[tokens[event["trigger"]["end"] - 1][-1] - start_pos :]
)
sub_sentences[trigger_sub_sentence][-1] = marked_sentence
if trigger_sub_sentence < 0:
pprint(event["trigger"])
pprint(tokens[event["trigger"]["start"]])
print(sentence)
pprint(sub_sentences)
                            raise ValueError(
                                "Trigger sub-sentence idx must be non-negative. Found " + str(trigger_sub_sentence)
                            )
for argument in event["arguments"]:
label = (
argument["role"]
if entities[argument["entity_id"]]["sent_idx"] == event["trigger"]["sent_idx"]
else "OOR"
)
if self.max_sentence_distance is not None:
arg_sub_sentence = find_subsentence(
tokens[entities[argument["entity_id"]]["start"]][1],
sub_sentences,
)
if (
(abs(trigger_sub_sentence - arg_sub_sentence) <= self.max_sentence_distance)
and arg_sub_sentence >= 0
and entities[argument["entity_id"]]["sent_idx"] == event["trigger"]["sent_idx"]
):
sentence = (
" ".join(
[
text
for _, text in sub_sentences[
trigger_sub_sentence : max(
arg_sub_sentence,
trigger_sub_sentence + 1,
)
]
]
)
if trigger_sub_sentence <= arg_sub_sentence
else " ".join(
[
text
for _, text in sub_sentences[
arg_sub_sentence : max(
trigger_sub_sentence,
arg_sub_sentence + 1,
)
]
]
)
)
else:
sentence = sub_sentences[trigger_sub_sentence][-1]
if sentence == "":
print(
trigger_sub_sentence,
arg_sub_sentence,
entities[argument["entity_id"]]["sent_idx"],
event["trigger"]["sent_idx"],
)
print(sub_sentences[trigger_sub_sentence:arg_sub_sentence])
                                    raise ValueError("Constructed an empty context for this argument")
label = (
label
if abs(trigger_sub_sentence - arg_sub_sentence) <= self.max_sentence_distance
else "OOR"
)
self.instances.append(
SlotFeatures(
docid=instance["doc_id"],
trigger=event["trigger"]["text"],
trigger_id=event["id"],
trigger_type=event["event_type"],
trigger_sent_idx=event["trigger"]["sent_idx"],
arg=argument["text"],
arg_id=argument["entity_id"],
arg_type=entities[argument["entity_id"]]["entity_type"],
arg_sent_idx=entities[argument["entity_id"]]["sent_idx"],
role=label,
pair_type=f"{event['event_type']}:{entities[argument['entity_id']]['entity_type']}",
context=sentence,
)
)
if argument["entity_id"] in sent_entities:
sent_entities.pop(argument["entity_id"])
if self.create_negatives:
for key, entity in sent_entities.items():
if entity["sent_idx"] != event["trigger"]["sent_idx"]:
continue
if self.max_sentence_distance is not None:
arg_sub_sentence = find_subsentence(tokens[entity["start"]][1], sub_sentences)
if abs(trigger_sub_sentence - arg_sub_sentence) > self.max_sentence_distance:
continue
sentence = (
" ".join(
[
text
for _, text in sub_sentences[
trigger_sub_sentence : max(
arg_sub_sentence,
trigger_sub_sentence + 1,
)
]
]
)
if trigger_sub_sentence <= arg_sub_sentence
else " ".join(
[
text
for _, text in sub_sentences[
arg_sub_sentence : max(
trigger_sub_sentence,
arg_sub_sentence + 1,
)
]
]
)
)
self.instances.append(
SlotFeatures(
docid=instance["doc_id"],
trigger=event["trigger"]["text"],
trigger_id=event["id"],
trigger_type=event["event_type"],
trigger_sent_idx=event["trigger"]["sent_idx"],
arg=entity["text"],
arg_id=key,
arg_type=entity["entity_type"],
arg_sent_idx=entity["sent_idx"],
role="no_relation",
pair_type=f"{event['event_type']}:{entities[key]['entity_type']}",
context=sentence,
)
)
def to_dict(self, predictions):
instances_copy = deepcopy(self.instances)
inst_per_doc = defaultdict(list)
for inst, pred in zip(instances_copy, predictions):
inst.prediction = pred
inst_per_doc[inst.docid].append(inst)
with open(self.data_path) as f:
for line in f:
instance = json.loads(line)
for event in instance["event_mentions"]:
event["arguments"] = []
for pred in inst_per_doc[instance["doc_id"]]:
if pred.trigger_id == event["id"] and pred.prediction not in [
"no_relation",
"OOR",
]:
event["arguments"].append(
{
"entity_id": pred.arg_id,
"role": pred.prediction,
"text": pred.arg,
}
)
yield instance
if __name__ == "__main__":
dataset = WikiEventsArgumentDataset(
"data/wikievents/test.jsonl",
max_sentence_distance=0,
create_negatives=True,
force_preprocess=True,
mark_trigger=True,
)
# pprint(dataset[0])
pprint(len(dataset))
# pprint(next((inst for inst in dataset if inst.role == "Target"), None))
# for i, feature in tqdm(enumerate(dataset)):
# context = feature.context
# pprint(Counter([(inst.role, inst.trigger_type, inst.arg_type) for inst in dataset if inst.role == 'Target']))
# pprint(dataset[0])
# pprint(next(dataset.to_dict(['no_relation']*len(dataset))))
# Count OOR
counter = Counter([inst.role for inst in dataset])
positives = sum([value for key, value in counter.items() if key != "no_relation"])
print(f"OOR%: {counter['OOR']}/{positives} ({counter['OOR']/positives})")
# with open('dev.test_conflict.jsonl', 'wt', encoding='utf-8') as f:
# for inst in dataset.to_dict(
# [inst.role for inst in dataset]
# ):
# f.write(f"{json.dumps(inst)}\n")
|
198940
|
import os
import sys
import numpy as np
import multiprocessing
# Import flags specifying dataset parameters
from flags import getFlags
def preprocess_data(start_index, data_count, data_dir, mesh_dir, soln_dir, RESCALE=True):
    RESCALE = False  # NOTE: overrides the RESCALE argument, disabling the rescaling branches below
LORES = True
HIRES = False
for i in range(start_index, start_index + data_count):
if LORES:
mesh = np.load(mesh_dir + 'mesh_' + str(0) + '.npy')
out_of_domain = (mesh == 0)
data = np.load(data_dir + 'data_' + str(i) + '.npy')
data[out_of_domain] = 0.0
#soln = np.load(soln_dir + 'solution_' + str(i) + '.npy')
if RESCALE:
## Rescale data and solutions
scaling = np.max(np.abs(data))
data = data/scaling
#soln = soln/scaling
np.save(data_dir + 'data_' + str(i) + '.npy', data)
#np.save(soln_dir + 'solution_' + str(i) + '.npy', soln)
if HIRES:
hires_mesh = np.load(mesh_dir + 'hires_mesh_' + str(0) + '.npy')
hires_out_of_domain = (hires_mesh == 0)
hires_data = np.load(data_dir + 'hires_data_' + str(i) + '.npy')
hires_data[hires_out_of_domain] = 0.0
#hires_soln = np.load(soln_dir + 'hires_solution_' + str(i) + '.npy')
if RESCALE:
## Rescale data and solutions
hires_scaling = np.max(np.abs(hires_data))
hires_data = hires_data/hires_scaling
#hires_soln = hires_soln/hires_scaling
np.save(data_dir + 'hires_data_' + str(i) + '.npy', hires_data)
#np.save(soln_dir + 'hires_solution_' + str(i) + '.npy', hires_soln)
if __name__ == '__main__':
FLAGS = getFlags()
# Divide tasks into smaller pieces
subdivision = 5
#hires_mesh = np.load(FLAGS.mesh_dir + 'hires_mesh_' + str(0) + '.npy')
def preprocess(d):
preprocess_data(d, int(FLAGS.data_count/subdivision), FLAGS.data_dir, FLAGS.mesh_dir, FLAGS.soln_dir)
# Create multiprocessing pool
NumProcesses = FLAGS.cpu_count
pool = multiprocessing.Pool(processes=NumProcesses)
start_indices = [int(n*FLAGS.data_count/subdivision) for n in range(0,subdivision*FLAGS.cov_count)]
start_indices = [FLAGS.data_start_count + n for n in start_indices]
print('\n [ Preprocessing Data ]\n')
num_tasks = subdivision*FLAGS.cov_count
for i, _ in enumerate(pool.imap_unordered(preprocess, [d for d in start_indices]), 1):
sys.stdout.write('\r Progress: {0:.1%}'.format(i/num_tasks))
sys.stdout.flush()
print('\n')
|
198947
|
import os
import sys
import subprocess
from joblib import Parallel, delayed
import numpy as np
import imageio
imageio.plugins.freeimage.download()
from imageio.plugins import freeimage
import h5py
from lz4.block import decompress
import scipy.misc
import cv2
from path import Path
path = os.path.dirname(os.path.abspath(__file__))
def dump_example(dataset_name):
print("Converting {:}.h5 ...".format(dataset_name))
file = h5py.File(os.path.join(path, "testdata", "{:}.h5".format(dataset_name)), "r")
for (seq_idx, seq_name) in enumerate(file):
if dataset_name == 'scenes11_test':
scale = 0.4
else:
scale = 1
print("Processing sequence {:d}/{:d}".format(seq_idx, len(file)))
dump_dir = os.path.join(path, '../test', dataset_name + "_" + "{:05d}".format(seq_idx))
if not os.path.isdir(dump_dir):
os.mkdir(dump_dir)
dump_dir = Path(dump_dir)
sequence = file[seq_name]["frames"]["t0"]
poses = []
for (f_idx, f_name) in enumerate(sequence):
frame = sequence[f_name]
for dt_type in frame:
dataset = frame[dt_type]
img = dataset[...]
if dt_type == "camera":
if f_idx == 0:
intrinsics = np.array([[img[0], 0, img[3]], [0, img[1], img[4]], [0, 0, 1]])
pose = np.array([[img[5],img[8],img[11],img[14]*scale], [img[6],img[9],img[12],img[15]*scale], [img[7],img[10],img[13],img[16]*scale]])
poses.append(pose.tolist())
elif dt_type == "depth":
dimension = dataset.attrs["extents"]
depth = np.array(np.frombuffer(decompress(img.tobytes(), dimension[0] * dimension[1] * 2), dtype = np.float16)).astype(np.float32)
depth = depth.reshape(dimension[0], dimension[1])*scale
dump_depth_file = dump_dir/'{:04d}.npy'.format(f_idx)
np.save(dump_depth_file, depth)
elif dt_type == "image":
img = imageio.imread(img.tobytes())
dump_img_file = dump_dir/'{:04d}.jpg'.format(f_idx)
                    imageio.imwrite(dump_img_file, img)  # scipy.misc.imsave was removed in SciPy >= 1.2
dump_cam_file = dump_dir/'cam.txt'
np.savetxt(dump_cam_file, intrinsics)
poses_file = dump_dir/'poses.txt'
np.savetxt(poses_file, np.array(poses).reshape(-1, 12), fmt='%.6e')
if len(dump_dir.files('*.jpg')) < 2:
dump_dir.rmtree()
def preparedata():
num_threads = 1
SUB_DATASET_NAMES = (["mvs_test", "rgbd_test", "scenes11_test", "sun3d_test"])
dump_root = os.path.join(path, '../test')
if not os.path.isdir(dump_root):
os.mkdir(dump_root)
if num_threads == 1:
for scene in SUB_DATASET_NAMES:
dump_example(scene)
else:
Parallel(n_jobs=num_threads)(delayed(dump_example)(scene) for scene in SUB_DATASET_NAMES)
dump_root = Path(dump_root)
subdirs = dump_root.dirs()
subdirs = [subdir.basename() for subdir in subdirs]
subdirs = sorted(subdirs)
with open(dump_root / 'test.txt', 'w') as tf:
for subdir in subdirs:
tf.write('{}\n'.format(subdir))
print("Finished Converting Data.")
if __name__ == "__main__":
preparedata()
|
198951
|
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected as fc
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import timeline
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder
# Train on 1000 images at a time
batch_size = 1000
# Define the input and output formats
inputs = tf.placeholder(tf.float32, [batch_size, 784])
targets = tf.placeholder(tf.float32, [batch_size, 10])
# First layer: fully connected, 500 neurons
with tf.variable_scope("layer_1"):
fc_1_out = fc(inputs, num_outputs=500, activation_fn=tf.nn.sigmoid)
# Second layer: fully connected, 784 neurons
with tf.variable_scope("layer_2"):
fc_2_out = fc(fc_1_out, num_outputs=784, activation_fn=tf.nn.sigmoid)
# Third layer: output layer with 10 neurons
with tf.variable_scope("layer_3"):
logits = fc(fc_2_out, num_outputs=10)
# Define the loss
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
if __name__ == '__main__':
mnist_save_dir = 'data'
mnist = input_data.read_data_sets(mnist_save_dir, one_hot=True)
with tf.Session() as sess:
        # Create the profiler object
my_profiler = model_analyzer.Profiler(graph=sess.graph)
        # Create the run metadata object
run_metadata = tf.RunMetadata()
        # Initialize the variables
sess.run(tf.global_variables_initializer())
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        # Train only 3 batches; we mainly want to look at performance
for i in range(3):
print("epoch %d start" % (i+1))
batch_input, batch_target = mnist.train.next_batch(batch_size)
feed_dict = {inputs: batch_input,
targets: batch_target}
sess.run(train_op,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('trace/timeline_3_step_%d.json' % (i+1), 'w') as f:
f.write(chrome_trace)
my_profiler.add_step(step=i, run_meta=run_metadata)
profile_code_builder = option_builder.ProfileOptionBuilder()
# profile_code_builder.with_node_names(show_name_regexes=['main.*'])
profile_code_builder.with_min_execution_time(min_micros=15)
        profile_code_builder.select(['micros'])  # can also be set to 'bytes' or 'occurrence'
profile_code_builder.order_by('micros')
profile_code_builder.with_max_depth(6)
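        # The builder keeps only nodes that consume at least 15 microseconds, selects and
        # orders the output by execution time ('micros'), and caps the display depth at 6.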
my_profiler.profile_python(profile_code_builder.build())
my_profiler.profile_operations(profile_code_builder.build())
my_profiler.profile_name_scope(profile_code_builder.build())
my_profiler.profile_graph(profile_code_builder.build())
        # Automatic optimization advice
my_profiler.advise(options=model_analyzer.ALL_ADVICE)
print("all done")
|
198962
|
from django.contrib.auth.models import Group, User
from django.db import models
from django.utils.translation import gettext_lazy as _l
# this import has to be here so that the signal handlers get registered
from qatrack.notifications.faults import handlers as faults_handlers # noqa: F401
from qatrack.notifications.faults_review import \
handlers as faults_review_handlers # noqa: F401
from qatrack.notifications.parts import handlers as part_handlers # noqa: F401
from qatrack.notifications.qccompleted import handlers as qccompleted_handlers # noqa: F401
from qatrack.notifications.qcreview import handlers as qcreview_handlers # noqa: F401
from qatrack.notifications.qcscheduling import handlers as qcscheduling_handlers # noqa: F401
from qatrack.notifications.service_log import handlers as service_log_handlers # noqa: F401
from qatrack.notifications.service_log_scheduling import \
handlers as service_log_scheduling_handlers # noqa: F401
from qatrack.qa.models import TestList
from qatrack.units.models import Unit
class RecipientGroup(models.Model):
name = models.CharField(
max_length=255,
help_text=_l("Enter a name for this group of recipients"),
)
groups = models.ManyToManyField(
Group,
help_text=_l("Select which groups this notification should be sent to."),
blank=True,
)
users = models.ManyToManyField(
User,
help_text=_l("Select individual users to include in these notifications"),
blank=True,
)
emails = models.TextField(
verbose_name=_l("Extra recipient emails"),
help_text=_l("Enter a comma separated list of extra emails this report should be sent to"),
blank=True
)
def recipient_emails(self):
users = set(self.users.filter(is_active=True).exclude(email='').values_list("email", flat=True))
group_users = set(
email for email, active in self.groups.values_list("user__email", "user__is_active") if active and email
)
emails = {x.strip() for x in self.emails.split(",") if x.strip()}
return users | group_users | emails
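    # Illustrative example: active users {"a@x.org"}, active group members {"b@x.org"}
    # and extra emails "c@x.org, a@x.org" combine, deduplicated by the set unions,
    # into {"a@x.org", "b@x.org", "c@x.org"}.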
def _sort_emails(self):
self.emails = ', '.join(sorted(e.strip() for e in self.emails.split(",")))
def save(self, *args, **kwargs):
self._sort_emails()
super().save(*args, **kwargs)
def __str__(self):
return self.name
class TestListGroup(models.Model):
name = models.CharField(max_length=255, help_text=_l("Enter a name for this group of TestLists"))
test_lists = models.ManyToManyField(
TestList,
help_text=_l(
"Select which Test Lists should be included in this notification group."
),
)
    __test__ = False  # suppress pytest warning
def __str__(self):
return self.name
class UnitGroup(models.Model):
name = models.CharField(max_length=255, help_text=_l("Enter a name for this group of Units"))
units = models.ManyToManyField(
Unit,
help_text=_l(
"Select which Units should be included in this notification group."
),
)
def __str__(self):
return self.name
|
198972
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_$ERA_cff import *
process = cms.Process('CTPPSTest', $ERA)
# load config
import Validation.CTPPS.simu_config.year_$CONFIG_cff as config
process.load("Validation.CTPPS.simu_config.year_$CONFIG_cff")
# minimal logger settings
process.MessageLogger = cms.Service("MessageLogger",
statistics = cms.untracked.vstring(),
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING')
)
)
# number of events
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(int($N_EVENTS))
)
# LHCInfo plotter
process.load("Validation.CTPPS.ctppsLHCInfoPlotter_cfi")
process.ctppsLHCInfoPlotter.outputFile = "$OUT_LHCINFO"
# track distribution plotter
process.ctppsTrackDistributionPlotter = cms.EDAnalyzer("CTPPSTrackDistributionPlotter",
tagTracks = cms.InputTag("ctppsLocalTrackLiteProducer"),
rpId_45_F = process.rpIds.rp_45_F,
rpId_45_N = process.rpIds.rp_45_N,
rpId_56_N = process.rpIds.rp_56_N,
rpId_56_F = process.rpIds.rp_56_F,
outputFile = cms.string("$OUT_TRACKS")
)
# reconstruction plotter
process.ctppsProtonReconstructionPlotter = cms.EDAnalyzer("CTPPSProtonReconstructionPlotter",
tagTracks = cms.InputTag("ctppsLocalTrackLiteProducer"),
tagRecoProtonsSingleRP = cms.InputTag("ctppsProtons", "singleRP"),
tagRecoProtonsMultiRP = cms.InputTag("ctppsProtons", "multiRP"),
rpId_45_F = process.rpIds.rp_45_F,
rpId_45_N = process.rpIds.rp_45_N,
rpId_56_N = process.rpIds.rp_56_N,
rpId_56_F = process.rpIds.rp_56_F,
association_cuts_45 = process.ctppsProtons.association_cuts_45,
association_cuts_56 = process.ctppsProtons.association_cuts_56,
outputFile = cms.string("$OUT_PROTONS")
)
# processing path
process.p = cms.Path(
process.generator
* process.beamDivergenceVtxGenerator
* process.ctppsDirectProtonSimulation
* process.reco_local
* process.ctppsProtons
* process.ctppsLHCInfoPlotter
* process.ctppsTrackDistributionPlotter
* process.ctppsProtonReconstructionPlotter
)
|
199021
|
import os
import unittest
from twined import Twine, exceptions
from .base import BaseTestCase
class TestTwine(BaseTestCase):
"""Testing operation of the Twine class"""
def test_init_twine_with_filename(self):
"""Ensures that the twine class can be instantiated with a file"""
Twine(source=os.path.join(self.path, "apps", "simple_app", "twine.json"))
def test_init_twine_with_json(self):
"""Ensures that a twine can be instantiated with a json string"""
with open(os.path.join(self.path, "apps", "simple_app", "twine.json"), "r", encoding="utf-8") as f:
Twine(source=f.read())
def test_no_twine(self):
"""Tests that the canonical-but-useless case of no twine provided validates empty"""
Twine()
def test_incorrect_version_twine(self):
"""Ensures exception is thrown on mismatch between installed and specified versions of twined"""
incorrect_version_twine = """{"twined_version": "0.0.0"}"""
with self.assertRaises(exceptions.TwineVersionConflict):
Twine(source=incorrect_version_twine)
def test_empty_twine(self):
"""Ensures that an empty twine file can be loaded"""
with self.assertLogs(level="DEBUG") as log:
Twine(source="{}")
self.assertEqual(len(log.output), 3)
self.assertEqual(len(log.records), 3)
self.assertIn("Detected source", log.output[0])
self.assertIn("Validated", log.output[1])
def test_example_twine(self):
"""Ensures that the example (full) twine can be loaded and validated"""
Twine(source=os.path.join(self.path, "apps", "example_app", "twine.json"))
def test_simple_twine(self):
"""Ensures that the simple app schema can be loaded and used to parse some basic config and values data"""
Twine(source=os.path.join(self.path, "apps", "simple_app", "twine.json"))
def test_broken_json_twine(self):
"""Ensures that an invalid json file raises an InvalidTwine exception"""
invalid_json_twine = """
{
"children": [
"configuration_values_schema": {
"$schema": "http://json-schema.org/2019-09/schema#",
"title": "The example configuration form",
"description": "The configuration strand of an example twine",
"type": "object",
"properties": {
}
},
}
"""
with self.assertRaises(exceptions.InvalidTwineJson):
Twine(source=invalid_json_twine)
if __name__ == "__main__":
unittest.main()
|
199042
|
import asyncio
import bs4
import collections
import datetime
import discord
import time
import os
import subprocess
import win_unicode_console
from datetime import timedelta
from itertools import islice
from music.musicstate import MusicState
from music.playlist import Playlist
from utils.votes import ActionVotes
win_unicode_console.enable()
class Player:
def __init__(self, bot, voice_client):
self.bot = bot
self.voice_client = voice_client
self.playlist = Playlist(bot)
self.current_player = None
self.current_entry = None
self.current_process = None
self.start_time = None
self.votes = ActionVotes()
self.lock = asyncio.Lock()
self.qlock = asyncio.Lock()
self.volume = 1.0
self.download_lock = asyncio.Lock()
self.state = MusicState.STOPPED
self.current_time = None
self.index = 0
self.repeat = 0
self.change = False
self.jump_event = asyncio.Event()
self.jump_return = None
self.volume_event = asyncio.Event()
self.seek_event = asyncio.Event()
self.timeout_handle = None
self.autoplay = False
self.EQ = 'normal'
        # This used to break PEP 8, but as a devoted follower of PEP 8, I have taken the effort to bring it in line with PEP 8
self.EQEffects = {'normal': "",
'pop': ' -af equalizer=f=500:width_type=h:w=300:g=2,equalizer=f=1000:width_type=h:w=100:g=3,'
'equalizer=f=2000:width_type=h:w=100:g=-2,equalizer=f=4000:width_type=h:w=100:g=-4,'
'equalizer=f=8000:width_type=h:w=100:g=-4,equalizer=f=16000:width_type=h:w=100:g=-4',
'classic': ' -af equalizer=f=250:width_type=h:w=100:g=-6,'
'equalizer=f=1000:width_type=h:w=100:g=1,'
'equalizer=f=4000:width_type=h:w=100:g=6,'
'equalizer=f=8000:width_type=h:w=100:g=6,'
'equalizer=f=16000:width_type=h:w=100:g=6',
'jazz': ' -af equalizer=f=250:width_type=h:w=100:g=5,'
'equalizer=f=500:width_type=h:w=100:g=-5,equalizer=f=1000:width_type=h:w=100:g=-2,'
'equalizer=f=2000:width_type=h:w=100:g=2,equalizer=f=4000:width_type=h:w=100:g=-1,'
'equalizer=f=8000:width_type=h:w=100:g=-1,equalizer=f=16000:width_type=h:w=100:g=-1',
'rock': ' -af equalizer=f=250:width_type=h:w=100:g=3,'
'equalizer=f=500:width_type=h:w=100:g=-9,equalizer=f=1000:width_type=h:w=100:g=-1,'
'equalizer=f=2000:width_type=h:w=100:g=3,equalizer=f=4000:width_type=h:w=100:g=3,'
'equalizer=f=8000:width_type=h:w=100:g=3,equalizer=f=16000:width_type=h:w=100:g=3',
'balanced': ' -af equalizer=f=32:width_type=h:w=100:g=3,'
'equalizer=f=64:width_type=h:w=100:g=2,equalizer=f=500:width_type=h:w=100:g=-1,'
'equalizer=f=1000:width_type=h:w=100:g=-2,equalizer=f=4000:width_type=h:w=100:g=1,'
'equalizer=f=8000:width_type=h:w=100:g=3,equalizer=f=16000:width_type=h:w=100:g=3',
'bb': ' -af bass=g=8',
'vocals': ' -af compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2',
'easy': ' -af earwax',
'live': ' -af extrastereo'
}
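        # For reference: in ffmpeg's equalizer filter, f is the centre frequency in Hz,
        # width_type=h makes the band width w a value in Hz, and g is the gain in dB
        # (negative values cut), so e.g. 'equalizer=f=1000:width_type=h:w=100:g=3'
        # boosts a 100 Hz-wide band centred on 1 kHz by 3 dB.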
self.effects = {'pop': 'Pop', 'classic': 'Classic', 'jazz': 'Jazz', 'rock': 'Rock', 'bb': 'Bass Boost',
'normal': 'Normal', 'vocals': 'Vocals', 'balanced': 'Balanced', 'easy': 'Easy Listening',
'live': 'Live'}
async def reset(self, seektime=None, prog=None):
""" Nasty function that makes a player that will play from exactly when it was stopped , this allows us to
change effects on the fly """
self.voice_client.stop()
if prog is None:
prog = self.accu_progress
if not self.volume_event.is_set():
print('waiting for vol')
await self.volume_event.wait()
if not self.seek_event.is_set():
print('waiting for seek')
await self.seek_event.wait()
if seektime is None:
seektime = datetime.datetime.utcfromtimestamp(prog)
self.bot.loop.create_task(self.play(str(seektime.strftime('%H:%M:%S.%f')), prog))
async def play(self, seek=None, seeksec=0):
"""
Manage Effects, Volume and Seeking,
makes the voice_client play an FFmpeg AudioSource
'next' is provided as the functon to be called when
the source is done playing
"""
print('inside play')
if self.state == MusicState.DEAD or self.voice_client.is_playing():
return
if self.timeout_handle is not None:
self.timeout_handle.cancel()
# Make a volume string to feed to ffmpeg
volumestr = ' -filter:a "volume=%s"' % self.volume
# This lock is the key to having only one entry being played at once
        async with self.lock:
now = self.playlist.entries[self.index]
            async with now.lock:
self.state = MusicState.PLAYING
                # 'addon' must be initialised unconditionally: it is referenced in the
                # livestream branch below and would otherwise be unbound when an effect is set.
                addon = ""
# If karaoke mode
if now.effect == 'k':
                    # The download process (livestreamer or wget below) pipes the stream to ffmpeg #1;
                    # ffmpeg #1 outputs the audio with phase cancellation applied, in opus format, to a
                    # pipe, which is then piped into our normal FFmpegPCMAudio (ffmpeg #2).
                    # Why not just use the -af filter and output one channel in the main ffmpeg?
                    # d.py defaults the player/voice_client to 2 channels, so the single channel from
                    # ffmpeg #1 is interpreted as two channels and split equally by ffmpeg #2; if this
                    # method isn't used, a phase cancellation will occur, but with a tempo
                    # and pitch increase.
                    # If karaoke was asked for on a livestream - not sure why, but uhm, it's just here
if now.is_live:
stream_process = subprocess.Popen(["livestreamer", "-Q", "-O", now.url, "360p"],
stdout=subprocess.PIPE
)
# Normal karaoke, no live
else:
stream_process = subprocess.Popen(["wget", "-qO-", now.url],
stdout=subprocess.PIPE
)
ff2_process = subprocess.Popen(['ffmpeg', '-i', '-', '-nostdin', '-f', 'opus', '-af',
'pan=stereo|c0=c0|c1=-1*c1', '-ac', '1', '-'],
stdout=subprocess.PIPE, stdin=stream_process.stdout
)
self.current_process = ff2_process
await asyncio.sleep(1.5)
ytdl_player = discord.FFmpegPCMAudio(
ff2_process.stdout,
before_options=f"-nostdin{' -ss '+seek if seek is not None else ''}",
options="-acodec pcm_s16le" + volumestr + self.EQEffects[self.EQ],
pipe=True)
# Normal track no live
elif not now.is_live:
                    # Have wget handle the downloading rather than ffmpeg, again because ffmpeg is just bad at it
stream_process = subprocess.Popen(["wget", "-qO-", now.url],
stdout=subprocess.PIPE)
self.current_process = stream_process
#await asyncio.sleep(1.5)
ytdl_player = discord.FFmpegPCMAudio(
stream_process.stdout,
before_options=f"-nostdin{' -ss '+seek if seek is not None else ''}",
options="-acodec pcm_s16le -vn -b:a 128k" + volumestr + self.EQEffects[self.EQ],
pipe=True)
# Livestream
else:
# Also no -ss here, seeking doesn't work on livestreams
# Handle downloading through livestreamer for multithreaded downloading, manual pipe to source
livestreamer = subprocess.Popen(["livestreamer", "-Q", "-O", now.url, "360p"], stdout=subprocess.PIPE)
self.current_process = livestreamer
ytdl_player = discord.FFmpegPCMAudio(
livestreamer.stdout,
before_options="-nostdin -nostats -loglevel 0 ",
options="-acodec pcm_s16le -vn -b:a 128k" + addon + volumestr + self.EQEffects[self.EQ],
pipe=True)
            # So it might seem like you can only set Equalizer and Volume once;
            # the code below facilitates changes at runtime, all thanks to FFmpeg,
            # by keeping track of the original start time down to microseconds.
            # The Volume, EQ and Seek commands all stop the player and set a flag
            # for their respective effect to True, telling the player not to move
            # to the next track when the player is stopped. 'play' is then called
            # again with altered player attributes.
self.current_player = ytdl_player
self.current_entry = now
self.voice_client.play(ytdl_player, after=self.next)
if not self.volume_event.is_set():
if seeksec == 0:
self.start_time = time.time()
else:
self.start_time = time.time() - seeksec
else:
self.volume_event.clear()
self.seek_event.clear()
if seek is None and not self.volume_event.is_set():
self.bot.loop.create_task(self.manage_nowplaying())
else:
self.volume_event.clear()
# Both 'pause' and 'resume' will set current_time so that using the
# NowPlaying command doesn't change time when the song isn't even playing
async def pause(self):
self.state = MusicState.PAUSED
self.current_time = time.time()
self.voice_client.pause()
async def resume(self):
self.state = MusicState.PLAYING
self.current_time = time.time()
self.voice_client.resume()
async def manage_nowplaying(self):
if self.state == MusicState.DEAD:
return
song_total = str(timedelta(seconds=self.current_entry.duration)).lstrip('0').lstrip(':')
        prog_str = song_total
np_embed = discord.Embed(title=self.current_entry.title,
description='added by **%s**' % self.current_entry.author.name,
url=self.current_entry.webpage_url, colour=0xffffff)
if not self.current_entry.is_live:
np_embed.add_field(name='Duration', value=prog_str)
np_embed.add_field(name='Autoplay', value='On' if self.autoplay else 'Off')
np_embed.add_field(name='Equalizer', value=self.effects[self.EQ])
if self.current_entry.is_live:
tm = str(timedelta(seconds=self.current_entry.duration)).lstrip('0').lstrip(':')
np_embed.add_field(name='Progress', value=("▰"*10)+f" {tm}/ Live :red_circle:", inline=False)
np_embed.set_image(url=self.current_entry.thumb)
np_embed.set_author(name='Now Playing', icon_url=self.current_entry.author.avatar_url)
# Check when the last now playing message was sent, so we can
# delete it if its older than the last message in that channel,
# if its the last message on that channel, we just edit it to
# display new info
if self.current_entry.channel.guild in self.bot.np_msgs:
np_msg = self.bot.np_msgs[self.current_entry.channel.guild]
async for msg in self.current_entry.channel.history(limit=1):
if msg != np_msg:
try:
await np_msg.delete()
except discord.Forbidden:
pass
self.bot.np_msgs[self.current_entry.channel.guild] = None
try:
if self.bot.np_msgs[self.current_entry.channel.guild]:
self.bot.np_msgs[self.current_entry.channel.guild] = await self.bot.np_msgs[
self.current_entry.channel.guild].edit(embed=np_embed)
else:
self.bot.np_msgs[self.current_entry.channel.guild] = await self.current_entry.channel.send(
embed=np_embed, delete_after=None)
except KeyError:
self.bot.np_msgs[self.current_entry.channel.guild] = await self.current_entry.channel.send(
embed=np_embed, delete_after=None)
    # Having a normal function that adds an async function to the loop
    # was my solution to not being able to pass an awaitable to 'after'
    # in 'play'; it also returns early when the volume was changed or seeking was done.
def next(self, error):
if self.current_process is not None:
self.current_process.kill()
print('kill issued')
if self.current_process.poll() is None:
# Murder
print('murder')
self.current_process.communicate()
self.current_process = None
print('in normal next')
if self.state == MusicState.DEAD or self.change:
print('normal next returned')
if not self.volume_event.is_set():
self.volume_event.set()
if not self.seek_event.is_set():
self.seek_event.set()
self.change = False
return
self.bot.loop.create_task(self.real_next())
# Checks if there are more entries after our current index, if yes, increase index
# and call prepare_entry, but if repeat is set to True, the index won't change
# autoplay is always the last condition to be checked, so manually queued songs
# will be served before autoplay_manager is called
def _jump_check(self):
if self.jump_event.is_set():
self.jump_event.clear()
try:
self.index = self.jump_event.index
print('setting')
except AttributeError:
pass
if self.jump_return is not None:
self.jump_return.set()
self.jump_return = None
def _timeout_dc(self):
self.bot.loop.create_task(self._dc())
async def _dc(self):
if self.state == MusicState.DEAD:
return
await self.current_entry.channel.send("The bot has been inactive for 10 minutes, it will now disconnect.")
self.state = MusicState.DEAD
self.bot.players.pop(self.voice_client.guild)
await self.bot.vc_clients.pop(self.voice_client.guild).disconnect(force=True)
async def real_next(self):
self.state = MusicState.SWITCHING
        if self.index < len(self.playlist.entries) - 1:  # are there entries after the current index?
if not self.repeat and not self.jump_event.is_set():
self.index += 1
self._jump_check()
            async with self.playlist.entries[self.index].lock:
self.bot.loop.create_task(self.play())
elif self.repeat:
self.bot.loop.create_task(self.play())
elif self.autoplay:
if not self.repeat:
self.index += 1
self._jump_check()
self.bot.loop.create_task(self.autoplay_manager())
else:
if self.jump_event.is_set():
self._jump_check()
self.bot.loop.create_task(self.play())
else:
self.index += 1
self.state = MusicState.STOPPED
self.timeout_handle = self.bot.loop.call_later(600, self._timeout_dc)
    # Following some minimal scraping, autoplay links are pulled;
    # at times this might be empty, so we just take the other entries below it.
    # Livestreams haven't been implemented yet, and I wouldn't want a livestream
    # to interrupt anyone's exploration of YouTube either way, so with a quick check
    # of whether the live label exists on the item, the entry to be queued
    # is determined.
async def autoplay_manager(self):
if self.state == MusicState.DEAD:
return
        async with self.download_lock:
async with self.bot.session.get(self.current_entry.webpage_url) as resp:
response = await resp.text()
soup = bs4.BeautifulSoup(response, "lxml")
autoplayitems = [a for a in
soup.select('div.autoplay-bar div.content-wrapper')] # a[href^=/watch] a[title^=]
altitems = [a for a in soup.select('ul#watch-related li div.content-wrapper')]
altitems.insert(0, autoplayitems[0])
song_choice = 0
while 1:
test = False
try:
for entry in self.playlist.entries:
if entry.webpage_url.split('/')[3] == \
altitems[song_choice].select('a[href^=/watch]')[0].attrs.get('href').split('/')[1]:
test = True
break
except (IndexError, KeyError):
pass
if len(altitems[song_choice].select('.yt-badge-live')) or test:
song_choice += 1
else:
song_url = 'http://www.youtube.com' + altitems[song_choice].select('a[href^=/watch]')[0].attrs.get(
'href')
break
info = await self.bot.downloader.extract_info(self.bot.loop, song_url, download=False, process=True,
retry_on_error=True)
entry, position = self.playlist.add(info['url'], song_url, self.bot.user, self.current_entry.channel, info['title'],
info['duration'], 'None', info['thumbnail'], info['is_live'])
ap_msg = await self.current_entry.channel.send(
'**:musical_score: Autoplay:** **%s** has been queued to be played.' % entry.title)
await asyncio.sleep(7)
await ap_msg.delete()
await self.play()
# Some redundant stuff below here, accu_progress is only used by
# effects that need to keep exact track of time, checks if we're paused
# if not then the real current_time is used.
@property
def progress(self):
if not self.state == MusicState.PAUSED:
self.current_time = time.time()
if not self.current_entry.is_live:
return round(self.current_time - self.start_time)
else:
return self.current_entry.duration + round(self.current_time - self.start_time)
@property
def accu_progress(self):
if not self.state == MusicState.PAUSED:
self.current_time = time.time()
return self.current_time - self.start_time
|
199063
|
import os
from base64 import b64encode
from rdflib import *
import json
from io import StringIO
from whyis import nanopub
from whyis import autonomic
from whyis.test.agent_unit_test_case import AgentUnitTestCase
import unittest
class OntologyImportAgentTestCase(AgentUnitTestCase):
def test_foaf_import(self):
np = nanopub.Nanopublication()
np.assertion.parse(data='''{
"@id": "http://example.com/testonto",
"@type" : "http://www.w3.org/2002/07/owl#Ontology",
"http://www.w3.org/2002/07/owl#imports":{"@id":"http://xmlns.com/foaf/0.1/"}
}''', format="json-ld")
#print(np.serialize(format="trig"))
agent = autonomic.OntologyImporter()
results = self.run_agent(agent, nanopublication=np)
        self.assertEqual(len(results), 1)
self.assertTrue(results[0].resource(URIRef('http://xmlns.com/foaf/0.1/'))[RDF.type:OWL.Ontology])
@unittest.skip("Skipping until RDFlib solves permanent redirect issues")
def test_dc_terms_import(self):
np = nanopub.Nanopublication()
np.assertion.parse(data=str('''<http://example.com/testonto> a <http://www.w3.org/2002/07/owl#Ontology>;
<http://www.w3.org/2002/07/owl#imports> <http://purl.org/dc/terms/>.'''), format="turtle")
#print(np.serialize(format="trig"))
agent = autonomic.OntologyImporter()
results = self.run_agent(agent, nanopublication=np)
        self.assertEqual(len(results), 1)
#print(results[0].serialize(format="trig"))
self.assertTrue(results[0].resource(URIRef('http://purl.org/dc/terms/created'))[RDF.type:RDF.Property])
def test_prov_import(self):
# 20190807 CircleCI is having some difficulty fetching https URLs
if os.environ.get("CI") == "true":
return
np = nanopub.Nanopublication()
np.assertion.parse(data='''{
"@id": "http://example.com/testonto",
"@type" : "http://www.w3.org/2002/07/owl#Ontology",
"http://www.w3.org/2002/07/owl#imports":{"@id":"http://www.w3.org/ns/prov#"}
}''', format="json-ld")
#print(np.serialize(format="trig"))
agent = autonomic.OntologyImporter()
results = self.run_agent(agent, nanopublication=np)
        self.assertEqual(len(results), 1)
self.assertTrue(len(results[0]) > 0)
self.assertTrue(results[0].resource(URIRef('http://www.w3.org/ns/prov#'))[RDF.type:OWL.Ontology])
def test_sio_import(self):
# 20190807 CircleCI is having some difficulty fetching https URLs
if os.environ.get("CI") == "true":
return
SIO_URL = "http://semanticscience.org/ontology/sio.owl"
# Use the final URL instead
# SIO_URL = "https://raw.githubusercontent.com/micheldumontier/semanticscience/master/ontology/sio/release/sio-release.owl"
np = nanopub.Nanopublication()
np.assertion.parse(data='''{
"@id": "http://example.com/testonto",
"@type" : "http://www.w3.org/2002/07/owl#Ontology",
"http://www.w3.org/2002/07/owl#imports":{"@id":"%(SIO_URL)s"}
}''' % locals(), format="json-ld")
agent = autonomic.OntologyImporter()
results = self.run_agent(agent, nanopublication=np)
        self.assertEqual(len(results), 1)
self.assertTrue(results[0].resource(URIRef(SIO_URL))[RDF.type:OWL.Ontology])
|
199095
|
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# Recursively remove None-valued keys from the input request JSON body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
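# Illustrative example: del_none({"a": 1, "b": None, "c": {"d": None}}) returns
# {"a": 1, "c": {}}; None-valued keys are removed in place, recursively.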
def create_customer_payment_instrument_pinless_debit():
customerTokenId = "AB695DA801DD1BB6E05341588E0A3BDC"
cardExpirationMonth = "12"
cardExpirationYear = "2031"
cardType = "001"
cardIssueNumber = "01"
cardStartMonth = "01"
cardStartYear = "2020"
cardUseAs = "pinless debit"
card = Tmsv2customersEmbeddedDefaultPaymentInstrumentCard(
expiration_month = cardExpirationMonth,
expiration_year = cardExpirationYear,
type = cardType,
issue_number = cardIssueNumber,
start_month = cardStartMonth,
start_year = cardStartYear,
use_as = cardUseAs
)
billToFirstName = "John"
billToLastName = "Doe"
billToCompany = "CyberSource"
billToAddress1 = "1 Market St"
billToLocality = "San Francisco"
billToAdministrativeArea = "CA"
billToPostalCode = "94105"
billToCountry = "US"
billToEmail = "<EMAIL>"
billToPhoneNumber = "4158880000"
billTo = Tmsv2customersEmbeddedDefaultPaymentInstrumentBillTo(
first_name = billToFirstName,
last_name = billToLastName,
company = billToCompany,
address1 = billToAddress1,
locality = billToLocality,
administrative_area = billToAdministrativeArea,
postal_code = billToPostalCode,
country = billToCountry,
email = billToEmail,
phone_number = billToPhoneNumber
)
instrumentIdentifierId = "7010000000016241111"
instrumentIdentifier = Tmsv2customersEmbeddedDefaultPaymentInstrumentInstrumentIdentifier(
id = instrumentIdentifierId
)
requestObj = PostCustomerPaymentInstrumentRequest(
card = card.__dict__,
bill_to = billTo.__dict__,
instrument_identifier = instrumentIdentifier.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = CustomerPaymentInstrumentApi(client_config)
return_data, status, body = api_instance.post_customer_payment_instrument(customerTokenId, requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling CustomerPaymentInstrumentApi->post_customer_payment_instrument: %s\n" % e)
if __name__ == "__main__":
create_customer_payment_instrument_pinless_debit()
|
199145
|
from prompt_toolkit.styles import style_from_dict
from pygments.token import Token
TABLE_JOB_MODEL = [['Spider', 'Started Time', 'Items', 'Tags', 'State', 'Close Reason', 'Errors', 'Version']]
TABLE_JOBS_MODEL = [['Id', 'Spider', 'Started Time', 'Items', 'Tags', 'State', 'Close Reason', 'Errors', 'Version']]
HC_TABLE_JOBS_MODEL = [['Id', 'Spider', 'Finished Time', 'State', 'Close Reason', 'Errors', 'Logs', 'Version']]
TABLE_SPIDERS_MODEL = [['Id', 'Tags', 'Version', 'Type']]
token_style = style_from_dict({
Token.ErrorMessage: '#ff0066',
Token.ShubFileModel: '#ccaa33',
Token.NoInternetConnection: '#ff0066',
Token.ShubApiError: '#ff0066',
Token.ShubApiErrorHintsHeadline: '#ccaa33',
Token.ShubApiErrorHints1: '#ccaa33',
Token.ShubApiErrorHints2: '#cc<PASSWORD>',
    Token.Credentials: '<PASSWORD>',
Token.GeneralErrorMessage: '#ff0066',
Token.GeneralInfoMessage: '#<PASSWORD>',
})
tokens = [
(Token.ShubApiError, 'Unknown response status from ScrapingHub: badrequest.\n'),
(Token.ShubApiErrorHintsHeadline, 'Hints:\n\n'),
(Token.ShubApiErrorHints1, '- Are your credentials [api key, project id] set correctly?\n'),
    (Token.ShubApiErrorHints2, '- Are the job/jobs parameters named and passed correctly?\n'),
    (Token.NoInternetConnection, 'You do not have an Internet connection.\n'),
(Token.ErrorMessage, 'You need to set up your .scrapinghub.yml with a default project and api key:\n'),
(Token.ShubFileModel,
'''
~/.scrapinghub.yml
apikeys:
default: <KEY>
projects:
default: 89090
\n
'''
)
]
def create_error_token(message):
return Token.GeneralErrorMessage, message + '\n'
def create_info_token(message):
return Token.GeneralInfoMessage, message + '\n'
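# Illustrative usage: these helpers produce the (Token, text) pairs that a
# prompt_toolkit renderer consumes, e.g. create_error_token('boom') returns
# (Token.GeneralErrorMessage, 'boom\n').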
|
199156
|
import sys, os, tempfile, shutil
from PIL import Image
import util
import subprocess
'''
TOOL_PATH = os.path.join( os.path.dirname( __file__ ), "../../../../../../tools/bin" )
TEXTURE_CONVERTER = os.path.abspath( os.path.join( TOOL_PATH, "TextureConverter.exe" ) )
def Convert( src_filenames, dest_filename, texture_format="bc3", no_premultiply=False, force=False, platform='opengl',
generate_mips=False, width=None, height=None,
verbose=False, ignore_exceptions=False):
is_newer = False
# If a list is passed in, concatenate the filenames with semi-colon separators, otherwise just use the filename
src_filename_str = None
if isinstance( src_filenames, list ):
src_filename_str = ';'.join( src_filenames )
for filename in src_filenames:
is_newer = is_newer or util.IsFileNewer( filename, dest_filename )
else:
src_filename_str = src_filenames
is_newer = is_newer or util.IsFileNewer( src_filename_str, dest_filename )
if force or is_newer:
cmd_list = [ TEXTURE_CONVERTER,
'--swizzle',
'--format ' + texture_format,
'--platform ' + platform,
'-i ' + src_filename_str,
'-o ' + dest_filename,
]
if generate_mips:
cmd_list.append( '--mipmap' )
if not no_premultiply:
cmd_list.append( '--premultiply' )
if width:
cmd_list.append( '-w {}'.format( width ) )
if height:
cmd_list.append( '-h {}'.format( height ) )
cmd = " ".join( cmd_list )
if verbose:
print( cmd )
if subprocess.call( cmd_list ) != 0:
sys.stderr.write( "Error attempting to convert {} to {}\n".format( src_filenames, dest_filename ) )
sys.stderr.write( cmd + "\n" )
if not ignore_exceptions:
raise
'''
KTECH_LOCATION = "ktech"
texture_format_translation = {
'bc1': 'dxt1',
'bc2': 'dxt3',
'bc3': 'dxt5',
'rgb': 'rgb',
'argb': 'rgba',
}
def Convert( src_filenames, dest_filename, texture_format="bc3", no_premultiply=False, force=False, platform='opengl',
generate_mips=False, width=None, height=None,
verbose=False, ignore_exceptions=False):
is_newer = False
    assert texture_format in texture_format_translation, "Invalid texture format {}".format(texture_format)
texture_format = texture_format_translation[texture_format]
# ktech has multiplatform texture support as a stub, but disabled since only opengl is used.
# (the support was built in, but the option to define in as a program parameter was left commented out)
assert platform == "opengl", "Invalid platform {}".format(platform)
# If a list is passed in, concatenate the filenames with semi-colon separators, otherwise just use the filename
src_filename_str = None
if isinstance( src_filenames, list ):
src_filename_str = ','.join( src_filenames )
for filename in src_filenames:
is_newer = is_newer or util.IsFileNewer( filename, dest_filename )
else:
src_filename_str = src_filenames
is_newer = is_newer or util.IsFileNewer( src_filename_str, dest_filename )
if force or is_newer:
cmd_list = [ KTECH_LOCATION,
'--quiet',
'--compression ' + texture_format,
]
if not generate_mips:
cmd_list.append( '--no-mipmaps' )
if no_premultiply:
cmd_list.append( '--no-premultiply' )
if width:
cmd_list.append( '--width {}'.format( width ) )
if height:
cmd_list.append( '--height {}'.format( height ) )
cmd_list.append( '--' )
cmd_list.append( src_filename_str )
cmd_list.append( dest_filename )
cmd = " ".join( cmd_list )
if verbose:
print( cmd )
if subprocess.call( cmd_list ) != 0:
sys.stderr.write( "Error attempting to convert {} to {}\n".format( src_filenames, dest_filename ) )
sys.stderr.write( cmd + "\n" )
            if not ignore_exceptions:
                # a bare `raise` is only valid inside an except block
                raise RuntimeError( "Texture conversion failed for {}".format( dest_filename ) )
def GenerateMips( im ):
mips = []
w, h = im.size
    while w >= 1 or h >= 1:
        mips.append( im )
        # integer halving: PIL's resize requires integer dimensions
        w //= 2
        h //= 2
        im = im.resize( ( max( w, 1 ), max( h, 1 ) ), Image.ANTIALIAS )
return mips
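# Illustrative mip chain: a 16x8 source yields levels 16x8, 8x4, 4x2, 2x1 and
# 1x1 (dimensions are halved with integer division and clamped to at least 1).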
def SaveImagesToTemp( images, basename ):
tempdir = tempfile.mkdtemp()
idx = 0
filenames = []
for image in images:
name = "{0}{1}.png".format( basename, idx )
filename = os.path.join( tempdir, name )
filenames.append( filename )
image.save( filename )
idx += 1
return ( tempdir, filenames )
def MipAndConvert( im, dest_filename, platform='opengl', texture_format="bc3", no_premultiply = False, force=False, ignore_exceptions=False ):
if isinstance( im, str ):
im = Image.open( im )
mips = GenerateMips( im )
tempdir, filenames = SaveImagesToTemp( mips, "mip" )
try:
Convert( src_filenames=filenames, dest_filename=dest_filename, texture_format=texture_format, platform=platform,
no_premultiply=no_premultiply, force=force, ignore_exceptions=ignore_exceptions )
finally:
if os.path.exists( tempdir ):
shutil.rmtree( tempdir )
|
199176
|
from alpa.collective.collective import (
nccl_available, gloo_available, is_group_initialized, init_collective_group,
destroy_collective_group, create_collective_group, get_rank,
get_collective_group_size, allreduce, allreduce_multigpu, barrier, reduce,
reduce_multigpu, broadcast, broadcast_partialgpu, broadcast_multigpu, allgather,
allgather_multigpu, reducescatter, reducescatter_multigpu, send,
send_multigpu, recv, recv_multigpu, check_and_get_group)
__all__ = [
"nccl_available", "gloo_available", "is_group_initialized",
"init_collective_group", "destroy_collective_group",
"create_collective_group", "get_rank", "get_collective_group_size",
"allreduce", "allreduce_multigpu", "barrier", "reduce", "reduce_multigpu",
"broadcast", "broadcast_multigpu", "allgather", "allgather_multigpu",
"reducescatter", "reducescatter_multigpu", "send", "send_multigpu", "recv",
"recv_multigpu", "check_and_get_group"
]
|
199180
|
import inspect
from pathlib import Path
from unittest.mock import Mock
import parso
from test_pkg import functions
import pytest
from ploomber.util import dotted_path
from ploomber.exceptions import SpecValidationError
from ploomber.sources.inspect import getfile
@pytest.mark.parametrize('spec', [
'test_pkg.functions.some_function',
{
'dotted_path': 'test_pkg.functions.some_function'
},
])
def test_call_dotted_path_calls_function(monkeypatch, spec):
mock = Mock()
monkeypatch.setattr(functions, 'some_function', mock)
dotted_path.DottedPath(spec)()
mock.assert_called_once_with()
def test_call_spec_with_kwargs(monkeypatch):
mock = Mock()
monkeypatch.setattr(functions, 'some_function', mock)
spec = {
'dotted_path': 'test_pkg.functions.some_function',
'a': 1,
'b': 2,
}
dotted_path.DottedPath(spec)()
mock.assert_called_once_with(a=1, b=2)
def test_call_spec_without_dotted_path_key():
spec = {'a': 1}
with pytest.raises(SpecValidationError) as excinfo:
dotted_path.DottedPath(spec)()
assert excinfo.value.errors == [{
'loc': ('dotted_path', ),
'msg': 'field required',
'type': 'value_error.missing'
}]
@pytest.mark.parametrize('kwargs, expected', [
[None, 42],
[dict(a=1), 1],
])
def test_call_dotted_path(tmp_directory, add_current_to_sys_path,
no_sys_modules_cache, kwargs, expected):
Path('my_module.py').write_text("""
def function(a=42):
return a
""")
assert dotted_path.call_dotted_path('my_module.function',
kwargs=kwargs) == expected
def test_call_dotted_path_unexpected_kwargs(tmp_directory,
add_current_to_sys_path,
no_sys_modules_cache):
Path('my_module.py').write_text("""
def function():
pass
""")
with pytest.raises(TypeError) as excinfo:
dotted_path.call_dotted_path('my_module.function', kwargs=dict(a=1))
expected = ("function() got an unexpected keyword argument 'a' "
"(Loaded from:")
assert expected in str(excinfo.value)
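# The fixture strings below are written to function.py by the tests that
# follow; each loc_expected is the "<file>:<line>" of the last top-level
# definition of `some_name` that _check_defines_function_with_name should report.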
_two = """
def some_name():
pass
def some_name():
pass
"""
_nested_before = """
def something():
def some_name():
pass
def some_name():
pass
"""
_nested_after = """
def some_name():
pass
def something():
def some_name():
pass
"""
_decorated = """
@some_decorator
def some_name():
pass
"""
_decorated_many = """
@some_decorator
@another_decorator
def some_name():
pass
"""
_test_many_names = """
def another():
some_name = 1
some_name = 1
def some_name():
some_name = pd.read_csv('aa')
x['some_name']
fn(some_name)
"""
@pytest.mark.parametrize('source, loc_expected', [
[_test_many_names, 'function.py:7'],
[_two, 'function.py:5'],
[_nested_before, 'function.py:6'],
[_decorated, 'function.py:2'],
[_decorated_many, 'function.py:2'],
[_nested_after, 'function.py:2'],
],
ids=[
'test-many-name',
'two',
'nested-before',
'decorated',
'decorated-many',
'nested-after',
])
def test_check_defines_function_with_name(tmp_directory,
add_current_to_sys_path,
no_sys_modules_cache, source,
loc_expected):
Path('function.py').write_text(source)
loc, source = dotted_path._check_defines_function_with_name(
'function.py', 'some_name', None)
assert loc == loc_expected
_overwritten_int = """
def name():
pass
name = 1
"""
_overwritten_multi = """
def name():
pass
name, x = 1, 2
"""
_overwritten_import = """
def name():
pass
import name
"""
_overwritten_from_import = """
def name():
pass
from something import name
"""
_overwritten_class = """
def name():
pass
class name:
pass
"""
@pytest.mark.parametrize('source', [
_overwritten_int,
_overwritten_import,
_overwritten_from_import,
_overwritten_multi,
_overwritten_class,
])
def test_check_last_definition_is_function(source):
module = parso.parse(source)
with pytest.raises(TypeError) as excinfo:
dotted_path._check_last_definition_is_function(module, 'name',
'x.name')
assert ("Failed to load dotted path 'x.name'. "
"Expected last defined 'name' to be a function. Got:"
in str(excinfo.value))
# TODO: test many names but last one is correct
#  - sub test case: with decorator
# TODO: nested alias, should be skipped?
# TODO: test ignores other imports that do not alias
#  - try more than one alias
@pytest.mark.parametrize('import_', [
'from pkg import some_name',
'from pkg.sub import some_name',
'from . import some_name',
'from .pkg import some_name',
'from .pkg.sub import some_name',
'from .pkg.sub import some_name, another_name',
'from pkg import some_name, another_name',
'from pkg.sub import some_name, another_name',
])
def test_check_defines_function_with_name_detects_aliasing(
tmp_directory, add_current_to_sys_path, no_sys_modules_cache, import_):
Path('function.py').write_text(import_)
with pytest.raises(NotImplementedError):
dotted_path._check_defines_function_with_name('function.py',
'some_name', None)
@pytest.mark.parametrize('dotted_path_str', [
'test_pkg.decorated.functions.function',
'test_pkg.decorated.functions.decorated_function',
'test_pkg.decorated.functions.double_decorated_function',
'test_pkg.callables.root',
],
ids=[
'regular',
'decorated-function',
'double-decorated-function',
'defined-in-init-file',
])
def test_lazily_located_dotted_path(dotted_path_str, tmp_imports):
loc, source = dotted_path.lazily_locate_dotted_path(dotted_path_str)
obj = dotted_path.load_dotted_path(dotted_path_str)
loc_real = getfile(obj)
lines, line = inspect.getsourcelines(obj)
source_expected = ''.join(lines)
loc_expected = f'{loc_real}:{line}'
assert loc == loc_expected
assert source == source_expected
@pytest.mark.parametrize('dotted_path_str', ['a.b', 'a.b.c'])
def test_lazily_locate_dotted_path_error_if_no_package_spec(dotted_path_str):
with pytest.raises(ModuleNotFoundError) as excinfo:
dotted_path.lazily_locate_dotted_path(dotted_path_str)
assert (f"Error processing dotted path '{dotted_path_str}', no "
"module named 'a'" in str(excinfo.value))
@pytest.mark.parametrize('dotted_path_str', ['a', 'a..b.c'])
def test_lazily_locate_dotted_path_error_if_invalid_dotted_path(
dotted_path_str):
with pytest.raises(ValueError) as excinfo:
dotted_path.lazily_locate_dotted_path(dotted_path_str)
expected = (f"Invalid dotted path '{dotted_path_str}'. "
"Value must be a dot "
"separated string, with at least two parts: "
"[module_name].[function_name]")
assert str(excinfo.value) == expected
def test_lazily_locate_dotted_path_missing_module(tmp_directory,
add_current_to_sys_path,
no_sys_modules_cache):
Path('a').mkdir()
Path('a', '__init__.py').touch()
with pytest.raises(ModuleNotFoundError) as excinfo:
dotted_path.lazily_locate_dotted_path('a.b.c')
assert "No module named 'a.b'. Expected to find one of" in str(
excinfo.value)
def test_error_if_doesnt_define_name(tmp_directory, add_current_to_sys_path,
no_sys_modules_cache):
Path('a.py').touch()
with pytest.raises(AttributeError) as excinfo:
dotted_path.lazily_locate_dotted_path('a.unknown_name')
assert "Failed to locate dotted path 'a.unknown_name'" in str(
excinfo.value)
assert "a.py" in str(excinfo.value)
assert "a function named 'unknown_name'" in str(excinfo.value)
def test_lazy_load_missing_function():
dp = dotted_path.DottedPath('not_a_module.not_a_function', lazy_load=True)
with pytest.raises(ModuleNotFoundError):
dp()
def test_eager_load_missing_function():
with pytest.raises(ModuleNotFoundError):
dotted_path.DottedPath('not_a_module.not_a_function', lazy_load=False)
def test_init_and_call_dotted_path(tmp_directory, tmp_imports):
Path('some_module.py').write_text("""
def fn(some_arg):
return some_arg
""")
dp = dotted_path.DottedPath('some_module.fn', lazy_load=False)
assert dp(42) == 42
@pytest.mark.parametrize('primitive', [
'some_module.fn',
{
'dotted_path': 'some_module.fn'
},
{
'dotted_path': 'some_module.fn',
'some_arg': 42,
},
])
def test_dotted_path_repr(tmp_directory, tmp_imports, primitive):
Path('some_module.py').write_text("""
def fn(some_arg):
return some_arg
""")
dp = dotted_path.DottedPath(primitive, lazy_load=True)
assert repr(dp) == "DottedPath('some_module.fn')"
dp._load_callable()
assert 'loaded:' in repr(dp)
def test_dotted_path_from_dict(tmp_directory, tmp_imports):
Path('some_module.py').write_text("""
def fn(some_arg):
return some_arg
""")
dp = dotted_path.DottedPath(dict(dotted_path='some_module.fn',
some_arg=10),
lazy_load=False)
assert dp() == 10
def test_dotted_path_if_overriding_args(tmp_directory, tmp_imports):
Path('some_module.py').write_text("""
def fn(some_arg):
return some_arg
""")
dp = dotted_path.DottedPath(dict(dotted_path='some_module.fn',
some_arg=10),
lazy_load=False)
with pytest.warns(UserWarning) as record:
dp(some_arg=20)
expected = ("Got duplicated arguments ('some_arg') when calling "
"dotted path 'some_module.fn'. Overriding values...")
assert record[0].message.args[0] == expected
|
199259
|
from operator import itemgetter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from openreview_matcher.evals import base_evaluator
from openreview_matcher import utils
matplotlib.style.use('ggplot')
class Evaluator(base_evaluator.Evaluator):
"""
An Evaluator instance that evaluates
precision_at_m =
(number of papers reviewers bid positively on in top M) /
(total number of papers retrieved)
This evaluation method requires us to look at the bids, so we import
them from somewhere in the __init__() method
"""
def __init__(self, params=None):
datapath = params["data_path"]
self.m_values = params["m_values"]
self.data = utils.load_obj(datapath)
self.bids_by_forum = self.data["bids_by_forum"]
def evaluate(self, ranklists):
"""
Evaluate the model using a ranked list. Either you can evaluate using a single ranked list or
evaluate against each individual query and average their precision scores
Arguments
@ranklists: a list of tuples.
The 0th index of the tuple contains the forum ID of the rank of the list being evaluated
The 1st index of the tuple contains a list of reviewer IDS, in order of expertise score
Returns
a generator object that yields an array of scores for each ranked list. If only one score
is need, return the score in an array by itself
"""
return self.evaluate_using_single_rank(ranklists)
def evaluate_using_individual_queries(self, ranklists):
""" Evaluate using individual query ranks """
for forum, rank_list in ranklists:
scores = []
for m in self.m_values:
positive_labels = ["I want to review", "I can review"]
positive_bids = [bid.signatures[0].encode('utf-8') for bid in self.bids_by_forum[forum] if bid.tag in positive_labels]
relevant_reviewers = [1 if reviewer_id in positive_bids else 0 for reviewer_id in rank_list]
precision = self.precision_at_m(relevant_reviewers, m)
scores.append(precision)
yield forum, scores
def setup_ranked_list(self, rank_list):
"""
Setup the single ranked list for a model
Combines all of the individual query ranks into one single rank
"""
new_rank_list = []
        for forum, forum_rank_list in rank_list:
            for reviewer_score in forum_rank_list:
reviewer = reviewer_score.split(";")[0]
score = float(reviewer_score.split(";")[1])
has_bid = self.reviewer_has_bid(reviewer, forum) # filter for reviewers that gave a bid value
if has_bid:
new_rank_list.append((reviewer, score, forum))
ranked_reviewers = sorted(new_rank_list, key=itemgetter(1), reverse=True)
return ranked_reviewers
def reviewer_has_bid(self, reviewer, paper):
""" Returns True if the reviewer bid on that 'paper' """
paper_bids = self.bids_by_forum[paper]
        has_bid = any(bid.signatures[0] == reviewer.decode("utf-8") for bid in paper_bids)
return has_bid
def get_bid_for_reviewer_paper(self, reviewer, paper):
"""
Gets the bid for the reviewer and the paper
Returns 0 if the bid is not relevant and 1 if the bid is relevant
"""
positive_labels = ['I want to review', 'I can review']
paper_bids = self.bids_by_forum[paper]
bid_value = [1 if bid.tag in positive_labels else 0 for bid in paper_bids if
bid.signatures[0] == reviewer.decode('utf-8')]
if len(bid_value) > 0:
return bid_value[0]
else:
return 0
def evaluate_using_single_rank(self, rank_list):
"""
Evaluate against a single ranked list computed by the model
"""
ranked_reviewers = self.setup_ranked_list(rank_list)
scores = []
positive_bids = 0
for reviewer, score, forum in ranked_reviewers:
bid = self.get_bid_for_reviewer_paper(reviewer, forum)
if bid == 1:
                positive_bids += 1
for m in range(1, len(ranked_reviewers) + 1):
topM = ranked_reviewers[0: m]
topM = map(lambda reviewer: (reviewer[0], self.get_bid_for_reviewer_paper(reviewer[0], reviewer[2])), topM)
pos_bids_from_topM = [bid for bid in topM if bid[1] == 1]
precision = float(len(pos_bids_from_topM)) / float(m) # precision => relevant bids retrieved / # of retrieved
scores.append((m, precision))
return scores
def precision_at_m(self, ranked_list, m):
"""
Computes precision at M
Arguments:
            ranked_list: ranked list of reviewers for a forum, where each entry is 0 or 1
                1 - the reviewer bid positively on the paper
                0 - the reviewer did not bid
            m: cutoff value
Returns:
A float representing the precision
"""
topM = np.asarray(ranked_list)[:m] != 0
return np.mean(topM)
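    # Worked example (illustrative): precision_at_m([1, 0, 1, 1], 3) inspects
    # the top 3 entries [1, 0, 1] and returns 2/3.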
    def graph_precision_values(self, precision_values):
        """ Graph the precision values against M values """
        fig, ax = plt.subplots()
        df_precision = pd.DataFrame({
            '@M': range(1, len(precision_values)+1),
            'Precision': precision_values
        })
        ax = df_precision.plot.line(x="@M", y="Precision", ax=ax)
        ax.set_title("Precision Curve", y=1.08)
        ax.set_ylabel("Precision")
        fig.savefig("results/figures/{0}".format("precision_curve_bow_avg"), dpi=200)
|
199290
|
import unittest
import io
from svgelements import *
class TestElementShape(unittest.TestCase):
def test_rect_dict(self):
values = {
'tag': 'rect',
'rx': "4",
'ry': "2",
'x': "50",
'y': "51",
'width': "20",
'height': "10"
}
e = Rect(values)
e2 = Rect(50, 51, 20, 10, 4, 2)
self.assertEqual(e, e2)
e2 *= "translate(2)"
e3 = Rect()
self.assertNotEqual(e, e3)
def test_line_dict(self):
values = {
'tag': 'rect',
'x1': "0",
'y1': "0",
'x2': "100",
'y2': "100"
}
e = SimpleLine(values)
e2 = SimpleLine(0, '0px', '100px', '100px')
e3 = SimpleLine(0, 0, 100, 100)
self.assertEqual(e, e2)
self.assertEqual(e, e3)
e4 = SimpleLine()
self.assertNotEqual(e, e4)
def test_ellipse_dict(self):
values = {
'tag': 'ellipse',
'rx': "4.0",
'ry': "8.0",
'cx': "22.4",
'cy': "33.33"
}
e = Ellipse(values)
e2 = Ellipse(22.4, 33.33, 4, 8)
self.assertEqual(e, e2)
e3 = Ellipse()
self.assertNotEqual(e, e3)
def test_circle_dict(self):
values = {
'tag': 'circle',
'r': "4.0",
'cx': "22.4",
'cy': "33.33"
}
e = Circle(values)
e2 = Circle(22.4, 33.33, 4)
self.assertEqual(e, e2)
e3 = Circle()
self.assertNotEqual(e, e3)
circle_d = e.d()
self.assertEqual(Path(circle_d),
'M26.4,33.33A4,4 0 0,1 22.4,37.33 A4,4 0 0,1 18.4,33.33 A4,4 0 0,1 22.4,29.33 A4,4 0 0,1 26.4,33.33Z')
def test_polyline_dict(self):
values = {
'tag': 'polyline',
'points': '0,100 50,25 50,75 100,0',
}
e = Polyline(values)
e2 = Polyline(0, 100, 50, 25, 50, 75, 100, 0)
self.assertEqual(e, e2)
e3 = Polyline()
self.assertNotEqual(e, e3)
polyline_d = e.d()
self.assertEqual(Path(polyline_d), "M 0,100 L 50,25 L 50,75 L 100,0")
def test_polygon_dict(self):
values = {
'tag': 'polyline',
'points': '0,100 50,25 50,75 100,0',
}
e = Polygon(values)
e2 = Polygon(0, 100, 50, 25, 50, 75, 100, 0)
self.assertEqual(e, e2)
e3 = Polygon()
self.assertNotEqual(e, e3)
polygon_d = e.d()
self.assertEqual(Path(polygon_d), 'M 0,100 L 50,25 L 50,75 L 100,0 Z')
def test_circle_ellipse_equal(self):
self.assertTrue(Ellipse(center=(0, 0), rx=10, ry=10) == Circle(center="0,0", r=10.0))
def test_transform_circle_to_ellipse(self):
c = Circle(center="0,0", r=10.0)
p = c * Matrix.skew_x(Angle.degrees(50))
p.reify()
p = c * "translate(10,1)"
p.reify()
p = c * "scale(10,1)"
p.reify()
p = c * "rotate(10deg)"
p.reify()
p = c * "skewy(10)"
p.reify()
self.assertFalse(isinstance(Circle(), Ellipse))
self.assertFalse(isinstance(Ellipse(), Circle))
def test_circle_decomp(self):
circle = Circle()
c = Path(circle.d())
self.assertEqual(c, "M 1,0 A 1,1 0 0,1 0,1 A 1,1 0 0,1 -1,0 A 1,1 0 0,1 0,-1 A 1,1 0 0,1 1,0 Z")
circle *= "scale(2,1)"
c = Path(circle.d())
self.assertEqual(c, "M 2,0 A 2,1 0 0,1 0,1 A 2,1 0 0,1 -2,0 A 2,1 0 0,1 0,-1 A 2,1 0 0,1 2,0 Z")
circle *= "scale(0.5,1)"
c = Path(circle.d())
self.assertEqual(c, "M 1,0 A 1,1 0 0,1 0,1 A 1,1 0 0,1 -1,0 A 1,1 0 0,1 0,-1 A 1,1 0 0,1 1,0 Z")
def test_circle_implicit(self):
shape = Circle()
shape *= "translate(40,40) rotate(15deg) scale(2,1.5)"
self.assertAlmostEqual(shape.implicit_rx, 2.0)
self.assertAlmostEqual(shape.implicit_ry, 1.5)
self.assertAlmostEqual(shape.rotation, Angle.degrees(15))
self.assertEqual(shape.implicit_center, (40, 40))
def test_rect_implicit(self):
shape = Rect()
shape *= "translate(40,40) rotate(15deg) scale(2,1.5)"
self.assertAlmostEqual(shape.implicit_x, 40)
self.assertAlmostEqual(shape.implicit_y, 40)
self.assertAlmostEqual(shape.implicit_width, 2)
self.assertAlmostEqual(shape.implicit_height, 1.5)
self.assertAlmostEqual(shape.implicit_rx, 0)
self.assertAlmostEqual(shape.implicit_ry, 0)
self.assertAlmostEqual(shape.rotation, Angle.degrees(15))
def test_line_implicit(self):
shape = SimpleLine(0, 0, 1, 1)
shape *= "translate(40,40) rotate(15deg) scale(2,1.5)"
self.assertAlmostEqual(shape.implicit_x1, 40)
self.assertAlmostEqual(shape.implicit_y1, 40)
p = Point(1, 1) * "rotate(15deg) scale(2,1.5)"
self.assertAlmostEqual(shape.implicit_x2, 40 + p[0])
self.assertAlmostEqual(shape.implicit_y2, 40 + p[1])
self.assertAlmostEqual(shape.rotation, Angle.degrees(15))
def test_circle_equals_transformed_circle(self):
shape1 = Circle(r=2)
shape2 = Circle().set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
shape2.reify()
self.assertEqual(shape1, shape2)
def test_rect_equals_transformed_rect(self):
shape1 = Rect(x=0, y=0, width=2, height=2)
shape2 = Rect(0, 0, 1, 1).set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
shape2.reify()
self.assertEqual(shape1, shape2)
def test_rrect_equals_transformed_rrect(self):
shape1 = Rect(0, 0, 2, 2, 1, 1)
shape2 = Rect(0, 0, 1, 1, 0.5, 0.5).set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
shape2.reify()
self.assertEqual(shape1, shape2)
def test_line_equals_transformed_line(self):
shape1 = SimpleLine(0, 0, 2, 2)
shape2 = SimpleLine(0, 0, 1, 1).set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
shape2.reify()
self.assertEqual(shape1, shape2)
def test_polyline_equals_transformed_polyline(self):
shape1 = Polyline(0, 0, 2, 2)
shape2 = Polyline(0, 0, 1, 1).set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
shape2.reify()
self.assertEqual(shape1, shape2)
def test_polygon_equals_transformed_polygon(self):
shape1 = Polyline(0, 0, 2, 2)
shape2 = Polyline(0, 0, 1, 1).set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
shape2.reify()
self.assertEqual(shape1, shape2)
def test_polyline_not_equal_transformed_polygon(self):
shape1 = Polyline(0, 0, 2, 2)
shape2 = Polygon(0, 0, 1, 1) * "scale(2)"
self.assertNotEqual(shape1, shape2)
def test_polyline_closed_equals_transformed_polygon(self):
shape1 = Path(Polyline(0, 0, 2, 2)) + "z"
shape2 = Polygon(0, 0, 1, 1).set('vector-effect', 'non-scaling-stroke') * "scale(2)"
self.assertEqual(shape1, shape2)
def test_path_plus_shape(self):
path = Path("M 0,0 z")
path += Rect(0, 0, 1, 1)
self.assertEqual(path, "M0,0zM0,0h1v1h-1z")
def test_circle_not_equal_red_circle(self):
shape1 = Circle()
shape2 = Circle(stroke="red")
self.assertNotEqual(shape1, shape2)
shape1 = Circle()
shape2 = Circle(fill="red")
self.assertNotEqual(shape1, shape2)
def test_rect_initialize(self):
shapes = (
Rect(),
Rect(0),
Rect(0, 0),
Rect(0, 0, 1),
Rect(0, 0, 1, 1),
Rect(0, y=0),
Rect(0, y=0, width=1),
Rect(0, y=0, width=1, height=1),
Rect(width=1, height=1, x=0, y=0),
Rect(0, 0, 1, 1, 0, 0),
Rect(0, 0, 1, 1, rx=0, ry=0)
)
for s in shapes:
self.assertEqual(shapes[0], s)
def test_circle_initialize(self):
shapes = (
Circle(),
Circle(0, 0),
Circle(center=(0, 0), r=1),
Circle("0px", "0px", 1),
Ellipse("0", "0", 1, 1),
Ellipse("0", "0", rx=1, ry=1),
Ellipse(0, 0, 1, ry=1),
Circle(Circle()),
Circle({"cx": 0, "cy": 0, "r": 1}),
Ellipse({"cx": 0, "cy": 0, "rx": 1}),
Ellipse({"cx": 0, "cy": 0, "ry": 1}),
Ellipse({"cx": 0, "cy": 0, "rx": 1, "ry": 1.0}),
Circle(Ellipse()),
Ellipse(Circle())
)
for s in shapes:
self.assertEqual(shapes[0], s)
def test_polyline_initialize(self):
shapes = (
Polyline(0, 0, 1, 1),
Polyline((0, 0), (1, 1)),
Polyline(points=((0, 0), (1, 1))),
Polyline("0,0", "1,1"),
Polyline("0,0", (1, 1)),
Polyline("0,0", Point(1, 1)),
Polyline({"points": "0,0,1,1"}),
Polyline(Polyline(0, 0, 1, 1)),
Path("M0,0L1,1"),
SimpleLine(0, 0, 1, 1),
)
for s in shapes:
self.assertEqual(shapes[0], s)
def test_polygon_initialize(self):
shapes = (
Polygon(0, 0, 1, 1),
Polygon((0, 0), (1, 1)),
Polygon(points=((0, 0), (1, 1))),
Polygon("0,0", "1,1"),
Polygon("0,0", (1, 1)),
Polygon("0,0", Point(1, 1)),
Polygon({"points": "0,0,1,1"}),
Polygon(Polyline(0, 0, 1, 1)),
Polygon("0,0,1,1"),
Path("M0,0L1,1z"),
)
for s in shapes:
self.assertEqual(shapes[0], s)
def test_shapes_repr(self):
s = Rect(fill='red')
self.assertEqual(repr(s), "Rect(width=1, height=1, fill='#ff0000')")
s = Ellipse(fill='red')
self.assertEqual(repr(s), "Ellipse(cx=0, cy=0, r=1, fill='#ff0000')")
s = Circle(fill='red')
self.assertEqual(repr(s), "Circle(cx=0, cy=0, r=1, fill='#ff0000')")
s = SimpleLine(fill='red')
self.assertEqual(repr(s), "SimpleLine(x1=0.0, y1=0.0, x2=0.0, y2=0.0, fill='#ff0000')")
s = Polygon(fill='red')
self.assertEqual(repr(s), "Polygon(points='', fill='#ff0000')")
s = Polyline(fill='red')
self.assertEqual(repr(s), "Polyline(points='', fill='#ff0000')")
s = Path(fill='red')
self.assertEqual(repr(s), "Path(fill='#ff0000')")
def test_shape_bbox(self):
s = Rect() * 'scale(20)'
self.assertEqual(s.bbox(False), (0, 0, 1, 1))
self.assertEqual(s.bbox(True), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(False), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(True), (0, 0, 1, 1))
s = Circle() * 'scale(20)'
self.assertEqual(s.bbox(False), (-1, -1, 1, 1))
self.assertEqual(s.bbox(True), (-20, -20, 20, 20))
self.assertNotEqual(s.bbox(False), (-20, -20, 20, 20))
self.assertNotEqual(s.bbox(True), (-1, -1, 1, 1))
s = Ellipse() * 'scale(20)'
self.assertEqual(s.bbox(False), (-1, -1, 1, 1))
self.assertEqual(s.bbox(True), (-20, -20, 20, 20))
self.assertNotEqual(s.bbox(False), (-20, -20, 20, 20))
self.assertNotEqual(s.bbox(True), (-1, -1, 1, 1))
s = Polygon() * 'scale(20)'
self.assertEqual(s.bbox(False), None)
self.assertEqual(s.bbox(True), None)
self.assertNotEqual(s.bbox(False), (0, 0, 0, 0))
self.assertNotEqual(s.bbox(True), (0, 0, 0, 0))
s = Polyline() * 'scale(20)'
self.assertEqual(s.bbox(False), None)
self.assertEqual(s.bbox(True), None)
self.assertNotEqual(s.bbox(False), (0, 0, 0, 0))
self.assertNotEqual(s.bbox(True), (0, 0, 0, 0))
s = Polygon("0,0 0,1 1,1 1,0 0,0") * 'scale(20)'
self.assertEqual(s.bbox(False), (0, 0, 1, 1))
self.assertEqual(s.bbox(True), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(False), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(True), (0, 0, 1, 1))
s = Polyline("0,0 0,1 1,1 1,0 0,0") * 'scale(20)'
self.assertEqual(s.bbox(False), (0, 0, 1, 1))
self.assertEqual(s.bbox(True), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(False), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(True), (0, 0, 1, 1))
s = SimpleLine(0, 0, 1, 1) * 'scale(20)'
self.assertEqual(s.bbox(False), (0, 0, 1, 1))
self.assertEqual(s.bbox(True), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(False), (0, 0, 20, 20))
self.assertNotEqual(s.bbox(True), (0, 0, 1, 1))
def test_rect_rot_equal_rect_path_rotate(self):
r = Rect(10, 10, 8, 4)
a = r.d()
b = Path(a).d()
self.assertEqual(a, b)
a = (Path(r.d()) * "rotate(0.5turns)").d()
b = (r * "rotate(0.5turns)").d()
self.assertEqual(a, b)
def test_rect_reify(self):
"""Reifying a rotated rect."""
reification_checks(self, Rect())
reification_checks(self, Rect(2, 2, 4, 4))
shape = Rect() * "rotate(-90) translate(20,0)"
t = Rect(0, -20, 1, 1)
t *= "rotate(-90, 0, -20)"
self.assertEqual(t, shape)
def test_circle_reify(self):
"""Reifying a rotated circle."""
reification_checks(self, Circle())
reification_checks(self, Circle(2, 2, 4, 4))
def test_ellipse_reify(self):
"""Reifying a rotated ellipse."""
reification_checks(self, Ellipse(rx=1, ry=2))
reification_checks(self, Ellipse(2, 2, 5, 8))
def test_polyline_reify(self):
"""Reifying a rotated polyline."""
reification_checks(self, Polyline("0,0 1,1 2,2"))
reification_checks(self, Polyline("0,0 1,1 2,0"))
def test_polygon_reify(self):
"""Reifying a rotated polygon."""
reification_checks(self, Polygon("0,0 1,1 2,2"))
reification_checks(self, Polygon("0,0 1,1 2,0"))
def test_line_reify(self):
"""Reifying a rotated line."""
reification_checks(self, SimpleLine(0, 0, 1, 1))
reification_checks(self, SimpleLine(2, 2, 1, 0))
def test_path_reify(self):
"""Reifying a path."""
reification_checks(self, Path("M0,0L1,1L1,0z"))
reification_checks(self, Path("M100,100L70,70L45,0z"))
def test_shapes_degenerate(self):
"""Testing Degenerate Shapes"""
self.assertEqual(Rect(0, 0, 0, 100).d(), '')
self.assertEqual(Rect(0, 0, 100, 0).d(), '')
self.assertEqual(Circle(0, 0, 0).d(), '')
self.assertEqual(Ellipse(0,0,0,100).d(), '')
self.assertEqual(Ellipse(0, 0, 100, 0).d(), '')
self.assertEqual(Polygon(points='').d(), '')
def test_issue_95(self):
"""Testing Issue 95 stroke-width"""
q = io.StringIO(u'''<?xml version="1.0" encoding="utf-8" ?>
<svg>
<ellipse style="stroke:#fc0000;stroke-width:1;fill:none" cx="0" cy="0" rx="1" ry="1" transform="scale(100) rotate(-90,0,0)"/>
<rect style="stroke:#fc0000;stroke-width:1;fill:none" x="0" y="0" width="10" height="10" transform="scale(100) rotate(-90,0,0)"/>
</svg>''')
m = SVG.parse(q)
ellipse = m[0]
for i in range(5):
ellipse = ellipse.reify()
self.assertEqual(ellipse.stroke_width, 1.0)
rect = m[1]
for i in range(5):
rect = rect.reify()
self.assertEqual(rect.stroke_width, 1.0)
def test_issue_99(self):
"""Test Issue of inverted circle reified location"""
q = io.StringIO(u'''<?xml version="1.0" encoding="utf-8" ?>
<svg
width="82.475mm"
height="35.215mm"
viewBox="24.766026 -242.607513 82.475082 35.214996"
version="1.1"
>
<circle
transform="scale(1,-1)"
style="opacity:0.99;fill:none;stroke:#ff0000;stroke-width:0.0264584;stroke-miterlimit:4;stroke-dasharray:none"
r="2"
cx="100.41245"
cy="211.59723"
id="circle2" /></svg>
''')
m = SVG.parse(q, reify=False)
q = copy(m[0])
r = copy(m[0])
self.assertEqual(q, r)
q.reify()
r = Path(r)
q = Path(q)
self.assertEqual(q, r)
r.reify()
q.reify()
self.assertEqual(q, r)
def test_issue_99b(self):
"""Test Issue of double inverted circle reified location"""
q = io.StringIO(u'''<?xml version="1.0" encoding="utf-8" ?>
<svg
width="82.475mm"
height="35.215mm"
viewBox="24.766026 -242.607513 82.475082 35.214996"
version="1.1"
>
<circle
transform="scale(-1,-1)"
style="opacity:0.99;fill:none;stroke:#ff0000;stroke-width:0.0264584;stroke-miterlimit:4;stroke-dasharray:none"
r="2"
cx="100.41245"
cy="211.59723"
id="circle2" /></svg>
''')
m = SVG.parse(q, reify=False)
q = copy(m[0])
r = copy(m[0])
self.assertEqual(q, r)
q.reify()
r = Path(r)
q = Path(q)
self.assertEqual(q, r)
r.reify()
q.reify()
self.assertEqual(q, r)
def test_issue_99c(self):
"""Test Issue of inverted rect reified location"""
q = io.StringIO(u'''<?xml version="1.0" encoding="utf-8" ?>
<svg
width="82.475mm"
height="35.215mm"
viewBox="24.766026 -242.607513 82.475082 35.214996"
version="1.1"
>
<rect
transform="scale(1,-1)"
style="opacity:0.99;fill:none;stroke:#ff0000;stroke-width:0.0264584;stroke-miterlimit:4;stroke-dasharray:none"
rx="2"
x="100.41245"
y="211.59723"
width="100"
height="100"
id="circle2" /></svg>
''')
m = SVG.parse(q, reify=False)
q = copy(m[0])
r = copy(m[0])
self.assertEqual(q, r)
q.reify()
r = Path(r)
q = Path(q)
self.assertEqual(q, r)
r.reify()
q.reify()
self.assertEqual(q, r)
def test_issue_99d(self):
"""Test Issue of double inverted rect reified location"""
q = io.StringIO(u'''<?xml version="1.0" encoding="utf-8" ?>
<svg
width="82.475mm"
height="35.215mm"
viewBox="24.766026 -242.607513 82.475082 35.214996"
version="1.1"
>
<rect
transform="scale(-1,-1)"
style="opacity:0.99;fill:none;stroke:#ff0000;stroke-width:0.0264584;stroke-miterlimit:4;stroke-dasharray:none"
rx="2"
x="100.41245"
y="211.59723"
width="100"
height="100"
id="circle2" /></svg>
''')
m = SVG.parse(q, reify=False)
q = copy(m[0])
r = copy(m[0])
self.assertEqual(q, r)
q.reify()
r = Path(r)
q = Path(q)
self.assertEqual(q, r)
r.reify()
q.reify()
self.assertEqual(q, r)
def test_issue_104(self):
"""Testing Issue 104 degenerate parsing"""
q = io.StringIO(u'''<?xml version="1.0" encoding="utf-8" ?>
<svg>
<polygon points=""/>
<polygon/>
<rect x="0" y="0" width="0" height="10"/>
<circle cx="0" cy="0" r="0"/>
</svg>''')
m = SVG.parse(q)
self.assertEqual(len(m), 0)
def test_rect_strict(self):
values = {
'tag': 'rect',
'rx': "-4",
'x': "50",
'y': "51",
'width': "20",
'height': "10"
}
e = Rect(values)
e2 = Rect(50, 51, 20, 10)
self.assertEqual(e, e2)
e3 = Rect(values)
e3._strict = False # unstrict rx-negative rectangles, have scooped corners.
self.assertNotEqual(e3, e2)
values['ry'] = 4
e4 = Rect(values)
self.assertEqual(e, e4)
def test_shape_npoints(self):
import numpy as np
shapes = [
Rect(10, 20, 300, 340),
Circle(10, 10, 5),
Ellipse(50, 50, 30, 20),
Polygon(points=((10, 10), (20, 30), (50, 20))),
Polyline(points=((10, 10), (20, 30), (50, 20), (100, 120))),
]
for shape in shapes:
pos = np.linspace(0, 1, 1000)
# with disable_numpy():
            pts1 = shape.npoint(pos)  # without disable_numpy() this duplicates pts2, making the check trivial
pts2 = shape.npoint(pos)
for p, p1, p2 in zip(pos, pts1, pts2):
self.assertEqual(shape.point(p), Point(p1))
self.assertEqual(Point(p1), Point(p2))
def reification_checks(test, shape):
correct_reify(test, shape * "rotate(-90) translate(20,0)")
correct_reify(test, shape * "rotate(12turn)")
correct_reify(test, shape * "translate(20,0)")
correct_reify(test, shape * "scale(2) translate(20,0)")
correct_reify(test, shape * "rotate(90) scale(-1) translate(20,0)")
correct_reify(test, shape * "rotate(90) translate(20,0)")
correct_reify(test, shape * "skewX(10)")
correct_reify(test, shape * "skewY(10)")
def correct_reify(test, shape):
path = abs(Path(shape))
reified = abs(copy(shape))
test.assertEqual(path, shape)
test.assertEqual(reified, shape)
test.assertEqual(reified, path)
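# Note: abs() on svgelements shapes/paths is assumed here to return a copy with
# the transform applied (reified coordinates), so `path`, `reified` and the
# original shape should all compare equal above.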
|
199313
|
from itertools import groupby
import django
from django import template
register = template.Library()
class DynamicRegroupNode(template.Node):
"""
Extends Django's regroup tag to accept a variable instead of a string literal
for the property you want to regroup on
"""
def __init__(self, target, parser, expression, var_name):
self.target = target
self.expression = template.Variable(expression)
self.var_name = var_name
self.parser = parser
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
        # Try to resolve the filter expression from the template context.
        # If the variable doesn't exist, accept the value passed to the
        # template tag and convert it to a string.
try:
exp = self.expression.resolve(context)
except template.VariableDoesNotExist:
exp = str(self.expression)
filter_exp = self.parser.compile_filter(exp)
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda v, f=filter_exp.resolve: f(v, True))
]
return ''
@register.tag
def dynamic_regroup(parser, token):
"""
Django expects the value of `expression` to be an attribute available on
your objects. The value you pass to the template tag gets converted into a
FilterExpression object from the literal.
Sometimes we need the attribute to group on to be dynamic. So, instead
of converting the value to a FilterExpression here, we're going to pass the
value as-is and convert it in the Node.
"""
firstbits = token.contents.split(None, 3)
if len(firstbits) != 4:
raise template.TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(firstbits[1])
if firstbits[2] != 'by':
raise template.TemplateSyntaxError(
"second argument to 'regroup' tag must be 'by'")
lastbits_reversed = firstbits[3][::-1].split(None, 2)
if lastbits_reversed[1][::-1] != 'as':
raise template.TemplateSyntaxError(
"next-to-last argument to 'regroup' tag must be 'as'")
expression = lastbits_reversed[2][::-1]
var_name = lastbits_reversed[0][::-1]
    # We also need to hand the parser to the node in order to convert the value
    # for `expression` to a FilterExpression.
return DynamicRegroupNode(target, parser, expression, var_name)
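# Template usage sketch (hypothetical variable names): `attr_var` is a context
# variable holding the attribute name to group on, e.g.
#   {% dynamic_regroup people by attr_var as grouped %}
#   {% for group in grouped %}{{ group.grouper }}: {{ group.list|length }}{% endfor %}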
@register.simple_tag
def get_django_version():
version = django.VERSION
return {'major': version[0], 'minor': version[1]}
|
199317
|
import uuid
try:
from django_mongoengine import Document
except ImportError:
from mongoengine import Document
from mongoengine import StringField, UUIDField, BooleanField
from mongoengine import EmbeddedDocument
from django.conf import settings
from crits.core.crits_mongoengine import CritsBaseAttributes
from crits.core.crits_mongoengine import CritsSourceDocument
from crits.core.crits_mongoengine import CommonAccess, CritsDocumentFormatter
from crits.core.crits_mongoengine import CritsActionsDocument
from crits.events.migrate import migrate_event
from crits.vocabulary.events import EventTypes
class UnreleasableEventError(Exception):
"""
Exception for attempting to release an event relationship that is
unreleasable.
"""
    def __init__(self, value, **kwargs):
        self.message = ("Relationship %s cannot be released to the event's "
                        "releasability list." % value)
super(UnreleasableEventError, self).__init__(**kwargs)
def __str__(self):
return repr(self.message)
class Event(CritsBaseAttributes, CritsSourceDocument, CritsActionsDocument,
Document):
"""
Event class.
"""
meta = {
"collection": settings.COL_EVENTS,
"auto_create_index": False,
"crits_type": 'Event',
"latest_schema_version": 3,
"schema_doc": {
'title': 'Title of this event',
'event_id': 'Unique event ID',
'event_type': 'Type of event based on Event Type options',
'description': 'Description of the event',
'source': ('List [] of sources who provided information about this'
' event')
},
"jtable_opts": {
'details_url': 'crits-events-views-view_event',
'details_url_key': 'id',
'default_sort': "created DESC",
'searchurl': 'crits-events-views-events_listing',
'fields': [ "title", "event_type", "created",
"source", "campaign", "status", "id"],
'jtopts_fields': [ "details",
"title",
"event_type",
"created",
"source",
"campaign",
"status",
"favorite",
"id"],
'hidden_fields': [],
'linked_fields': ["source", "campaign", "event_type"],
'details_link': 'details',
'no_sort': ['details']
}
}
title = StringField(required=True)
event_type = StringField(required=True)
# description also exists in CritsBaseAttributes, but this one is required.
description = StringField(required=True)
event_id = UUIDField(binary=True, required=True, default=uuid.uuid4)
def set_event_type(self, event_type):
"""
Set the Event Type.
:param event_type: The event type to set (must exist in DB).
:type event_type: str
"""
if event_type in EventTypes.values():
self.event_type = event_type
def migrate(self):
"""
Migrate to the latest schema version.
"""
migrate_event(self)
class EventAccess(EmbeddedDocument, CritsDocumentFormatter, CommonAccess):
"""
ACL for Events.
"""
add_sample = BooleanField(default=False)
title_edit = BooleanField(default=False)
type_edit = BooleanField(default=False)
|
199408
|
import os

import oe.path
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
from oeqa.core.decorator.oeid import OETestID
class Fetch(OESelftestTestCase):
@OETestID(1058)
def test_git_mirrors(self):
"""
Verify that the git fetcher will fall back to the HTTP mirrors. The
recipe needs to be one that we have on the Yocto Project source mirror
and is hosted in git.
"""
        # TODO: use tempfile.mkdtemp() instead of hardcoding the path
dldir = os.path.join(self.builddir, "download-git-mirrors")
self.track_for_cleanup(dldir)
# No mirrors, should use git to fetch successfully
features = """
DL_DIR = "%s"
MIRRORS_forcevariable = ""
PREMIRRORS_forcevariable = ""
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
bitbake("dbus-wait -c fetch -f")
# No mirrors and broken git, should fail
features = """
DL_DIR = "%s"
GIT_PROXY_COMMAND = "false"
MIRRORS_forcevariable = ""
PREMIRRORS_forcevariable = ""
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
with self.assertRaises(AssertionError):
bitbake("dbus-wait -c fetch -f")
# Broken git but a specific mirror
features = """
DL_DIR = "%s"
GIT_PROXY_COMMAND = "false"
MIRRORS_forcevariable = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
bitbake("dbus-wait -c fetch -f")
|
199430
|
import rpy2_R6.r6b as r6b
import rpy2_arrow.pyarrow_rarrow as pyr
import rpy2.rinterface as rinterface
import rpy2.robjects
import rpy2.robjects.conversion
# Python proxies for the R6 class factories
array_factory = r6b.R6DynamicClassGenerator(pyr.rarrow.Array)
recordbatch_factory = r6b.R6DynamicClassGenerator(pyr.rarrow.RecordBatch)
chunkedarray_factory = r6b.R6DynamicClassGenerator(pyr.rarrow.ChunkedArray)
schema_factory = r6b.R6DynamicClassGenerator(pyr.rarrow.Schema)
table_factory = r6b.R6DynamicClassGenerator(pyr.rarrow.Table)
# Conversion functions and rules
converter = rpy2.robjects.conversion.Converter(
'R6b conversion for pyarrow/arrow',
template=rpy2.robjects.default_converter
)
def rpy2py_array(obj):
return array_factory.__R6CLASS__(obj)
def rpy2py_recordbatch(obj):
return recordbatch_factory.__R6CLASS__(obj)
def rpy2py_chunkedarray(obj):
return chunkedarray_factory.__R6CLASS__(obj)
def rpy2py_schema(obj):
return schema_factory.__R6CLASS__(obj)
def rpy2py_table(obj):
return table_factory.__R6CLASS__(obj)
(converter.rpy2py_nc_name[rinterface.SexpEnvironment]
.update({
'Array': array_factory.__R6CLASS__,
'ChunkedArray': chunkedarray_factory.__R6CLASS__,
'RecordBatch': recordbatch_factory.__R6CLASS__,
'Table': table_factory.__R6CLASS__,
'Schema': schema_factory.__R6CLASS__
}))
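# Usage sketch (assumes the standard rpy2 conversion API): activate these rules
# in a local conversion context, e.g.
#   from rpy2.robjects.conversion import localconverter
#   with localconverter(rpy2.robjects.default_converter + converter):
#       ...  # R-side arrow objects now convert through the factories above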
|
199460
|
import sys
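# Splits an input file into held-out and training shards: every 200th line goes
# to <output_prefix>.valid.txt, the rest are written to numbered
# <output_prefix>.train.txt.<N> files of roughly chunk_size lines each.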
def main():
cnt = 0
f_cnt = 0
input_file = sys.argv[1]
output_prefix = sys.argv[2]
chunk_size = int(sys.argv[3])
f_ov = open(f'{output_prefix}.valid.txt', 'w', encoding='utf-8')
f_ot = None
with open(input_file, 'r', encoding='utf-8') as f_in:
for line in f_in:
if cnt % 200 == 199:
f_ov.write(line)
else:
if cnt // chunk_size >= f_cnt:
f_ot = open(f'{output_prefix}.train.txt.{f_cnt}', 'w', encoding='utf-8')
f_cnt += 1
f_ot.write(line)
cnt += 1
    f_ov.close()
    if f_ot is not None:  # stays None when the input file is empty
        f_ot.close()
if __name__ == '__main__':
main()
|
199513
|
from analizer.abstract import instruction
from analizer.typechecker.Metadata import File
from analizer.typechecker.Metadata import Struct
from analizer.reports.Nodo import Nodo
class AlterIndex(instruction.Instruction):
def __init__(self, name, exists, newName, row, column, idOrNumber=None):
instruction.Instruction.__init__(self, row, column)
self.name = name
self.exists = exists
self.newName = newName
self.id = idOrNumber
def execute(self, environment):
Index = File.importFile("Index")
exists = Index.get(self.name)
result = []
if not exists:
if self.exists:
                result.append("INDEX " + self.name + " does not exist")
else:
                result.append("Error: INDEX " + self.name + " does not exist")
return result
if not self.id:
exists = Index.get(self.newName)
if not exists:
Index[self.newName] = Index.pop(self.name)
                result.append(
                    "Renamed INDEX "
                    + self.name
                    + " to "
                    + self.newName
                )
else:
                result.append("Error: INDEX " + self.newName + " already exists")
else:
column = self.newName
index = Index[self.name]
for c in index["Columns"]:
if c["Name"] == column:
if type(self.id) == int:
table = index["Table"]
columns = Struct.extractColumns(instruction.dbtemp, table)
if columns:
if self.id > len(columns):
                                result.append(
                                    "Fatal error: INDEX "
                                    + self.name
                                    + " invalid column number"
                                )
else:
col = columns[self.id - 1].name
c["Name"] = col
                                result.append(
                                    "INDEX "
                                    + self.name
                                    + " changed column "
                                    + column
                                    + " to "
                                    + col
                                )
else:
                            result.append("Fatal error: INDEX " + self.name)
else:
c["Name"] = self.id
                        result.append(
                            "INDEX "
                            + self.name
                            + " changed column "
                            + column
                            + " to "
                            + self.id
                        )
Index[self.name] = index
break
if result == []:
            result.append(
                "Fatal error: INDEX "
                + self.name
                + " invalid column: "
                + self.newName
            )
File.exportFile(Index, "Index")
return result
def dot(self):
new = Nodo("ALTER_INDEX")
n = Nodo(str(self.name))
new.addNode(n)
if self.exists:
ifex = Nodo("IF_EXISTS")
new.addNode(ifex)
nn = Nodo(str(self.newName))
new.addNode(nn)
if self.id:
idornum = Nodo(str(self.id))
new.addNode(idornum)
return new
|
199514
|
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.ImageCollection('JRC/GSW1_1/YearlyHistory') \
.filter(ee.Filter.date('2015-01-01', '2015-12-31'))
waterClass = dataset.select('waterClass')
waterClassVis = {
'min': 0.0,
'max': 3.0,
'palette': ['cccccc', 'ffffff', '99d9ea', '0000ff'],
}
Map.setCenter(59.414, 45.182, 7)
Map.addLayer(waterClass, waterClassVis, 'Water Class')
# Display the map.
Map
|
199658
|
import unittest
from unittest.mock import MagicMock, patch
from conjur import Client
from conjur.errors import MissingRequiredParameterException
from conjur.controller.host_controller import HostController
from conjur.data_object.host_resource_data import HostResourceData
from conjur.resource import Resource
class HostControllerTest(unittest.TestCase):
client = Client
host_resource_data = HostResourceData(action='someaction', host_to_update='somehost')
host_controller = HostController(client, host_resource_data)
def test_host_controller_constructor(self):
mock_client = None
mock_host_resource_data = None
host_controller = HostController(mock_client, mock_host_resource_data)
assert host_controller.client == mock_client
assert host_controller.host_resource_data == mock_host_resource_data
@patch('conjur.api.client')
def test_rotate_api_key_calls_necessary_functions(self, mock_client):
mock_host_resource_data = HostResourceData(action='someaction', host_to_update='somehost')
mock_host_controller = HostController(mock_client, mock_host_resource_data)
mock_host_controller.prompt_for_host_id_if_needed = MagicMock()
mock_host_controller.client.rotate_other_api_key = MagicMock()
mock_host_controller.rotate_api_key()
mock_host_controller.prompt_for_host_id_if_needed.assert_called_once()
mock_host_controller.client.rotate_other_api_key.assert_called_once_with(Resource(type_='host', name=mock_host_resource_data.host_to_update))
def test_user_does_not_provide_host_id_raises_exception(self):
        mock_client = Client
mock_host_resource_data = HostResourceData(action='someaction', host_to_update=None)
mock_host_controller = HostController(mock_client, mock_host_resource_data)
# Raise error that the ID is required
with self.assertRaises(MissingRequiredParameterException):
with patch('builtins.input', return_value=''):
mock_host_controller.prompt_for_host_id_if_needed()
assert mock_host_resource_data.host_to_update == ''
|
199664
|
from flask import request, jsonify, make_response, current_app
import base64
import os
import redis
import uuid
# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class tus_manager(object):
def __init__(self, app=None, upload_url='/file-upload', upload_folder='uploads/', overwrite=True, upload_finish_cb=None):
self.app = app
if app is not None:
self.init_app(app, upload_url, upload_folder, overwrite=overwrite, upload_finish_cb=upload_finish_cb)
def init_app(self, app, upload_url='/file-upload', upload_folder='uploads/', overwrite=True, upload_finish_cb=None):
self.upload_url = upload_url
self.upload_folder = upload_folder
self.tus_api_version = '1.0.0'
self.tus_api_version_supported = '1.0.0'
self.tus_api_extensions = ['creation', 'termination', 'file-check']
self.tus_max_file_size = 4294967296 # 4GByte
self.file_overwrite = overwrite
self.upload_finish_cb = upload_finish_cb
self.upload_file_handler_cb = None
# register the two file upload endpoints
app.add_url_rule(self.upload_url, 'file-upload', self.tus_file_upload, methods=['OPTIONS', 'POST', 'GET'])
app.add_url_rule('{}/<resource_id>'.format( self.upload_url ), 'file-upload-chunk', self.tus_file_upload_chunk, methods=['HEAD', 'PATCH', 'DELETE'])
def upload_file_handler( self, callback ):
self.upload_file_handler_cb = callback
return callback
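    # Usage sketch (the callback's expected signature is an assumption):
    #   tus = tus_manager(app)
    #   @tus.upload_file_handler
    #   def on_upload(upload_file_path, filename):
    #       ...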
# handle redis server connection
def redis_connect(self):
return redis.Redis()
@property
def redis_connection(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'tus_redis'):
ctx.tus_redis = self.redis_connect()
return ctx.tus_redis
def tus_file_upload(self):
response = make_response("", 200)
if request.method == 'GET':
metadata = {}
            for kv in request.headers.get("Upload-Metadata", "").split(","):
                if not kv.strip():
                    continue
                (key, value) = kv.split(" ")
                metadata[key] = base64.b64decode(value).decode("utf-8")
if metadata.get("filename", None) is None:
return make_response("metadata filename is not set", 404)
(filename_name, extension) = os.path.splitext( metadata.get("filename"))
if filename_name.upper() in [os.path.splitext(f)[0].upper() for f in os.listdir( os.path.dirname( self.upload_folder ))]:
response.headers['Tus-File-Name'] = metadata.get("filename")
response.headers['Tus-File-Exists'] = True
else:
response.headers['Tus-File-Exists'] = False
return response
elif request.method == 'OPTIONS' and request.headers.get('Access-Control-Request-Method', None) is not None:
# CORS option request, return 200
return response
if request.headers.get("Tus-Resumable") is not None:
response.headers['Tus-Resumable'] = self.tus_api_version
response.headers['Tus-Version'] = self.tus_api_version_supported
if request.method == 'OPTIONS':
response.headers['Tus-Extension'] = ",".join(self.tus_api_extensions)
response.headers['Tus-Max-Size'] = self.tus_max_file_size
response.status_code = 204
return response
# process upload metadata
metadata = {}
            for kv in request.headers.get("Upload-Metadata", "").split(","):
                if not kv.strip():
                    continue
                (key, value) = kv.split(" ")
                metadata[key] = base64.b64decode(value).decode("utf-8")
if os.path.lexists( os.path.join( self.upload_folder, metadata.get("filename") )) and self.file_overwrite is False:
response.status_code = 409
return response
file_size = int(request.headers.get("Upload-Length", "0"))
resource_id = str(uuid.uuid4())
p = self.redis_connection.pipeline()
p.setex("file-uploads/{}/filename".format(resource_id), "{}".format(metadata.get("filename")), 3600)
p.setex("file-uploads/{}/file_size".format(resource_id), file_size, 3600)
p.setex("file-uploads/{}/offset".format(resource_id), 0, 3600)
p.setex("file-uploads/{}/upload-metadata".format(resource_id), request.headers.get("Upload-Metadata"), 3600)
p.execute()
try:
f = open( os.path.join( self.upload_folder, resource_id ), "wb")
f.seek( file_size - 1)
f.write("\0")
f.close()
except IOError as e:
self.app.logger.error("Unable to create file: {}".format(e))
response.status_code = 500
return response
response.status_code = 201
response.headers['Location'] = '{}{}/{}'.format(request.url_root.rstrip('/'), self.upload_url, resource_id)
response.headers['Tus-Temp-Filename'] = resource_id
response.autocorrect_location_header = False
else:
self.app.logger.warning("Received File upload for unsupported file transfer protocol")
response.data = "Received File upload for unsupported file transfer protocol"
response.status_code = 500
return response
def tus_file_upload_chunk(self, resource_id):
response = make_response("", 204)
response.headers['Tus-Resumable'] = self.tus_api_version
response.headers['Tus-Version'] = self.tus_api_version_supported
offset = self.redis_connection.get("file-uploads/{}/offset".format( resource_id ))
upload_file_path = os.path.join( self.upload_folder, resource_id )
if request.method == 'HEAD':
offset = self.redis_connection.get("file-uploads/{}/offset".format( resource_id ))
if offset is None:
response.status_code = 404
return response
else:
response.status_code = 200
response.headers['Upload-Offset'] = offset
response.headers['Cache-Control'] = 'no-store'
return response
if request.method == 'DELETE':
os.unlink( upload_file_path )
p = self.redis_connection.pipeline()
p.delete("file-uploads/{}/filename".format(resource_id))
p.delete("file-uploads/{}/file_size".format(resource_id))
p.delete("file-uploads/{}/offset".format(resource_id))
p.delete("file-uploads/{}/upload-metadata".format(resource_id))
p.execute()
response.status_code = 204
return response
if request.method == 'PATCH':
filename = self.redis_connection.get("file-uploads/{}/filename".format( resource_id ))
if filename is None or os.path.lexists( upload_file_path ) is False:
self.app.logger.info( "PATCH sent for resource_id that does not exist. {}".format( resource_id))
response.status_code = 410
return response
file_offset = int(request.headers.get("Upload-Offset", 0))
chunk_size = int(request.headers.get("Content-Length", 0))
file_size = int( self.redis_connection.get( "file-uploads/{}/file_size".format( resource_id )) )
if request.headers.get("Upload-Offset") != self.redis_connection.get( "file-uploads/{}/offset".format( resource_id )): # check to make sure we're in sync
response.status_code = 409 # HTTP 409 Conflict
return response
try:
f = open( upload_file_path, "r+b")
except IOError:
f = open( upload_file_path, "wb")
# write only once a file handle was actually obtained (the original wrote in
# a finally block, which raised NameError if both opens failed)
f.seek( file_offset )
f.write(request.data)
f.close()
new_offset = self.redis_connection.incrby( "file-uploads/{}/offset".format( resource_id ), chunk_size)
response.headers['Upload-Offset'] = new_offset
response.headers['Tus-Temp-Filename'] = resource_id
if file_size == new_offset: # file transfer complete, rename from resource id to actual filename
if self.upload_file_handler_cb is None:
os.rename( upload_file_path, os.path.join( self.upload_folder, filename ))
else:
filename = self.upload_file_handler_cb( upload_file_path, filename )
p = self.redis_connection.pipeline()
p.delete("file-uploads/{}/filename".format(resource_id))
p.delete("file-uploads/{}/file_size".format(resource_id))
p.delete("file-uploads/{}/offset".format(resource_id))
p.delete("file-uploads/{}/upload-metadata".format(resource_id))
p.execute()
if self.upload_finish_cb is not None:
self.upload_finish_cb()
return response
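# A minimal wiring sketch (an assumption, not part of this module): it presumes
# a local Redis server is reachable, and the callback name is illustrative.
#
# from flask import Flask
# app = Flask(__name__)
# tm = tus_manager(app, upload_url='/file-upload', upload_folder='uploads/')
#
# @tm.upload_file_handler
# def rename_upload(upload_file_path, filename):
#     # move/rename the finished upload and return the final filename
#     return filename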
|
199682
|
import re
from collections import deque
from contextlib import closing
from cStringIO import StringIO
from flanker.mime.message.headers.parsing import parse_stream
from flanker.mime.message.headers import MimeHeaders
def detect(message):
headers = collect(message)
return Result(
score=len(headers) / float(len(HEADERS)),
status=get_status(headers),
notification=get_notification(message),
diagnostic_code=headers.get('Diagnostic-Code'))
def collect(message):
collected = deque()
for p in message.walk(with_self=True):
for h in HEADERS:
if h in p.headers:
collected.append((h, p.headers[h]))
if p.content_type.is_delivery_status():
collected += collect_from_status(p.body)
return MimeHeaders(collected)
def collect_from_status(body):
out = deque()
with closing(StringIO(body)) as stream:
for i in xrange(3):
out += parse_stream(stream)
return out
def get_status(headers):
for v in headers.getall('Status'):
if RE_STATUS.match(v.strip()):
return v
def get_notification(message):
for part in message.walk():
if part.headers.get('Content-Description',
'').lower() == 'notification':
return part.body
HEADERS = ('Action',
'Content-Description',
'Diagnostic-Code',
'Final-Recipient',
'Received',
'Remote-Mta',
'Reporting-Mta',
'Status')
RE_STATUS = re.compile(r'\d\.\d+\.\d+', re.IGNORECASE)
class Result(object):
def __init__(self, score, status, notification, diagnostic_code):
self.score = score
self.status = status
self.notification = notification
self.diagnostic_code = diagnostic_code
def __repr__(self):
return (u'bounce.Result(status={}, score={}, notification={},'
u' diag_code={})'.format(self.status, self.score,
self.notification,
self.diagnostic_code))
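# Illustrative usage sketch (assumes `raw_bounce` holds a raw RFC 822 bounce
# message string; the threshold is a placeholder, not part of this module):
#
# from flanker import mime
# result = detect(mime.from_string(raw_bounce))
# if result.score > 0.5 and result.status:
#     print('hard bounce with status', result.status)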
|
199715
|
def fib(n):
'''
generator that yields the Fibonacci sequence
up to the given count n
'''
a, b = 1, 1
for _ in range(n):
yield a
a, b = b, a + b
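# Example: list(fib(7)) == [1, 1, 2, 3, 5, 8, 13]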
|
199740
|
from enum import IntEnum
import attr
import pytest
from cattr import Converter, GenConverter, UnstructureStrategy
class E(IntEnum):
ONE = 1
TWO = 2
@attr.define
class C:
a: int
b: float
c: str
d: bytes
e: E
f: int
g: float
h: str
i: bytes
j: E
k: int
l: float
m: str
n: bytes
o: E
p: int
q: float
r: str
s: bytes
t: E
u: int
v: float
w: str
x: bytes
y: E
z: int
aa: float
ab: str
ac: bytes
ad: E
@pytest.mark.parametrize("converter_cls", [Converter, GenConverter])
@pytest.mark.parametrize(
"unstructure_strat",
[UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE],
)
def test_unstructure_attrs_primitives(
benchmark, converter_cls, unstructure_strat
):
"""Benchmark a large (30 attributes) attrs class containing primitives."""
c = converter_cls(unstruct_strat=unstructure_strat)
benchmark(
c.unstructure,
C(
1,
1.0,
"a small string",
"test".encode(),
E.ONE,
2,
2.0,
"a small string",
"test".encode(),
E.TWO,
3,
3.0,
"a small string",
"test".encode(),
E.ONE,
4,
4.0,
"a small string",
"test".encode(),
E.TWO,
5,
5.0,
"a small string",
"test".encode(),
E.ONE,
6,
6.0,
"a small string",
"test".encode(),
E.TWO,
),
)
@pytest.mark.parametrize("converter_cls", [Converter, GenConverter])
@pytest.mark.parametrize(
"unstructure_strat",
[UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE],
)
def test_structure_attrs_primitives(
benchmark, converter_cls, unstructure_strat
):
"""Benchmark a large (30 attributes) attrs class containing primitives."""
c = converter_cls(unstruct_strat=unstructure_strat)
inst = C(
1,
1.0,
"a small string",
"test".encode(),
E.ONE,
2,
2.0,
"a small string",
"test".encode(),
E.TWO,
3,
3.0,
"a small string",
"test".encode(),
E.ONE,
4,
4.0,
"a small string",
"test".encode(),
E.TWO,
5,
5.0,
"a small string",
"test".encode(),
E.ONE,
6,
6.0,
"a small string",
"test".encode(),
E.TWO,
)
raw = c.unstructure(inst)
benchmark(c.structure, raw, C)
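# Quick round-trip sanity sketch outside the benchmarks (illustrative, not part
# of the suite): structuring the unstructured form should reproduce the instance.
#
# c = GenConverter()
# inst = C(*([1, 1.0, "a small string", b"test", E.ONE] * 6))
# assert c.structure(c.unstructure(inst), C) == inst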
|
199779
|
from azure.iot.device.aio import IoTHubDeviceClient, ProvisioningDeviceClient
from azure.iot.device import MethodResponse, Message
import smbus2, bme280, os, asyncio, json, time
from grove.grove_moisture_sensor import GroveMoistureSensor
from dotenv import load_dotenv
from grove.grove_light_sensor_v1_2 import GroveLightSensor
import RPi.GPIO as GPIO
from threading import Thread
# Configuration parameters
bme_pin = 1
bme_address = 0x76
moisture_pin = 2
light_pin = 0
# Setting the pins used for controlling the LEDs in the raspberry pi
red_led_pin = 16 # pin 36
blue_led_pin = 26 # pin 37
violet_led_pin = 6 # pin 31
# Configuration of the GPIO Pins
GPIO.setmode(GPIO.BCM)
GPIO.setup(red_led_pin,GPIO.OUT)
GPIO.setup(blue_led_pin,GPIO.OUT)
GPIO.setup(violet_led_pin,GPIO.OUT)
red_light_time = None
blue_light_time = None
violet_light_time = None
red_light = False
blue_light = False
violet_light = False
# Create the sensors
bus = smbus2.SMBus(bme_pin)
calibration_params = bme280.load_calibration_params(bus, bme_address)
moisture_sensor = GroveMoistureSensor(moisture_pin)
light_sensor = GroveLightSensor(light_pin)
# Get the Connection data
load_dotenv()
id_scope = os.getenv('ID_SCOPE')
device_id = os.getenv('DEVICE_ID')
primary_key = os.getenv('PRIMARY_KEY')
def getTemperaturePressureHumidity():
return bme280.sample(bus, bme_address, calibration_params)
def getMoisture():
return round(moisture_sensor.moisture, 2)
def getLight():
return round(light_sensor.light, 2)/10
def getTelemetryData():
temp = round(getTemperaturePressureHumidity().temperature, 2)
moisture = getMoisture()
pressure = round(getTemperaturePressureHumidity().pressure, 2)
humidity = round(getTemperaturePressureHumidity().humidity, 2)
light = getLight()
data = {
"humidity": humidity,
"pressure": pressure,
"temperature": temp,
"soil_moisture": moisture,
"light_level": light
}
return json.dumps(data)
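# Example payload returned by getTelemetryData() (values illustrative):
# '{"humidity": 45.2, "pressure": 1012.53, "temperature": 22.81, "soil_moisture": 35.6, "light_level": 48.7}'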
# Control function for the Red LEDs
def control_red(red_light_pin):
start = time.perf_counter()
print(f'Turn on red LED for {red_light_time} minutes')
GPIO.output(red_light_pin, GPIO.HIGH)
while red_light and start + 60*red_light_time >= int(time.perf_counter()):
if time.perf_counter() >= start + 60*red_light_time:
print(f'Turn off red LED')
GPIO.output(red_light_pin, GPIO.LOW)
break
# Control function for the Blue LEDs
def control_blue(blue_light_pin):
start = time.perf_counter()
print(f'Turn on blue LED for {blue_light_time} minutes')
GPIO.output(blue_light_pin, GPIO.HIGH)
while blue_light and start + 60*blue_light_time >= int(time.perf_counter()):
if time.perf_counter() >= start + 60*blue_light_time:
print(f'Turn off blue LED')
GPIO.output(blue_light_pin, GPIO.LOW)
break
# Control function for the Violet LEDs
def control_violet(violet_light_pin):
start = time.perf_counter()
print(f'Turn on violet LED for {violet_light_time} minutes')
GPIO.output(violet_light_pin, GPIO.HIGH)
while violet_light and start + 60*violet_light_time >= int(time.perf_counter()):
if time.perf_counter() >= start + 60*violet_light_time:
print(f'Turn off violet LED')
GPIO.output(violet_light_pin, GPIO.LOW)
break
async def main():
# provision the device
async def register_device():
provisioning_device_client = ProvisioningDeviceClient.create_from_symmetric_key(
provisioning_host='global.azure-devices-provisioning.net',
registration_id=device_id,
id_scope=id_scope,
symmetric_key=primary_key)
return await provisioning_device_client.register()
results = await asyncio.gather(register_device())
registration_result = results[0]
# build the connection string
conn_str='HostName=' + registration_result.registration_state.assigned_hub + \
';DeviceId=' + device_id + \
';SharedAccessKey=' + primary_key
# The client object is used to interact with Azure IoT Central.
device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
# connect the client.
print('Connecting')
await device_client.connect()
print('Connected')
# async loop that sends the telemetry
async def main_loop():
while True:
telemetry = getTelemetryData()
await device_client.send_message(telemetry)
print(telemetry)
await asyncio.sleep(20)
async def redLight(request):
response = MethodResponse.create_from_method_request(
request, status = 200 # payload = {'description': f'Red Light for {request.payload} minutes'}
)
global red_light_time
global red_light
await device_client.send_method_response(response) # send response
red_light = False
if request.payload is None or request.payload == 0:
print('Turn off the Red LED')
GPIO.output(red_led_pin, GPIO.LOW) # actually switch the LED off
else:
await asyncio.sleep(1)
red_light = True
red_light_time = request.payload
red = Thread(target=control_red, args=(red_led_pin, ), daemon=True)
red.start()
async def blueLight(request):
response = MethodResponse.create_from_method_request(
request, status = 200 # payload = {'description': f'Blue Light for {request.payload} minutes'}
)
global blue_light_time
global blue_light
await device_client.send_method_response(response) # send response
blue_light = False
if request.payload is None or request.payload == 0:
print('Turn off the Blue LED')
GPIO.output(blue_led_pin, GPIO.LOW) # actually switch the LED off
else:
await asyncio.sleep(1)
blue_light = True
blue_light_time = request.payload
blue = Thread(target=control_blue, args=(blue_led_pin, ), daemon=True)
blue.start()
async def violetLight(request):
response = MethodResponse.create_from_method_request(
request, status = 200 # payload = {'description': f'Violet Light for {request.payload} minutes'}
)
global violet_light_time
global violet_light
await device_client.send_method_response(response) # send response
violet_light = False
if request.payload is None or request.payload == 0:
print('Turn off the Violet LED')
GPIO.output(violet_led_pin, GPIO.LOW) # actually switch the LED off
else:
await asyncio.sleep(1)
violet_light = True
violet_light_time = request.payload
violet = Thread(target=control_violet, args=(violet_led_pin, ), daemon=True)
violet.start()
commands = {
'red_led': redLight,
'blue_led': blueLight,
'violet_led': violetLight,
}
# Define behavior for handling commands
async def command_listener(device_client):
print('command listener')
while True:
method_request = await device_client.receive_method_request() # Wait for commands
await commands[method_request.name](method_request)
listeners = asyncio.gather(command_listener(device_client))
await main_loop()
listeners.cancel()
# Finally, disconnect
await device_client.disconnect()
if __name__ == '__main__':
# python3.7 or newer
asyncio.run(main())
# python3.6
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
|
199818
|
from gmusicapi._version import __version__
from gmusicapi.clients import Webclient, Musicmanager, Mobileclient
from gmusicapi.exceptions import CallFailure
__copyright__ = 'Copyright 2018 <NAME>'
__license__ = 'BSD 3-Clause'
__title__ = 'gmusicapi'
# appease flake8: the imports are purposeful
(__version__, Webclient, Musicmanager, Mobileclient, CallFailure)
|
199829
|
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from nets.deepdsod_model import DSOD
from loss import MultiBoxLoss
from datasets.dsod_dataset import mydateset
from os.path import exists
from utils import *
# {'car': 1, 'person': 2, 'truck': 3, 'bus': 4, 'rider': 5, 'rear': 6, 'front': 7}
# Data parameters
keep_difficult = True # use objects considered difficult to detect?
use_focalloss = False
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(opt):
"""
Training and validation.
"""
global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint, lr_scheduler, grad_clip, print_freq
epochs_since_improvement = opt['epochs_since_improvement']
start_epoch = opt['start_epoch']
best_loss = opt['best_loss']
checkpoint = opt['checkpoint']
lr_scheduler = opt['lr_scheduler']
batch_size = opt['batch_size']
epochs = opt['epochs']
lr = opt['lr']
momentum = opt['momentum']
weight_decay = opt['weight_decay']
grad_clip = opt['grad_clip']
workers = opt['workers']
print_freq = opt['print_freq']
root = opt['root']
# Initialize model or load checkpoint
if checkpoint is None:
model = DSOD(n_classes=n_classes)
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
epochs_since_improvement = checkpoint['epochs_since_improvement']
best_loss = checkpoint['best_loss']
print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, best_loss))
model = checkpoint['model']
# optimizer = checkpoint['optimizer']
# or
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
optimizer = torch.optim.SGD(model.parameters(),
lr=lr, momentum=momentum, weight_decay=weight_decay)
print('Learning Rate: ', optimizer.param_groups[-1]['lr'])
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', factor=0.5, patience=20, verbose=True
)
# Move to default device
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy, use_focalloss=use_focalloss).to(device)
# Custom dataloaders
train_dataset = mydateset(root=root, transform=True)
val_dataset = mydateset(root=root, mode='test')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
collate_fn=val_dataset.collate_fn, num_workers=workers,
pin_memory=True)
# Epochs
for epoch in range(start_epoch, epochs):
# One epoch's training
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
# One epoch's validation
val_loss = validate(val_loader=val_loader,
model=model,
criterion=criterion)
# Did validation loss improve?
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
if lr_scheduler is not None:
lr_scheduler.step(best_loss)
if not is_best:
epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
else:
epochs_since_improvement = 0
# Save checkpoint
save_checkpoint(epoch, epochs_since_improvement, model, optimizer, val_loss, best_loss, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
"""
One epoch's training.
:param train_loader: DataLoader for training data
:param model: model
:param criterion: MultiBox loss
:param optimizer: optimizer
:param epoch: epoch number
"""
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
# Batches
for i, (images, boxes, labels, masks) in enumerate(train_loader):
data_time.update(time.time() - start)
# Move to default device
images = images.to(device)
boxes = [torch.cat(b).to(device) for b in boxes]
labels = [l.to(device) for l in labels]
masks = torch.cat([m.unsqueeze(0) for m in masks]).to(device)
# Forward prop.
predicted_locs, predicted_scores, segm_score = model(images)
# print(predicted_locs.shape, predicted_scores.shape, segm_score.shape)
# Loss
loss = criterion(predicted_locs, predicted_scores, segm_score, boxes, labels, masks) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses))
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
def validate(val_loader, model, criterion):
"""
One epoch's validation.
:param val_loader: DataLoader for validation data
:param model: model
:param criterion: MultiBox loss
:return: average validation loss
"""
model.eval() # eval mode disables dropout
batch_time = AverageMeter()
losses = AverageMeter()
start = time.time()
# Prohibit gradient computation explicitly because I had some problems with memory
with torch.no_grad():
# Batches
for i, (images, boxes, labels, masks, difficulties) in enumerate(val_loader):
# Move to default device
images = images.to(device)
boxes = [torch.cat(b).to(device) for b in boxes]
labels = [l.to(device) for l in labels]
masks = torch.cat([m.unsqueeze(0) for m in masks]).to(device)
# Forward prop.
predicted_locs, predicted_scores, segm_score = model(images)
# Loss
loss = criterion(predicted_locs, predicted_scores, segm_score, boxes, labels, masks)
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('[{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i, len(val_loader),
batch_time=batch_time,
loss=losses))
print('\n * LOSS - {loss.avg:.3f}\n'.format(loss=losses))
return losses.avg
if __name__ == '__main__':
# `main` expects an `opt` dict of hyperparameters (see the sketch below);
# calling train() with no arguments here was a bug
main(opt)
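# Sketch of the `opt` dict that `main` expects (keys taken from the reads at
# the top of `main`; the values below are illustrative placeholders, not tuned
# defaults):
# opt = {
#     'epochs_since_improvement': 0, 'start_epoch': 0, 'best_loss': 100.,
#     'checkpoint': None, 'lr_scheduler': None, 'batch_size': 8, 'epochs': 200,
#     'lr': 1e-3, 'momentum': 0.9, 'weight_decay': 5e-4, 'grad_clip': None,
#     'workers': 4, 'print_freq': 200, 'root': '../data',
# }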
|
199875
|
import copy
from matplotlib import cm
import matplotlib.colors
import numpy as np
import hexrd.ui.constants
from hexrd.ui.brightness_contrast_editor import BrightnessContrastEditor
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.utils import block_signals
class ColorMapEditor:
def __init__(self, image_object, parent=None):
# The image_object can be any object with the following functions:
# 1. set_cmap: a function to set the cmap on the image
# 2. set_norm: a function to set the norm on the image
self.image_object = image_object
loader = UiLoader()
self.ui = loader.load_file('color_map_editor.ui', parent)
self.bounds = (0, 16384)
self._data = None
self.bc_editor = None
self.load_cmaps()
self.setup_connections()
@property
def data(self):
return self._data
@data.setter
def data(self, v):
self._data = v
self.update_bc_enable_state()
if self.bc_editor:
self.bc_editor.data = v
self.update_bc_editor()
def load_cmaps(self):
cmaps = sorted(i[:-2] for i in dir(cm) if i.endswith('_r'))
self.ui.color_map.addItems(cmaps)
# Set the combobox to be the default
self.ui.color_map.setCurrentText(hexrd.ui.constants.DEFAULT_CMAP)
def setup_connections(self):
self.ui.bc_editor_button.pressed.connect(self.bc_editor_button_pressed)
self.ui.minimum.valueChanged.connect(self.range_edited)
self.ui.maximum.valueChanged.connect(self.range_edited)
self.ui.color_map.currentIndexChanged.connect(self.update_cmap)
self.ui.reverse.toggled.connect(self.update_cmap)
self.ui.show_under.toggled.connect(self.update_cmap)
self.ui.show_over.toggled.connect(self.update_cmap)
self.ui.log_scale.toggled.connect(self.update_norm)
def range_edited(self):
self.update_bc_editor()
self.update_mins_and_maxes()
self.update_norm()
def update_bc_enable_state(self):
has_data = self.data is not None
self.ui.bc_editor_button.setEnabled(has_data)
def bc_editor_button_pressed(self):
if self.bc_editor:
self.bc_editor.ui.reject()
bc = self.bc_editor = BrightnessContrastEditor(self.ui)
bc.data = self.data
bc.edited.connect(self.bc_editor_modified)
bc.reset.connect(self.reset_range)
bc.ui.finished.connect(self.remove_bc_editor)
# Hide overlays while the BC editor is open
self._bc_previous_show_overlays = HexrdConfig().show_overlays
if self._bc_previous_show_overlays:
HexrdConfig().show_overlays = False
HexrdConfig().active_material_modified.emit()
self.update_bc_editor()
self.bc_editor.ui.show()
def update_bc_editor(self):
if not self.bc_editor:
return
widgets = (self.ui.minimum, self.ui.maximum)
new_range = [x.value() for x in widgets]
with block_signals(self.bc_editor):
self.bc_editor.ui_range = new_range
def remove_bc_editor(self):
self.bc_editor = None
if self._bc_previous_show_overlays and not HexrdConfig().show_overlays:
# Show the overlays again
HexrdConfig().show_overlays = True
HexrdConfig().active_material_modified.emit()
def bc_editor_modified(self):
with block_signals(self.ui.minimum, self.ui.maximum):
self.ui.minimum.setValue(self.bc_editor.ui_min)
self.ui.maximum.setValue(self.bc_editor.ui_max)
self.range_edited()
def update_mins_and_maxes(self):
# We can't do this in PySide2 for some reason:
# self.ui.maximum.valueChanged.connect(self.ui.minimum.setMaximum)
# self.ui.minimum.valueChanged.connect(self.ui.maximum.setMinimum)
self.ui.maximum.setMinimum(self.ui.minimum.value())
self.ui.minimum.setMaximum(self.ui.maximum.value())
def block_updates(self, blocked):
self.updates_blocked = blocked
def update_bounds(self, data):
if hasattr(self, 'updates_blocked') and self.updates_blocked:
# We don't want to adjust the bounds
return
bounds = self.percentile_range(data)
self.ui.minimum.setValue(bounds[0])
self.ui.minimum.setToolTip('Min: ' + str(bounds[0]))
self.ui.maximum.setValue(bounds[1])
self.ui.maximum.setToolTip('Max: ' + str(bounds[1]))
self.bounds = bounds
self.data = data
@staticmethod
def percentile_range(data, low=69.0, high=99.9):
if isinstance(data, dict):
values = data.values()
elif not isinstance(data, (list, tuple)):
values = [data]
l = min([np.nanpercentile(v, low) for v in values])
h = min([np.nanpercentile(v, high) for v in values])
if h - l < 5:
h = l + 5
return l, h
def reset_range(self):
if hasattr(self, 'updates_blocked') and self.updates_blocked:
# We don't want to adjust the range
return
if self.ui.minimum.maximum() < self.bounds[0]:
# Make sure we can actually set the value...
self.ui.minimum.setMaximum(self.bounds[0])
self.ui.minimum.setValue(self.bounds[0])
self.ui.maximum.setValue(self.bounds[1])
def update_cmap(self):
# Get the Colormap object from the name
cmap = cm.get_cmap(self.ui.color_map.currentText())
if self.ui.reverse.isChecked():
cmap = cmap.reversed()
# For set_under() and set_over(), we don't want to edit the
# original color map, so make a copy
cmap = copy.copy(cmap)
if self.ui.show_under.isChecked():
cmap.set_under('b')
if self.ui.show_over.isChecked():
cmap.set_over('r')
self.image_object.set_cmap(cmap)
def update_norm(self):
vmin = self.ui.minimum.value()
vmax = self.ui.maximum.value()
if self.ui.log_scale.isChecked():
# The min cannot be 0 here, or this will raise an exception
# For some reason, if it is less than 1.e-7, for some datasets,
# matplotlib will round it to 0, and then raise an exception.
# Thus, keep it at 1.e-7 for now.
vmin = 1.e-7 if vmin < 1.e-7 else vmin
norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)
else:
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
self.image_object.set_norm(norm)
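# Minimal sketch (an assumption, not part of this module) of an image_object
# satisfying the duck-typed interface described in __init__, wrapping a
# matplotlib AxesImage:
#
# class ImageAdapter:
#     def __init__(self, axes_image):
#         self.im = axes_image
#     def set_cmap(self, cmap):
#         self.im.set_cmap(cmap)
#         self.im.axes.figure.canvas.draw_idle()
#     def set_norm(self, norm):
#         self.im.set_norm(norm)
#         self.im.axes.figure.canvas.draw_idle()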
|
199886
|
from .build_features_matrix import build_matrix, load_dataset, load_matrix
from .clustering_algo import ClusteringAlgo, ClusteringAlgoSparse
from .eval import general_statistics, cluster_event_match, mcminn_eval
|
199921
|
import datetime
from google.appengine.ext import ndb
class ExportTask(ndb.Model):
blob_key = ndb.BlobKeyProperty()
total_posts = ndb.IntegerProperty(default=0)
total_photos = ndb.IntegerProperty(default=0)
exported_posts = ndb.IntegerProperty(default=0)
exported_photos = ndb.IntegerProperty(default=0)
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
filename = ndb.StringProperty()
status = ndb.StringProperty(choices=['new', 'inprogress', 'finished', 'failed'], default='new')
message = ndb.TextProperty(default='Waiting for task to start...')
def update(self, message, **kwargs):
self.message = message
for k,v in kwargs.items():
self.__setattr__(k, v)
self.put()
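# Example usage (illustrative):
# task.update('Exported 10 of 50 posts', exported_posts=10, status='inprogress')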
|
199934
|
from typing import Dict
import pandas as pd
from abm1559.config import rng
from abm1559.utils import (
get_basefee_bounds,
)
from abm1559.txs import (
Tx1559,
TxFloatingEsc
)
class User:
"""
Users submit transactions. They have a (randomly chosen) value per Gwei :math:`v`, (we choose per Gwei such that all evaluations of their welfare can be done independently of how much gas the transaction uses).
The user evaluates its current value (:math:`cv`) in one of two ways, embodied by two different subclasses:
- :py:class:`abm1559.users.AffineUser`: Incurs a fixed (but randomly selected) cost :math:`c` per unit of time (block-to-block), so :math:`cv(t) = v - c * t`.
- :py:class:`abm1559.users.DiscountUser`: Incurs a discount :math:`\delta` over time, so :math:`cv(t) = v * (1 - \delta)^t`.
`AffineUser` or `DiscountUser` are subclassed to create users who send different types of transactions, e.g., 1559-type transactions, escalators or something different. The subclasses should implement:
- (Optional) `expected_time(env)`: How the user estimates how long they will wait for their transaction to be included.
- (Optional) `decide_parameters(env)`: Based on their type and `env` (typically, current basefee, length of the queue or salient statistics e.g., distribution of tips in the queue), return transaction parameters.
- (Requested) `transact(env)`: Queried by the simulation when user is spawned. Returns either a transaction or `None` if they balk.
"""
def __init__(self, wakeup_block, pub_key=None, value=None, rng=rng, **kwargs):
self.wakeup_block = wakeup_block
self.rng = rng
if pub_key is None:
self.pub_key = rng.bytes(8)
else:
self.pub_key = pub_key
# Users have a value (in wei) per unit of gas for the transaction
if value is None:
self.value = int(rng.uniform(low = 0, high = 20) * (10 ** 9))
else:
self.value = value
def cost(self, env):
"""
Args:
env (Dict): Includes `gas_price` in wei
"""
gas_price = env["gas_price"]
return self.value - self.current_value(env) + gas_price
def payoff(self, env):
"""
Args:
env (Dict): Includes `gas_price` in wei
"""
gas_price = env["gas_price"]
return self.current_value(env) - gas_price
def transact(self, env):
tx = self.create_transaction(env)
if tx is None:
self.tx_hash = None
else:
self.tx_hash = tx.tx_hash
return tx
def cancel(self, tx):
return False
def export(self):
return {
"user": self,
"pub_key": self.pub_key.hex(),
"value": self.value / (10 ** 9), # in Gwei
"wakeup_block": self.wakeup_block,
}
class AffineUser(User):
"""
Affine users incur a fixed cost per unit of time.
"""
def __init__(self, wakeup_block, **kwargs):
super().__init__(wakeup_block, **kwargs)
if not "cost_per_unit" in kwargs:
rng = kwargs["rng"]
self.cost_per_unit = int(rng.uniform(low = 0, high = 1) * (10 ** 9))
else:
self.cost_per_unit = kwargs["cost_per_unit"]
def __str__(self):
return f"Affine User with value {self.value} and cost {self.cost_per_unit}"
def current_value(self, env):
current_block = env["current_block"]
elapsed_time = current_block - self.wakeup_block
return self.value - self.cost_per_unit * elapsed_time
def export(self):
return {
**super().export(),
"user_type": "affine_user",
"cost_per_unit": self.cost_per_unit / (10 ** 9), # in Gwei
}
class DiscountUser(User):
"""
The value of discount users is reduced over time.
"""
def __init__(self, wakeup_block, **kwargs):
super().__init__(wakeup_block, **kwargs)
if not "discount_rate" in kwargs:
self.discount_rate = 0.01
else:
self.discount_rate = kwargs["discount_rate"]
def __str__(self):
return f"Discount User with value {self.value} and discount rate {self.discount_rate}"
def current_value(self, env):
current_block = env["current_block"]
elapsed_time = current_block - self.wakeup_block
return self.value * (1 - self.discount_rate) ** elapsed_time
def export(self):
return {
**super().export(),
"user_type": "discount_user",
"discount_rate": self.discount_rate,
}
class User1559(AffineUser):
"""
An affine user sending 1559 transactions.
"""
# Expects to be included within 5 blocks
# Prefers not to participate if its expected payoff is negative
# Fixed gas_premium
def expected_time(self, env):
return 5
def decide_parameters(self, env):
gas_premium = 1 * (10 ** 9)
max_fee = self.value
return {
"max_fee": max_fee, # in wei
"gas_premium": gas_premium, # in wei
"start_block": self.wakeup_block,
}
def create_transaction(self, env):
tx_params = self.decide_parameters(env)
tx = Tx1559(
sender = self.pub_key,
tx_params = tx_params,
)
expected_block = self.wakeup_block + self.expected_time(env)
expected_gas_price = tx.gas_price({
**env,
"current_block": expected_block
})
expected_payoff = self.payoff({
"gas_price": expected_gas_price,
"current_block": expected_block,
})
if expected_payoff <= 0:
return None
return tx
def export(self):
return {
**super().export(),
"user_type": "user_1559",
}
def __str__(self):
return f"1559 affine user with value {self.value} and cost {self.cost_per_unit}"
class UserFloatingEsc(AffineUser):
"""
An affine user sending floating escalator transactions.
"""
# Expects to be included in the next block
# Prefers not to participate if its expected payoff is negative
def expected_time(self, env):
return 0
def create_transaction(self, env):
tx_params = self.decide_parameters(env)
tx = TxFloatingEsc(
sender = self.pub_key,
tx_params = tx_params,
rng = self.rng,
)
expected_block = self.wakeup_block + self.expected_time(env)
expected_gas_price = tx.gas_price({
**env,
"current_block": expected_block,
})
expected_payoff = self.payoff({
"gas_price": expected_gas_price,
"current_block": expected_block,
})
if expected_payoff <= 0:
return None
return tx
def decide_parameters(self, env):
raise NotImplementedError("decide_parameters must be overridden by subclasses")
def export(self):
return {
**super().export(),
"user_type": "user_floatingesc",
}
def __str__(self):
return f"Floating escalator affine user with value {self.value} and cost {self.cost_per_unit}"
|
199954
|
from pebl.test import testfile
from pebl import data, result
from pebl.learner import simanneal
class TestSimulatedAnnealingLearner:
def setUp(self):
self.data = data.fromfile(testfile('testdata5.txt'))
self.data.discretize()
def test_default_params(self):
s = simanneal.SimulatedAnnealingLearner(self.data)
s.run()
assert True
def test_param_effect(self):
s1 = simanneal.SimulatedAnnealingLearner(self.data)
s1.run()
s2 = simanneal.SimulatedAnnealingLearner( self.data, start_temp = 50)
s2.run()
assert s1.stats.iterations > s2.stats.iterations
|
199956
|
import pandas as pd
import numpy as np
import math
from scipy.stats import nct
from enum import Enum
import torch
import matplotlib.pyplot as plt
from copy import deepcopy
from ..constants.constants import (
KTRTimePointPriorKeys,
PredictMethod,
TrainingMetaKeys,
PredictionMetaKeys
)
from ..exceptions import IllegalArgument, ModelException, PredictionException
from ..utils.general import is_ordered_datetime
from ..utils.kernels import gauss_kernel, sandwich_kernel
from ..utils.features import make_seasonal_regressors
from .model_template import ModelTemplate
from ..estimators.pyro_estimator import PyroEstimatorSVI
from ..models import KTRLite
from orbit.constants.palette import OrbitPalette
from ..utils.knots import get_knot_idx, get_knot_dates
from ..utils.plot import orbit_style_decorator
class DataInputMapper(Enum):
"""
mapping from object input to pyro input
"""
# All of the following have default defined in DEFAULT_SLGT_FIT_ATTRIBUTES
# ---------- Data Input ---------- #
# observation related
NUM_OF_VALID_RESPONSE = 'N_VALID_RES'
WHICH_VALID_RESPONSE = 'WHICH_VALID_RES'
RESPONSE_OFFSET = 'MEAN_Y'
DEGREE_OF_FREEDOM = 'DOF'
_RESIDUALS_SCALE_UPPER = 'RESID_SCALE_UB'
# ---------- Level ---------- #
_NUM_KNOTS_LEVEL = 'N_KNOTS_LEV'
LEVEL_KNOT_SCALE = 'LEV_KNOT_SCALE'
_KERNEL_LEVEL = 'K_LEV'
# ---------- Regression ---------- #
_NUM_KNOTS_COEFFICIENTS = 'N_KNOTS_COEF'
_KERNEL_COEFFICIENTS = 'K_COEF'
_NUM_OF_REGULAR_REGRESSORS = 'N_RR'
_NUM_OF_POSITIVE_REGRESSORS = 'N_PR'
_NUM_OF_NEGATIVE_REGRESSORS = 'N_NR'
_REGULAR_REGRESSOR_MATRIX = 'RR'
_POSITIVE_REGRESSOR_MATRIX = 'PR'
_NEGATIVE_REGRESSOR_MATRIX = 'NR'
_REGULAR_REGRESSOR_INIT_KNOT_LOC = 'RR_INIT_KNOT_LOC'
_REGULAR_REGRESSOR_INIT_KNOT_SCALE = 'RR_INIT_KNOT_SCALE'
_REGULAR_REGRESSOR_KNOT_SCALE = 'RR_KNOT_SCALE'
_POSITIVE_REGRESSOR_INIT_KNOT_LOC = 'PR_INIT_KNOT_LOC'
_POSITIVE_REGRESSOR_INIT_KNOT_SCALE = 'PR_INIT_KNOT_SCALE'
_POSITIVE_REGRESSOR_KNOT_SCALE = 'PR_KNOT_SCALE'
_NEGATIVE_REGRESSOR_INIT_KNOT_LOC = 'NR_INIT_KNOT_LOC'
_NEGATIVE_REGRESSOR_INIT_KNOT_SCALE = 'NR_INIT_KNOT_SCALE'
_NEGATIVE_REGRESSOR_KNOT_SCALE = 'NR_KNOT_SCALE'
# ---------- Prior Specification ---------- #
_COEF_PRIOR_LIST = 'COEF_PRIOR_LIST'
_LEVEL_KNOTS = 'LEV_KNOT_LOC'
_SEAS_TERM = 'SEAS_TERM'
class BaseSamplingParameters(Enum):
"""
The output sampling parameters related with the base model
"""
LEVEL_KNOT = 'lev_knot'
LEVEL = 'lev'
YHAT = 'yhat'
OBS_SCALE = 'obs_scale'
class RegressionSamplingParameters(Enum):
"""
The output sampling parameters related with regression component.
"""
COEFFICIENTS_KNOT = 'coef_knot'
COEFFICIENTS_INIT_KNOT = 'coef_init_knot'
COEFFICIENTS = 'coef'
# Defaults Values
DEFAULT_REGRESSOR_SIGN = '='
DEFAULT_COEFFICIENTS_INIT_KNOT_SCALE = 1.0
DEFAULT_COEFFICIENTS_INIT_KNOT_LOC = 0
DEFAULT_COEFFICIENTS_KNOT_SCALE = 0.1
DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER = 0.01
DEFAULT_UPPER_BOUND_SCALE_MULTIPLIER = 1.0
class KTRModel(ModelTemplate):
"""Base KTR model object with shared functionality for PyroVI method
Parameters
----------
level_knot_scale : float
sigma for level; default to be .1
level_segments : int
the number of segments partitioned by the knots of level (trend)
level_knot_distance : int
the distance between every two knots of level (trend)
level_knot_dates : array like
list of pre-specified dates for the level knots
seasonality : int, or list of int
multiple seasonality
seasonality_fs_order : int, or list of int
fourier series order for seasonality
seasonality_segments : int
the number of segments partitioned by the knots of seasonality
seasonal_initial_knot_scale : float
scale parameter for seasonal regressors initial coefficient knots; default to be 1
seasonal_knot_scale : float
scale parameter for seasonal regressors drift of coefficient knots; default to be 0.1.
regressor_col : array-like strings
regressor columns
regressor_sign : list
list of signs with '=' for regular regressor, '+' for positive regressor, and '-' for negative regressor.
regressor_init_knot_loc : list
list of regressor knot pooling mean priors, default to be 0's
regressor_init_knot_scale : list
list of regressor knot pooling sigma's to control the pooling strength towards the grand mean of regressors;
default to be 1.
regressor_knot_scale : list
list of regressor knot sigma priors; default to be 0.1.
regression_segments : int
the number of segments partitioned by the knots of regression
regression_knot_distance : int
the distance between every two knots of regression
regression_knot_dates : array-like
list of pre-specified dates for regression knots
regression_rho : float
sigma in the Gaussian kernel for the regression term
degree_of_freedom : int
degree of freedom for error t-distribution
date_freq : str
date frequency; if not supplied, the minimum timestamp difference in the date would be used.
coef_prior_list : list of dicts
each dict in the list should have keys 'name', 'prior_start_tp_idx' (inclusive),
'prior_end_tp_idx' (not inclusive), 'prior_mean', 'prior_sd', and
'prior_regressor_col' (the literal key strings are given by KTRTimePointPriorKeys)
residuals_scale_upper : float
flat_multiplier : bool
Default set as True. If False, we will adjust knot scale with a multiplier based on regressor volume
around each knot; When True, set all multiplier as 1
ktrlite_optim_args : dict
the optimizing config for the ktrlite model (to fit level/seasonality). Default to be dict().
"""
_data_input_mapper = DataInputMapper
# stan or pyro model name (e.g. name of `*.stan` file in package)
_model_name = 'ktr'
_supported_estimator_types = [PyroEstimatorSVI]
def __init__(self,
# level
level_knot_scale=0.1,
level_segments=10,
level_knot_distance=None,
level_knot_dates=None,
# seasonality
seasonality=None,
seasonality_fs_order=None,
seasonality_segments=2,
seasonal_initial_knot_scale=1.0,
seasonal_knot_scale=0.1,
# regression
regressor_col=None,
regressor_sign=None,
regressor_init_knot_loc=None,
regressor_init_knot_scale=None,
regressor_knot_scale=None,
regression_segments=5,
regression_knot_distance=None,
regression_knot_dates=None,
regression_rho=0.15,
# shared
degree_of_freedom=30,
date_freq=None,
# time-based coefficient priors
coef_prior_list=None,
flat_multiplier=True,
residuals_scale_upper=None,
ktrlite_optim_args=dict(),
**kwargs):
super().__init__(**kwargs) # create estimator in base class
# level configurations
self.level_knot_scale = level_knot_scale
self.level_segments = level_segments
self.level_knot_distance = level_knot_distance
self.level_knot_dates = level_knot_dates
self._level_knot_dates = self.level_knot_dates
self.level_knots = None
self._level_knots = None
self._kernel_level = None
self._num_knots_level = None
self.knots_tp_level = None
# seasonality configurations
self.seasonality = seasonality
self.seasonality_fs_order = seasonality_fs_order
self._seasonality = self.seasonality
# used to name different seasonal components in prediction
self._seasonality_labels = list()
self._seasonality_fs_order = self.seasonality_fs_order
self.seasonal_initial_knot_scale = seasonal_initial_knot_scale
self.seasonal_knot_scale = seasonal_knot_scale
self.seasonality_segments = seasonality_segments
self._seas_term = 0
self._seasonality_coef_knot_dates = None
self._seasonality_coef_knots = None
# regression configurations
self.regressor_col = regressor_col
self.regressor_sign = regressor_sign
self.regressor_init_knot_loc = regressor_init_knot_loc
self.regressor_init_knot_scale = regressor_init_knot_scale
self.regressor_knot_scale = regressor_knot_scale
self.regression_knot_distance = regression_knot_distance
self.regression_segments = regression_segments
self._regression_knot_dates = regression_knot_dates
self.regression_rho = regression_rho
self.flat_multiplier = flat_multiplier
# set private var to arg value
# if None set default in _set_default_args()
self._regressor_sign = self.regressor_sign
self._regressor_init_knot_loc = self.regressor_init_knot_loc
self._regressor_init_knot_scale = self.regressor_init_knot_scale
self._regressor_knot_scale = self.regressor_knot_scale
self.coef_prior_list = coef_prior_list
self._coef_prior_list = []
self._regression_knots_idx = None
self._num_of_regressors = 0
# positive regressors
self._num_of_positive_regressors = 0
self._positive_regressor_col = list()
self._positive_regressor_init_knot_loc = list()
self._positive_regressor_init_knot_scale = list()
self._positive_regressor_knot_scale_1d = list()
self._positive_regressor_knot_scale = list()
# negative regressors
self._num_of_negative_regressors = 0
self._negative_regressor_col = list()
self._negative_regressor_init_knot_loc = list()
self._negative_regressor_init_knot_scale = list()
self._negative_regressor_knot_scale_1d = list()
self._negative_regressor_knot_scale = list()
# regular regressors
self._num_of_regular_regressors = 0
self._regular_regressor_col = list()
self._regular_regressor_init_knot_loc = list()
self._regular_regressor_init_knot_scale = list()
self._regular_regressor_knot_scale_1d = list()
self._regular_regressor_knot_scale = list()
self._regressor_col = list()
# init dynamic data attributes
# the following are set by `_set_dynamic_attributes()` and generally set during fit()
# from input df
# response data
self._is_valid_response = None
self._which_valid_response = None
self._num_of_valid_response = 0
# regression data
self._knots_tp_coefficients = None
self._positive_regressor_matrix = None
self._negative_regressor_matrix = None
self._regular_regressor_matrix = None
# other configurations
self.date_freq = date_freq
self.degree_of_freedom = degree_of_freedom
self.residuals_scale_upper = residuals_scale_upper
self._residuals_scale_upper = residuals_scale_upper
self.ktrlite_optim_args = ktrlite_optim_args
self._set_static_attributes()
self._set_model_param_names()
def _set_model_param_names(self):
"""Overriding base template functions. Model parameters to extract"""
self._model_param_names += [param.value for param in BaseSamplingParameters]
if self._num_of_regressors > 0:
self._model_param_names += [param.value for param in RegressionSamplingParameters]
def _set_default_args(self):
"""Set default attributes for None"""
# default checks for seasonality and seasonality_fs_order will be conducted
# in ktrlite model and we will extract them from ktrlite model directly later
if self.coef_prior_list is not None:
self._coef_prior_list = deepcopy(self.coef_prior_list)
# if no regressors, end here #
if self.regressor_col is None:
# regardless of what args are set for these, if regressor_col is None
# these should all be empty lists
self._regressor_sign = list()
self._regressor_init_knot_loc = list()
self._regressor_init_knot_scale = list()
self._regressor_knot_scale = list()
return
def _validate_params_len(params, valid_length):
for p in params:
if p is not None and len(p) != valid_length:
raise IllegalArgument('Wrong dimension length in Regression Param Input')
# regressor defaults
num_of_regressors = len(self.regressor_col)
_validate_params_len([
self.regressor_sign, self.regressor_init_knot_loc,
self.regressor_init_knot_scale, self.regressor_knot_scale],
num_of_regressors
)
if self.regressor_sign is None:
self._regressor_sign = [DEFAULT_REGRESSOR_SIGN] * num_of_regressors
if self.regressor_init_knot_loc is None:
self._regressor_init_knot_loc = [DEFAULT_COEFFICIENTS_INIT_KNOT_LOC] * num_of_regressors
if self.regressor_init_knot_scale is None:
self._regressor_init_knot_scale = [DEFAULT_COEFFICIENTS_INIT_KNOT_SCALE] * num_of_regressors
if self.regressor_knot_scale is None:
self._regressor_knot_scale = [DEFAULT_COEFFICIENTS_KNOT_SCALE] * num_of_regressors
self._num_of_regressors = num_of_regressors
def _set_static_regression_attributes(self):
# if no regressors, end here
if self._num_of_regressors == 0:
return
for index, reg_sign in enumerate(self._regressor_sign):
if reg_sign == '+':
self._num_of_positive_regressors += 1
self._positive_regressor_col.append(self.regressor_col[index])
# used for 'pr_knot_loc' sampling in pyro
self._positive_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._positive_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'pr_knot' sampling in pyro
self._positive_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
elif reg_sign == '-':
self._num_of_negative_regressors += 1
self._negative_regressor_col.append(self.regressor_col[index])
# used for 'nr_knot_loc' sampling in pyro
self._negative_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._negative_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'nr_knot' sampling in pyro
self._negative_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
else:
self._num_of_regular_regressors += 1
self._regular_regressor_col.append(self.regressor_col[index])
# used for 'rr_knot_loc' sampling in pyro
self._regular_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._regular_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'rr_knot' sampling in pyro
self._regular_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
# regular first, then positive, then negative
self._regressor_col = self._regular_regressor_col + self._positive_regressor_col + self._negative_regressor_col
# numpy conversion
self._positive_regressor_init_knot_loc = np.array(self._positive_regressor_init_knot_loc)
self._positive_regressor_init_knot_scale = np.array(self._positive_regressor_init_knot_scale)
self._positive_regressor_knot_scale_1d = np.array(self._positive_regressor_knot_scale_1d)
self._negative_regressor_init_knot_loc = np.array(self._negative_regressor_init_knot_loc)
self._negative_regressor_init_knot_scale = np.array(self._negative_regressor_init_knot_scale)
self._negative_regressor_knot_scale_1d = np.array(self._negative_regressor_knot_scale_1d)
self._regular_regressor_init_knot_loc = np.array(self._regular_regressor_init_knot_loc)
self._regular_regressor_init_knot_scale = np.array(self._regular_regressor_init_knot_scale)
self._regular_regressor_knot_scale_1d = np.array(self._regular_regressor_knot_scale_1d)
@staticmethod
def _validate_coef_prior(coef_prior_list):
for test_dict in coef_prior_list:
if set(test_dict.keys()) != set([
KTRTimePointPriorKeys.NAME.value,
KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value,
KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value,
KTRTimePointPriorKeys.PRIOR_MEAN.value,
KTRTimePointPriorKeys.PRIOR_SD.value,
KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value
]):
raise IllegalArgument('wrong key name in inserted prior dict')
len_insert_prior = list()
for key, val in test_dict.items():
if key in [
KTRTimePointPriorKeys.PRIOR_MEAN.value,
KTRTimePointPriorKeys.PRIOR_SD.value,
KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value,
]:
len_insert_prior.append(len(val))
if not all(len_insert == len_insert_prior[0] for len_insert in len_insert_prior):
raise IllegalArgument('wrong dimension length in inserted prior dict')
# @staticmethod
# def _validate_level_knot_inputs(level_knot_dates, level_knots):
# if len(level_knots) != len(level_knot_dates):
# raise IllegalArgument('level_knots and level_knot_dates should have the same length')
def _set_coef_prior_idx(self):
if self._coef_prior_list and len(self._regressor_col) > 0:
for x in self._coef_prior_list:
prior_regressor_col_idx = [
np.where(np.array(self._regressor_col) == col)[0][0]
for col in x[KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value]
]
x.update({'prior_regressor_col_idx': prior_regressor_col_idx})
def _set_static_attributes(self):
"""model data input based on args at instantiation or computed from args at instantiation"""
self._set_default_args()
self._set_static_regression_attributes()
# self._validate_level_knot_inputs(self.level_knot_dates, self.level_knots)
if self._coef_prior_list:
self._validate_coef_prior(self._coef_prior_list)
self._set_coef_prior_idx()
def _set_valid_response_attributes(self, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
response = training_meta[TrainingMetaKeys.RESPONSE.value]
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
if num_of_observations < max_seasonality:
raise ModelException(
"Number of observations {} is less than max seasonality {}".format(
num_of_observations, max_seasonality))
# get some reasonable offset to regularize response to make default priors scale-insensitive
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
self.response_offset = np.nanmean(response[:max_seasonality])
else:
self.response_offset = np.nanmean(response)
self.is_valid_response = ~np.isnan(response)
# [0] to convert tuple back to array
self.which_valid_response = np.where(self.is_valid_response)[0]
self.num_of_valid_response = len(self.which_valid_response)
def _set_regressor_matrix(self, df, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
# validate regression columns
if self.regressor_col is not None and \
not set(self.regressor_col).issubset(df.columns):
raise ModelException(
"DataFrame does not contain specified regressor column(s)."
)
# init of regression matrix depends on length of response vector
self._positive_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
self._negative_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
self._regular_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
# update regression matrices
if self._num_of_positive_regressors > 0:
self._positive_regressor_matrix = df.filter(
items=self._positive_regressor_col, ).values
if self._num_of_negative_regressors > 0:
self._negative_regressor_matrix = df.filter(
items=self._negative_regressor_col, ).values
if self._num_of_regular_regressors > 0:
self._regular_regressor_matrix = df.filter(
items=self._regular_regressor_col, ).values
def _set_coefficients_kernel_matrix(self, df, training_meta):
"""Derive knots position and kernel matrix and other related meta data"""
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
# date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
# placeholder
self._kernel_coefficients = np.zeros((num_of_observations, 0), dtype=np.double)
self._num_knots_coefficients = 0
if self._num_of_regressors > 0:
self._regression_knots_idx = get_knot_idx(
date_array=date_array,
num_of_obs=num_of_observations,
knot_dates=self._regression_knot_dates,
knot_distance=self.regression_knot_distance,
num_of_segments=self.regression_segments,
date_freq=self.date_freq,
)
tp = np.arange(1, num_of_observations + 1) / num_of_observations
self._knots_tp_coefficients = (1 + self._regression_knots_idx) / num_of_observations
self._kernel_coefficients = gauss_kernel(tp, self._knots_tp_coefficients, rho=self.regression_rho)
self._num_knots_coefficients = len(self._knots_tp_coefficients)
if self.date_freq is None:
self.date_freq = date_array.diff().min()
self._regression_knot_dates = get_knot_dates(date_array[0], self._regression_knots_idx, self.date_freq)
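# Shape sketch of the kernel construction above (illustrative numbers): with
# num_of_observations = 10 and two knots,
# tp = np.arange(1, 11) / 10; knots = np.array([0.25, 0.75])
# gauss_kernel(tp, knots, rho=0.15) -> array of shape (10, 2),
# one row per time point and one column per knot.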
def _set_knots_scale_matrix(self, df, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
if self._num_of_positive_regressors > 0:
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_positive_regressors, self._num_knots_coefficients))
multiplier = np.ones(local_val.shape) # both branches of the original flat_multiplier if/else were identical
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._positive_regressor_matrix[str_idx:end_idx]), axis=0)
global_mean = np.expand_dims(np.mean(np.fabs(self._positive_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
# adjust knot scale with the multiplier derived from the average value and shift by 0.001 to avoid zeros in
# scale parameters
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
# self._positive_regressor_knot_scale has shape num_of_pr x num_of_knot
self._positive_regressor_knot_scale = (
multiplier * np.expand_dims(self._positive_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._positive_regressor_knot_scale[self._positive_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._positive_regressor_init_knot_scale = np.array(self._positive_regressor_init_knot_scale)
self._positive_regressor_init_knot_scale[self._positive_regressor_init_knot_scale < 1e-4] = 1e-4
if self._num_of_negative_regressors > 0:
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_negative_regressors, self._num_knots_coefficients))
multiplier = np.ones(local_val.shape) # both branches of the original flat_multiplier if/else were identical
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._negative_regressor_matrix[str_idx:end_idx]), axis=0)
global_mean = np.expand_dims(np.mean(np.fabs(self._negative_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
# adjust knot scale with the multiplier derived from the average value and shift by 0.001 to avoid zeros in
# scale parameters
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
self._negative_regressor_knot_scale = (
multiplier * np.expand_dims(self._negative_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._negative_regressor_knot_scale[self._negative_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._negative_regressor_init_knot_scale = np.array(self._negative_regressor_init_knot_scale)
self._negative_regressor_init_knot_scale[self._negative_regressor_init_knot_scale < 1e-4] = 1e-4
if self._num_of_regular_regressors > 0:
# do the same for regular regressor
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_regular_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._regular_regressor_matrix[str_idx:end_idx]), axis=0)
# adjust knot scale with the multiplier derived from the average value and shift by 0.001 to avoid zeros in
# scale parameters
global_mean = np.expand_dims(np.mean(np.fabs(self._regular_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.01 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
# self._regular_regressor_knot_scale has shape num_of_pr x num_of_knot
self._regular_regressor_knot_scale = (
multiplier * np.expand_dims(self._regular_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._regular_regressor_knot_scale[self._regular_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._regular_regressor_init_knot_scale = np.array(self._regular_regressor_init_knot_scale)
self._regular_regressor_init_knot_scale[self._regular_regressor_init_knot_scale < 1e-4] = 1e-4
def _generate_tp(self, training_meta, prediction_date_array):
"""Used in _generate_seas"""
training_end = training_meta[TrainingMetaKeys.END.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
prediction_start = prediction_date_array[0]
output_len = len(prediction_date_array)
if prediction_start > training_end:
start = num_of_observations
else:
start = pd.Index(date_array).get_loc(prediction_start)
new_tp = np.arange(start + 1, start + output_len + 1) / num_of_observations
return new_tp
def _generate_insample_tp(self, training_meta, date_array):
"""Used in _generate_seas"""
train_date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
idx = np.nonzero(np.in1d(train_date_array, date_array))[0]
tp = (idx + 1) / num_of_observations
return tp
# def _generate_coefs(self, training_meta, prediction_date_array, coef_knot_dates, coef_knot):
# """Used in _generate_seas"""
# new_tp = self._generate_tp(training_meta, prediction_date_array)
# knots_tp_coef = self._generate_insample_tp(training_meta, coef_knot_dates)
# kernel_coef = sandwich_kernel(new_tp, knots_tp_coef)
# coefs = np.squeeze(np.matmul(coef_knot, kernel_coef.transpose(1, 0)), axis=0).transpose(1, 0)
# return coefs
def _generate_seas(self, df, training_meta, coef_knot_dates, coef_knots,
seasonality, seasonality_fs_order, seasonality_labels):
"""To calculate the seasonality term based on the _seasonal_knots_input.
Parameters
----------
df : pd.DataFrame
input df
training_meta: dict
meta dictionary for the training input
coef_knot_dates : 1-D array like
dates for seasonality coefficient knots
coef_knots : dict
dict of seasonal coefficient knots from each seasonality
seasonality : list
seasonality input; list of float
seasonality_fs_order : list
seasonality_fs_order input; list of int
seasonality_labels : list
labels (strings) used as keys for each seasonality component
Returns
-----------
tuple :
the total seasonal regression array and a dictionary of seasonal regression components keyed by each seasonality
"""
df = df.copy()
# store each component as a dictionary
seas_decomp = dict()
if seasonality is not None and len(seasonality) > 0:
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
training_end = training_meta[TrainingMetaKeys.END.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
prediction_date_array = df[date_col].values
prediction_start = prediction_date_array[0]
if prediction_start > training_end:
# time index for prediction start
start = num_of_observations
else:
# time index for prediction start
start = pd.Index(date_array).get_loc(prediction_start)
# dictionary
seas_regressors = make_seasonal_regressors(
n=df.shape[0],
periods=seasonality,
orders=seasonality_fs_order,
labels=seasonality_labels,
shift=start,
)
new_tp = self._generate_tp(training_meta, prediction_date_array)
knots_tp_coef = self._generate_insample_tp(training_meta, coef_knot_dates)
coef_kernel = sandwich_kernel(new_tp, knots_tp_coef)
# init of regression matrix depends on length of response vector
total_seas_regression = np.zeros((1, df.shape[0]), dtype=np.double)
for k in seasonality_labels:
seas_regressor_matrix = seas_regressors[k]
coef_knot = coef_knots[k]
# time-step x coefficients
seas_coef = np.squeeze(np.matmul(coef_knot, coef_kernel.transpose(1, 0)), axis=0).transpose(1, 0)
seas_regression = np.sum(seas_coef * seas_regressor_matrix, axis=-1)
seas_decomp[k] = np.expand_dims(seas_regression, 0)
total_seas_regression += seas_regression
else:
total_seas_regression = np.zeros((1, df.shape[0]), dtype=np.double)
return total_seas_regression, seas_decomp
def _set_levs_and_seas(self, df, training_meta):
response_col = training_meta['response_col']
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
# use ktrlite to derive levs and seas
ktrlite = KTRLite(
response_col=response_col,
date_col=date_col,
level_knot_scale=self.level_knot_scale,
level_segments=self.level_segments,
level_knot_dates=self.level_knot_dates,
level_knot_distance=self.level_knot_distance,
seasonality=self.seasonality,
seasonality_fs_order=self.seasonality_fs_order,
seasonal_initial_knot_scale=self.seasonal_initial_knot_scale,
seasonal_knot_scale=self.seasonal_knot_scale,
seasonality_segments=self.seasonality_segments,
degree_of_freedom=self.degree_of_freedom,
date_freq=self.date_freq,
estimator='stan-map',
**self.ktrlite_optim_args
)
ktrlite.fit(df=df)
# self._ktrlite_model = ktrlite
ktrlite_pt_posteriors = ktrlite.get_point_posteriors()
ktrlite_obs_scale = ktrlite_pt_posteriors['map']['obs_scale']
# load _seasonality and _seasonality_fs_order
self._seasonality = ktrlite._model._seasonality
self._seasonality_fs_order = ktrlite._model._seasonality_fs_order
for seas in self._seasonality:
self._seasonality_labels.append('seasonality_{}'.format(seas))
# if input None for upper bound of residuals scale, use data-driven input
if self.residuals_scale_upper is None:
# make it 5 times to have some buffer in case we over-fit in KTRLite
self._residuals_scale_upper = min(ktrlite_obs_scale * 5, training_meta['response_sd'])
# this part is to extract level and seasonality result from KTRLite
self._level_knots = np.squeeze(ktrlite_pt_posteriors['map']['lev_knot'])
self._level_knot_dates = ktrlite._model._level_knot_dates
tp = np.arange(1, num_of_observations + 1) / num_of_observations
# # trim level knots dates when they are beyond training dates
# lev_knot_dates = list()
# lev_knots = list()
# for i, x in enumerate(self.level_knot_dates):
# if (x <= df[date_col].max()) and (x >= df[date_col].min()):
# lev_knot_dates.append(x)
# lev_knots.append(self._level_knots[i])
# self._level_knot_dates = pd.to_datetime(lev_knot_dates)
# self._level_knots = np.array(lev_knots)
self._level_knots_idx = get_knot_idx(
date_array=date_array,
num_of_obs=None,
knot_dates=self._level_knot_dates,
knot_distance=None,
num_of_segments=None,
date_freq=self.date_freq,
)
self.knots_tp_level = (1 + self._level_knots_idx) / num_of_observations
self._kernel_level = sandwich_kernel(tp, self.knots_tp_level)
self._num_knots_level = len(self._level_knot_dates)
if self._seasonality:
self._seasonality_coef_knot_dates = ktrlite._model._coef_knot_dates
coef_knots_flatten = ktrlite_pt_posteriors['map']['coef_knot']
coef_knots = dict()
pos = 0
for idx, label in enumerate(self._seasonality_labels):
order = self._seasonality_fs_order[idx]
coef_knots[label] = coef_knots_flatten[..., pos:(pos + 2 * order), :]
pos += 2 * order
self._seasonality_coef_knots = coef_knots
# we only need the total seasonality term here
self._seas_term, _ = self._generate_seas(
df,
training_meta,
self._seasonality_coef_knot_dates,
self._seasonality_coef_knots,
self._seasonality,
self._seasonality_fs_order,
self._seasonality_labels)
# remove batch size as an input for models
self._seas_term = np.squeeze(self._seas_term, 0)
def _filter_coef_prior(self, df):
if self._coef_prior_list and len(self._regressor_col) > 0:
# iterate over a copy due to the removal operation
for test_dict in self._coef_prior_list[:]:
prior_regressor_col = test_dict[KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value]
m = test_dict[KTRTimePointPriorKeys.PRIOR_MEAN.value]
sd = test_dict[KTRTimePointPriorKeys.PRIOR_SD.value]
end_tp_idx = min(test_dict[KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value], df.shape[0])
start_tp_idx = min(test_dict[KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value], df.shape[0])
if start_tp_idx < end_tp_idx:
expected_shape = (end_tp_idx - start_tp_idx, len(prior_regressor_col))
test_dict.update({KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value: end_tp_idx})
test_dict.update({KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value: start_tp_idx})
# mean/sd expanding
test_dict.update({KTRTimePointPriorKeys.PRIOR_MEAN.value: np.full(expected_shape, m)})
test_dict.update({KTRTimePointPriorKeys.PRIOR_SD.value: np.full(expected_shape, sd)})
else:
# removing invalid prior
self._coef_prior_list.remove(test_dict)
def set_dynamic_attributes(self, df, training_meta):
"""Overriding: func: `~orbit.models.BaseETS._set_dynamic_attributes"""
self._set_regressor_matrix(df, training_meta)
self._set_coefficients_kernel_matrix(df, training_meta)
self._set_knots_scale_matrix(df, training_meta)
self._set_levs_and_seas(df, training_meta)
self._filter_coef_prior(df)
self._set_valid_response_attributes(training_meta)
@staticmethod
def _concat_regression_coefs(pr_beta=None, rr_beta=None):
"""Concatenates regression posterior matrix
In the case that `pr_beta` or `rr_beta` is a 1d tensor, transform to 2d tensor and
concatenate.
Args
----
pr_beta : array like
positive-value constrained regression betas
rr_beta : array like
regular regression betas
Returns
-------
array like
concatenated 2d array of shape (1, len(rr_beta) + len(pr_beta))
"""
regressor_beta = None
if pr_beta is not None and rr_beta is not None:
pr_beta = pr_beta if len(pr_beta.shape) == 2 else pr_beta.reshape(1, -1)
rr_beta = rr_beta if len(rr_beta.shape) == 2 else rr_beta.reshape(1, -1)
regressor_beta = torch.cat((rr_beta, pr_beta), dim=1)
elif pr_beta is not None:
regressor_beta = pr_beta
elif rr_beta is not None:
regressor_beta = rr_beta
return regressor_beta
def predict(self, posterior_estimates, df, training_meta, prediction_meta,
coefficient_method="smooth",
include_error=False, store_prediction_array=False, **kwargs):
"""Vectorized version of prediction math
Parameters
----
coefficient_method : str
either "smooth" or "empirical". when "empirical" is used, curves are sampled/aggregated directly
from beta posteriors; when "smooth" is used, first extract sampled/aggregated posteriors of knots
then beta.
this mainly impacts the aggregated estimation method; full bayesian should not be impacted
include_error : bool
if generating the noise samples
store_prediction_array : bool
if storing the prediction array
"""
################################################################
# Model Attributes
################################################################
# FIXME: do we still need this?
model = deepcopy(posterior_estimates)
arbitrary_posterior_value = list(model.values())[0]
num_sample = arbitrary_posterior_value.shape[0]
################################################################
# Prediction Attributes
################################################################
output_len = prediction_meta[PredictionMetaKeys.PREDICTION_DF_LEN.value]
prediction_start = prediction_meta[PredictionMetaKeys.START.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
training_end = training_meta[TrainingMetaKeys.END.value]
# Here assume dates are ordered and consecutive
# if prediction_meta[PredictionMetaKeys.START.value] > self.training_end,
# assume prediction starts right after train end
if prediction_start > training_end:
# time index for prediction start
start = num_of_observations
else:
start = pd.Index(date_array).get_loc(prediction_start)
new_tp = np.arange(start + 1, start + output_len + 1) / num_of_observations
if include_error:
# in-sample knots
lev_knot_in = model.get(BaseSamplingParameters.LEVEL_KNOT.value)
# TODO: hacky way; let's just assume last two knot distance is knots distance for all knots
lev_knot_width = self.knots_tp_level[-1] - self.knots_tp_level[-2]
# check whether we need to put new knots for simulation
if new_tp[-1] >= self.knots_tp_level[-1] + lev_knot_width:
# derive knots tp
knots_tp_level_out = np.arange(self.knots_tp_level[-1] + lev_knot_width, new_tp[-1], lev_knot_width)
new_knots_tp_level = np.concatenate([self.knots_tp_level, knots_tp_level_out])
lev_knot_out = np.random.laplace(0, self.level_knot_scale,
size=(lev_knot_in.shape[0], len(knots_tp_level_out)))
lev_knot_out = np.cumsum(np.concatenate([lev_knot_in[:, -1].reshape(-1, 1), lev_knot_out],
axis=1), axis=1)[:, 1:]
lev_knot = np.concatenate([lev_knot_in, lev_knot_out], axis=1)
else:
new_knots_tp_level = self.knots_tp_level
lev_knot = lev_knot_in
kernel_level = sandwich_kernel(new_tp, new_knots_tp_level)
else:
lev_knot = model.get(BaseSamplingParameters.LEVEL_KNOT.value)
kernel_level = sandwich_kernel(new_tp, self.knots_tp_level)
obs_scale = model.get(BaseSamplingParameters.OBS_SCALE.value)
obs_scale = obs_scale.reshape(-1, 1)
# if self._seasonality is not None:
# condition of seasonality is checked inside
total_seas, seas_decomp = self._generate_seas(df, training_meta,
self._seasonality_coef_knot_dates,
self._seasonality_coef_knots,
self._seasonality,
self._seasonality_fs_order,
self._seasonality_labels)
# # seas is 1-d array, add the batch size back
# seas = np.expand_dims(seas, 0)
# else:
# # follow component shapes
# seas = np.zeros((1, output_len))
trend = np.matmul(lev_knot, kernel_level.transpose((1, 0)))
regression = np.zeros(trend.shape)
if self._num_of_regressors > 0:
regressor_matrix = df.filter(items=self._regressor_col, ).values
regressor_betas = self._get_regression_coefs_matrix(
training_meta,
posterior_estimates,
coefficient_method,
date_array=prediction_meta[TrainingMetaKeys.DATE_ARRAY.value]
)
regression = np.sum(regressor_betas * regressor_matrix, axis=-1)
if include_error:
epsilon = nct.rvs(self.degree_of_freedom, nc=0, loc=0,
scale=obs_scale, size=(num_sample, len(new_tp)))
trend += epsilon
pred_array = trend + total_seas + regression
# if decompose output dictionary of components
decomp_dict = {
'prediction': pred_array,
'trend': trend,
'regression': regression
}
# this is an input from ktrlite
decomp_dict.update(seas_decomp)
if store_prediction_array:
self.pred_array = pred_array
else:
self.pred_array = None
return decomp_dict
def _get_regression_coefs_matrix(self, training_meta, posteriors, coefficient_method='smooth', date_array=None):
"""internal function to provide coefficient matrix given a date array
Args
----
posteriors : dict
posterior samples
date_array : array like
array of date stamp
coefficient_method : str
either "smooth" or "empirical". when "empirical" is used, curves are sampled/aggregated directly
from beta posteriors; when "smooth" is used, first extract sampled/aggregated posteriors of knots
then beta.
this mainly impacts the aggregated estimation method; full bayesian should not be impacted.
"""
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
training_start = training_meta[TrainingMetaKeys.START.value]
training_end = training_meta[TrainingMetaKeys.END.value]
train_date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
if self._num_of_regular_regressors + self._num_of_positive_regressors + self._num_of_negative_regressors == 0:
return None
# if date_array not specified, coefficients in the training period will be retrieved
if date_array is None:
if coefficient_method == 'smooth':
coef_knots = posteriors.get(RegressionSamplingParameters.COEFFICIENTS_KNOT.value)
# only 1 knot for 0 segments
if self.regression_segments == 0:
coef_knots = np.expand_dims(coef_knots, -1)
if len(self._regressor_col) == 1:
coef_knots = np.expand_dims(coef_knots, 1)
# result in batch x time step x regressor size shape
regressor_betas = np.matmul(coef_knots, self._kernel_coefficients.transpose((1, 0)))
# if len(self._regressor_col) == 1:
# regressor_betas = np.expand_dims(regressor_betas, 0)
regressor_betas = regressor_betas.transpose((0, 2, 1))
elif coefficient_method == 'empirical':
regressor_betas = posteriors.get(RegressionSamplingParameters.COEFFICIENTS.value)
else:
raise IllegalArgument('Wrong coefficient_method:{}'.format(coefficient_method))
else:
date_array = pd.to_datetime(date_array).values
output_len = len(date_array)
train_len = num_of_observations
# some validation of date array
if not is_ordered_datetime(date_array):
raise IllegalArgument('Datetime index must be ordered and not repeat')
prediction_start = date_array[0]
if prediction_start < training_start:
raise PredictionException('Prediction start must be after training start.')
# If we cannot find a match of prediction range, assume prediction starts right after train end
if prediction_start > training_end:
# time index for prediction start
start = train_len
coef_repeats = [0] * (start - 1) + [output_len]
else:
# time index for prediction start
start = pd.Index(train_date_array).get_loc(prediction_start)
if output_len <= train_len - start:
coef_repeats = [0] * start + [1] * output_len + [0] * (train_len - start - output_len)
else:
coef_repeats = [0] * start + [1] * (train_len - start - 1) + [output_len - train_len + start + 1]
new_tp = np.arange(start + 1, start + output_len + 1) / num_of_observations
if coefficient_method == 'smooth':
kernel_coefficients = gauss_kernel(new_tp, self._knots_tp_coefficients, rho=self.regression_rho)
coef_knots = posteriors.get(RegressionSamplingParameters.COEFFICIENTS_KNOT.value)
if len(self._regressor_col) == 1:
coef_knots = np.expand_dims(coef_knots, -1)
# only 1 knot for 0 segments
if self.regression_segments == 0:
coef_knots = np.expand_dims(coef_knots, -1)
regressor_betas = np.matmul(coef_knots, kernel_coefficients.transpose((1, 0)))
if len(regressor_betas.shape) == 2:
regressor_betas = np.expand_dims(regressor_betas, 0)
regressor_betas = regressor_betas.transpose((0, 2, 1))
elif coefficient_method == 'empirical':
regressor_betas = posteriors.get(RegressionSamplingParameters.COEFFICIENTS.value)
regressor_betas = np.repeat(regressor_betas, repeats=coef_repeats, axis=1)
else:
raise IllegalArgument('Wrong coefficient_method:{}'.format(coefficient_method))
return regressor_betas
def get_regression_coefs(self, training_meta, point_method, point_posteriors, posterior_samples,
coefficient_method='smooth', date_array=None,
include_ci=False, lower=0.05, upper=0.95
):
"""Return DataFrame regression coefficients.
Parameters
----------
coefficient_method : str
either "smooth" or "empirical". when "empirical" is used, curves are sampled/aggregated directly
from beta posteriors; when "smooth" is used, first extract sampled/aggregated posteriors of knots
then beta.
date_array : array-like
the list of dates for which the regression coefficients will be reported.
Default to be None. When it's None, all the dates in the training data will be used.
include_ci : bool
if including the confidence intervals for the regression coefficients
lower : float between (0, 1). default to be 0.05
lower bound for the CI
upper : float between (0, 1). default to be 0.95.
upper bound for the CI
Returns
-------
Pandas data frame holding the dynamic regression coefficients
"""
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
reg_df = pd.DataFrame()
if self._num_of_regressors == 0:
return reg_df
_point_method = point_method
if point_method is None:
_point_method = PredictMethod.MEDIAN.value
posteriors = point_posteriors.get(_point_method)
coefs = np.squeeze(self._get_regression_coefs_matrix(training_meta,
posteriors,
coefficient_method=coefficient_method,
date_array=date_array))
if len(coefs.shape) == 1:
coefs = np.expand_dims(coefs, -1)
reg_df = pd.DataFrame(data=coefs, columns=self._regressor_col)
if date_array is not None:
reg_df[date_col] = date_array
else:
reg_df[date_col] = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
# re-arrange columns
reg_df = reg_df[[date_col] + self._regressor_col]
if include_ci:
posteriors = posterior_samples
coefs = self._get_regression_coefs_matrix(training_meta,
posteriors,
coefficient_method=coefficient_method,
date_array=date_array)
coefficients_lower = np.quantile(coefs, lower, axis=0)
coefficients_upper = np.quantile(coefs, upper, axis=0)
reg_df_lower = reg_df.copy()
reg_df_upper = reg_df.copy()
for idx, col in enumerate(self._regressor_col):
reg_df_lower[col] = coefficients_lower[:, idx]
reg_df_upper[col] = coefficients_upper[:, idx]
return reg_df, reg_df_lower, reg_df_upper
return reg_df
def get_regression_coef_knots(self, training_meta, point_method, point_posteriors, posterior_samples):
"""Return DataFrame regression coefficient knots
"""
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
_point_method = point_method
if point_method is None:
_point_method = PredictMethod.MEDIAN.value
# init dataframe
knots_df = pd.DataFrame()
# end if no regressors
if self._num_of_regular_regressors + self._num_of_positive_regressors + self._num_of_negative_regressors == 0:
return knots_df
knots_df[date_col] = self._regression_knot_dates
# TODO: make the label as a constant
knots_df['step'] = self._regression_knots_idx
# batch size x regressor size x knot size
coef_knots = point_posteriors \
.get(_point_method) \
.get(RegressionSamplingParameters.COEFFICIENTS_KNOT.value)
# only 1 knot for 0 segments
if self.regression_segments == 0:
coef_knots = np.expand_dims(coef_knots, -1)
if len(self._regressor_col) == 1:
coef_knots = np.expand_dims(coef_knots, 1)
for idx, col in enumerate(self._regressor_col):
knots_df[col] = np.transpose(coef_knots[:, idx])
return knots_df
@orbit_style_decorator
def plot_regression_coefs(self, training_meta, point_method, point_posteriors, posterior_samples,
coefficient_method='smooth', date_array=None,
include_ci=False, lower=0.05, upper=0.95,
with_knot=False, is_visible=True,
ncol=2, ylim=None, markersize=200, figsize=(16, 8)):
"""Plot regression coefficients.
Parameters
----------
coefficient_method : str
either "smooth" or "empirical". when "empirical" is used, curves are sampled/aggregated directly
from beta posteriors; when "smooth" is used, first extract sampled/aggregated posteriors of knots
then beta.
date_array : array-like
the list of dates for which the regression coefficients will be reported.
Default to be None. When it's None, all the dates in the training data will be used.
include_ci : bool
if including the confidence intervals for the regression coefficients
lower : float between (0, 1). default to be 0.05
lower bound for the CI
upper : float between (0, 1). default to be 0.95.
upper bound for the CI
with_knot : bool
if plotting the regression knots in the graph
ncol : int
number of columns of the panel grid
is_visible : bool
whether we want to show the plot. If called from unittest, is_visible might = False.
markersize : int; optional
knot marker size
figsize : tuple; optional
figsize passed to `matplotlib.pyplot.figure()`
"""
# assume the first column is the date; this way we can use a static method
if include_ci:
coef_df, coef_df_lower, coef_df_upper = self.get_regression_coefs(
training_meta, point_method, point_posteriors, posterior_samples,
coefficient_method=coefficient_method, date_array=date_array,
include_ci=include_ci, lower=lower, upper=upper
)
else:
coef_df = self.get_regression_coefs(
training_meta, point_method, point_posteriors, posterior_samples,
coefficient_method=coefficient_method, date_array=date_array,
include_ci=include_ci, lower=lower, upper=upper
)
coef_df_lower, coef_df_upper = None, None
if with_knot:
knot_df = self.get_regression_coef_knots(training_meta, point_method, point_posteriors, posterior_samples)
else:
knot_df = None
regressor_col = coef_df.columns.tolist()[1:]
nrow = math.ceil(len(regressor_col) / ncol)
fig, axes = plt.subplots(nrow, ncol, figsize=figsize, squeeze=False)
for idx, col in enumerate(regressor_col):
row_idx = idx // ncol
col_idx = idx % ncol
coef = coef_df[col]
axes[row_idx, col_idx].plot(coef, alpha=.8, label='coefficients', color=OrbitPalette.BLUE.value)
if coef_df_lower is not None and coef_df_upper is not None:
coef_lower = coef_df_lower[col]
coef_upper = coef_df_upper[col]
axes[row_idx, col_idx].fill_between(np.arange(0, coef_df.shape[0]), coef_lower, coef_upper,
alpha=.3, color=OrbitPalette.BLUE.value)
if knot_df is not None:
step = knot_df['step']
knots = knot_df[col].values
axes[row_idx, col_idx].scatter(x=step, y=knots, marker='^', s=markersize,
color=OrbitPalette.GREEN.value, alpha=0.5)
if ylim is not None:
axes[row_idx, col_idx].set_ylim(ylim)
axes[row_idx, col_idx].set_title('{}'.format(col))
axes[row_idx, col_idx].ticklabel_format(useOffset=False)
plt.tight_layout()
if is_visible:
plt.show()
else:
plt.close()
return axes
# TODO: need a unit test of this function
def get_level_knots(self, training_meta, point_method, point_posteriors, posterior_samples):
"""Given posteriors, return knots and correspondent date"""
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
_point_method = point_method
if point_method is None:
_point_method = PredictMethod.MEDIAN.value
lev_knots = point_posteriors \
.get(_point_method) \
.get(BaseSamplingParameters.LEVEL_KNOT.value)
lev_knots = np.squeeze(lev_knots, 0)
out = {
date_col: self._level_knot_dates,
BaseSamplingParameters.LEVEL_KNOT.value: lev_knots,
}
return pd.DataFrame(out)
def get_levels(self, training_meta, point_method, point_posteriors, posterior_samples):
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
_point_method = point_method
if point_method is None:
_point_method = PredictMethod.MEDIAN.value
levs = point_posteriors \
.get(_point_method) \
.get(BaseSamplingParameters.LEVEL.value)
levs = np.squeeze(levs, 0)
out = {
date_col: date_array,
BaseSamplingParameters.LEVEL.value: levs,
}
return pd.DataFrame(out)
@orbit_style_decorator
def plot_lev_knots(self, training_meta, point_method, point_posteriors, posterior_samples,
path=None, is_visible=True, title="", fontsize=16,
markersize=250, figsize=(16, 8)):
""" Plot the fitted level knots along with the actual time series.
Parameters
----------
path : str; optional
path to save the figure
is_visible : boolean
whether we want to show the plot. If called from unittest, is_visible might = False.
title : str; optional
title of the plot
fontsize : int; optional
fontsize of the title
markersize : int; optional
knot marker size
figsize : tuple; optional
figsize passed to `matplotlib.pyplot.figure()`
Returns
-------
matplotlib axes object
"""
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
response = training_meta[TrainingMetaKeys.RESPONSE.value]
levels_df = self.get_levels(training_meta, point_method, point_posteriors, posterior_samples)
knots_df = self.get_level_knots(training_meta, point_method, point_posteriors, posterior_samples)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(date_array, response, color=OrbitPalette.BLUE.value, lw=1, alpha=0.7, label='actual')
ax.plot(levels_df[date_col], levels_df[BaseSamplingParameters.LEVEL.value],
color=OrbitPalette.BLACK.value, lw=1, alpha=0.8,
label=BaseSamplingParameters.LEVEL.value)
ax.scatter(knots_df[date_col], knots_df[BaseSamplingParameters.LEVEL_KNOT.value],
color=OrbitPalette.GREEN.value, lw=1, s=markersize, marker='^', alpha=0.8,
label=BaseSamplingParameters.LEVEL_KNOT.value)
ax.legend()
ax.grid(True, which='major', c='grey', ls='-', lw=1, alpha=0.5)
ax.set_title(title, fontsize=fontsize)
if path:
fig.savefig(path)
if is_visible:
plt.show()
else:
plt.close()
return ax
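# --- Illustrative sketch (not part of the model code above) -----------------
# The coefficient path above repeatedly computes curves as kernel-weighted
# combinations of knot values: beta(t) = sum_k K(t, knot_k) * knot_value_k.
# gauss_kernel / sandwich_kernel are imported from orbit elsewhere; the
# row-normalized Gaussian below is only a stand-in to show the mechanics.
def _demo_gauss_kernel(tp, knots_tp, rho=0.1):
    # shape (len(tp), len(knots_tp)); each row normalized to sum to one
    k = np.exp(-((tp[:, None] - knots_tp[None, :]) ** 2) / (2.0 * rho ** 2))
    return k / k.sum(axis=1, keepdims=True)
# Usage sketch:
#   tp = np.arange(1, 101) / 100.0
#   knots_tp = np.linspace(0.1, 1.0, 5)
#   knot_values = np.random.normal(size=(1, 5))   # 1 regressor x 5 knots
#   betas = np.matmul(knot_values, _demo_gauss_kernel(tp, knots_tp).T)
#   betas has shape (1, 100): a smooth coefficient curve through the knots.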
|
1600051
|
import logging
from logging import (
Logger,
)
from dependency_injector.wiring import (
Provide,
)
from src.queries.models import (
Cart,
)
from src.queries.repository import (
CartQueryRepository,
)
from minos.aggregate import (
Event,
)
from minos.cqrs import (
QueryService,
)
from minos.networks import (
Request,
Response,
enroute,
)
logger = logging.getLogger(__name__)
class CartQueryService(QueryService):
"""CartQueryService class."""
repository: CartQueryRepository = Provide["cart_repository"]
@enroute.rest.query("/cart/{uuid}", "GET")
async def get_cart(self, request: Request) -> Response:
"""Get a Cart instance.
:param request: A request instance.
:return: A response containing the requested ``Cart`` instance.
"""
params = await request.params()
cart_obj: Cart = self.repository.get(params["uuid"])
return Response(cart_obj)
@enroute.rest.query("/cart/{uuid}/items", "GET")
async def get_cart_items(self, request: Request) -> Response:
"""Get a Cart instance.
:param request: A request instance..
:return: A response exception.
"""
params = await request.params()
items_obj = self.repository.get_items_cart(params["uuid"])
return Response(items_obj)
@enroute.broker.event("CartCreated")
async def cart_created(self, request: Request) -> None:
"""Handle the Cart creation events.
:param request: A request instance containing the aggregate difference.
:return: This method does not return anything.
"""
event: Event = await request.content()
self.repository.add(event)
@enroute.broker.event("CartUpdated")
async def cart_updated(self, request: Request) -> None:
"""Handle the Cart update events.
:param request: A request instance containing the aggregate difference.
:return: This method does not return anything.
"""
event: Event = await request.content()
cart_uuid = event["uuid"]
items = event.get_all()
await items["products"][0]["product"].resolve()
self.repository.add_item(
cart_uuid=cart_uuid, item=items["products"][0], product=items["products"][0]["product"]
)
|
1600057
|
def co_code_findloadednames(co):
"""Find in the code of a code object, all loaded names.
(by LOAD_NAME, LOAD_GLOBAL or LOAD_FAST) """
import dis
from opcode import HAVE_ARGUMENT, opmap
hasloadname = (opmap['LOAD_NAME'],
opmap['LOAD_GLOBAL'], opmap['LOAD_FAST'])
insns = dis.get_instructions(co)
len_co_names = len(co.co_names)
indexset = {}
for insn in insns:
if insn.opcode >= HAVE_ARGUMENT:
if insn.opcode in hasloadname:
indexset[insn.argval] = 1
if len(indexset) >= len_co_names:
break
for name in co.co_varnames:
try:
del indexset[name]
except KeyError:
pass
return indexset
def co_findloadednames(co):
"""Find all loaded names in a code object and all its consts of code type"""
names = {}
names.update(co_code_findloadednames(co))
for c in co.co_consts:
if isinstance(c, type(co)):
names.update(co_findloadednames(c))
return names
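# Illustrative usage (a minimal sketch; `_outer` is a made-up demo function):
if __name__ == '__main__':
    def _outer():
        x = [3, 1, 2]
        return sorted(x)
    # 'x' is a local variable and is excluded; expected output: ['sorted']
    print(sorted(co_findloadednames(_outer.__code__)))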
|
1600065
|
from typing import Optional
from aiogrpcclient import BaseGrpcClient
from idm.api.proto.chat_manager_service_pb2 import Chat as ChatPb
from nexus.hub.proto.delivery_service_pb2 import \
StartDeliveryRequest as StartDeliveryRequestPb
from nexus.hub.proto.delivery_service_pb2 import \
StartDeliveryResponse as StartDeliveryResponsePb
from nexus.hub.proto.delivery_service_pb2_grpc import DeliveryStub
from nexus.hub.proto.submitter_service_pb2 import \
SubmitRequest as SubmitRequestPb
from nexus.hub.proto.submitter_service_pb2 import \
SubmitResponse as SubmitResponsePb
from nexus.hub.proto.submitter_service_pb2_grpc import SubmitterStub
from nexus.models.proto.typed_document_pb2 import \
TypedDocument as TypedDocumentPb
class HubGrpcClient(BaseGrpcClient):
stub_clses = {
'delivery': DeliveryStub,
'submitter': SubmitterStub,
}
async def start_delivery(
self,
typed_document_pb: TypedDocumentPb,
chat: ChatPb,
request_id: Optional[str],
session_id: Optional[str],
) -> StartDeliveryResponsePb:
return await self.stubs['delivery'].start_delivery(
StartDeliveryRequestPb(
typed_document=typed_document_pb,
chat=chat,
),
metadata=(('request-id', request_id), ('session-id', session_id))
)
async def submit(
self,
telegram_document: bytes,
telegram_file_id: str,
chat: ChatPb,
request_id: Optional[str] = None,
session_id: Optional[str] = None,
) -> SubmitResponsePb:
return await self.stubs['submitter'].submit(
SubmitRequestPb(
telegram_document=telegram_document,
telegram_file_id=telegram_file_id,
chat=chat,
),
metadata=(('request-id', request_id), ('session-id', session_id))
)
|
1600096
|
from .utils import *
from .cat import build_model
from .dataset import build_loader
from .config import get_config
from .optimizer import build_optimizer
from .lr_scheduler import build_scheduler
from .logger import create_logger
|
1600124
|
from __future__ import absolute_import, division, print_function
import os
import re
import pickle
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from rlssm import plotting
from .utils import list_individual_variables
from .stan_utility import check_all_diagnostics
from .random import random_ddm, random_rdm_2A
class FittedModel(object):
def __init__(self,
stan_model,
data,
hierarchical_levels,
model_label,
family,
n_parameters_individual,
n_parameters_trial,
print_diagnostics,
priors):
self.stan_model = stan_model
self.model_label = model_label
self.family = family
self.priors = priors
# Print mcmc diagnostics...
if print_diagnostics:
check_all_diagnostics(self.stan_model)
self.data_info = {'N': data.shape[0], 'data':data}
n_samples_after_warmup = self.stan_model.stan_args[0]['iter'] - self.stan_model.stan_args[0]['warmup']
n_posterior_samples = n_samples_after_warmup / self.stan_model.stan_args[0]['thin']*len(self.stan_model.stan_args)
self.parameters_info = {'hierarchical_levels': hierarchical_levels,
'n_parameters_individual':n_parameters_individual,
'n_parameters_trial': n_parameters_trial,
'n_posterior_samples': int(n_posterior_samples)}
if self.parameters_info['hierarchical_levels'] == 2:
self.data_info.update({'L': len(pd.unique(data.participant))})
self.parameters_info.update({'n_parameters_group': n_parameters_individual*2,
'n_parameters_hierarchical': n_parameters_individual*2 + n_parameters_individual*self.data_info['L']})
r = re.compile("transf_.+")
parameters_names_transf = list(filter(r.match, self.stan_model.flatnames))
individual_parameters_names = [name[10:] for name in parameters_names_transf]
r = re.compile("mu_.+")
group_parameters_mu = list(filter(r.match, self.stan_model.flatnames))
r = re.compile("sd_.+")
group_parameters_sd = list(filter(r.match, self.stan_model.flatnames))
group_parameters_names_transf = parameters_names_transf + group_parameters_sd # add transformed par names for plotting
group_parameters_names = group_parameters_mu + group_parameters_sd
r = re.compile("z_.+_trial.+")
trials_deviations = list(filter(r.match, self.stan_model.flatnames))
r = re.compile("z_.+")
individual_deviations = list(filter(r.match, self.stan_model.flatnames))
if len(trials_deviations) > 0:
for el in trials_deviations:
    individual_deviations.remove(el)
parameters_names = group_parameters_names + individual_deviations
parameters_names_all = parameters_names + trials_deviations
self.parameters_info.update({'parameters_names': parameters_names, # group parameters and individual deviations
'group_parameters_names': group_parameters_names, # group parameters
'individual_parameters_names': individual_parameters_names, # names of individual parameters
'group_parameters_names_transf': parameters_names_transf, # group parameters for plotting
'parameters_names_all': parameters_names_all}) # all parameters for the rhat calculations
else:
self.data_info.update({'L': 1})
r = re.compile("transf_.+")
parameters_names_transf = list(filter(r.match, self.stan_model.flatnames))
parameters_names = [name[7:] for name in parameters_names_transf]
r = re.compile("z_.+_trial.+")
parameters_names_all = parameters_names + list(filter(r.match, self.stan_model.flatnames))
self.parameters_info.update({'parameters_names': parameters_names})
self.parameters_info.update({'parameters_names_transf': parameters_names_transf}) # add transformed par names for plotting
self.parameters_info.update({'parameters_names_all': parameters_names_all}) # for the rhat calculations
def get_rhat(self):
"""Extracts rhat from stan model's summary as a pandas dataframe.
Only considers parameters (Not all variables specified in stan's model).
Note that, when DDM parameters are estimated at a trial level, these are included in the rhat stats.
Returns
-------
convergence: DataFrame
Data frame with one row per parameter and columns for the rhat values and variable names.
"""
summary = self.stan_model.summary(pars=self.parameters_info['parameters_names_all'])
convergence = pd.DataFrame({'rhat': np.array(summary['summary'])[:, 9],
'variable': summary['summary_rownames']})
return convergence
def calculate_waic(self, pointwise=False):
"""Calculates the Watanabe-Akaike information criteria.
Calculates the WAIC using the pWAIC2 (variance-based) penalty,
according to http://www.stat.columbia.edu/~gelman/research/published/waic_understand3.pdf
Parameters
----------
pointwise : bool, default to False
By default, gives the averaged waic.
Set to True if you want additional waic per observation.
Returns
-------
out: dict
Dictionary containing lppd (log pointwise predictive density),
p_waic, waic, waic_se (standard error of the waic), and
pointwise_waic (when `pointwise` is True).
"""
log_likelihood = self.stan_model['log_lik'] # n_samples X N observations
likelihood = np.exp(log_likelihood)
mean_l = np.mean(likelihood, axis=0) # N observations
pointwise_lppd = np.log(mean_l)
lppd = np.sum(pointwise_lppd)
pointwise_var_l = np.var(log_likelihood, axis=0) # N observations
var_l = np.sum(pointwise_var_l)
pointwise_waic = - 2*pointwise_lppd + 2*pointwise_var_l
waic = -2*lppd + 2*var_l
waic_se = np.sqrt(self.data_info['N'] * np.var(pointwise_waic))
if pointwise:
out = {'lppd':lppd,
'p_waic':var_l,
'waic':waic,
'waic_se':waic_se,
'pointwise_waic':pointwise_waic}
else:
out = {'lppd':lppd,
'p_waic':var_l,
'waic':waic,
'waic_se':waic_se}
return out
def get_last_values(self):
"""Extracts the last posterior estimates values in each chain.
Returns
-------
starting_points: DataFrame
Data frame with as many rows as number of chains that were run.
Parameter values are in separate columns.
"""
samplesChains = self.stan_model.to_dataframe(pars=self.parameters_info['parameters_names_all'],
permuted=False,
diagnostics=False)
starting_points = samplesChains[samplesChains['draw'] == max(samplesChains['draw'])]
return starting_points
class ModelResults(object):
def __init__(self,
model_label,
data_info,
parameters_info,
priors,
rhat,
waic,
last_values,
samples,
trial_samples):
"""Initiates a ModelResults object.
Parameters
----------
Attributes
----------
"""
self.model_label = model_label
self.data_info = data_info
self.parameters_info = parameters_info
self.priors = priors
self.rhat = rhat
self.waic = waic
self.last_values = last_values
self.samples = samples
self.trial_samples = trial_samples
def to_pickle(self, filename=None):
"""Pickle the fitted model's results object to file.
This can be used to store the model's result
and read them and inspect them at a later stage,
without having to refit the model.
Parameters
----------
filename : str, optional
File path where the pickled object will be stored.
If not specified, it is set to '<model_label>.pkl' in the current working directory.
"""
dir_path = os.getcwd()#os.path.dirname(os.path.realpath(__file__))
if filename is None:
filename = os.path.join(dir_path, '{}.pkl'.format(self.model_label))
print("Saving file as: {}".format(filename))
with open(filename, 'wb') as f:
pickle.dump(self, f)
def plot_posteriors(self,
gridsize=100,
clip=None,
show_intervals="HDI",
alpha_intervals=.05,
intervals_kws=None,
**kwargs):
"""Plots posterior predictives of the model's parameters.
If the model is hierarchical, then only the group parameters are plotted.
In particular, group means are plotted in the first row
and group standard deviations are plotted in the second row.
By default, 95 percent HDI are shown.
The kernel density estimation is calculated using scipy.stats.gaussian_kde.
Parameters
----------
gridsize : int, default to 100
Resolution of the kernel density estimation function, default to 100.
clip : tuple of (float, float), optional
Range for the kernel density estimation function.
Default is min and max values of the distribution.
show_intervals : str, default to "HDI"
Either "HDI", "BCI", or None.
HDI is better when the distribution is not symmetrical.
If None, then no intervals are shown.
alpha_intervals : float, default to .05
Alpha level for the intervals.
Default is 5 percent which gives 95 percent BCIs and HDIs.
intervals_kws : dict
Additional arguments for `matplotlib.axes.Axes.fill_between`
that shows shaded intervals.
By default, they are 50 percent transparent.
Other Parameters
----------------
**kwargs
Additional parameters for seaborn.FacetGrid.
Returns
-------
g : seaborn.FacetGrid
"""
if self.parameters_info['hierarchical_levels'] == 2:
cols = self.parameters_info['group_parameters_names_transf']
else:
cols = self.parameters_info['parameters_names_transf']
dfm = pd.melt(self.samples[cols], value_vars=cols)
g = sns.FacetGrid(dfm,
col="variable",
col_wrap=self.parameters_info['n_parameters_individual'],
sharex=False,
**kwargs)
g.map(plotting.plot_posterior,
"value",
gridsize=gridsize,
clip=clip,
show_intervals=show_intervals,
alpha_intervals=alpha_intervals,
intervals_kws=intervals_kws)
return g
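# --- Illustrative sketch (not part of the library code above) ---------------
# calculate_waic() implements WAIC with the variance-based penalty (pWAIC2):
#   lppd   = sum_i log( mean_s p(y_i | theta_s) )
#   p_waic = sum_i Var_s[ log p(y_i | theta_s) ]
#   waic   = -2 * lppd + 2 * p_waic
# A standalone check on a fake (n_samples x n_observations) log-likelihood:
#   log_lik = np.random.normal(loc=-1.0, scale=0.2, size=(4000, 100))
#   lppd = np.sum(np.log(np.mean(np.exp(log_lik), axis=0)))
#   p_waic = np.sum(np.var(log_lik, axis=0))
#   waic = -2 * lppd + 2 * p_waic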
|
1600165
|
from rtamt.node.node import Node
class UnaryNode(Node):
def __init__(self, child):
"""Constructor for Node"""
super(Node, self).__init__()
self.add_child(child)
|
1600180
|
from peewee import (
IntegerField,
BooleanField,
FloatField,
CharField,
Model
)
# This table exists because we fetch _all_ shot_chart_detail records,
# even if the game doesn't exist in the game table. So, we stage all
# records in this temporary table, then insert into the main table
# filtering by existing game_ids.
class ShotChartDetailTemp(Model):
# Autogenerated id column here.
game_id = IntegerField(index=True, unique=False)
player_id = IntegerField(unique=False)
team_id = IntegerField(unique=False)
game_event_id = IntegerField(null=True)
period = IntegerField(null=True)
minutes_remaining = IntegerField(null=True)
seconds_remaining = IntegerField(null=True)
event_type = CharField(null=True)
action_type = CharField(null=True)
shot_type = CharField(null=True)
shot_zone_basic = CharField(null=True)
shot_zone_area = CharField(null=True)
shot_zone_range = CharField(null=True)
shot_distance = FloatField(null=True)
loc_x = IntegerField(null=True)
loc_y = IntegerField(null=True)
shot_attempted_flag = BooleanField(null=True)
shot_made_flag = BooleanField(null=True)
htm = CharField(null=True)
vtm = CharField(null=True)
class Meta:
db_table = 'shot_chart_detail_temp'
temporary = True
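# Illustrative sketch of the staged insert described above. `ShotChartDetail`
# and `Game` are hypothetical models assumed to exist elsewhere:
#   fields = [f for f in ShotChartDetailTemp._meta.sorted_fields if f.name != 'id']
#   query = (ShotChartDetailTemp
#            .select(*fields)
#            .where(ShotChartDetailTemp.game_id.in_(Game.select(Game.game_id))))
#   ShotChartDetail.insert_from(query, fields=[f.name for f in fields]).execute()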
|
1600192
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from utilities.handy_wrappers import HandyWrappers
import time
class DynamicXPathFormat():
def test(self):
baseUrl = "https://letskodeit.teachable.com"
driver = webdriver.Firefox()
driver.maximize_window()
driver.implicitly_wait(10)
driver.get(baseUrl)
# Login -> Lecture "How to click and type on a web element"
driver.find_element(By.LINK_TEXT, "Login").click()
email = driver.find_element(By.ID, "user_email")
email.send_keys("<EMAIL>")
password = driver.find_element(By.ID, "user_password")
password.send_keys("<PASSWORD>")
driver.find_element(By.NAME, "commit").click()
# Search for courses -> You don't need to search the course
# You can select it without searching also
searchBox = driver.find_element(By.ID, "search-courses")
searchBox.send_keys("JavaScript")
# Select Course
_course = "//div[contains(@class,'course-listing-title') and contains(text(),'{0}')]"
_courseLocator = _course.format("JavaScript for beginners")
courseElement = driver.find_element(By.XPATH, _courseLocator)
courseElement.click()
ff = DynamicXPathFormat()
ff.test()
|
1600204
|
import sys
import os
def check_acc(file_gt, file_pred):
def parse_line(line):
line_split = line.split()
flag = True
parse_dict = {}
if 'SELECT' in line_split and \
line_split.index('SELECT') != 0:
flag = False
if 'SELECT' in line_split:
pos = None
if 'WHERE' in line_split:
pos = line_split.index('WHERE')
else:
pos = len(line_split)
select = line_split[line_split.index('SELECT') + 1:pos]
if len(select) > 0 and select[0] in ['max', 'min', 'count', 'sum', 'avg']:
parse_dict['agg'] = select[0]
select = select[1:]
else:
parse_dict['agg'] = None
parse_dict['sel'] = ' '.join(select)
else:
parse_dict['sel'] = parse_dict['agg'] = None
if 'WHERE' in line_split:
startpos = line_split.index('WHERE') + 1
cond = []
and_indices = [i for i, x in enumerate(line_split) if x == "AND"]
and_indices.append(len(line_split))
for endpos in and_indices:
if endpos < startpos:
continue
cond_t = line_split[startpos:endpos]
pos_t = None
if 'EQL' in cond_t:
pos_t = cond_t.index('EQL')
elif 'GT' in cond_t:
pos_t = cond_t.index('GT')
elif 'LT' in cond_t:
pos_t = cond_t.index('LT')
if pos_t is None:
flag = False
else:
cond.append([' '.join(cond_t[0:pos_t]), cond_t[pos_t], ' '.join(cond_t[pos_t + 1:])])
startpos = endpos + 1
parse_dict['cond'] = cond
else:
parse_dict['cond'] = None
return flag, parse_dict
gt = open(file_gt)
pred = open(file_pred)
lines_p = pred.readlines()
lines_g = gt.readlines()
if len(lines_p) != len(lines_g):
print("Different line number!\n")
return
tot_err = 0
for i, (line_p, line_g) in enumerate(zip(lines_p, lines_g)):
print("=========================")
print(line_g)
print(line_p)
flag_p, dict_p = parse_line(line_p)
flag_g, dict_g = parse_line(line_g)
print(flag_p, dict_p)
print(flag_g, dict_g)
if flag_p == False or flag_g == False:
tot_err += 1
print("{}: Fail!".format(i))
continue
if dict_p['sel'] != dict_g['sel'] or dict_p['agg'] != dict_g['agg']:
tot_err += 1
print("{}: Fail!".format(i))
continue
cond_p = dict_p['cond']
cond_g = dict_g['cond']
if cond_p is None and cond_g is None:
print("{}: Succeed!".format(i))
continue
if cond_p is None or cond_g is None:
tot_err += 1
print("{}: Fail!".format(i))
continue
if set(x[0] for x in cond_p) != set(x[0] for x in cond_g):
tot_err += 1
print("{}: Fail!".format(i))
continue
flag = True
for idx_p in range(len(cond_p)):
idx_g = tuple(x[0] for x in cond_g).index(cond_p[idx_p][0])
if cond_g[idx_g][1] != cond_p[idx_p][1] or cond_g[idx_g][2] != cond_p[idx_p][2]:
flag = False
break
if not flag:
tot_err += 1
print("{}: Fail!".format(i))
continue
print("{}: Succeed!".format(i))
print(tot_err)
return tot_err
check_acc('GROUND_TRUTH', 'RESULT')
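# Example of the line format parse_line() expects (illustrative):
#   "SELECT count player WHERE team EQL lakers AND points GT 20"
# parses to flag=True and
#   {'agg': 'count', 'sel': 'player',
#    'cond': [['team', 'EQL', 'lakers'], ['points', 'GT', '20']]}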
|
1600216
|
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('nj')
J.url = 'http://state.nj.us'
|
1600222
|
from datetime import timedelta
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_USER = "myuser"
BROKER_PASSWORD = "<PASSWORD>"
BROKER_VHOST = "myvhost"
CELERY_RESULT_BACKEND = "amqp"
CELERY_AMQP_TASK_RESULT_EXPIRES = 300
CELERY_IMPORTS = ("testapp.tasks", )
CELERY_ROUTES = ("testapp.process_router.ProcessRouter",)
CELERYBEAT_SCHEDULER = "testapp.ntimes_scheduler.NTimesScheduler"
CELERYBEAT_SCHEDULE = {
"runs-right-away": {
"task": "testapp.tasks.add",
"schedule": timedelta(seconds=0),
"times": 1,
"args": (16, 16)
},
"runs-every-five": {
"task": "testapp.tasks.add",
"schedule": timedelta(seconds=15),
"args": (5, 10)
},
}
|
1600249
|
df14 = h2o.H2OFrame.from_python(
{'D': ['18OCT2015:11:00:00',
'19OCT2015:12:00:00',
'20OCT2015:13:00:00']},
column_types=['time'])
df14.types
# {u'D': u'time'}
|
1600255
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BlurPool2d(nn.Sequential):
"""Blur Pooling Layer (MaxPool2d replacement)
See: https://richzhang.github.io/antialiased-cnns/
Paper: https://arxiv.org/abs/1904.11486
"""
__constants__ = ["in_features"]
_blur_kernel = torch.tensor(
[[1 / 16, 2 / 16, 1 / 16], [2 / 16, 4 / 16, 2 / 16], [1 / 16, 2 / 16, 1 / 16]]
)
def __init__(self, in_features):
"""
Args:
in_features (int): The number of channels in the input
"""
super().__init__()
self.in_features = in_features
self.add_module("maxpool", nn.MaxPool2d(2, stride=1))
blurpool = nn.Conv2d(
in_features,
in_features,
kernel_size=3,
padding=1,
stride=2,
bias=False,
groups=in_features,
)
blurpool.weight = torch.nn.Parameter(
self._blur_kernel.repeat(in_features, 1, 1, 1), requires_grad=False
)
self.add_module("blurpool", blurpool)
def forward(self, x):
return super(BlurPool2d, self).forward(x)
def extra_repr(self):
return "in_features={}".format(self.in_features)
|
1600269
|
import Core
info = {
"friendly_name": "Recent Changes List",
"example_template": "changecount",
"summary": "Inserts a description of recent Wiki activity.",
"details": """
<p>If 'changecount' is omitted, all changes recorded since the
current server was started are printed; otherwise, the list is
limited to just the most recent 'changecount' changes.</p>
"""
}
def SublanguageHandler(args, doc, renderer):
if args.strip():
count = int(args.strip())
else:
count = None
renderer.add(Core.RecentChanges(count))
|
1600274
|
from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
def __str__(self):
if self.parent:
return f"{self.parent.name} / {self.name}"
else:
return f"ROOT / {self.name}"
class Group(models.Model):
name = models.CharField(max_length=255)
number = models.CharField(max_length=255, null=True, unique=True)
desc = models.CharField(max_length=1024, null=True, blank=True)
category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True)
last_modified = models.DateTimeField(auto_now=True)
bot_enabled = models.BooleanField(default=False)
vacancy = models.BooleanField(default=True)
flag = models.IntegerField(default=0, null=False)
def __str__(self):
return f"{self.category.name} / ({self.name}, {self.number})"
class Website(models.Model):
name = models.CharField(max_length=255)
url = models.CharField(max_length=255)
desc = models.CharField(max_length=1024, null=True, blank=True)
category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True)
last_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.category.name} / ({self.name}, {self.url})"
|
1600282
|
import math
import random
import csv
import numpy as np
""""Functions used by build_l4.py"""
def lerp(v0, v1, t):
return v0 * (1.0 - t) + v1 * t
def distance_weight(delta_p, w_min, w_max, r_max):
r = np.linalg.norm(delta_p)
if r >= r_max:
return 0.0
else:
return lerp(w_max, w_min, r / r_max)
def orientation_tuning_weight(tuning1, tuning2, w_min, w_max):
# 0-180 is the same as 180-360, so just modulo by 180
delta_tuning = math.fmod(abs(tuning1 - tuning2), 180.0)
# 90-180 needs to be flipped, then normalize to 0-1
delta_tuning = delta_tuning if delta_tuning < 90.0 else 180.0 - delta_tuning
# t = delta_tuning / 90.0
return lerp(w_max, w_min, delta_tuning / 90.0)
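# Worked example (illustrative): tunings 10 and 170 differ by 160 degrees,
# which the flip maps to 20 degrees; with w_min=0 and w_max=1 the weight is
# lerp(1, 0, 20/90) = 1 - 20/90 ~= 0.78, i.e. nearly aligned orientations.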
def distance_tuning_connection_handler(source, target, d_weight_min, d_weight_max, d_max, t_weight_min,
t_weight_max, nsyn_min, nsyn_max):
# Avoid self-connections.
sid = source.node_id
tid = target.node_id
if sid == tid:
if sid % 100 == 0:
print "processing connections for node", sid
return None
# first create weights by euclidean distance between cells
# DO NOT use PERIODIC boundary conditions in x and y!
dw = distance_weight(np.array([source['x'], source['y']]) - np.array([target['x'], target['y']]), d_weight_min,
d_weight_max, d_max)
# drop the connection if the weight is too low
if dw <= 0:
return None
# next create weights by orientation tuning [ aligned, misaligned ] --> [ 1, 0 ]
# Check that the orientation tuning property exists for both cells; otherwise,
# ignore the orientation tuning.
if source['tuning_angle'] > 0 and target['tuning_angle'] > 0:
tw = dw * orientation_tuning_weight(source['tuning_angle'],
target['tuning_angle'],
t_weight_min, t_weight_max)
else:
tw = dw
# filter out nodes by treating the weight as a probability of connection
if random.random() > tw:
return None
# Add the number of synapses for every connection.
# It is probably very useful to take this out into a separate function.
tmp_nsyn = random.randint(nsyn_min, nsyn_max)
return tmp_nsyn
def distance_connection_handler(source, target, d_weight_min, d_weight_max, d_max, nsyn_min, nsyn_max):
# Avoid self-connections.
sid = source.node_id
tid = target.node_id
if sid == tid:
if sid % 100 == 0:
print "processing connections for node", sid
return None
# first create weights by euclidean distance between cells
# DO NOT use PERIODIC boundary conditions in x and y!
dw = distance_weight(np.array([source['x'], source['y']]) - np.array([target['x'], target['y']]), d_weight_min,
d_weight_max, d_max)
# drop the connection if the weight is too low
if dw <= 0:
return None
# filter out nodes by treating the weight as a probability of connection
if random.random() > dw:
return None
# Add the number of synapses for every connection.
# It is probably very useful to take this out into a separate function.
tmp_nsyn = random.randint(nsyn_min, nsyn_max)
return tmp_nsyn
def generate_random_positions(N, center, height, radius_outer, radius_inner):
"""
:param N: number of positions to generate
:param center: center of the cylinder (numpy array)
:param height: cylinder height
:param radius_outer: outer radius, within which all positions are generated
:param radius_inner: inner radius, within which no positions are generated
:return: A generated array of positions within the given bounds
"""
# Generate N random x and y values using polar coordinates;
# for phi, use uniform distribution;
# for r, the probability density is p(r)dr = r dr, so use inverse transform sampling:
# integral_R0_R p(r) dr = R^2/2 - R0^2/2; draw x = R^2/2 - R0^2/2 from a uniform distribution with values of x
# between 0 and R1^2/2 - R0^2/2.
phi = 2.0 * math.pi * np.random.random([N])
r = np.sqrt((radius_outer**2 - radius_inner**2) * np.random.random([N]) + radius_inner**2)
x = center[0] + r * np.cos(phi)
z = center[2] + r * np.sin(phi)
# Generate N random z values.
y = center[1] + height * (np.random.random([N]) - 0.5)
return np.column_stack((x, y, z))
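# Sanity check (illustrative): generated points should fall inside the annulus
# in the x-z plane and inside the height bounds:
#   pts = generate_random_positions(1000, np.array([0.0, 0.0, 0.0]), 100.0, 50.0, 10.0)
#   r = np.sqrt(pts[:, 0]**2 + pts[:, 2]**2)
#   assert r.min() >= 10.0 and r.max() <= 50.0 and np.abs(pts[:, 1]).max() <= 50.0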
def cylinder_from_density(N, density, height, center=None):
"""
Build a cylinder for given point density, center and height.
N: number of points
density: density of points
height: height of the cylinder
center: desired center of the cylinder
"""
if center is None:
center = np.array([0.0, 0.0, 0.0])
height = float(height)
radius = math.sqrt((N / density) / (height * math.pi) )
return center, height, radius
def gaussianLL(src, trg, weight, weight_sigma=50.0):
src_tuning = src['tuning_angle']
tar_tuning = trg['tuning_angle']
delta_tuning = abs(abs(abs(180.0 - abs(float(tar_tuning) - float(src_tuning)) % 360.0) - 90.0) - 90.0)
return weight * math.exp(-(delta_tuning / weight_sigma) ** 2)
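# Worked example (illustrative): the nested abs() folds the angular difference
# into [0, 90], treating opposite directions as one orientation; e.g. src=0,
# trg=180 gives delta_tuning=0 (full weight), while src=0, trg=90 gives
# delta_tuning=90 and weight * exp(-(90/50)**2) ~= 0.04 * weight.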
"""Functions used by build_lgn.py"""
def read_dat_file(filename, type_mapping={'transient_ON': 'tON_001', 'transient_OFF': 'tOFF_001', 'transient_ON_OFF': 'tONOFF_001'}):
positions_table = {val: [] for val in type_mapping.values()}
offset_table = {val: [] for val in type_mapping.values()}
with open(filename, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ')
for row in csvreader:
model_type = type_mapping.get(row[0], None)
if model_type:
positions_table[model_type].append([float(row[1]), float(row[2])])
offset_table[model_type].append([float(row[3]), float(row[4])])
return positions_table, offset_table
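# Input format assumed by read_dat_file (illustrative example line):
#   transient_ON 12.5 30.2 0.8 -0.3
# i.e. space-delimited "<model type> <x> <y> <offset_x> <offset_y>"; rows whose
# type is missing from type_mapping are skipped.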
def calc_tuning_angle(offset_vect):
offset_sum = sum(offset_vect)
if offset_sum == 0:
return -1.0
else:
tmp_vec = offset_vect / np.sqrt(offset_vect[0]**2 + offset_vect[1]**2)
return (360.0 + 180.0 * np.arctan2(tmp_vec[1], tmp_vec[0]) / np.pi) % 360.0
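# Illustrative examples (not in the original file): calc_tuning_angle() returns
# the direction of the ON/OFF offset vector in [0, 360) degrees, with -1.0 as a
# "no tuning" sentinel:
#   calc_tuning_angle(np.array([1.0, 0.0])) -> 0.0
#   calc_tuning_angle(np.array([0.0, 1.0])) -> 90.0
#   calc_tuning_angle(np.array([1.0, 1.0])) -> 45.0
#   calc_tuning_angle(np.array([0.0, 0.0])) -> -1.0
# Note the sentinel test uses the component sum, so a vector such as
# np.array([-1.0, 1.0]) also maps to -1.0.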
def select_source_cells(sources, target, lgn_mean, lgn_dim, l4_mean, l4_dim, N_syn):
target_id = target.node_id
source_ids = [s.node_id for s in sources]
    if target_id % 1000 == 0:
        print("connecting LGN cells to L4 cell #", target_id)
subfields_centers_distance_min = 10.0 # 10.0
subfields_centers_distance_max = 11.0 # 10.0
subfields_centers_distance_L = subfields_centers_distance_max - subfields_centers_distance_min
subfields_ON_OFF_width_min = 6.0 # 8.0 #10.0 #8.0 #8.0 #14.0 #15.0
subfields_ON_OFF_width_max = 8.0 # 10.0 #12.0 #10.0 #15.0 #20.0 #15.0
subfields_ON_OFF_width_L = subfields_ON_OFF_width_max - subfields_ON_OFF_width_min
subfields_width_aspect_ratio_min = 2.8 # 1.9 #1.4 #0.9 #1.0
subfields_width_aspect_ratio_max = 3.0 # 2.0 #1.5 #1.1 #1.0
subfields_width_aspect_ratio_L = subfields_width_aspect_ratio_max - subfields_width_aspect_ratio_min
vis_x = lgn_mean[0] + ((target['x'] - l4_mean[0]) / l4_dim[0]) * lgn_dim[0]
    vis_y = lgn_mean[1] + ((target['z'] - l4_mean[2]) / l4_dim[2]) * lgn_dim[1]
ellipse_center_x0 = vis_x #tar_cells[tar_gid]['vis_x']
ellipse_center_y0 = vis_y #tar_cells[tar_gid]['vis_y']
try:
tuning_angle = float(target['tuning_angle'])
tuning_angle = None if math.isnan(tuning_angle) or tuning_angle < 0 else tuning_angle
except Exception:
tuning_angle = None
if tuning_angle is None:
        ellipse_b0 = (subfields_ON_OFF_width_min + random.random() * subfields_ON_OFF_width_L) / 2.0  # Divide by 2 to convert from width to radius.
ellipse_b0 = 2.5 * ellipse_b0 # 1.5 * ellipse_b0
ellipse_a0 = ellipse_b0 # ellipse_b0
top_N_src_cells_subfield = 15 # 20
ellipses_centers_halfdistance = 0.0
else:
tuning_angle_value = float(tuning_angle)
        ellipses_centers_halfdistance = (subfields_centers_distance_min + random.random() * subfields_centers_distance_L) / 2.0
        ellipse_b0 = (subfields_ON_OFF_width_min + random.random() * subfields_ON_OFF_width_L) / 2.0  # Divide by 2 to convert from width to radius.
        ellipse_a0 = ellipse_b0 * (subfields_width_aspect_ratio_min + random.random() * subfields_width_aspect_ratio_L)
ellipse_phi = tuning_angle_value + 180.0 + 90.0 # Angle, in degrees, describing the rotation of the canonical ellipse away from the x-axis.
ellipse_cos_mphi = math.cos(-math.radians(ellipse_phi))
ellipse_sin_mphi = math.sin(-math.radians(ellipse_phi))
top_N_src_cells_subfield = 8 # 10 #9
    # To match the previous algorithm, reorganize source cells by type.
cell_type_dict = {
'tON_001': [(src_id, src_dict) for src_id, src_dict in zip(source_ids, sources) if src_dict['pop_id'] == 'tON_001'],
'tOFF_001': [(src_id, src_dict) for src_id, src_dict in zip(source_ids, sources) if src_dict['pop_id'] == 'tOFF_001'],
'tONOFF_001': [(src_id, src_dict) for src_id, src_dict in zip(source_ids, sources) if src_dict['pop_id'] == 'tONOFF_001']
}
src_cells_selected = {}
for src_type in cell_type_dict.keys():
src_cells_selected[src_type] = []
if tuning_angle is None:
ellipse_center_x = ellipse_center_x0
ellipse_center_y = ellipse_center_y0
ellipse_a = ellipse_a0
ellipse_b = ellipse_b0
else:
if src_type == 'tON_001':
ellipse_center_x = ellipse_center_x0 + ellipses_centers_halfdistance * ellipse_sin_mphi
ellipse_center_y = ellipse_center_y0 + ellipses_centers_halfdistance * ellipse_cos_mphi
ellipse_a = ellipse_a0
ellipse_b = ellipse_b0
elif src_type == 'tOFF_001':
ellipse_center_x = ellipse_center_x0 - ellipses_centers_halfdistance * ellipse_sin_mphi
ellipse_center_y = ellipse_center_y0 - ellipses_centers_halfdistance * ellipse_cos_mphi
ellipse_a = ellipse_a0
ellipse_b = ellipse_b0
else:
# Make this a simple circle.
ellipse_center_x = ellipse_center_x0
ellipse_center_y = ellipse_center_y0
# Make the region from which source cells are selected a bit smaller for the transient_ON_OFF cells,
# since each source cell in this case produces both ON and OFF responses.
ellipse_b = ellipses_centers_halfdistance/2.0 #0.01 #ellipses_centers_halfdistance + 1.0*ellipse_b0 #0.01 #0.5 * ellipse_b0 # 0.8 * ellipse_b0
ellipse_a = ellipse_b0 #0.01 #ellipse_b0
# Find those source cells of the appropriate type that have their visual space coordinates within the ellipse.
for src_id, src_dict in cell_type_dict[src_type]:
x, y = (src_dict['x'], src_dict['y'])
x = x - ellipse_center_x
y = y - ellipse_center_y
x_new = x
y_new = y
if tuning_angle is not None:
x_new = x * ellipse_cos_mphi - y * ellipse_sin_mphi
y_new = x * ellipse_sin_mphi + y * ellipse_cos_mphi
if ((x_new/ellipse_a)**2 + (y_new/ellipse_b) ** 2) <= 1.0:
if (tuning_angle is not None) and (src_type == 'tONOFF_001'):
src_tuning_angle = float(src_dict['tuning_angle'])
delta_tuning = abs(abs(abs(180.0-abs(tuning_angle_value-src_tuning_angle)%360.0)-90.0)-90.0)
if delta_tuning < 15.0:
src_cells_selected[src_type].append(src_id)
else:
src_cells_selected[src_type].append(src_id)
while len(src_cells_selected[src_type]) > top_N_src_cells_subfield:
src_cells_selected[src_type].remove(random.choice(src_cells_selected[src_type]))
select_cell_ids = [id for _, selected in src_cells_selected.items() for id in selected]
nsyns_ret = [N_syn if id in select_cell_ids else None for id in source_ids]
return nsyns_ret
|
1600322
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .resnet_embedding import ResNet50
from .resnet_embedding import ResNet101
from .resnet_embedding import ResNet152
from .resnext_vd_embedding import ResNeXt50_vd_32x4d
from .resnext_vd_embedding import ResNeXt50_vd_64x4d
from .resnext_vd_embedding import ResNeXt101_vd_32x4d
from .resnext_vd_embedding import ResNeXt101_vd_64x4d
from .resnext_vd_embedding import ResNeXt152_vd_32x4d
from .resnext_vd_embedding import ResNeXt152_vd_64x4d
from .se_resnext_vd_embedding import SE_ResNeXt50_vd_32x4d
from .se_resnext_vd_embedding import SE_ResNeXt101_vd_32x4d
from .se_resnext_vd_embedding import SENet154_vd
from .efficientnet_embedding import EfficientNetB4
from .res2net_vd import Res2Net101_vd_26w_4s
from .res2net_vd import Res2Net50_vd_26w_4s
from .hrnet_embedding import HRNet_W64_C
|
1600358
|
class Solution:
    def longestBeautifulSubstring(self, word: str) -> int:
        # Track the current non-decreasing run: cnt counts distinct vowels seen in
        # the run (each strict increase introduces a new vowel), start marks where
        # the run began. Once all 5 vowels appear, the run is beautiful.
        cnt = 1
        start = res = 0
        for i, (prev, cur) in enumerate(zip(word, word[1:]), 1):
            if cur < prev:  # order broken: restart the run at i
                cnt = 1
                start = i
            elif cur > prev:  # strictly larger vowel: one more distinct vowel
                cnt += 1
            if cnt == 5:
                res = max(res, i - start + 1)
        return res
class Solution:
    def longestBeautifulSubstring(self, word: str) -> int:
        # Alternative: track the set of vowels in the current non-decreasing run;
        # lo is the index just before the run starts.
        seen = set()
        lo, longest = -1, 0
        for hi, c in enumerate(word):
            if hi > 0 and c < word[hi - 1]:  # order broken: restart the run
                seen = set()
                lo = hi - 1
            seen.add(c)
            if len(seen) == 5:  # all five vowels present, in order
                longest = max(longest, hi - lo)
        return longest
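# Quick check (illustrative; not part of the original snippet). The second class
# definition shadows the first, so Solution here is the set-based version; the
# input/expected values are from LeetCode 1839's examples.
if __name__ == "__main__":
    assert Solution().longestBeautifulSubstring("aeiaaioaaaaeiiiiouuuooaauuaeiu") == 13
    assert Solution().longestBeautifulSubstring("aeeeiiiioooauuuaeiou") == 5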
|
1600409
|
from django.core.exceptions import ObjectDoesNotExist
from mozdns.views import MozdnsCreateView
from mozdns.views import MozdnsDeleteView
from mozdns.views import MozdnsDetailView
from mozdns.views import MozdnsListView
from mozdns.views import MozdnsUpdateView
from mozdns.ptr.forms import PTRForm
from mozdns.ptr.models import PTR
from mozdns.domain.models import Domain
from core.network.utils import calc_parent_str
class PTRView(object):
model = PTR
form_class = PTRForm
queryset = PTR.objects.all()
class PTRDeleteView(PTRView, MozdnsDeleteView):
""" """
class PTRDetailView(PTRView, MozdnsDetailView):
""" """
template_name = "ptr/ptr_detail.html"
class PTRCreateView(PTRView, MozdnsCreateView):
def get_form(self, *args, **kwargs):
initial = self.get_form_kwargs()
if 'ip_type' in self.request.GET and 'ip_str' in self.request.GET:
ip_str = self.request.GET['ip_str']
ip_type = self.request.GET['ip_type']
network = calc_parent_str(ip_str, ip_type)
if network and network.vlan and network.site:
expected_name = "{0}.{1}.mozilla.com".format(
network.vlan.name, network.site.get_site_path())
try:
domain = Domain.objects.get(name=expected_name)
except ObjectDoesNotExist:
domain = None
if domain:
initial['initial'] = {'ip_str': ip_str,
'name': "." + domain.name,
'ip_type': ip_type}
else:
initial['initial'] = {'ip_str': ip_str, 'ip_type': ip_type}
return PTRForm(**initial)
class PTRUpdateView(PTRView, MozdnsUpdateView):
""" """
class PTRListView(PTRView, MozdnsListView):
""" """
|
1600436
|
import os
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_bio
import pandas as pd
import numpy as np
import math
import plotly.graph_objects as go
from layout_helper import run_standalone_app
text_style = {"color": "#506784", "font-family": "Open Sans"}
_COMPONENT_ID = "pileup-browser"
def description():
return "An interactive in-browser track viewer."
def azure_url(file):
return os.path.join(
"https://sampleappsdata.blob.core.windows.net/dash-pileup-demo/rna/", file
)
def header_colors():
return {
"bg_color": "#0F5BA7",
"font_color": "white",
}
def rna_differential(app):
basal_lactate = {
"url": azure_url("SRR1552454.fastq.gz.sampled.bam"),
"indexUrl": azure_url("SRR1552454.fastq.gz.sampled.bam.bai"),
}
luminal_lactate = {
"url": azure_url("SRR1552448.fastq.gz.sampled.bam"),
"indexUrl": azure_url("SRR1552448.fastq.gz.sampled.bam.bai"),
}
HOSTED_TRACKS = {
"range": {"contig": "chr1", "start": 54986297, "stop": 54991347},
"celltype": [
{"viz": "scale", "label": "Scale"},
{"viz": "location", "label": "Location"},
{
"viz": "genes",
"label": "genes",
"source": "bigBed",
"sourceOptions": {"url": azure_url("mm10.ncbiRefSeq.sorted.bb")},
},
{
"viz": "coverage",
"label": "Basal",
"source": "bam",
"sourceOptions": basal_lactate,
},
{
"viz": "pileup",
"vizOptions": {"viewAsPairs": True},
"label": "Basal",
"source": "bam",
"sourceOptions": basal_lactate,
},
{
"viz": "coverage",
"label": "Luminal",
"source": "bam",
"sourceOptions": luminal_lactate,
},
{
"viz": "pileup",
"label": "Luminal",
"source": "bam",
"sourceOptions": luminal_lactate,
},
],
}
return HOSTED_TRACKS
REFERENCE = {
"label": "mm10",
"url": "https://hgdownload.cse.ucsc.edu/goldenPath/mm10/bigZips/mm10.2bit",
}
DATAPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets/data")
# Differentially expressed genes (identified in R, see assets/data/rna/README.md)
DE_dataframe = pd.read_csv(azure_url("DE_genes.csv"))
# filter for the cell type condition
DE_dataframe = DE_dataframe[
DE_dataframe["Comparison"] == "luminal__v__basal"
].reset_index()
# add SNP column
DE_dataframe["SNP"] = "NA"
# get min and max effect sizes
df_min = math.floor(min(DE_dataframe["log2FoldChange"]))
df_max = math.ceil(max(DE_dataframe["log2FoldChange"]))
def layout(app):
HOSTED_CASE_DICT = rna_differential(app)
return html.Div(
id="pileup-body",
className="app-body",
children=[
html.Div(
id="pileup-control-tabs",
className="control-tabs",
children=[
dcc.Tabs(
id="pileup-tabs",
value="data",
children=[
dcc.Tab(
label="Volcano plot",
value="data",
children=html.Div(
className="control-tab",
children=[
"Effect Size",
dcc.RangeSlider(
id="pileup-volcanoplot-input",
min=df_min,
max=df_max,
step=None,
marks={
i: {"label": str(i)}
for i in range(df_min, df_max + 1, 2)
},
value=[-1, 1],
),
html.Br(),
dcc.Graph(
id="pileup-dashbio-volcanoplot",
figure=dash_bio.VolcanoPlot(
dataframe=DE_dataframe,
margin=go.layout.Margin(l=0, r=0, b=0),
legend={
"orientation": "h",
"yanchor": "bottom",
"y": 1.02,
"bgcolor": "#f2f5fa",
},
effect_size="log2FoldChange",
effect_size_line=[-1, 1],
title="Differentially Expressed Genes",
genomewideline_value=-np.log10(0.05),
p="padj",
snp="SNP",
gene="Gene",
),
),
],
),
),
dcc.Tab(
label="About this tutorial",
value="description",
children=html.Div(
className="control-tab",
children=[
html.H4(
className="description",
children="""Visualizing RNA-seq data with pileup.js
and volcano plots""",
),
dcc.Markdown(
"""
In this example, we use the pileup.js and volcano plot components from dash-bio
to visualize two RNA-sequencing
(RNA-seq) samples from two conditions. RNA-seq allows us to learn how the expression
of genes changes between different samples of interest. Here, we are looking at
RNA-seq from two samples that are taken from two different mouse cell types.
We refer to these different cell types as basal and luminal cell types.
On the right, we use pileup.js to visualize aligned reads from RNA-seq samples.
On the left, we have a volcano plot that visualizes the magnitude of change
in gene expression between the two samples. On the x-axis, the `Effect Size`
indicates the log2 fold change in expression
between the two conditions. On the y-axis, `-log10(p)` indicates the -log10(p-value)
for each gene. This p-value, along with the effect size,
can help determine whether each gene is significantly
differentially expressed between the conditions of interest.
To explore a gene, you can click on a gene in the volcano plot. After clicking on
a gene, the genomic region overlapping that gene will show up in the pileup.js
browser on the right. Now, you can investigate RNA-seq alignments at each
gene of interest. You may notice that genes with a negative effect size in the volcano
plot have more RNA-seq reads in the top sample (the basal cell type), while genes
with a positive effect size have more reads in the bottom sample
(the luminal cell type).
"""
),
],
),
),
dcc.Tab(
label="About pileup.js",
value="what-is",
children=html.Div(
className="control-tab",
children=[
html.H4(
className="what-is",
children="What is pileup.js?",
),
dcc.Markdown(
"""
The Dash pileup.js component is a high-performance genomics
data visualization component developed originally by the Hammer Lab
(https://github.com/hammerlab/pileup.js). pileup.js
supports visualization of genomic file formats, such as vcf,
bam, and bigbed files. pileup.js additionally allows flexible
interaction with non-standard data formats. Users can visualize
GA4GH JSON formatted alignments, features and variants. Users can
also connect with and visualize data stored in GA4GH formatted data
stores.
"""
),
],
),
),
],
)
],
),
dcc.Loading(
parent_className="dashbio-loading",
id="pileup-output",
children=html.Div(
[
dash_bio.Pileup(
id=_COMPONENT_ID,
range=HOSTED_CASE_DICT["range"],
reference=REFERENCE,
tracks=HOSTED_CASE_DICT["celltype"],
)
]
),
),
],
)
def callbacks(_app):
HOSTED_CASE_DICT = rna_differential(_app)
@_app.callback(
Output("pileup-dashbio-volcanoplot", "figure"),
[Input("pileup-volcanoplot-input", "value")],
)
def update_volcano(effects):
return dash_bio.VolcanoPlot(
dataframe=DE_dataframe,
margin=go.layout.Margin(l=0, r=0, b=0),
legend={"orientation": "h", "yanchor": "bottom", "y": 1.02, "x": 0.0,},
effect_size="log2FoldChange",
effect_size_line=effects,
title="Differentially Expressed Genes",
genomewideline_value=-np.log10(0.05),
p="padj",
snp="SNP",
gene="Gene",
)
@_app.callback(
Output(_COMPONENT_ID, "range"), Input("pileup-dashbio-volcanoplot", "clickData")
)
def update_range(point):
if point is None:
range = HOSTED_CASE_DICT["range"]
else:
# get genomic location of selected genes and goto
pointText = point["points"][0]["text"]
gene = pointText.split("GENE: ")[-1]
row = DE_dataframe[DE_dataframe["Gene"] == gene].iloc[0]
range = {"contig": row["chr"], "start": row["start"], "stop": row["end"]}
return range
app = run_standalone_app(layout, callbacks, header_colors, __file__)
server = app.server
if __name__ == "__main__":
app.run_server(debug=True, port=8050)
|
1600459
|
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django_dynamic_fixture import G
from django_webtest import WebTest
from fluent_pages.models import PageLayout
from fluent_pages.pagetypes.fluentpage.models import FluentPage
from . import descriptors
from icekit.utils import fluent_contents
from icekit.plugins.horizontal_rule.models import HorizontalRuleItem
User = get_user_model()
class PlaceholderDescriptor(WebTest):
def setUp(self):
self.site, __ = Site.objects.get_or_create(
pk=1,
defaults={'name': 'example.com', 'domain': 'example.com'})
self.user_1 = G(User)
descriptors.contribute_to_class(FluentPage)
self.page_layout_1 = G(
PageLayout,
template_path='icekit/layouts/default.html',
)
self.page_1 = FluentPage.objects.create(
author=self.user_1,
title='Test title',
layout=self.page_layout_1,
)
def test_descriptor(self):
self.assertIsInstance(FluentPage.slots, descriptors.PlaceholderDescriptor)
self.assertFalse(hasattr(self.page_1.slots, 'main'))
horizontal_rule_1 = fluent_contents.create_content_instance(
HorizontalRuleItem,
self.page_1
)
self.assertEqual(self.page_1.slots.main.count(), 1)
with self.assertRaises(AttributeError):
getattr(self.page_1.slots, 'fake_slot')
horizontal_rule_1.delete()
self.assertEqual(self.page_1.slots.main.count(), 0)
# Test that the same object is not returned. For context, we have had previous issues with
# slots which were bound to class objects and cached across instances.
self.assertNotEqual(self.page_1.slots, self.page_1.slots)
def tearDown(self):
self.page_1.delete()
self.page_layout_1.delete()
self.site.delete()
|
1600472
|
import collections
import copy
import six
import chainer
from chainer import configuration
from chainer.dataset import convert
from chainer.dataset import iterator as iterator_module
from chainer import function
from chainer import link
from chainer import reporter as reporter_module
from chainer.training import extension
class MicroEvaluator(chainer.training.extensions.Evaluator):
def evaluate(self):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
# summary = reporter_module.DictSummary()
summary = collections.defaultdict(list)
for batch in it:
observation = {}
with reporter_module.report_scope(observation):
in_arrays = self.converter(batch, self.device)
with function.no_backprop_mode():
if isinstance(in_arrays, tuple):
eval_func(*in_arrays)
elif isinstance(in_arrays, dict):
eval_func(**in_arrays)
else:
eval_func(in_arrays)
n_data = len(batch)
summary['n'].append(n_data)
# summary.add(observation)
for k, v in observation.items():
summary[k].append(v)
mean = dict()
ns = summary['n']
del summary['n']
for k, vs in summary.items():
mean[k] = sum(v * n for v, n in zip(vs, ns)) / sum(ns)
return mean
# return summary.compute_mean()
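# Worked example (illustrative, not in the original file): the aggregation above
# is a batch-size-weighted mean, so an unequal final batch is handled correctly.
# With two batches of sizes 3 and 1 reporting per-batch means 0.5 and 0.9:
#   mean = (0.5*3 + 0.9*1) / (3 + 1) = 0.6
# whereas naively averaging the per-batch means would give 0.7.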
|
1600474
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.controllers import ControllerWaterCoil
log = logging.getLogger(__name__)
class TestControllerWaterCoil(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_controllerwatercoil(self):
pyidf.validation_level = ValidationLevel.error
obj = ControllerWaterCoil()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_control_variable = "Temperature"
obj.control_variable = var_control_variable
# alpha
var_action = "Normal"
obj.action = var_action
# alpha
var_actuator_variable = "Flow"
obj.actuator_variable = var_actuator_variable
# node
var_sensor_node_name = "node|Sensor Node Name"
obj.sensor_node_name = var_sensor_node_name
# node
var_actuator_node_name = "node|Actuator Node Name"
obj.actuator_node_name = var_actuator_node_name
# real
var_controller_convergence_tolerance = 7.7
obj.controller_convergence_tolerance = var_controller_convergence_tolerance
# real
var_maximum_actuated_flow = 8.8
obj.maximum_actuated_flow = var_maximum_actuated_flow
# real
var_minimum_actuated_flow = 9.9
obj.minimum_actuated_flow = var_minimum_actuated_flow
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.controllerwatercoils[0].name, var_name)
self.assertEqual(idf2.controllerwatercoils[0].control_variable, var_control_variable)
self.assertEqual(idf2.controllerwatercoils[0].action, var_action)
self.assertEqual(idf2.controllerwatercoils[0].actuator_variable, var_actuator_variable)
self.assertEqual(idf2.controllerwatercoils[0].sensor_node_name, var_sensor_node_name)
self.assertEqual(idf2.controllerwatercoils[0].actuator_node_name, var_actuator_node_name)
self.assertAlmostEqual(idf2.controllerwatercoils[0].controller_convergence_tolerance, var_controller_convergence_tolerance)
self.assertAlmostEqual(idf2.controllerwatercoils[0].maximum_actuated_flow, var_maximum_actuated_flow)
self.assertAlmostEqual(idf2.controllerwatercoils[0].minimum_actuated_flow, var_minimum_actuated_flow)
|
1600488
|
from os import system
import time
import conexion as conn
db = conn.DB()
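# For reference (illustrative sketch; the real conexion module is not shown in
# this file), a minimal DB class compatible with the calls below might look like:
#
#   import sqlite3
#   class DB:
#       def __init__(self, path="sistema.db"):
#           self.con = sqlite3.connect(path)
#       def ejecutar_consulta(self, sql, parametros=()):
#           cur = self.con.cursor()
#           cur.execute(sql, parametros)
#           self.con.commit()
#           return cur.fetchall()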
system("clear")
def create():
    name = str(input("ENTER YOUR NAME: "))
    email = str(input("ENTER YOUR EMAIL: "))
    if(len(name) > 0 and len(email) > 0):
        sql = "INSERT INTO sistema(name,email) VALUES(?,?)"
        parametros = (name,email)
        db.ejecutar_consulta(sql,parametros)
        print("Inserted")
def read():
sql = "SELECT * FROM sistema"
result = db.ejecutar_consulta(sql)
for data in result:
print("""
ID : {}
NOMBRE : {}
EMAIL : {}
""".format(data[0],data[1],data[2]))
def update():
    id = int(input("ENTER THE ID: "))
    if(id != 0):
        name = str(input("ENTER YOUR NAME: "))
        email = str(input("ENTER YOUR EMAIL: "))
        if(len(name) > 0 and len(email) > 0):
            sql = "UPDATE sistema SET name=?,email=? WHERE id=?"
            parametros = (name,email,id)
            db.ejecutar_consulta(sql,parametros)
            print("Updated!")
    else:
        print("An ID is required")
def delete():
    id = int(input("ENTER THE ID: "))
    if(id != 0):
        sql = "DELETE FROM sistema WHERE id=?"
        parametros = (id,)
        db.ejecutar_consulta(sql,parametros)
        print("Deleted!")
    else:
        print("An ID is required")
def search():
    nombre = str(input("Search by name: "))
    if(len(nombre) > 0):
        sql = "SELECT * FROM sistema WHERE name LIKE ?"
        parametros = ('%{}%'.format(nombre),)
        result = db.ejecutar_consulta(sql,parametros)
        for data in result:
            print("""
            +ID : {}
            +NAME : {}
            +EMAIL : {}""".format(data[0],data[1],data[2]))
while True:
print("=========================================")
print("\tCRUD CON SQLite3")
print("=========================================")
print("\t[1] Insertar registro")
print("\t[2] Listar registros")
print("\t[3] Actualizar registros")
print("\t[4] Eliminar registros")
print("\t[5] Buscar registros")
print("\t[6] Salir")
print("=========================================")
try:
opcion = int(input("Selecciona una opcion: "))
if(opcion == 1):
create()
time.sleep(1)
system("clear")
elif (opcion == 2):
read()
time.sleep(1)
elif (opcion == 3):
update()
time.sleep(1)
system("clear")
elif (opcion == 4):
delete()
time.sleep(1)
system("clear")
elif (opcion == 5):
search()
elif (opcion == 6):
break
    except ValueError:
        # int() raises ValueError on non-numeric input; pause so the message is visible
        print("Please select a valid option")
        time.sleep(1)
        system("clear")
|